
So I have two thermal images (potato quality, I know, but it's what I have to work with), the first two images in this album. I'm using some code from a tutorial that is very common, but it has been edited a lot.

http://imgur.com/a/Zch7C

What I do in my code is:

1. Detecting KeyPoints
2. Describe the KeyPoints
3. Match the KeyPoints
4. Keep only good points
5. Gather both Query and Train points
6. Find Homography
7. Warp one of the images
8. Repeat the above steps for the warped image and the other original image

Now my question is: should the change in (x, y) position between the same point in the two images be identical for every pair of matched points?

The whole frame is moving in the same direction, so no matter which matched points we look at, the change should be the same, shouldn't it?

What I'm finding is that the offsets vary from point to point; some differ by 5 pixels, some by 700. The only thing I can think of is that the matching isn't actually good and it is pairing two points that don't correspond but happen to sit near the same spot in different frames.
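
To check this numerically, something like the sketch below would print each pair's (dx, dy) offset (printDisplacements is just a name I made up; it assumes the obj and scene vectors built in my code further down):

    //Hypothetical helper: print the (dx, dy) offset of every matched pair
    void printDisplacements(const vector<Point2f>& obj, const vector<Point2f>& scene)
    {
        for (size_t i = 0; i < obj.size(); i++)
        {
            Point2f d = scene[i] - obj[i];
            cout << "Match #" << i << " offset: (" << d.x << ", " << d.y << ")" << endl;
        }
    }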

I need to know what the offset is so that I can lay one frame on top of the other, average the overlapping pixel values, and build a new image from the composite/average of the two originals.
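
If the offset really were a single pure translation (dx, dy), the overlay-and-average step might look roughly like the sketch below (averageWithOffset is a made-up helper; a real composite would also mask out the non-overlapping region instead of averaging it against black):

    //Sketch: shift frame2 by a pure translation (dx, dy) into frame1's
    //coordinates, then average the two frames
    Mat averageWithOffset(const Mat& frame1, const Mat& frame2, float dx, float dy)
    {
        //2x3 affine matrix encoding the translation
        Mat shift = (Mat_<double>(2, 3) << 1, 0, dx, 0, 1, dy);

        Mat shifted;
        warpAffine(frame2, shifted, shift, frame1.size());

        //Average everywhere; pixels where the shifted frame is empty
        //come out darkened, which the masking mentioned above would fix
        Mat blended;
        addWeighted(frame1, 0.5, shifted, 0.5, 0.0, blended);
        return blended;
    }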

The code I'm using is below:

    #include <stdio.h>
    #include <iostream>

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "stitch.cpp"
#include "opencv2\stitching\stitcher.hpp"
#include "opencv2\nonfree\features2d.hpp"

using namespace cv;
using namespace std;

void readme();
Mat describe(Mat img, vector<KeyPoint>& key);
vector<KeyPoint> detect(Mat img);
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo);

/** @function main */
int main(int argc, char** argv)
{
    VideoCapture cap("vid.mp4");

    vector<Mat> Vimg;

    cout << "Grabbing Images" << endl;
    for (int i = 0; i < 2; i++)
    {
        cout << "Grabbing Frame" << i << endl;
        Mat temp;
        cap.read(temp);
        //Clone the frame: VideoCapture may reuse its internal buffer
        Vimg.push_back(temp.clone());
        imwrite("image" + to_string(i) + ".jpg", temp);
        //Skip ahead so the two stored frames are further apart
        for (int j = 0; j < 80; j++)
            cap.grab();
    }
    //Mat cimg1 = Vimg[0];
    //Mat cimg2 = Vimg[1];

    Mat cimg1 = imread("cap1.png");
    Mat cimg2 = imread("cap2.png");

    cout << "Starting Stitching" << endl;

    //Converting the original images to grayscale
    Mat img1, img2;
    cvtColor(cimg1, img1, CV_BGR2GRAY);
    cvtColor(cimg2, img2, CV_BGR2GRAY);

    //Detecting Keypoints for original two images
    vector<KeyPoint> keypointOne = detect(img1), keypointTwo = detect(img2);

    Mat mkeypointOne, mkeypointTwo;

    drawKeypoints(cimg1, keypointOne, mkeypointOne, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    drawKeypoints(cimg2, keypointTwo, mkeypointTwo, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    imwrite("keypointOne.jpg", mkeypointOne);
    imwrite("keypointTwo.jpg", mkeypointTwo);

    //Computing descriptors 
    Mat descriptionOne = describe(img1, keypointOne), descriptionTwo = describe(img2, keypointTwo);

    //Matching descriptors  
    vector<DMatch> matches = match(descriptionOne, descriptionTwo); 


    double maxDist = 0;
    double minDist = 100;

    //Calculation of max and min descriptor distances
    for (int i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < minDist) minDist = dist;
        if (dist > maxDist) maxDist = dist;
    }

    vector<DMatch> goodMatches;

    //Keep only good matches: distance below 2*minDist, with a small
    //floor in case minDist is essentially zero
    for (int i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance < std::max(2 * minDist, 0.02))
            goodMatches.push_back(matches[i]);
    }

    //Localize
    vector<Point2f> obj;
    vector<Point2f> scene;

    for (int i = 0; i < goodMatches.size(); i++)
    {
        obj.push_back(keypointOne[goodMatches[i].queryIdx].pt);
        scene.push_back(keypointTwo[goodMatches[i].trainIdx].pt);
    }

    /*
    for (int k = 0; k < obj.size(); k++)
    {
        cout << "Point data for Match #" << k << endl;
        cout << "\tImage 1 Point: " << obj[k] << endl;
        cout << "\tImage 2 Point: " << scene[k] << endl;
    }*/

    //H maps points in image 1 to the corresponding points in image 2
    Mat H = findHomography(obj, scene, CV_RANSAC);

    //Warping the image to fit on first image
    Mat cwarpImage, warpImage;

    //TODO: figure out the right size for this image that is created
    //Since H goes from image 1 to image 2, warping image 2 into image 1's
    //frame uses the inverse mapping (WARP_INVERSE_MAP)
    warpPerspective(cimg2, cwarpImage, H, Size(img2.cols + img1.cols, img2.rows + img1.rows), INTER_LINEAR | WARP_INVERSE_MAP);

    /*
    Mat result;
    Mat half(warpImage, Rect(0, 0, img2.cols, img2.rows));
    cimg2.copyTo(half);
    */

    //cwarpImage holds the color warp; warpImage is not filled until cvtColor below
    imwrite("warp.jpg", cwarpImage);

    //Processing Image
    cvtColor(cwarpImage, warpImage, CV_BGR2GRAY);
    vector<KeyPoint> keypointWarp = detect(warpImage);
    Mat descriptionWarp = describe(warpImage, keypointWarp);
    vector<DMatch> warpMatches = match(descriptionOne, descriptionWarp);
    Mat mkeypointWarp;
    drawKeypoints(cwarpImage, keypointWarp, mkeypointWarp, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imwrite("keypointWarp.jpg", mkeypointWarp);

    Mat matchImg; //renamed so it doesn't shadow the match() helper
    drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, matchImg, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    //imshow("match", matchImg);
    imwrite("matches.jpg", matchImg);

    //Localize
    vector<Point2f> obj2;
    vector<Point2f> scene2;

    for (int i = 0; i < warpMatches.size(); i++)
    {
        obj2.push_back(keypointOne[warpMatches[i].queryIdx].pt);
        scene2.push_back(keypointWarp[warpMatches[i].trainIdx].pt);
    }


    for (int k = 0; k < obj2.size(); k++)
    {
        cout << "Point data for Match #" << k << endl;
        cout << "\tImage 1 Point: " << obj2[k] << endl;
        cout << "\tImage 2 Point: " << scene2[k] << endl;
    }

    vector<unsigned char> inliersMask;
    //Use the second set of correspondences so the mask lines up with warpMatches
    Mat H2 = findHomography(obj2, scene2, CV_RANSAC, 3, inliersMask);

    vector<DMatch> inliers;
    for (size_t i = 0; i < inliersMask.size(); i++)
    {
        if (inliersMask[i])
            inliers.push_back(warpMatches[i]);
    }

    warpMatches.swap(inliers);

    Mat match2;
    drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, match2, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imwrite("homorgraphyOutlierMatch.jpg", match2);

    cout << "Writing Warp Image" << endl;
    imwrite("warpimage.jpg", warpImage);
    cout << H << endl;

    waitKey(0);
    return 0;
}

Mat describe(Mat img, vector<KeyPoint>& key)
{
    Mat temp;
    SurfDescriptorExtractor extractor;
    //compute() can drop keypoints it cannot describe, so take the vector
    //by reference to keep the indices in sync with the descriptor rows
    extractor.compute(img, key, temp);
    return temp;
}

vector<KeyPoint> detect(Mat img)
{
    vector<KeyPoint> temp;
    SurfFeatureDetector detector(400);
    detector.detect(img, temp);
    return temp;
}

vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo)
{
    vector<DMatch> temp;
    BFMatcher matcher(NORM_L2, true);
    matcher.match(descriptionOne, descriptionTwo, temp);    

    return temp;
}

EDIT:

I set crossCheck to true in the BFMatcher and implemented the homography outlier detection from Mastering OpenCV. Here are the two new results. I wasn't sure whether I should implement both the cross check and knnMatch, so I only did the cross check.

http://imgur.com/a/1P7Xt

As you can see, they are much better, but there are still some matches that shouldn't be there. I ran it with both the full-color and the thermal images. The new code is above as well.


1 Answer


While in the general case the change in distance between point correspondences will not be identical for all points, you would not expect deltas on the order of 700 pixels when the image is only around 1300 pixels across.
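
To make that concrete: a pure translation

    H = [ 1  0  tx ]
        [ 0  1  ty ]
        [ 0  0  1  ]

moves every point by the same (tx, ty). A general homography, however, divides by its third row, w = h31*x + h32*y + h33, so the displacement (x' - x, y' - y) depends on where (x, y) sits in the image; any rotation, scale, or perspective change between the frames produces different deltas at different points.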

Inspecting the images you posted makes it clear that your point correspondences are not correct (simply put, there are a lot of crossing lines in the matches between the two images).

This suggests that your step 4 is not doing a good job. You may want to try setting the second parameter of the brute-force matcher to true to enable the cross-check test:

 BFMatcher matcher(NORM_L2, true);

You may also want to consider using the ratio test to discard outliers, as described here: How to apply the ratio test in order to remove outliers in a multiple object detection matcher?
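
A minimal sketch of that ratio test (ratioTestMatch is a made-up name; 0.8 is a typical threshold, tune as needed):

    //Lowe's ratio test: keep a match only if its best distance is clearly
    //smaller than the second-best distance
    vector<DMatch> ratioTestMatch(const Mat& descriptionOne, const Mat& descriptionTwo)
    {
        BFMatcher matcher(NORM_L2); //no cross check: knnMatch needs k = 2 here
        vector<vector<DMatch> > knn;
        matcher.knnMatch(descriptionOne, descriptionTwo, knn, 2);

        vector<DMatch> good;
        for (size_t i = 0; i < knn.size(); i++)
        {
            if (knn[i].size() == 2 && knn[i][0].distance < 0.8f * knn[i][1].distance)
                good.push_back(knn[i][0]);
        }
        return good;
    }

Regarding your edit: as far as I know you don't combine the two, since cross-checking assumes a single best match per descriptor while the ratio test needs the two nearest neighbours from knnMatch; pick one or the other.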

answered 2015-05-07T01:23:15.373