
I tested feature matching with ORB, SIFT, and SURF.

SIFT performed best, SURF came second, and ORB was the worst.

But people say ORB is better than SIFT.

In my case, I would like to know why. [matching result images]

The original images are attached as well.

[original images]

My code is as follows.

// SIFT and SURF live in the xfeatures2d contrib module on OpenCV 3.x
// (on OpenCV >= 4.4, SIFT has moved into the core features2d module).
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <limits>

using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;

// Return codes (the post does not define these; the values are assumed)
#define SUCCESS 0
#define FAILED_IN_LOAD_IMAGES -1
#define FAILED_IN_FIND_HOMOGRAPHY -2

int imagematching(Mat &img1, Mat &img2){
    if( !img1.data || !img2.data )
    {
        std::cout << " --(!) Error reading images " << std::endl;
        return FAILED_IN_LOAD_IMAGES;
    }
    int max_keypoints = 500;
#if defined(USE_SURF)
    // Note: SURF::create()'s first argument is the Hessian threshold,
    // not a cap on the number of keypoints.
    Ptr<SURF> detector = SURF::create( max_keypoints );
    Ptr<SURF> extractor = SURF::create();
#elif defined(USE_SIFT)
    Ptr<SIFT> detector = SIFT::create();
    Ptr<SIFT> extractor = SIFT::create();
#elif defined(USE_ORB)
    Ptr<ORB> detector = ORB::create();
#endif
    //--Step 1: Key point detection
    std::vector<KeyPoint> keypoints1, keypoints2;
    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors1, descriptors2;
#if defined(USE_SURF) || defined(USE_SIFT)    
    detector->detect( img1, keypoints1 );
    detector->detect( img2, keypoints2 );   

    extractor->compute(img1, keypoints1, descriptors1);
    extractor->compute(img2, keypoints2, descriptors2);
#elif defined(USE_ORB)
    Mat dp1, dp2;
    detector->detectAndCompute(img1, noArray(), keypoints1, dp1);
    detector->detectAndCompute(img2, noArray(), keypoints2, dp2);
    //-- FlannBasedMatcher's default KD-tree index works on CV_32F data,
    //-- so convert ORB's binary CV_8U descriptors; otherwise use them
    //-- as-is so descriptors1/descriptors2 are never left empty
    if( dp1.type() != CV_32F ) dp1.convertTo(descriptors1, CV_32F);
    else descriptors1 = dp1;
    if( dp2.type() != CV_32F ) dp2.convertTo(descriptors2, CV_32F);
    else descriptors2 = dp2;
#endif

#if defined(USE_FLANN)
    FlannBasedMatcher matcher;
#else
    // Fallback so the code still compiles when USE_FLANN is not defined
    BFMatcher matcher(NORM_L2);
#endif
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);

    double max_dist = 0;
    double min_dist = std::numeric_limits<double>::max();

    //-- Quick calculation of max and min distances between matches
    for( size_t i = 0; i < matches.size(); i++ )
    {
      double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;

    for( size_t i = 0; i < matches.size(); i++ )
    {
      if( matches[i].distance < 3*min_dist )
         { good_matches.push_back( matches[i] ); }
    }
    matches.clear();

    Mat img_matches;
    drawMatches( img1, keypoints1, img2, keypoints2,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    descriptors1.release();
    descriptors2.release();

    //-- Localize the object
    std::vector<Point2f> first_keypoints;
    std::vector<Point2f> second_keypoints;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
       //-- Get the keypoints from the good matches
       first_keypoints.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
       second_keypoints.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
    }
    keypoints1.clear();
    keypoints2.clear();
    good_matches.clear();
    //-- findHomography needs at least 4 correspondences; bail out early otherwise
    if( first_keypoints.size() < 4 )
    {
        std::cout << " --(!) Not enough good matches to estimate a homography " << std::endl;
        return FAILED_IN_FIND_HOMOGRAPHY;
    }
    Mat H = findHomography( first_keypoints, second_keypoints, RANSAC );

    first_keypoints.clear();
    second_keypoints.clear();

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> first_image_corners(4);
    first_image_corners[0] = Point2f( 0, 0 );
    first_image_corners[1] = Point2f( (float)img1.cols, 0 );
    first_image_corners[2] = Point2f( (float)img1.cols, (float)img1.rows );
    first_image_corners[3] = Point2f( 0, (float)img1.rows );
    std::vector<Point2f> second_image_corners(4);

    perspectiveTransform( first_image_corners, second_image_corners, H);

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, second_image_corners[0] + Point2f( img1.cols, 0), second_image_corners[1] + Point2f( img1.cols, 0), Scalar(0, 255, 0), 4 );
    line( img_matches, second_image_corners[1] + Point2f( img1.cols, 0), second_image_corners[2] + Point2f( img1.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, second_image_corners[2] + Point2f( img1.cols, 0), second_image_corners[3] + Point2f( img1.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, second_image_corners[3] + Point2f( img1.cols, 0), second_image_corners[0] + Point2f( img1.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );

    waitKey(0);
    img_matches.release();
    first_image_corners.clear();
    second_image_corners.clear();
    return SUCCESS;
}
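
For reference, a minimal driver for the function above might look like the following sketch (the file names are placeholders; the post does not show how the images are loaded):

// Hypothetical driver: "object.png" and "scene.png" stand in for the
// attached images, whose actual file names are not given in the post.
int main()
{
    Mat img1 = imread("object.png");
    Mat img2 = imread("scene.png");
    return imagematching(img1, img2);
}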

Is there anything wrong with my code?

Thanks.


1 Answer


Binary descriptors (such as ORB's) are more efficient to match and take up less memory, so there is a trade-off between detection efficiency and accuracy.
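
To make the matching-efficiency point concrete, here is a minimal sketch of matching ORB's binary descriptors with Hamming distance, the metric they are designed for (img1 and img2 are assumed to be images loaded elsewhere):

#include <opencv2/opencv.hpp>
using namespace cv;

// Minimal sketch: compare ORB's binary (CV_8U) descriptors with Hamming
// distance instead of converting them to CV_32F for an L2 matcher.
void orb_hamming_match(const Mat& img1, const Mat& img2,
                       std::vector<DMatch>& matches)
{
    Ptr<ORB> orb = ORB::create();
    std::vector<KeyPoint> kp1, kp2;
    Mat d1, d2;
    orb->detectAndCompute(img1, noArray(), kp1, d1);
    orb->detectAndCompute(img2, noArray(), kp2, d2);
    // Hamming distance counts differing bits, which is very cheap;
    // crossCheck keeps only mutually-best matches.
    BFMatcher matcher(NORM_HAMMING, /*crossCheck=*/true);
    matcher.match(d1, d2, matches);
}

Converting ORB descriptors to CV_32F and matching them with an L2-based matcher, as the question's FLANN path does, gives up this cheap metric.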

Here is a paper comparing ORB and SIFT: https://www.sciencedirect.com/science/article/pii/S0167865516303300. It reports that ORB is two orders of magnitude faster than SIFT, but the trade-off is that the authors found ORB to be more sensitive to noise.
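
A rough way to check the speed gap on one's own images (a minimal sketch; absolute timings depend on image resolution and how OpenCV was built):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

// Time the detect+describe stage of one detector on one image.
double time_detect(Ptr<Feature2D> f, const Mat& img)
{
    std::vector<KeyPoint> kp;
    Mat desc;
    int64 t0 = getTickCount();
    f->detectAndCompute(img, noArray(), kp, desc);
    return (getTickCount() - t0) / getTickFrequency();
}

// Usage (on OpenCV 3.x, SIFT::create lives in cv::xfeatures2d):
//   std::cout << "ORB:  " << time_detect(ORB::create(), img)  << " s\n";
//   std::cout << "SIFT: " << time_detect(SIFT::create(), img) << " s\n";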

answered 2018-06-08T14:37:06.120