int method = 0;

std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
cv::Mat descriptors_object, descriptors_scene;

cv::ORB orb; // ORB with default parameters (nfeatures = 500, scaleFactor = 1.2f, nlevels = 8)

//-- ORB detector/extractor with custom parameters
cv::OrbFeatureDetector detector(25, 1.0f, 2, 10, 0, 2, 0, 10);
cv::OrbDescriptorExtractor extractor;

//-- detect keypoints and compute descriptors for the object (template) image
if( method == 0 ) { //-- ORB with default parameters
    orb.detect(img_object, keypoints_object);
    orb.compute(img_object, keypoints_object, descriptors_object);
} else { //-- ORB with the custom parameters above
    detector.detect(img_object, keypoints_object);
    extractor.compute(img_object, keypoints_object, descriptors_object);
}
// FLANN-based matching would require CV_32F descriptors (http://stackoverflow.com/a/11798593);
// not needed here, since BFMatcher handles the CV_8U descriptors ORB produces
//if(descriptors_object.type() != CV_32F)
//    descriptors_object.convertTo(descriptors_object, CV_32F);


    cv::Mat frame = cv::imread("E:\\Projects\\Images\\2-134-2.bmp", 1);
    cv::Mat img_scene = cv::Mat(frame.size(), CV_8UC1);
    cv::cvtColor(frame, img_scene, cv::COLOR_BGR2GRAY); // imread loads BGR, not RGB

    double t = (double)cv::getTickCount(); // start timing detection + matching
    if( method == 0 ) { //-- ORB
        orb.detect(img_scene, keypoints_scene);
        orb.compute(img_scene, keypoints_scene, descriptors_scene);
    } else { //-- ORB with the custom parameters
        detector.detect(img_scene, keypoints_scene);
        extractor.compute(img_scene, keypoints_scene, descriptors_scene);
    }

    //-- match descriptor vectors with a brute-force matcher (default norm is NORM_L2)
    cv::BFMatcher matcher;
    std::vector<cv::DMatch> matches;
    cv::Mat img_matches;
    if(!descriptors_object.empty() && !descriptors_scene.empty()) {
        matcher.match (descriptors_object, descriptors_scene, matches);

        double max_dist = 0; double min_dist = 100;

        //-- quick calculation of the max and min distance between matched keypoints
        for( int i = 0; i < descriptors_object.rows; i++ ) {
            double dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
        //printf("-- Max dist : %f \n", max_dist );
        //printf("-- Min dist : %f \n", min_dist );
        //-- keep only the good matches (i.e. those whose distance is less than max_dist/1.6)
        std::vector<cv::DMatch> good_matches;

        for( int i = 0; i < descriptors_object.rows; i++ ) {
            if( matches[i].distance < (max_dist/1.6) )
                good_matches.push_back( matches[i] );
        }

        cv::drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
                good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
                std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        //-- localize the object
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;

        for( size_t i = 0; i < good_matches.size(); i++) {
            //-- get the keypoints from the good matches
            obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }
        if( !obj.empty() && !scene.empty() && good_matches.size() >= 4) {
            cv::Mat H = cv::findHomography( obj, scene, cv::RANSAC );

            //-- get the corners from the object to be detected
            std::vector<cv::Point2f> obj_corners(4);
            obj_corners[0] = cv::Point2f(0, 0);
            obj_corners[1] = cv::Point2f((float)img_object.cols, 0);
            obj_corners[2] = cv::Point2f((float)img_object.cols, (float)img_object.rows);
            obj_corners[3] = cv::Point2f(0, (float)img_object.rows);

            std::vector<cv::Point2f> scene_corners(4);

            cv::perspectiveTransform( obj_corners, scene_corners, H);

            //-- draw lines between the mapped corners of the object in the scene;
            //-- the x-offset shifts the corners into the scene half of the side-by-side image
            cv::Point2f offset((float)img_object.cols, 0);
            cv::line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, cv::Scalar(0,255,0), 4 );
            cv::line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, cv::Scalar(0,255,0), 4 );
            cv::line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, cv::Scalar(0,255,0), 4 );
            cv::line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, cv::Scalar(0,255,0), 4 );

        }
    }

    t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
    printf("Time: %f ms\n", t * 1000.0);

    if( !img_matches.empty() ) {
        cv::imshow("match result", img_matches);
        cv::waitKey();
    }


return 0;

Here I am performing template matching between two images. I extract keypoints with the ORB algorithm and match them with the BF Matcher, but I am not getting good results. I have added images to illustrate the problem: finding the object image inside the frame image.

You can see the dark blue line on the teddy bear; it is actually supposed to be a rectangle drawn around the object in the frame image once the object is identified from the matched keypoints. I am using OpenCV 2.4.9. What changes should I make to get good results?


1 Answer


There are many parameters you can tune in any feature detection + extraction stage and in the subsequent homography estimation. The main point to realize, however, is that it is almost always a trade-off between computation time and accuracy.

The most critical failure point in your code is your ORB initialization:

cv::OrbFeatureDetector detector(25, 1.0f, 2, 10, 0, 2, 0, 10);
  1. The first parameter tells the extractor to keep only the top 25 results from the detector. For a robust estimation of an 8-DOF homography with no restrictions on its parameters, you should have an order of magnitude more features than parameters, i.e. 80, or just round it up to 100 (a corrected initialization is sketched after this list).
  2. The second parameter is the factor by which the image is scaled down (or the detector patch scaled up) between octaves (or levels). Using 1.0f means you do not change the scale between octaves, which makes no sense, especially since your third parameter, the number of levels, is 2 and not 1. The defaults are a 1.2f scale factor and 8 levels; for less computation, use a 1.5f scale factor and 4 levels (again, just a suggestion; other values will work too).
  3. Your fourth and your last parameters say that the patch size over which to compute the features is 10x10. That is quite small, but if you work with low-resolution images it is fine.
  4. Your score type (the parameter before the last one) can change the runtime slightly; you can use ORB::FAST_SCORE instead of ORB::HARRIS_SCORE, but it hardly matters.
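Putting those suggestions together, a corrected initialization might look like the sketch below; the exact values are illustrative, not the only valid choice:

// ~100 features, 1.5f scale factor, 4 pyramid levels, and edge threshold /
// patch size restored to the 31-pixel default
cv::OrbFeatureDetector detector(100, 1.5f, 4, 31, 0, 2, cv::ORB::HARRIS_SCORE, 31);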

Last but not least, when you initialize the BruteForce Matcher object, you should remember to use the cv::NORM_HAMMING type, since ORB is a binary feature; this makes the norm computed during matching actually meaningful.
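In code it is a one-line change; the optional crossCheck flag, which keeps only mutual best matches, is a further filter you can experiment with:

cv::BFMatcher matcher(cv::NORM_HAMMING);        // Hamming distance for binary ORB descriptors
//cv::BFMatcher matcher(cv::NORM_HAMMING, true); // optional: cross-check keeps mutual best matches only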

Answered on 2016-01-17T13:13:36.500