
I am trying to use this code so that I can run SURF on color frames/images, and then use the Kalman_Color_Object_Track code from here to track the detected object by its color values with a Kalman filter. These are the steps I intend to follow, but I am stuck because this SURF detection code does not accept/work with color images:

  1. "book1.png" is a color image.
  2. Once the rectangle around the object is detected in the incoming frame, convert the Mat structures to IplImage, since the Kalman_Color_Object_Track code is in C++ (see the conversion sketch after this list):

    dest_image=cvCloneImage(&(IplImage)image);

    mat_frame=cvCloneImage(&(IplImage)frame);

  3. Call the Kalman_Color_Object_Track( mat_frame, dest_image, 30 ); method.
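
A minimal sketch of the step-2 conversion, assuming OpenCV 2.x (where cv::Mat still converts to an IplImage header); the helper name cloneToIplImage is mine:

    #include "opencv2/core/core.hpp"
    #include "opencv2/core/core_c.h"    // IplImage, cvCloneImage, cvReleaseImage

    // Deep-copy a cv::Mat into a caller-owned IplImage for the legacy C API.
    IplImage* cloneToIplImage( const cv::Mat& m )
    {
        IplImage header = m;            // shallow header sharing m's pixel data
        return cvCloneImage( &header ); // deep copy; free with cvReleaseImage()
    }

Going through a named header avoids taking the address of the temporary that `&(IplImage)image` creates, which newer compilers reject.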

Questions: (A) How can I make this code work so that SURF features can be extracted from and detected in a color image? (B) I am not sure what should be passed in the function signature of Kalman_Color_Object_Track(), and (C) where in the object detection module should it be called?

    #include <stdio.h>
    #include <iostream>
    #include "opencv2/core/core.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/calib3d/calib3d.hpp"
    //#include "opencv2/nonfree/features2d.hpp" // SURF moved here in OpenCV 2.4+

    using namespace cv;

    IplImage *mat_dest_image = 0;
    IplImage *mat_frame = 0;

    /* Object detection and recognition from video */

    int main()
    {
        Mat object = imread( "book1.png", CV_LOAD_IMAGE_COLOR ); // load as a 3-channel color image

        if( !object.data )
        {
            std::cout << "Error reading object " << std::endl;
            return -1;
        }

        // Detect the keypoints using the SURF detector
        int minHessian = 500;

        SurfFeatureDetector detector( minHessian );
        std::vector<KeyPoint> kp_object;

        detector.detect( object, kp_object );

        // Calculate descriptors (feature vectors)
        SurfDescriptorExtractor extractor;
        Mat des_object;

        extractor.compute( object, kp_object, des_object );

        FlannBasedMatcher matcher;

        namedWindow("Good Matches");
        namedWindow("Tracking");

        // Get the corners from the object
        std::vector<Point2f> obj_corners(4);
        obj_corners[0] = cvPoint( 0, 0 );
        obj_corners[1] = cvPoint( object.cols, 0 );
        obj_corners[2] = cvPoint( object.cols, object.rows );
        obj_corners[3] = cvPoint( 0, object.rows );

        char key = 'a';
        int framecount = 0;
        VideoCapture cap("booksvideo.avi");

        for(;;)
        {
            Mat frame;
            cap >> frame;
            if( frame.empty() )   // stop when the video ends
                break;
            imshow("Good Matches", frame);

            Mat des_image, img_matches;
            std::vector<KeyPoint> kp_image;
            std::vector<std::vector<DMatch> > matches;
            std::vector<DMatch> good_matches;
            std::vector<Point2f> obj;
            std::vector<Point2f> scene;
            std::vector<Point2f> scene_corners(4);
            Mat H;
            Mat image;

            // Question (A): this grayscale conversion is what I want to remove,
            // but detect() below asserts unless it gets an 8-bit single-channel
            // image, so it stays in for now
            cvtColor(frame, image, CV_BGR2GRAY);

            detector.detect( image, kp_image );
            extractor.compute( image, kp_image, des_image );

            matcher.knnMatch(des_object, des_image, matches, 2);

            // Lowe's ratio test; check the pair size before touching
            // matches[i][1] so this loop can no longer segfault
            for( int i = 0; i < min( des_image.rows - 1, (int) matches.size() ); i++ )
            {
                if( matches[i].size() == 2 &&
                    matches[i][0].distance < 0.6 * matches[i][1].distance )
                {
                    good_matches.push_back( matches[i][0] );
                }
            }

            // Draw only the "good" matches
            drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

            if( good_matches.size() >= 4 )
            {
                for( size_t i = 0; i < good_matches.size(); i++ )
                {
                    // Get the keypoints from the good matches
                    obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                    scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
                }

                H = findHomography( obj, scene, CV_RANSAC );

                perspectiveTransform( obj_corners, scene_corners, H );

                // Draw lines between the corners (the mapped object in the scene image)
                line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );

                // Convert via named IplImage headers instead of taking the
                // address of a temporary
                IplImage ipl_image = image;
                IplImage ipl_frame = frame;
                mat_dest_image = cvCloneImage( &ipl_image );
                mat_frame = cvCloneImage( &ipl_frame );

                Kalman_Color_Object_Track( ); // the tracking method -- question (B): what arguments?
            }

            // Show the detected matches
            imshow( "Good Matches", img_matches );
            for( size_t i = 0; i < good_matches.size(); i++ )
            {
                printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n",
                        (int) i, good_matches[i].queryIdx, good_matches[i].trainIdx );
            }
            key = waitKey(33);   // was waitKey(0); poll instead of blocking on every frame
        }
        return 0;
    }

1 Answer


This paper implements SIFT descriptors for color images by computing the gradient histograms of each channel independently. Perhaps you could try the same approach with SURF features.
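
A minimal sketch of that per-channel idea, using the same OpenCV 2.x API as the question; the helper name colorSurf, and the simple pooling of keypoints and descriptor rows across channels, are my own illustration rather than the paper's exact method:

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/features2d/features2d.hpp"

    using namespace cv;

    // Run SURF on each color channel separately and pool the results.
    void colorSurf( const Mat& bgr, int minHessian,
                    std::vector<KeyPoint>& keypoints, Mat& descriptors )
    {
        std::vector<Mat> channels;
        split( bgr, channels );                   // B, G and R planes, each CV_8UC1

        SurfFeatureDetector detector( minHessian );
        SurfDescriptorExtractor extractor;

        keypoints.clear();
        descriptors.release();
        for( size_t c = 0; c < channels.size(); c++ )
        {
            std::vector<KeyPoint> kp;
            Mat des;
            detector.detect( channels[c], kp );   // each plane is 8-bit, so SURF accepts it
            extractor.compute( channels[c], kp, des );
            keypoints.insert( keypoints.end(), kp.begin(), kp.end() );
            if( !des.empty() )
                descriptors.push_back( des );     // stack the descriptor rows
        }
    }

In the question's code, colorSurf( object, minHessian, kp_object, des_object ) would then replace the separate detect()/compute() calls, and likewise for each frame.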

answered 2013-02-14 at 22:06