I am trying to track a set of feature points across a sequence of grayscale images using OpenCV 2.4.0.
I already know how to use SIFT or SURF to detect feature points and compute their descriptors. What I need help with is computing SIFT descriptors for feature points whose positions (u,v) I already know. A working SIFT example is included at the end of this question.
For example, suppose I use the Harris corner detector to detect the feature points dv_scenePoints_t:

cvGoodFeaturesToTrack(source2, eig_img, temp_img, dv_scenePoints_t, &corner_count, 0.3, 3.0, mask, 7, 1);

How would I then compute SIFT descriptors for the points in dv_scenePoints_t?
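What I am currently considering is wrapping each detected (u,v) corner in a cv::KeyPoint (with a fixed patch size, since goodFeaturesToTrack gives no scale) and passing that vector straight to SiftDescriptorExtractor::compute. A minimal sketch, using the C++ goodFeaturesToTrack wrapper instead of the C call above; the patch size of 7 is my own guess:

    // Detect corners with the C++ wrapper of the Harris detector.
    vector<Point2f> corners;
    goodFeaturesToTrack(source2, corners, 400, 0.3, 3.0, Mat(), 7, true, 0.04);

    // Wrap each (u,v) position in a KeyPoint; SIFT needs a size, so use a fixed patch size.
    vector<KeyPoint> fixedKeypoints;
    for (size_t i = 0; i < corners.size(); i++)
        fixedKeypoints.push_back(KeyPoint(corners[i], 7.0f));

    // Compute SIFT descriptors at exactly those locations.
    // Note: compute() may drop keypoints that lie too close to the image border.
    SiftDescriptorExtractor extractor;
    Mat fixedDescriptors;
    extractor.compute(source2, fixedKeypoints, fixedDescriptors);

Is this the right way to force SIFT to describe points I already have, or does the missing scale/orientation make the descriptors unreliable?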
Also, if I have to track the feature points with a particle filter, how would I use the SIFT descriptors to compute the weight of each particle (feature point hypothesis)? Thanks.
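My current idea for the weighting is to compute a SIFT descriptor at each particle's hypothesised (u,v) position and turn its distance to the reference descriptor of the tracked feature into a Gaussian likelihood. A rough sketch of that idea; particleWeight, refDescriptor and sigma are names I made up, refDescriptor would be one 1x128 CV_32F row taken from the reference frame, and exp comes from <cmath>:

    // Hypothetical helper: weight one particle by how well the descriptor at its
    // position matches the reference descriptor of the feature being tracked.
    double particleWeight(const Mat &image, Point2f particlePos,
                          const Mat &refDescriptor, double sigma)
    {
        vector<KeyPoint> kp(1, KeyPoint(particlePos, 7.0f)); // same fixed patch size as above
        SiftDescriptorExtractor extractor;
        Mat desc;
        extractor.compute(image, kp, desc);
        if (desc.rows == 0)              // keypoint rejected, e.g. too close to the border
            return 0.0;
        double d = norm(desc.row(0), refDescriptor, NORM_L2); // descriptor distance
        return exp(-(d * d) / (2.0 * sigma * sigma));         // Gaussian likelihood -> weight
    }

Normalising these weights over all particles would then give the resampling distribution. Is that a reasonable way to use the descriptors, or is there a better similarity measure?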
#include "stdafx.h"
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <opencv2/nonfree/features2d.hpp>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <string.h>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
    // Load the two test images as single-channel grayscale.
    Mat source1 = imread("KITTI_train.png", CV_LOAD_IMAGE_GRAYSCALE);
    Mat source2 = imread("KITTI_trainRotate90.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (source1.empty() || source2.empty())
    {
        cout << "Could not load input images." << endl;
        return 1;
    }

    vector<KeyPoint> dv_sceneKeypoints_t, dv_objectKeypoints_t;
    vector<DMatch> matches;

    // Detect SIFT keypoints (up to 400 features, 5 octave layers, 0.03 contrast threshold).
    SiftFeatureDetector detector(400, 5, 0.03);
    detector.detect(source1, dv_objectKeypoints_t);
    detector.detect(source2, dv_sceneKeypoints_t);

    // Compute a 128-dimensional SIFT descriptor for every detected keypoint.
    SiftDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(source1, dv_objectKeypoints_t, descriptors1);
    extractor.compute(source2, dv_sceneKeypoints_t, descriptors2);

    // Match the descriptors between the two images and visualise the result.
    FlannBasedMatcher matcher;
    matcher.match(descriptors1, descriptors2, matches);

    Mat target;
    drawMatches(source1, dv_objectKeypoints_t, source2, dv_sceneKeypoints_t, matches, target);
    imshow("Matches", target);
    waitKey(0);
    return 0;
}