我一直试图让一些特征检测和匹配算法正常工作(我在此寻求帮助),但它们给我的结果很差。我找到了一个用于特征检测和匹配的模板代码,它使用单应矩阵和透视变换在场景图片(即相机输入)中定位匹配的模板对象。我一直在寻找参数校准或可能有帮助的东西,但我找不到太多信息,我在代码中使用的参数来自互联网上的不同站点。我通常会得到错误的结果,而算法却"认为"它们找到了足够好的匹配来绘制找到的对象。不幸的是,我几乎没有得到好的结果;要做到这一点,我通常需要一个均匀的背景。我使用高斯平滑或中值滤波器作为预处理。我希望您对此有一些经验,并且可以帮助解决我可能不太了解的问题、错误的参数设置,或者建议不同的方法。谢谢!
这是我的代码(我写了注释):
// Build the template (object) image matrix from the input.
Mat object = Mat(ImageObject);

// --- SURF detector configuration -------------------------------------------
// surfThreshold = 500 (Hessian threshold: higher -> fewer, stronger keypoints)
int minHessian = surfThreshold;
// surfUpright = false, surfExtended = false,
// surfNOctaves = 4, surfNOctaveLayers = 2
// (with SIFT use SiftFeatureDetector / SiftDescriptorExtractor instead)
SurfFeatureDetector detector(minHessian, surfNOctaves, surfNOctaveLayers, surfExtended, surfUpright);

// Detect keypoints on the template picture.
vector<KeyPoint> kp_object;
detector.detect(object, kp_object);

SurfDescriptorExtractor extractor;
// Compute the template descriptors.
Mat des_object;
extractor.compute(object, kp_object, des_object);

// FLANN-based approximate matcher (BFMatcher behaves similarly here).
FlannBasedMatcher matcher;
namedWindow("matches");

// The 4 corners of the template image, later projected into the scene
// with the estimated homography to outline the detected object.
vector<Point2f> obj_corners(4);
obj_corners[0] = Point2f(0.0f, 0.0f);
obj_corners[1] = Point2f((float)object.cols, 0.0f);
obj_corners[2] = Point2f((float)object.cols, (float)object.rows);
obj_corners[3] = Point2f(0.0f, (float)object.rows);

Mat des_image, img_matches;
vector<KeyPoint> kp_image;
vector<vector<DMatch> > matches;
vector<DMatch> good_matches;
vector<Point2f> obj;
vector<Point2f> scene;
vector<Point2f> scene_corners(4);
Mat H;

Mat image = Mat(ImageScene);
// Compute the scene image's keypoints and descriptors.
detector.detect(image, kp_image);
extractor.compute(image, kp_image, des_image);

// For every template descriptor, retrieve its 2 best matches in the scene.
matcher.knnMatch(des_object, des_image, matches, 2);

// Lowe's ratio test: keep a match only if the best candidate is clearly
// closer than the second-best one.  surfMatchDistance = 0.8.
//
// BUG FIX: the original code compared against matches[i][3].distance, but
// knnMatch was asked for only k = 2 neighbours -- matches[i][3] is an
// out-of-bounds access (undefined behaviour) and the ratio test never ran
// against the real second-best neighbour, which is why weak matches slipped
// through.  The correct comparison is matches[i][0] vs matches[i][1].
// The size guard must also run BEFORE the element access, and the loop must
// cover matches.size() (the old min(des_image.rows - 1, ...) bound was
// arbitrary).
for (size_t i = 0; i < matches.size(); i++)
{
    // Both neighbours are required for the ratio test; skip degenerate rows.
    if (matches[i].size() < 2)
        continue;
    if (matches[i][0].distance < surfMatchDistance * matches[i][1].distance)
    {
        good_matches.push_back(matches[i][0]);
    }
}

// Draw the surviving good matches (template on the left, scene on the right).
drawMatches(object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

// If we have "enough" good matches, assume the template is present.
// goodMatchesCount = 7 (4 point pairs is the theoretical minimum for a
// homography; a few extra gives RANSAC outliers to reject).
if (good_matches.size() >= goodMatchesCount)
{
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        // Matched keypoint coordinates: queryIdx indexes the template
        // (first argument of knnMatch), trainIdx indexes the scene.
        obj.push_back(kp_object[good_matches[i].queryIdx].pt);
        scene.push_back(kp_image[good_matches[i].trainIdx].pt);
    }

    // RANSAC rejects remaining outliers while estimating the homography.
    H = findHomography(obj, scene, CV_RANSAC, ransacThresold);

    // findHomography can fail and return an empty matrix; guard before
    // perspectiveTransform, which would otherwise assert/crash.
    if (!H.empty())
    {
        // Project the template's corners into scene coordinates.
        perspectiveTransform(obj_corners, scene_corners, H);

        // Frame the detected object.  The x-offset of object.cols shifts the
        // outline past the template image drawn on the left of img_matches.
        Point2f offset((float)object.cols, 0.0f);
        line(img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4);
    }
}
imshow("matches", img_matches);