
My code is below. It runs well on iOS, and I want to convert it to Android. If anyone has worked with the OpenCV library, please help me.

The code finds whether a smaller image is present inside a larger image and, if it is, finds its coordinates.

Please note: for Android, since SURF is not available in OpenCV 2.4.2 or later, I am using OpenCV 2.4.1 here (http://garr.dl.sourceforge.net/project/opencvlibrary/opencv-android/2.4.1/OpenCV-2.4.1-android-bin2.tar.bz2).

Code:

-(void)featureDetection:(UIImage*)largerImage withImage:(UIImage*)subImage
{
cv::Mat tempMat1 = [largerImage CVMat];
cv::Mat tempMat2 = [subImage CVMat];

cv::cvtColor(tempMat1, tempMat1, CV_RGB2GRAY);
cv::cvtColor(tempMat2, tempMat2, CV_RGB2GRAY);

if( !tempMat1.data || !tempMat2.data ) {
    return;
}

//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 25;

cv::SurfFeatureDetector detector( minHessian ); // More accurate but takes more time.
//cv::FastFeatureDetector detector( minHessian ); // Less accurate but takes less time.

std::vector<cv::KeyPoint> keypoints_1, keypoints_2;

detector.detect( tempMat1, keypoints_1 );
detector.detect( tempMat2, keypoints_2 );

//-- Step 2: Calculate descriptors (feature vectors)
cv::SurfDescriptorExtractor extractor;

cv::Mat descriptors_1, descriptors_2;

extractor.compute( tempMat1, keypoints_1, descriptors_1 );
extractor.compute( tempMat2, keypoints_2, descriptors_2 );

std::vector<cv::Point2f> obj_corners(4);

//Get the corners from the object
obj_corners[0] = (cvPoint(0,0));
obj_corners[1] = (cvPoint(tempMat2.cols,0));
obj_corners[2] = (cvPoint(tempMat2.cols,tempMat2.rows));
obj_corners[3] = (cvPoint(0, tempMat2.rows));

//-- Step 3: Matching descriptor vectors with a brute force matcher
//cv::BruteForceMatcher < cv::L2<float> > matcher;
cv::FlannBasedMatcher matcher;
//std::vector< cv::DMatch > matches;
std::vector<cv::vector<cv::DMatch > > matches;

std::vector<cv::DMatch > good_matches;
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
std::vector<cv::Point2f> scene_corners(4);
cv::Mat H;

matcher.knnMatch( descriptors_2, descriptors_1, matches,2);
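// Query = sub-image descriptors, train = larger-image descriptors;
// k = 2 so the ratio test below can compare the best match against the second best.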

for(int i = 0; i < cv::min(tempMat1.rows-1,(int) matches.size()); i++)  {
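    // Lowe-style ratio test: keep a match only if its distance is clearly
    // smaller (here, < 0.6x) than that of the second-best candidate.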

    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))  {
        good_matches.push_back(matches[i][0]);
    }
}
cv::Mat img_matches;
drawMatches( tempMat2, keypoints_2, tempMat1, keypoints_1, good_matches, img_matches );

NSLog(@"good matches %lu",good_matches.size());

if (good_matches.size() >= 4)  {
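     // At least 4 point correspondences are required to estimate a homography.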

     for( int i = 0; i < good_matches.size(); i++ ) {
         //Get the keypoints from the good matches
         obj.push_back( keypoints_2[ good_matches[i].queryIdx ].pt );
         scene.push_back( keypoints_1[ good_matches[i].trainIdx ].pt );
     }

     H = findHomography( obj, scene, CV_RANSAC );
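     // H maps points of the sub image (obj) into the larger image (scene);
     // CV_RANSAC discards outlier correspondences while estimating it.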

     perspectiveTransform( obj_corners, scene_corners, H);

     NSLog(@"%f %f",scene_corners[0].x,scene_corners[0].y);//This is the value which i Want to find out
     NSLog(@"%f %f",scene_corners[1].x,scene_corners[1].y);
     NSLog(@"%f %f",scene_corners[2].x,scene_corners[2].y);
     NSLog(@"%f %f",scene_corners[3].x,scene_corners[3].y);


     //Draw lines between the corners (the mapped object in the scene image )
     line( tempMat1, scene_corners[0], scene_corners[1], cvScalar(0, 255, 0), 4 );

     line( tempMat1, scene_corners[1], scene_corners[2], cvScalar( 0, 255, 0), 4 );

     line( tempMat1, scene_corners[2], scene_corners[3], cvScalar( 0, 255, 0), 4 );

     line( tempMat1, scene_corners[3], scene_corners[0], cvScalar( 0, 255, 0), 4 );
 }

 // View matching..

 UIImage *resultimage = [UIImage imageWithCVMat:img_matches];
 UIImageView *imageview = [[UIImageView alloc] initWithImage:resultimage];
 imageview.frame = CGRectMake(0, 0, 320, 240);
 [self.view addSubview:imageview];

 // View Result

 UIImage *resultimage2 = [UIImage imageWithCVMat:tempMat1];
 UIImageView *imageview2 = [[UIImageView alloc] initWithImage:resultimage2];
 imageview2.frame = CGRectMake(0, 240, 320, 240);
 [self.view addSubview:imageview2];
}

1 Answer


I got the solution to my own question. I found the answer in the question Object detection with OpenCV Feature Matching with a threshold/similarity score - Java/C++.

Make the necessary changes as mentioned in that post. Take a look at it.
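For reference, below is a rough sketch of how the Objective-C++ pipeline above might map onto the OpenCV 2.4.1 Android Java bindings. It is untested and makes a few assumptions: the 2.4.1 SDK still bundles the nonfree SURF implementation (so FeatureDetector.SURF / DescriptorExtractor.SURF actually work), SURF runs with its default parameters because the Java factory takes no minHessian argument, the inputs are RGB Mats already loaded on the Java side, and the class and method names (SubImageLocator.locate) are placeholders only.

import java.util.ArrayList;
import java.util.List;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DMatch;              // org.opencv.core.DMatch from OpenCV 3.x on
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.KeyPoint;            // org.opencv.core.KeyPoint from OpenCV 3.x on
import org.opencv.imgproc.Imgproc;

public class SubImageLocator {

    // largerImage and subImage are RGB Mats. Returns the four corners of the
    // sub image inside the larger image, or null if there are too few matches.
    public static Point[] locate(Mat largerImage, Mat subImage) {
        Mat largeGray = new Mat();
        Mat subGray = new Mat();
        Imgproc.cvtColor(largerImage, largeGray, Imgproc.COLOR_RGB2GRAY);
        Imgproc.cvtColor(subImage, subGray, Imgproc.COLOR_RGB2GRAY);

        // Step 1: detect keypoints (default SURF parameters; the Java factory
        // takes no minHessian argument).
        FeatureDetector detector = FeatureDetector.create(FeatureDetector.SURF);
        MatOfKeyPoint keypointsLarge = new MatOfKeyPoint();
        MatOfKeyPoint keypointsSub = new MatOfKeyPoint();
        detector.detect(largeGray, keypointsLarge);
        detector.detect(subGray, keypointsSub);

        // Step 2: compute descriptors.
        DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
        Mat descriptorsLarge = new Mat();
        Mat descriptorsSub = new Mat();
        extractor.compute(largeGray, keypointsLarge, descriptorsLarge);
        extractor.compute(subGray, keypointsSub, descriptorsSub);

        // Step 3: kNN matching (sub image = query, larger image = train) with
        // the same 0.6 ratio test as in the Objective-C++ code.
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
        List<MatOfDMatch> knnMatches = new ArrayList<MatOfDMatch>();
        matcher.knnMatch(descriptorsSub, descriptorsLarge, knnMatches, 2);

        List<DMatch> goodMatches = new ArrayList<DMatch>();
        for (MatOfDMatch pair : knnMatches) {
            DMatch[] m = pair.toArray();
            if (m.length >= 2 && m[0].distance < 0.6f * m[1].distance) {
                goodMatches.add(m[0]);
            }
        }
        if (goodMatches.size() < 4) {
            return null; // a homography needs at least 4 correspondences
        }

        // Step 4: homography from sub-image points to larger-image points.
        List<KeyPoint> kpSub = keypointsSub.toList();
        List<KeyPoint> kpLarge = keypointsLarge.toList();
        List<Point> objPts = new ArrayList<Point>();
        List<Point> scenePts = new ArrayList<Point>();
        for (DMatch match : goodMatches) {
            objPts.add(kpSub.get(match.queryIdx).pt);
            scenePts.add(kpLarge.get(match.trainIdx).pt);
        }
        MatOfPoint2f objMat = new MatOfPoint2f();
        MatOfPoint2f sceneMat = new MatOfPoint2f();
        objMat.fromList(objPts);
        sceneMat.fromList(scenePts);
        Mat H = Calib3d.findHomography(objMat, sceneMat, Calib3d.RANSAC, 3);

        // Step 5: project the sub-image corners into the larger image.
        MatOfPoint2f objCorners = new MatOfPoint2f(
                new Point(0, 0),
                new Point(subGray.cols(), 0),
                new Point(subGray.cols(), subGray.rows()),
                new Point(0, subGray.rows()));
        MatOfPoint2f sceneCorners = new MatOfPoint2f();
        Core.perspectiveTransform(objCorners, sceneCorners, H);

        Point[] corners = sceneCorners.toArray();
        // Outline the detected region (Core.line in 2.4.x; Imgproc.line in 3.x+).
        for (int i = 0; i < 4; i++) {
            Core.line(largerImage, corners[i], corners[(i + 1) % 4], new Scalar(0, 255, 0), 4);
        }
        return corners;
    }
}

For display, org.opencv.android.Utils.bitmapToMat and Utils.matToBitmap can convert between Android Bitmaps and Mats, which is the rough equivalent of the UIImage/UIImageView part at the end of the iOS code, and matches can be visualised with Features2d.drawMatches, like the drawMatches call above.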

Answered on 2014-01-10T04:25:07.663