
Maybe the problem is that my English is not good. I am new to OpenCV. I want to know which region the stitcher merged, like this ↓

[image]

[merged image]


1 Answer


If you know the order in which the images were taken, you can stitch them together with the code below. If the order is unknown, the problem becomes considerably harder. Also note that this code is written for images of the same size; if your camera was moved between shots it may produce incorrect results, so add some sanity checks. The stitching function, which is called twice in main, detects SURF keypoints in both images, matches their descriptors with a FLANN matcher, keeps only the closest matches, estimates a homography with RANSAC, and warps one image onto the other. For a fuller walkthrough, see http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/ .

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;

void stitching(cv::Mat&, cv::Mat&, cv::Mat&);

int main()
{
    Mat image1 = imread("image1.jpg");
    Mat image2 = imread("image2.jpg");
    Mat image3 = imread("image3.jpg");

    Mat gray_image1;
    Mat gray_image2;
    Mat gray_image3;
    Mat result1, result2;

    // Convert to grayscale (imread loads BGR, so use CV_BGR2GRAY)
    cvtColor(image1, gray_image1, CV_BGR2GRAY);
    cvtColor(image2, gray_image2, CV_BGR2GRAY);
    cvtColor(image3, gray_image3, CV_BGR2GRAY);

    // Stitch pairwise: (1,2) first, then the intermediate result with 3
    stitching(gray_image1, gray_image2, result1);
    stitching(result1, gray_image3, result2);

    cv::imshow("stitched image", result2);
    cv::waitKey(0);
    return 0;
}

void stitching(cv::Mat& im1, cv::Mat& im2, cv::Mat& stitch_im)
{
    // Detect SURF keypoints in both images
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector.detect(im1, keypoints_object);
    detector.detect(im2, keypoints_scene);

    // Compute SURF descriptors
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute(im1, keypoints_object, descriptors_object);
    extractor.compute(im2, keypoints_scene, descriptors_scene);

    // Match descriptors with FLANN
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descriptors_object, descriptors_scene, matches);

    // Find the smallest and largest match distances
    double max_dist = 0; double min_dist = 100;
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    // Keep only "good" matches (distance < 3 * min_dist)
    std::vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        if (matches[i].distance < 3 * min_dist)
            good_matches.push_back(matches[i]);
    }

    // Collect the matched point pairs
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }

    // Estimate the homography mapping im1 into im2's frame with RANSAC
    Mat H = findHomography(obj, scene, CV_RANSAC);

    // Warp im1 onto the result canvas, then copy im2 into the left half.
    // This assumes im1 is the right-hand image and im2 the left-hand one.
    warpPerspective(im1, stitch_im, H, cv::Size(im1.cols + im2.cols, im1.rows));
    cv::Mat half(stitch_im, cv::Rect(0, 0, im2.cols, im2.rows));
    im2.copyTo(half);
}
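
As an alternative to writing the matching pipeline yourself, OpenCV also ships a high-level cv::Stitcher class that estimates the image layout and blends automatically, which sidesteps the ordering and image-size caveats above. A minimal sketch, assuming OpenCV 2.4+ built with the stitching module and the same input files as above:

#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"
using namespace cv;

int main()
{
    // Load the inputs; Stitcher does not need to know the capture order.
    std::vector<Mat> images;
    images.push_back(imread("image1.jpg"));
    images.push_back(imread("image2.jpg"));
    images.push_back(imread("image3.jpg"));

    Mat pano;
    Stitcher stitcher = Stitcher::createDefault();
    Stitcher::Status status = stitcher.stitch(images, pano);
    if (status != Stitcher::OK)
        return -1; // stitching failed, e.g. too little overlap

    imshow("stitched image", pano);
    waitKey(0);
    return 0;
}

The trade-off is less control: Stitcher chooses the features, matcher, and blender internally, whereas the code above exposes each step.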
answered 2016-03-05T05:19:30.070