2

我有一个校准的(内在参数)相机。我从物体中拍摄立体照片,并在重新投影过程中使用它们来找到物体的一些 3-D 信息。

为此,首先我计算了基本矩阵。然后,我得到基本矩阵,从那里,我得到旋转矩阵和平移向量以及其他需要的信息。

问题是,使用不同的参数,我会得到非常不同的结果。我知道对于相同的立体图像,基本矩阵可能不同,但我希望有相同的旋转矩阵和平移向量。然而,每个不同的参数(例如匹配特征的数量)都会导致不同的矩阵。我错过了什么吗?我对相同的立体图像应该提供(相当)相同的旋转、平移和重新投影矩阵吗?

这是我的代码。任何帮助,将不胜感激。谢谢!

// Load the stereo pair as 8-bit grayscale; the SURF detector below
// operates on single-channel images.
Mat img_1 = imread( "images/box01-edited.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "images/box02-edited.jpg", CV_LOAD_IMAGE_GRAYSCALE );


// Bail out if either image failed to load (imread returns an empty Mat).
if( !img_1.data || !img_2.data )
{ return -1; }

//-- Step 1: Detect the keypoints using SURF Detector
// minHessian is the SURF blob-response threshold: higher -> fewer, stronger
// keypoints. Changing it changes the match set and therefore everything
// estimated downstream (F, E, R, t) — part of why results vary per run.
int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );

//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );

//-- Step 3: Matching descriptor vectors with a brute force matcher
// Second constructor argument enables cross-checking: a match is kept only
// if it is the mutual best in both directions, filtering many outliers.
// NOTE(review): SURF descriptors are floating point; NORM_L2 is the usual
// distance for them — NORM_L1 works but verify it was intentional.
BFMatcher matcher(NORM_L1, true);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );

//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
namedWindow( "Matches", CV_WINDOW_NORMAL );
imshow("Matches", img_matches );
waitKey(0);


//-- Step 4: calculate Fundamental Matrix
vector<Point2f>imgpts1,imgpts2;
for( unsigned int i = 0; i<matches.size(); i++ )
{
// queryIdx is the "left" image
imgpts1.push_back(keypoints_1[matches[i].queryIdx].pt);
// trainIdx is the "right" image
imgpts2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
Mat F =  findFundamentalMat  (imgpts1, imgpts2, FM_RANSAC, 0.1, 0.99);


//-- Step 5: calculate Essential Matrix

double data[] = {1189.46 , 0.0, 805.49, 
                0.0, 1191.78, 597.44,
                0.0, 0.0, 1.0};//Camera Matrix
Mat K(3, 3, CV_64F, data);
Mat_<double> E = K.t() * F * K; //according to HZ (9.12)

//-- Step 6: calculate Rotation Matrix and Translation Vector
Matx34d P;
Matx34d P1;
// Decompose E into R and t per HZ (9.19). NOTE(review): the decomposition
// has FOUR valid solutions — R in {U*W*Vt, U*Wt*Vt} combined with t in
// {+u3, -u3}. Only one places the reconstructed points in front of both
// cameras; the correct one must be chosen by triangulating a point and
// checking cheirality (positive depth in both views). Picking a fixed
// candidate, as here, is another reason different runs disagree.
SVD svd(E,SVD::MODIFY_A);
Mat svd_u = svd.u;
Mat svd_vt = svd.vt;
Mat svd_w = svd.w;
Matx33d W(0,-1,0,1,0,0,0,0,1);//HZ 9.13
Mat_<double> R = svd_u * Mat(W) * svd_vt; //HZ 9.19, first candidate
Mat_<double> t = svd_u.col(2); //t = u3, defined only up to sign and scale

// The SVD is sign-ambiguous: U and V may both be negated, which can turn
// the candidate into a reflection (det(R) = -1) rather than a rotation.
// Flip the overall sign to recover a proper rotation with det(R) = +1.
if (determinant(R) < 0)
{
    R = -R;
    t = -t;
}

if (!CheckCoherentRotation (R)) {
std::cout<<"resulting rotation is not coherent\n";
P1 = 0;
return 0;
}

P1 = Matx34d(R(0,0),R(0,1),R(0,2),t(0),
             R(1,0),R(1,1),R(1,2),t(1),
             R(2,0),R(2,1),R(2,2),t(2));

//-- Step 7: Reprojection Matrix and rectification data
// stereoRectify produces the per-camera rectification rotations (R1, R2),
// the rectified projection matrices (P1_, P2_), and the 4x4 disparity-to-
// depth mapping matrix Q used later for 3-D reprojection.
Mat R1, R2, P1_, P2_, Q;
Rect validRoi[2];
// Distortion coefficients (k1, k2, p1, p2, k3) from calibration; the same
// intrinsics/distortion are used for both views since it is one camera.
double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
Mat D(1, 5, CV_64F, dist);

// NOTE(review): t recovered from the essential matrix is only defined up
// to scale, so depths reprojected via Q are in an arbitrary (not metric)
// unit unless the baseline is known from elsewhere.
stereoRectify(K, D, K, D, img_1.size(), R, t, R1, R2, P1_, P2_, Q, CV_CALIB_ZERO_DISPARITY, 1, img_1.size(),  &validRoi[0], &validRoi[1] );
4

1 回答 1

1

I strongly recommend refining your fundamental matrix calculation with the standard 8-point algorithm after you eliminate the outlying feature matches with the RANSAC algorithm.

The following text is taken from the calib3d documentation for the findFundamentalMat function. status is an optional output parameter used to determine outliers in your dataset.

status – Output array of N elements, every element of which is set to 0 for outliers and to 1 for the other points. The array is computed only in the RANSAC and LMedS methods. For other methods, it is set to all 1’s.

You can then use only inlying matches with CV_FM_8POINT option to obtain more reliable fundamental matrix.

于 2014-08-07T14:56:38.567 回答