
I am working on the correspondence between 2D image points and 3D model points. First, I use the following code:

// Camera intrinsic matrix, initialized to identity (fx = fy = 1, cx = cy = 0)
cv::Mat cameraMatrix(3, 3, cv::DataType<double>::type);
cv::setIdentity(cameraMatrix);

std::cout << "Initial cameraMatrix: " << cameraMatrix << std::endl;

// Distortion coefficients, all zero (no lens distortion assumed)
cv::Mat distCoeffs(4, 1, cv::DataType<double>::type);
distCoeffs.at<double>(0) = 0;
distCoeffs.at<double>(1) = 0;
distCoeffs.at<double>(2) = 0;
distCoeffs.at<double>(3) = 0;

// Output rotation and translation vectors of the estimated pose
cv::Mat rvec(3, 1, cv::DataType<double>::type);
cv::Mat tvec(3, 1, cv::DataType<double>::type);

// Estimate the camera pose from the 3D-2D correspondences
cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

std::cout << "rvec: " << rvec << std::endl;
std::cout << "tvec: " << tvec << std::endl;

// Reproject the 3D points using the estimated pose
std::vector<cv::Point2f> projectedPoints;
cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);
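
For reference, objectPoints and imagePoints are not declared in the snippet above; they are ordinary OpenCV point containers along the lines of the sketch below (the coordinates are placeholders only, not my real data):

// Placeholder 3D-2D correspondences; the real values come from my model and image
std::vector<cv::Point3f> objectPoints = {
    { 0.0f, 0.0f, 0.0f },
    { 1.0f, 0.0f, 0.0f },
    { 1.0f, 1.0f, 0.0f },
    { 0.0f, 1.0f, 0.0f }
};
std::vector<cv::Point2f> imagePoints = {
    { 320.0f, 240.0f },
    { 400.0f, 245.0f },
    { 405.0f, 320.0f },
    { 318.0f, 315.0f }
};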

I expect projectedPoints[i] to be equal to imagePoints[i], but when I print their values they differ.

for (unsigned int i = 0; i < projectedPoints.size(); ++i)
{
    std::cout << "Image point: " << imagePoints[i] << " Projected to " << projectedPoints[i] << std::endl;
}
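
To quantify the mismatch instead of only printing the pairs, the loop could be followed by a mean reprojection-error check along these lines (a sketch; it simply averages the per-point Euclidean distance and needs <cmath> for std::sqrt):

// Mean Euclidean distance between measured and reprojected points
double totalError = 0.0;
for (size_t i = 0; i < projectedPoints.size(); ++i)
{
    const double dx = imagePoints[i].x - projectedPoints[i].x;
    const double dy = imagePoints[i].y - projectedPoints[i].y;
    totalError += std::sqrt(dx * dx + dy * dy);
}
std::cout << "Mean reprojection error: "
          << totalError / projectedPoints.size() << " pixels" << std::endl;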


Why are these values different?
