The approach I used to solve this problem is as follows:
- Create an empty image with the desired output size.
- For each pixel of the output image, find its theta and phi coordinates (linearly): theta runs from -Pi to Pi and phi from 0 to Pi.
- Fix a projection radius R and compute the 3D coordinates from theta, phi and R (see the small helper sketched right after this list).
- Find how many cameras can see that 3D point and the corresponding pixel position in each of them.
- Copy the pixel from the image in which it lies closest to the principal point, or any other valid criterion...
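For reference, the inverse mapping from an output pixel to a 3D point (steps 2 and 3) can be isolated in a small helper. This is only a minimal sketch using the same conventions as the full code below (theta in [-Pi, Pi] along x, phi in [0, Pi] along y, Z as the vertical axis); the function name and the cv::Point3d return type are my own choices:

#include <opencv2/core.hpp>
#include <cmath>

// Map an output pixel (x, y) of a W x H panorama to a point on a sphere of radius R.
// theta runs linearly from Pi (x = 0) to -Pi (x = W-1), phi from 0 (y = 0) to Pi (y = H-1).
cv::Point3d panoPixelToSphere(int x, int y, int W, int H, double R)
{
    const double PI = 3.14159265358979323846;
    double theta = -(2.0 * PI * x / (W - 1) - PI); // sign flipped so the panorama is not mirrored
    double phi = PI * y / (H - 1);
    return cv::Point3d(R * std::sin(phi) * std::cos(theta),
                       R * std::sin(phi) * std::sin(theta),
                       R * std::cos(phi));
}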
My code looks like this:
cv::Mat panoramic = cv::Mat::zeros(PANO_HEIGHT, PANO_WIDTH, CV_8UC3);

double theta, phi;
double R = calibration.getSphereRadius();
int result;
double dRow = 0;
double dCol = 0;

for (int y = 0; y != PANO_HEIGHT; y++) {
    for (int x = 0; x != PANO_WIDTH; x++) {
        // Rescale x to [-Pi, Pi] and y to [0, Pi].
        theta = -(2 * PI * x / (PANO_WIDTH - 1) - PI); // Sign change needed so the panorama is not mirrored.
        phi = PI * y / (PANO_HEIGHT - 1);

        // From theta and phi, find the 3D coordinates on the sphere of radius R.
        double globalZ = R * cos(phi);
        double globalX = R * sin(phi) * cos(theta);
        double globalY = R * sin(phi) * sin(theta);

        float minDistanceCenter = 5000; // Just needs to be larger than any possible distance to the principal point.
        float distanceCenter;

        // From the 3D coordinates, find which cameras see the point.
        for (int cam = 0; cam != 6; cam++) {
            result = calibration.ladybugXYZtoRC(globalX, globalY, globalZ, cam, dRow, dCol);
            if (result == 0) { // The 3D point is visible from this camera.
                int row = cvRound(dRow);
                int col = cvRound(dCol);
                cv::Vec3b intensity = image[cam].at<cv::Vec3b>(row, col);
                distanceCenter = sqrt(pow(dRow - imageHeight / 2.0, 2) + pow(dCol - imageWidth / 2.0, 2));
                // Keep the sample from the camera whose projection lies closest to the principal point.
                if (distanceCenter < minDistanceCenter) {
                    panoramic.at<cv::Vec3b>(y, x) = intensity;
                    minDistanceCenter = distanceCenter;
                }
            }
        }
    }
}
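Since ladybugXYZtoRC returns fractional row/column coordinates, one possible refinement (not in the code above) is to sample the source images with bilinear interpolation instead of rounding to the nearest pixel. A minimal sketch, assuming each image[cam] is a CV_8UC3 cv::Mat:

#include <opencv2/imgproc.hpp>

// Sample a CV_8UC3 image at a fractional (row, col) position with bilinear interpolation.
// Note that getRectSubPix expects the patch center as (x, y) = (col, row).
cv::Vec3b sampleBilinear(const cv::Mat& img, double row, double col)
{
    cv::Mat patch;
    cv::getRectSubPix(img, cv::Size(1, 1), cv::Point2f((float)col, (float)row), patch);
    return patch.at<cv::Vec3b>(0, 0);
}

The copy inside the camera loop would then become panoramic.at<cv::Vec3b>(y, x) = sampleBilinear(image[cam], dRow, dCol);.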