
I was assigned to convert an image from the L*a*b* color space to RGB using OpenCV. For this I used the information provided here and here.

Edit: I was assigned to do this without the cvtColor function that ships with OpenCV.

I also tried implementing the formulas from here directly. I am still new to image processing and don't know whether my result is valid. I can see each channel, and the values of the RGB image are between 0 and 255, but when I merge the channels I get a grayscale image. I expected to get the original color image back after converting from L*a*b* to RGB. Is this normal?

    Mat image = imread(argv[1], CV_LOAD_IMAGE_UNCHANGED);
    Mat labimage = Mat::zeros(image.size(), image.type());      // Matrix to store the Lab image.
    cvtColor(image, labimage, CV_BGR2Lab);                      // Automatic RGB to Lab conversion.

    Mat lchannel = Mat::zeros(image.size(), labimage.type());   // Matrix to store the L channel.
    Mat achannel = Mat::zeros(image.size(), labimage.type());   // Matrix to store the a channel.
    Mat bchannel = Mat::zeros(image.size(), labimage.type());   // Matrix to store the b channel.
    Mat bwchannel = Mat::zeros(image.size(), labimage.type());  // Matrix to store the grayscale channel.

    for(int x = 0; x < image.cols; x++){
        for(int y = 0; y < image.rows; y++){
            lchannel.at<Vec3b>(y,x)[0] = labimage.at<Vec3b>(y,x)[0];
            achannel.at<Vec3b>(y,x)[1] = labimage.at<Vec3b>(y,x)[1];
            bchannel.at<Vec3b>(y,x)[2] = labimage.at<Vec3b>(y,x)[2];
        }
    }

    Mat color = Mat::zeros(image.size(), labimage.type());
    double X, Y, Z, dX, dY, dZ;
    double R, G, B;
    double L, a, b;
    X = Y = Z = dX = dY = dZ = R = G = B = L = a = b = 0;

    for(int x = 0; x < image.cols; x++){
        for(int y = 0; y < image.rows; y++){
            L = (double)(lchannel.at<Vec3b>(y,x)[0] / 255.0) * 100.0;   // Range 0 to 100.
            a = (double)(achannel.at<Vec3b>(y,x)[1] / 255) * 128;       // Range -128 to 128.
            b = (double)(bchannel.at<Vec3b>(y,x)[2] / 255) * 128;       // Range -128 to 128.

            // Lab -> normalized XYZ (X, Y, Z are all in 0...1)
            Y = L * (1.0/116.0) + 16.0/116.0;
            X = a * (1.0/500.0) + Y;
            Z = b * (-1.0/200.0) + Y;

            X = X > 6.0/29.0 ? X * X * X : X * (108.0/841.0) - 432.0/24389.0;
            Y = L > 8.0 ? Y * Y * Y : L * (27.0/24389.0);
            Z = Z > 6.0/29.0 ? Z * Z * Z : Z * (108.0/841.0) - 432.0/24389.0;

            // normalized XYZ -> linear sRGB (in 0...1)
            R = X * (1219569.0/395920.0)     + Y * (-608687.0/395920.0)    + Z * (-107481.0/197960.0);
            G = X * (-80960619.0/87888100.0) + Y * (82435961.0/43944050.0) + Z * (3976797.0/87888100.0);
            B = X * (93813.0/1774030.0)      + Y * (-180961.0/887015.0)    + Z * (107481.0/93370.0);

            // linear sRGB -> gamma-compressed sRGB (in 0...1)
            R = R > 0.0031308 ? pow(R, 1.0 / 2.4) * 1.055 - 0.055 : R * 12.92;
            G = G > 0.0031308 ? pow(G, 1.0 / 2.4) * 1.055 - 0.055 : G * 12.92;
            B = B > 0.0031308 ? pow(B, 1.0 / 2.4) * 1.055 - 0.055 : B * 12.92;

            //printf("a0: %d\t L0: %d\t b0: %d\n", achannel.at<Vec3b>(y,x)[1], lchannel.at<Vec3b>(y,x)[0], bchannel.at<Vec3b>(y,x)[2]);
            //printf("a: %f\t L: %f\t b: %f\n", a, L, b);
            //printf("X: %f\t Y: %f\t Z: %f\n", X, Y, Z);
            //printf("R: %f\t G: %f\t B: %f\n", R, G, B);
            //cout << "R: " << R << " G: " << G << " B: " << B << endl;
            //string str = type2str(color.type());
            //cout << "Matrix type: " << str << endl;

            color.at<Vec3b>(y,x)[0] = R*255;
            color.at<Vec3b>(y,x)[1] = G*255;
            color.at<Vec3b>(y,x)[2] = B*255;
        }
    }

Did I do it right, or did I misunderstand the information?
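
For reference, this is the scaling I was trying to undo in the loop above. As far as I understand the cvtColor documentation, for 8-bit images OpenCV stores L as L*255/100 and stores a and b with a +128 offset, so decoding one Lab pixel back to real L*a*b* values would look roughly like this (just a sketch, the helper name is mine):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    // Decode one 8-bit Lab pixel back to real L*a*b* ranges.
    // Assumes the pixel comes from cvtColor(..., CV_BGR2Lab) on a CV_8UC3 image,
    // where L is stored as L * 255 / 100 and a, b are stored with a +128 offset.
    void decodeLab8(const Vec3b& px, double& L, double& a, double& b)
    {
        L = px[0] * 100.0 / 255.0;   // back to 0..100
        a = px[1] - 128.0;           // back to about -127..127
        b = px[2] - 128.0;           // back to about -127..127
    }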


2 Answers


Don't roll your own per-pixel loops; that's terribly inefficient.

Use cvtColor(src, dst, COLOR_Lab2BGR) instead.

(Also, if I may say so, prefer the documentation over SO answers..)
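
For example, a minimal round trip with cvtColor would look something like this (assuming an 8-bit BGR input; the file path handling is only illustrative):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main(int argc, char** argv)
    {
        Mat bgr = imread(argv[1], IMREAD_COLOR);   // 8-bit BGR image

        Mat lab, back;
        cvtColor(bgr, lab, COLOR_BGR2Lab);         // BGR -> Lab, one call, no per-pixel loop
        cvtColor(lab, back, COLOR_Lab2BGR);        // Lab -> BGR

        imshow("round trip", back);                // should match the original up to rounding
        waitKey();
        return 0;
    }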

Answered 2015-02-08T10:35:57.013

Never mind. I managed to solve it myself, and it was quite enjoyable. For anyone interested who runs into the same trouble I once had, here are the algorithm and some code (a small driver sketch tying the two steps together follows after the code):

  1. Convert CIE-L*a*b* to XYZ. This is necessary because CIE-L*a*b* is not a linear color space, so there is no known direct conversion to RGB.

    // Step 1: CIE-L*a*b* -> XYZ (D65 white point).
    void invf(cv::Mat& input, cv::Mat& output);   // forward declaration, defined below

    void CIElabtoXYZ(cv::Mat& image, cv::Mat& output){
        float WhitePoint[3] = {0.950456, 1, 1.088754};   // D65 reference white
        Mat fX = Mat::zeros(image.size(), CV_32FC1);
        Mat fY = Mat::zeros(image.size(), CV_32FC1);
        Mat fZ = Mat::zeros(image.size(), CV_32FC1);
        Mat invfX = Mat::zeros(image.size(), CV_32FC1);
        Mat invfY = Mat::zeros(image.size(), CV_32FC1);
        Mat invfZ = Mat::zeros(image.size(), CV_32FC1);

        // fY = (L + 16) / 116, fX = fY + a / 500, fZ = fY - b / 200
        for(int x = 0; x < image.rows; x++){
            for(int y = 0; y < image.cols; y++){
                fY.at<float>(x,y) = (image.at<Vec3f>(x,y)[0] + 16.0) / 116.0;
                fX.at<float>(x,y) = fY.at<float>(x,y) + image.at<Vec3f>(x,y)[1] / 500.0;
                fZ.at<float>(x,y) = fY.at<float>(x,y) - image.at<Vec3f>(x,y)[2] / 200.0;
            }
        }
        invf(fX, invfX);
        invf(fY, invfY);
        invf(fZ, invfZ);
        for(int x = 0; x < image.rows; x++){
            for(int y = 0; y < image.cols; y++){
                output.at<Vec3f>(x,y)[0] = WhitePoint[0] * invfX.at<float>(x,y);
                output.at<Vec3f>(x,y)[1] = WhitePoint[1] * invfY.at<float>(x,y);
                output.at<Vec3f>(x,y)[2] = WhitePoint[2] * invfZ.at<float>(x,y);
            }
        }
    }

    // Inverse of the Lab f(t) function: t^3 above the (6/29)^3 ~ 0.008856 threshold,
    // the linear branch (t - 4/29) * (108/841) below it.
    void invf(cv::Mat& input, cv::Mat& output){
        for(int x = 0; x < input.rows; x++){
            for(int y = 0; y < input.cols; y++){
                output.at<float>(x,y) = pow(input.at<float>(x,y), 3);
                if(output.at<float>(x,y) < 0.008856){
                    output.at<float>(x,y) = (input.at<float>(x,y) - 4.0/29.0) * (108.0/841.0);
                }
            }
        }
    }
    
  2. Convert XYZ to RGB

    void XYZtoRGB(cv::Mat& input, cv::Mat& output){
        // sRGB (D65) XYZ -> linear RGB matrix, one output channel per row.
        float data[3][3] = {{3.240479, -1.53715, -0.498535}, {-0.969256, 1.875992, 0.041556}, {0.055648, -0.204043, 1.057311}};
        Mat T = Mat(3, 3, CV_32FC1, &data);
        Mat R = Mat::zeros(input.size(), CV_32FC1);
        Mat G = Mat::zeros(input.size(), CV_32FC1);
        Mat B = Mat::zeros(input.size(), CV_32FC1);

        for(int x = 0; x < input.rows; x++){
            for(int y = 0; y < input.cols; y++){
                // Apply each row of T to the (X, Y, Z) vector.
                R.at<float>(x,y) = T.at<float>(0,0)*input.at<Vec3f>(x,y)[0] + T.at<float>(0,1)*input.at<Vec3f>(x,y)[1] + T.at<float>(0,2)*input.at<Vec3f>(x,y)[2];
                G.at<float>(x,y) = T.at<float>(1,0)*input.at<Vec3f>(x,y)[0] + T.at<float>(1,1)*input.at<Vec3f>(x,y)[1] + T.at<float>(1,2)*input.at<Vec3f>(x,y)[2];
                B.at<float>(x,y) = T.at<float>(2,0)*input.at<Vec3f>(x,y)[0] + T.at<float>(2,1)*input.at<Vec3f>(x,y)[1] + T.at<float>(2,2)*input.at<Vec3f>(x,y)[2];
            }
        }
    
        //Desaturate and rescale to constrain resulting RGB values to [0,1]
        double RminVal, GminVal, BminVal;
        double RmaxVal, GmaxVal, BmaxVal;
        Point minLoc;
        Point maxLoc;
    
        minMaxLoc( R, &RminVal, &RmaxVal, &minLoc, &maxLoc );
        minMaxLoc( G, &GminVal, &GmaxVal, &minLoc, &maxLoc );
        minMaxLoc( B, &BminVal, &BmaxVal, &minLoc, &maxLoc );
    
        Mat matMin = Mat::zeros(1, 4, CV_32FC1), matMax = Mat::zeros(1, 4, CV_32FC1);
        matMin.at<float>(0,0) = RminVal; matMin.at<float>(0,1) = GminVal; matMin.at<float>(0,2) = BminVal; matMin.at<float>(0,3) = 0;
        double min, max;
        minMaxLoc( matMin, &min, &max, &minLoc, &maxLoc );
        float addWhite = -min;
        matMax.at<float>(0,0) = RmaxVal + addWhite; matMax.at<float>(0,1) = GmaxVal + addWhite; matMax.at<float>(0,2) = BmaxVal + addWhite; matMax.at<float>(0,3) = 1;
        minMaxLoc( matMax, &min, &max, &minLoc, &maxLoc );
        float Scale = max;
    
        for(int x = 0;x < input.rows;x++){
            for(int y = 0;y < input.cols;y++){
                output.at<Vec3f>(x,y)[2] = (R.at<float>(x,y) + addWhite) / Scale;
                output.at<Vec3f>(x,y)[1] = (G.at<float>(x,y) + addWhite) / Scale;
                output.at<Vec3f>(x,y)[0] = (B.at<float>(x,y) + addWhite) / Scale;
            }
        }
        imshow("Unscaled RGB", output);
    }
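
To tie the two steps together, the rough driver I use looks something like this. It is only a sketch: it assumes the Lab input is 32-bit float, which is what cvtColor produces when the BGR image is first converted to CV_32F in the 0..1 range, so L stays in 0..100 and a, b stay signed, as CIElabtoXYZ above expects.

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main(int argc, char** argv)
    {
        Mat bgr = imread(argv[1], IMREAD_COLOR);

        // Float Lab: with CV_32F input scaled to 0..1, cvtColor keeps L in 0..100
        // and a, b signed, matching what CIElabtoXYZ expects.
        Mat bgr32f, lab;
        bgr.convertTo(bgr32f, CV_32FC3, 1.0 / 255.0);
        cvtColor(bgr32f, lab, COLOR_BGR2Lab);

        Mat xyz = Mat::zeros(lab.size(), CV_32FC3);
        Mat rgb = Mat::zeros(lab.size(), CV_32FC3);
        CIElabtoXYZ(lab, xyz);   // step 1: Lab -> XYZ
        XYZtoRGB(xyz, rgb);      // step 2: XYZ -> rescaled RGB (also shows the result)

        waitKey();
        return 0;
    }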
    
Answered 2015-09-25T14:37:01.917