
I am trying to use the OpenCV EM algorithm to do color extraction. I am using the following code, based on the example in the OpenCV documentation:

cv::Mat capturedFrame ( height, width, CV_8UC3 );
int i, j;
int nsamples = 1000;
cv::Mat samples ( nsamples, 2, CV_32FC1 );
cv::Mat labels;
cv::Mat img = cv::Mat::zeros ( height, height, CV_8UC3 );
img = capturedFrame;
cv::Mat sample ( 1, 2, CV_32FC1 );
CvEM em_model;
CvEMParams params;
samples = samples.reshape ( 2, 0 );

for ( i = 0; i < N; i++ )
{
    //from the training samples
    cv::Mat samples_part = samples.rowRange ( i*nsamples/N, (i+1)*nsamples/N );

    cv::Scalar mean ( ((i%N)+1)*img.rows/(N1+1), ((i/N1)+1)*img.rows/(N1+1) );
    cv::Scalar sigma ( 30, 30 );
    cv::randn ( samples_part, mean, sigma );
}

samples = samples.reshape ( 1, 0 );

//initialize model parameters
params.covs         = NULL;
params.means        = NULL;
params.weights      = NULL;
params.probs        = NULL;
params.nclusters    = N;
params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;
params.start_step   = CvEM::START_AUTO_STEP;
params.term_crit.max_iter = 300;
params.term_crit.epsilon  = 0.1;
params.term_crit.type     = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS;

//cluster the data
em_model.train ( samples, Mat(), params, &labels );

cv::Mat probs;
probs = em_model.getProbs();

cv::Mat weights;
weights = em_model.getWeights();

cv::Mat modelIndex = cv::Mat::zeros ( img.rows, img.cols, CV_8UC3 );

for ( i = 0; i < img.rows; i++ )
{
    for ( j = 0; j < img.cols; j++ )
    {
        sample.at<float>(0) = (float)j;
        sample.at<float>(1) = (float)i;

        int response = cvRound ( em_model.predict ( sample ) );
        modelIndex.data [ modelIndex.cols*i + j ] = response;
    }
}

My questions are:

First, I want to extract each model (five in total here) and store the pixel values belonging to each one in five separate matrices, so that I end up with five different colors. Right now I only get their indices; is there a way to obtain their corresponding colors? To make it easier, I could start by finding the dominant colors based on these five GMM components.

Second, my number of sample data points here is "100", and they take about 3 seconds. But I want to do all of this in no more than 30 ms. I know that OpenCV background subtraction, which uses a GMM, runs very fast (below 20 ms), so there must be a way for me to do all of this for all 600x800 = 480,000 pixels within 30 ms. I found the predict function to be the most time-consuming part.


1 Answer


First question:

For color extraction you first need to train the EM with your input pixels. After that you simply loop over all the input pixels again and use predict() to classify each of them. I have attached a small example that uses the EM for foreground/background separation based on color. It shows you how to extract the dominant color (mean) of every Gaussian and how to access the original pixel color.

#include <opencv2/opencv.hpp>

int main(int argc, char** argv) {

    cv::Mat source = cv::imread("test.jpg");

    //output images
    cv::Mat meanImg(source.rows, source.cols, CV_32FC3);
    //initialize the foreground/background images to black so that the pixels
    //of the other class stay empty
    cv::Mat fgImg = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);
    cv::Mat bgImg = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);

    //convert the input image to float
    cv::Mat floatSource;
    source.convertTo(floatSource, CV_32F);

    //now convert the float image to column vector
    cv::Mat samples(source.rows * source.cols, 3, CV_32FC1);
    int idx = 0;
    for (int y = 0; y < source.rows; y++) {
        cv::Vec3f* row = floatSource.ptr<cv::Vec3f > (y);
        for (int x = 0; x < source.cols; x++) {
            samples.at<cv::Vec3f > (idx++, 0) = row[x];
        }
    }

    //we need just 2 clusters
    cv::EMParams params(2);
    cv::ExpectationMaximization em(samples, cv::Mat(), params);

    //the two dominating colors
    cv::Mat means = em.getMeans();
    //the weights of the two dominant colors
    cv::Mat weights = em.getWeights();

    //we define the foreground as the dominant color with the largest weight
    //(the weights, like the means, are stored as doubles by the legacy EM)
    const int fgId = weights.at<double>(0) > weights.at<double>(1) ? 0 : 1;

    //now classify each of the source pixels
    idx = 0;
    for (int y = 0; y < source.rows; y++) {
        for (int x = 0; x < source.cols; x++) {

            //classify
            const int result = cvRound(em.predict(samples.row(idx++), NULL));
            //get the according mean (dominant color)
            const double* ps = means.ptr<double>(result, 0);

            //set the according mean value to the mean image
            float* pd = meanImg.ptr<float>(y, x);
            //float images need to be in [0..1] range
            pd[0] = ps[0] / 255.0;
            pd[1] = ps[1] / 255.0;
            pd[2] = ps[2] / 255.0;

            //set either foreground or background
            if (result == fgId) {
                fgImg.at<cv::Point3_<uchar> >(y, x) = source.at<cv::Point3_<uchar> >(y, x);
            } else {
                bgImg.at<cv::Point3_<uchar> >(y, x) = source.at<cv::Point3_<uchar> >(y, x);
            }
        }
    }

    cv::imshow("Means", meanImg);
    cv::imshow("Foreground", fgImg);
    cv::imshow("Background", bgImg);
    cv::waitKey(0);

    return 0;
}

I have tested the code with a sample image, and it performs quite well.

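If you want the five components from your question instead of two, the same pattern extends directly: train once with five clusters, then route every pixel into the image of its predicted component and read the five dominant colors from getMeans(). The following is only a rough, untested sketch of that idea, reusing the legacy EM wrapper from the example above; NUM_CLUSTERS and the input file name are placeholders.

#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>
#include <vector>

int main(int argc, char** argv) {

    cv::Mat source = cv::imread("test.jpg");

    //flatten the image into an N x 3 float sample matrix, as in the example above
    cv::Mat floatSource;
    source.convertTo(floatSource, CV_32F);
    cv::Mat samples(source.rows * source.cols, 3, CV_32FC1);
    int idx = 0;
    for (int y = 0; y < source.rows; y++) {
        cv::Vec3f* row = floatSource.ptr<cv::Vec3f>(y);
        for (int x = 0; x < source.cols; x++) {
            samples.at<cv::Vec3f>(idx++, 0) = row[x];
        }
    }

    //five components, as asked for in the question (placeholder value)
    const int NUM_CLUSTERS = 5;
    cv::EMParams params(NUM_CLUSTERS);
    cv::ExpectationMaximization em(samples, cv::Mat(), params);

    //the dominant color (mean) of each component, stored as doubles
    cv::Mat means = em.getMeans();

    //one output image per component; pixels of the other components stay black
    std::vector<cv::Mat> clusterImg(NUM_CLUSTERS);
    for (int k = 0; k < NUM_CLUSTERS; k++) {
        clusterImg[k] = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);
    }

    //classify every pixel and copy it into the image of its component
    idx = 0;
    for (int y = 0; y < source.rows; y++) {
        for (int x = 0; x < source.cols; x++) {
            const int k = cvRound(em.predict(samples.row(idx++), NULL));
            clusterImg[k].at<cv::Vec3b>(y, x) = source.at<cv::Vec3b>(y, x);
        }
    }

    //print the dominant colors and show the per-component images
    for (int k = 0; k < NUM_CLUSTERS; k++) {
        std::cout << "dominant color " << k << ": "
                  << means.at<double>(k, 0) << " "
                  << means.at<double>(k, 1) << " "
                  << means.at<double>(k, 2) << std::endl;
        std::ostringstream name;
        name << "Cluster " << k;
        cv::imshow(name.str(), clusterImg[k]);
    }
    cv::waitKey(0);

    return 0;
}

The means are in the same BGR order and 0..255 range as the float samples, so they can be used directly as the five dominant colors.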

Second question:

I have noticed that the maximum number of clusters has a huge impact on performance. So it is better to set it to a very conservative value instead of leaving it empty or setting it to the number of samples as in the example. Furthermore, the documentation mentions an iterative procedure to repeatedly optimize the model with less constrained parameters. Maybe this gives you some speed-up. To read more, have a look at the documentation inside the sample code provided for train() here.
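
That iterative procedure essentially means: fit a cheap, strongly constrained (spherical) model first, then re-train with START_E_STEP, using the first model's means, covariances and weights as the starting point for a less constrained (e.g. diagonal) model. Roughly, and untested, it looks like the sketch below; it assumes the legacy CvEM API from your question and uses random data only to be self-contained (on newer 2.4.x releases the old CvEM class may additionally need the legacy header).

#include <opencv2/opencv.hpp>
//#include <opencv2/legacy/legacy.hpp> //may be needed on 2.4.x, where CvEM was moved

int main(int argc, char** argv) {

    const int nclusters = 5;
    const int nsamples  = 1000;

    //random 2-D data, only so that the sketch is self-contained
    cv::Mat samples(nsamples, 2, CV_32FC1);
    cv::randu(samples, cv::Scalar(0), cv::Scalar(255));
    cv::Mat labels;

    //first pass: cheap, strongly constrained spherical covariances
    CvEMParams params;
    params.nclusters    = nclusters;
    params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;
    params.start_step   = CvEM::START_AUTO_STEP;
    params.term_crit    = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 300, 0.1);

    CvEM em_coarse;
    em_coarse.train(samples, cv::Mat(), params, &labels);

    //second pass: less constrained (diagonal) covariances, seeded with the
    //coarse estimates via START_E_STEP so that far fewer iterations are needed
    CvEMParams refined   = params;
    refined.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
    refined.start_step   = CvEM::START_E_STEP;
    refined.means        = em_coarse.get_means();
    refined.covs         = (const CvMat**) em_coarse.get_covs();
    refined.weights      = em_coarse.get_weights();

    CvEM em_refined;
    em_refined.train(samples, cv::Mat(), refined, &labels);

    return 0;
}

Note that this mainly cuts the training time; the per-pixel predict() loop you identified as the bottleneck still runs once per pixel, so keeping the cluster count small remains the biggest lever there.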

Answered 2012-10-22T15:46:22.967