
I am trying to give an image a sketch-like effect in OpenCV using a Gaussian mixture modelling technique, but I am facing a problem: execution takes too long. When the image is small the run time drops, and when it is large the run time grows. Can anyone tell me how to reduce the execution time without changing the actual size of the image in the code below?

#include "opencv2/opencv.hpp"
#include <iostream>
#include <vector>
#include "opencv2/ml/ml.hpp"
#include <list>
#include <iostream>
using namespace cv;
using namespace std;

void clustrize_colors(Mat& src,Mat& dst)
{
	// Number of clusters
	int NrGMMComponents = 96;

	cv::GaussianBlur(src,src,Size(3,3),1);

	int srcHeight = src.rows;
	int srcWidth  = src.cols;

	// Get datapoints
	vector<Vec3d> ListSamplePoints;

	for (int y=0; y<srcHeight; y++)
	{
		for (int x=0; x<srcWidth; x++)
		{
			// Collecting points from image
			Vec3b bgrPixel = src.at<Vec3b>(y, x);

			uchar b = bgrPixel.val[0];
			uchar g = bgrPixel.val[1];
			uchar r = bgrPixel.val[2];
			if(rand()%25==0) // randomly keep roughly 1 in 25 pixels as a training sample
			{
				ListSamplePoints.push_back(Vec3d(b,g,r));
			}
		} // for (x)
	} // for (y)


	// Form training matrix
	int NrSamples = ListSamplePoints.size();    
	Mat samples( NrSamples, 3, CV_64FC1 );

	for (int s=0; s<NrSamples; s++)
	{
		Vec3d v = ListSamplePoints.at(s);
		samples.at<double>(s,0) = (double) v[0];
		samples.at<double>(s,1) = (double) v[1];
		samples.at<double>(s,2) = (double) v[2];
	}    
	// 
	cout << "Learning to represent the sample distributions with " << NrGMMComponents << " gaussians." << endl;
	cout << "Started GMM training" << endl;

	Ptr<cv::ml::EM> em_model;
	cv::ml::EM::Params params(NrGMMComponents,cv::ml::EM::COV_MAT_GENERIC);

	Mat labels(NrSamples,1,CV_32SC1);
	Mat logLikelihoods( NrSamples, 1, CV_64FC1 );

	// Train classifier
	em_model=cv::ml::EM::train(samples,logLikelihoods,labels,noArray(),params);
	cout << "Finished GMM training" << endl;

	// result image
	Mat img  = Mat::zeros( Size( srcWidth, srcHeight ), CV_8UC3 );

	// predict cluster
	Mat sample( 1, 3, CV_64FC1 );

	Mat means=em_model->getMeans();

	for(int i = 0; i < img.rows; i++ )
	{
		for(int j = 0; j < img.cols; j++ )
		{
			Vec3b v=src.at<Vec3b>(i,j);
			sample.at<double>(0,0) = (double) v[0];
			sample.at<double>(0,1) = (double) v[1];
			sample.at<double>(0,2) = (double) v[2];
			int response = cvRound(em_model->predict( sample ));
			img.at<Vec3b>(i,j)[0]=means.at<double>(response,0);
			img.at<Vec3b>(i,j)[1]=means.at<double>(response,1);
			img.at<Vec3b>(i,j)[2]=means.at<double>(response,2);
		}
	}

	img.convertTo(img,CV_8UC3);
        namedWindow("result",WINDOW_AUTOSIZE);
	imshow("result",img);
        imwrite("D:\\nfr.jpg",img);
	waitKey();
	dst=img;
}

void processLayer(Mat& src,Mat& dst)
{
	Mat tmp=src.clone();
	Mat gx,gy,mag,blurred;
	Sobel( src, gx, -1, 1, 0, 3);
	Sobel( src, gy, -1, 0, 1, 3);
	magnitude(gx,gy,mag);
	//GaussianBlur(mag,blurred,Size(3,3),2);
	//mag+=blurred;
	normalize(mag,mag,0,1,cv::NORM_MINMAX);
	//sqrt(mag,dst);
	dst=mag.clone();
	normalize(dst,dst,0,1,cv::NORM_MINMAX);
}

int main(int ac, char** av)
{
	Mat clusterized;
	Mat frame=imread("image path"); ////load an image//////
        //resize(frame,frame,Size(256,256),0,0,INTER_LINEAR);
	clustrize_colors(frame,clusterized);
	clusterized.convertTo(clusterized,CV_32FC3,1.0/255.0);
	frame.convertTo(frame,CV_32FC3,1.0/255.0);
	Mat result1;
	vector<Mat> ch;
	split(frame, ch);

	processLayer(ch[0],ch[0]);
	processLayer(ch[1],ch[1]);
	processLayer(ch[2],ch[2]);

	merge(ch,result1);

	result1=(0.5*frame-0.9*result1+0.3*clusterized)*2.0;
        namedWindow("result1",WINDOW_AUTOSIZE);
	imshow("result1",result1);
        //cout<<result1;
        imwrite("D:\\finalresult.jpg",result1);
	waitKey(0);
	//destroyAllWindows();
	return 0;
}


1 Answer


The bottleneck is most likely OpenCV's cv::ml::EM::train method. Training a classifier is not an easy or cheap task, and the classification problem is far from definitively solved; that is why there are large trade-offs and differences between algorithms, not to mention across different problem spaces.

As for performance, if you stick with EM, look at the EM class documentation (and possibly its parent classes) for parameters you can tune:

  • the maximum number of training iterations, and/or
  • the termination criteria used to stop training early (see the sketch below).
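As a rough illustration of where those two settings live, the snippet below uses the cv::ml::EM interface of the released OpenCV 3.x/4.x API (create(), setTermCriteria(), trainEM()); the code in the question uses the older EM::Params constructor, so treat this as a sketch to adapt rather than a drop-in replacement. The helper name trainLimitedEM is hypothetical, and the iteration cap of 20 and epsilon of 0.1 are arbitrary example values.

#include "opencv2/opencv.hpp"
#include "opencv2/ml.hpp"
using namespace cv;

// Sketch: train a GMM with a capped number of EM iterations.
// "samples" is the NrSamples x 3 CV_64FC1 matrix built in the question's code.
Ptr<ml::EM> trainLimitedEM(const Mat& samples, int nClusters)
{
	Ptr<ml::EM> em = ml::EM::create();
	em->setClustersNumber(nClusters);
	em->setCovarianceMatrixType(ml::EM::COV_MAT_GENERIC);
	// Stop after at most 20 iterations, or once the log-likelihood change
	// drops below 0.1, whichever comes first (example values).
	em->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 20, 0.1));
	Mat logLikelihoods, labels;
	em->trainEM(samples, logLikelihoods, labels, noArray());
	return em;
}

Lowering the iteration cap or loosening the epsilon trades some clustering quality for training time, so it is worth checking the result visually after each change.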

Since you are relying on a third-party library, there is not much more you can do to gain speed without sacrificing accuracy. On the other hand, the library is open source and probably fairly well optimized, so I would not recommend trying to optimize the library code itself.

Answered 2015-03-11T17:12:32.853