I am trying to implement a deconvolution algorithm in the Fourier transform domain. I have a working implementation of my own in Matlab, and I would like to translate it to C++ using the OpenCV library.
Basically, what I do is extract gradients from the input image, do some work in the transform domain, and then go back to the spatial domain.
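A minimal sketch of that round trip with OpenCV (the element-wise work in the middle is omitted; img stands for a single-channel CV_32F image):

Mat spectrum;
dft(img, spectrum, DFT_COMPLEX_OUTPUT);                             // 2-channel (Re, Im) spectrum
// ... element-wise operations in the transform domain go here ...
Mat restored;
dft(spectrum, restored, DFT_INVERSE | DFT_SCALE | DFT_REAL_OUTPUT); // back to a real CV_32F image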
The part that is causing me problems is performing this (element-wise) division:
DFT(im) = ( conj( DFT(f) ) * DFT(image) + L2 * conj( DFT( gradKernel-x ) ) * DFT(mux) + ... ) / ( norm( DFT(f) )^2 + L2 * norm( DFT(gradKernel-x) )^2 + ... )
f is the Gaussian kernel defined in the code. DFT( gradKernel-x ) is the FFT of the gradient kernel in the x direction, i.e. DFT([1,-1]) zero-padded to the size of the blurred image. mux is an auxiliary variable used for performing the deconvolution.
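To make the ellipses explicit (I read them as the corresponding y-direction terms, which is what the code below computes), the update I am after is

$$\widehat{im} \;=\; \frac{\overline{\widehat{f}}\,\widehat{image} \;+\; L_2\big(\,\overline{\widehat{d_x}}\,\widehat{\mu_x} \;+\; \overline{\widehat{d_y}}\,\widehat{\mu_y}\,\big)}{|\widehat{f}|^2 \;+\; L_2\big(\,|\widehat{d_x}|^2 \;+\; |\widehat{d_y}|^2\,\big)}$$

where the hat denotes the 2-D DFT, the overline denotes complex conjugation, d_x = [1,-1] and d_y = [1;-1] are the zero-padded gradient kernels, and both the products and the division are element-wise.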
Before performing the inverse DFT, I decided to carry out the division in the transform domain separately on magnitude and phase.
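For reference, since that denominator is a sum of squared magnitudes (so its imaginary plane is zero up to rounding), the same element-wise division could also be written directly in Cartesian form. A rough sketch of that alternative, reusing the num / den / fHk names from the loop below:

// Divide the 2-channel complex spectrum num element-wise by den, whose
// imaginary plane is (numerically) zero: divide both planes of num by Re(den).
Mat numP[2], denP[2], outP[2], fHk;
split(num, numP);
split(den, denP);
divide(numP[0], denP[0], outP[0]);   // Re(num) / den
divide(numP[1], denP[0], outP[1]);   // Im(num) / den
merge(outP, 2, fHk);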
I don't know where the error in my code is; it might be in the division, in the forward/inverse transforms of my variables, in the Gaussian kernel...
I would be very grateful if anyone could help me.
Here is the key part of the code (note that I simplified it before posting, so if you try it, don't expect a great deblurring result; basically what I expect is a visually pleasing output image):
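// Note: declarations of imH00, fkernel, temp, dx, dy, dHx, dHy, imHk, mux, muy and of
// the parameters ksize, sigma, L1 and L2 are omitted from this simplified excerpt.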
imH00=imread("Cameraman256.png",0);
if(!imH00.data)
{
std::cout<< "Could not open or find the image" << std::endl ;
return -1;
}
imH00.convertTo(imH00,CV_32F,1.0/255.0,0.0);
// Gaussian Kernel
Mat ker1D=getGaussianKernel(ksize,sigma,CV_32F);
fkernel.create(imH00.size(),CV_32F);
// zero-padding
fkernel.setTo(Scalar::all(0));
temp=ker1D*ker1D.t();
temp.copyTo(fkernel(Rect(0,0,temp.cols,temp.rows))); // Rect takes (x, y, width, height)
// Fourier transform
Mat planes[] = {Mat_<float>(fkernel), Mat::zeros(fkernel.size(), CV_32F)};
Mat ffkernel;
merge(planes, 2, ffkernel);
dft(ffkernel, ffkernel,DFT_COMPLEX_OUTPUT);
// Gradient filter in frequency domain, trying to do something similar to psf2otf([1;-1],size(imH00)); in Matlab.
dx=Mat::zeros(imH00.size(),CV_32F);
dx.at<float>(0,0)=1;
dx.at<float>(0,1)=-1;
Mat dxplanes[] = {Mat_<float>(dx), Mat::zeros(dx.size(), CV_32F)};
Mat fdx;
merge(dxplanes, 2, fdx);
dft(fdx, fdx,DFT_COMPLEX_OUTPUT);
dy=Mat::zeros(imH00.size(),CV_32F);
dy.at<float>(0,0)=1;
dy.at<float>(1,0)=-1;
Mat dyplanes[] = {Mat_<float>(dy), Mat::zeros(dy.size(), CV_32F)};
Mat fdy;
merge(dyplanes, 2, fdy);
dft(fdy, fdy,DFT_COMPLEX_OUTPUT);
// Denominators
Mat den1;
Mat den2;
Mat den21;
Mat den22;
// ||fdx||^2 and ||fdy||^2
mulSpectrums(fdx,fdx,den21,0,true);
mulSpectrums(fdy,fdy,den22,0,true);
add(den21,den22,den2);
mulSpectrums(ffkernel,ffkernel,den1,0,true);
imHk=imH00.clone();
mux=Mat::zeros(imH00.size(),CV_32F);
muy=Mat::zeros(imH00.size(),CV_32F);
// FFT imH00
Mat fHktplanes[] = {Mat_<float>(imH00), Mat::zeros(imH00.size(), CV_32F)};
Mat fHkt;
merge(fHktplanes, 2, fHkt);
dft(fHkt, fHkt,DFT_COMPLEX_OUTPUT);
std::cout<<"starting deconvolution"<<std::endl;
for (int j=0; j<4; j++)
{
// Deconvolution
// Gradients
Mat ddx(1,2,CV_32F,Scalar(0));
ddx.at<float>(0,0)=1;
ddx.at<float>(0,1)=-1;
filter2D(imHk,dHx,CV_32F,ddx);
Mat ddy(2,1,CV_32F,Scalar(0));
ddy.at<float>(0,0)=1;
ddy.at<float>(1,0)=-1;
filter2D(imHk,dHy,CV_32F,ddy);
mux=Scalar((float)-0.5*L1/L2);
add(mux,dHx,mux);
muy=Scalar((float)-0.5*L1/L2);
add(muy,dHy,muy);
// FFT mux, muy
Mat fmuxplanes[] = {Mat_<float>(mux), Mat::zeros(mux.size(), CV_32F)};
Mat fmux;
merge(fmuxplanes, 2, fmux);
dft(fmux, fmux,DFT_COMPLEX_OUTPUT);
Mat fmuyplanes[] = {Mat_<float>(muy), Mat::zeros(muy.size(), CV_32F)};
Mat fmuy;
merge(fmuyplanes, 2, fmuy);
dft(fmuy, fmuy,DFT_COMPLEX_OUTPUT);
Mat num1,num2,num3,num,den;
// Spectrums multiplication, complex conjugate
mulSpectrums(fHkt,ffkernel,num1,0,true); // fHkt .* conj(ffkernel)
mulSpectrums(fmux,fdx,num2,0,true);      // fmux .* conj(fdx)
mulSpectrums(fmuy,fdy,num3,0,true);      // fmuy .* conj(fdy)
add(num2,num3,num2);
add(num1,L2*num2,num);
add(den1,L2*den2,den);
// Division in polar coordinates
Mat auxnum[2];
split(num,auxnum);
Mat auxden[2];
split(den,auxden);
Mat numMag,numPhase;
magnitude(auxnum[0],auxnum[1],numMag);
phase(auxnum[0],auxnum[1],numPhase);
Mat denMag,denPhase;
magnitude(auxden[0],auxden[1],denMag);
phase(auxden[0],auxden[1],denPhase);
Mat division[2];
divide(numMag,denMag,division[0]);
division[1]=numPhase-denPhase;
polarToCart(division[0],division[1],division[0],division[1]);
Mat fHk;
merge(division,2,fHk);
Mat imHkaux;
Mat planesfHk[2];
dft(fHk, imHkaux, DFT_INVERSE+DFT_SCALE);
split(imHkaux,planesfHk);
imHk=planesfHk[0]; // imHk is the Real part
}
imHk.convertTo(imHk,CV_8U,255);
imshow( "Deblurred image", imHk);
Thanks