
Can someone explain to me how to use SVM with regression in OpenCV C++? Should the regression return a value that can be a position in the image (x and y coordinates), or just a single number?

I have to use it for video analysis. I have three points and I know their trajectories over the whole video. I want to use the trajectories of two of them to infer the trajectory of the third, so I want to train an SVM with these trajectories.

I was thinking of organizing my training data and labels like this:

  • Training data: the positions (x, y) of the two points in each frame
  • Labels: the position (x, y) of the third point in each frame

Is this the right approach, or should I organize it like this:

  • Training data: the positions (x, y) of the three points in each frame, plus negative examples where I change the position of the third point to simulate a wrong training set
  • Labels: 1 if the position of the third point belongs to its trajectory, -1 if it belongs to the wrong training set

1 Answer


SVM can be used for regression: see http://alex.smola.org/papers/2003/SmoSch03b.pdf. But a multivariate RVM will probably be better suited to your problem; see here (with C++ source): http://mi.eng.cam.ac.uk/~at315/MVRVM.htm
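
If you do want to stay with an SVM, OpenCV 2.x's CvSVM supports epsilon-SVR, but it predicts a single scalar per sample, so for an (x, y) target you would train one regressor per output coordinate. Below is a minimal sketch of that idea applied to your first data layout; the parameter values are only illustrative and would need tuning:

#include <opencv2/opencv.hpp>
using namespace cv;

// pts12: Nx4 CV_32FC1, one row per frame: x1, y1, x2, y2 (the two known points)
// pt3:   Nx2 CV_32FC1, one row per frame: x3, y3 (the point to predict)
void TrainPointRegressors(const Mat& pts12, const Mat& pt3, CvSVM& svmX, CvSVM& svmY)
{
    CvSVMParams params;
    params.svm_type    = CvSVM::EPS_SVR;  // epsilon support vector regression
    params.kernel_type = CvSVM::RBF;
    params.gamma       = 0.1;             // illustrative values only, tune (e.g. with train_auto)
    params.C           = 10;
    params.p           = 0.1;             // epsilon of the regression tube
    params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, 1e-6);

    svmX.train(pts12, pt3.col(0).clone(), Mat(), Mat(), params); // regressor for x3
    svmY.train(pts12, pt3.col(1).clone(), Mat(), Mat(), params); // regressor for y3
}

// prediction for one frame, where sample is a 1x4 CV_32FC1 row with x1, y1, x2, y2:
// Point2f p3(svmX.predict(sample), svmY.predict(sample));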

Here is my OpenCV version of that code:

#include<iostream>
#include<fstream>
#include<cstdlib>
#include<string>
#include<vector>
#include<opencv2/opencv.hpp>
using namespace std;

using namespace cv;
#define COUT_VAR(x) cout << #x"=" << x << endl;
#define SHOW_IMG(x) namedWindow(#x);imshow(#x,x);waitKey(20);

//calculates the convolution of two polynomials
// n  - size of polynomial x
// m  - size of polynomial y
// z  - resulting polynomial of size n+m-1
void conv(int n,int m,double* x,double* y,double* z)
{
    int i,j;
    for(i=0; i<(n+m-1); i++)
    {
        z[i]=0.0;
        for(j=0; j<n; j++)
        {
            if(((i-j)>=0)&& ((i-j)<m))
            {
                z[i]+=x[j]*y[i-j];
            }
        }
    }
}


//calculates polynomial coefficients from a set of roots
//input
//n - number of roots
//x - array of roots
//output
//y - array of polynomial coefficients (size n+1)

void poly(int n,double* x,double* y)
{
    int i,j;
    for(j=0; j<=n; j++)
    {
        y[j]=0.0;
    }
    y[0]=1;
    double temp[20];
    for(j=0; j<n; j++)
    {
        for(i=0; i<=(j); i++)
        {
            temp[i+1]=y[i+1]-x[j]*y[i];
        }
        for(i=0; i<=(j); i++)
        {
            y[i+1]=temp[i+1];
        }
    }
}

//finds the roots of a polynomial; returns the number of roots found (only the real parts are kept)

int Roots(int n,double* cof,double*roots)
{
    bool fnzf=false;
    int k=0,i;
    double acof[100];
    for(i=0; i<n; i++)
    {
        if(!fnzf)
        {
            if(fabs(cof[i])>10e-6)
            {
                fnzf=true;
                acof[k]=cof[i];
                k++;
            }
        }
        else
        {
            acof[k]=cof[i];
            k++;
        }
    }
    if(k==0)
    {
        return 0;
    }

    Mat a(k,1,CV_64FC1);

    for(i=0; i<k; i++)
    {
        a.at<double>(i)=acof[i];
    }
    Mat r;
    cv::solvePoly(a,r);
    Mat ch[2];
    cv::split(r,ch);

    Mat b=ch[0].clone();

    for(i=0; i<b.rows; i++)
    {
        roots[i]=b.at<double>(i);
    }

    return b.rows;
}





//solves the polynomial equation for a single hyperparameter alpha and returns
//the best new value together with the corresponding marginal-likelihood increase

void SolveForAlpha(int n,Mat& q, Mat& s,Mat& Q,Mat S,double old_alpha,double& newAlpha,double& l_inc)
{
    double ALPHA_MAX=1e12;
    double C[500], P[500], PP[500], SS[500], CC[500];
    int i,k,j;
    for(i=0; i<n; i++)
    {
        C[i]=0.0;
    }
    for(i=0; i<(2*n-1); i++)
    {
        CC[i]=0.0;
    }
    for(i=0; i<n; i++)
    {
        k=0;
        for(j=0; j<n; j++)
        {
            if(i==j)
            {
                continue;
            }
            SS[k]=-s.at<double>(j);
            k++;
        }
        poly(n-1,SS,P);
        conv(n,n,P,P,PP);
        for(j=0; j<n; j++)
        {
            C[j]=C[j]+P[j];
        }
        for(j=0; j<(2*n-1); j++)
        {
            CC[j]=CC[j]+ q.at<double>(i)*q.at<double>(i)*PP[j];
        }
    }
    for(i=0; i<n; i++)
    {
        SS[i]=-s.at<double>(i);
    }
    poly(n,SS,P);
    conv(n+1,n+1,P,P,PP);
    for(i=0; i<(2*n+1); i++)
    {
        PP[i]=n*PP[i];
    }
    double CC0[500];
    conv(n+1,n,P,C,CC0);
    double CCC[500];
    CCC[0]=CC0[0];
    for(i=1; i<(2*n); i++)
    {
        CCC[i]=CC[i-1]+CC0[i];
    }
    CCC[2*n]=0;
    bool fnonzero=false;
    double aa[1000];
    k=0;
    for(i=0; i<(2*n+1); i++)
    {
        aa[i]=PP[i]-CCC[i];
    }
    double roots[100];
    int nroots=Roots(2*n+1,aa,roots);
    roots[nroots]=ALPHA_MAX;
    nroots++;
    double L[100];
    double alphas[100];
    for(i=0; i<100; i++)
    {
        L[i]=0.0;
    }
    k=0;
    for( i=0; i<nroots; i++)
    {
        if(roots[i]<=0)
        {
            continue;
        }
        double new_alpha=roots[i];
        if((old_alpha>=(ALPHA_MAX-1))&& (new_alpha<ALPHA_MAX))
        {
            alphas[k]=new_alpha;
            for(j=0; j<n; j++)
            {
                L[k]=L[k]+ log(new_alpha/(new_alpha+s.at<double>(j)))+((q.at<double>(j)*q.at<double>(j)/(new_alpha+s.at<double>(j))));
            }
        }
        else if((old_alpha<ALPHA_MAX)&& (new_alpha>=(ALPHA_MAX-1)))
        {
            alphas[k]=new_alpha;
            for(j=0; j<n; j++)
            {
                L[k]=L[k]- log(old_alpha/(old_alpha+s.at<double>(j)))-((q.at<double>(j)*q.at<double>(j)/(old_alpha+s.at<double>(j))));
            }
        }
        else if((old_alpha<ALPHA_MAX)&&(new_alpha<ALPHA_MAX))
        {
            alphas[k]=new_alpha;
            double ad=(1/new_alpha)-(1/old_alpha);
            for (j=0; j<n; j++)
            {
                L[k]=L[k]+ log(new_alpha/(new_alpha+s.at<double>(j)))+((q.at<double>(j)*q.at<double>(j)/(new_alpha+s.at<double>(j)))) -log(old_alpha/(old_alpha+s.at<double>(j)))-((q.at<double>(j)*q.at<double>(j)/(old_alpha+s.at<double>(j))));
            }
        }
        else
        {
            alphas[k]=new_alpha;
            L[k]=0.0;
        }
        k++;
    }
    l_inc=-2000000.0;
    for(i=0; i<k; i++) if(L[i]>l_inc)
    {
        l_inc=L[i];
        newAlpha=alphas[i];
    }
}
//-------------------------------------------------------------------------------

Mat distSqrd(Mat& X,Mat& Y)
{
    Mat D2;
    int nx  = X.rows;
    int ny  = Y.rows;
    Mat xsq,ysq;
    pow(X,2,xsq);
    pow(Y,2,ysq);
    Mat sumsqx;
    cv::reduce(xsq,sumsqx,1,CV_REDUCE_SUM);
    Mat sumsqy;
    cv::reduce(ysq,sumsqy,1,CV_REDUCE_SUM);
    D2  = sumsqx*Mat::ones(1,ny,CV_64FC1) + Mat::ones(nx,1,CV_64FC1)*sumsqy.t() - 2*(X*(Y.t()));
    return D2;
}



Mat kernelFunction(Mat& X1,Mat& X2,double lenscal)
{
    Mat K;
    int N1=X1.rows;
    int N2=X2.rows;
    int d=X1.cols;
    double eta  = 1.0/pow(lenscal,2);
    exp(-eta*distSqrd(X1,X2),K);
    Mat K1=Mat(K.rows,K.cols+1,CV_64FC1);
    K.copyTo(K1( Range(0,K.rows),Range(1,K1.cols)));
    K1.col(0)=1;
    return K1;
}

//Trains a multivariate RVM regressor.
//output
//  beta        noise precision per target dimension, initialised to beta_p = 1/var(t_p)
//  weights     trained weights of the relevant basis functions [MM x P]
//  RV_idxs     indices (into the columns of PHI) of the relevant basis functions
//  RV_X        input coordinates of the relevant vectors
//
//input
//  X           [N x d] training inputs (N training examples)
//  t           [N x P] training targets (P target dimensions)
//  KernelWidth width of the RBF kernel used to build the design matrix PHI [N x M], M = N+1 (bias column)
//  Max_It      maximum number of training iterations

void TrainRVM( Mat &X,Mat &t,double KernelWidth,int Max_It, Mat& beta,Mat &weights,Mat& RV_idxs,Mat& RV_X)
{
    int N=X.rows;
    int M=N+1;
    int P=t.cols;

    int i,j,p,k,MM;
    const double INF=1e36;

    beta=Mat (P,1,CV_64FC1);
    Scalar mean,stddev;
    // initial estimate of the noise precision for each target dimension: beta_p = 1/var(t_p)
    for(int i=0;i<P;i++)
    {
        cv::meanStdDev(t.col(i),mean,stddev);
        beta.at<double>(i)=1.0/(stddev[0]*stddev[0]+10.0*DBL_EPSILON);
    }

    Mat nonZero=Mat::zeros(M,1,CV_8UC1);

    Mat PHI = kernelFunction(X,X,KernelWidth);
    Mat PHIt;

    PHIt=PHI.t()*t;

    const double ALPHA_MAX=1e12;

    Mat alpha(M,1,CV_64FC1);
    alpha=ALPHA_MAX;

    Mat gamma=Mat::ones(M,1,CV_64FC1);

    bool run_cond;

    Mat PHI2=PHI.t()*PHI;

    Mat s(P,1,CV_64FC1);
    Mat S(P,1,CV_64FC1);
    Mat q(P,1,CV_64FC1);
    Mat Q(P,1,CV_64FC1);
    Mat l_inc=Mat::zeros(M,1,CV_64FC1);
    Mat newAlpha=Mat::zeros(M,1,CV_64FC1);

    double max_alpha, max_linc=-INF;
    int max_index=-1;
    int maxidx[2];

    // Seed the model with the bias basis (column 0 of PHI) so the first iteration has
    // at least one relevant vector; the seed follows the usual fast-RVM initialisation
    // alpha = ||phi||^2 / (||phi^T t||^2 / ||phi||^2 - 1/beta).
    {
        Mat phi0=PHI.col(0);
        double phi2=phi0.dot(phi0);
        double pt=phi0.dot(t.col(0));
        double denom=pt*pt/phi2-1.0/beta.at<double>(0);
        alpha.at<double>(0)=(denom>DBL_EPSILON) ? phi2/denom : 1.0;
    }

        //start the iterations
        run_cond=true;
        i=1;
        while(run_cond)
        {
            MM=0;

            for(j=0; j<M; j++)
            {
                if(alpha.at<double>(j)<ALPHA_MAX)
                {
                    nonZero.at<unsigned char>(j)=true;
                    MM++;
                }
                else
                {
                    nonZero.at<unsigned char>(j)=false;
                }
            }

            Mat PHI_nz=Mat::zeros(N,MM,CV_64FC1);
            Mat alpha_nz(MM,1,CV_64FC1);

            k=0;
            for(j=0; j<M; j++)
            {
                if(nonZero.at<unsigned char>(j))
                {
                    alpha_nz.at<double>(k)=alpha.at<double>(j);
                    PHI.col(j).copyTo(PHI_nz.col(k));
                    k++;
                }
            }

            vector<Mat> SIGMA_nz;
            Mat HH=(PHI_nz.t()*PHI_nz);

            for(p=0; p<P; p++)
            {
                Mat H=beta.at<double>(p)*HH;
                H.diag()+=alpha_nz;
                SIGMA_nz.push_back(H.inv(cv::DECOMP_SVD));
            }

            Mat dp_tmp;
            Mat phi;

            for(k=0; k<M; k++)
            {
                phi=PHI.col(k);
                for(j=0; j<P; j++)
                {                   
                    dp_tmp=beta.at<double>(j)*phi.t()*phi-beta.at<double>(j)*beta.at<double>(j)*phi.t()*PHI_nz*SIGMA_nz[j]*PHI_nz.t()*phi;
                    S.at<double>(j)=dp_tmp.at<double>(0);
                    dp_tmp=beta.at<double>(j)*phi.t()*t.col(j)-beta.at<double>(j)*beta.at<double>(j)*phi.t()*PHI_nz*SIGMA_nz[j]*PHI_nz.t()*t.col(j);
                    Q.at<double>(j)=dp_tmp.at<double>(0);
                }

                if(alpha.at<double>(k)<ALPHA_MAX)
                {
                    s=alpha.at<double>(k)*S/(alpha.at<double>(k)-S);
                    q=alpha.at<double>(k)*Q/(alpha.at<double>(k)-S);
                }
                else
                {
                    s=S.clone();
                    q=Q.clone();
                }
                SolveForAlpha(P,q,s,Q,S,alpha.at<double>(k),newAlpha.at<double>(k),l_inc.at<double>(k));
            }

            cv::minMaxIdx(l_inc,0,&max_linc,0,maxidx);
            max_index=maxidx[0];
            max_alpha=newAlpha.at<double>(max_index);

            alpha.at<double>(max_index)=max_alpha;

            weights=Mat(MM,P,CV_64FC1);

            for(j=0; j<P; j++)
            {
                Mat tmp=beta.at<double>(j)*SIGMA_nz[j]*PHI_nz.t()*t.col(j);
                tmp.copyTo(weights.col(j));

                // gamma_i = 1 - alpha_i * Sigma_ii, summed over the MM relevant basis functions
                double gamma_sum=(double)MM - sum(alpha_nz.mul(SIGMA_nz[j].diag()))[0];

                Mat error=t.col(j)-PHI_nz*(weights.col(j));
                pow(error,2,error);
                double ED=0.0;
                ED=sum(error)[0];
                beta.at<double>(j)=(((double)N)- gamma_sum)/ED;
            }
            i++;

            // Stop after the maximum number of iterations, or when the largest
            // marginal-likelihood increase no longer exceeds the threshold.
            if(i>Max_It || max_linc<(100.0*DBL_EPSILON))
            {
                run_cond=false;
            }
        }

        // number of relevant vectors
        int nRVS=cv::countNonZero(nonZero);
        // indices of the relevant basis functions
        RV_idxs=Mat(nRVS,1,CV_32SC1);
        // input coordinates of the relevant vectors (the bias basis has no input coordinate, hence nRVS-1)
        RV_X=Mat(nRVS-1,1,CV_64FC1);

        int ind=0;

        for(int i=0;i<nonZero.rows;i++)
        {
            if(nonZero.at<unsigned char>(i))
            {
                RV_idxs.at<int>(ind)=i;
                // index 0 corresponds to the added bias column,
                // so coordinates start from the second relevant vector.
                if(i>0)
                {
                RV_X.at<double>(ind-1)=X.at<double>(i);
                }
                ind++;
            }
        }
}

// -------------------------------------------------------------------
// Matrix loading (text format, for exchanging data with MATLAB during debugging)
// -------------------------------------------------------------------
void ReadMat(ifstream& ifs,Mat& M)
{
    int rows;
    int cols;
    ifs >> rows;
    char comma;
    ifs >> comma;
    ifs >> cols;
    M=Mat(rows,cols,CV_64FC1);
    for(int i=0;i<rows;i++)
    {
        for(int j=0;j<cols;j++)
        {
            ifs >> M.at<double>(i,j);
            if(j!=cols-1){ifs >> comma;}
        }
    }
}
// -------------------------------------------------------------------
// MATLAB matrix loading (for debug purpose)
// -------------------------------------------------------------------
Mat LoadMatrix(string filename)
{
    Mat M;
    ifstream ifs(filename);
    if (ifs.is_open())
    {
        ReadMat(ifs,M);
        ifs.close();
    }
    return M;
}

void PredictRVM(Mat& X,Mat& RV_X,Mat& weights,double KernelWidth,Mat& y_rvm)
{
    Mat PHI=kernelFunction(X,RV_X,KernelWidth);
    y_rvm=PHI*weights;
}

int main()
{
    int gH=600;
    int gW=600;
    Mat graph=Mat::zeros(gH,gW,CV_8UC3);
    namedWindow("graph");


    Mat t=LoadMatrix("t.txt");
    Mat X=LoadMatrix("X.txt");

    for(int i=0;i<X.rows;i++)
    {
        circle(graph,Point(i*6,gH-(t.at<double>(i,0)+1)*160),2,Scalar(0,255,255),-1,CV_AA);
        circle(graph,Point(i*6,gH-(t.at<double>(i,1)+1)*160),2,Scalar(0,255,255),-1,CV_AA);
    }

    imshow("graph",graph);
    waitKey(10);

    cout.precision(15);

    Mat weights;
    Mat RV_idxs;
    Mat beta;
    Mat RV_X;
    double KernelWidth=4;
    TrainRVM(X,t,KernelWidth,50, beta,weights,RV_idxs,RV_X);

    COUT_VAR(weights);

    Mat y_rvm;
    PredictRVM(X,RV_X,weights,KernelWidth,y_rvm);

    FileStorage fs("test.yml", FileStorage::WRITE);
    fs << "weights" << weights;
    fs << "RV_X" << RV_X;
    fs.release();


    // draw the RVM prediction by connecting consecutive predicted points (both target dimensions)
    for(int i=1;i<X.rows;i++)
    {
        Point2d p11(i*6,gH-(y_rvm.at<double>(i,0)+1)*160);
        Point2d p21(i*6,gH-(y_rvm.at<double>(i,1)+1)*160);
        Point2d p12((i-1)*6,gH-(y_rvm.at<double>(i-1,0)+1)*160);
        Point2d p22((i-1)*6,gH-(y_rvm.at<double>(i-1,1)+1)*160);
        line(graph,p11,p12,Scalar(255,255,0),1,CV_AA);
        line(graph,p21,p22,Scalar(255,255,0),1,CV_AA);
    }

    imshow("graph",graph);

    waitKey();

    return 0;

}
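
A note on the input files: LoadMatrix / ReadMat above expect a plain-text matrix that starts with rows,cols followed by the values, comma-separated within each row. For example, a 3x2 t.txt could look like this (the numbers are just placeholders):

3,2
-1.0,0.5
0.0,0.25
1.0,0.9
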
answered 2013-08-19 17:13