
I am working on a project in which I reduce an image to the contour of an object (the pupil of an eye, in this case). Now I want to apply a Hough transform to the image that contains the pupil contour, but nothing happens. My guess is that the Hough transform can only be applied to grayscale images, so I would like to know how to get back the grayscale part of the pupil contour I have already detected, so that I can apply the Hough transform to it.
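
Roughly what I tried, as a minimal sketch (contourImg stands for my binary image that contains only the pupil contour; the choice of HoughCircles and all of its parameters are just guesses):

Mat contourImg;                      // my binary image containing only the pupil contour
vector<Vec3f> circles;
// dp=1, minDist=20, Canny threshold=100, accumulator threshold=30, any radius -- all guesses
HoughCircles(contourImg, circles, CV_HOUGH_GRADIENT, 1, 20, 100, 30, 0, 0);
// circles comes back empty, which is why I suspect Hough needs the grayscale image itself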


1 Answer


Assuming your video source is an ordinary webcam, I am not sure whether Hough will work. I tried it and it did not work for me, though it might with more time spent on it. Here is a color-based approach that did work for me. Result: https://s3-ap-southeast-1.amazonaws.com/kzl/eye.png

#include "stdafx.h"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <time.h>
#include "opencv2/objdetect/objdetect.hpp"
using namespace cv;
using namespace std;


// YUV channel shifts (s0,s1,s2) used to recolor the iris, and the iris threshold; all tunable via trackbars
int s0=31,s1=96,s2=189, thresh=9;
String face_cascade_name = "lbpcascade_frontalface.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade,eyes_cascade;

void proc( Mat frame )
{
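    // detect a face and two eyes, build a binary mask of the dark iris/pupil pixels,
    // then recolor those pixels in the output frame and show the result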
    double time = (double)getTickCount();
    std::vector<Rect> faces;
    Mat frame_gray;
    Mat eyeMask=Mat::zeros(frame.rows, frame.cols, CV_8UC1);
    cvtColor( frame, frame_gray, CV_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );

    // detect faces in the equalized grayscale frame
    face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(50, 50));

    for( int i = 0; i < faces.size(); i++ )
    {
        Mat faceROI = frame_gray( faces[i] );
        std::vector<Rect> eyes;

        eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        if( eyes.size() == 2)
        {
            Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
            ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );

            for( int j = 0; j < eyes.size(); j++ )
            {
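                // keep the lower 75% of the eye box (drops the brow), equalize the Y (luma)
                // channel and threshold it so that dark iris/pupil pixels become white,
                // then copy the result into eyeMask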
                Rect roi =  Rect(faces[i].x+eyes[j].x,faces[i].y+eyes[j].y+(eyes[j].height*.25),eyes[j].width,eyes[j].height*.75);
                Mat eye(frame,roi);
                Mat eyeYuv;
                vector<Mat> eyechs;
                cvtColor(eye,eyeYuv,CV_BGR2YUV);
                split(eyeYuv,eyechs);
                Mat mask= eyechs[0];
                equalizeHist(mask,mask);
                threshold(mask,mask,thresh,255,CV_THRESH_BINARY_INV);
                mask.copyTo(eyeMask(roi));
            }
            imshow("mask",eyeMask);
        }

    }

    // recolor the masked (iris/pupil) pixels by shifting their YUV values by s0/s1/s2
    cvtColor( frame, frame, CV_BGR2YUV );
    for(int i=0;i<eyeMask.rows;i++)
    {
        for(int j=0;j<eyeMask.cols;j++)
        {
            if(eyeMask.at<uchar>(i,j)>0)
            {
                frame.at<Vec3b>(i,j)[0]=(frame.at<Vec3b>(i,j)[0]+s0)%255;
                frame.at<Vec3b>(i,j)[1]=(frame.at<Vec3b>(i,j)[1]+s1)%255;
                frame.at<Vec3b>(i,j)[2]=(frame.at<Vec3b>(i,j)[2]+s2)%255;
            }
        }
    }
    cvtColor( frame, frame, CV_YUV2BGR );

    time = ((double)getTickCount() - time)/getTickFrequency();
    std::ostringstream strs;
    strs << time;
    std::string text = strs.str();

    putText(frame, text, Point(30,30), FONT_HERSHEY_SCRIPT_SIMPLEX, .7, Scalar::all(255), 2,8);
    imshow( "cam", frame );
}

void run()
{
    namedWindow("cam", CV_WINDOW_KEEPRATIO);
    namedWindow("ctrl", CV_WINDOW_KEEPRATIO);
    namedWindow("mask", CV_WINDOW_KEEPRATIO);

    createTrackbar( "s0", "ctrl", &s0, 255, NULL );
    createTrackbar( "s1", "ctrl", &s1, 255, NULL );
    createTrackbar( "s2", "ctrl", &s2, 255, NULL );
    createTrackbar( "irisThresh", "ctrl", &thresh, 255, NULL );

    VideoCapture cap;
    cap.open(0);
    if( !cap.isOpened() )
    {
        cout << "cap error\n";
        return;
    }

    if( !face_cascade.load( face_cascade_name ) ){ cout << "face cascade error\n"; return ; };
    if( !eyes_cascade.load( eyes_cascade_name ) ){ cout << "eye cascade error\n"; return ; };
    Mat frame;
    cap >> frame;


    for(;;)
    {
        cap >> frame;
        resize(frame,frame,Size(320,240));
        proc(frame);
        char c = (char)waitKey(10);
        if( c == 27 )
            break;
    }
}

int main(int argc, char ** argv)
{
    run(); 
    cin.ignore(1);
    return 0;
}
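
If you still want to try Hough on top of this, here is a rough, untested sketch of how the mask above could be combined with the grayscale frame: add a function like the one below to the same file and call it from proc() once eyeMask has been built. The function name and all HoughCircles parameters are my own guesses and would need tuning for your camera.

void houghOnMask( const Mat& frame_gray, const Mat& eyeMask, Mat& frame )
{
    // keep the grayscale values only where the mask is set
    Mat pupilRegion;
    frame_gray.copyTo(pupilRegion, eyeMask);

    // look for circular blobs (the pupil) in the masked grayscale region
    vector<Vec3f> circles;
    HoughCircles(pupilRegion, circles, CV_HOUGH_GRADIENT, 2,
                 pupilRegion.rows/4, 100, 20, 3, 30);

    // draw whatever circles were found back onto the color frame
    for( size_t k = 0; k < circles.size(); k++ )
    {
        Point center(cvRound(circles[k][0]), cvRound(circles[k][1]));
        circle(frame, center, cvRound(circles[k][2]), Scalar(0,255,0), 2);
    }
}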
answered 2013-02-23T08:06:45.483