2

我正在使用 OpenCV 进行对象检测(眼睛)。以下是我的代码;它无法准确地(或在大致位置上)识别出眼睛对象。谁能帮我解决这个问题?

if(imageView.image) {
  cvSetErrMode(CV_ErrModeParent);

  // Convert the UIImage into an IplImage so OpenCV can process it.
  IplImage *image = [self CreateIplImageFromUIImage:imageView.image];

  // Down-scale by 2x for faster face detection.
  IplImage *small_image = cvCreateImage(cvSize(image->width/2,image->height/2), IPL_DEPTH_8U, 3);
  cvPyrDown(image, small_image, CV_GAUSSIAN_5x5);
  int scale = 2;

  // Load the Haar cascades bundled with the app.
  NSString *path1 = [[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
  NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
  CvHaarClassifierCascade *cascade = (CvHaarClassifierCascade *)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
  CvHaarClassifierCascade *cascade1 = (CvHaarClassifierCascade *)cvLoad([path1 cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
  CvMemStorage *storage = cvCreateMemStorage(0);

  // Detect faces on the half-size image.
  CvSeq *faces = cvHaarDetectObjects(small_image, cascade, storage, 1.2f, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(20, 20));
  cvReleaseImage(&small_image);

  // Create a bitmap canvas at full resolution to draw the results on.
  CGImageRef imageRef = imageView.image.CGImage;
  CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
  CGContextRef contextRef = CGBitmapContextCreate(NULL, imageView.image.size.width, imageView.image.size.height,
              8, imageView.image.size.width * 4,
              colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
  CGContextDrawImage(contextRef, CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height), imageRef);

  CGContextSetLineWidth(contextRef, 4);
  CGContextSetRGBStrokeColor(contextRef, 0.0, 0.0, 1.0, 0.5);

  CvRect cvrect;

  // Draw face results on the image. Face coordinates come from the
  // half-size image, so multiply by `scale` to map back to full size.
  for(int i = 0; i < faces->total; i++)
  {
   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

   // Compute the face rectangle in device space.
   cvrect = *(CvRect *)cvGetSeqElem(faces, i);
   CGRect face_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));

   if(overlayImage)
   {
    CGContextDrawImage(contextRef, face_rect, overlayImage.CGImage);
   }
   else
   {
    CGContextStrokeRect(contextRef, face_rect);
   }
   [pool release];
  }

  cvClearMemStorage(storage);

  // Restrict eye detection to a sub-region of the FULL-SIZE image.
  // NOTE(review): this ROI should ideally be derived from the detected face
  // rectangle (scaled back up by `scale`) instead of being hard-coded.
  CvRect roi = cvRect(80, 100, 300, 300);
  cvSetImageROI(image, roi);
  CvSeq *eyes = cvHaarDetectObjects(image, cascade1, storage, 1.15, 3, 0, cvSize(25, 15));
  for(int i = 0; i < eyes->total; i++)
  {
   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
   CvRect eyerect = *(CvRect *)cvGetSeqElem(eyes, i);
   // BUG FIX: eye detections are relative to the ROI origin and already at
   // full resolution (the ROI was set on the full-size `image`), so add the
   // ROI offset and do NOT multiply by `scale`. The original code scaled
   // the coordinates and ignored the offset, drawing the eyes in the wrong
   // place.
   CGRect eyes_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(roi.x + eyerect.x, roi.y + eyerect.y, eyerect.width, eyerect.height));
   if(overlayImage) {
    CGContextDrawImage(contextRef, eyes_rect, overlayImage.CGImage);
   }
   else
   {
    CGContextStrokeRect(contextRef, eyes_rect);
   }
   [pool release];
  }
  cvResetImageROI(image);

  // FIX: CGBitmapContextCreateImage returns a +1 CGImageRef — release it
  // after handing it to UIImage, otherwise it leaks on every invocation.
  CGImageRef resultImage = CGBitmapContextCreateImage(contextRef);
  imageView.image = [UIImage imageWithCGImage:resultImage];
  CGImageRelease(resultImage);
  CGContextRelease(contextRef);
  CGColorSpaceRelease(colorSpace);

  // FIX: also release the source IplImage and BOTH cascades — the original
  // leaked `image` and `cascade1`.
  cvReleaseImage(&image);
  cvReleaseMemStorage(&storage);
  cvReleaseHaarClassifierCascade(&cascade);
  cvReleaseHaarClassifierCascade(&cascade1);

  [self hideProgressIndicator];
 }

}
4

2 回答 2

1

这似乎基于 OpenCV 2.1 中包含的 facedetect 示例代码。您使用的 xml 文件包含在发行版的 data 目录中。

我不确定它的尺度不变性如何。我建议您更改比例以使其适用于您的输入图像比例。

查看 python 示例代码 facedetect.py 以获得一些线索

于 2010-11-19T10:12:32.227 回答
0

在为 cvSetImageROI 传递参数时,用检测到的 face_rect 来设置 ROI 区域:

CGRect face_rect;

// Draw face results on the image.
for(int i = 0; i < faces->total; i++) {
    NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];

    // Compute the face rectangle (half-size coords mapped up by `scale`).
    CvRect cvrect = *(CvRect*)cvGetSeqElem(faces, i);

    face_rect = CGContextConvertRectToDeviceSpace(contextRef,
                           CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));
    CGContextStrokeRect(contextRef, face_rect);

    [pool release];
}

// FIX: the identifier `storage` had been garbled into "存储" by machine
// translation, which would not compile.
cvClearMemStorage(storage);

// NOTE(review): face_rect is in device space (it went through
// CGContextConvertRectToDeviceSpace), but cvSetImageROI expects pixel
// coordinates of the IplImage — confirm the two coordinate systems agree.
cvSetImageROI(image, cvRect(face_rect.origin.x, face_rect.origin.y, face_rect.size.width, face_rect.size.height));

// NOTE(review): this reuses the FACE cascade on small_image; presumably the
// eye cascade (haarcascade_eye.xml) is intended here. Also the ROI was set
// on `image`, not `small_image`, so it has no effect on this call.
CvSeq *eye = cvHaarDetectObjects(small_image, cascade, storage, 1.1, 3, 0, cvSize(10, 10));

for(int i = 0; i < eye->total; i++)
{
    NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
    CvRect cvrect = *(CvRect*)cvGetSeqElem(eye, i);

    CGRect eyes_rect;

    CGContextSetRGBStrokeColor(contextRef, 1.1, 1.0, 0.0, 0.5);
    // Left-eye estimate: fixed offset into the upper part of the face rect.
    eyes_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake((cvrect.x * scale + 20), (cvrect.y * scale) + (cvrect.height - 30), cvrect.width - 30, 40));
    CGContextStrokeRect(contextRef, eyes_rect);

    // Right-eye estimate: mirrored offset on the other side of the face.
    CGRect eyes_Right = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake((cvrect.x * scale + cvrect.width + 10), (cvrect.y * scale) + (cvrect.height - 30), cvrect.width - 30, 40));
    CGContextStrokeRect(contextRef, eyes_Right);

    [pool release];
}
于 2011-01-12T06:52:53.810 回答