
To detect faces with the camera, I am following SquareCam (from the iOS Developer Library). But I cannot display the positions of the left eye, right eye, and mouth. I am using the following code:

NSArray *sublayers = [NSArray arrayWithArray:[previewLayer sublayers]];
NSInteger sublayersCount = [sublayers count], currentSublayer = 0;
NSInteger featuresCount = [features count], currentFeature = 0;

[CATransaction begin];
[CATransaction setValue:(id)kCFBooleanTrue forKey:kCATransactionDisableActions];

// hide all the face layers
for ( CALayer *layer in sublayers )
{
    if ( [[layer name] isEqualToString:@"FaceLayer"] )
        [layer setHidden:YES];
}

if ( featuresCount == 0 || !detectFaces ) {
    [CATransaction commit];
    return; // early bail.
}

CGSize parentFrameSize = [previewView frame].size;
NSString *gravity = [previewLayer videoGravity];
BOOL isMirrored = [previewLayer isMirrored];
CGRect previewBox = [SquareCamViewController videoPreviewBoxForGravity:gravity
                                                             frameSize:parentFrameSize
                                                          apertureSize:clap.size];

for ( CIFaceFeature *ff in features ) {

    // find the correct position for the square layer within the previewLayer
    // the feature box originates in the bottom left of the video frame.
    // (Bottom right if mirroring is turned on)
    CGRect faceRect = [ff bounds];

    CGRect leftEyeFrameRect;

    // the video buffer is rotated 90 degrees relative to the portrait UI,
    // so swap width/height and swap x/y
    CGFloat temp             = faceRect.size.width;
    faceRect.size.width      = faceRect.size.height;
    faceRect.size.height     = temp;
    temp                     = faceRect.origin.x;
    faceRect.origin.x        = faceRect.origin.y;
    faceRect.origin.y        = temp;

    // scale coordinates so they fit in the preview box, which may be scaled
    CGFloat widthScaleBy     = previewBox.size.width / clap.size.height;
    CGFloat heightScaleBy    = previewBox.size.height / clap.size.width;
    faceRect.size.width     *= widthScaleBy;
    faceRect.size.height    *= heightScaleBy;
    faceRect.origin.x       *= widthScaleBy;
    faceRect.origin.y       *= heightScaleBy;


    if ( isMirrored )
    {
        faceRect = CGRectOffset(faceRect, previewBox.origin.x + previewBox.size.width - faceRect.size.width - (faceRect.origin.x * 2), previewBox.origin.y);
    }
    else
    {
        faceRect = CGRectOffset(faceRect, previewBox.origin.x, previewBox.origin.y);
        leftEyeFrameRect = CGRectOffset(faceRect, ff.leftEyePosition.x, ff.leftEyePosition.y);
    }

    CALayer *featureLayer   = nil;
    CALayer *eyeLayer       = nil;

    // re-use an existing layer if possible
    while ( !featureLayer && (currentSublayer < sublayersCount) )
    {
        CALayer *currentLayer = [sublayers objectAtIndex:currentSublayer++];
        if ( [[currentLayer name] isEqualToString:@"FaceLayer"] ) {
            featureLayer = currentLayer;

            [currentLayer setHidden:NO];
        }
    }

    // create a new one if necessary
    if ( !featureLayer ) {
        featureLayer = [CALayer new];
        [featureLayer setContents:(id)[square CGImage]];
        [featureLayer setName:@"FaceLayer"];
        [previewLayer addSublayer:featureLayer];
        [featureLayer release];
    }
    [featureLayer setFrame:faceRect];


    if (faceView != nil) {
        [faceView removeFromSuperview];
        [faceView release];
    }
    if (leftEyeView != nil) {
        [leftEyeView removeFromSuperview];
        [leftEyeView release];
    }

    faceView = [[UIView alloc] initWithFrame:CGRectMake(faceRect.origin.x, faceRect.origin.y, faceRect.size.width, faceRect.size.height)];

    faceView.layer.borderWidth   = 1;
    faceView.layer.borderColor   = [[UIColor redColor] CGColor];
    [self.view    addSubview:faceView];


    leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(faceView.frame.origin.x+(faceView.frame.size.height/2), faceView.frame.origin.y+(faceView.frame.size.height*0.10) ,faceView.frame.size.width*0.40, faceView.frame.size.height*0.40)];

    UIImageView *leftEyeImageView = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"eye.png"]];
    leftEyeImageView.frame = CGRectMake(0, 0, faceView.frame.size.width*0.40, faceView.frame.size.height*0.40);
    [leftEyeView addSubview:leftEyeImageView];
    [leftEyeImageView release]; // leftEyeView retains it
    [self.view addSubview:leftEyeView];



    if (ff.hasLeftEyePosition) {
        CGPoint leftEyeCenter = ff.leftEyePosition;
        UIView *vv = [[UIView alloc] initWithFrame:CGRectMake(leftEyeCenter.x, leftEyeCenter.y, 50, 50)];
        vv.center = leftEyeCenter;
        vv.layer.borderWidth = 4.0;
        vv.layer.borderColor = [[UIColor blackColor] CGColor];
        [self.view addSubview:vv];
        [vv release]; // self.view retains it
    }
}

[CATransaction commit];

It detects the eyes, but they are not shown in the correct position. Can anyone help me solve this? Thanks in advance.


3 Answers


When using the front camera I had the same problem as you in the preview, because the preview is mirrored and I could not find any good information on how to scale the coordinates.

The code below is the closest I got. Note that I define the image as a property called heartImage, and I assume you are using the Apple sample called SquareCam.
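For reference, a minimal sketch of how such a property might be declared. The name heartImage comes from this answer; the asset name "heart.png" is my assumption, and retain matches the manual reference counting used in the rest of the code:

// in the view controller's header
@property (nonatomic, retain) UIImage *heartImage;

// in the implementation, e.g. in viewDidLoad
self.heartImage = [UIImage imageNamed:@"heart.png"];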

In the method - (void)drawFaceBoxesForFeatures:(NSArray *)features forVideoBox:(CGRect)clap orientation:(UIDeviceOrientation)orientation:

CALayer *leftEyeEyeLayer = nil; // must start out nil so the re-use loop below can test it

if ( ff.hasLeftEyePosition )
{
    // swap coordinates
    CGFloat leftEyeRectOriginX = ff.leftEyePosition.y;
    CGFloat leftEyeRectOriginY = ff.leftEyePosition.x;
    CGFloat leftEyeRectWidth   = faceRect.size.width*0.3;
    CGFloat leftEyeRectHeight  = faceRect.size.width*0.3;

    // adjust scale
    leftEyeRectOriginX *= widthScaleBy;
    leftEyeRectOriginY *= heightScaleBy;

    NSLog(@"LeftEyePosition: %@", NSStringFromCGPoint(ff.leftEyePosition));
    CGRect r = CGRectMake(leftEyeRectOriginX - (leftEyeRectWidth/2), leftEyeRectOriginY - (leftEyeRectHeight/2), leftEyeRectWidth, leftEyeRectHeight);

    if ( isMirrored ) {
        r = CGRectOffset(r, previewBox.origin.x + previewBox.size.width - (leftEyeRectOriginX*2) - leftEyeRectWidth + faceRect.origin.x, previewBox.origin.y);
        NSLog(@"LeftEyeRect mirrored: %@", NSStringFromCGRect(r));
    }
    else {
        r = CGRectOffset(r, previewBox.origin.x, previewBox.origin.y);
    }

    // re-use an existing layer if possible
    while ( !leftEyeEyeLayer && (currentSublayer < sublayersCount) ) {
        CALayer *currentLayer = [sublayers objectAtIndex:currentSublayer++];
        if ( [[currentLayer name] isEqualToString:@"LeftEyeLayer"] ) {
            leftEyeEyeLayer = currentLayer;
            [currentLayer setHidden:NO];
        }
    }

    // create a new one if necessary
    if ( !leftEyeEyeLayer ) {
        leftEyeEyeLayer = [CALayer new];
        [leftEyeEyeLayer setContents:(id)[heartImage CGImage]];
        [leftEyeEyeLayer setName:@"LeftEyeLayer"];
        [previewLayer addSublayer:leftEyeEyeLayer];
        [leftEyeEyeLayer release];
    }
    [leftEyeEyeLayer setFrame:r];
}

The same applies to the right eye, except that in the mirrored case I use: r = CGRectOffset(r, previewBox.origin.x + previewBox.size.width - (rightEyeRectOriginX*2) - rightEyeRectWidth + faceRect.origin.x, previewBox.origin.y); (a sketch of the full right-eye block follows the next snippet). The only difference from the sample code is that you first want to hide all the feature layers, so some lines above my code look like this:

// hide all the face layers
for ( CALayer *layer in sublayers ) {
    if ( [[layer name] isEqualToString:@"FaceLayer"] || [[layer name] isEqualToString:@"LeftEyeLayer"] || [[layer name] isEqualToString:@"RightEyeLayer"] )
        [layer setHidden:YES];
}
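As mentioned above, here is a sketch of how the right-eye block might look. It mirrors the left-eye code; rightEyeEyeLayer and the right-eye rect variables are my assumed counterparts of the left-eye names, and only the mirrored offset differs:

CALayer *rightEyeEyeLayer = nil;

if ( ff.hasRightEyePosition )
{
    // swap coordinates and adjust scale, as for the left eye
    CGFloat rightEyeRectOriginX = ff.rightEyePosition.y * widthScaleBy;
    CGFloat rightEyeRectOriginY = ff.rightEyePosition.x * heightScaleBy;
    CGFloat rightEyeRectWidth   = faceRect.size.width*0.3;
    CGFloat rightEyeRectHeight  = faceRect.size.width*0.3;

    CGRect r = CGRectMake(rightEyeRectOriginX - (rightEyeRectWidth/2), rightEyeRectOriginY - (rightEyeRectHeight/2), rightEyeRectWidth, rightEyeRectHeight);

    if ( isMirrored ) {
        // the offset quoted above
        r = CGRectOffset(r, previewBox.origin.x + previewBox.size.width - (rightEyeRectOriginX*2) - rightEyeRectWidth + faceRect.origin.x, previewBox.origin.y);
    }
    else {
        r = CGRectOffset(r, previewBox.origin.x, previewBox.origin.y);
    }

    // re-use or create a layer named @"RightEyeLayer", exactly as for the left eye,
    // then position it:
    [rightEyeEyeLayer setFrame:r];
}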

To be precise, I only had trouble with the live camera preview. When saving the picture to the library with the method - (CGImageRef)newSquareOverlayedImageForFeatures:(NSArray *)features inCGImage:(CGImageRef)backgroundImage withOrientation:(UIDeviceOrientation)orientation frontFacing:(BOOL)isFrontFacing, it works correctly using:

if ( ff.hasLeftEyePosition )
{
    CGRect r = CGRectMake(ff.leftEyePosition.x - faceWidth*0.15, ff.leftEyePosition.y - faceWidth*0.15, faceWidth*0.3, faceWidth*0.3);
    CGContextDrawImage(bitmapContext, r, [rotatedHeartImage CGImage]);
}

Please let me know if and how my answer can be improved.

answered 2013-07-17T21:56:47.997

This is probably caused by a mismatch in orientation between your input, the detector, and the output. If faces are being detected, you may only need to convert the output coordinates from landscape to portrait, or vice versa. Otherwise, see here.
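A minimal sketch of such a conversion, assuming a point in the landscape video-buffer coordinate space (as in the question's code) that should end up in a portrait view space; convertToPortraitPoint is a hypothetical helper name:

// Maps a point from the landscape video buffer into a portrait view space
// by swapping the axes (as the faceRect swap above does) and rescaling.
static CGPoint convertToPortraitPoint(CGPoint p, CGSize videoSize, CGSize viewSize)
{
    CGPoint swapped = CGPointMake(p.y, p.x);
    swapped.x *= viewSize.width  / videoSize.height;
    swapped.y *= viewSize.height / videoSize.width;
    return swapped;
}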

answered 2012-12-06T11:25:46.607

You could look into Haar cascade training files and OpenCV, but that is a completely different approach. It supports iOS versions below 6.0, which is a pro, but it is harder to use than Apple's SquareCam sample, which is a con.

OpenCV can detect ears, eyes, and so on with such cascades, and trained cascade files may already be available on the web.
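For illustration, a minimal Objective-C++ sketch of eye detection with an OpenCV Haar cascade. haarcascade_eye.xml ships with OpenCV; the function name and the idea of bundling the cascade file with the app are my assumptions:

// EyeDetector.mm -- compiled as Objective-C++ so the OpenCV C++ API is available
#import <Foundation/Foundation.h>
#import <opencv2/objdetect/objdetect.hpp>

std::vector<cv::Rect> detectEyes(const cv::Mat &grayImage)
{
    static cv::CascadeClassifier eyeCascade;
    if ( eyeCascade.empty() ) {
        // the cascade file is assumed to be bundled with the app
        NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
        eyeCascade.load([path UTF8String]);
    }

    std::vector<cv::Rect> eyes;
    // 1.1 scale factor and 3 min-neighbors are common defaults
    eyeCascade.detectMultiScale(grayImage, eyes, 1.1, 3, 0, cv::Size(30, 30));
    return eyes;
}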

answered 2012-12-06T11:37:36.497