
I followed a tutorial to detect faces in an image, and it works: it marks each face by creating a UIView *faceView. Now I am trying to get the coordinates of the detected face, but the returned result is slightly off on the y axis. How can I fix this? Where am I going wrong?

Here is what I tried:

CGRect newBounds = CGRectMake(faceFeature.bounds.origin.x, 
                              imageView.bounds.size.height - faceFeature.bounds.origin.y - faceFeature.bounds.size.height,
                              faceFeature.bounds.size.width, 
                              faceFeature.bounds.size.height);

Here is the detection source code:

- (void)markFaces:(UIImageView *)facePicture
{
    // draw a CI image with the previously loaded face detection picture
    CIImage* image = [CIImage imageWithCGImage:facePicture.image.CGImage];

    // create a face detector - since speed is not an issue we'll use a high accuracy
    // detector
    CIDetector* detector = [CIDetector detectorOfType:CIDetectorTypeFace 
                                              context:nil options:[NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy]];

    // create an array containing all the detected faces from the detector    
    NSArray* features = [detector featuresInImage:image];

    // we'll iterate through every detected face.  CIFaceFeature provides us
    // with the width for the entire face, and the coordinates of each eye
    // and the mouth if detected.  Also provided are BOOLs for the eyes and
    // mouth so we can check if they already exist.
    for(CIFaceFeature* faceFeature in features)
    {
        // get the width of the face
        CGFloat faceWidth = faceFeature.bounds.size.width;

        // create a UIView using the bounds of the face
        UIView* faceView = [[UIView alloc] initWithFrame:faceFeature.bounds];

        // add a border around the newly created UIView
        faceView.layer.borderWidth = 1;
        faceView.layer.borderColor = [[UIColor redColor] CGColor];


        CGRect newBounds = CGRectMake(faceFeature.bounds.origin.x, 
                                      imageView.bounds.size.height - faceFeature.bounds.origin.y - faceFeature.bounds.size.height,
                                      faceFeature.bounds.size.width, 
                                      faceFeature.bounds.size.height);

        NSLog(@"My view frame: %@", NSStringFromCGRect(newBounds));

        [self.view addSubview:faceView];

        if(faceFeature.hasLeftEyePosition)
        {
        }

        if(faceFeature.hasRightEyePosition)
        {
        }

        if(faceFeature.hasMouthPosition)
        {
        }
    }
}

-(void)faceDetector
{
    // Load the picture for face detection
    UIImageView* image = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"jolie.jpg"]];

    // Draw the face detection image
    [self.view addSubview:image];

    // flip image on y-axis to match coordinate system used by core image
    [image setTransform:CGAffineTransformMakeScale(1, -1)];

    // flip the entire window to make everything right side up
    [self.view setTransform:CGAffineTransformMakeScale(1, -1)];

    // Execute the method used to markFaces in background
    [self performSelectorInBackground:@selector(markFaces:) withObject:image];
}

1 Answer


The Core Image coordinate system and the UIKit coordinate system are completely different. CIFaceFeature gives you coordinates in the Core Image coordinate system, so you need to convert them to UIKit coordinates:

// CoreImage coordinate system origin is at the bottom left corner and UIKit is at the top left corner
// So we need to translate features positions before drawing them to screen
// In order to do so we make an affine transform
// **Note**
// Its better to convert CoreImage coordinates to UIKit coordinates and
// not the other way around because doing so could affect other drawings
// i.e. in the original sample project you see the image at the bottom. Isn't that weird?
CGAffineTransform transform = CGAffineTransformMakeScale(1, -1);
transform = CGAffineTransformTranslate(transform, 0, -_pickerImageView.bounds.size.height);

for(CIFaceFeature* faceFeature in features)
{
    // Translate CoreImage coordinates to UIKit coordinates
    const CGRect faceRect = CGRectApplyAffineTransform(faceFeature.bounds, transform);

    // create a UIView using the bounds of the face
    UIView* faceView = [[UIView alloc] initWithFrame:faceRect];
    faceView.layer.borderWidth = 1;
    faceView.layer.borderColor = [[UIColor redColor] CGColor];

    // get the width of the face
    CGFloat faceWidth = faceFeature.bounds.size.width;

    // add the new view to create a box around the face
    [_pickerImageView addSubview:faceView];

    if(faceFeature.hasLeftEyePosition)
    {
        // Get the left eye position: Translate CoreImage coordinates to UIKit coordinates
        const CGPoint leftEyePos = CGPointApplyAffineTransform(faceFeature.leftEyePosition, transform);

        // Note1:
        // If you want to add this to the faceView instead of the imageView, we need to translate its
        // coordinates a bit more: {-x, -y}, in other words {-faceFeature.bounds.origin.x, -faceFeature.bounds.origin.y}.
        // You could do the same for the other eye and the mouth too (see the sketch after this snippet).

        // Create an UIView to represent the left eye, its size depend on the width of the face.
        UIView* leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(leftEyePos.x - faceWidth*EYE_SIZE_RATE*0.5f /*- faceFeature.bounds.origin.x*/, // See Note1
                                                                       leftEyePos.y - faceWidth*EYE_SIZE_RATE*0.5f /*- faceFeature.bounds.origin.y*/, // See Note1
                                                                       faceWidth*EYE_SIZE_RATE,
                                                                       faceWidth*EYE_SIZE_RATE)];
        leftEyeView.backgroundColor = [[UIColor magentaColor] colorWithAlphaComponent:0.3];
        leftEyeView.layer.cornerRadius = faceWidth*EYE_SIZE_RATE*0.5;
        //[faceView addSubview:leftEyeView];  // See Note1
        [_pickerImageView addSubview:leftEyeView];
    }
}
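
The Note1 comment above describes adding the eye marker to faceView rather than to the image view. Below is a minimal sketch of that variant, not from the original answer: it is meant to replace the hasLeftEyePosition block inside the loop, reusing faceRect, faceView, faceWidth, transform, and EYE_SIZE_RATE from the snippet above. Since leftEyePos is already in UIKit coordinates, the origin subtracted here is that of the converted faceRect, which expresses the frame in faceView's local coordinate space.

if (faceFeature.hasLeftEyePosition)
{
    // Convert the eye position from Core Image to UIKit coordinates first
    const CGPoint leftEyePos = CGPointApplyAffineTransform(faceFeature.leftEyePosition, transform);

    // Shift into faceView's local space by subtracting the converted face rect's origin,
    // then center a circle of diameter faceWidth * EYE_SIZE_RATE on the eye
    CGRect eyeFrame = CGRectMake(leftEyePos.x - faceRect.origin.x - faceWidth*EYE_SIZE_RATE*0.5f,
                                 leftEyePos.y - faceRect.origin.y - faceWidth*EYE_SIZE_RATE*0.5f,
                                 faceWidth*EYE_SIZE_RATE,
                                 faceWidth*EYE_SIZE_RATE);

    UIView* leftEyeView = [[UIView alloc] initWithFrame:eyeFrame];
    leftEyeView.backgroundColor = [[UIColor magentaColor] colorWithAlphaComponent:0.3];
    leftEyeView.layer.cornerRadius = faceWidth*EYE_SIZE_RATE*0.5;
    [faceView addSubview:leftEyeView];
}

Note also that once the feature coordinates are converted this way, the y-axis flips in the question's faceDetector (the two CGAffineTransformMakeScale(1, -1) calls) should no longer be necessary.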
answered Jan 4, 2013 at 11:47