
(Ignore the large code blocks below; they're just reference/resources in case anyone else wants to play along.)

The face-detection routines in Core Image naturally run faster on smaller images, so I've been looking at using the aspectRatioThumbnail to generate the face data, with the plan of scaling that data up for use with the fullScreenImage representation. I'm doing this because I may be processing 20-30 images, so I want to cut down the task time.

This is probably simple math, but I'm getting inaccurate results when I try to map a point from one image onto the other.

90 x 120 image - CGPoint(64, 50) rightEyePosition

480 x 640 image - CGPoint(331, 303) rightEyePosition

(480 / 90) * 64 = 341.333, but it should be 331, right? Am I doing something wrong?
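
Here is the mapping I had in mind, as a minimal Objective-C sketch using the sizes and points from the logs below (the assumption being that the two representations differ only by a uniform scale):

    CGSize thumbSize = CGSizeMake(90.0, 120.0);    // aspectRatioThumbnail
    CGSize fullSize  = CGSizeMake(480.0, 640.0);   // fullScreenImage

    // Same aspect ratio, so a single factor scales both axes.
    CGFloat scale = fullSize.width / thumbSize.width;    // 480 / 90 = 5.333...

    CGPoint thumbEye  = CGPointMake(64.0, 50.0);         // rightEyePosition in the thumbnail
    CGPoint scaledEye = CGPointMake(thumbEye.x * scale,  // 341.3
                                    thumbEye.y * scale); // 266.7

    // The detector reports {331, 303} on the 480 x 640 image, so the scaled
    // point lands near, but not exactly on, the re-detected landmark.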

Update: after some more testing, maybe the face-data results simply differ slightly because the image resolutions differ? That would make sense: there would be no exactly scalable relationship between the two sets of results. I'd still like to know, though: is my scaling math above wrong?


Using CIDetectorAccuracyHigh     
useImageOptions:     0
------------ aspectRatioThumbnail   90.000000  120.000000 orientation: 0
2013-01-18 12:33:30.378 SeqMeTestBed[9705:907] aspectRatioThumbnail: features {
    bounds = "{{23, 16}, {56, 56}}";
    hasLeftEyePosition = 1;
    hasMouthPosition = 1;
    hasRightEyePosition = 1;
    leftEyePosition = "{43, 59}";
    mouthPosition = "{51, 31}";
    rightEyePosition = "{64, 50}";
}
------------ fullScreenImage   480.000000  640.000000 orientation: 0
2013-01-18 12:33:33.029 SeqMeTestBed[9705:907] fullScreenImage: features {
    bounds = "{{135, 81}, {298, 298}}";
    hasLeftEyePosition = 1;
    hasMouthPosition = 1;
    hasRightEyePosition = 1;
    leftEyePosition = "{228, 321}";
    mouthPosition = "{290, 156}";
    rightEyePosition = "{331, 303}";
}
------------ fullResolutionImage   640.000000  480.000000 orientation: 0
2013-01-18 12:33:35.745 SeqMeTestBed[9705:907] fullResolutionImage: features {
    bounds = "{{195, 105}, {366, 366}}";
    hasLeftEyePosition = 1;
    hasMouthPosition = 1;
    hasRightEyePosition = 1;
    leftEyePosition = "{356, 411}";
    mouthPosition = "{350, 201}";
    rightEyePosition = "{455, 400}";
}
// Code used //

#import <AssetsLibrary/AssetsLibrary.h>
#import <CoreImage/CoreImage.h>

- (void)detectFacialFeatures
{

    NSDictionary *detectorOptions = [[NSDictionary alloc] initWithObjectsAndKeys:CIDetectorAccuracyHigh, CIDetectorAccuracy, nil];
    CIDetector* faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:detectorOptions];


    NSDictionary *imageOptions = nil;

    UIImage *tmpImage;
    NSNumber* orientation; 
    CIImage *ciImage;
    NSArray *array;
    NSMutableDictionary* featuresDictionary;

    BOOL useImageOptions = NO;

    printf("Using CIDetectorAccuracyHigh     \n");
    printf("useImageOptions:     %d\n", useImageOptions);

    //-----------------aspectRatioThumbnail
    tmpImage = [[UIImage alloc] initWithCGImage:self.asset.aspectRatioThumbnail];
    orientation = [NSNumber numberWithInt:tmpImage.imageOrientation];

    printf("------------ aspectRatioThumbnail   %f  %f orientation: %ld\n", tmpImage.size.width, tmpImage.size.height, (long)[orientation integerValue]);
    ciImage = [CIImage imageWithCGImage:tmpImage.CGImage];
    if (ciImage == nil) printf("----------!!!aspectRatioThumbnail: ciImage is nil    \n");

    // Per-call options: featuresInImage:options: only honors keys such as
    // CIDetectorImageOrientation; accuracy was fixed when the detector was created.
    imageOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                    orientation, CIDetectorImageOrientation, nil];

    if (useImageOptions) {
        array = [faceDetector featuresInImage:ciImage options:imageOptions];
    } else {
        array = [faceDetector featuresInImage:ciImage];
    }

    featuresDictionary = [self convertFeaturesToDictionary:array];
    NSLog(@"aspectRatioThumbnail: features %@", featuresDictionary);

   //-----------------fullScreenImage
    tmpImage = [[UIImage alloc] initWithCGImage:self.asset.defaultRepresentation.fullScreenImage];
    orientation = [NSNumber numberWithInt:tmpImage.imageOrientation];
    printf("------------ fullScreenImage   %f  %f orientation: %ld\n", tmpImage.size.width, tmpImage.size.height, (long)[orientation integerValue]);

    ciImage = [CIImage imageWithCGImage:tmpImage.CGImage];
    if (ciImage == nil) printf("----------!!!fullScreenImage: ciImage is nil    \n");

    imageOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                    orientation, CIDetectorImageOrientation, nil];

    if (useImageOptions) {
        array = [faceDetector featuresInImage:ciImage options:imageOptions];
    } else {
        array = [faceDetector featuresInImage:ciImage];
    }

    featuresDictionary = [self convertFeaturesToDictionary:array];
    NSLog(@"fullScreenImage: features %@", featuresDictionary);

    //-----------------fullResolutionImage
    tmpImage = [[UIImage alloc] initWithCGImage:self.asset.defaultRepresentation.fullResolutionImage];
    orientation = [NSNumber numberWithInt:tmpImage.imageOrientation];

    printf("------------ fullResolutionImage   %f  %f orientation: %ld\n", tmpImage.size.width, tmpImage.size.height, (long)[orientation integerValue]);

    ciImage = [CIImage imageWithCGImage:tmpImage.CGImage];
    if (ciImage == nil) printf("----------!!!fullResolutionImage: ciImage is nil    \n");

    imageOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                    orientation, CIDetectorImageOrientation, nil];

    if (useImageOptions) {
        array = [faceDetector featuresInImage:ciImage options:imageOptions];
    } else {
        array = [faceDetector featuresInImage:ciImage];
    }

    featuresDictionary = [self convertFeaturesToDictionary:array];
    NSLog(@"fullResolutionImage: features %@", featuresDictionary);

}
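
Since the three blocks above differ only in the source CGImage and the label, they could be collapsed into a single helper. A minimal sketch (the method name logFeaturesWithDetector:cgImage:label: is made up here):

    // Hypothetical helper: run the shared detector over one representation
    // and log the resulting features under a label.
    - (void)logFeaturesWithDetector:(CIDetector *)faceDetector
                            cgImage:(CGImageRef)cgImage
                              label:(NSString *)label
    {
        UIImage *tmpImage = [[UIImage alloc] initWithCGImage:cgImage];
        NSNumber *orientation = [NSNumber numberWithInt:tmpImage.imageOrientation];

        printf("------------ %s   %f  %f orientation: %ld\n", [label UTF8String],
               tmpImage.size.width, tmpImage.size.height, (long)[orientation integerValue]);

        CIImage *ciImage = [CIImage imageWithCGImage:tmpImage.CGImage];
        if (ciImage == nil) {
            printf("----------!!!%s: ciImage is nil    \n", [label UTF8String]);
            return;
        }

        NSDictionary *imageOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                                      orientation, CIDetectorImageOrientation, nil];
        NSArray *array = [faceDetector featuresInImage:ciImage options:imageOptions];

        NSLog(@"%@: features %@", label, [self convertFeaturesToDictionary:array]);
    }

which would be called as, for example:

    [self logFeaturesWithDetector:faceDetector
                          cgImage:self.asset.defaultRepresentation.fullScreenImage
                            label:@"fullScreenImage"];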

- (NSMutableDictionary*)convertFeaturesToDictionary:(NSArray*)foundFaces
{
    NSMutableDictionary * faceFeatures = [[NSMutableDictionary alloc] init];

    if (foundFaces.count) {

        CIFaceFeature *face = [foundFaces objectAtIndex:0];
        NSNumber* hasMouthPosition = [NSNumber numberWithBool:face.hasMouthPosition];
        NSNumber* hasLeftEyePosition = [NSNumber numberWithBool:face.hasLeftEyePosition];
        NSNumber* hasRightEyePosition = [NSNumber numberWithBool:face.hasRightEyePosition];

        [faceFeatures setValue:hasMouthPosition forKey:@"hasMouthPosition"];
        [faceFeatures setValue:hasLeftEyePosition forKey:@"hasLeftEyePosition"];
        [faceFeatures setValue:hasRightEyePosition forKey:@"hasRightEyePosition"];

        NSString * boundRect = NSStringFromCGRect(face.bounds);
       // NSLog(@"------------boundRect %@", boundRect);
        [faceFeatures setValue:boundRect forKey:@"bounds"];

        // Use the BOOL properties directly; the NSNumber pointers above are
        // always non-nil, so testing them would always succeed.
        if (face.hasMouthPosition){
            NSString * mouthPosition = NSStringFromCGPoint(face.mouthPosition);
            [faceFeatures setValue:mouthPosition forKey:@"mouthPosition"];
        }

        if (face.hasLeftEyePosition){
            NSString * leftEyePosition = NSStringFromCGPoint(face.leftEyePosition);
            [faceFeatures setValue:leftEyePosition forKey:@"leftEyePosition"];
        }

        if (face.hasRightEyePosition){
            NSString * rightEyePosition = NSStringFromCGPoint(face.rightEyePosition);
            [faceFeatures setValue:rightEyePosition forKey:@"rightEyePosition"];
        }

    }
    return faceFeatures;
}

1 Answer


Your math is correct, under the assumption that the thumbnail retains all the facial data needed for detection.

That assumption doesn't hold, because in a thumbnail even a human has a harder time recognizing a face.

So for the higher-resolution image, the engine should return a more accurate face position, one bound more tightly to the actual face. Simply scaling up the values from the thumbnail will usually still match the detected face, but you should definitely expect lower accuracy.
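
A quick check with the numbers from the question illustrates this. Scaling the thumbnail results by 480 / 90 ≈ 5.333:

    thumbnail bounds      {{23, 16}, {56, 56}}  x 5.333  ->  {{122.7, 85.3}, {298.7, 298.7}}
    fullScreenImage bounds, as detected:                     {{135, 81},     {298, 298}}

The scaled box comes out at almost exactly the right size and still lands on the face, but its origin is off by roughly 12 points in x and 4 in y, which is the reduced precision you should expect.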

Answered 2013-09-18T16:03:19.503