
I am writing a simple program that finds the brightest pixel in an image; eventually I want to use it to find the brightest pixel in video frames. It works fine on small images. On my 8x8 test image (all black with a single white pixel) it seems to find the white pixel in real time, but when I move up to a 1000x1000 image it takes several seconds to find it. My goal is to locate the pixel 15+ times per second on images of 1000x1000 or larger. Is that even possible? Here is the code I am using.

    //These are at the beginning of the class
    static NSBitmapImageRep *imageRepStatic;
    static float brightestBrightness;
    static CGPoint brightest;


    //This is in my function for getting the pixel
    for (int y = 0; y < imageRepStatic.size.height; y++) {
        for (int x = 0; x < imageRepStatic.size.width; x++) {

            NSColor *color = [imageRepStatic colorAtX:x y:y];
            NSArray *pixelData = [[NSString stringWithFormat:@"%@", color] componentsSeparatedByString:@" "];
            float red = [[pixelData objectAtIndex:1] floatValue];
            float green = [[pixelData objectAtIndex:2] floatValue];
            float blue = [[pixelData objectAtIndex:3] floatValue];
            float brightness = (red + green + blue) / 3;
            if (brightness >= brightestBrightness) {
                brightestBrightness = brightness;
                brightest = CGPointMake(x, y);
            }
        }
    }

    NSLog(@"The brightest pixel is at (%f, %f) and has a brightness of %f", brightest.x, brightest.y, brightestBrightness);
    frame ++;
    NSLog(@"%i", frame);
