I'm working on an image-processing framework and use this code to read RGB data:
if let data = image.cgImage?.dataProvider?.data {
    let dataPtr: UnsafePointer<UInt8> = CFDataGetBytePtr(data)
    let width = Int(image.size.width)
    let height = Int(image.size.height)
    for y in 0..<height {
        for x in 0..<width {
            // Assumes 4 bytes per pixel and tightly packed rows (no padding)
            let pixelInfo: Int = ((width * y) + x) * 4
            let r = dataPtr[pixelInfo]
            let g = dataPtr[pixelInfo + 1]
            let b = dataPtr[pixelInfo + 2]
            print("\(r), \(g), \(b)")
        }
    }
}
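For reference, the same read can also be indexed by the CGImage's own geometry (a sketch, using the same `image` as above; note that image.size is in points, so it can differ from cgImage.width/height on 2x/3x images, bytesPerRow may include row padding, and whether byte 0 is R or B depends on the image's bitmapInfo):

if let cgImage = image.cgImage,
   let data = cgImage.dataProvider?.data,
   let dataPtr = CFDataGetBytePtr(data) {
    let bytesPerPixel = cgImage.bitsPerPixel / 8
    let bytesPerRow = cgImage.bytesPerRow
    for y in 0..<cgImage.height {
        for x in 0..<cgImage.width {
            // Index by bytesPerRow so row padding cannot skew the offsets
            let offset = y * bytesPerRow + x * bytesPerPixel
            print("\(dataPtr[offset]), \(dataPtr[offset + 1]), \(dataPtr[offset + 2])")
        }
    }
}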
If I create a new Swift project and a new Objective-C project and use the same code (via the Objc project's bridging header), I get different results, for example:
5, 36, 20; 24, 69, 48 (Swift)
5, 36, 18; 21, 69, 47 (Objc)
This causes many divergent results further down the processing pipeline. I tried Objective-C code that reads the data with CGBitmapContextCreate(), but got exactly the same results. Both apps report the same ColorSpace; I tried setting it manually to DeviceRGB and to sRGB, but without any luck.
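One way to check whether the two apps are even decoding into the same pixel layout is to dump the CGImage's properties in both (a small Swift sketch, same `image` as above; if any of these values differ between the projects, the raw buffers are not comparable byte-for-byte):

if let cgImage = image.cgImage {
    // If any of these differ between the two apps, the buffers cannot match
    print("bitsPerPixel: \(cgImage.bitsPerPixel)")
    print("bytesPerRow:  \(cgImage.bytesPerRow)")
    print("alphaInfo:    \(cgImage.alphaInfo.rawValue)")
    print("bitmapInfo:   \(cgImage.bitmapInfo.rawValue)")
    print("colorSpace:   \(String(describing: cgImage.colorSpace))")
}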
I need the Objc output to match an Android app, which produces exactly the same results as the Swift app.
Update. The second solution I tried was writing different code for Objective-C; it returns exactly the same results, which still don't match Swift:
size_t bytesSize = 0;
unsigned char *bytes = [self getBytesFromImage:image dataSize:&bytesSize];

// One double per byte of pixel data, for later processing
size_t doubleSize = sizeof(double) * bytesSize;
double *doubles = (double *)malloc(doubleSize);

size_t doublesIndex = 0;
size_t counter = 0;
while (counter < bytesSize) {
    // Assumes 4 tightly packed bytes per pixel
    unsigned char r = bytes[counter];
    unsigned char g = bytes[counter + 1];
    unsigned char b = bytes[counter + 2];
    // (the code that fills `doubles` is not shown here)
    counter += 4;
}
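The per-pixel work inside that loop is omitted above. Purely for illustration, here is a hypothetical Swift version of the same staging step (the name channelDoubles is mine; it assumes the same tightly packed 4-bytes-per-pixel layout):

// Hypothetical illustration: stage the RGB channels as Doubles for later math.
// Assumes a tightly packed 4-bytes-per-pixel buffer (RGBA or BGRA).
func channelDoubles(from bytes: [UInt8]) -> [Double] {
    var doubles: [Double] = []
    doubles.reserveCapacity((bytes.count / 4) * 3)
    var counter = 0
    while counter + 3 < bytes.count {
        doubles.append(Double(bytes[counter]))     // first channel
        doubles.append(Double(bytes[counter + 1])) // second channel
        doubles.append(Double(bytes[counter + 2])) // third channel
        counter += 4                               // skip the fourth (alpha) byte
    }
    return doubles
}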
- (unsigned char *)getBytesFromImage:(UIImage *)image dataSize:(size_t *)dataSize {
    *dataSize = (size_t)(4 * image.size.width * image.size.height);
    unsigned char *imageData = (unsigned char *)malloc(*dataSize);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGImageRef imageRef = [image CGImage];
    // 32-bit little-endian with alpha first => BGRA bytes, premultiplied by alpha
    CGContextRef bitmap = CGBitmapContextCreate(imageData,
                                                image.size.width,
                                                image.size.height,
                                                8,
                                                image.size.width * 4,
                                                colorSpace,
                                                kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGContextDrawImage(bitmap, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(bitmap);
    CGColorSpaceRelease(colorSpace);
    return imageData; // caller owns the buffer and must free() it
}
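One thing worth noting about that context: kCGImageAlphaPremultipliedFirst stores R/G/B already multiplied by alpha and rounded, which can shift channel values by a point or two versus the unmultiplied data. For comparison, a Swift sketch that redraws without premultiplication (this assumes the source image is fully opaque and that sRGB is the intended working space):

import UIKit

// A sketch: draw into an RGBX context so alpha premultiplication cannot round the channels.
// Only safe for fully opaque sources; sRGB is an assumption here.
func opaqueRGBBytes(from image: UIImage) -> [UInt8]? {
    guard let cgImage = image.cgImage,
          let colorSpace = CGColorSpace(name: CGColorSpace.sRGB) else { return nil }
    let width = cgImage.width
    let height = cgImage.height
    var pixels = [UInt8](repeating: 0, count: width * height * 4)
    guard let context = CGContext(data: &pixels,
                                  width: width,
                                  height: height,
                                  bitsPerComponent: 8,
                                  bytesPerRow: width * 4,
                                  space: colorSpace,
                                  bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue) else { return nil }
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    return pixels // byte layout per pixel: R, G, B, unused
}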