我正在尝试使用原生扩展来访问 iOS 上的设备摄像头。目标是在 AS3 上的 BitmapData 上制作 UIView 流。
// Create a full-screen host view for the camera preview.
mView=[[UIView alloc]initWithFrame:[UIScreen mainScreen].bounds];
// Attach an AVCaptureVideoPreviewLayer driven by mSession to the view.
// NOTE(review): this layer is composited out-of-process by the capture
// pipeline, so snapshotting the view with -renderInContext: is expected
// to miss the video content — likely relevant to the 0x0/blank image issue.
AVCaptureVideoPreviewLayer *previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:mSession];
previewLayer.frame = mView.bounds;
[mView.layer addSublayer:previewLayer];
这是代码的一部分,其中在 UIView 上添加带有相机预览的子层,然后在 ANE 控制器中:
// Copies the current contents of the capture view into an AS3 BitmapData.
// argv[0] = int32 id of the active camera; argv[1] = AS3 BitmapData to fill.
// Returns NULL (no AS3 return value).
//
// NOTE(review): UIGraphicsBeginImageContext / -renderInContext: must run on
// the main thread, and -renderInContext: does NOT capture the contents of an
// AVCaptureVideoPreviewLayer (the video is composited outside the app's layer
// tree). A zero-sized view also yields a 0x0 image — matching the reported
// failure. For per-frame pixel access, prefer an AVCaptureVideoDataOutput
// sample-buffer callback instead of snapshotting the preview view.
FREObject drawViewToBitmap(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
    // AS3 bitmapData we will write into, and the camera id argument.
    // (These declarations were garbled into a comment in the original paste.)
    FREBitmapData bmd;
    int32_t _id;
    // Get the CCapture object that contains the camera interface...
    CCapture* cap;
    FREGetObjectAsInt32(argv[0], &_id);
    cap = active_cams[_id];
    // Only copy when the capture session has produced a new frame.
    if (cap && captureCheckNewFrame(cap))
    {
        UIView* myView = getView(cap); // the mView that hosts the preview layer
        // Guard: a zero-sized view produces a 0x0 snapshot and the pixel
        // loop below would read past an empty buffer.
        if (CGSizeEqualToSize(myView.bounds.size, CGSizeZero)) {
            return NULL;
        }
        FREAcquireBitmapData(argv[1], &bmd);
        // Render the view's layer tree into a bitmap-backed UIImage.
        UIGraphicsBeginImageContext(myView.bounds.size);
        [myView.layer renderInContext:UIGraphicsGetCurrentContext()];
        UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        // Pull the raw RGBA8888 pixel values out of the image.
        CGImageRef imageRef = [image CGImage];
        NSUInteger width  = CGImageGetWidth(imageRef);
        NSUInteger height = CGImageGetHeight(imageRef);
        if (width == 0 || height == 0) {
            // Snapshot failed (e.g. called off the main thread); bail out
            // without touching pixel memory, but release the bitmapData.
            FREReleaseBitmapData(argv[1]);
            return NULL;
        }
        NSUInteger bytesPerPixel    = 4;
        NSUInteger bytesPerRow      = bytesPerPixel * width;
        NSUInteger bitsPerComponent = 8;
        // Pixel color values will be written here.
        unsigned char *rawData = (unsigned char *)malloc(height * bytesPerRow);
        if (rawData == NULL) {
            FREReleaseBitmapData(argv[1]);
            return NULL;
        }
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                     bitsPerComponent, bytesPerRow, colorSpace,
                                                     kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
        CGColorSpaceRelease(colorSpace);
        if (context == NULL) {
            free(rawData);
            FREReleaseBitmapData(argv[1]);
            return NULL;
        }
        CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
        CGContextRelease(context);
        // rawData now holds RGBA8888; repack each pixel as ARGB32 into the
        // AS3 bitmapData memory. Both the destination (bmd.lineStride32) and
        // the source (bytesPerRow) may carry per-row padding — skip it at
        // the end of every row.
        int offset    = bmd.lineStride32 - bmd.width;     // dest padding, in pixels
        int offset2   = (int)bytesPerRow - bmd.width * 4; // src padding, in bytes
        int byteIndex = 0;
        uint32_t *bmdPixels = bmd.bits32;
        // NOTE: assumes the AS3 bitmapData and the native UIView have the
        // same dimensions, as in the original example.
        int x, y;
        for (y = 0; y < bmd.height; y++) {
            for (x = 0; x < bmd.width; x++, bmdPixels++, byteIndex += 4) {
                int red   = rawData[byteIndex];
                int green = rawData[byteIndex + 1];
                int blue  = rawData[byteIndex + 2];
                int alpha = rawData[byteIndex + 3];
                *bmdPixels = (alpha << 24) | (red << 16) | (green << 8) | blue;
            }
            bmdPixels += offset;
            byteIndex += offset2;
        }
        free(rawData);
        // Tell Flash which region of the bitmapData changed (all of it).
        // FIX: the original called these on argv[0] (the int id) even though
        // the bitmapData was acquired from argv[1].
        FREInvalidateBitmapDataRect(argv[1], 0, 0, bmd.width, bmd.height);
        // Release our lock on the bitmapData.
        FREReleaseBitmapData(argv[1]);
    }
    return NULL;
}
问题出在这一行:image = UIGraphicsGetImageFromCurrentImageContext();,得到的图像宽度/高度均为 0,随后代码在 “int red = (rawData[byteIndex]);” 这一行崩溃。有人知道问题可能出在哪里吗?
drawViewToBitmap 函数来自 Tyler Egeto 的代码;我尝试将它与 github/inspirit 的 ANE 结合使用,直接按屏幕尺寸流式传输 UIView,而不是传输大尺寸静态图片再在 AS3 端缩放——后者的开销非常大。
谢谢!