1

在我的应用程序中,我正在使用以下代码打开一个视频预览层:

    /* Create the capture input from the default video camera.
       The original passed error:nil and never checked for a nil input, so a
       missing camera (or denied access) would make addInput: throw later.
       Capture the error and guard instead. */
    NSError *deviceError = nil;
    AVCaptureDeviceInput *captureInput =
        [AVCaptureDeviceInput deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]
                                              error:&deviceError];
    if (captureInput == nil) {
        NSLog(@"Could not create video capture input: %@", deviceError);
    }

/* Set up the video data output. */
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];

/* Drop frames that arrive while the delegate callback is still busy. */
captureOutput.alwaysDiscardsLateVideoFrames = YES;

/* Deliver sample buffers to self on a private serial queue. */
dispatch_queue_t queue = dispatch_queue_create("cameraQueue", NULL);
[captureOutput setSampleBufferDelegate:self queue:queue];
/* The output retains the queue; this balances our create.
   NOTE(review): dispatch_release is only valid pre-ARC (or with
   OS_OBJECT_USE_OBJC disabled) — matches the era of this code. */
dispatch_release(queue);

/* Request BGRA pixel buffers so frames convert easily to CGImage/UIImage. */
NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];

/* Build the session. Guard addInput:/addOutput: so an unusable input or
   output does not raise an exception. */
self.captureSession = [[AVCaptureSession alloc] init];
if (captureInput != nil && [self.captureSession canAddInput:captureInput]) {
    [self.captureSession addInput:captureInput];
}
if ([self.captureSession canAddOutput:captureOutput]) {
    [self.captureSession addOutput:captureOutput];
}
/* Medium quality: converting 720p frames to UIImage/CGImage is too
   expensive on an iPhone 4 for a live demo. */
[self.captureSession setSessionPreset:AVCaptureSessionPresetMedium];

/* Custom layer over viewNo2 (frame copied directly instead of rebuilding
   the identical rect field-by-field with CGRectMake). */
self.customLayer = [CALayer layer];
self.customLayer.frame = self.viewNo2.frame;
self.customLayer.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer];

/* Second custom layer, over viewNo3. */
self.customLayer1 = [CALayer layer];
self.customLayer1.frame = self.viewNo3.frame;
self.customLayer1.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer1];

/* Live preview layer, over viewNo1.
   NOTE: AVCaptureVideoPreviewLayer is not drawn by -renderInContext:, so
   it will not appear in layer-based screen snapshots. */
self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
self.prevLayer.frame = self.viewNo1.frame;
self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.view.layer addSublayer:self.prevLayer];

/* Start the capture. */
[self.captureSession startRunning];

当我尝试使用此方法捕获屏幕时:

/// Renders the current view hierarchy into a bitmap and saves the result
/// to the user's photo album.
/// NOTE(review): AVCaptureVideoPreviewLayer does not render through
/// -renderInContext:, so the live camera preview will be absent from the
/// saved image — to include it, composite a frame grabbed from the capture
/// output instead.
- (IBAction)Photo {
    CGSize snapshotSize = self.view.bounds.size;
    /* Opaque context at the screen's native scale (0.0f = device scale). */
    UIGraphicsBeginImageContextWithOptions(snapshotSize, YES, 0.0f);
    [self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *snapshot = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    UIImageWriteToSavedPhotosAlbum(snapshot, nil, nil, nil);
}

prevLayer 没有被捕获,我错过了什么吗?

4

1 回答 1

3

AVCaptureVideoPreviewLayer 不支持通过 UIGraphicsGetImageFromCurrentImageContext() 这类基于图层渲染的方式进行屏幕捕获,这是苹果框架本身的一个限制。获取当前相机画面的唯一方法,是从 AVCaptureOutput 的采样缓冲区中提取视频帧,然后手动将该帧合成到你的屏幕截图中。

于 2012-07-28T04:12:02.443 回答