I'm making an app in which I want to capture an image from the front-facing camera without showing any kind of capture screen. I want to take a picture entirely in code, with no user interaction. How would I do this for the front-facing camera?
5 Answers
41
How to capture an image from the front-facing camera using AVFoundation:
Development notes:
- Double-check your app and image orientation settings
- AVFoundation and its associated frameworks are nasty behemoths and very difficult to understand/implement. I've made my code as lean as possible, but please check out this excellent tutorial for a better explanation (the site is no longer available; link via archive.org): http://www.benjaminloulier.com/posts/ios4-and-direct-access-to-the-camera
CameraViewController.h
// Frameworks
#import <CoreVideo/CoreVideo.h>
#import <CoreMedia/CoreMedia.h>
#import <AVFoundation/AVFoundation.h>
#import <UIKit/UIKit.h>
@interface CameraViewController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate>
// Camera
@property (weak, nonatomic) IBOutlet UIImageView* cameraImageView;
@property (strong, nonatomic) AVCaptureDevice* device;
@property (strong, nonatomic) AVCaptureSession* captureSession;
@property (strong, nonatomic) AVCaptureVideoPreviewLayer* previewLayer;
@property (strong, nonatomic) UIImage* cameraImage;
@end
CameraViewController.m
#import "CameraViewController.h"
@implementation CameraViewController
- (void)viewDidLoad
{
    [super viewDidLoad];

    [self setupCamera];
    [self setupTimer];
}

- (void)setupCamera
{
    NSArray* devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for(AVCaptureDevice *device in devices)
    {
        if([device position] == AVCaptureDevicePositionFront)
            self.device = device;
    }

    AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:self.device error:nil];
    AVCaptureVideoDataOutput* output = [[AVCaptureVideoDataOutput alloc] init];
    output.alwaysDiscardsLateVideoFrames = YES;

    dispatch_queue_t queue;
    queue = dispatch_queue_create("cameraQueue", NULL);
    [output setSampleBufferDelegate:self queue:queue];

    NSString* key = (NSString *) kCVPixelBufferPixelFormatTypeKey;
    NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
    NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
    [output setVideoSettings:videoSettings];

    self.captureSession = [[AVCaptureSession alloc] init];
    [self.captureSession addInput:input];
    [self.captureSession addOutput:output];
    [self.captureSession setSessionPreset:AVCaptureSessionPresetPhoto];

    self.previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
    self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;

    // CHECK FOR YOUR APP
    self.previewLayer.frame = CGRectMake(0, 0, self.view.frame.size.height, self.view.frame.size.width);
    self.previewLayer.orientation = AVCaptureVideoOrientationLandscapeRight;
    // CHECK FOR YOUR APP

    [self.view.layer insertSublayer:self.previewLayer atIndex:0]; // Comment-out to hide preview layer

    [self.captureSession startRunning];
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);

    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);

    self.cameraImage = [UIImage imageWithCGImage:newImage scale:1.0f orientation:UIImageOrientationDownMirrored];

    CGImageRelease(newImage);
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}

- (void)setupTimer
{
    NSTimer* cameraTimer = [NSTimer scheduledTimerWithTimeInterval:2.0f target:self selector:@selector(snapshot) userInfo:nil repeats:YES];
}

- (void)snapshot
{
    NSLog(@"SNAPSHOT");
    self.cameraImageView.image = self.cameraImage; // Comment-out to hide snapshot
}

@end
Connect this up to a UIViewController with a UIImageView for the snapshot, and it will work! The snapshots are taken programmatically at 2.0-second intervals without any user input. Comment out the marked lines to remove the preview layer and the snapshot feedback.
Let me know if you have any more questions/comments!
Answered 2012-12-28T00:27:49.257
5
The above code converted to Swift 4:
import UIKit
import AVFoundation
class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var cameraImageView: UIImageView!

    var device: AVCaptureDevice?
    var captureSession: AVCaptureSession?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var cameraImage: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()

        setupCamera()
        setupTimer()
    }

    func setupCamera() {
        let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                                 mediaType: AVMediaType.video,
                                                                 position: .front)
        device = discoverySession.devices[0]

        let input: AVCaptureDeviceInput
        do {
            input = try AVCaptureDeviceInput(device: device!)
        } catch {
            return
        }

        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true

        let queue = DispatchQueue(label: "cameraQueue")
        output.setSampleBufferDelegate(self, queue: queue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]

        captureSession = AVCaptureSession()
        captureSession?.addInput(input)
        captureSession?.addOutput(output)
        captureSession?.sessionPreset = AVCaptureSession.Preset.photo

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        previewLayer?.frame = CGRect(x: 0.0, y: 0.0, width: view.frame.width, height: view.frame.height)

        view.layer.insertSublayer(previewLayer!, at: 0)

        captureSession?.startRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: 0))

        let baseAddress = UnsafeMutableRawPointer(CVPixelBufferGetBaseAddress(imageBuffer!))
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer!)
        let width = CVPixelBufferGetWidth(imageBuffer!)
        let height = CVPixelBufferGetHeight(imageBuffer!)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let newContext = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)

        let newImage = newContext!.makeImage()
        cameraImage = UIImage(cgImage: newImage!)

        CVPixelBufferUnlockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    }

    func setupTimer() {
        _ = Timer.scheduledTimer(timeInterval: 2.0, target: self, selector: #selector(snapshot), userInfo: nil, repeats: true)
    }

    @objc func snapshot() {
        print("SNAPSHOT")
        cameraImageView.image = cameraImage
    }
}
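One caveat: on iOS 10 and later the session silently produces no frames unless the app has camera permission, so Info.plist needs an NSCameraUsageDescription entry. A minimal sketch of explicitly requesting access, assuming you call this from viewDidLoad in place of the direct setupCamera()/setupTimer() calls:

// Request camera access before setting up the session.
// Without NSCameraUsageDescription in Info.plist, iOS terminates
// the app as soon as the camera is accessed.
AVCaptureDevice.requestAccess(for: .video) { granted in
    DispatchQueue.main.async {
        guard granted else {
            print("Camera access denied")
            return
        }
        self.setupCamera()
        self.setupTimer()
    }
}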
Answered 2017-12-09T19:52:39.690
3
You probably want to use AVFoundation to capture the video stream/images without displaying them. Unlike UIImagePickerController, it does not work "out of the box". Take a look at Apple's AVCam sample code to get you started.
Answered 2012-06-02T12:03:53.140
2
In case anyone is still looking for a solution in 2017, I converted the code above from Objective-C to Swift 3.
import UIKit
import AVFoundation
class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var cameraImageView: UIImageView!

    var device: AVCaptureDevice?
    var captureSession: AVCaptureSession?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var cameraImage: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()

        setupCamera()
        setupTimer()
    }

    func setupCamera() {
        let discoverySession = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                               mediaType: AVMediaTypeVideo,
                                                               position: .front)
        device = discoverySession?.devices[0]

        let input: AVCaptureDeviceInput
        do {
            input = try AVCaptureDeviceInput(device: device)
        } catch {
            return
        }

        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true

        let queue = DispatchQueue(label: "cameraQueue")
        output.setSampleBufferDelegate(self, queue: queue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]

        captureSession = AVCaptureSession()
        captureSession?.addInput(input)
        captureSession?.addOutput(output)
        captureSession?.sessionPreset = AVCaptureSessionPresetPhoto

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        previewLayer?.frame = CGRect(x: 0.0, y: 0.0, width: view.frame.width, height: view.frame.height)

        view.layer.insertSublayer(previewLayer!, at: 0)

        captureSession?.startRunning()
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: .allZeros))

        let baseAddress = UnsafeMutableRawPointer(CVPixelBufferGetBaseAddress(imageBuffer!))
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer!)
        let width = CVPixelBufferGetWidth(imageBuffer!)
        let height = CVPixelBufferGetHeight(imageBuffer!)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let newContext = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)

        let newImage = newContext!.makeImage()
        cameraImage = UIImage(cgImage: newImage!)

        CVPixelBufferUnlockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: .allZeros))
    }

    func setupTimer() {
        _ = Timer.scheduledTimer(timeInterval: 2.0, target: self, selector: #selector(snapshot), userInfo: nil, repeats: true)
    }

    func snapshot() {
        print("SNAPSHOT")
        cameraImageView.image = cameraImage
    }
}
Also, I found a shorter way to get an image from the CMSampleBuffer:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    let myPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    let myCIimage = CIImage(cvPixelBuffer: myPixelBuffer!)
    let videoImage = UIImage(ciImage: myCIimage)
    cameraImage = videoImage
}
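One caveat with this shorter version: a UIImage backed only by a CIImage has no bitmap data behind it, so things like UIImageJPEGRepresentation can return nil. If that bites you, rendering through a CIContext first should help; a rough sketch, reusing the names above:

// Render the CIImage into a CGImage so the resulting UIImage has
// real bitmap backing. In practice, reuse a single CIContext
// instead of creating one per frame.
let context = CIContext()
if let cgImage = context.createCGImage(myCIimage, from: myCIimage.extent) {
    cameraImage = UIImage(cgImage: cgImage)
}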
Answered 2017-08-11T11:11:05.927
0
The documentation for the UIImagePickerController class describes a method called takePicture. It says:
Use this method in conjunction with a custom overlay view to initiate the programmatic capture of a still image. This supports taking more than one picture without leaving the interface, but requires that you hide the default image picker controls.
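A rough sketch of how that could look for the front camera (my own illustration, not from the documentation), with the default controls hidden:

// UIImagePickerController with hidden controls; the shot is
// triggered in code rather than by a shutter button.
let picker = UIImagePickerController()
picker.sourceType = .camera
picker.cameraDevice = .front
picker.showsCameraControls = false
picker.delegate = self // UIImagePickerControllerDelegate & UINavigationControllerDelegate
present(picker, animated: false) {
    picker.takePicture() // result arrives via imagePickerController(_:didFinishPickingMediaWithInfo:)
}

Note that this still puts the camera preview on screen, so it does not fully match the "no capture screen" requirement in the question; the AVFoundation approach above avoids that.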
Answered 2014-10-08T17:11:20.400