This is an extension of an earlier unanswered question of mine: AVCaptureSession is not recording audio from the mic in Swift
I am very unclear on how to write out video that is modified live together with audio recorded from the microphone. I have been searching for months and have found nothing. What seems to set my question apart from the others is that, instead of recording everything from the output as a normal video, I take the image buffer from the sampleBuffer in the captureOutput function, convert it to an image, modify it, and then write it back out to an AVAssetWriterInputPixelBufferAdaptor. From there, I don't know how to get the audio from the sampleBuffer, or whether that is even the right approach, although I have seen other people getting an AudioBufferList from captureOutput.
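For context, the pattern I think I'm aiming for is roughly the one below: modified video frames go through the pixel buffer adaptor, while the audio sample buffers, if I understand other answers correctly (this is an assumption, not something I've verified), can be appended to an AVAssetWriterInput as-is:

import AVFoundation

// sketch only: `adaptor` and `audioInput` stand in for my
// assetWriterPixelBufferInput and assetWriterAudioInput below
func writeVideo(_ pixelBuffer: CVPixelBuffer, at time: CMTime,
                via adaptor: AVAssetWriterInputPixelBufferAdaptor) {
    if adaptor.assetWriterInput.isReadyForMoreMediaData {
        adaptor.append(pixelBuffer, withPresentationTime: time)
    }
}

func writeAudio(_ sampleBuffer: CMSampleBuffer, to audioInput: AVAssetWriterInput) {
    if audioInput.isReadyForMoreMediaData {
        audioInput.append(sampleBuffer) // an audio CMSampleBuffer carries its own timing
    }
}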
At the very least, this is what I have in my main class:
class CaptureVC: UIViewController, AVCapturePhotoCaptureDelegate, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIPickerViewDataSource, UIPickerViewDelegate {

    var captureSession: AVCaptureSession?
    var stillImageOutput: AVCapturePhotoOutput?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?

    let videoOutput = AVCaptureVideoDataOutput()
    let audioOutput = AVCaptureAudioDataOutput()

    var assetWriter: AVAssetWriter?
    var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
    var assetWriterAudioInput: AVAssetWriterInput?

    var currentSampleTime: CMTime?
    var currentVideoDimensions: CMVideoDimensions?

    var videoIsRecording = false

    override func viewDidLoad() {
        super.viewDidLoad()

        captureSession = AVCaptureSession()

        let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
        let microphone = AVCaptureDevice.default(.builtInMicrophone, for: AVMediaType.audio, position: .unspecified)

        var error: NSError?
        var videoInput: AVCaptureDeviceInput!
        var micInput: AVCaptureDeviceInput!

        do {
            videoInput = try AVCaptureDeviceInput(device: backCamera!)
            micInput = try AVCaptureDeviceInput(device: microphone!)
        } catch let error1 as NSError {
            error = error1
            videoInput = nil
            micInput = nil
            print(error!.localizedDescription)
        }

        if error == nil &&
            captureSession!.canAddInput(videoInput) &&
            captureSession!.canAddInput(micInput) {

            captureSession!.addInput(videoInput)
            captureSession!.addInput(micInput)

            stillImageOutput = AVCapturePhotoOutput()

            if captureSession!.canAddOutput(stillImageOutput!) {
                captureSession!.addOutput(stillImageOutput!)

                let q = DispatchQueue(label: "sample buffer delegate", qos: .default)

                videoOutput.setSampleBufferDelegate(self, queue: q)
                if captureSession!.canAddOutput(videoOutput) {
                    captureSession!.addOutput(videoOutput)
                }

                // the class conforms to AVCaptureAudioDataOutputSampleBufferDelegate,
                // so `self` can be passed directly here
                audioOutput.setSampleBufferDelegate(self, queue: q)
                if captureSession!.canAddOutput(audioOutput) {
                    captureSession!.addOutput(audioOutput)
                }

                captureSession!.startRunning()
            }
        }
    }
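One thing I have seen trip people up, though I'm not certain it is part of my problem: the mic requires an NSMicrophoneUsageDescription entry in Info.plist, and access can be requested explicitly (this is standard AVFoundation, not anything specific to my project):

// the capture session produces no audio unless mic access has been granted
AVCaptureDevice.requestAccess(for: .audio) { granted in
    if !granted {
        print("Microphone access denied")
    }
}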
And my function that creates the asset writer:
func createWriter() {
    self.checkForAndDeleteFile()

    do {
        assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileType.mov)
    } catch let error as NSError {
        print(error.localizedDescription)
        return
    }

    let videoSettings = [
        AVVideoCodecKey  : AVVideoCodecType.h264,
        AVVideoWidthKey  : Int(currentVideoDimensions!.height), // note: these are swapped because of REASONS
        AVVideoHeightKey : Int(currentVideoDimensions!.width)
    ] as [String : Any]

    let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
    assetWriterVideoInput.expectsMediaDataInRealTime = true

    let sourcePixelBufferAttributesDictionary = [
        String(kCVPixelBufferPixelFormatTypeKey)       : Int(kCVPixelFormatType_32BGRA),
        String(kCVPixelBufferWidthKey)                 : Int(currentVideoDimensions!.width),
        String(kCVPixelBufferHeightKey)                : Int(currentVideoDimensions!.height),
        String(kCVPixelBufferOpenGLESCompatibilityKey) : kCFBooleanTrue
    ] as [String : Any]

    assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
                                                                       sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    var channelLayout = AudioChannelLayout()
    memset(&channelLayout, 0, MemoryLayout<AudioChannelLayout>.size)
    channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo

    let audioSettings: [String: Any] = [
        AVFormatIDKey         : kAudioFormatMPEG4AAC,
        AVSampleRateKey       : 44100,
        AVNumberOfChannelsKey : 2,
        AVChannelLayoutKey    : Data(bytes: &channelLayout, count: MemoryLayout<AudioChannelLayout>.size) // stereo layout from above
    ]

    assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioSettings)
    assetWriterAudioInput?.expectsMediaDataInRealTime = true

    if assetWriter!.canAdd(assetWriterVideoInput) {
        assetWriter!.add(assetWriterVideoInput)
    } else {
        print("cannot add \(assetWriterVideoInput)")
    }

    if assetWriter!.canAdd(assetWriterAudioInput!) {
        assetWriter!.add(assetWriterAudioInput!)
    } else {
        print("cannot add \(String(describing: assetWriterAudioInput))")
    }
}
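For completeness, movieURL() and checkForAndDeleteFile() are small helpers, roughly the following (the exact path doesn't matter):

func movieURL() -> NSURL {
    // a scratch file in the temp directory; the name is arbitrary
    return FileManager.default.temporaryDirectory.appendingPathComponent("capture.mov") as NSURL
}

func checkForAndDeleteFile() {
    // remove any leftover file so the asset writer can create a fresh one
    let url = movieURL() as URL
    if FileManager.default.fileExists(atPath: url.path) {
        try? FileManager.default.removeItem(at: url)
    }
}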
When I record, I create the writer, start writing, and start the session:
videoIsRecording = true
createWriter()
assetWriter?.startWriting()
assetWriter?.startSession(atSourceTime: currentSampleTime!)
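One thing I'm unsure about here: currentSampleTime is whatever video frame last came through, so if the first audio buffer has an earlier timestamp I would guess it gets trimmed. A pattern I have considered instead (sessionStarted is a flag I would add, not something in my code above) is starting the session lazily from the first buffer that arrives after recording begins:

var sessionStarted = false

// called with every sample buffer, audio or video, once recording is on
func startWriterSessionIfNeeded(with sampleBuffer: CMSampleBuffer) {
    guard videoIsRecording, !sessionStarted, assetWriter?.status == .writing else { return }
    sessionStarted = true
    assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}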
When I stop recording, it moves to another view to display the video:
assetWriter?.finishWriting(completionHandler: { [unowned self] () -> Void in

    let firstAsset = AVURLAsset(url: self.movieURL() as URL)

    guard let exporter = AVAssetExportSession(asset: firstAsset, presetName: AVAssetExportPresetHighestQuality) else { return }

    guard let vidComp = self.getVideoComposition(asset: firstAsset,
                                                 videoSize: CGSize(width: 1280,
                                                                   height: 720)) else {
        print("Unable to create video composition")
        return
    }

    print(vidComp.instructions)

    exporter.videoComposition = vidComp
    exporter.outputURL = self.movieURL() as URL
    exporter.outputFileType = AVFileType.mov

    exporter.exportAsynchronously() {
        DispatchQueue.main.async {
            self.activityTextStatus.text = ""

            fileURLSenderVal = self.movieURL() as URL

            let manageCaptureVC = self.storyboard?.instantiateViewController(withIdentifier: "ManageCaptureVC") as! ManageCaptureVC
            manageCaptureVC.fileURL = fileURLSenderVal
            manageCaptureVC.imageCaptureMode = ManageCaptureVC.imageCaptureModes(rawValue: self.imageCaptureMode.rawValue)!
            manageCaptureVC.delegate = self
            self.present(manageCaptureVC, animated: true, completion: nil)
        }
    }
})
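Two things about this step that I'm flagging as guesses rather than fixes: examples I have seen mark the writer inputs finished before calling finishWriting, and I suspect pointing the exporter at the same URL the writer just produced cannot work, since it would be reading and writing the same file. Something like this, slotted into the code above (exportURL is a name I made up):

// before finishWriting:
assetWriterPixelBufferInput?.assetWriterInput.markAsFinished()
assetWriterAudioInput?.markAsFinished()

// and inside the completion handler, give the exporter its own file:
let exportURL = FileManager.default.temporaryDirectory.appendingPathComponent("export.mov")
try? FileManager.default.removeItem(at: exportURL)
exporter.outputURL = exportURL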
But this is where I'm stuck: where and how do I record the input from the mic? Below is my delegate method; my best guess at the audio path follows it.
// live output from camera
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

    // only handle video frames here; audio arrives through this same callback
    if output == videoOutput {

        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)

        var orientation = UIImageOrientation.right
        if isFrontCamera {
            orientation = UIImageOrientation.leftMirrored
        }

        image = UIImage(ciImage: cameraImage)

        if let ciImage = image?.ciImage {
            image = applyFilterAndReturnImage(ciImage: ciImage, orientation: orientation, currentCameraRes: currentCameraRes!)

            let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
            self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
            self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)

            if videoIsRecording && self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
                let filteredBuffer = buffer(from: image!)
                let success = self.assetWriterPixelBufferInput?.append(filteredBuffer!, withPresentationTime: self.currentSampleTime!)
                if success == false {
                    print("Pixel Buffer failed")
                }
            }

            DispatchQueue.main.async {
                imageView!.image = image
            }
        }
    }
}
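And here is my best guess at the missing audio path, pieced together from other answers and unverified: the audio output calls this same delegate method, so I would add a branch like the helper below (writeAudioSample is my own name, not an API) and call it at the top of captureOutput before the video check:

// best guess, not confirmed working: mic sample buffers can be appended
// straight to the audio writer input; routing on `output` keeps the
// audio and video paths separate
func writeAudioSample(_ output: AVCaptureOutput, _ sampleBuffer: CMSampleBuffer) {
    guard output == audioOutput,
          videoIsRecording,
          let audioInput = assetWriterAudioInput,
          audioInput.isReadyForMoreMediaData else { return }
    if !audioInput.append(sampleBuffer) {
        print("Audio buffer failed")
    }
}

Is appending the untouched CMSampleBuffer like this the right approach, or do I need to pull out the AudioBufferList first?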
Once again, I have been at this on and off for months and have found no documentation to help. Thank you.