I am trying to add a text overlay to video. When I record at high quality on an iPhone 5s (or a lower-end device) and draw text onto the frames, the audio drops out after 1 or 2 seconds. This does not happen on faster devices such as the iPhone 6/6s. If I remove the text-drawing method, recording works correctly on every device, and if I lower the video quality on the 5s it also works. How can I get video with audio and a text overlay on an iPhone 5s?

Here is my code:
import Foundation
import AVFoundation
import AssetsLibrary
import UIKit
import CoreImage

class VideoWriter: NSObject {
    var fileWriter: AVAssetWriter!
    var videoInput: AVAssetWriterInput!
    var audioInput: AVAssetWriterInput!
    var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
    var presentationTime = kCMTimeZero
    var wod: WOD!
    var watermark = Watermark()
    var watermarkData: Dictionary<String, Any>?

    init(fileUrl: URL!, height: Int, width: Int, channels: Int, samples: Float64) {
        fileWriter = try? AVAssetWriter(outputURL: fileUrl, fileType: AVFileType.mp4)

        // Video input: H.264 at the capture resolution, fed in real time.
        let videoOutputSettings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecH264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ]
        videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
        videoInput.expectsMediaDataInRealTime = true
        fileWriter.add(videoInput)

        // Pixel buffer adaptor so the watermarked CVPixelBuffers can be appended directly.
        let sourcePixelBufferAttributes: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
            kCVPixelBufferWidthKey as String: width,
            kCVPixelBufferHeightKey as String: height
        ]
        assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributes)

        // Audio input: AAC, matching the source channel count and sample rate.
        let audioOutputSettings: [String: Any] = [
            AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
            AVNumberOfChannelsKey: channels,
            AVSampleRateKey: samples,
            AVEncoderBitRateKey: 128000
        ]
        audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        audioInput.expectsMediaDataInRealTime = true
        fileWriter.add(audioInput)
    }

    func write(_ sample: CMSampleBuffer, isVideo: Bool) {
        if CMSampleBufferDataIsReady(sample) {
            // Start the session at the timestamp of the first buffer we receive.
            if fileWriter.status == AVAssetWriterStatus.unknown {
                print("Start writing, isVideo = \(isVideo), status = \(fileWriter.status.rawValue)")
                let startTime = CMSampleBufferGetPresentationTimeStamp(sample)
                fileWriter.startWriting()
                fileWriter.startSession(atSourceTime: startTime)
            }
            if fileWriter.status == AVAssetWriterStatus.failed {
                print("Error occurred, isVideo = \(isVideo), status = \(fileWriter.status.rawValue), \(fileWriter.error!.localizedDescription)")
                return
            }
            if isVideo {
                if videoInput.isReadyForMoreMediaData {
                    // Draw the text overlay into the frame, then append the resulting pixel buffer.
                    let time = CMSampleBufferGetPresentationTimeStamp(sample)
                    let pixelBuffer = self.watermark.addWatermark(data: sample, values: self.watermarkData!)
                    self.assetWriterPixelBufferInput!.append(pixelBuffer, withPresentationTime: time)
                }
            } else {
                if audioInput.isReadyForMoreMediaData {
                    audioInput.append(sample)
                }
            }
        }
    }
}
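For reference, write(_:isVideo:) is called from the capture output delegate queues, roughly like this (simplified sketch; Recorder and videoWriter stand in for my actual class and property names):

extension Recorder: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Video buffers get the overlay inside the writer; audio passes straight through.
        let isVideo = output is AVCaptureVideoDataOutput
        videoWriter.write(sampleBuffer, isVideo: isVideo)
    }
}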
If, instead of appending through assetWriterPixelBufferInput, we use videoInput.append(sample), the audio is fine on all devices, but we lose the text overlay.
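In other words, the video branch of write(_:isVideo:) becomes the following, and the problem disappears along with the overlay:

if isVideo {
    if videoInput.isReadyForMoreMediaData {
        // Appending the untouched sample buffer keeps the audio intact,
        // but the frame reaches the file without the text overlay.
        videoInput.append(sample)
    }
}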
Sample code for the watermark (text overlay):
class Watermark {

    func addWatermark(data: CMSampleBuffer, values: Dictionary<String, Any>) -> CVPixelBuffer {
        let pixelBuffer = CMSampleBufferGetImageBuffer(data)
        // Lock the buffer while we draw into it.
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        // ...
        self.writeImage(image: image, timerType: timerType, date: date, name: name, wod: wod, timer: timer, rounds: rounds, reps: reps, status: status, toBuffer: pixelBuffer!)
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pixelBuffer!
    }

    func writeImage(image overlayImage: UIImage, timerType: String, date: String, name: String, wod: String, timer: String, rounds: String, reps: String, status: String, toBuffer pixelBuffer: CVPixelBuffer) {
        // Render the overlay text into an image the size of the frame.
        let textImage = self.createTextImage(image: overlayImage, timerType: timerType, date: date, userName: name, myWod: wod, timer: timer, rounds: rounds, reps: reps, status: status, size: CGSize(width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer)))
        let maskImage = CIImage(image: textImage)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let options = [kCIImageColorSpace: colorSpace]
        let inputImage = CIImage(cvImageBuffer: pixelBuffer, options: options)

        // Composite the text image over the camera frame.
        let filter = CIFilter(name: "CISourceOverCompositing")
        filter?.setValue(inputImage, forKey: kCIInputBackgroundImageKey)
        filter?.setValue(maskImage, forKey: kCIInputImageKey)
        let outputImage = filter?.outputImage

        var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
        bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue

        // Render the composited result back into the original pixel buffer.
        let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer), width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: colorSpace, bitmapInfo: bitmapInfo)
        if context != nil {
            let ciContext = CIContext(cgContext: context!, options: nil)
            ciContext.render(outputImage!, to: pixelBuffer, bounds: outputImage!.extent, colorSpace: colorSpace)
        }
    }
}
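One detail that may matter on the 5s: writeImage builds a fresh text image, CGContext, and CIContext for every single frame. A sketch of what I mean by caching the expensive pieces (CachedWatermark is a hypothetical rework, untested; composited(over:) requires iOS 11):

class CachedWatermark {
    // Reused across frames; building a CIContext per frame is costly on slow GPUs.
    private let ciContext = CIContext(options: nil)

    // Regenerate this only when the overlay values actually change.
    var textImage: CIImage?

    func overlay(onto pixelBuffer: CVPixelBuffer) {
        guard let text = textImage else { return }
        let background = CIImage(cvImageBuffer: pixelBuffer)
        // Composite the cached text over the frame and render back in place.
        let composited = text.composited(over: background)
        ciContext.render(composited,
                         to: pixelBuffer,
                         bounds: composited.extent,
                         colorSpace: CGColorSpaceCreateDeviceRGB())
    }
}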