I'm trying to convert a CMSampleBuffer to Data, send it over the internet, and rebuild a copy on the receiving side.
I've already compressed the CMSampleBuffers captured from the camera with H.264 through a VTCompressionSession.
Now I reconstruct the CMSampleBuffer and create a VTDecompressionSession, but VTDecompressionSessionDecodeFrame() crashes with a memory error and I don't know how to fix it.
I've found plenty of H.264 examples on the internet, but they are all written in legacy Objective-C.
I'm using Xcode 11.3 with Swift 5.1, deploying to an iOS device running iOS 12.1.
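// Pull the SPS and PPS parameter sets out of the last encoded sample's format description.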
var sps, pps: UnsafePointer<UInt8>?
var spsSize = 0, ppsSize: Int = 0
var parameterSetCount = 0
var nalUnitHeaderLength: Int32 = 0
CMVideoFormatDescriptionGetH264ParameterSetAtIndex(CMSampleBufferGetFormatDescription(encodedSamples.last!)!, parameterSetIndex: 0, parameterSetPointerOut: &sps, parameterSetSizeOut: &spsSize, parameterSetCountOut: &parameterSetCount, nalUnitHeaderLengthOut: &nalUnitHeaderLength)
CMVideoFormatDescriptionGetH264ParameterSetAtIndex(CMSampleBufferGetFormatDescription(encodedSamples.last!)!, parameterSetIndex: 1, parameterSetPointerOut: &pps, parameterSetSizeOut: &ppsSize, parameterSetCountOut: &parameterSetCount, nalUnitHeaderLengthOut: &nalUnitHeaderLength)
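// Copy the encoded bytes out of the sample's block buffer into a Data, then wrap that copy in a new CMBlockBuffer.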
let dataBuffer: CMBlockBuffer = {
    var buffer: CMBlockBuffer?
    let bufferData: Data = {
        let blockBuffer = CMSampleBufferGetDataBuffer(encodedSamples.last!)!
        var totalLength: Int = 0
        var data: UnsafeMutablePointer<Int8>?
        CMBlockBufferGetDataPointer(blockBuffer, atOffset: 0, lengthAtOffsetOut: nil, totalLengthOut: &totalLength, dataPointerOut: &data)
        return Data(bytes: data!, count: totalLength)
    }()
    var bbd = Data()
    bbd.append(bufferData)
    let status = CMBlockBufferCreateWithMemoryBlock(allocator: kCFAllocatorDefault, memoryBlock: &bbd, blockLength: bbd.count, blockAllocator: kCFAllocatorDefault, customBlockSource: nil, offsetToData: 0, dataLength: bbd.count, flags: 0, blockBufferOut: &buffer)
    if status != kCMBlockBufferNoErr { print(status) }
    return buffer!
}()
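// Rebuild a video format description from the SPS/PPS pointers.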
let formatDes: CMFormatDescription = {
    let dataParamArray = [sps!, pps!]
    let parameterSetPointers = UnsafePointer<UnsafePointer<UInt8>>(dataParamArray)
    var formatDescription: CMFormatDescription?
    CMVideoFormatDescriptionCreateFromH264ParameterSets(allocator: kCFAllocatorDefault, parameterSetCount: parameterSetCount, parameterSetPointers: parameterSetPointers, parameterSetSizes: UnsafePointer<Int>([spsSize, ppsSize]), nalUnitHeaderLength: nalUnitHeaderLength, formatDescriptionOut: &formatDescription)
    return formatDescription!
}()
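// Copy the timing info from the encoded sample.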
var timingInfo: CMSampleTimingInfo = {
    var t = CMSampleTimingInfo()
    CMSampleBufferGetSampleTimingInfoArray(encodedSamples.last!, entryCount: 1, arrayToFill: &t, entriesNeededOut: nil)
    return t
}()
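// Assemble a new CMSampleBuffer from the block buffer, format description and timing info.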
var sampleSize = CMBlockBufferGetDataLength(dataBuffer)
var sampleBuffer: CMSampleBuffer!
let status = CMSampleBufferCreate(allocator: kCFAllocatorDefault, dataBuffer: dataBuffer, dataReady: true, makeDataReadyCallback: nil, refcon: nil, formatDescription: formatDes, sampleCount: 1, sampleTimingEntryCount: 1, sampleTimingArray: &timingInfo, sampleSizeEntryCount: 1, sampleSizeArray: &sampleSize, sampleBufferOut: &sampleBuffer)
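// Intended to mark the frame as not depending on other frames via its sample attachments.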
var attachmentsArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: true) as! Array<Dictionary<String, Any>>
attachmentsArray[0] = [kCMSampleAttachmentKey_DependsOnOthers as String: false]
if status != noErr { print(status) }
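// Create a decompression session and decode the rebuilt sample.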
if #available(iOS 9, *) {
    var session: VTDecompressionSession?
    VTDecompressionSessionCreate(allocator: kCFAllocatorDefault, formatDescription: formatDes, decoderSpecification: nil, imageBufferAttributes: [kCVPixelBufferOpenGLESCompatibilityKey: true, kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA] as CFDictionary, outputCallback: nil, decompressionSessionOut: &session)
    print(VTDecompressionSessionCanAcceptFormatDescription(session!, formatDescription: formatDes))
    var info = VTDecodeInfoFlags()
    let flags = VTDecodeFrameFlags._EnableAsynchronousDecompression
    let status = VTDecompressionSessionDecodeFrame(session!, sampleBuffer: sampleBuffer, flags: flags, infoFlagsOut: &info) { status, infoFlags, imageBuffer, presentationTimeStamp, presentationDuration in
        print(imageBuffer!)
    } // EXC_BAD_ADDRESS HERE!!
    print(status, info)
    VTDecompressionSessionInvalidate(session!)
}
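I suspect the EXC_BAD_ADDRESS comes from the CMBlockBuffer step: passing &bbd hands CMBlockBufferCreateWithMemoryBlock the address of a local Data struct, which the block buffer neither owns nor keeps alive. Below is a sketch of what I'm considering instead, where the block buffer allocates its own backing memory and the encoded bytes are copied into it (bufferData is the Data copy from above, copiedBuffer is just an illustrative name). Is this the right direction?

var copiedBuffer: CMBlockBuffer?
// Let the block buffer allocate and own its backing memory instead of pointing it at a local Data variable.
var blockStatus = CMBlockBufferCreateWithMemoryBlock(allocator: kCFAllocatorDefault, memoryBlock: nil, blockLength: bufferData.count, blockAllocator: kCFAllocatorDefault, customBlockSource: nil, offsetToData: 0, dataLength: bufferData.count, flags: kCMBlockBufferAssureMemoryNowFlag, blockBufferOut: &copiedBuffer)
if blockStatus == kCMBlockBufferNoErr, let copiedBuffer = copiedBuffer {
    // Copy the encoded bytes into the buffer-owned memory.
    blockStatus = bufferData.withUnsafeBytes { raw in
        CMBlockBufferReplaceDataBytes(with: raw.baseAddress!, blockBuffer: copiedBuffer, offsetIntoDestination: 0, dataLength: bufferData.count)
    }
}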