2

我将样本缓冲区转换为 CGContext，然后将变换应用于该上下文，并从中创建一个 CIImage，再将其显示在一个 UIImageView 中。

同时,我想将其附加到AVAssetWriterInput以创建这些转换的电影。

到目前为止，我应用于上下文的变换没有任何效果：当我在图像视图中显示所谓"变换后"的图像时，它看起来和原来完全一样。

更新: 我设法将样本缓冲区记录到视频文件中(尽管方向错误,它仍然被拉伸)。我已将此代码用作基础

http://geek-is-stupid.github.io/blog/2017/04/13/how-to-record-detect-face-overlay-video-at-real-time-using-swift/

但我仍在努力将旋转应用于 CGContext。基本上我对上下文所做的一切都被完全忽略了。

/// Camera-frame delegate: copies each frame into a fresh BGRA pixel buffer,
/// rotates it to match the on-screen rotation of `boxView`, previews the
/// result in `imageView`, and appends it to the asset-writer adaptor.
///
/// Fixes over the original:
/// - The rotation had no effect because the pixels were `memcpy`'d BEFORE the
///   CTM was changed; CGContext transforms only affect *subsequent drawing*,
///   so the source frame is now drawn through the transformed context instead.
/// - The output buffer was locked twice (the second call should have been an
///   unlock); locks are now balanced with `defer`, including on early exits.
/// - The rotation is performed about the buffer centre (translate → rotate →
///   translate back) using buffer dimensions, not view-point dimensions.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {

    let writable = canWrite()
    if writable, sessionAtSourceTime == nil {
        print("starting session")
        // Anchor the writer session at the first written frame's timestamp.
        sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        assetWriter!.startSession(atSourceTime: sessionAtSourceTime!)
    }

    guard writable, let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    autoreleasepool {
        CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        // Balance the source-buffer lock on every exit path (the original
        // leaked the lock when CVPixelBufferCreate failed).
        defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0)) }

        // Destination buffer with the same dimensions as the camera frame.
        var renderedOutputPixelBuffer: CVPixelBuffer? = nil
        let options = [
            kCVPixelBufferCGImageCompatibilityKey as String: true,
            kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
        let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                         CVPixelBufferGetWidth(pixelBuffer),
                                         CVPixelBufferGetHeight(pixelBuffer),
                                         kCVPixelFormatType_32BGRA, options,
                                         &renderedOutputPixelBuffer)
        guard status == kCVReturnSuccess, let outputBuffer = renderedOutputPixelBuffer else { return }

        CVPixelBufferLockBaseAddress(outputBuffer, CVPixelBufferLockFlags(rawValue: 0))
        defer { CVPixelBufferUnlockBaseAddress(outputBuffer, CVPixelBufferLockFlags(rawValue: 0)) }

        // Wrap the SOURCE frame in a context so it can be turned into a
        // CGImage and drawn (rather than memcpy'd) into the destination.
        guard let sourceContext = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer),
                                            width: CVPixelBufferGetWidth(pixelBuffer),
                                            height: CVPixelBufferGetHeight(pixelBuffer),
                                            bitsPerComponent: 8,
                                            bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                                            space: CGColorSpaceCreateDeviceRGB(),
                                            bitmapInfo: bitmapInfo!),
              let sourceImage = sourceContext.makeImage() else { return }

        guard let context = CGContext(data: CVPixelBufferGetBaseAddress(outputBuffer),
                                      width: CVPixelBufferGetWidth(outputBuffer),
                                      height: CVPixelBufferGetHeight(outputBuffer),
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(outputBuffer),
                                      space: CGColorSpaceCreateDeviceRGB(),
                                      bitmapInfo: bitmapInfo!) else { return }

        // Current UI rotation, recovered from boxView's affine transform.
        let radians = atan2f(Float(boxView!.transform.b), Float(boxView!.transform.a))
        let width = CGFloat(CVPixelBufferGetWidth(outputBuffer))
        let height = CGFloat(CVPixelBufferGetHeight(outputBuffer))

        // Rotate about the buffer centre, then draw the frame through the
        // transformed CTM — this is what actually rotates the pixels.
        context.translateBy(x: width / 2, y: height / 2)
        context.rotate(by: CGFloat(radians))
        context.translateBy(x: -width / 2, y: -height / 2)
        context.draw(sourceImage, in: CGRect(x: 0, y: 0, width: width, height: height))

        // Preview the rotated frame.
        if let image = context.makeImage() {
            self.imageView!.image = UIImage(cgImage: image)
        }

        // Append the rotated buffer with the source frame's timestamp.
        if (bufferAdaptor?.assetWriterInput.isReadyForMoreMediaData)!, canWrite() {
            bufferAdaptor?.append(outputBuffer, withPresentationTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
    }
}
4

1 回答 1

2

找到了解决方案。下面是代码的重要部分。

   // Get the camera frame from the delegate-supplied sample buffer.
   let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
   CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
   // Wrap the raw frame in a CIImage so Core Image can transform it.
   let ci = CIImage(cvPixelBuffer: pixelBuffer, options: options)

   // Rotate about the view's centre: move the origin to the centre,
   // rotate, then move it back so the frame is not pushed off-canvas.
   var transform = CGAffineTransform(translationX: self.view.frame.midX, y: self.view.frame.midY)
   // FIX: CGAffineTransform has no `rotate(angle:)`; the API is `rotated(by:)`.
   transform = transform.rotated(by: CGFloat(radians))
   transform = transform.translatedBy(x: -self.view.frame.midX, y: -self.view.frame.midY)

   // Apply the transform via CIAffineTransform.
   let filter = CIFilter(name: "CIAffineTransform")
   // FIX: on iOS kCIInputTransformKey expects the transform boxed in an NSValue,
   // not a bare CGAffineTransform.
   filter!.setValue(NSValue(cgAffineTransform: transform), forKey: kCIInputTransformKey)
   filter!.setValue(ci, forKey: kCIInputImageKey)
   let output = filter?.outputImage

   // Destination buffer sized to the on-screen view.
   var newPixelBuffer: CVPixelBuffer? = nil
   CVPixelBufferCreate(kCFAllocatorDefault,
                       Int(self.view.frame.width),
                       Int(self.view.frame.height),
                       kCVPixelFormatType_32BGRA,
                       nil,
                       &newPixelBuffer)
   // `context` is a long-lived CIContext; creating a new one each frame
   // is too CPU intensive.
   context.render(output!, to: newPixelBuffer!)

   // Hand the rotated frame to the pixel-buffer adaptor, preserving the
   // original presentation timestamp.
   if (bufferAdaptor?.assetWriterInput.isReadyForMoreMediaData)!, canWrite() {
       bufferAdaptor?.append(newPixelBuffer!,
                             withPresentationTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
   }

   CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
于 2017-06-18T10:20:30.730 回答