0

我只是想捕捉屏幕、应用程序音频和麦克风音频。应用程序音频和麦克风音频独立工作正常,但组合时会引发一些未知错误。以下是开始截屏和processSampleBuffer.

设置屏幕捕获编写器的方法

func startCapture() {
        _filename = UUID().uuidString
        let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(_filename).mp4")
        let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mp4)
        
        let screen = UIScreen.main.bounds
        let screenBounds = screen.size
        let videoCompressionPropertys = [
            AVVideoAverageBitRateKey: screenBounds.width * screenBounds.height * 10.1
        ]
        
        let videoSettings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: screenBounds.width,
            AVVideoHeightKey: screenBounds.height,
            AVVideoCompressionPropertiesKey: videoCompressionPropertys
        ]
        
        let input = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
        input.expectsMediaDataInRealTime = true
        if writer.canAdd(input) {
            writer.add(input)
        }
        
        // Add the app audio input
        var acl = AudioChannelLayout()
        memset(&acl, 0, MemoryLayout<AudioChannelLayout>.size)
        acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
        let audioOutputSettings: [String: Any] =
        [ AVFormatIDKey: kAudioFormatMPEG4AAC,
       AVSampleRateKey : 44100,
 AVNumberOfChannelsKey : 1,
   AVEncoderBitRateKey : 128000,
    AVChannelLayoutKey : Data(bytes: &acl, count: MemoryLayout<AudioChannelLayout>.size)]
        
        let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        audioInput.expectsMediaDataInRealTime = true
        if (writer.canAdd(audioInput)) {
            writer.add(audioInput)
        }
        
        
        // Add the mic audio input
        let audioOutputSettings1: [String: Any] =
        [ AVFormatIDKey: kAudioFormatMPEG4AAC,
       AVSampleRateKey : 24000,
 AVNumberOfChannelsKey : 1,
AVEncoderAudioQualityKey: AVAudioQuality.high.rawValue]
        
        let micAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings1)
        micAudioInput.expectsMediaDataInRealTime = true
        if (writer.canAdd(micAudioInput)) {
            writer.add(micAudioInput)
        }
        
        writer.startWriting()
        
        _audioAssetWriterInput = audioInput
        _micAssetWriterInput = micAudioInput
        _assetWriterInput = input
        _assetWriter = writer
    }

处理样本缓冲区

override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
        if startTime == nil {
            startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
            
            _assetWriter!.startSession(atSourceTime: CMTime.zero)
        }
        
        if sampleBufferType == RPSampleBufferType.video {
            if let _assetWriterInput = _assetWriterInput {
                if _assetWriterInput.isReadyForMoreMediaData {
                    let appended = _assetWriterInput.append(sampleBuffer)
                    print(appended)
                    
                    if !appended {
                        let status = _assetWriter?.status
                        let error = _assetWriter?.error
                        print("cannot append video")
                    }
                }
            }
        }
        
        if sampleBufferType == RPSampleBufferType.audioApp {
            if let _assetWriterInput = _audioAssetWriterInput {
                if _assetWriterInput.isReadyForMoreMediaData {
                    let appended = _assetWriterInput.append(sampleBuffer)
                    print(appended)
                    
                    if !appended {
                        let status = _assetWriter?.status
                        let error = _assetWriter?.error
                        print("cannot append app audio")
                    }
                }
            }
        }
        
        if sampleBufferType == RPSampleBufferType.audioMic {
            if let _assetWriterInput = _micAssetWriterInput {
                if _assetWriterInput.isReadyForMoreMediaData {
                    let appended = _assetWriterInput.append(sampleBuffer)
                    print(appended)
                    
                    if !appended {
                        let status = _assetWriter?.status
                        let error = _assetWriter?.error
                        print("cannot append mic audio")
                    }
                }
            }
        }
        
        if shouldEnd {
            _finishWriters()
        }
    }
4

0 回答 0