
I am developing an application that converts speech to text, and for some reason I am getting the following error.

[Utility] +[AFAggregator logDictationFailedWithError:] Error Domain=kAFAssistantErrorDomain Code=203 "Corrupt" UserInfo={NSLocalizedDescription=Corrupt, NSUnderlyingError=0x60c000253c50 {Error Domain=SiriSpeechErrorDomain Code=102 "(null)"}}
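
For context, speech recognition permission is requested before startRecording() is ever called, roughly along these lines (a minimal sketch, assuming the standard SFSpeechRecognizer.requestAuthorization flow and that NSSpeechRecognitionUsageDescription and NSMicrophoneUsageDescription are present in Info.plist; the helper name is a placeholder and the real call site is not shown):

import Foundation
import Speech

// Hypothetical helper illustrating the standard authorization request;
// it is not part of the SpeechManager class posted below.
func requestSpeechAuthorization(completion: @escaping (Bool) -> Void) {
    SFSpeechRecognizer.requestAuthorization { status in
        DispatchQueue.main.async {
            // Recognition only works when the status is .authorized;
            // .denied / .restricted / .notDetermined will make requests fail.
            completion(status == .authorized)
        }
    }
}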

Please find my code below.

import Foundation
import AVFoundation
import Speech
protocol SpeechManagerDelegate {
    func didReceiveText(text:String)
    func didStartedListening(status:Bool)
}

class SpeechManager {
    lazy var speechSynthesizer = AVSpeechSynthesizer()
    let audioEngine = AVAudioEngine()
    let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer()
    var request = SFSpeechAudioBufferRecognitionRequest()
    var recognitionTask: SFSpeechRecognitionTask?
    let audioSession = AVAudioSession.sharedInstance()
    var delegate:SpeechManagerDelegate?

    static let shared:SpeechManager = {
        let instance = SpeechManager()
        return instance
    }()

    func startRecording() {

        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        do {
            try audioSession.setCategory(AVAudioSessionCategoryRecord)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
        request = SFSpeechAudioBufferRecognitionRequest()
        guard let inputNode = audioEngine.inputNode as? AVAudioInputNode else {
            fatalError("Audio engine has no input node")
        }
        // request is already an SFSpeechAudioBufferRecognitionRequest, so the old
        // conditional-cast guard only produced an "always succeeds" warning and is not needed
        request.shouldReportPartialResults = true
        recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { (result, error) in
            var isFinal = false
            if let result = result {
                // Forward partial (and final) transcriptions to the delegate
                self.delegate?.didReceiveText(text: result.bestTranscription.formattedString)
                isFinal = result.isFinal
            }
            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
            }
        })
        // Feed microphone buffers from the input node into the recognition request
        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.request.append(buffer)
        }

        audioEngine.prepare()

        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }

        delegate?.didStartedListening(status: true)
    }
    func stopRecording() {
        if audioEngine.isRunning {
            audioEngine.stop()
            request.endAudio()
            audioEngine.inputNode.removeTap(onBus: 0)
        }
    }
    func keepRecording() {
        if audioEngine.isRunning {
            return
        } else {
            startRecording()
        }
    }

    func isRecoding() -> Bool {
        return audioEngine.isRunning
    }

    func speak(text: String) {
        do {
            try audioSession.setCategory(AVAudioSessionCategoryPlayback)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
        let speechUtterance = AVSpeechUtterance(string: text)
        speechSynthesizer.speak(speechUtterance)
    }
}
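
For reference, the manager is driven from a view controller roughly like this (a simplified sketch, not my actual controller; SpeechViewController, the outlet, and the button action are placeholders, just to show how the delegate and the start/stop calls are wired up):

import UIKit

// Simplified, hypothetical caller; only the delegate wiring and start/stop calls matter here.
class SpeechViewController: UIViewController, SpeechManagerDelegate {
    @IBOutlet weak var transcriptLabel: UILabel!

    override func viewDidLoad() {
        super.viewDidLoad()
        SpeechManager.shared.delegate = self
    }

    @IBAction func recordTapped(_ sender: UIButton) {
        if SpeechManager.shared.isRecoding() {
            SpeechManager.shared.stopRecording()
        } else {
            SpeechManager.shared.startRecording()
        }
    }

    // MARK: - SpeechManagerDelegate

    func didReceiveText(text: String) {
        transcriptLabel.text = text
    }

    func didStartedListening(status: Bool) {
        print("listening: \(status)")
    }
}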

The recognitionTask result comes back nil. Any help is appreciated.
