Can anyone help me find the official documentation for the pods used in this sample: https://github.com/GoogleCloudPlatform/ios-docs-samples/tree/master/speech/Swift/Speech-gRPC-Streaming
I'm developing an iOS app that uses the streaming method of Google Speech-to-Text, and the sample doesn't demonstrate how to pass metadata, so the official documentation might explain how to pass metadata at initialization. Here is the full configuration I want to feed to the service:
{
  "encoding": "LINEAR16",
  "sampleRateHertz": 16000,
  "languageCode": "en-US",
  "maxAlternatives": 30,
  "metadata": {
    "interactionType": "VOICE_SEARCH",
    "recordingDeviceType": "SMARTPHONE",
    "microphoneDistance": "NEARFIELD",
    "originalMediaType": "AUDIO",
    "recordingDeviceName": "iPhone",
    "audioTopic": "Quran surah and ayah search"
  },
  "speechContexts": [
    {
      "phrases": ["mumtahinah"],
      "boost": 2
    },
    {
      "phrases": ["Hujrat"],
      "boost": 2
    },
    {
      "phrases": ["taubah"],
      "boost": 2
    },
    {
      "phrases": ["fajar"],
      "boost": 2
    }
  ]
}
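From reading the google.cloud.speech.v1 protos (RecognitionMetadata and SpeechContext are both messages referenced by RecognitionConfig), my guess is that the protoc-generated classes in the googleapis pod would let me populate this config roughly as sketched below. But the property and enum names (metadata, interactionType, speechContextsArray, boost, and so on) are assumptions from the proto definitions that I can't verify without the docs:

// Untested sketch: property and enum names below are my guesses from the
// google.cloud.speech.v1 proto definitions, not confirmed against the
// pod's generated headers.
let metadata = RecognitionMetadata()
metadata.interactionType = .voiceSearch          // "VOICE_SEARCH"
metadata.recordingDeviceType = .smartphone       // "SMARTPHONE"
metadata.microphoneDistance = .nearfield         // "NEARFIELD"
metadata.originalMediaType = .audio              // "AUDIO"
metadata.recordingDeviceName = "iPhone"
metadata.audioTopic = "Quran surah and ayah search"
recognitionConfig.metadata = metadata

// speechContexts: one SpeechContext per phrase list, with a boost if the
// generated SpeechContext class exposes that field in this pod version
for phrase in ["mumtahinah", "Hujrat", "taubah", "fajar"] {
    let context = SpeechContext()
    context.phrasesArray.add(phrase)
    context.boost = 2
    recognitionConfig.speechContextsArray.add(context)
}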
Here is my current code:
import Foundation
import googleapis

let API_KEY: String = "YOUR_API_KEY"
let HOST = "speech.googleapis.com"

typealias SpeechRecognitionCompletionHandler = (StreamingRecognizeResponse?, NSError?) -> (Void)

class SpeechRecognitionService {
    var sampleRate: Int = 16000
    private var streaming = false

    private var client: Speech!
    private var writer: GRXBufferedPipe!
    private var call: GRPCProtoCall!

    static let sharedInstance = SpeechRecognitionService()

    func streamAudioData(_ audioData: NSData, completion: @escaping SpeechRecognitionCompletionHandler) {
        if !streaming {
            // if we aren't already streaming, set up a gRPC connection
            client = Speech(host: HOST)
            writer = GRXBufferedPipe()
            call = client.rpcToStreamingRecognize(withRequestsWriter: writer,
                                                  eventHandler: { (done, response, error) in
                                                      completion(response, error as? NSError)
                                                  })

            // authenticate using an API key obtained from the Google Cloud Console
            call.requestHeaders.setObject(NSString(string: API_KEY),
                                          forKey: NSString(string: "X-Goog-Api-Key"))
            // if the API key has a bundle ID restriction, specify the bundle ID like this
            call.requestHeaders.setObject(NSString(string: Bundle.main.bundleIdentifier!),
                                          forKey: NSString(string: "X-Ios-Bundle-Identifier"))
            print("HEADERS: \(call.requestHeaders)")

            call.start()
            streaming = true

            // send an initial request message to configure the service
            let recognitionConfig = RecognitionConfig()
            recognitionConfig.encoding = .linear16
            recognitionConfig.sampleRateHertz = Int32(sampleRate)
            recognitionConfig.languageCode = "en-US"
            recognitionConfig.maxAlternatives = 30
            recognitionConfig.enableWordTimeOffsets = true

            let streamingRecognitionConfig = StreamingRecognitionConfig()
            streamingRecognitionConfig.config = recognitionConfig
            streamingRecognitionConfig.singleUtterance = false
            streamingRecognitionConfig.interimResults = true

            let streamingRecognizeRequest = StreamingRecognizeRequest()
            streamingRecognizeRequest.streamingConfig = streamingRecognitionConfig

            writer.writeValue(streamingRecognizeRequest)
        }

        // send a request message containing the audio data
        let streamingRecognizeRequest = StreamingRecognizeRequest()
        streamingRecognizeRequest.audioContent = audioData as Data
        writer.writeValue(streamingRecognizeRequest)
    }

    func stopStreaming() {
        if !streaming {
            return
        }
        writer.finishWithError(nil)
        streaming = false
    }

    func isStreaming() -> Bool {
        return streaming
    }
}
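For context, this is roughly how I drive streamAudioData from the AudioControllerDelegate callback, simplified from the sample's ViewController (audioData is an NSMutableData property and SAMPLE_RATE a constant defined there; the 100 ms chunking follows the sample's comment):

// Buffer microphone samples and flush them to the recognizer in
// ~100 ms chunks of 16 kHz, 16-bit mono LINEAR16 audio.
func processSampleData(_ data: Data) {
    audioData.append(data)
    let chunkSize = Int(0.1 * Double(SAMPLE_RATE) * 2)  // 100 ms of samples
    if audioData.length > chunkSize {
        SpeechRecognitionService.sharedInstance.streamAudioData(
            audioData,
            completion: { response, error in
                if let error = error {
                    print("ERROR: \(error.localizedDescription)")
                } else if let response = response {
                    print("RESPONSE: \(response)")
                }
            })
        audioData = NSMutableData()
    }
}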