人们已经知道如何使用 Google Speech API(Speech-To-Text)。我试图让它与 Flash Speex 编解码器一起工作,但我就是想不通。我尝试在每 160 个字节之前插入帧大小字节(正如一些消息来源所说),但这不起作用。
所以我发布了一个挑战,以某种方式翻译 Flash speex 字节以供 Google Speech API 理解。
这是基本的 Flex 代码:
<?xml version="1.0" encoding="utf-8"?>
<s:Application xmlns:fx="http://ns.adobe.com/mxml/2009"
xmlns:s="library://ns.adobe.com/flex/spark"
creationComplete="init();">
<fx:Script>
<![CDATA[
// Speech API endpoint configuration.
// Reference: http://mikepultz.com/2011/03/accessing-google-speech-api-chrome-11/,
// Reference: https://stackoverflow.com/questions/4361826/does-chrome-have-buil-in-speech-recognition-for-input-type-text-x-webkit-speec
// Unofficial Google Speech API recognize endpoint (v1).
private static const speechApiUrl:String = "http://www.google.com/speech-api/v1/recognize";
// Language code sent as the "lang" query parameter.
private static const speechLanguage:String = "en";
// MIME type for Speex audio where each frame is prefixed by a length byte.
private static const mimeType:String = "audio/x-speex-with-header-byte";
// Sample rate in kHz (multiplied by 1000 when building the Content-Type).
private static const sampleRate:uint = 8;
// Buffer of Speex-encoded bytes captured from the microphone.
private var soundBytes:ByteArray;
// The microphone producing the audio (configured in init()).
private var microphone:Microphone;
// Initial setup
private function init():void {
// Set up the microphone
microphone = Microphone.getMicrophone();
// Speech API supports 8khz and 16khz rates
microphone.rate = sampleRate;
// Select the SPEEX codec
microphone.codec = SoundCodec.SPEEX;
// I don't know what effect this has...
microphone.framesPerPacket = 1;
}
// THIS IS THE CHALLENGE
// We have the flash speex bytes and we need to translate them so Google API understands
// THE CHALLENGE: re-frame the raw Flash Speex bytes for the Google
// Speech API. The "audio/x-speex-with-header-byte" format expects each
// Speex frame to be prefixed with a single byte holding the frame length.
// NOTE(review): this assumes every encoded frame is exactly frameSize
// bytes; the real encoded Speex frame length depends on the encoder
// quality setting — verify against the codec's output.
// @param frameSize  bytes per Speex frame; defaults to the original
//                   hard-coded 160 so existing callers are unchanged.
private function process(frameSize:uint = 160):void {
    soundBytes.position = 0;
    var processed:ByteArray = new ByteArray();
    processed.endian = Endian.BIG_ENDIAN;
    // Hoist the frame count out of the loop condition (the original
    // recomputed the division on every iteration). Any trailing partial
    // frame is dropped, as before.
    var frameCount:uint = uint(soundBytes.length / frameSize);
    for (var n:uint = 0; n < frameCount; n++) {
        // Length-prefix byte, then the frame payload itself.
        processed.writeByte(frameSize);
        processed.writeBytes(soundBytes, frameSize * n, frameSize);
    }
    processed.position = 0;
    soundBytes = processed;
}
// Sending to Google Speech server
// Upload the processed Speex bytes to the Google Speech API endpoint
// via an HTTP POST, wiring completion and error handlers.
private function send():void {
    var url:String = speechApiUrl + "?lang=" + speechLanguage;

    var request:URLRequest = new URLRequest(url);
    request.method = URLRequestMethod.POST;
    request.contentType = mimeType + "; rate=" + (1000 * sampleRate);
    request.data = soundBytes;

    var loader:URLLoader = new URLLoader();
    loader.addEventListener(Event.COMPLETE, onComplete);
    loader.addEventListener(IOErrorEvent.IO_ERROR, onError);
    loader.load(request);

    trace("Connecting to Speech API server");
}
// Log transport failures reported by the URLLoader.
private function onError(event:IOErrorEvent):void {
    var message:String = "Error: " + event.toString();
    trace(message);
}
// Log the raw response body returned by the Speech API.
private function onComplete(event:Event):void {
    var response:Object = event.target.data;
    trace("Done: " + response);
}
// Start recording: reset the capture buffer and begin receiving
// encoded sample data from the microphone.
private function record(event:Event):void {
    var buffer:ByteArray = new ByteArray();
    buffer.endian = Endian.BIG_ENDIAN;
    soundBytes = buffer;
    microphone.addEventListener(SampleDataEvent.SAMPLE_DATA, sampleData);
}
// Append each incoming chunk of Speex-encoded audio to the buffer.
private function sampleData(event:SampleDataEvent):void {
    var chunk:ByteArray = event.data;
    soundBytes.writeBytes(chunk, 0, chunk.bytesAvailable);
}
// Stop recording; if anything was captured, re-frame and upload it.
private function stop(e:Event):void {
    microphone.removeEventListener(SampleDataEvent.SAMPLE_DATA, sampleData);
    if (soundBytes == null) {
        return;
    }
    process();
    send();
}
]]>
</fx:Script>
<s:HGroup>
<s:Button label="Record"
click="record(event)"/>
<s:Button label="Stop and Send"
click="stop(event)"/>
</s:HGroup>
</s:Application>
有关更多信息,请查看以下链接:http://mikepultz.com/2011/03/accessing-google-speech-api-chrome-11/ ,以及 Stack Overflow 问题“Chrome 是否具有针对 x-webkit-speech 输入元素的内置语音识别?”