
I have been referring to the post "Android Speech Recognition as a Service on Android 4.1 & 4.2" to try to implement speech recognition in a service.

I think I have it mostly working. When I run it on my device, I get the "Ready for Speech" toast message that I show in the onReadyForSpeech() callback.

According to Hoan Nguyen, who answered the post above, we can start speaking as soon as onReadyForSpeech() is called.

My problem is that I don't know how to capture the speech being spoken and convert it to text, or where that should happen.

Does anyone know how to do this? I know it's a very basic question, but this is my first time working with speech recognition, so please bear with me.

Any help on this is greatly appreciated. Thanks in advance :)

    import java.lang.ref.WeakReference;

    import android.app.Service;
    import android.content.Context;
    import android.content.Intent;
    import android.media.AudioManager;
    import android.os.Build;
    import android.os.Bundle;
    import android.os.CountDownTimer;
    import android.os.Handler;
    import android.os.IBinder;
    import android.os.Message;
    import android.os.Messenger;
    import android.os.RemoteException;
    import android.speech.RecognitionListener;
    import android.speech.RecognizerIntent;
    import android.speech.SpeechRecognizer;
    import android.util.Log;
    import android.widget.Toast;

    public class MyService extends Service
    {
      protected AudioManager mAudioManager; 
      protected SpeechRecognizer mSpeechRecognizer;
      protected Intent mSpeechRecognizerIntent;
      protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));

      protected boolean mIsListening;
      protected volatile boolean mIsCountDownOn;

      static final int MSG_RECOGNIZER_START_LISTENING = 1;
      static final int MSG_RECOGNIZER_CANCEL = 2;

    @Override
    public void onCreate()
    {
        super.onCreate();
        mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); 
        mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
        mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
        mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                                         RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                         this.getPackageName());

       mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
        //Toast.makeText(this, "onCreate", Toast.LENGTH_SHORT).show();
        Log.d("onCreate","Entered");
    }


    protected static class IncomingHandler extends Handler
    {
        private WeakReference<MyService> mtarget;

        IncomingHandler(MyService target)
        {
            mtarget = new WeakReference<MyService>(target);

            Log.d("IncomingHandler","Entered");
        }


        @Override
        public void handleMessage(Message msg)
        {
            Log.d("handleMessage","Entered");

            final MyService target = mtarget.get();
            if (target == null)
            {
                // the service has already been garbage collected
                return;
            }

            switch (msg.what)
            {
                case MSG_RECOGNIZER_START_LISTENING:

                    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
                    {
                        // turn off beep sound  
                        target.mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
                    }
                     if (!target.mIsListening)
                     {
                         target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
                         target.mIsListening = true;
                         Log.d("TAG", "message start listening"); 
                         //$NON-NLS-1$
                     }
                     break;

                 case MSG_RECOGNIZER_CANCEL:
                      target.mSpeechRecognizer.cancel();
                      target.mIsListening = false;
                      Log.d("TAG", "message canceled recognizer"); //$NON-NLS-1$
                      break;
             }
       } 
    } 

    // Count down timer for Jelly Bean work around
    protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
    {

        @Override
        public void onTick(long millisUntilFinished)
        {
            // TODO Auto-generated method stub
            Log.d("onTick","Entered");
        }

        @Override
        public void onFinish()
        {
            Log.d("onFinish","Entered");

            mIsCountDownOn = false;
            Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
            try
            {
                mServerMessenger.send(message);
                message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
                mServerMessenger.send(message);
            }
            catch (RemoteException e)
            {

            }
        }
    };

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        // TODO Auto-generated method stub
        //mSpeechRecognizer.startListening(mSpeechRecognizerIntent);

        try
        {
            Message msg = new Message();
            msg.what = MSG_RECOGNIZER_START_LISTENING; 
            mServerMessenger.send(msg);
        }
        catch (RemoteException e)
        {
            Log.d("msg",""+e);
        }
        return  START_NOT_STICKY;
        //return super.onStartCommand(intent, flags, startId);
    }

    @Override
    public void onDestroy()
    {
        super.onDestroy();

        if (mIsCountDownOn)
        {
            mNoSpeechCountDown.cancel();
        }
        if (mSpeechRecognizer != null)
        {
            mSpeechRecognizer.destroy();
        }

        Log.d("onDestroy","Entered");
    }

    protected class SpeechRecognitionListener implements RecognitionListener
    {

        private static final String TAG = "Speech---->";

        @Override
        public void onBeginningOfSpeech()
        {
            // speech input will be processed, so there is no need for count down anymore
            if (mIsCountDownOn)
            {
                mIsCountDownOn = false;
                mNoSpeechCountDown.cancel();
            }               
            //Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
            Log.d("onBeginningOfSpeech","Entered");
        }

        @Override
        public void onBufferReceived(byte[] buffer)
        {
            String sTest = "";
            Log.d("onBufferReceived","Entered");
        }

        @Override
        public void onEndOfSpeech()
        {
            //Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$
            Log.d("onEndOfSpeech","Entered");
         }

        @Override
        public void onError(int error)
        {
            if (mIsCountDownOn)
            {
                mIsCountDownOn = false;
                mNoSpeechCountDown.cancel();
            }
             mIsListening = false;
             Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
             try
             {
                    mServerMessenger.send(message);
             }
             catch (RemoteException e)
             {

             }
            //Log.d(TAG, "error = " + error); //$NON-NLS-1$
             Log.d("onError","Entered");
        }

        @Override
        public void onEvent(int eventType, Bundle params)
        {

        }

        @Override
        public void onPartialResults(Bundle partialResults)
        {

        }

        @Override
        public void onReadyForSpeech(Bundle params)
        {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
            {
                mIsCountDownOn = true;
                mNoSpeechCountDown.start();
                mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
            }
            //Log.d("TAG", "onReadyForSpeech"); 
            Toast.makeText(getApplicationContext(), "Ready for Speech", Toast.LENGTH_SHORT).show();
            Log.d("onReadyForSpeech","Entered");//$NON-NLS-1$
        }

        @Override
        public void onResults(Bundle results)
        {
            //Log.d(TAG, "onResults"); //$NON-NLS-1$

        }

        @Override
        public void onRmsChanged(float rmsdB)
        {

        }



    }

    @Override
    public IBinder onBind(Intent intent) {
        // TODO Auto-generated method stub
        return null;
    }
    }

1 Answer


You do it in onResults(Bundle results). There you can get what the user said into an ArrayList:

    ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
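
For example, a minimal sketch (not part of the original answer) of what the asker's empty onResults() could look like: it logs the best match and then restarts listening, mirroring the pattern the service already uses in onError(). The restart is an assumption about the desired behaviour, since SpeechRecognizer stops after delivering results; it also needs `import java.util.ArrayList;` in the service.

    @Override
    public void onResults(Bundle results)
    {
        // all candidate transcriptions, best match first
        ArrayList<String> matches =
                results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);

        if (matches != null && !matches.isEmpty())
        {
            String text = matches.get(0);
            Log.d("onResults", "Recognized: " + text);
        }

        // assumption: start listening again, same pattern as onError()
        mIsListening = false;
        Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
        try
        {
            mServerMessenger.send(message);
        }
        catch (RemoteException e)
        {
            Log.d("onResults", "" + e);
        }
    }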
answered Feb 11, 2014 at 7:23