
I am modifying the code from https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/master/quickstart/csharp/unity/text-to-speech/Assets/Scripts/HelloWorld.cs so that it counts from 1 to 10. The iterations show up fine in the console, but only "ten" is actually spoken.

I am trying to get it to say "one", "two", "three" ... "ten" using a for loop.

What am I missing?

using UnityEngine;
using UnityEngine.UI;
using Microsoft.CognitiveServices.Speech;
using System.Threading.Tasks;


public class HelloWorld : MonoBehaviour
{
    // Hook up the three properties below with a Text, InputField and Button object in your UI.
    public Text outputText;
    public InputField inputField;
    public Button speakButton;
    public AudioSource audioSource;

    private object threadLocker = new object();
    private bool waitingForSpeak;
    private string message;

    private SpeechConfig speechConfig;
    private SpeechSynthesizer synthesizer;


    public void ButtonClick()
    {
        // Holds the status text for the UI; copied into `message` under the lock below.
        string newMessage = string.Empty;

        // Starts speech synthesis, and returns after a single utterance is synthesized.
        for (int j = 1; j <= 10; j++)
        {
            Debug.Log("Iteration: " + j);


            using (var result = synthesizer.SpeakTextAsync(j.ToString()).Result)
            {
                // Checks result.
                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    var sampleCount = result.AudioData.Length / 2;
                    var audioData = new float[sampleCount];
                    for (var i = 0; i < sampleCount; ++i)
                    {
                        audioData[i] = (short)(result.AudioData[i * 2 + 1] << 8 | result.AudioData[i * 2]) / 32768.0F;
                    }

                    // The output audio format is 16K 16bit mono
                    var audioClip = AudioClip.Create("SynthesizedAudio", sampleCount, 1, 16000, false);
                    audioClip.SetData(audioData, 0);
                    audioSource.clip = audioClip;
                    audioSource.Play();

                    newMessage = "Speech synthesis succeeded!";
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                    newMessage = $"CANCELED:\nReason=[{cancellation.Reason}]\nErrorDetails=[{cancellation.ErrorDetails}]\nDid you update the subscription info?";
                }

                lock (threadLocker)
                {
                    // Surface the latest status message to the UI (read in Update()).
                    message = newMessage;
                }
            }

        }
          
    }

    void Start()
    {
        if (outputText == null)
        {
            UnityEngine.Debug.LogError("outputText property is null! Assign a UI Text element to it.");
        }
        else if (inputField == null)
        {
            message = "inputField property is null! Assign a UI InputField element to it.";
            UnityEngine.Debug.LogError(message);
        }
        else if (speakButton == null)
        {
            message = "speakButton property is null! Assign a UI Button to it.";
            UnityEngine.Debug.LogError(message);
        }
        else
        {
            inputField.text = "Enter text you wish spoken here.";
            message = "Click button to synthesize speech";
            speakButton.onClick.AddListener(ButtonClick);
            speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "northeurope");

            // The default format is Riff16Khz16BitMonoPcm.
            // We are playing the audio in memory as audio clip, which doesn't require riff header.
            // So we need to set the format to Raw16Khz16BitMonoPcm.
            speechConfig.SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Raw16Khz16BitMonoPcm);

            // Creates a speech synthesizer.
            // Make sure to dispose the synthesizer after use!
            synthesizer = new SpeechSynthesizer(speechConfig, null);
        }
    }

    void Update()
    {
        lock (threadLocker)
        {
            if (speakButton != null)
            {
                speakButton.interactable = !waitingForSpeak;
            }

            if (outputText != null)
            {
                outputText.text = message;
            }
        }
    }

    void OnDestroy()
    {
        synthesizer.Dispose();
    }
}

1 Answer


I ended up doing the following, and it works the way I wanted. Basically, make the button click handler async and await the synthesizer.

public async void ButtonClick()
{
    Debug.Log("Button Pressed!");

    for (int j = 1; j <= 10; j++)
    {
        string newMessage = string.Empty;

        var config = SpeechConfig.FromSubscription("KEY", "northeurope");
        using var synthesizer = new SpeechSynthesizer(config);

        // Awaiting the call keeps the loop from moving on to the next
        // number before the current one has finished.
        await synthesizer.SpeakTextAsync(j.ToString());
    }
}
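
For reference, below is a minimal sketch of the same idea that keeps the AudioClip playback from the question. It assumes the method lives in the HelloWorld class above, so the synthesizer created in Start() (with a null AudioConfig and the Raw16Khz16BitMonoPcm output format) and the audioSource field are available; the Task.Delay at the end is only a rough way to let each clip finish before the next number starts.

// Sketch only: awaits each synthesis and plays the raw PCM through the AudioSource,
// as in the question's original ButtonClick. Assumes the HelloWorld fields above.
public async void ButtonClick()
{
    for (int j = 1; j <= 10; j++)
    {
        // Await the synthesis so the next number is not requested
        // before this one has been produced.
        using (var result = await synthesizer.SpeakTextAsync(j.ToString()))
        {
            if (result.Reason != ResultReason.SynthesizingAudioCompleted)
            {
                Debug.LogError($"Synthesis failed for {j}: {result.Reason}");
                continue;
            }

            // Convert the raw 16 kHz 16-bit mono PCM into floats for an AudioClip.
            var sampleCount = result.AudioData.Length / 2;
            var audioData = new float[sampleCount];
            for (var i = 0; i < sampleCount; ++i)
            {
                audioData[i] = (short)(result.AudioData[i * 2 + 1] << 8 | result.AudioData[i * 2]) / 32768.0F;
            }

            var audioClip = AudioClip.Create("SynthesizedAudio", sampleCount, 1, 16000, false);
            audioClip.SetData(audioData, 0);
            audioSource.clip = audioClip;
            audioSource.Play();

            // Let the clip finish before looping, otherwise the next Play()
            // replaces it and only the last number is heard.
            await Task.Delay((int)(audioClip.length * 1000));
        }
    }
}

This also reuses the single synthesizer created in Start() instead of building a new config and synthesizer on every iteration.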
answered 2022-01-06T16:09:18.367