コード例 #1
0
        // Speech synthesis to audio data stream.
        public static async Task SynthesisToAudioDataStreamAsync()
        {
            // Create a speech config from a subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // A null AudioConfig attaches no output device or stream; the synthesized
            // audio is obtained from the result object instead.
            using (var synthesizer = new SpeechSynthesizer(config, null as AudioConfig))
            {
                for (;;)
                {
                    // Read one line of text from the console; an empty line exits.
                    Console.WriteLine("Enter some text that you want to synthesize, or enter empty text to exit.");
                    Console.Write("> ");
                    var text = Console.ReadLine();
                    if (string.IsNullOrEmpty(text))
                    {
                        break;
                    }

                    using (var result = await synthesizer.SpeakTextAsync(text))
                    {
                        if (result.Reason == ResultReason.Canceled)
                        {
                            // Synthesis did not complete; report why.
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                            if (cancellation.Reason == CancellationReason.Error)
                            {
                                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                                Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                                Console.WriteLine($"CANCELED: Did you update the subscription info?");
                            }
                        }
                        else if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            Console.WriteLine($"Speech synthesized for text [{text}].");

                            using (var audioDataStream = AudioDataStream.FromResult(result))
                            {
                                // First persist the whole stream to a WAV file.
                                string fileName = "outputaudio.wav";
                                await audioDataStream.SaveToWaveFileAsync(fileName);

                                Console.WriteLine($"Audio data for text [{text}] was saved to [{fileName}]");

                                // Saving advanced the stream position to the end; rewind
                                // before reading the same data back in memory.
                                audioDataStream.SetPosition(0);

                                var buffer = new byte[16000];
                                uint totalSize = 0;

                                for (uint filledSize = audioDataStream.ReadData(buffer);
                                     filledSize > 0;
                                     filledSize = audioDataStream.ReadData(buffer))
                                {
                                    Console.WriteLine($"{filledSize} bytes received.");
                                    totalSize += filledSize;
                                }

                                Console.WriteLine($"{totalSize} bytes of audio data received for text [{text}]");
                            }
                        }
                    }
                }
            }
        }
コード例 #2
0
        // Speech synthesis events.
        public static async Task SynthesisEventsAsync()
        {
            // Create a speech config from a subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // A null AudioConfig attaches no output device or stream; the synthesized
            // audio is obtained from the result object instead.
            using (var synthesizer = new SpeechSynthesizer(config, null as AudioConfig))
            {
                // Wire up lifecycle events so synthesis progress is visible on the console.
                synthesizer.SynthesisStarted += (s, e) => Console.WriteLine("Synthesis started.");

                synthesizer.Synthesizing += (s, e) =>
                    Console.WriteLine($"Synthesizing event received with audio chunk of {e.Result.AudioData.Length} bytes.");

                synthesizer.SynthesisCompleted += (s, e) => Console.WriteLine("Synthesis completed.");

                for (;;)
                {
                    // Read one line of text from the console; an empty line exits.
                    Console.WriteLine("Enter some text that you want to synthesize, or enter empty text to exit.");
                    Console.Write("> ");
                    var text = Console.ReadLine();
                    if (string.IsNullOrEmpty(text))
                    {
                        break;
                    }

                    using (var result = await synthesizer.SpeakTextAsync(text))
                    {
                        if (result.Reason == ResultReason.Canceled)
                        {
                            // Synthesis did not complete; report why.
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                            if (cancellation.Reason == CancellationReason.Error)
                            {
                                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                                Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                                Console.WriteLine($"CANCELED: Did you update the subscription info?");
                            }
                        }
                        else if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            Console.WriteLine($"Speech synthesized for text [{text}].");
                            var audioData = result.AudioData;
                            Console.WriteLine($"{audioData.Length} bytes of audio data received for text [{text}]");
                        }
                    }
                }
            }
        }
コード例 #3
0
        /// <summary>
        /// Synthesizes <paramref name="text"/> with the Azure Speech service and pushes
        /// the resulting PCM audio into <paramref name="waveProvider"/> for playback.
        /// </summary>
        /// <param name="text">Text to synthesize.</param>
        /// <param name="waveProvider">Buffered NAudio provider that receives the raw samples.</param>
        /// <param name="rate">Unused; kept for interface compatibility. TODO confirm whether it
        /// was meant to drive an SSML prosody rate.</param>
        public async Task Speak(string text, BufferedWaveProvider waveProvider, int rate)
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription(Key, Region);

            config.SpeechSynthesisLanguage  = Language;
            config.SpeechSynthesisVoiceName = Voice;

            // Create a pull audio output stream whose PCM format matches the wave provider,
            // so the bytes can be fed to it verbatim.
            using (var stream = AudioOutputStream.CreatePullStream(AudioStreamFormat.GetWaveFormatPCM((uint)waveProvider.WaveFormat.SampleRate, (byte)waveProvider.WaveFormat.BitsPerSample, (byte)waveProvider.WaveFormat.Channels)))
            {
                // Creates a speech synthesizer using audio stream output.
                using (var streamConfig = AudioConfig.FromStreamOutput(stream))
                using (var synthesizer = new SpeechSynthesizer(config, streamConfig))
                using (var result = await synthesizer.SpeakTextAsync(text))
                {
                    if (result.Reason == ResultReason.Canceled)
                    {
                        // Synthesis did not complete; report why via the log callback.
                        var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                        OnLog?.Invoke($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            OnLog?.Invoke($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            OnLog?.Invoke($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                            OnLog?.Invoke($"CANCELED: Did you update the subscription info?");
                        }
                    }
                }

                // Pull the synthesized data from the stream and feed it to the provider.
                // (Disposing the synthesizer above guarantees synthesis has finished.)
                byte[] buffer = new byte[32000];
                uint   filledSize;
                while ((filledSize = stream.Read(buffer)) > 0)
                {
                    waveProvider.AddSamples(buffer, 0, (int)filledSize);
                }
            }
        }
コード例 #4
0
        /// <summary>
        /// Recognizes a single utterance from the default microphone and then speaks a
        /// canned reply that depends on the recognized text.
        /// </summary>
        public static async Task RecognizeSpeechAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // SECURITY: a real-looking subscription key is committed here; rotate it and
            // load keys from configuration instead of source control.
            var config = SpeechConfig.FromSubscription("9679c4f1753a444caefd13b75166c720", "westus");

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // RecognizeOnceAsync returns after a single utterance (ended by trailing
                // silence or at most 15 seconds of audio), so it suits one-shot command or
                // query recognition. Use StartContinuousRecognitionAsync() for long-running
                // multi-utterance recognition.
                var result = await recognizer.RecognizeOnceAsync();

                // Report the recognition outcome.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"I think you say that >  {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }

                // Speaks one reply and reports the synthesis outcome.
                // BUGFIX: the original checked the recognition result's Reason in these
                // branches, so SynthesizingAudioCompleted/Canceled could never match;
                // check the synthesis result (result1) instead.
                async Task SpeakReplyAsync(SpeechSynthesizer synthesizer, string reply)
                {
                    using (var result1 = await synthesizer.SpeakTextAsync(reply))
                    {
                        if (result1.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            Console.WriteLine($"Speech synthesized to speaker for text [{result1}]");
                        }
                        else if (result1.Reason == ResultReason.Canceled)
                        {
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result1);
                            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                            if (cancellation.Reason == CancellationReason.Error)
                            {
                                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                                Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                                Console.WriteLine($"CANCELED: Did you update the subscription info?");
                            }
                        }
                    }
                }

                // Speak a canned reply to the recognized utterance.
                using (var synthesizer = new SpeechSynthesizer(config))
                {
                    string reply = result.Text == "What do you want to do?"
                        ? "I want to sleep all day"
                        : "i don't know.";

                    await SpeakReplyAsync(synthesizer, reply);
                }
            }
        }
コード例 #5
0
        // Translates a fixed English sentence to Hindi and Urdu, then speaks the Hindi
        // translation through the default speaker using SSML.
        public static async Task SynthesisToSpeakerAsync()
        {
            // Create a speech config with the subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var    config      = SpeechConfig.FromSubscription("XXXXXXXXXXX", "westus");
            string textToSpeak = "I am Asif Khan";

            // Creates a speech synthesizer using the default speaker as audio output.
            using (var synthesizer = new SpeechSynthesizer(config))
            {
                string inputText = "My name is Asif Khan. I live in sydney";
                string host      = "https://api.cognitive.microsofttranslator.com";
                string route     = "/translate?api-version=3.0&to=hi&to=ur";

                TranslationResult[] translations = await TranslateTextHelper.GetInstance.TranslateInputText4Speech(host, route, inputText);

                foreach (TranslationResult item in translations)
                {
                    // Print the detected input language and confidence score.
                    Console.WriteLine("Detected input language: {0}\nConfidence score: {1}\n", item.DetectedLanguage.Language, item.DetectedLanguage.Score);

                    // Print every translation; remember the Hindi one for speech output.
                    foreach (Translation t in item.Translations)
                    {
                        if (t.To.ToUpper().Equals("HI"))
                        {
                            textToSpeak = t.Text;
                        }
                        Console.WriteLine("Translated to {0}: {1}", t.To, t.Text);
                    }
                }

                // Nothing to say — bail out early.
                if (string.IsNullOrEmpty(textToSpeak))
                {
                    return;
                }

                // Build SSML selecting the Hindi voice and speak it.
                string ssmlText = GenerateSsml("hi-IN", "Female", "Microsoft Server Speech Text to Speech Voice (hi-IN, Kalpana)", textToSpeak);

                using (var result = await synthesizer.SpeakSsmlAsync(ssmlText))
                {
                    if (result.Reason == ResultReason.Canceled)
                    {
                        // Synthesis did not complete; report why.
                        var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                        Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }
                    }
                    else if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                    {
                        Console.WriteLine($"Speech synthesized to speaker for text [{textToSpeak}]");
                    }
                }
            }
        }
コード例 #6
0
    /// <summary>
    /// Synthesizes <paramref name="text"/> via the Azure Speech service and plays the
    /// resulting raw PCM audio through an AudioSource on <paramref name="gameObject"/>.
    /// Toggles pause/play if audio is already playing.
    /// </summary>
    private void streamAudio(GameObject gameObject, string text)
    {
        // Creates an instance of a speech config with specified subscription key and service region.
        // SECURITY: a real-looking subscription key is committed here; rotate it and
        // load keys from configuration instead of source control.
        SpeechConfig speechConfig = SpeechConfig.FromSubscription("c5ab91b760b24599b3667791c08aa7d9", "uksouth");

        // The default format is Riff16Khz16BitMonoPcm. The audio is played from memory
        // as an AudioClip, which does not need a RIFF header, so request raw PCM.
        speechConfig.SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Raw16Khz16BitMonoPcm);

        // Creates a speech synthesizer; disposed automatically by the using block.
        using (SpeechSynthesizer synthesizer = new SpeechSynthesizer(speechConfig, null))
        {
            text = cleanText(text);

            // Starts speech synthesis, and returns after a single utterance is synthesized.
            string ssml = @"<speak version='1.0' xmlns='https://www.w3.org/2001/10/synthesis' xml:lang='en-US'><voice name='" + masterScript.voice + "'>" + text + "</voice></speak>";

            // NOTE(review): blocking on .Result stalls the Unity main thread until the
            // service responds; consider an async/coroutine flow.
            using (var result = synthesizer.SpeakSsmlAsync(ssml).Result)
            {
                // Checks result
                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    Debug.Log("Streaming Audio");

                    // Native playback is not supported on Unity yet (currently only supported
                    // on Windows/Linux Desktop), so play through the Unity audio API instead.

                    // Convert 16-bit little-endian PCM samples to floats in [-1, 1).
                    var sampleCount = result.AudioData.Length / 2;
                    var audioData   = new float[sampleCount];
                    for (var i = 0; i < sampleCount; ++i)
                    {
                        audioData[i] = (short)(result.AudioData[i * 2 + 1] << 8 | result.AudioData[i * 2]) / 32768.0F;
                    }

                    // The output audio format is 16K 16bit mono
                    var audioClip = AudioClip.Create("SynthesizedAudio", sampleCount, 1, 16000, false);
                    audioClip.SetData(audioData, 0);

                    // BUGFIX: the original also constructed an AudioSource with 'new', which
                    // is invalid for Unity components and was never used. Cache the component
                    // instead of calling GetComponent repeatedly.
                    var audioSource = gameObject.GetComponent<AudioSource>();
                    if (audioSource == null)
                    {
                        audioSource = (AudioSource)gameObject.AddComponent(typeof(AudioSource));
                    }

                    audioSource.clip   = audioClip;
                    audioSource.volume = (float)masterScript.audioVolume / 100;

                    // Toggle behavior: pause if already playing, otherwise start playback.
                    if (audioSource.isPlaying)
                    {
                        audioSource.Pause();
                    }
                    else
                    {
                        audioSource.Play();
                    }
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                    Debug.Log(cancellation.Reason);
                }
            }
        }
    }
コード例 #7
0
        /// <summary>
        /// Return an audio file for the passed in text
        /// </summary>
        /// <param name="text">Text to synthesize to speech.</param>
        /// <returns>The synthesized WAV audio bytes, or null if the speech service is
        /// unavailable, the call is cancelled, or an error occurs.</returns>
        public async Task <byte[]> TextToSpeechFile(string text)
        {
            if (!_availableServices.Contains(AzureServiceType.Speech))
            {
                return(null);
            }

            // BUGFIX: await WaitAsync() instead of the blocking Wait(); blocking a
            // thread-pool thread inside an async method risks starvation and deadlock.
            await _speechSemaphore.WaitAsync();
            try
            {
                StorageFolder localFolder = ApplicationData.Current.LocalFolder;

                //TODO Update to use PullAudioInputStream
                StorageFile storageFile = await localFolder.CreateFileAsync("TTSAudio.wav", CreationCollisionOption.ReplaceExisting);

                // Apply the configured voice, if any, before synthesis.
                if (!string.IsNullOrWhiteSpace(CurrentSpeakingVoice))
                {
                    _speechConfig.SpeechSynthesisVoiceName = CurrentSpeakingVoice;
                }

                SetProfanityOption(AzureProfanitySetting);

                using (var fileOutput = AudioConfig.FromWavFileOutput(storageFile.Path))
                {
                    using (var synthesizer = new SpeechSynthesizer(_speechConfig, fileOutput))
                    {
                        var result = await synthesizer.SpeakTextAsync(text);

                        if (result.Reason == ResultReason.Canceled)
                        {
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                            _logger.LogWarning($"Call cancelled.  {cancellation.Reason}");

                            if (cancellation.Reason == CancellationReason.Error)
                            {
                                _logger.Log($"Cancel error code = {cancellation.ErrorCode}");
                                _logger.Log($"Cancel details = {cancellation.ErrorDetails}");

                                // NOTE(review): this logs the auth hint only when ErrorCode
                                // is NoError, which looks inverted — confirm the intent.
                                if (cancellation.ErrorCode == CancellationErrorCode.NoError)
                                {
                                    _logger.Log("You may be having an authorization issue, are your keys correct and up to date?");
                                }
                            }
                            return(null);
                        }

                        _logger.Log($"Audio Received. '{result.Reason}'");
                        return(result.AudioData);
                    }
                }
            }
            catch (Exception ex)
            {
                string message = "Failed processing text to speech.";
                _logger.Log(message, ex);
                return(null);
            }
            finally
            {
                // Always release the semaphore, even on failure.
                _speechSemaphore.Release();
            }
        }
コード例 #8
0
        // ***** PERFORM TRANSLATION ON BUTTON CLICK
        // Translates the entered text with the Translator REST API, displays the
        // result, and speaks it through the Azure speech synthesizer.
        // (async void is acceptable here only because this is a UI event handler.)
        private async void TranslateButton_Click(object sender, EventArgs e)
        {
            // Translate the text
            string textToTranslate = TextToTranslate.Text.Trim();
            string fromLanguage    = FromLanguageComboBox.SelectedValue.ToString();
            string fromLanguageCode;

            // Auto-detect source language if requested
            if (fromLanguage == "Detect")
            {
                fromLanguageCode = DetectLanguage(textToTranslate);
                if (!languageCodes.Contains(fromLanguageCode))
                {
                    MessageBox.Show("The source language could not be detected automatically " +
                                    "or is not supported for translation.", "Language detection failed",
                                    MessageBoxButton.OK, MessageBoxImage.Error);
                    return;
                }
            }
            else
            {
                fromLanguageCode = languageCodesAndTitles[fromLanguage];
            }

            string toLanguageCode = languageCodesAndTitles[ToLanguageComboBox.SelectedValue.ToString()];

            TextToTranslate.Text = textToTranslate;

            // Handle null operations: no text or same source/target languages
            if (textToTranslate == "" || fromLanguageCode == toLanguageCode)
            {
                TranslatedTextLabel.Content = textToTranslate;
                return;
            }

            // Send translation request
            string endpoint = string.Format(TEXT_TRANSLATION_API_ENDPOINT, "translate");
            string uri      = string.Format(endpoint + "&from={0}&to={1}", fromLanguageCode, toLanguageCode);

            System.Object[] body        = new System.Object[] { new { Text = textToTranslate } };
            var             requestBody = JsonConvert.SerializeObject(body);

            // NOTE(review): creating a new HttpClient per click risks socket exhaustion;
            // prefer a single shared instance or IHttpClientFactory.
            using (var client = new HttpClient())
                using (var request = new HttpRequestMessage())
                {
                    request.Method     = HttpMethod.Post;
                    request.RequestUri = new Uri(uri);
                    request.Content    = new StringContent(requestBody, Encoding.UTF8, "application/json");
                    request.Headers.Add("Ocp-Apim-Subscription-Key", COGNITIVE_SERVICES_KEY);
                    request.Headers.Add("Ocp-Apim-Subscription-Region", REGION);
                    request.Headers.Add("X-ClientTraceId", Guid.NewGuid().ToString());

                    var response = await client.SendAsync(request);

                    // BUGFIX: fail fast on a non-success HTTP status instead of trying
                    // to deserialize an error payload as a translation result.
                    if (!response.IsSuccessStatusCode)
                    {
                        MessageBox.Show($"Translation request failed: {(int)response.StatusCode} {response.ReasonPhrase}");
                        return;
                    }

                    var responseBody = await response.Content.ReadAsStringAsync();

                    var responseResult = JsonConvert.DeserializeObject <List <Dictionary <string, List <Dictionary <string, string> > > > >(responseBody);
                    var translation    = responseResult[0]["translations"][0]["text"];

                    // Update the translation field
                    TranslatedTextLabel.Content = translation;

                    var config = SpeechConfig.FromSubscription(COGNITIVE_SPEECH_SERVICES_KEY, REGION);
                    // Speak in the Hindi language; change the code to any supported language.
                    config.SpeechSynthesisLanguage = "hi-IN";
                    using (var synthesizer = new SpeechSynthesizer(config))
                    {
                        using (var result = await synthesizer.SpeakTextAsync(translation))
                        {
                            if (result.Reason == ResultReason.Canceled)
                            {
                                var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                                MessageBox.Show($"CANCELED: Reason={cancellation.Reason}");

                                if (cancellation.Reason == CancellationReason.Error)
                                {
                                    MessageBox.Show($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                                    MessageBox.Show($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                                    MessageBox.Show($"CANCELED: Did you update the subscription info?");
                                }
                            }
                        }
                    }
                }
        }
        /// <summary>
        /// Synthesizes <paramref name="text"/> as SSML using a pooled synthesizer and
        /// streams the resulting audio, recording first-byte latency and total processing
        /// time into <c>latencyList</c> / <c>processingTimeList</c>.
        /// The synthesizer is returned to the pool on success and disposed on failure,
        /// and event handlers are always detached.
        /// </summary>
        /// <param name="text">Plain text to synthesize; wrapped in SSML by GenerateSsml.</param>
        public void Synthesize(string text)
        {
            var  start       = DateTime.Now;
            var  synthesizer = pool.Get();
            var  ssml        = GenerateSsml("en-US", "Female", speechConfig.SpeechSynthesisVoiceName, text);
            bool first       = true;

            // Fires for each streamed audio chunk; we only care about the first one,
            // which marks the first-byte latency of the service.
            void SynthesizingEvent(object sender, SpeechSynthesisEventArgs eventArgs)
            {
                // receive streaming audio here.
                if (!first)
                {
                    return;
                }

                Console.WriteLine("First byte latency: {0}", DateTime.Now - start);
                first = false;
                latencyList.Add((DateTime.Now - start).TotalMilliseconds);
            }

            // Reports service-side cancellation details (network/auth/quota errors, etc.).
            void SynthesizerSynthesisCanceled(object sender, SpeechSynthesisEventArgs e)
            {
                var cancellation = SpeechSynthesisCancellationDetails.FromResult(e.Result);

                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
            }

            synthesizer.Synthesizing      += SynthesizingEvent;
            synthesizer.SynthesisCanceled += SynthesizerSynthesisCanceled;

            // Tracks whether synthesis ran to completion so the finally block knows
            // whether the synthesizer is safe to reuse (pool) or must be disposed.
            bool succeeded = false;

            try
            {
                // GetAwaiter().GetResult() instead of .Result: surfaces the original
                // exception rather than wrapping it in an AggregateException.
                // NOTE(review): this still blocks the calling thread; the public
                // signature is synchronous, so the sync-over-async bridge is kept.
                var result = synthesizer.StartSpeakingSsmlAsync(ssml).GetAwaiter().GetResult();

                if (result.Reason == ResultReason.SynthesizingAudioStarted)
                {
                    uint totalSize = 0;
                    using (var audioDataStream = AudioDataStream.FromResult(result))
                    {
                        // buffer block size can be adjusted based on scenario
                        byte[] buffer     = new byte[4096];
                        uint   filledSize = 0;

                        // read audio block in a loop here
                        // if it is end of audio stream, it will return 0
                        // if there are error happening,  the cancel event will be called.
                        while ((filledSize = audioDataStream.ReadData(buffer)) > 0)
                        {
                            // Here you can save the audio or send the data to another pipeline in your service.
                            Console.WriteLine($"{filledSize} bytes received. Handle the data buffer here");

                            totalSize += filledSize;
                        }
                    }

                    if (totalSize > 0)
                    {
                        processingTimeList.Add((DateTime.Now - start).TotalMilliseconds);
                    }

                    succeeded = true;
                }
            }
            catch (Exception ex)
            {
                // Best-effort: log instead of rethrowing so a single failed utterance
                // does not take down the caller (original code also swallowed here,
                // but silently).
                Console.WriteLine($"Synthesize failed: {ex}");
            }
            finally
            {
                // Always detach handlers — the original leaked them (and the
                // synthesizer itself) whenever the result reason was not
                // SynthesizingAudioStarted, e.g. on cancellation.
                synthesizer.Synthesizing      -= SynthesizingEvent;
                synthesizer.SynthesisCanceled -= SynthesizerSynthesisCanceled;

                if (succeeded)
                {
                    pool.Put(synthesizer);
                }
                else
                {
                    synthesizer.Dispose();
                }
            }
        }
Code example #10
        /// <summary>
        /// Button handler: synthesizes the text box contents and pulls the audio
        /// back through an in-memory <see cref="AudioDataStream"/> instead of a
        /// speaker or file, reporting progress via NotifyUser.
        /// async void is acceptable here only because this is a top-level UI event handler.
        /// </summary>
        private async void SpeechSynthesisToStream_ButtonClicked()
        {
            // Guard: bail out early when the subscription key has not been entered.
            if (!AreKeysValid())
            {
                NotifyUser("Subscription Key is missing!", NotifyType.ErrorMessage);
                return;
            }
            else
            {
                NotifyUser(" ", NotifyType.StatusMessage);
            }

            // Creates an instance of a speech config with specified and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);

            config.SpeechSynthesisLanguage = this.SynthesisLanguage;

            // Creates a speech synthesizer with a null AudioConfig: audio is NOT routed
            // to a device; it is retrieved from the result via AudioDataStream below.
            // The explicit cast keeps the constructor overload unambiguous, consistent
            // with the other samples in this file.
            using (var synthesizer = new SpeechSynthesizer(config, null as AudioConfig))
            {
                // Subscribes to events.
                synthesizer.SynthesisStarted += (s, e) =>
                {
                    NotifyUser($"Speech synthesis started.", NotifyType.StatusMessage);
                };

                synthesizer.Synthesizing += (s, e) =>
                {
                    NotifyUser($"{e.Result.AudioData.Length} bytes received.", NotifyType.StatusMessage);
                };

                synthesizer.SynthesisCompleted += (s, e) =>
                {
                    NotifyUser($"Speech synthesis completed.", NotifyType.StatusMessage);
                };

                synthesizer.SynthesisCanceled += (s, e) =>
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(e.Result);

                    StringBuilder sb = new StringBuilder();
                    sb.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                    sb.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    sb.AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                    sb.AppendLine($"CANCELED: Did you update the subscription info?");

                    NotifyUser(sb.ToString(), NotifyType.ErrorMessage);
                };

                var text = this.TextForSynthesizingTextBox.Text;

                // Waits for completion.
                using (var result = await synthesizer.SpeakTextAsync(text).ConfigureAwait(false))
                {
                    using (var stream = AudioDataStream.FromResult(result))
                    {
                        byte[] buffer     = new byte[16000];
                        uint   totalSize  = 0;
                        uint   filledSize = 0;

                        // ReadData returns 0 at end of stream; errors surface through
                        // the SynthesisCanceled event above.
                        while ((filledSize = stream.ReadData(buffer)) > 0)
                        {
                            NotifyUser($"{filledSize} bytes received.", NotifyType.StatusMessage);
                            totalSize += filledSize;
                        }

                        NotifyUser($"{totalSize} bytes of audio data received for text [{text}]", NotifyType.StatusMessage);
                    }
                }
            }
        }
Code example #11
        /// <summary>
        /// Button handler: synthesizes the text box contents directly into a WAV file
        /// under the app's local folder, reporting progress and errors via NotifyUser.
        /// async void is used only because this is a top-level UI event handler.
        /// </summary>
        private async void SpeechSynthesisToFile_ButtonClicked()
        {
            // Guard: a missing subscription key means there is nothing useful to do.
            if (!AreKeysValid())
            {
                NotifyUser("Subscription Key is missing!", NotifyType.ErrorMessage);
                return;
            }

            NotifyUser(" ", NotifyType.StatusMessage);

            // User can also specify another under the ApplicationData.LocalFolder or Package.InstalledLocation
            var outputPath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio.wav");

            // Creates an instance of a speech config with specified and service region (e.g., "westus").
            var cfg = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
            cfg.SpeechSynthesisLanguage = this.SynthesisLanguage;

            // Route the synthesized audio straight to the WAV file.
            using (var fileOutput = AudioConfig.FromWavFileOutput(outputPath))
            using (var synthesizer = new SpeechSynthesizer(cfg, fileOutput))
            {
                // Wire up progress reporting before kicking off synthesis.
                synthesizer.SynthesisStarted += (sender, args) =>
                {
                    NotifyUser($"Speech synthesis started.", NotifyType.StatusMessage);
                };

                synthesizer.Synthesizing += (sender, args) =>
                {
                    NotifyUser($"{args.Result.AudioData.Length} bytes received.", NotifyType.StatusMessage);
                };

                synthesizer.SynthesisCompleted += (sender, args) =>
                {
                    NotifyUser($"Speech synthesis completed.", NotifyType.StatusMessage);
                };

                synthesizer.SynthesisCanceled += (sender, args) =>
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(args.Result);

                    var message = new StringBuilder();
                    message.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                    message.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    message.AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");

                    NotifyUser(message.ToString(), NotifyType.ErrorMessage);
                };

                // Run the synthesis to completion; the file is written as audio arrives.
                using (var speechResult = await synthesizer.SpeakTextAsync(this.TextForSynthesizingTextBox.Text).ConfigureAwait(false))
                {
                    if (speechResult.Reason == ResultReason.SynthesizingAudioCompleted)
                    {
                        NotifyUser($"Speech synthesis completed. The audio has been saved to {outputPath}.", NotifyType.StatusMessage);
                    }
                }
            }
        }