Example #1
0
        /// <summary>
        /// Reads SSML from Message.xml and synthesizes it. With EXPORT defined the
        /// audio is encoded as MP3 and written to disk; otherwise it is played on
        /// the default speaker.
        /// </summary>
        static async Task SynthesizeAudioAsync()
        {
            // SECURITY FIX: the subscription key was hard-coded in source. Read it
            // from the environment instead (set SPEECH_KEY before running).
            var subscriptionKey = Environment.GetEnvironmentVariable("SPEECH_KEY")
                ?? throw new InvalidOperationException("SPEECH_KEY environment variable is not set.");

            var config = SpeechConfig.FromEndpoint(
                new Uri("https://eastus.api.cognitive.microsoft.com/sts/v1.0/issuetoken"),
                subscriptionKey);

            // Word-boundary events are not needed for plain file/speaker output.
            config.SetProperty("SpeechServiceResponse_Synthesis_WordBoundaryEnabled", "false");

            var fileName       = "Message.xml";
            var outputFileName = "01-AzureServerless-Intro.mp3";

#if EXPORT
            // Export path: request MP3 output; null AudioConfig keeps the result in memory.
            config.SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Audio24Khz160KBitRateMonoMp3);
            using var synthesizer = new SpeechSynthesizer(config, null);
            var ssml   = File.ReadAllText(fileName);
            var result = await synthesizer.SpeakSsmlAsync(ssml);

            // NOTE(review): SaveToWaveFileAsync writes the result's raw bytes; with the
            // MP3 output format above the file content is MP3 despite the method name.
            using var stream = AudioDataStream.FromResult(result);
            await stream.SaveToWaveFileAsync(outputFileName);
#else
            // Playback path: default AudioConfig plays on the default speaker.
            using var synthesizer = new SpeechSynthesizer(config);
            var ssml = File.ReadAllText(fileName);
            await synthesizer.SpeakSsmlAsync(ssml);
#endif
        }
        /// <summary>
        /// Synthesizes <paramref name="text"/> to an in-memory audio stream using
        /// either the neural or the standard Chinese voice.
        /// </summary>
        /// <param name="text">Plain text to synthesize.</param>
        /// <param name="useNeural">True selects zh-CN-XiaoxiaoNeural; false selects zh-CN-Yaoyao-Apollo.</param>
        /// <returns>The synthesized audio stream, or null on cancellation/failure.</returns>
        public async Task <AudioStream> SpeakAsync(string text, Boolean useNeural = false)
        {
            using var synthesizer = new SpeechSynthesizer(_speechConfig, null);

            var voiceName = useNeural ? "zh-CN-XiaoxiaoNeural" : "zh-CN-Yaoyao-Apollo";
            var ssml      = String.Format(SSML_TEMPLATE, text, voiceName);

            using var result = await synthesizer.SpeakSsmlAsync(ssml);

            switch (result.Reason)
            {
                case ResultReason.SynthesizingAudioCompleted:
                    return AudioDataStream.FromResult(result);

                case ResultReason.Canceled:
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                    break;
            }

            return null;
        }
Example #3
0
        /// <summary>
        /// Synthesizes the given SSML and writes the audio to a wave file.
        /// </summary>
        /// <param name="filename">Destination wave file path.</param>
        /// <param name="ssmlInput">SSML document to synthesize.</param>
        /// <param name="config">Speech service configuration to use.</param>
        /// <returns>True when the file was written; false on cancellation or any other failure.</returns>
        private async Task <bool> SynthesisToWaveFileAsync(string filename, string ssmlInput, SpeechConfig config)
        {
            using var synthesizer = new SpeechSynthesizer(config, null);
            using var result      = await synthesizer.SpeakSsmlAsync(ssmlInput);

            if (result.Reason == ResultReason.SynthesizingAudioCompleted)
            {
                // Success: persist the synthesized audio and report it.
                using var audioDataStream = AudioDataStream.FromResult(result);
                await audioDataStream.SaveToWaveFileAsync(filename);

                return true;
            }

            if (result.Reason == ResultReason.Canceled)
            {
                var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
            }

            return false;
        }
Example #4
0
        /// <summary>
        /// Listens on the default microphone for the configured keyword and, when it
        /// is recognized, saves the captured audio to a wave file.
        /// </summary>
        public static async Task KeywordRecognizer()
        {
            Console.WriteLine("say something ...");
            using (var audioInput = AudioConfig.FromDefaultMicrophoneInput())
            {
                using (var recognizer = new KeywordRecognizer(audioInput))
                {
                    // NOTE(review): placeholder path — replace with a real keyword model file.
                    var model  = KeywordRecognitionModel.FromFile("YourKeywordModelFilename.");
                    var result = await recognizer.RecognizeOnceAsync(model).ConfigureAwait(false);

                    Console.WriteLine($"got result reason as {result.Reason}");
                    if (result.Reason == ResultReason.RecognizedKeyword)
                    {
                        // FIX: dispose the stream when done (it was leaked in the original).
                        using (var stream = AudioDataStream.FromResult(result))
                        {
                            // Let some trailing audio accumulate after the keyword hit.
                            await Task.Delay(2000);

                            stream.DetachInput();
                            await stream.SaveToWaveFileAsync("AudioFromRecognizedKeyword.wav");
                        }
                    }
                    else
                    {
                        Console.WriteLine($"got result reason as {result.Reason}. You can't get audio when no keyword is recognized.");
                    }
                }
            }
        }
Example #5
0
        /// <summary>
        /// Saves the audio captured by the last keyword recognition to a user-chosen
        /// WAV file, going through a temp file because the picker yields a StorageFile.
        /// </summary>
        private async void SaveToFileButton_Click(object sender, RoutedEventArgs e)
        {
            // FIX: dispose the stream when done (it was leaked in the original).
            using var stream = AudioDataStream.FromResult(result);

            stream.DetachInput();
            RecognizeKeywordButton.IsEnabled = true;
            SaveToFileButton.IsEnabled       = false;

            var savePicker = new FileSavePicker();
            savePicker.SuggestedStartLocation = PickerLocationId.DocumentsLibrary;
            savePicker.FileTypeChoices.Add("WAV File", new List <string>()
            {
                ".wav"
            });
            savePicker.SuggestedFileName = "audio";

            var file = await savePicker.PickSaveFileAsync();

            if (file != null)
            {
                // FIX: build the path with Path.Combine instead of string interpolation.
                var tempFilePath = Path.Combine(ApplicationData.Current.TemporaryFolder.Path, "audio.wav");
                await stream.SaveToWaveFileAsync(tempFilePath);

                var tempFile = await StorageFile.GetFileFromPathAsync(tempFilePath);

                await tempFile.MoveAndReplaceAsync(file);
            }
        }
Example #6
0
        /// <summary>
        /// a task that converts text to speech and speaks it (on a separate thread)
        /// </summary>
        /// <param name="textToSpeak">plain text to speak</param>
        /// <returns>task result is the number of milliseconds to let the playback to complete</returns>
        public static async Task <int> SpeakTextAsync(string textToSpeak)
        {
            using (var synthesizer = new SpeechSynthesizer(speechConfig, null))
            {
                // Wrap the plain text in a minimal SSML envelope for the en-US Jessa voice.
                string ssml =
                    "<speak version=\"1.0\"" +
                    " xmlns=\"http://www.w3.org/2001/10/synthesis\"" +
                    " xml:lang=\"en-US\">" +
                    "<voice name=\"en-US-Jessa24kRUS\">" +
                    textToSpeak +
                    "</voice>" +
                    "</speak>";

                Debug.WriteLine(speechConfig.AuthorizationToken);

                using (var result = await synthesizer.SpeakSsmlAsync(ssml).ConfigureAwait(false))  // REVIEW: continue on the caller's thread?
                {
                    Debug.WriteLine(result.Reason);

                    if (result.Reason != ResultReason.SynthesizingAudioCompleted)
                    {
                        return 0;
                    }

                    // Native playback is not yet supported on UWP (only Windows/Linux
                    // Desktop), so save to a wave file and play it with MediaPlayer.
                    using (var audioStream = AudioDataStream.FromResult(result))
                    {
                        var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio_for_playback.wav");
                        await audioStream.SaveToWaveFileAsync(filePath);

                        MediaSource source = MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(filePath));
                        mediaPlayer.Source = source;
                        mediaPlayer.Play();

                        // MediaPlayer fills in Duration asynchronously; poll briefly
                        // (up to 5 s) until it appears, then return it as the delay.
                        int waitedMs = 0;
                        while (!source.Duration.HasValue && waitedMs < 5000)
                        {
                            await Task.Delay(5);

                            waitedMs += 5;
                        }
                        return (int)source.Duration.GetValueOrDefault().TotalMilliseconds;
                    }
                }
            }
        }
Example #7
0
        /// <summary>Text-to-speech into an in-memory stream.</summary>
        /// <param name="inputText">Text to synthesize.</param>
        /// <returns>The synthesized audio stream; the caller owns it and must dispose it.</returns>
        public static async Task <AudioDataStream> SynthesisToStreamAsync(string inputText)
        {
            var config = SpeechConfig.FromSubscription(subscriptionKey, region);

            using var synthesizer = new SpeechSynthesizer(config, null);

            // FIX: synthesize the parameter value, not the literal string "inputText".
            var result = await synthesizer.SpeakTextAsync(inputText);

            // FIX: the original declared the stream with `using`, so it was disposed
            // before being returned; ownership is now transferred to the caller.
            return AudioDataStream.FromResult(result);
        }
Example #8
0
        /// <summary>
        /// Synthesizes the given SSML as 24 kHz 16-bit mono PCM and saves it to disk.
        /// </summary>
        /// <param name="speech">SSML document to synthesize.</param>
        public async void saveSynthesis(string speech)
        {
            config.SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm);

            // FIX: dispose synthesizer, result and stream (all leaked in the original).
            using var synthesizerSave = new SpeechSynthesizer(config, null);

            using var result = await synthesizerSave.SpeakSsmlAsync(speech);

            using var stream = AudioDataStream.FromResult(result);
            // NOTE(review): hard-coded, user-specific output path — consider making it configurable.
            await stream.SaveToWaveFileAsync(@"C:\users\Goofynose\Desktop\audio.wav");

            MessageBox.Show("Saved.");
        }
        /// <summary>
        /// Synthesizes the text2Speech field to a wave file and plays it back with the
        /// WinRT MediaPlayer (native playback is not yet supported on UWP).
        /// </summary>
        private async void CoronaSpeakAsync()
        {
            // SECURITY FIX: the subscription key was hard-coded in source. Read it
            // from the environment instead (set SPEECH_KEY before running).
            var subscriptionKey = Environment.GetEnvironmentVariable("SPEECH_KEY");
            var config = SpeechConfig.FromSubscription(subscriptionKey, "northeurope");

            try
            {
                // Creates a speech synthesizer (null AudioConfig: no direct playback).
                using (var synthesizer = new SpeechSynthesizer(config, null))
                {
                    using (var result = await synthesizer.SpeakTextAsync(text2Speech).ConfigureAwait(false))
                    {
                        // Checks result.
                        if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            Console.WriteLine("Speech Synthesis Succeeded.");
                            Console.WriteLine(NotifyType.StatusMessage);

                            // Save synthesized audio data as a wave file and use MediaPlayer to play it.
                            using (var audioStream = AudioDataStream.FromResult(result))
                            {
                                var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio.wav");
                                await audioStream.SaveToWaveFileAsync(filePath);

                                mediaPlayer.Source = MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(filePath));
                                mediaPlayer.Play();
                            }
                        }
                        else if (result.Reason == ResultReason.Canceled)
                        {
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);

                            StringBuilder sb = new StringBuilder();
                            sb.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                            sb.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            sb.AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");

                            Console.WriteLine(sb.ToString());
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
                Console.WriteLine(NotifyType.ErrorMessage);
            }
        }
Example #10
0
        /// <summary>
        /// Synthesizes the text from the TextForSynthesis box, saves it to a wave file
        /// and plays it back with the WinRT MediaPlayer.
        /// </summary>
        private async void Speak_ButtonClicked(object sender, RoutedEventArgs e)
        {
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            try
            {
                using var synthesizer = new SpeechSynthesizer(config, null);
                using var result = await synthesizer.SpeakTextAsync(this.TextForSynthesis.Text).ConfigureAwait(false);

                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    NotifyUser($"Speech Synthesis Succeeded.", NotifyType.StatusMessage);

                    // UWP has no native playback yet: save to a wave file and play it
                    // with the WinRT MediaPlayer as a short-term solution.
                    using var audioStream = AudioDataStream.FromResult(result);
                    var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio.wav");
                    await audioStream.SaveToWaveFileAsync(filePath);

                    mediaPlayer.Source = MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(filePath));
                    mediaPlayer.Play();
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var details = SpeechSynthesisCancellationDetails.FromResult(result);

                    var message = new StringBuilder();
                    message.AppendLine($"CANCELED: Reason={details.Reason}");
                    message.AppendLine($"CANCELED: ErrorCode={details.ErrorCode}");
                    message.AppendLine($"CANCELED: ErrorDetails=[{details.ErrorDetails}]");

                    NotifyUser(message.ToString(), NotifyType.ErrorMessage);
                }
            }
            catch (Exception ex)
            {
                NotifyUser($"{ex.ToString()}", NotifyType.ErrorMessage);
            }
        }
        /// <summary>
        /// Synthesizes the text from the "Text for Synthesizing" box, saves it to a
        /// wave file and plays it back with the WinRT MediaPlayer.
        /// </summary>
        private async void SpeechSynthesisToSpeaker_ButtonClicked(object sender, RoutedEventArgs e)
        {
            if (!AreKeysValid())
            {
                NotifyUser("Subscription Key is missing!", NotifyType.ErrorMessage);
                return;
            }

            // Build the speech config from the user-supplied key, region and language.
            var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
            config.SpeechSynthesisLanguage = this.SynthesisLanguage;

            using (var synthesizer = new SpeechSynthesizer(config, null))
            using (var result = await synthesizer.SpeakTextAsync(this.TextForSynthesizingTextBox.Text).ConfigureAwait(false))
            {
                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    NotifyUser($"Speech Synthesis Succeeded.", NotifyType.StatusMessage);

                    // UWP has no native playback yet: save to a wave file and play it
                    // with the WinRT MediaPlayer as a short-term solution.
                    using (var audioStream = AudioDataStream.FromResult(result))
                    {
                        var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio_for_playback.wav");
                        await audioStream.SaveToWaveFileAsync(filePath);

                        mediaPlayer.Source = MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(filePath));
                        mediaPlayer.Play();
                    }
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var details = SpeechSynthesisCancellationDetails.FromResult(result);

                    var message = new StringBuilder();
                    message.AppendLine($"CANCELED: Reason={details.Reason}");
                    message.AppendLine($"CANCELED: ErrorCode={details.ErrorCode}");
                    message.AppendLine($"CANCELED: ErrorDetails=[{details.ErrorDetails}]");

                    NotifyUser(message.ToString(), NotifyType.ErrorMessage);
                }
            }
        }
Example #12
0
        /// <summary>
        /// Synthesizes <paramref name="text"/> to a wave file named after
        /// <paramref name="synthesisType"/> in the app's local folder.
        /// </summary>
        /// <param name="config">Speech service configuration.</param>
        /// <param name="text">Plain text to synthesize.</param>
        /// <param name="synthesisType">Label used for notifications and the output file name.</param>
        /// <returns>The saved file path on success; null on cancellation or error.</returns>
        private async Task <string> SynthesizeText(SpeechConfig config, string text, string synthesisType)
        {
            try
            {
                using (var synthesizer = new SpeechSynthesizer(config, null))
                {
                    using (var result = await synthesizer.SpeakTextAsync(text).ConfigureAwait(false))
                    {
                        if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            NotifyUser($"Speech Synthesis(" + synthesisType + ")Succeeded.", NotifyType.StatusMessage);

                            using (var audioStream = AudioDataStream.FromResult(result))
                            {
                                // Save the synthesized audio to <LocalFolder>/<synthesisType>.wav.
                                var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, synthesisType + ".wav");
                                await audioStream.SaveToWaveFileAsync(filePath);

                                return filePath;
                            }
                        }
                        else if (result.Reason == ResultReason.Canceled)
                        {
                            var           cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                            StringBuilder sb           = new StringBuilder();
                            sb.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                            sb.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            sb.AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");

                            NotifyUser(sb.ToString(), NotifyType.ErrorMessage);

                            return null;
                        }
                        else
                        {
                            return null;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                // FIX: the original bare catch swallowed the exception entirely and
                // reported it as a status message; surface the details as an error.
                NotifyUser($"Speech Synthesis Error: {ex.Message}", NotifyType.ErrorMessage);
                return null;
            }
        }
Example #13
0
        /// <summary>
        /// Synthesizes <paramref name="text"/> and plays it through an Android MediaPlayer
        /// from an in-memory stream.
        /// </summary>
        /// <param name="text">Plain text to synthesize.</param>
        /// <param name="key">Speech service subscription key.</param>
        /// <param name="region">Speech service region (e.g. "westus").</param>
        /// <param name="language">Synthesis language (e.g. "en-US").</param>
        /// <exception cref="Exception">Thrown when synthesis is canceled.</exception>
        public async Task SayIT(string text, string key, string region, string language)
        {
            string speechKey = key;

            var config = SpeechConfig.FromSubscription(speechKey, region);
            config.SpeechSynthesisLanguage = language;

            MediaPlayer mediaPlayer = new MediaPlayer();
            mediaPlayer.SetVolume(1f, 1f);

            // Creates a speech synthesizer (null AudioConfig: result stays in memory).
            using (var synthesizer = new SpeechSynthesizer(config, null))
            {
                using (var result = await synthesizer.SpeakTextAsync(text).ConfigureAwait(false))
                {
                    if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                    {
                        // FIX: removed the unused AudioDataStream wrapper — the audio
                        // bytes are read directly from result.AudioData.
                        // NOTE: the MemoryStream is intentionally not disposed here; the
                        // MediaPlayer reads from it asynchronously after Prepare().
                        MemoryStream ms = new MemoryStream(result.AudioData);
                        ms.Seek(0, SeekOrigin.Begin);
                        mediaPlayer.Prepared += (sender, e) =>
                        {
                            mediaPlayer.Start();
                        };
                        mediaPlayer.SetDataSource(new StreamMediaDataSource(ms));

                        mediaPlayer.Prepare();
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);

                        StringBuilder sb = new StringBuilder();
                        sb.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                        sb.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        sb.AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");

                        throw new Exception(sb.ToString());
                    }
                }
            }
        }
Example #14
0
        /// <summary>
        /// Test helper: synthesizes the word "stuff", saves it to the Pictures library
        /// and plays it back with the WinRT MediaPlayer.
        /// </summary>
        /// <exception cref="Exception">Thrown when synthesis is canceled.</exception>
        public async Task  SayTest()
        {
            var configuration = new ConfigurationBuilder().AddJsonFile("config.json").Build();

            if (configuration != null)
            {
                string speechKey    = configuration["keyspeech"];
                string endpoint     = configuration["endpointspeech"];
                string speechregion = configuration["region"];

                var config = SpeechConfig.FromSubscription(speechKey, speechregion);
                config.SpeechSynthesisLanguage = "en-US";

                MediaPlayer mediaPlayer = new MediaPlayer();

                using (var synthesizer = new SpeechSynthesizer(config, null))
                {
                    using (var result = await synthesizer.SpeakTextAsync("stuff").ConfigureAwait(false))
                    {
                        if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            using (var audioStream = AudioDataStream.FromResult(result))
                            {
                                StorageFolder picturesDirectory = KnownFolders.PicturesLibrary;
                                // Save synthesized audio data as a wave file and use MediaPlayer to play it
                                var filePath = Path.Combine(picturesDirectory.Path, "outputaudio.wav");
                                await audioStream.SaveToWaveFileAsync(filePath);

                                // FIX: the Source assignment was commented out, so Play()
                                // had nothing to play; restore it before starting playback.
                                mediaPlayer.Source = Windows.Media.Core.MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(filePath));
                                mediaPlayer.Play();
                            }
                        }
                        else if (result.Reason == ResultReason.Canceled)
                        {
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);

                            throw new Exception("Error: " + cancellation.ErrorDetails);
                        }
                    }
                }
            }

            await Task.CompletedTask;
        }
Example #15
0
        /// <summary>
        /// Synthesizes the Japanese text from the TextForSynthesis box, saves it to a
        /// wave file and plays it back with the WinRT MediaPlayer.
        /// </summary>
        private async void SpeakButton_Clicked(object sender, RoutedEventArgs e)
        {
            var config = SpeechConfig.FromSubscription("YOUR_API_KEY", "YOUR_LOCATION");
            config.SpeechSynthesisLanguage = "ja-JP";

            try
            {
                using var synthesizer = new SpeechSynthesizer(config, null);
                using var result = await synthesizer.SpeakTextAsync(this.TextForSynthesis.Text).ConfigureAwait(false);

                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    NotifyUser($"Speech Synthesis Succeeded.", NotifyType.StatusMessage);

                    // Save the audio to a wave file and play it with MediaPlayer.
                    using var audioStream = AudioDataStream.FromResult(result);
                    var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio.wav");
                    await audioStream.SaveToWaveFileAsync(filePath);

                    mediaPlayer.Source = MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(filePath));
                    mediaPlayer.Play();
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var details = SpeechSynthesisCancellationDetails.FromResult(result);

                    var message = new StringBuilder();
                    message.AppendLine($"CANCELED: Reason={details.Reason}");
                    message.AppendLine($"CANCELED: ErrorCode={details.ErrorCode}");
                    message.AppendLine($"CANCELED: ErrorDetails=[{details.ErrorDetails}]");

                    NotifyUser(message.ToString(), NotifyType.ErrorMessage);
                }
            }
            catch (Exception ex)
            {
                NotifyUser($"{ex.ToString()}", NotifyType.ErrorMessage);
            }
        }
Example #16
0
        /// <summary>
        /// Synthesizes <paramref name="text"/> and plays it through the WinRT MediaPlayer
        /// from an in-memory stream.
        /// </summary>
        /// <param name="text">Plain text to synthesize.</param>
        /// <param name="key">Speech service subscription key.</param>
        /// <param name="region">Speech service region (e.g. "westus").</param>
        /// <param name="language">Synthesis language (e.g. "en-US").</param>
        /// <exception cref="Exception">Thrown when synthesis is canceled.</exception>
        public async Task SayIT(string text, string key, string region, string language)
        {
            // NOTE(review): config.json is loaded but its values are unused (the key
            // comes from the parameter) — kept to preserve behavior; consider removing.
            var configuration = new ConfigurationBuilder().AddJsonFile("config.json").Build();

            if (configuration != null)
            {
                string speechKey = key;//configuration["keyspeech"];

                var config = SpeechConfig.FromSubscription(speechKey, region);
                config.SpeechSynthesisLanguage = language;

                MediaPlayer mediaPlayer = new MediaPlayer();

                using (var synthesizer = new SpeechSynthesizer(config, null))
                {
                    using (var result = await synthesizer.SpeakTextAsync(text).ConfigureAwait(false))
                    {
                        if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                        {
                            // FIX: removed the unused AudioDataStream wrapper — the audio
                            // bytes are read directly from result.AudioData.
                            // NOTE: the MemoryStream is intentionally not disposed here;
                            // the MediaPlayer reads from it asynchronously after Play().
                            MemoryStream ms = new MemoryStream(result.AudioData);
                            ms.Seek(0, SeekOrigin.Begin);
                            mediaPlayer.SetStreamSource(ms.AsRandomAccessStream());
                            mediaPlayer.Play();
                        }
                        else if (result.Reason == ResultReason.Canceled)
                        {
                            var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);

                            StringBuilder sb = new StringBuilder();
                            sb.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                            sb.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            sb.AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");

                            throw new Exception(sb.ToString());
                        }
                    }
                }
            }

            await Task.CompletedTask;
        }
Example #17
0
        /// <summary>
        /// Saves the synthesized audio to a wave file in the app's local folder and
        /// plays it back with a new WinRT MediaPlayer instance.
        /// </summary>
        /// <param name="speech">Completed synthesis result to play.</param>
        private async void Play(SpeechSynthesisResult speech)
        {
            using var audioStream = AudioDataStream.FromResult(speech);

            var filePath = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio_for_playback.wav");
            await audioStream.SaveToWaveFileAsync(filePath);

            var storageFile = await StorageFile.GetFileFromPathAsync(filePath);
            var mediaPlayer = new MediaPlayer();
            mediaPlayer.Source = MediaSource.CreateFromStorageFile(storageFile);
            mediaPlayer.Play();
        }
Example #18
0
        /// <summary>
        /// Reads the currently selected item aloud: synthesizes its text, saves the
        /// audio to a wave file and plays it back with the MediaPlayer.
        /// </summary>
        private async void Read() //Ugly code
        {
            var speechConfig = await TryGetSpeechConfig();
            if (speechConfig == null)
            {
                return;
            }
            if (string.IsNullOrEmpty(SelectedItem.Text))
            {
                return;
            }

            using (var synthesizer = new SpeechSynthesizer(speechConfig, null))
            using (var result = await synthesizer.SpeakTextAsync(SelectedItem.Text).ConfigureAwait(false))
            {
                if (result.Reason != ResultReason.SynthesizingAudioCompleted)
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result); //TODO Notify user
                    return;
                }

                using (var audioStream = AudioDataStream.FromResult(result))
                {
                    var path = Path.Combine(ApplicationData.Current.LocalFolder.Path, "outputaudio.wav");
                    await audioStream.SaveToWaveFileAsync(path);

                    mediaPlayer.Source = MediaSource.CreateFromStorageFile(await StorageFile.GetFileFromPathAsync(path));
                    mediaPlayer.Play();
                }
            }
        }
        /// <summary>
        /// Button handler: synthesizes the text box contents and drains the
        /// resulting audio back through an <see cref="AudioDataStream"/>,
        /// reporting progress and totals via <c>NotifyUser</c>.
        /// </summary>
        private async void SpeechSynthesisToStream_ButtonClicked()
        {
            if (!AreKeysValid())
            {
                NotifyUser("Subscription Key is missing!", NotifyType.ErrorMessage);
                return;
            }

            NotifyUser(" ", NotifyType.StatusMessage);

            // Speech config from the subscription key and service region (e.g., "westus").
            var speechConfig = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
            speechConfig.SpeechSynthesisLanguage = this.SynthesisLanguage;

            // Null audio config: audio is consumed from the result stream
            // rather than routed to an output device.
            using var synthesizer = new SpeechSynthesizer(speechConfig, null);

            // Progress and diagnostic events.
            synthesizer.SynthesisStarted += (s, e) => NotifyUser($"Speech synthesis started.", NotifyType.StatusMessage);
            synthesizer.Synthesizing += (s, e) => NotifyUser($"{e.Result.AudioData.Length} bytes received.", NotifyType.StatusMessage);
            synthesizer.SynthesisCompleted += (s, e) => NotifyUser($"Speech synthesis completed.", NotifyType.StatusMessage);
            synthesizer.SynthesisCanceled += (s, e) =>
            {
                var cancellation = SpeechSynthesisCancellationDetails.FromResult(e.Result);
                var details = new StringBuilder()
                    .AppendLine($"CANCELED: Reason={cancellation.Reason}")
                    .AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}")
                    .AppendLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]")
                    .AppendLine($"CANCELED: Did you update the subscription info?");

                NotifyUser(details.ToString(), NotifyType.ErrorMessage);
            };

            var text = this.TextForSynthesizingTextBox.Text;

            // Wait for completion, then read the audio back in 16 KB chunks.
            using var result = await synthesizer.SpeakTextAsync(text).ConfigureAwait(false);
            using var stream = AudioDataStream.FromResult(result);

            var buffer = new byte[16000];
            uint totalSize = 0;
            uint bytesRead = 0;

            while ((bytesRead = stream.ReadData(buffer)) > 0)
            {
                NotifyUser($"{bytesRead} bytes received.", NotifyType.StatusMessage);
                totalSize += bytesRead;
            }

            NotifyUser($"{totalSize} bytes of audio data received for text [{text}]", NotifyType.StatusMessage);
        }
Exemplo n.º 20
0
    /// <summary>
    /// Unity button handler: starts speech synthesis for the input field text
    /// and streams the resulting audio into an AudioClip for playback,
    /// recording first-byte latency.
    /// NOTE(review): blocks on Task.Result until synthesis starts; stalls the
    /// calling thread — presumably tolerable for this sample, worth confirming.
    /// </summary>
    public void ButtonClick()
    {
        // Flag the UI that a synthesis request is in flight.
        lock (threadLocker)
        {
            waitingForSpeak = true;
        }

        string newMessage = null;
        var    startTime  = DateTime.Now;

        // Starts speech synthesis, and returns once the synthesis is started.
        using (var result = synthesizer.StartSpeakingTextAsync(inputField.text).Result)
        {
            // Native playback is not supported on Unity yet (currently only supported on Windows/Linux Desktop).
            // Use the Unity API to play audio here as a short term solution.
            // Native playback support will be added in the future release.
            var audioDataStream   = AudioDataStream.FromResult(result);
            var isFirstAudioChunk = true;
            var audioClip         = AudioClip.Create(
                "Speech",
                SampleRate * 600, // Can speak 10mins audio as maximum
                1,
                SampleRate,
                true,
                (float[] audioChunk) =>
            {
                // Streaming PCM reader callback. NOTE(review): AudioClip streaming
                // callbacks typically run off the main thread — confirm the
                // captured state stays safe if this is extended.
                var chunkSize       = audioChunk.Length;
                var audioChunkBytes = new byte[chunkSize * 2];
                var readBytes       = audioDataStream.ReadData(audioChunkBytes);
                if (isFirstAudioChunk && readBytes > 0)
                {
                    // First audio bytes observed: record end-to-end first-byte latency.
                    var endTime       = DateTime.Now;
                    var latency       = endTime.Subtract(startTime).TotalMilliseconds;
                    newMessage        = $"Speech synthesis succeeded!\nLatency: {latency} ms.";
                    isFirstAudioChunk = false;
                }

                // Convert 16-bit little-endian PCM (low byte first) into float
                // samples scaled by 1/32768; zero-fill past the bytes actually read.
                for (int i = 0; i < chunkSize; ++i)
                {
                    if (i < readBytes / 2)
                    {
                        audioChunk[i] = (short)(audioChunkBytes[i * 2 + 1] << 8 | audioChunkBytes[i * 2]) / 32768.0F;
                    }
                    else
                    {
                        audioChunk[i] = 0.0f;
                    }
                }

                // End of stream: request playback stop after a short grace period.
                if (readBytes == 0)
                {
                    Thread.Sleep(200);     // Leave some time for the audioSource to finish playback
                    audioSourceNeedStop = true;
                }
            });

            audioSource.clip = audioClip;
            audioSource.Play();
        }

        // Publish the latency message (if any) and clear the busy flag.
        lock (threadLocker)
        {
            if (newMessage != null)
            {
                message = newMessage;
            }

            waitingForSpeak = false;
        }
    }
Exemplo n.º 21
0
        /// <summary>
        /// Console sample: repeatedly reads text from stdin, synthesizes it,
        /// saves the audio to a WAV file, then re-reads the same audio data
        /// stream in memory. Exits on empty input.
        /// </summary>
        public static async Task SynthesisToAudioDataStreamAsync()
        {
            // Replace with your own subscription key and service region (e.g., "westus").
            var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Null output stream: audio is not written anywhere automatically;
            // it is pulled from the result instead.
            using var synthesizer = new SpeechSynthesizer(speechConfig, null);

            while (true)
            {
                Console.WriteLine("Enter some text that you want to synthesize, or enter empty text to exit.");
                Console.Write("> ");
                var text = Console.ReadLine();
                if (string.IsNullOrEmpty(text))
                {
                    break;
                }

                using var result = await synthesizer.SpeakTextAsync(text);

                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    Console.WriteLine($"Speech synthesized for text [{text}].");

                    using var audioDataStream = AudioDataStream.FromResult(result);

                    // First, persist the entire stream to disk.
                    var fileName = "outputaudio.wav";
                    await audioDataStream.SaveToWaveFileAsync(fileName);
                    Console.WriteLine($"Audio data for text [{text}] was saved to [{fileName}]");

                    // Saving leaves the stream position at the end; rewind
                    // before reading the data again in memory.
                    audioDataStream.SetPosition(0);

                    var buffer = new byte[16000];
                    uint totalSize = 0;
                    uint chunkSize = 0;

                    while ((chunkSize = audioDataStream.ReadData(buffer)) > 0)
                    {
                        Console.WriteLine($"{chunkSize} bytes received.");
                        totalSize += chunkSize;
                    }

                    Console.WriteLine($"{totalSize} bytes of audio data received for text [{text}]");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
        /// <summary>
        /// Synthesizes <paramref name="text"/> via a pooled synthesizer,
        /// streaming the audio and recording first-byte latency and total
        /// processing time.
        /// Fixes over the original: the synthesizer is now unsubscribed and
        /// returned to the pool even when synthesis does not start, a failure
        /// in StartSpeakingSsmlAsync no longer leaks the synthesizer, and the
        /// result is disposed.
        /// </summary>
        /// <param name="text">Plain text to wrap in SSML and synthesize.</param>
        public void Synthesize(string text)
        {
            var  start       = DateTime.Now;
            var  synthesizer = pool.Get();
            var  ssml        = GenerateSsml("en-US", "Female", speechConfig.SpeechSynthesisVoiceName, text);
            bool first       = true;

            // Records first-byte latency when the first streamed audio chunk arrives.
            void SynthesizingEvent(object sender, SpeechSynthesisEventArgs eventArgs)
            {
                // receive streaming audio here.
                if (!first)
                {
                    return;
                }

                Console.WriteLine("First byte latency: {0}", DateTime.Now - start);
                first = false;
                latencyList.Add((DateTime.Now - start).TotalMilliseconds);
            }

            // Logs cancellation details for diagnostics.
            void SynthesizerSynthesisCanceled(object sender, SpeechSynthesisEventArgs e)
            {
                var cancellation = SpeechSynthesisCancellationDetails.FromResult(e.Result);

                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
            }

            // Detaches both handlers so a pooled/disposed synthesizer carries no
            // stale subscriptions.
            void Detach()
            {
                synthesizer.Synthesizing      -= SynthesizingEvent;
                synthesizer.SynthesisCanceled -= SynthesizerSynthesisCanceled;
            }

            synthesizer.Synthesizing      += SynthesizingEvent;
            synthesizer.SynthesisCanceled += SynthesizerSynthesisCanceled;

            SpeechSynthesisResult result;
            try
            {
                result = synthesizer.StartSpeakingSsmlAsync(ssml).Result;
            }
            catch
            {
                // BUG FIX: the original leaked the synthesizer (and its handlers)
                // when starting failed; keep propagating the exception as before.
                Detach();
                synthesizer.Dispose();
                throw;
            }

            try
            {
                if (result.Reason == ResultReason.SynthesizingAudioStarted)
                {
                    uint totalSize = 0;
                    using (var audioDataStream = AudioDataStream.FromResult(result))
                    {
                        // buffer block size can be adjusted based on scenario
                        byte[] buffer     = new byte[4096];
                        uint   filledSize = 0;

                        // Read audio blocks until end of stream (returns 0);
                        // errors surface through the SynthesisCanceled event.
                        while ((filledSize = audioDataStream.ReadData(buffer)) > 0)
                        {
                            // Here you can save the audio or send the data to another pipeline in your service.
                            Console.WriteLine($"{filledSize} bytes received. Handle the data buffer here");

                            totalSize += filledSize;
                        }
                    }

                    if (totalSize > 0)
                    {
                        processingTimeList.Add((DateTime.Now - start).TotalMilliseconds);
                    }
                }

                // BUG FIX: unsubscribe and return to the pool even when synthesis
                // did not start — the original only did this on the success path,
                // leaking pooled synthesizers otherwise.
                Detach();
                pool.Put(synthesizer);
            }
            catch (Exception)
            {
                // Matches the original contract: read errors are swallowed, and a
                // failed synthesizer is disposed rather than returned to the pool.
                Detach();
                synthesizer.Dispose();
            }
            finally
            {
                result.Dispose();
            }
        }
Exemplo n.º 23
0
 /// <summary>
 /// Creates an AudioStream wrapper around the given SDK audio data stream.
 /// </summary>
 /// <param name="stream">The underlying synthesis audio data stream; stored as-is.</param>
 public AudioStream(AudioDataStream stream)
 {
     dataStream = stream;
 }