/// <summary>
        /// Reads the specified phrase, with the default text-to-speech voice.
        /// </summary>
        /// <param name="phrase">The text to say.</param>
        /// <param name="media">The <see cref="MediaElement"/> that plays the speech.</param>
        /// <remarks><para>This method is awaitable because, if recognition were left running,
        /// the speech recognizer could hear the spoken prompt and process it along with
        /// the user's speech. To avoid this, await the call to the <see cref="SpeakAsync"/> method
        /// and set <see cref="RecognitionMode"/> to <see cref="SpeechRecognitionMode.Dictation"/>
        /// only after it completes, so that the speech prompt ends before recognition begins.</para>
        /// <para>Also, the <see cref="SpeakAsync"/> method stops the current recognition session,
        /// so the user and any spoken prompts don't trigger speech commands.</para>
        /// <para>The <see cref="SpeakAsync"/> method uses the <see cref="SemaphoreSlim"/> class to implement
        /// a signal from the <see cref="MediaElement.MediaEnded"/> event handler to this method.
        /// </para>
        /// </remarks>
        public async Task SpeakAsync(string phrase, MediaElement media)
        {
            if (!String.IsNullOrEmpty(phrase))
            {
                // Turn off speech recognition while speech synthesis is happening.
                await SetRecognitionMode(SpeechRecognitionMode.Paused);

                MediaPlayerElement = media;
                SpeechSynthesisStream synthesisStream = await SpeechSynth.SynthesizeTextToStreamAsync(phrase);

                // The Play call starts the sound stream playback and immediately returns,
                // so a semaphore is required to make the SpeakAsync method awaitable.
                media.AutoPlay = true;
                media.SetSource(synthesisStream, synthesisStream.ContentType);
                media.Play();

                // Wait until the MediaEnded event on MediaElement is raised,
                // before turning on speech recognition again. The semaphore
                // is signaled in the mediaElement_MediaEnded event handler.
                await Semaphore.WaitAsync();

                // Turn on speech recognition and listen for commands.
                await SetRecognitionMode(SpeechRecognitionMode.CommandPhrases);
            }
        }
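
The remarks above rely on the MediaElement.MediaEnded event handler releasing the semaphore, but that handler is not shown here. A minimal sketch of the signaling half of the pattern, assuming a SemaphoreSlim field named Semaphore created with an initial count of zero (the field and handler names follow the comments above; they are assumptions, not the sample's verbatim code):

        private readonly SemaphoreSlim Semaphore = new SemaphoreSlim(0, 1);

        // Attached to MediaElement.MediaEnded; playback has finished, so
        // release SpeakAsync, which is blocked in Semaphore.WaitAsync().
        private void mediaElement_MediaEnded(object sender, RoutedEventArgs e)
        {
            Semaphore.Release();
        }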
        public void TestMethod1()
        {
            string startupText = "Welcome to the speech synthesizer. Please enter text and press enter to hear me speak what you write:";

            // Console.Out.ToString() returns the writer's type name, not the captured
            // output, so redirect standard output to a StringWriter first.
            using (var writer = new StringWriter())
            {
                Console.SetOut(writer);
                SpeechSynth.Main();
                Assert.AreEqual(startupText, writer.ToString().TrimEnd());
            }
        }
Example #3
        public async Task<bool> Test1()
        {
            using (var _synth = new SpeechSynth())
            {
                // Mixed Russian/English test phrase ("Привет. Можно ли чаю?" means
                // "Hello. May I have some tea?"); alternatives kept for experimentation:
                var m = "Привет. Можно ли чаю? Can I have a cup of tea?";
                //"That'd be just amazing!";
                //"Is this awesome or what!";
                //"Привет. Awesome! Awesome? Are you OK?";
                //"Hi there. Hi there! Hi there?";

                var voices = _voicenames
                             .Where(v => v.EndsWith("RUS"))
                             //.Where(v => v.EndsWith("ural"))
                             //.Where(v => v.StartsWith("zh-CN-Xia"))
                             //.Where(v => v.StartsWith("en-A"))
                             //.Where(v => v.StartsWith("ru-"))
                             .ToArray();

                // See https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/speech-synthesis-markup?tabs=csharp#adjust-speaking-styles
                var speakingStyles = new[] {
                    "angry",                // XiaoxiaoNeural only
                    //"sad",                // XiaoxiaoNeural only
                    //"affectionate",       // XiaoxiaoNeural only
                    //"newscast-formal",    // AriaNeural only
                    //"newscast-casual",    // AriaNeural only
                    //"customerservice",    // AriaNeural only
                    //"chat",               // AriaNeural only
                    //"cheerful",           // AriaNeural only
                    //"empathetic"          // AriaNeural only
                };

                Console.WriteLine($"{voices.Length} voice(s) x {speakingStyles.Length} style(s):");

                for (var v = 0; v < voices.Length; v++)
                {
                    for (var s = 0; s < speakingStyles.Length; s++)
                    {
                        //await _synth.SpeakAsync(m, VMode.Prosody, voices[v], speakingStyle: speakingStyles[s]);
                        await sayIt(_synth, m, voices, speakingStyles, s, v);
                    }
                }

                //await _synth.SpeakAsync(s, a, "ru-RU-Irina");
                //await _synth.SpeakAsync(s, a, "ru-RU-Pavel");
                //await _synth.SpeakAsync(s, a, "ru-RU-EkaterinaRUS");
                //await _synth.SpeakAsync(s, a, "ru-RU-DariyaNeural");    // bad ending
                //await _synth.SpeakAsync(s, a, "ru-RU-SvetlanaNeural");  // the best
                //await _synth.SpeakAsync(s, a, "ru-RU-DmitryNeural");    // best question intonation
            }
            return true;
        }
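
The speaking styles iterated above map to the mstts:express-as element in Azure Speech SSML (per the URL in the comment). For background, a hypothetical helper showing the markup such a style expands to; this is an assumption about what SpeakAsync sends, not SpeechSynth's actual implementation:

        // Sketch only: the mstts:express-as SSML behind a speaking style.
        static string BuildSsml(string text, string voice, string style) =>
            "<speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' " +
            "xmlns:mstts='https://www.w3.org/2001/mstts' xml:lang='en-US'>" +
            $"<voice name='{voice}'><mstts:express-as style='{style}'>" +
            text + "</mstts:express-as></voice></speak>";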
Example #4
        // Define the event handlers.
        // Raised when a new audio file appears: recognize the speech in the
        // file, generate a response, speak it, and time each stage.
        private void OnChanged(object source, FileSystemEventArgs e)
        {
            Stopwatch stopwatch = new Stopwatch();

            stopwatch.Start();
            var output = speechRecogniser.GetTopResult(e.FullPath);

            Console.WriteLine("RecognisedSpeech: " + stopwatch.ElapsedMilliseconds + "ms");
            Console.WriteLine(output);
            stopwatch.Restart();
            var response = responder.GetResponse(output);

            Console.WriteLine("Responder: " + stopwatch.ElapsedMilliseconds + "ms");
            stopwatch.Restart();
            // The handler is synchronous, so block until synthesis completes.
            SpeechSynth.SynthesisToSpeakerAsync(response).Wait();
            Console.WriteLine("SpeechSynthesiser: " + stopwatch.ElapsedMilliseconds + "ms");

            // Clean up the processed audio file.
            File.Delete(e.FullPath);
        }
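
The OnChanged handler is presumably registered with a FileSystemWatcher, which is not shown in this example. A minimal sketch of that wiring, assuming a hypothetical watch folder and .wav recordings as input:

        // Assumed setup, not part of the original example: watch a folder
        // for new .wav recordings and route each one through OnChanged.
        private void StartWatching(string folder)
        {
            var watcher = new FileSystemWatcher(folder, "*.wav");
            watcher.Created += OnChanged;
            watcher.EnableRaisingEvents = true;
        }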
Example #5
        private void Say()
        {
            var t = ddlSay.Text;

            if (!string.IsNullOrEmpty(t))
            {
                SpeechSynth.Say(t, CW);
                CW.LogToPlugin("Text: " + t);

                // Build a most-recently-used list: the new phrase first,
                // then the existing dropdown entries without duplicates.
                var p = new List<string> { t };
                foreach (var i in ddlSay.Items)
                {
                    var item = i.ToString();
                    if (!p.Contains(item) && !string.IsNullOrEmpty(item))
                    {
                        p.Add(item);
                    }
                }

                // Persist at most the ten most recent phrases, pipe-delimited.
                MainForm.Conf.TextSentences = string.Join("|", p.Take(10));
                PopSentences();
            }
        }
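
PopSentences presumably repopulates the dropdown from the persisted string. A minimal sketch of that round trip, assuming only the pipe-delimited format written above (a hypothetical reconstruction, not the plugin's actual code):

        // Hypothetical counterpart: split the persisted pipe-delimited
        // string back into dropdown items.
        private void PopSentences()
        {
            ddlSay.Items.Clear();
            foreach (var s in MainForm.Conf.TextSentences.Split('|'))
            {
                if (!string.IsNullOrEmpty(s))
                {
                    ddlSay.Items.Add(s);
                }
            }
        }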
Example #6
        static async Task sayIt(SpeechSynth _synth, string m, string[] voices, string[] speakingStyles, int s, int v)
        {
            Console.WriteLine($" --- {m},   VMode.Express,   {voices[v],-26}   {speakingStyles[s],-16}");
            await _synth.SpeakAsync(m, VMode.Express, voice: voices[v], styleForExpressOnly: speakingStyles[s]);
        }