Example #1
        private async void InitVoiceCommands()
        {
            var enableVoiceCommandsFileUri = new Uri("ms-appx:///Grammars/EnableGrammar.xml");
            var enableVoiceCommandsFile    = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(enableVoiceCommandsFileUri);

            var disableVoiceCommandsFileUri = new Uri("ms-appx:///Grammars/DisableGrammar.xml");
            var disableVoiceCommandsFile    = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(disableVoiceCommandsFileUri);

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            var enableGrammarConstraints  = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(enableVoiceCommandsFile);
            var disableGrammarConstraints = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(disableVoiceCommandsFile);
            speechRecognizer.Constraints.Add(enableGrammarConstraints);
            speechRecognizer.Constraints.Add(disableGrammarConstraints);

            Debug.WriteLine("Compiling grammar...");
            var compilationResults = await speechRecognizer.CompileConstraintsAsync();

            if (compilationResults.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
            {
                speechRecognizer.Timeouts.EndSilenceTimeout                    = TimeSpan.FromSeconds(1.5);
                speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                Debug.WriteLine("Grammar compiled.");
                Debug.WriteLine("Starting continuous recognition");
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
            }
            else
            {
                Debug.WriteLine("Could not complie grammar. " + compilationResults.Status);
            }
        }
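
The ContinuousRecognitionSession_Completed and ContinuousRecognitionSession_ResultGenerated handlers wired up above are not part of this snippet. A minimal sketch of plausible bodies (only the signatures are dictated by the subscriptions; the logic is an assumption):

        private void ContinuousRecognitionSession_ResultGenerated(Windows.Media.SpeechRecognition.SpeechContinuousRecognitionSession sender, Windows.Media.SpeechRecognition.SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Skip matches the engine itself rejected; args.Result.Constraint.Tag
            // would tell the enable grammar apart from the disable grammar (assumed handling).
            if (args.Result.Confidence == Windows.Media.SpeechRecognition.SpeechRecognitionConfidence.Rejected)
            {
                return;
            }

            Debug.WriteLine("Recognized: " + args.Result.Text);
        }

        private void ContinuousRecognitionSession_Completed(Windows.Media.SpeechRecognition.SpeechContinuousRecognitionSession sender, Windows.Media.SpeechRecognition.SpeechContinuousRecognitionCompletedEventArgs args)
        {
            // Completed fires on errors and timeouts as well as explicit stops.
            Debug.WriteLine("Continuous recognition ended: " + args.Status);
        }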
        public override IObservable <string> ListenUntilPause()
        {
            return(Observable.Create <string>(async ob =>
            {
                var speech = new WinSpeechRecognizer();
                await speech.CompileConstraintsAsync();
                this.ListenSubject.OnNext(true);

                var handler = new TypedEventHandler <SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>((sender, args) =>
                {
                    var words = args.Result.Text.Split(' ');
                    foreach (var word in words)
                    {
                        ob.OnNext(word);
                    }
                });
                speech.ContinuousRecognitionSession.ResultGenerated += handler;
                await speech.ContinuousRecognitionSession.StartAsync();

                return () =>
                {
                    _ = speech.ContinuousRecognitionSession.StopAsync(); // fire-and-forget: the cleanup Action cannot await
                    speech.ContinuousRecognitionSession.ResultGenerated -= handler;
                    this.ListenSubject.OnNext(false);
                    speech.Dispose();
                };
            }));
        }
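
A possible call site for ListenUntilPause (hypothetical usage; voiceService stands in for an instance of the class above, and disposing the subscription triggers the cleanup delegate):

        // Print each recognized word; disposing stops the session.
        IDisposable subscription = voiceService.ListenUntilPause()
            .Subscribe(word => Debug.WriteLine("Word: " + word));

        // ... later, when listening should end:
        subscription.Dispose();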
Example #3
            static async Task DoStart()
            {
                if (recognizer == null)
                {
                    recognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
                    var result = await recognizer.CompileConstraintsAsync().AsTask();

                    if (result.Status != SpeechRecognitionResultStatus.Success)
                    {
                        throw new Exception("Failed to start speech recognizer: " + result.Status);
                    }
                }

                recognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

                try
                {
                    await recognizer.ContinuousRecognitionSession.StartAsync();
                }
                catch (System.Runtime.InteropServices.COMException ex)
                {
                    recognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                    recognizer = null;
                    await Device.OS.OpenSettings("privacy-speechtyping");

                    throw new Exception(ex.Message, ex);
                }
            }
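
The COMException caught above is typically the one thrown when the user has declined the speech privacy policy. The official UWP speech sample tests for it with a well-known HResult; a sketch of that narrower check (the 0x80045509 constant comes from the Microsoft sample, the surrounding handling is assumed):

                catch (System.Runtime.InteropServices.COMException ex)
                    when ((uint)ex.HResult == 0x80045509) // privacy statement declined
                {
                    // Send the user to Settings > Privacy > Speech, inking & typing.
                    await Windows.System.Launcher.LaunchUriAsync(new Uri("ms-settings:privacy-speechtyping"));
                    throw;
                }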
        public async Task RecordSpeechFromMicrophoneAsync(string command)
        {
            string recognizedText = string.Empty;

            if (this.mute == false)
            {
                ElementSoundPlayer.State  = ElementSoundPlayerState.On;
                ElementSoundPlayer.Volume = 0.5;
                ElementSoundPlayer.Play(ElementSoundKind.Invoke);
                //BackgroundMediaPlayer.Current.SetUriSource(new Uri("ms-winsoundevent:Notification.Mail"));
            }

            BackgroundMediaPlayer.Current.Play();
            _ = Helper.MessageBoxLongAsync(command, "Voice"); // fire-and-forget prompt
            using (SpeechRecognizer recognizer =
                       new Windows.Media.SpeechRecognition.SpeechRecognizer())
            {
                await recognizer.CompileConstraintsAsync();

                SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    recognizedText = result.Text;
                }
            }
            VoiceResult = recognizedText;
        }
Example #5
        async void clickStart(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            await messageDialog.ShowAsync();

        }
        private async void BtnSpeechRecogWeatherSearchAsync_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Listen for audio input issues (optional):
            //speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London'";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            await messageDialog.ShowAsync();
        }
Example #7
 /// <summary>
 /// Event if recognizer state changed.
 /// </summary>
 /// <param name="sender">The sender.</param>
 /// <param name="args">The <see cref="Windows.Media.SpeechRecognition.SpeechRecognizerStateChangedEventArgs" /> instance containing the event data.</param>
 private static async void RecognizerStateChanged(Windows.Media.SpeechRecognition.SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
 {
     Log(LogLevel.Debug, "Speech recognizer state: " + args.State);
     if (args.State == SpeechRecognizerState.Idle)
     {
          lastListenCycle = DateTime.Now;
         await Listen();
     }
 }
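
The Listen method invoked here is outside the snippet. A minimal sketch, assuming it performs one recognition pass and relies on the Idle transition above to schedule the next one (recognizer and Log come from the surrounding class):

 private static async Task Listen()
 {
     // One pass; when the recognizer goes Idle again, RecognizerStateChanged
     // above calls Listen() and the loop continues.
     var result = await recognizer.RecognizeAsync();
     if (result.Status == SpeechRecognitionResultStatus.Success)
     {
         Log(LogLevel.Debug, "Heard: " + result.Text);
     }
 }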
Example #8
        public override IObservable <string> ListenUntilPause() => Observable.FromAsync(async ct =>
        {
            var speech = new WinSpeechRecognizer();
            await speech.CompileConstraintsAsync();
            this.ListenSubject.OnNext(true);
            var result = await speech.RecognizeAsync();
            this.ListenSubject.OnNext(false);

            return(result.Text);
        });
        private async void DefaultRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // No custom constraints (use the default dictation grammar)
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");
            await dialog.ShowAsync();
        }
Example #10
        /// <summary>
        /// While the user is speaking, update the textbox with the partial sentence of what's being said for user feedback.
        /// </summary>
        /// <param name="sender">The recognizer that has generated the hypothesis</param>
        /// <param name="args">The hypothesis formed</param>
        private async void SpeechRecognizer_HypothesisGenerated(Windows.Media.SpeechRecognition.SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Update the textbox with the currently confirmed text, and the hypothesis combined.
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                dictationTextBox.Text  = textboxContent;
                btnClearText.IsEnabled = true;
            });
        }
 public async Task InitializeAsync()
 {
     if (_speechRecognizer == null)
     {
         _speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
         _speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromMilliseconds(300);
         _speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
         _speechRecognizer.RecognitionQualityDegrading += SpeechRecognizer_RecognitionQualityDegrading;
         _speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
         _speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
         await SetConstraintsAsync();
     }
 }
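
SetConstraintsAsync is referenced but not included. A plausible minimal body, assuming the class optimizes for free-form dictation (the constraint choice is an assumption):

 private async Task SetConstraintsAsync()
 {
     // Hypothetical: dictation topic constraint, then compile and verify.
     _speechRecognizer.Constraints.Clear();
     _speechRecognizer.Constraints.Add(
         new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation"));

     var result = await _speechRecognizer.CompileConstraintsAsync();
     if (result.Status != SpeechRecognitionResultStatus.Success)
     {
         throw new InvalidOperationException("Grammar compilation failed: " + result.Status);
     }
 }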
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                throw;
            }

            try
            {
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                //// RecognizeWithUIAsync allows developers to customize the prompts.
                //speechRecognizer.UIOptions.AudiblePrompt = "Dictate a phrase or sentence...";
                //speechRecognizer.UIOptions.ExampleText = speechResourceMap.GetValue("DictationUIOptionsExampleText", speechContext).ValueAsString;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    //// Disable the recognition buttons.
                    //btnRecognizeWithUI.IsEnabled = false;
                    //btnRecognizeWithoutUI.IsEnabled = false;

                    //// Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                throw;
            }
        }
Example #14
        public async void initSpeech()
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            var fileUri = new Uri("ms-appx:///SRGS-Enhanced V2.grxml");
            StorageFile file = await StorageFile.GetFileFromApplicationUriAsync(fileUri);

            var grammarFileConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(file);

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'blue background', 'green text'";
            speechRecognizer.Constraints.Add(grammarFileConstraint);
            var status = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync(Windows.Media.SpeechRecognition.SpeechContinuousRecognitionMode.Default);
        }
Example #15
        public async void initSpeech()
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            var language = speechRecognizer.CurrentLanguage;
            var fileUri  = new Uri(String.Format("ms-appx:///SRGS-Enhanced-{0}.grxml", language.LanguageTag));
            StorageFile file = await StorageFile.GetFileFromApplicationUriAsync(fileUri);

            var grammarFileConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(file);

            speechRecognizer.Timeouts.EndSilenceTimeout = new TimeSpan(0, 0, 0, 0, 400);
            speechRecognizer.Constraints.Add(grammarFileConstraint);
            var status = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync(Windows.Media.SpeechRecognition.SpeechContinuousRecognitionMode.Default);
        }
Example #16
        private async void SpeechRecognizer_RecognitionQualityDegrading(Windows.Media.SpeechRecognition.SpeechRecognizer sender, SpeechRecognitionQualityDegradingEventArgs args)
        {
            // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
            // is not the primary input mechanism for the application.
            // Here, just remove any hypothesis text by resetting it to the last known good.
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                dictationTextBox.Text = dictatedTextBuilder.ToString();
                string problemText    = args.Problem.ToString();
                if (!string.IsNullOrEmpty(problemText))
                {
                    problemText = problemText.Length <= 25 ? problemText : (problemText.Substring(0, 25) + "...");

                    discardedTextBlock.Text       = "Recognition quality degrading: " + problemText;
                    discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
                }
            });
        }
        private async void StartRecognition(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            //SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            // Do something with the recognition result.
            var messageDialog = new MessageDialog(speechRecognitionResult.Text, "Text spoken");
            await messageDialog.ShowAsync();
        }
Example #18
        public override IObservable <string> ContinuousDictation() => Observable.Create <string>(async ob =>
        {
            var speech = new WinSpeechRecognizer();
            await speech.CompileConstraintsAsync();

            var handler = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(
                (sender, args) => ob.OnNext(args.Result.Text));
            speech.ContinuousRecognitionSession.ResultGenerated += handler;
            await speech.ContinuousRecognitionSession.StartAsync();
            this.ListenSubject.OnNext(true);

            return(() =>
            {
                //speech.ContinuousRecognitionSession.StopAsync();
                speech.ContinuousRecognitionSession.ResultGenerated -= handler;
                this.ListenSubject.OnNext(false);
                speech.Dispose();
            });
        });
Example #20
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated         -= SpeechRecognizer_HypothesisGenerated;
                speechRecognizer.RecognitionQualityDegrading -= SpeechRecognizer_RecognitionQualityDegrading;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                rootPage.NotifyUser("Grammar Compilation Failed: " + result.Status.ToString(), NotifyType.ErrorMessage);
                btnContinuousRecognize.IsEnabled = false;
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated         += SpeechRecognizer_HypothesisGenerated;
            speechRecognizer.RecognitionQualityDegrading += SpeechRecognizer_RecognitionQualityDegrading;
        }
        private async void TopicConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // Specify the web-search grammar (SpeechRecognitionTopicConstraint).

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London'";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");
            await dialog.ShowAsync();
        }
        private async void ListConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // Specify a programmatic list constraint (SpeechRecognitionListConstraint).

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            string[] responses = { "Yes", "No" };

            var list = new SpeechRecognitionListConstraint(responses, "yesOrNo");

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'yes', 'no'";
            speechRecognizer.Constraints.Add(list);

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");

            await dialog.ShowAsync();
        }
        private async void GrammarFileConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // SRGS grammar file (SpeechRecognitionGrammarFileConstraint).

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            var storageFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Sample.grxml"));

            var grammarFileConstraint = new SpeechRecognitionGrammarFileConstraint(storageFile, "colors");

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'blue background', 'green text'";
            speechRecognizer.Constraints.Add(grammarFileConstraint);

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");

            await dialog.ShowAsync();
        }
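
The Sample.grxml file is not part of the listing. For a self-contained variation, roughly the same color commands can be expressed in code with a SpeechRecognitionListConstraint instead of an SRGS file (a sketch; the phrase list is assumed):

        private async void ColorListConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Hypothetical phrase list standing in for the SRGS color rules.
            string[] phrases = { "blue background", "green text", "red background", "white text" };
            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(phrases, "colors"));

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");
            await dialog.ShowAsync();
        }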
        public override IObservable <string> ContinuousDictation()
        {
            return(Observable.Create <string>(async ob =>
            {
                var speech = new WinSpeechRecognizer();
                await speech.CompileConstraintsAsync();
                this.ListenSubject.OnNext(true);
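                // Note: RecognizeAsync is a single recognition pass, so despite the
                // method name this completes after one utterance rather than dictating
                // continuously (see the ContinuousRecognitionSession examples above).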
                var result = await speech.RecognizeAsync();
                var words = result.Text.Split(' ');

                foreach (var word in words)
                {
                    ob.OnNext(word);
                }

                return () =>
                {
                    this.ListenSubject.OnNext(false);
                    speech.Dispose();
                };
            }));
        }
        public static async Task <string> GetText()
        {
            var language = new Windows.Globalization.Language("en-US");

            using (var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(language))
            {
                await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.StateChanged += SpeechRecognizerStateChangedHandler;

                var result = await speechRecognizer.RecognizeAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    return(result.Text);
                }
                else
                {
                    // we need to control confidence and other factors
                }
            }

            return(null);
        }
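
A hypothetical call site for GetText (a null return signals that recognition did not succeed; SpeechHelper is an assumed enclosing class name):

        string spoken = await SpeechHelper.GetText();
        if (spoken != null)
        {
            Debug.WriteLine("You said: " + spoken);
        }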
Example #26
        /// <summary>
        /// Upon leaving, clean up the speech recognizer. Ensure we aren't still listening, and disable the event
        /// handlers to prevent leaks.
        /// </summary>
        /// <param name="e">Unused navigation parameters.</param>
        protected async override void OnNavigatedFrom(NavigationEventArgs e)
        {
            if (this.speechRecognizer != null)
            {
                if (isListening)
                {
                    await this.speechRecognizer.ContinuousRecognitionSession.CancelAsync();

                    isListening = false;
                    DictationButtonText.Text      = " Dictate";
                    cbLanguageSelection.IsEnabled = true;
                }

                dictationTextBox.Text = "";

                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
                speechRecognizer.StateChanged        -= SpeechRecognizer_StateChanged;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }
        }
 private async void InitializeSpeechRecognizer()
 {
     this.speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
     await this.speechRecognizer.CompileConstraintsAsync();
 }
 private void SpeechRecognizer_RecognitionQualityDegrading(SR.SpeechRecognizer sender, SR.SpeechRecognitionQualityDegradingEventArgs args)
 {
     Debug.WriteLine($"- Speech quality: {args.Problem}");
 }
Example #29
        private void SpeechRecognizer_StateChanged(Windows.Media.SpeechRecognition.SpeechRecognizer sender, Windows.Media.SpeechRecognition.SpeechRecognizerStateChangedEventArgs args)
        {
            var speechRecognizerState = args.State;

            Debug.WriteLine("Speech Recognizer State change to " + speechRecognizerState);
        }
Example #30
 private void SpeechRecognizer_StateChanged(SR.SpeechRecognizer sender, SR.SpeechRecognizerStateChangedEventArgs args)
 {
     Debug.WriteLine($"- {args.State}");
 }
Example #32
 private void SpeechRecognizer_StateChanged(Windows.Media.SpeechRecognition.SpeechRecognizer sender, Windows.Media.SpeechRecognition.SpeechRecognizerStateChangedEventArgs args)
 {
     var x = args.State;
 }
Example #33
 /// <summary>
 /// Provide feedback to the user based on whether the recognizer is receiving their voice input.
 /// </summary>
 /// <param name="sender">The recognizer that is currently running.</param>
 /// <param name="args">The current state of the recognizer.</param>
 private async void SpeechRecognizer_StateChanged(Windows.Media.SpeechRecognition.SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
 {
     await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => {
         rootPage.NotifyUser(args.State.ToString(), NotifyType.StatusMessage);
     });
 }
        // Start voice recognition
        private async void micro_Click(object sender, RoutedEventArgs e)
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            string[] responses = commandesHttp[0].ToArray();

            // Only match against known voice commands
            var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "commandeHttp");

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'Yana comment vas tu ?'";
            speechRecognizer.Constraints.Add(listConstraint);

            await speechRecognizer.CompileConstraintsAsync();

            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            allmessages.Add(new Message { TextMessage = speechRecognitionResult.Text, Time = DateTime.Now.ToString(), Status = "Sent", tofrom = true });

            var index = Array.FindIndex(responses, row => row.Contains(speechRecognitionResult.Text));
            string reponse = "";

            // Check whether the command is HTTP- or socket-based
            try
            {
                if (index < commandesHttp[1].Count)
                {
                    reponse = await getReponseHttp(commandesHttp[1][index]);
                }
                else
                {
                    getReponseSocket(speechRecognitionResult.Text);
                }

            }
            catch
            {
                reponse = "Une erreur est survenue";
            }

        }
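
getReponseHttp and getReponseSocket are defined elsewhere in this project. A minimal sketch of what the HTTP helper might look like, assuming each stored command maps to a URL (names and behavior are assumptions):

        private async Task<string> getReponseHttp(string url)
        {
            // Hypothetical: fetch the response body for the command's URL.
            using (var client = new Windows.Web.Http.HttpClient())
            {
                return await client.GetStringAsync(new Uri(url));
            }
        }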