Example #1
        private async void InitVoiceCommands()
        {
            var enableVoiceCommandsFileUri = new Uri("ms-appx:///Grammars/EnableGrammar.xml");
            var enableVoiceCommandsFile    = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(enableVoiceCommandsFileUri);

            var disableVoiceCommandsFileUri = new Uri("ms-appx:///Grammars/DisableGrammar.xml");
            var disableVoiceCommandsFile    = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(disableVoiceCommandsFileUri);

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint enableGrammarConstraint  = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(enableVoiceCommandsFile);
            Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint disableGrammarConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(disableVoiceCommandsFile);
            speechRecognizer.Constraints.Add(enableGrammarConstraint);
            speechRecognizer.Constraints.Add(disableGrammarConstraint);

            Debug.WriteLine("Compiling grammar...");
            var compilationResults = await speechRecognizer.CompileConstraintsAsync();

            if (compilationResults.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
            {
                speechRecognizer.Timeouts.EndSilenceTimeout                    = TimeSpan.FromSeconds(1.5);
                speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                Debug.WriteLine("Grammar compiled.");
                Debug.WriteLine("Starting continuous recognition");
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
            }
            else
            {
                Debug.WriteLine("Could not complie grammar. " + compilationResults.Status);
            }
        }
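The three handlers wired up above are not part of the snippet. A minimal sketch of what they might look like (the bodies are assumptions, not from the original source):

        private void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
        {
            // Trace recognizer state transitions (Idle, Capturing, Processing, ...).
            Debug.WriteLine("Recognizer state: " + args.State);
        }

        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // args.Result.Text holds the phrase matched by either grammar file.
            Debug.WriteLine("Recognized: " + args.Result.Text);
        }

        private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
        {
            // Fires when the session stops, times out, or fails; args.Status says why.
            Debug.WriteLine("Session completed: " + args.Status);
        }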
        public override IObservable<string> ListenUntilPause()
        {
            return Observable.Create<string>(async ob =>
            {
                var speech = new WinSpeechRecognizer();
                await speech.CompileConstraintsAsync();
                this.ListenSubject.OnNext(true);

                var handler = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>((sender, args) =>
                {
                    var words = args.Result.Text.Split(' ');
                    foreach (var word in words)
                    {
                        ob.OnNext(word);
                    }
                });
                speech.ContinuousRecognitionSession.ResultGenerated += handler;
                await speech.ContinuousRecognitionSession.StartAsync();

                return () =>
                {
                    speech.ContinuousRecognitionSession.ResultGenerated -= handler;
                    // StopAsync cannot be awaited from this synchronous cleanup,
                    // so it is fired and forgotten before the recognizer is disposed.
                    speech.ContinuousRecognitionSession.StopAsync();
                    this.ListenSubject.OnNext(false);
                    speech.Dispose();
                };
            });
        }
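A possible way to consume the observable above, assuming System.Reactive and an instance of the surrounding class named speechService (both assumptions):

        // Each recognized word arrives as a separate OnNext; disposing the
        // subscription runs the cleanup that stops the continuous session.
        var subscription = speechService.ListenUntilPause()
                                        .Subscribe(word => Debug.WriteLine("Heard: " + word));

        // Later, when done listening:
        subscription.Dispose();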
Example #3
        public async Task RecordSpeechFromMicrophoneAsync(string command)
        {
            string recognizedText = string.Empty;

            if (this.mute == false)
            {
                ElementSoundPlayer.State  = ElementSoundPlayerState.On;
                ElementSoundPlayer.Volume = 0.5;
                ElementSoundPlayer.Play(ElementSoundKind.Invoke);
                //BackgroundMediaPlayer.Current.SetUriSource(new Uri("ms-winsoundevent:Notification.Mail"));
            }

            BackgroundMediaPlayer.Current.Play();
            Helper.MessageBoxLongAsync(command, "Voice");
            using (SpeechRecognizer recognizer =
                       new Windows.Media.SpeechRecognition.SpeechRecognizer())
            {
                await recognizer.CompileConstraintsAsync();

                SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    recognizedText = result.Text;
                }
            }
            VoiceResult = recognizedText;
        }
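A hedged usage sketch for the method above (the prompt text is an assumption):

        // Plays the invoke sound (unless muted), shows the prompt, listens once,
        // and leaves the recognized text in VoiceResult.
        await RecordSpeechFromMicrophoneAsync("What can I do for you?");
        Debug.WriteLine(VoiceResult);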
Example #4
            static async Task DoStart()
            {
                if (recognizer == null)
                {
                    recognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
                    var result = await recognizer.CompileConstraintsAsync().AsTask();

                    if (result.Status != SpeechRecognitionResultStatus.Success)
                    {
                        throw new Exception("Failed to start speech recognizer: " + result.Status);
                    }
                }

                recognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

                try
                {
                    await recognizer.ContinuousRecognitionSession.StartAsync();
                }
                catch (System.Runtime.InteropServices.COMException ex)
                {
                    recognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                    recognizer = null;
                    await Device.OS.OpenSettings("privacy-speechtyping");

                    throw new Exception(ex.Message);
                }
            }
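The example only shows starting the session. A minimal matching DoStop sketch, reusing the same static recognizer field (this method is an assumption, not from the original):

            static async Task DoStop()
            {
                if (recognizer == null)
                {
                    return;
                }

                // Detach the handler first so no further results are raised,
                // then stop the continuous session.
                recognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                await recognizer.ContinuousRecognitionSession.StopAsync();
            }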
Example #5
        async void clickStart(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            await messageDialog.ShowAsync();

            //Task.Factory.StartNew(async () =>
            //{
            //    try
            //    {
            //          Speech.Initialize();
            //await Speech.StartRecognition();
            //    }
            //    catch (Exception ex)
            //    {
            //        throw ex;
            //    }

            //});
        }
        private async void BtnSpeechRecogWeatherSearchAsync_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Listen for audio input issues.
            // speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London'";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            //await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            await messageDialog.ShowAsync();
        }
Example #7
        public override IObservable<string> ListenUntilPause() => Observable.FromAsync(async ct =>
        {
            var speech = new WinSpeechRecognizer();
            await speech.CompileConstraintsAsync();
            this.ListenSubject.OnNext(true);
            var result = await speech.RecognizeAsync();
            this.ListenSubject.OnNext(false);

            return result.Text;
        });
        private async void DefaultRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // No custom constraints
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");
            await dialog.ShowAsync();
        }
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                throw;
            }

            try
            {
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                //// RecognizeWithUIAsync allows developers to customize the prompts.
                //speechRecognizer.UIOptions.AudiblePrompt = "Dictate a phrase or sentence...";
                //speechRecognizer.UIOptions.ExampleText = speechResourceMap.GetValue("DictationUIOptionsExampleText", speechContext).ValueAsString;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    //// Disable the recognition buttons.
                    //btnRecognizeWithUI.IsEnabled = false;
                    //btnRecognizeWithoutUI.IsEnabled = false;

                    //// Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                throw;
            }
        }
Example #10
        public async void initSpeech()
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            var uri = new Uri("ms-appx:///SRGS-Enhanced V2.grxml");
            StorageFile file = await StorageFile.GetFileFromApplicationUriAsync(uri);

            var grammarFileConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(file);

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'blue background', 'green text'";
            speechRecognizer.Constraints.Add(grammarFileConstraint);
            var status = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync(Windows.Media.SpeechRecognition.SpeechContinuousRecognitionMode.Default);
        }
        private async void StartRecognition(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            //SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();


            // Do something with the recognition result.
            var messageDialog = new MessageDialog(speechRecognitionResult.Text, "Text spoken");
            await messageDialog.ShowAsync();
        }
Example #12
        public async void initSpeech()
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            var language = speechRecognizer.CurrentLanguage;
            var uri = new Uri(String.Format("ms-appx:///SRGS-Enhanced-{0}.grxml", language.LanguageTag));
            StorageFile file = await StorageFile.GetFileFromApplicationUriAsync(uri);

            var grammarFileConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(file);

            speechRecognizer.Timeouts.EndSilenceTimeout = new TimeSpan(0, 0, 0, 0, 400);
            speechRecognizer.Constraints.Add(grammarFileConstraint);
            var status = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync(Windows.Media.SpeechRecognition.SpeechContinuousRecognitionMode.Default);
        }
Example #14
        public override IObservable<string> ContinuousDictation() => Observable.Create<string>(async ob =>
        {
            var speech = new WinSpeechRecognizer();
            await speech.CompileConstraintsAsync();

            var handler = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(
                (sender, args) => ob.OnNext(args.Result.Text));
            speech.ContinuousRecognitionSession.ResultGenerated += handler;
            await speech.ContinuousRecognitionSession.StartAsync();
            this.ListenSubject.OnNext(true);

            return () =>
            {
                //speech.ContinuousRecognitionSession.StopAsync();
                speech.ContinuousRecognitionSession.ResultGenerated -= handler;
                this.ListenSubject.OnNext(false);
                speech.Dispose();
            };
        });
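Hedged usage of ContinuousDictation (assumes System.Reactive). Note that the cleanup above detaches the handler and disposes the recognizer but, as written, never calls StopAsync:

        var dictation = ContinuousDictation()
            .Subscribe(phrase => Debug.WriteLine("Dictated: " + phrase));

        // Disposing ends the subscription and tears down the recognizer.
        dictation.Dispose();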
Example #15
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated         -= SpeechRecognizer_HypothesisGenerated;
                speechRecognizer.RecognitionQualityDegrading -= SpeechRecognizer_RecognitionQualityDegrading;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                rootPage.NotifyUser("Grammar Compilation Failed: " + result.Status.ToString(), NotifyType.ErrorMessage);
                btnContinuousRecognize.IsEnabled = false;
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated         += SpeechRecognizer_HypothesisGenerated;
            speechRecognizer.RecognitionQualityDegrading += SpeechRecognizer_RecognitionQualityDegrading;
        }
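The hypothesis handler subscribed above is not part of the snippet; a minimal sketch (the body is an assumption):

        private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            // Incremental feedback while the user is still speaking.
            System.Diagnostics.Debug.WriteLine("Hypothesis: " + args.Hypothesis.Text);
        }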
        private async void GrammarFileConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // SRGS grammar (SpeechRecognitionGrammarFileConstraint)

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            var storageFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Sample.grxml"));

            var grammarFileConstraint = new SpeechRecognitionGrammarFileConstraint(storageFile, "colors");

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'blue background', 'green text'";
            speechRecognizer.Constraints.Add(grammarFileConstraint);

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");

            await dialog.ShowAsync();
        }
        private async void ListConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // Specifying a list constraint programmatically (SpeechRecognitionListConstraint)

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            string[] responses = { "Yes", "No" };

            var list = new SpeechRecognitionListConstraint(responses, "yesOrNo");

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'yes', 'no'";
            speechRecognizer.Constraints.Add(list);

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");

            await dialog.ShowAsync();
        }
        private async void TopicConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
        {
            // Specifying the web search grammar (SpeechRecognitionTopicConstraint)

            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to speach for ...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            await speechRecognizer.CompileConstraintsAsync();

            var result = await speechRecognizer.RecognizeWithUIAsync();

            var dialog = new MessageDialog(result.Text, "Text spoken");
            await dialog.ShowAsync();
        }
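A sketch of the quality-degrading handler subscribed in the method above (the body is an assumption):

        private void speechRecognizer_RecognitionQualityDegrading(SpeechRecognizer sender, SpeechRecognitionQualityDegradingEventArgs args)
        {
            // args.Problem reports audio issues such as TooNoisy, TooQuiet, or TooFast.
            System.Diagnostics.Debug.WriteLine("Audio quality problem: " + args.Problem);
        }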
        public override IObservable<string> ContinuousDictation()
        {
            return Observable.Create<string>(async ob =>
            {
                var speech = new WinSpeechRecognizer();
                await speech.CompileConstraintsAsync();
                this.ListenSubject.OnNext(true);
                var result = await speech.RecognizeAsync();
                var words = result.Text.Split(' ');

                foreach (var word in words)
                {
                    ob.OnNext(word);
                }

                return () =>
                {
                    this.ListenSubject.OnNext(false);
                    speech.Dispose();
                };
            });
        }
Example #20
        public async void CallAssistant(TextBlock speechText)
        {
            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            //recognitionOperation = speechRecognizer.RecognizeAsync();
            //SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;

            //// Start recognition.

            //if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
            //{
            //    TextSaid = "\n" + speechRecognitionResult.Text;
            //}


            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            TextSaid        = "\n" + speechRecognitionResult.Text;
            speechText.Text = speechText.Text + TextSaid;
            //The block above is commented out because I am trying to live without a dialog box
        }
Example #21
        private async Task SetConstraintsAsync()
        {
            StorageFile roverCommandsActivationFile = await Package.Current.InstalledLocation.GetFileAsync("SRGS\\RoverCommandsActivation.grxml");

            StorageFile roverCommandsFile = await Package.Current.InstalledLocation.GetFileAsync("SRGS\\RoverCommands.grxml");

            _enableSpeechConstraint = new SR.SpeechRecognitionGrammarFileConstraint(
                roverCommandsActivationFile,
                "EnableCommanding");

            _commandingConstraint = new SR.SpeechRecognitionGrammarFileConstraint(
                roverCommandsFile,
                "Commands");

            Debug.WriteLine($"Activation file path: {roverCommandsActivationFile.Path}");
            Debug.WriteLine($"Commands file path: {roverCommandsFile.Path}");

            _speechRecognizer.Constraints.Add(_enableSpeechConstraint);
            _speechRecognizer.Constraints.Add(_commandingConstraint);
            _commandingConstraint.IsEnabled = false;
            var status = await _speechRecognizer.CompileConstraintsAsync();

            Debug.WriteLine($"Compilation ended with: {status.Status}");
        }
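Example #21 starts with the commanding grammar disabled, which suggests an activation-phrase pattern. One way the result handler might flip the constraints, keyed on the "EnableCommanding" tag set above (the handler itself is an assumption, not from the original; constraints may only be toggled while the session is paused):

        private async void ContinuousRecognitionSession_ResultGenerated(
            SR.SpeechContinuousRecognitionSession sender,
            SR.SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Constraint?.Tag == "EnableCommanding")
            {
                // Pause, swap the active grammar, then resume listening.
                await _speechRecognizer.ContinuousRecognitionSession.PauseAsync();
                _enableSpeechConstraint.IsEnabled = false;
                _commandingConstraint.IsEnabled = true;
                _speechRecognizer.ContinuousRecognitionSession.Resume();
            }
        }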
Example #22
        public static async Task<string> GetText()
        {
            var language = new Windows.Globalization.Language("en-US");

            using (var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(language))
            {
                await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.StateChanged += SpeechRecognizerStateChangedHandler;

                var result = await speechRecognizer.RecognizeAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    return result.Text;
                }
                else
                {
                    // we need to control confidence and other factors
                }
            }

            return null;
        }
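Hedged usage of the helper above:

        // Returns the recognized text on success, or null otherwise.
        string text = await GetText();
        if (text != null)
        {
            System.Diagnostics.Debug.WriteLine("You said: " + text);
        }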
Example #24
        // Launch speech recognition
        private async void micro_Click(object sender, RoutedEventArgs e)
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            string[] responses = commandesHttp[0].ToArray();

            // Only match against known voice commands
            var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "commandeHttp");

            speechRecognizer.UIOptions.ExampleText = @"Ex. 'Yana comment vas tu ?'";
            speechRecognizer.Constraints.Add(listConstraint);

            await speechRecognizer.CompileConstraintsAsync();

            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            allmessages.Add(new Message { TextMessage = speechRecognitionResult.Text, Time = DateTime.Now.ToString(), Status = "Sent", tofrom = true });

            var index = Array.FindIndex(responses, row => row.Contains(speechRecognitionResult.Text));
            string reponse = "";

            // Check whether the command is HTTP or socket
            try
            {
                if (index < commandesHttp[1].Count)
                {
                    reponse = await getReponseHttp(commandesHttp[1][index]);
                }
                else
                {
                    getReponseSocket(speechRecognitionResult.Text);
                }

            }
            catch
            {
                reponse = "Une erreur est survenue";
            }

        }