Example 1
 /// <summary>
 /// Intermediate speech recognition result received.
 /// Store the confidence so that we can calculate the average at the end.
 /// Also store the text that was recognized in this result segment.
 /// </summary>
 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     Debug.WriteLine("Recognized: " + args.Result.RawConfidence.ToString(CultureInfo.CurrentCulture) + ", " + args.Result.Text);
     _averageScore += args.Result.RawConfidence;
     _numSegments++;
     _recognizedText.AppendLine(args.Result.Text);
 }
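
The comments above defer the average to the end of the session; a minimal sketch of a matching Completed handler, reusing the same fields (the zero-segment guard is an addition):

 private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     // Guard against division by zero when nothing was recognized.
     double average = _numSegments > 0 ? _averageScore / _numSegments : 0.0;
     Debug.WriteLine("Average confidence: " + average.ToString(CultureInfo.CurrentCulture));
     Debug.WriteLine("Recognized text: " + _recognizedText.ToString());
 }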
Example 2
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     // We may choose to discard content that has low confidence, as that could indicate that we're picking up
     // noise via the microphone, or someone could be talking out of earshot.
      // Parenthesize the confidence checks: && binds tighter than ||.
      if ((args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
           args.Result.Confidence == SpeechRecognitionConfidence.High) &&
          _currentCase.SpeakText == args.Result.Text)
     {
         await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             ResultTextBlock.Text       = args.Result.Text;
             ResultDetailTextBlock.Text = "성공";
         });
     }
     else
     {
         // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
         // is not the primary input mechanism for the application.
          // Here, show the discarded text so the user can see what was rejected.
         await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             ResultTextBlock.Text       = "Discarded due to low/rejected Confidence: " + args.Result.Text;
             ResultDetailTextBlock.Text = "실패 [" + _currentCase.SpeakText + "]";
         });
     }
 }
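
Handlers like these assume a recognizer whose constraints were compiled and whose continuous session was started elsewhere; a rough wiring sketch (the _recognizer field name is illustrative):

 private SpeechRecognizer _recognizer; // illustrative field name

 private async Task StartContinuousRecognitionAsync()
 {
     _recognizer = new SpeechRecognizer();
     SpeechRecognitionCompilationResult compilation = await _recognizer.CompileConstraintsAsync();
     if (compilation.Status == SpeechRecognitionResultStatus.Success)
     {
         // Subscribe the handler shown above, then start listening.
         _recognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
         await _recognizer.ContinuousRecognitionSession.StartAsync();
     }
 }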
Example 3
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
                                async () =>
     {
         var cmd   = args.Result.SemanticInterpretation.Properties["cmd"][0].ToString();
         var param = "";
         if (args.Result.SemanticInterpretation.Properties.ContainsKey("param"))
         {
             param = args.Result.SemanticInterpretation.Properties["param"][0].ToString();
         }
         if (param == "")
         {
             if (cmd == "forw" || cmd == "back")
             {
                 param = "50";
             }
             if (cmd == "left" || cmd == "right")
             {
                 param = "90";
             }
         }
         stat.Text = cmd + " " + param;
         await Exec(cmd, double.Parse(param));
         // "Recognized, conf="+args.Result.Confidence.ToString();
     });
 }
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender,
                                                           SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 rootPage.NotifyUser("Automatic Time Out of Dictation", NotifyType.StatusMessage);
                 dictationTextBox.Text = dictatedTextBuilder.ToString();
                 isListening           = false;
             });
         }
         else
         {
             await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(),
                                     NotifyType.StatusMessage);
                 isListening = false;
             });
         }
     }
 }
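
The cmd/param semantic properties used above come from an SRGS grammar; loading one as a constraint looks roughly like this (the file name and tag are placeholders):

 private async Task LoadGrammarFileAsync(SpeechRecognizer recognizer)
 {
     // "Grammar.xml" is an illustrative app-package path, not from the sample.
     StorageFile grammarFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Grammar.xml"));
     recognizer.Constraints.Add(new SpeechRecognitionGrammarFileConstraint(grammarFile, "robot"));
     await recognizer.CompileConstraintsAsync();
 }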
Example 5
        private void MyRecognizer_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Write Debug Information
            Debug.WriteLine(args.Result.Text);
            NhanDienAsync("Recognized: " + args.Result.Text);
            // Drive robot on recognized speech

            switch (args.Result.Text)
            {
            case "move forward":
            case "move backward":
            case "turn right":
            case "turn left":
            case "rotate right":
            case "rote left":
            case "stop":
                MqttPublish(args.Result.Text);
                break;
            }
        }
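
Every case above forwards the matched phrase unchanged, so the same dispatch can be written as a set lookup; a sketch using the phrases from the switch (assumes System.Collections.Generic):

        // Equivalent set-based dispatch for the switch above.
        private static readonly HashSet<string> DriveCommands = new HashSet<string>
        {
            "move forward", "move backward", "turn right", "turn left",
            "rotate right", "rote left", "stop"
        };

        private void DispatchDriveCommand(string text)
        {
            if (DriveCommands.Contains(text))
            {
                MqttPublish(text);
            }
        }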
Example 6
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The garbage rule will not have a tag associated with it, the other rules will return a string matching the tag provided
            // when generating the grammar.
            string tag = AppResources.GetString("unknown");

            if (args.Result.Constraint != null)
            {
                tag = args.Result.Constraint.Tag;
            }
            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their
            // grammar based on testing.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    edHearState.Text         = $"{AppResources.GetString("Heard")}: {args.Result.Text}, ({AppResources.GetString("Tag")}: {tag}, {AppResources.GetString("Confidence")}: {args.Result.Confidence.ToString()})";
                    edContent.Text          += args.Result.Text;
                    edContent.SelectionStart = edContent.Text.Length;
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    edHearState.Text = $"{AppResources.GetString("VoiceCatchFailed")}. ({AppResources.GetString("Heard")}: {args.Result.Text}, {AppResources.GetString("Tag")}: {tag}, {AppResources.GetString("Confidence")}: {args.Result.Confidence.ToString()})";
                });
            }
        }
Example 7
 /// <summary>
 /// Handle events fired when error conditions occur, such as the microphone becoming unavailable, or if
 /// some transient issues occur.
 /// </summary>
 /// <param name="sender">The continuous recognition session</param>
 /// <param name="args">The state of the recognizer</param>
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         // If TimeoutExceeded occurs, the user has been silent for too long. We can use this to
      // cancel recognition if the user is in dictation mode and walks away from their device, etc.
         // In a global-command type scenario, this timeout won't apply automatically.
         // With dictation (no grammar in place) modes, the default timeout is 20 seconds.
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await Dispatcher.InvokeAsync(() => {
                 btnContinuousRecognize.Content = "Speak";
                 DictationTextBox.Text          = dictatedTextBuilder.ToString() + "Automatic Time Out of Dictation";
                 isListening = false;
             });
         }
         else
         {
             await Dispatcher.InvokeAsync(() => {
                 DictationTextBox.Text          = "Continuous Recognition Completed: " + args.Status.ToString();
                 btnContinuousRecognize.Content = "Speak";
                 isListening = false;
             });
         }
     }
 }
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
                Windows.UI.Core.CoreDispatcherPriority.Normal,
                () =>
                {
                    MessageDialog dialog = new MessageDialog("Voice recognization time out and stop");
                    dialog.ShowAsync();
                });
         }
         else
         {
             await CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
                Windows.UI.Core.CoreDispatcherPriority.Normal,
                () =>
                {
                    //MessageDialog dialog = new MessageDialog("Voice recognization ended");
                    //dialog.ShowAsync();
                    if (speechRecognizer.State == SpeechRecognizerState.Idle)
                    {
                        speechRecognizer.ContinuousRecognitionSession.StartAsync();
                    }
                });
         }
     }
 }
        private async void StartSpeechRecognizer()
        {
            // Compile the loaded GrammarFiles
            SpeechRecognitionCompilationResult compilationResult = await _recognizer.CompileConstraintsAsync();

            // If successful, display the recognition result.
            if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Result: " + compilationResult.ToString());

                SpeechContinuousRecognitionSession session = _recognizer.ContinuousRecognitionSession;
                try
                {
                    await session.StartAsync();
                }
                catch (Exception e)
                {
                    //TODO this needs to report to the user that something failed.
                    //also potentially write to a log somewhere.
                    Debug.WriteLine(e.Message);
                }
            }
            else
            {
                //TODO this needs to report to the user that something failed.
                //also potentially write to a log somewhere.
                Debug.WriteLine("Status: " + compilationResult.Status);
            }
        }
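
For the TODOs above, one way to surface the failure to the user is a plain dialog; a sketch (the message wording is illustrative):

        private async Task ReportRecognitionErrorAsync(string detail)
        {
            // Minimal user-facing report; a real app might also write to a log.
            var dialog = new Windows.UI.Popups.MessageDialog("Speech recognition failed: " + detail);
            await dialog.ShowAsync();
        }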
Example 10
 void OnContinuousRecognitionSessionCompleted(
     SpeechContinuousRecognitionSession sender,
     SpeechContinuousRecognitionCompletedEventArgs args)
 {
     _isInRecognitionSession = false;
     OnStateChanged(new StateChangedEventArgs(args));
 }
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The garbage rule will not have a tag associated with it, the other rules will return a string matching the tag provided
            // when generating the grammar.
            string tag = "unknown";

            if (args.Result.Constraint != null)
            {
                tag = args.Result.Constraint.Tag;
            }

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their
            // grammar based on testing.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Text = string.Format("Heard: '{0}', (Tag: '{1}', Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString());
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Text = string.Format("Sorry, I didn't catch that. (Heard: '{0}', Tag: {1}, Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString());
                });
            }
        }
 private async void Recogniser_Completed(
     SpeechContinuousRecognitionSession sender,
     SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 Result?.Invoke(_builder.ToString());
                 Completed?.Invoke();
                 _listening = false;
             });
         }
         else
         {
             await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 Completed?.Invoke();
                 _listening = false;
             });
         }
     }
 }
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         // If TimeoutExceeded occurs, the user has been silent for too long. We can use this to
      // cancel recognition if the user is in dictation mode and walks away from their device, etc.
         // In a global-command type scenario, this timeout won't apply automatically.
         // With dictation (no grammar in place) modes, the default timeout is 20 seconds.
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
             {
                 this.speechRecognitionControlButtonSymbol.Symbol = Symbol.Refresh;
                 this.speechRecognitionTextBox.PlaceholderText    = "";
                 this.speechRecognitionTextBox.Text = dictatedTextBuilder.ToString();
                 this.isCapturingSpeech             = false;
             });
         }
         else
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
             {
                 this.speechRecognitionControlButtonSymbol.Symbol = Symbol.Refresh;
                 this.speechRecognitionTextBox.PlaceholderText    = "";
                 this.isCapturingSpeech = false;
             });
         }
     }
 }
Example 14
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            try
            {
                string s = args.Result.Text;

                if (args.Result.Status == SpeechRecognitionResultStatus.Success)
                {
                    dictatedTextBuilder.Append(s + " ");
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        dictationTextBox.Text  = dictatedTextBuilder.ToString();
                        btnClearText.IsEnabled = true;
                    });
                }
                else
                {
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        dictationTextBox.Text = dictatedTextBuilder.ToString();
                    });
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
        }
Example 15
        private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
        {
            if (args.Status != SpeechRecognitionResultStatus.Success)
            {
                if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
                {
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () =>
                    {
                        await StopRecognition();
                        checkError.Visibility = Visibility.Visible;
                        errorCheck.Visibility = Visibility.Visible;
                        errorCheck.Text       = "Automatic Time out of Dictation";
                        dictationTextBox.Text = dictatedTextBuilder.ToString();
                    });
                }
                else
                {
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () => {
                        await StopRecognition();

                        checkError.Visibility = Visibility.Visible;
                        errorCheck.Visibility = Visibility.Visible;
                        errorCheck.Text       = "Continuous Recognition Completed: " + args.Status.ToString();
                    });
                }
            }
        }
Example 16
        async void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (recognizer_ == null || recognizer_.ContinuousRecognitionSession != sender)
            {
                return;
            }

            dictatedTextBuilder_.Append(args.Result.Text + "|");

            System.Diagnostics.Debug.WriteLine("ResultGenerated:" + args.Result.Confidence + " " + args.Result.Text);

            lock (eventQue_)
            {
                var confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Rejected;
                switch (args.Result.Confidence)
                {
                case SpeechRecognitionConfidence.High: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.High; break;

                case SpeechRecognitionConfidence.Low: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Low; break;

                case SpeechRecognitionConfidence.Medium: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Medium; break;

                case SpeechRecognitionConfidence.Rejected: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Rejected; break;
                }
                eventQue_.Enqueue(new RecoEvent_
                {
                    eventType       = RecoEvent_.EventType.DictationResult,
                    text            = args.Result.Text,
                    confidenceLevel = confidenceLevel,
                });
            }
        }
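
The locked queue above is the usual bridge back to Unity's main thread; a sketch of draining it from Update(), following the field names used here (the logging is illustrative):

        void Update()
        {
            lock (eventQue_)
            {
                while (eventQue_.Count > 0)
                {
                    RecoEvent_ ev = eventQue_.Dequeue();
                    if (ev.eventType == RecoEvent_.EventType.DictationResult)
                    {
                        // Hand off to whatever consumes dictation results.
                        UnityEngine.Debug.Log(ev.text + " (" + ev.confidenceLevel + ")");
                    }
                }
            }
        }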
Example 17
        private static async void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                string resultGeneratedText = args.Result.Text + " ";
                dictatedTextBuilder.Append(resultGeneratedText);
                recognizedText     += resultGeneratedText;
                resultGeneratedText = string.Empty;

                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    RtfTextHelper.Text = dictatedTextBuilder.ToString();
                });
            }
            else
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    RtfTextHelper.Text = dictatedTextBuilder.ToString();
                });
            }
        }
Example 18
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender,
                                                                  SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var text = string.Empty;
            var tag  = string.Empty;

            if (args.Result != null)
            {
                text = args.Result.Text;

                // Only dereference Constraint once we know Result is non-null.
                if (args.Result.Constraint != null)
                {
                    tag = args.Result.Constraint.Tag;
                }
            }

            Debug.WriteLine($"ContinuousRecognitionSession_ResultGenerated {text} {tag} {args.Result.Confidence.ToString()}");

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their
            // grammar based on testing.
            if (!string.IsNullOrEmpty(tag) &&
                (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High))
            {
                OnPhraseRecognized(1, text, tag);
            }
            else
            {
                OnPhraseRecognized(-1, text, tag);
            }
        }
Example 19
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            //if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
            //    args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                AppendTextToDictationOutput(args.Result.Text);
            }
            //else
            //{
            //    // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
            //    // is not the primary input mechanism for the application.
            //    // Here, just remove any hypothesis text by resetting it to the last known good.
            //    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            //    {
            //        dictationTextBox.Text = dictatedTextBuilder.ToString();
            //        string discardedText = args.Result.Text;
            //        if (!string.IsNullOrEmpty(discardedText))
            //        {
            //            discardedText = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");

            //            discardedTextBlock.Text = "Discarded due to low/rejected Confidence: " + discardedText;
            //            discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
            //        }
            //    });
            //}
        }
        /// <summary>
        /// Handle events fired when a result is generated in the continuous recognition mode.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private static void OnContinuousRecognitionSessionResultGeneratedHandler(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            SpeechRecognitionResult result = args.Result;

            if (result.Confidence != SpeechRecognitionConfidence.Rejected)
            {
                var  altResults = result.GetAlternates(s_maxRecognitionResultAlternates);
                uint idx        = 0;
                foreach (var currentAltResult in altResults)
                {
                    if (currentAltResult.Confidence == SpeechRecognitionConfidence.Rejected)
                    {
                        break;
                    }
                    int    num;
                    string speechResult = currentAltResult.Text.Remove(currentAltResult.Text.Length - 1);
                    bool   isNumber     = Int32.TryParse(speechResult, out num);
                    if (isNumber)
                    {
                        MainPage.SetAudioTempCommand(speechResult);
                    }
                    else
                    {
                        UiUtils.ShowNotification("Your message could not be parsed as number. Please specify a number!");
                    }
                    idx++;
                }
            }
            else
            {
                UiUtils.ShowNotification("Sorry, could not get that. Can you repeat?");
            }
        }
        private async void continuousRecognitionSessionOnResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            try
            {
                Debug.WriteLine(args.Result.Confidence.ToString());

                if (args.Result.Confidence == SpeechRecognitionConfidence.Low)
                {
                    return;
                }

                if (args.Result.Confidence == SpeechRecognitionConfidence.Medium)
                {
                    //await SpeechService.BadlyUnderstood();
                    mainPage.StartColorAnimation(mainPage.RecognitionLight, "(RecognitionLight.Background).Color", Colors.Black, Colors.Red, 2);
                }
                else if (args.Result.Confidence == SpeechRecognitionConfidence.High)
                {
                    await speechRecognizer.SpeechRecognizer.ContinuousRecognitionSession.PauseAsync();

                    Debug.WriteLine("Speech Recognition stopped");

                    mainPage.StartColorAnimation(mainPage.RecognitionLight, "(RecognitionLight.Background).Color", Colors.Black, Colors.Green);
                    await handleRecognizedSpeech(evaluateSpeechInput(args.Result));

                    speechRecognizer.SpeechRecognizer.ContinuousRecognitionSession.Resume();

                    Debug.WriteLine("Speech Recognition started");
                }
            }
            catch (Exception)
            {
                // ignored
            }
        }
Example 22
 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Confidence == SpeechRecognitionConfidence.High || args.Result.Confidence == SpeechRecognitionConfidence.Medium)
     {
         RunOnCoreDispatcherIfPossible(() => WakeUpAndListen(), false);
     }
 }
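
A wake-word handler like this is typically backed by a short list constraint; a sketch (the phrase and tag are illustrative):

 private async Task AddWakePhraseAsync(SpeechRecognizer speechRecognizer)
 {
     var wake = new SpeechRecognitionListConstraint(new[] { "hey device" }, "wake");
     speechRecognizer.Constraints.Add(wake);
     await speechRecognizer.CompileConstraintsAsync();
 }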
Example 23
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            string s = args.Result.Text;

            if (args.Result.Status == SpeechRecognitionResultStatus.Success)
            {
                dictatedTextBuilder.Append(s + " ");
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text  = dictatedTextBuilder.ToString();
                    btnClearText.IsEnabled = true;
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                });
            }

            this.toSpeak = s;
        }
Example 24
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (speechRecognizer.State == SpeechRecognizerState.Idle)
     {
         await speechRecognizer.ContinuousRecognitionSession.StartAsync();
     }
 }
Example 25
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High || args.Result.Confidence == SpeechRecognitionConfidence.Low)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text       = args.Result.Text;

                    if (args.Result.Text == "product")
                    {
                        this.Frame.Navigate(typeof(addProduct), null);
                        backButton();
                    }
                });
            }

            else
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text       = "Sorry, I didn't catch that.";
                });
            }
        }
Example 26
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         this.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage);
     });
 }
Example 27
 /// <summary>
 /// Handle events fired when error conditions occur, such as the microphone becoming unavailable, or if
 /// some transient issues occur.
 /// </summary>
 /// <param name="sender">The continuous recognition session</param>
 /// <param name="args">The state of the recognizer</param>
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         // If TimeoutExceeded occurs, the user has been silent for too long. We can use this to
      // cancel recognition if the user is in dictation mode and walks away from their device, etc.
         // In a global-command type scenario, this timeout won't apply automatically.
         // With dictation (no grammar in place) modes, the default timeout is 20 seconds.
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 rootPage.NotifyUser("Automatic Time Out of Dictation", NotifyType.StatusMessage);
                 DictationButtonText.Text      = " Dictate";
                 cbLanguageSelection.IsEnabled = true;
                 dictationTextBox.Text         = dictatedTextBuilder.ToString();
                 isListening = false;
                 ContinuousRecognize_Click(null, null);
             });
         }
         else
         {
             await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage);
                 DictationButtonText.Text      = " Dictate";
                 cbLanguageSelection.IsEnabled = true;
                 isListening = false;
             });
         }
     }
 }
Example 28
        /// <summary>
        /// Fires when MyRecognizer successfully parses speech
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="args"></param>
        private void MyRecognizer_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Write Debug Information
            Debug.WriteLine(args.Result.Text);

            // Drive robot on recognized speech
            switch (args.Result.Text)
            {
            case "move forward":
                _robot.MoveForward();
                break;

            case "move reverse":
                break;

            case "turn right":
                break;

            case "turn left":
                break;

            case "stop":
                break;
            }

            // Turn on/off obstacle detection
        }
Example 29
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                dictatedTextBuilder.Append(args.Result.Text + " ");

                await Dispatcher.InvokeAsync(() => {
                    DictationTextBox.Text = dictatedTextBuilder.ToString();
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await Dispatcher.InvokeAsync(() => {
                    DictationTextBox.Text = dictatedTextBuilder.ToString();
                    string discardedText  = args.Result.Text;
                    if (!string.IsNullOrEmpty(discardedText))
                    {
                        discardedText         = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");
                        DictationTextBox.Text = "Discarded due to low/rejected Confidence: " + discardedText;
                    }
                });
            }
        }
Example 30
        private void RecognizerResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var cmd     = args.Result;
            var command = cmd.SemanticInterpretation.Properties["command"].FirstOrDefault();
            var what    = cmd.SemanticInterpretation.Properties["direction"].FirstOrDefault();

            //Debug.WriteLine(command + " " + what);
            //if (cmd.Confidence > SpeechRecognitionConfidence.Low)
            if (cmd.Confidence >= SpeechRecognitionConfidence.Medium)
            {
                if (command == "SHOW")
                {
                    if (what == "NEWS")
                    {
                        if (cmd.Confidence >= SpeechRecognitionConfidence.High)
                        {
                            ShowNewsPage();
                        }
                    }
                    else if (what == "SCHEDULE")
                    {
                        ShowSchedulePage();
                    }
                    else if (what == "RADIO")
                    {
                        PlayRadio();
                    }
                    else if (what == "HOME")
                    {
                        NavBack();
                    }
                }
            }
        }
        private async void UpdateTextBox(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {

            System.Diagnostics.Debug.WriteLine("updated box with recognizer results");
            await Dispatcher.InvokeAsync(() =>
            {
                customPhrase.Text = args.Result.Text;
            });
        }
Example 32
 private async void Con_Result(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Text == "note finished")
     {
         await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("note recorded"));
         await sender.StopAsync();
         //SetListening(true);
     }
 }
Example 33
 private async void Con_Result(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     switch (args.Result.Text) {
         case "sponge in":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             spongeIn++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(spongInC, spongeIn));
             break;
         case "sponge out":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             spongeOut++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(spongOutC, spongeOut));
             break;
         case "instrument in":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             instruIn++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(instInC, instruIn));
             break;
         case "needle in":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             needleIn++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(needleInC, needleIn));
             break;
         case "instrument out":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             instruOut++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(instOutC, instruOut));
             break;
         case "needle out":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             needleOut++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(needleOutC, needleOut));
             break;
         case "going to close":
             if (spongeIn != spongeOut)
             {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I'm concerned. There is " + (int)Math.Abs(spongeIn - spongeOut) + " sponge not accounted for. " + spongeIn + " in, " + spongeOut + " out"));
                 
             } else if (needleIn != needleOut)
             {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I'm concerned. There is " + (int) Math.Abs(needleIn - needleOut) + " needle not accounted for. " + needleIn + " in, " + needleOut + " out."));
                 
             }
             else if (instruIn != instruOut)
             {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I'm concerned. There is " + (int) Math.Abs(instruIn - instruOut) + " instrument not accounted for. " + instruIn + " in, " + instruOut + " out."));
                 
             }
             else {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I have counted " + spongeIn + " in sponge, " + spongeOut + " out sponge, " + instruIn + " in instrument, " + instruOut + " out instrument, " + needleIn + " in needle, " + needleOut + " out needle. Count seems OK"));
                 instruIn = instruOut = needleIn = needleOut = spongeIn = spongeOut = 0;
                 await sender.StopAsync();
             }
             break;
     }
 }
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     Debug.WriteLine(args.Result.Text);
     await Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
         CoreDispatcherPriority.Normal, () =>
         {
             SpeechResult = args.Result.Text;
             ProcessCommands(args.Result);
         });
 }
Example 35
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         if (args.Result.Text == "Start Listening")
         {
             //await Windows.Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
             //{
             //    SetListening(true);
             //});
         }
     }
 }
Example 36
 private async void Con_Result(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Text.Contains("yes"))
     {
         if (marked)
         {
             queCounter++;
             marked = false;
         }
         if (queCounter == 1)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick1.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == 2)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick2.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == 3)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick3.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == 4)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick4.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == questionList.Count()) {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick5.Visibility = Visibility.Visible);
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Sign in checklist complete, you can track equipment now."));
             await sender.StopAsync();
         }
     }
 }
 /// <summary>
 /// Handle events fired when error conditions occur, such as the microphone becoming unavailable, or if
 /// some transient issues occur.
 /// </summary>
 /// <param name="sender">The continuous recognition session</param>
 /// <param name="args">The state of the recognizer</param>
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         // If TimeoutExceeded occurs, the user has been silent for too long. We can use this to 
      // cancel recognition if the user is in dictation mode and walks away from their device, etc.
         // In a global-command type scenario, this timeout won't apply automatically.
         // With dictation (no grammar in place) modes, the default timeout is 20 seconds.
         if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         {
             await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 rootPage.NotifyUser("Automatic Time Out of Dictation", NotifyType.StatusMessage);
                 DictationButtonText.Text = " Dictate";
                 cbLanguageSelection.IsEnabled = true;
                 dictationTextBox.Text = dictatedTextBuilder.ToString();
                 isListening = false;
             });
         }
         else
         {
             await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
             {
                 rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage);
                 DictationButtonText.Text = " Dictate";
                 cbLanguageSelection.IsEnabled = true;
                 isListening = false;
             });
         }
     }
 }
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                dictatedTextBuilder.Append(args.Result.Text + " ");

                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Collapsed;

                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    btnClearText.IsEnabled = true;
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    string discardedText = args.Result.Text;
                    if (!string.IsNullOrEmpty(discardedText))
                    {
                        discardedText = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");

                        discardedTextBlock.Text = "Discarded due to low/rejected Confidence: " + discardedText;
                        discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
                    }
                });
            }
        }
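
The hypothesis text these handlers reset comes from a separate event on the recognizer itself; a typical handler, sketched with the same fields as above:

        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            // Show the accumulated text plus the in-progress hypothesis.
            string textboxContent = dictatedTextBuilder.ToString() + " " + args.Hypothesis.Text + " ...";
            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                dictationTextBox.Text = textboxContent;
            });
        }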
 /// <summary>
 /// Triggers Cortana with a simulated key combo if a constraint is heard.
 /// </summary>
 /// <param name="session">backgroundListener's continuous recognition session.</param>
 /// <param name="args">Result arguments</param>
 private void blResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Constraint != null)
     {
         switch (args.Result.Text)
         {
             case "Open Netflix now":
                 Process.Start(@"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe", @"http:\\www.netflix.com");
                 break;
             default:
                 InputSimulator.SimulateModifiedKeyStroke(VirtualKeyCode.LWIN, VirtualKeyCode.VK_S);
                 break;
         }
     }
 }
Example 40
        /// <summary>
        /// Handles successful recognized speech commands
        /// </summary>
        /// <param name="sender">The session of <see cref="SpeechContinuousRecognitionSession"/> that generated the result</param>
        /// <param name="args">The generated result arguments</param>
        private void RecognizerResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var rssProvider = m_Container.Resolve<RssProvider>();

            var command = args.Result.SemanticInterpretation.Properties.ContainsKey("command") ?
                           args.Result.SemanticInterpretation.Properties["command"][0].ToString() :
                           "";
            var subreddit = args.Result.SemanticInterpretation.Properties.ContainsKey("subreddit") ?
                             args.Result.SemanticInterpretation.Properties["subreddit"][0].ToString() :
                             "";
            var page = args.Result.SemanticInterpretation.Properties.ContainsKey("page") ?
                             args.Result.SemanticInterpretation.Properties["page"][0].ToString() :
                             "";

            Debug.WriteLine(string.Format("Command: {0}, SubReddit: {1}, Page: {2}", command, subreddit, page));

            if (!string.IsNullOrWhiteSpace(subreddit) && (rssProvider != null))
            {
                rssProvider.Subreddit = subreddit;
            }


            if (!string.IsNullOrWhiteSpace(command))
            {
                switch (command)
                {
                    case "on":
                        DispatcherHelper.RunOnUIThread(() =>
                        {
                            var frame = m_Container.Resolve<Window>().Content as Frame;
                            frame.Navigate(typeof(MainPage));
                        });
                        break;

                    case "off":
                        DispatcherHelper.RunOnUIThread(() =>
                        {
                            var frame = m_Container.Resolve<Window>().Content as Frame;
                            frame.Navigate(typeof(BlankPage));
                        });
                        break;

                    default:
                        break;
                }
            }

            var navigationPage = ParseNavigationPage(page);
            if ((navigationPage != null))
            {
                DispatcherHelper.RunOnUIThread(() =>
                {
                    var frame = m_Container.Resolve<Window>().Content as Frame;
                    frame.Navigate(navigationPage);
                });
            }
        }
Example 41
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            SpeechRecognitionResult tmpRes = args.Result;
            if (tmpRes != null && tmpRes.Status == SpeechRecognitionResultStatus.Success)
            {
                if (tmpRes.Confidence == SpeechRecognitionConfidence.Rejected)
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        listenText.Text = "didn't get cha.";
                    });
                else
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        listenText.Text = tmpRes.Text;
                        if (!slideshow)
                        {
                            if (listenText.Text.Equals("Light on") || listenText.Text.Equals("on") || listenText.Text.Equals("light") || listenText.Text.Equals("bright"))
                            {
                                lightPinValue = GpioPinValue.Low;
                                lightPin.Write(lightPinValue);
                                LED.Fill = redBrush;
                                lightOn = true;
                            }
                            if (listenText.Text.Equals("Light off") || listenText.Text.Equals("off") || listenText.Text.Equals("dark"))
                            {
                                lightPinValue = GpioPinValue.High;
                                lightPin.Write(lightPinValue);
                                LED.Fill = grayBrush;
                                lightOn = false;
                            }
                            if (listenText.Text.Equals("next") || listenText.Text.Equals("forward"))
                            {
                                triggerPinValue = GpioPinValue.Low;
                                timer.Interval = TimeSpan.FromMilliseconds(200);
                                timer.Tick += Timer_Tick;
                                triggerPin.Write(triggerPinValue);
                                timer.Start();
                                slideCounter++;
                            }
                            if (listenText.Text.Equals("previous") || listenText.Text.Equals("back"))
                            {
                                triggerPinValue = GpioPinValue.Low;
                                timer.Interval = TimeSpan.FromMilliseconds(700);
                                timer.Tick += Timer_Tick;
                                triggerPin.Write(triggerPinValue);
                                timer.Start();
                                slideCounter--;
                            }
                            if (listenText.Text.Equals("start slideshow") || listenText.Text.Equals("slideshow"))
                            {
                                //triggerPinValue = GpioPinValue.Low;
                                //triggerPin.Write(triggerPinValue);
                                timer = new DispatcherTimer();
                                timer.Interval = TimeSpan.FromMilliseconds(100);
                                timer.Tick += Slideshow_Tick;
                                timer.Start();
                                slideshow = true;
                            }
                        }
                        else //slideshow mode
                        {
                            if (listenText.Text.Equals("stop slideshow") || listenText.Text.Equals("stop"))
                            {

                                timer.Stop();
                                timer.Tick -= Slideshow_Tick;
                                slideshow = false;
                                timer = new DispatcherTimer(); //??
                                //if (timer.Tick != null)
                                {

                                }
                            }
                        }
                    });
            }
        }
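The pulse commands above drive triggerPin low and start a one-shot timer; a plausible Timer_Tick counterpart (not shown in the example; assumes the same timer, triggerPin and triggerPinValue fields):

        private void Timer_Tick(object sender, object e)
        {
            // End the pulse: stop the one-shot timer, detach, and release the trigger pin.
            timer.Stop();
            timer.Tick -= Timer_Tick;
            triggerPinValue = GpioPinValue.High;
            triggerPin.Write(triggerPinValue);
        }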
Esempio n. 42
0
        private void RecognizerResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Output debug strings
            Debug.WriteLine(args.Result.Status);
            Debug.WriteLine(args.Result.Text);

            int count = args.Result.SemanticInterpretation.Properties.Count;

            Debug.WriteLine("Count: " + count);
            Debug.WriteLine("Tag: " + args.Result.Constraint.Tag);

            // Read tags
            var bodyName = args.Result.GetTag(TAG_BODYNAME);

            Debug.WriteLine("Body: " + bodyName);

            // Try to find the body
            var body = system.Bodies.FirstOrDefault(b => b.Name == bodyName);

            // Notify
            if (ResultRecognized != null && body != null)
            {
                var bodies = new List<CelestialBody>() { body };
                ResultRecognized(this, new CelestialSpeechResult(bodies));
            }
        }
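GetTag is not a Windows API; a hypothetical sketch of such a helper over the semantic interpretation properties (assumes using System.Linq):

        internal static class SpeechResultExtensions
        {
            // Returns the first value of a semantic interpretation property, or null when absent.
            public static string GetTag(this SpeechRecognitionResult result, string key)
            {
                IReadOnlyList<string> values;
                return result.SemanticInterpretation.Properties.TryGetValue(key, out values)
                    ? values.FirstOrDefault()
                    : null;
            }
        }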
Esempio n. 43
0
        private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
        {
            if (args.Status != SpeechRecognitionResultStatus.Success)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    DictationButtonText.Text = " Continuous Recognition";
                    if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
                    {
                        // On timeout, flush the accumulated dictation into the textbox.
                        DictationTextBox.Text = dictatedTextBuilder.ToString();
                    }
                });
            }
        }
Esempio n. 44
0
        /// <summary>
        /// Handle events fired when a result is generated. On success, append the recognized text
        /// to the string builder, forward it to the host, and replace the content of the textbox with
        /// the string buffer, to remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            string s = args.Result.Text;
            string lang = selectedLang.LanguageTag + ":";

            // Translation and audio playback happen on the remote host, so only the raw
            // recognized text, prefixed with its language tag, is sent from here.
            SendDataToHost(lang + s);

            if (args.Result.Status == SpeechRecognitionResultStatus.Success)
            {
                dictatedTextBuilder.Append(s + " ");
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    btnClearText.IsEnabled = true;
                    // Playback is intentionally omitted here; audio is rendered on the remote server.
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                });
            }

        }
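SendDataToHost is app-specific and not shown; one plausible sketch over a pre-connected StreamSocket (the _socket field and simple UTF-8 framing are assumptions):

        private async void SendDataToHost(string message)
        {
            // Write the language-tagged text to the host over the existing socket.
            using (var writer = new DataWriter(_socket.OutputStream))
            {
                writer.WriteString(message);
                await writer.StoreAsync();
                writer.DetachStream(); // keep the socket usable for later sends
            }
        }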
Esempio n. 45
0
 /// <summary>
 /// Callback when the continuous speech recognition session completed.
 /// In the ideal case, this is because the app stopped the session after 7 seconds.
 /// In that case, calculate the average score and show this as well as the recognized text
 /// to the user.
 /// In case the status is an error state, show the error to the user.
 /// </summary>
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     Debug.Write("Completed: " + args.Status);
     await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         if (args.Status == SpeechRecognitionResultStatus.Success)
         {
             const string resultText = "Your score: {0:P}\nI understood:\n{1}";
             TxtScore.Text = string.Format(resultText, _averageScore / _numSegments, _recognizedText.ToString());
         }
         else
         {
             TxtScore.Text = "Could not recognize what you said - please restart and try again! Reason: " + args.Status;
             ResetTimer();
         }
     });
 }
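One edge case worth guarding: if the session completes before any segment was recognized, _numSegments is zero and the average divides by zero. A defensive variant of the success branch (sketch, reusing the example's fields):

     var average = _numSegments > 0 ? _averageScore / _numSegments : 0.0;
     TxtScore.Text = string.Format("Your score: {0:P}\nI understood:\n{1}", average, _recognizedText);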
Esempio n. 46
0
 /// <summary>
 /// Intermediate speech recognition result received.
 /// Store the confidence so that we can calculate the average at the end.
 /// Also store the text that was recognized in this result segment.
 /// </summary>
 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     Debug.WriteLine("Recognized: " + args.Result.RawConfidence.ToString(CultureInfo.CurrentCulture) + ", " + args.Result.Text);
     _averageScore += args.Result.RawConfidence;
     _numSegments++;
     _recognizedText.AppendLine(args.Result.Text);
 }
Esempio n. 47
0
 /// <summary>
 /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
 /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
 /// low confidence).
 /// </summary>
 /// <param name="sender">The Recognition session that generated this result</param>
 /// <param name="args">Details about the recognized speech</param>
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their 
     // grammar based on testing.
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             HandleRecognitionResult(args.Result);
         });
     }
     // Prompt the user if recognition failed or recognition confidence is low.
     else if (args.Result.Confidence == SpeechRecognitionConfidence.Rejected ||
     args.Result.Confidence == SpeechRecognitionConfidence.Low)
     {
         // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
         // is not the primary input mechanism for the application.
         await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             resultTextBlock.Text = speechResourceMap.GetValue("SRGSGarbagePromptText", speechContext).ValueAsString;
         });
     }
 }
 /// <summary>
 /// Handle events fired when the session ends, either from a call to
 /// CancelAsync() or StopAsync(), or an error condition, such as the 
 /// microphone becoming unavailable or some transient issues occuring.
 /// </summary>
 /// <param name="sender">The continuous recognition session</param>
 /// <param name="args">The state of the recognizer</param>
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage);
         ContinuousRecoButtonText.Text = " Continuous Recognition";
         cbLanguageSelection.IsEnabled = true;
     });
 }
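For context, handlers like the two above are typically attached along these lines (a minimal sketch using the standard UWP dictation topic constraint; names are illustrative):

 var recognizer = new SpeechRecognizer();
 recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation"));
 await recognizer.CompileConstraintsAsync();
 recognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
 recognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
 await recognizer.ContinuousRecognitionSession.StartAsync();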
Esempio n. 49
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (!this.Completed)
            {

                try
                {
                    // Fire-and-forget cancellation; see the awaited variant sketched below.
                    sender.CancelAsync();
                }
                catch (Exception)
                {
                    // Ignore cancellation failures; the session may already have ended.
                }
                await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        // "mensaje" = the recognized message; strip periods and map "Text" to "Texto".
                        var mensaje = args.Result.Text.Replace(".", string.Empty);
                        if (mensaje.Contains("Text")) mensaje = "Texto";
                        var vm = new ImageSearchViewModel();
                        vm.Search(mensaje);
                        if (vm.SearchResult.Count > 0)
                        {
                            Frame.Navigate(typeof(SearchListView), vm);
                        }
                        else
                        {
                            var auth = new Authentication("713034f5c7994f089c1d5a70c1a12ede", "54c4cd393679455d90a48250cde0cfa4");
                            var token = auth.GetAccessToken();
                            var requestUri = "https://speech.platform.bing.com/synthesize";

                            var sb = new StringBuilder();
                            // "No search results were found." (spoken in Spanish, matching the es-es locale)
                            sb.Append("No se han encontrado resultados de búsqueda.");
                            
                            var cortana = new Synthesize(new Synthesize.InputOptions()
                            {
                                RequestUri = new Uri(requestUri),
                                Text = sb.ToString(),
                                VoiceType = Gender.Female,
                                Locale = "es-es",
                                VoiceName = "Microsoft Server Speech Text to Speech Voice (en-US, ZiraRUS)",
                                OutputFormat = AudioOutputFormat.Riff16Khz16BitMonoPcm,
                                AuthorizationToken = "Bearer " + token.access_token,
                            });

                            cortana.OnAudioAvailable += PlayAudio;
                            cortana.OnError += ErrorHandler;
                            cortana.Speak(CancellationToken.None);
                        }
                    });
            }
        }
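CancelAsync returns an IAsyncAction, so the fire-and-forget call above can silently race the dispatcher work; an awaited variant (sketch, same fields as the example):

        try
        {
            await sender.CancelAsync();
            this.Completed = true;
        }
        catch (Exception ex)
        {
            Debug.WriteLine("CancelAsync failed: " + ex.Message);
        }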
Esempio n. 50
0
        private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
        {
            
            if (args.Status != SpeechRecognitionResultStatus.Success)
            {
                // If TimeoutExceeded occurs, the user has been silent for too long.
                // With dictation (no grammar in place) the default timeout is 20 seconds;
                // in a global-command scenario the timeout won't apply automatically.
                // Whatever the failure, restart the recognition process.
                await startSRProcess();
            }
        }
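startSRProcess is not shown in the example; a minimal sketch of such a restart helper (assumes a page-level speechRecognizer field):

        private async Task startSRProcess()
        {
            // Recreate the recognizer and resume continuous listening.
            speechRecognizer = new SpeechRecognizer();
            await speechRecognizer.CompileConstraintsAsync();
            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }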
Esempio n. 51
0
 private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     // No completion handling is needed in this sample.
 }
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The garbage rule will not have a tag associated with it, the other rules will return a string matching the tag provided
            // when generating the grammar.
            string tag = "unknown";
            if (args.Result.Constraint != null)
            {
                tag = args.Result.Constraint.Tag;
            }

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their 
            // grammar based on testing.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Text = string.Format("Heard: '{0}', (Tag: '{1}', Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString());
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Text = string.Format("Sorry, I didn't catch that. (Heard: '{0}', Tag: {1}, Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString());
                });
            }
        }
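The tagged rules this handler reads come from an SRGS grammar compiled into the recognizer; loading one typically looks like this (sketch; the file name is illustrative):

        var grammarFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Grammar.xml"));
        recognizer.Constraints.Add(new SpeechRecognitionGrammarFileConstraint(grammarFile, "grammar"));
        await recognizer.CompileConstraintsAsync();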
Esempio n. 53
0
 private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     Debug.WriteLine("Continuous Recognition Session Completed: " + args.Status.ToString());
 }
Esempio n. 54
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                switch (args.Result.Text)
                {
                    case "product":
                        await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            captureElement();
                        });
                        break;
                    case "Reset":
                        await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            resetSetup();
                        });
                        break;
                    case "How Old":
                        await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            captureElement_Old();
                        });
                        break;
                    default:
                        break;
                }
            }

        }
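Note that the mixed-case literals above ("product", "Reset", "How Old") require exact matches from the recognizer; a case-insensitive comparison is more forgiving (sketch):

        if (string.Equals(args.Result.Text, "reset", StringComparison.OrdinalIgnoreCase))
        {
            await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => resetSetup());
        }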
Esempio n. 55
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The confidence filter is disabled in this sample; every recognized result is processed.
            dictatedTextBuilder.Append(args.Result.Text + " ");

            if (_currentQuestion.Key == "subject")
            {
                var words = args.Result.Text.Split(' ');
                // Look for the French articles "un"/"une" and take the following word as the subject.
                // Stop one short of the end so words[i + 1] never goes out of range.
                for (int i = 0; i < words.Length - 1; i++)
                {
                    if (words[i] == "un" || words[i] == "une")
                    {
                        _currentQuestion.Value = words[i + 1];
                        break;
                    }
                }

                // Scan backwards for a word starting with "personne" ("person") to capture a guest count.
                for (int i = words.Length - 1; i >= 0; i--)
                {
                    if (words[i].StartsWith("personne"))
                    {
                        var q = _questions.Single(e => e.Key == QuestionsType.nbpers.ToString());
                        var nb = q.Finder.Resolve(args.Result.Text);

                        if (nb != null)
                        {
                            q.Value = nb;
                            q.HasBeenAsked = true;
                        }

                        break;
                    }
                }

                if (args.Result.Text.Contains("soyons fou") || args.Result.Text.Contains("soyons fous") || args.Result.Text.Contains("soyons-fou"))
                    _isCrazy = true;

            }
            else
            {
                if (args.Result.Text.Contains("quelle est la différence"))
                {
                    Speak(_currentQuestion.Help);
                    return;
                }
                else
                    _currentQuestion.Value = _currentQuestion.Finder.Resolve(args.Result.Text);
            }

            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                DictationTextBox.Text = dictatedTextBuilder.ToString();
            });

            if (_currentQuestion.Value == null)
            {
                AskQuestionAgain();
                return;
            }

            _currentQuestion.HasBeenAsked = true;
            var nextQ = _questions.FirstOrDefault(e => !e.HasBeenAsked);

            if (nextQ == null)
            {
                EndConversation();
            }
            else
                AskQuestion(nextQ);

        }
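The article scan in the subject branch can also be written with a regular expression (sketch; assumes System.Text.RegularExpressions and whitespace-separated words):

        var match = Regex.Match(args.Result.Text, @"\bune?\s+(\S+)", RegexOptions.IgnoreCase);
        if (match.Success)
        {
            _currentQuestion.Value = match.Groups[1].Value;
        }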
Esempio n. 56
0
 private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     // Left empty in this sample.
 }
Esempio n. 57
0
 private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     // Left empty in this sample.
 }
Esempio n. 58
0
 /// <summary>
 /// Handle events fired when error conditions occur, such as the microphone becoming unavailable, or if
 /// some transient issues occur.
 /// </summary>
 /// <param name="sender">The continuous recognition session</param>
 /// <param name="args">The state of the recognizer</param>
 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     if (args.Status != SpeechRecognitionResultStatus.Success)
     {
         await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage);
             recognizeButtonText.Text = " Continuous Recognition";
             btnEmailGrammar.IsEnabled = false;
             btnPhoneGrammar.IsEnabled = false;
             isListening = false;
         });
     }
 }
Esempio n. 59
0
        private void ContinuousRecognitionSessionOnResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var result = args.Result;
            if (result.Status == SpeechRecognitionResultStatus.Success &&
                result.Confidence == SpeechRecognitionConfidence.High)
            {
                var tags = result.Constraint.Tag.Split(';');
                // Pick one of the candidate actions packed into the constraint's tag at random.
                var actionName = tags[new Random().Next(tags.Length)];
                var action = TinBotData.ActionsLib[actionName];

                if (action != null)
                    ActionRequested?.Invoke(this, action);
            }
            else
            {
                ActionRequested?.Invoke(this, new SavedAction("LPulseRed"));
            }
        }
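The handler assumes each constraint's Tag packs several candidate action names separated by ';'. Declaring such a constraint might look like this (sketch; the phrase and action names are illustrative):

 recognizer.Constraints.Add(new SpeechRecognitionListConstraint(
     new[] { "do something fun" },        // spoken phrase
     "WaveLeft;WaveRight;PulseGreen"));   // tag: candidate actions, ';'-separated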
Esempio n. 60
0
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
         async () =>
         {
             var cmd = args.Result.SemanticInterpretation.Properties["cmd"][0].ToString();
             var param = "";
             if (args.Result.SemanticInterpretation.Properties.ContainsKey("param"))
             {
                 param = args.Result.SemanticInterpretation.Properties["param"][0].ToString();
             }
             if (param=="")
             {
                 if (cmd == "forw" || cmd == "back") param = "50";
                 if (cmd == "left" || cmd == "right") param = "90";
             }
             stat.Text = cmd+" "+param;
             await Exec(cmd, double.Parse(param));
             // "Recognized, conf="+args.Result.Confidence.ToString();
         });
 }