private void _speechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     if (_textBox != null)
     {
         var ignore = Helpers.RunOnCoreDispatcherIfPossible(() => _textBox.Text = args.Hypothesis.Text.ToLower(), false);
     }
 }
 private void VoiceRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     if (m_progress != null)
     {
         m_progress.Report(new APIProgressReport(50.0, "Hypothesis Generated", APIResponse.ContinuingExecution, args.Hypothesis));
     }
 }
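None of the handlers on this page show how they get attached. As a rough sketch (assuming a continuous dictation scenario; the field and method names below are illustrative, not taken from the examples), the recognizer is created, a dictation constraint is compiled, the HypothesisGenerated handler is subscribed, and the continuous session is started:

 // Requires: using Windows.Media.SpeechRecognition; using System.Threading.Tasks;
 private SpeechRecognizer _speechRecognizer;

 private async Task StartDictationAsync()
 {
     _speechRecognizer = new SpeechRecognizer();

     // Built-in dictation grammar, so free-form speech produces hypotheses.
     _speechRecognizer.Constraints.Add(
         new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation"));
     await _speechRecognizer.CompileConstraintsAsync();

     // Partial results arrive here while the user is still speaking.
     _speechRecognizer.HypothesisGenerated += _speechRecognizer_HypothesisGenerated;

     await _speechRecognizer.ContinuousRecognitionSession.StartAsync();
 }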
Example #3
        private async void ContSpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                Logger.Log(args.Hypothesis.Text);
                switch (args.Hypothesis.Text)
                {
                case "full screen":
                    ApplicationView.GetForCurrentView().TryEnterFullScreenMode();
                    break;

                case "exit full screen":
                    ApplicationView.GetForCurrentView().ExitFullScreenMode();
                    break;

                case "zoom in":
                    ApplicationView.GetForCurrentView().TryResizeView(new Size(this.ActualWidth * 1.5, this.ActualHeight * 1.5));
                    break;

                case "zoom out":
                    ApplicationView.GetForCurrentView().TryResizeView(new Size(this.ActualWidth * 0.5, this.ActualHeight * 0.5));
                    break;

                case "minimize":
                    this.Hide();
                    break;

                case "maximize":
                    this.Show();
                    break;
                }
            });
        }
        private void Recognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            Debug.WriteLine("[Speech to Text]: ********* Partial Result *********");
            Debug.WriteLine($"[Speech to Text]: {args.Hypothesis.Text}");
            Debug.WriteLine("[Speech to Text]: ");

            this.OnHypothesis?.Invoke(this, args.Hypothesis.Text);
        }
Example #5
 /// <summary>
 /// While the user is speaking, update the textbox with the partial sentence of what's being said for user feedback.
 /// </summary>
 /// <param name="sender">The recognizer that has generated the hypothesis</param>
 /// <param name="args">The hypothesis formed</param>
 private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     if (this.isNote)
     {
         string saveCommand = speechResourceMap.GetValue("ListGrammarSaveTrip", speechContext).ValueAsString;
         this.hypothesis = args.Hypothesis.Text;
     }
 }
Example #6
        private async void OnRecognationHypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            Debug.WriteLine("Hypotheses generated = " + args.Hypothesis.Text);

            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                Debug.WriteLine(originalEditorText + dictatedText.ToString() + args.Hypothesis.Text + "...");
            });
        }
Example #7
        private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            speech = hypothesis;

            Debug.WriteLine("[HYP] - " + hypothesis);
            AppendOutputSafe(hypothesis);
        }
        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender,
                                                                SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Update the textbox with the currently confirmed text, and the hypothesis combined.
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";
            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { dictationTextBox.Text = textboxContent; });
        }
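Most of these handlers marshal back to the UI thread through a `dispatcher` field that the snippets never declare, because HypothesisGenerated is raised off the UI thread. A minimal sketch of how such a field is typically captured (the field name follows the snippets; the helper method is illustrative):

 // Requires: using Windows.UI.Core; using Windows.ApplicationModel.Core;
 private CoreDispatcher dispatcher;

 private void CaptureDispatcher()
 {
     // Capture once, on the UI thread, before starting recognition.
     dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

     // From a non-UI thread, the main view's dispatcher can be used instead:
     // dispatcher = CoreApplication.MainView.CoreWindow.Dispatcher;
 }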
 private void SpeechRecognizerHypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     Debug.WriteLine(args.Hypothesis.Text);
     /*await
         Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
             CoreDispatcherPriority.Normal,
             () =>
             {
             });*/
 }
        private void SpeechRecognizerHypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            Debug.WriteLine(args.Hypothesis.Text);

            /*await
             *  Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
             *      CoreDispatcherPriority.Normal,
             *      () =>
             *      {
             *      });*/
        }
 private async void SpeechRecognizer_HypothesisGenerated(
     SpeechRecognizer sender,
     SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     string hypothesis = args.Hypothesis.Text;
     string content    = $"{_builder.ToString()} {hypothesis} ...";
     await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         Result?.Invoke(content);
     });
 }
Example #12
        private static async void SpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender,
            SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis     = args.Hypothesis.Text;
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";

            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                RtfTextHelper.Text = textboxContent;
            });
        }
Example #13
        private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        { // As recognition progresses, keep showing the partial text in the UI
            string hypothesis = args.Hypothesis.Text;

            // Update the textbox with the currently confirmed text, and the hypothesis combined.
            string textboxContent = szTextoDictado.ToString() + " " + hypothesis + " ...";

            dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                txbTextoReconocido.Text = textboxContent;
            });
        }
Example #14
 async void OnHypothesisGenerated(SpeechRecognizer sender,
                                  SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                    () =>
     {
         if (this.stopWatch == null)
         {
             this.stopWatch = new Stopwatch();
             this.stopWatch.Start();
             this.quarterSecondTimer.Start();
         }
         this.HypothesisedSpeech = args.Hypothesis.Text;
     });
 }
        /// <summary>
        /// While the user is dictating, update the textbox with the partial sentence of what's being said for user feedback.
        /// </summary>
        /// <param name="sender">The recognizer that has generated the hypothesis</param>
        /// <param name="args">The hypothesis formed</param>
        private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Update the textbox with the currently confirmed text, and the hypothesis combined.
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";

            //Debug.WriteLine(textboxContent);
            stop_sound_controller();


            // if(textboxContent.Length >= 80)initialize_dictator();
            // await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            // {
            //     //dictationTextBox.Text = textboxContent;
            //     //btnClearText.IsEnabled = true;
            // });
        }
Example #16
        private async void SpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender,
            SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            if (recognizer_ == null || recognizer_ != sender)
            {
                return;
            }

            string hypothesis     = args.Hypothesis.Text;
            string textboxContent = /*dictatedTextBuilder_.ToString() +*/ " " + hypothesis + " ...";

            System.Diagnostics.Debug.WriteLine("... :" + textboxContent + " ");

            eventQue_.Enqueue(new RecoEvent_
            {
                eventType = RecoEvent_.EventType.DictationHypothesis,
                text      = args.Hypothesis.Text,
            });
        }
Example #17
 private async void OnQuestionSpeechRecognizerHypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
     {
         QuestionText.Text = FormatQuestion(args.Hypothesis.Text);
     });
 }
Example #18
 private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     HypothesisGenerated?.Invoke(this, args);
 }
Example #19
 static async void speechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     //Debug.WriteLine("\nHypothesize :" + hypothesize);
     // Debug.WriteLine("args length :" + args.Hypothesis.Text.Length);
     //  Debug.WriteLine(args.Hypothesis.Text);
     if (hypothesize != args.Hypothesis.Text.Length)
     {
         keys    = args.Hypothesis.Text.Split(separator);
         lastKey = keys[keys.Length - 1];
         // Debug.WriteLine("lastKey :" + lastKey);
     }
     else
     {
         // Debug.WriteLine("Doublon d'hypothèse");
     }
     hypothesize = args.Hypothesis.Text.Length;
     //key = lastKey;
     keyDependet.key = lastKey;
     // Debug.WriteLine("Recognised key = " + key);
     //  Debug.WriteLine("Key recognition speed = " + timer.ElapsedMilliseconds + " ms");
 }
 /// <summary>
 /// Event handler used to display what's being heard in the main screen's text box
 /// </summary>
 /// <param name="recognizer"></param>
 /// <param name="args"></param>
 private static void Recognizer_HypothesisGenerated(SpeechRecognizer recognizer, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     if (StringUtils.Contains(args.Hypothesis.Text, activatorString) || StringUtils.AreEqual(SpokenText, activatorString))
     {
         Utils.RunOnMainThread(() =>
         {
             if (commandBox != null)
             {
                 commandBox.Text = SpokenText + " " + args.Hypothesis.Text;
             }
         });
     }
 }
Example #21
        /// <summary>
        /// While the user is speaking, update the textbox with the partial sentence of what's being said for user feedback.
        /// </summary>
        /// <param name="sender">The recognizer that has generated the hypothesis</param>
        /// <param name="args">The hypothesis formed</param>
        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Update the textbox with the currently confirmed text, and the hypothesis combined.
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                if (textboxContent.Contains("clear"))
                {
                    commandBox.Text = "";
                    dictatedTextBuilder.Clear();
                }
                else
                {
                    commandBox.Text = textboxContent;
                }
                // dictationTextBox.Text = textboxContent;
                //btnClearText.IsEnabled = true;
            });
        }
Example #22
 private void _speechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     Debug.WriteLine(args.Hypothesis.Text);
 }
Example #23
        private async void ContSpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                // Logger.Log(args.Hypothesis.Text);
                switch (args.Hypothesis.Text)
                {
                case "full screen":
                    ApplicationView.GetForCurrentView().TryEnterFullScreenMode();
                    break;

                case "exit full screen":
                    ApplicationView.GetForCurrentView().ExitFullScreenMode();
                    break;

                case "zoom in":
                    ApplicationView.GetForCurrentView().TryResizeView(new Size(this.ActualWidth * 2, this.ActualHeight * 2));
                    ++zoomCount;
                    break;

                case "zoom out":
                    if (zoomCount > 0)
                    {
                        ApplicationView.GetForCurrentView().TryResizeView(new Size(this.ActualWidth * 0.5, this.ActualHeight * 0.5));
                        this.CenterCursor();
                        --zoomCount;
                    }
                    break;

                case "minimize":
                    this.Hide();
                    break;

                case "maximize":
                    this.Show();
                    break;

                case "hide cursor":
                    Logger.Log("hide cursor");
                    this.CursorPanel.Hide();
                    break;

                case "show cursor":
                    Logger.Log("show cursor");
                    this.CursorPanel.Show();
                    break;

                case "increase cursor size":
                    Logger.Log("increase old scale:" + this.CursorElementInner.Scale.ToString());
                    this.CursorElementInner.Scale += new System.Numerics.Vector3(0.5f);
                    Logger.Log("new scale:" + this.CursorElementInner.Scale.ToString());
                    break;

                case "decrease cursor size":
                    Logger.Log("decrease old scale:" + this.CursorElementInner.Scale.ToString());
                    if (this.CursorElementInner.Scale.X >= 0.5f &&
                        this.CursorElementInner.Scale.Y >= 0.5f &&
                        this.CursorElementInner.Scale.Z >= 0.5f)
                    {
                        this.CursorElementInner.Scale -= new System.Numerics.Vector3(0.5f);
                    }
                    Logger.Log("new scale:" + this.CursorElementInner.Scale.ToString());
                    break;

                case "center cursor":
                    this.CenterCursor();
                    break;
                }
            });
        }
Example #24
 /// <summary>
 /// Runs when a hypothesis is generated, displays the text on the screen.
 /// </summary>
 /// <param name="sender"></param>
 /// <param name="args"></param>
 private void CommandHypothesisGenerated(
     SpeechRecognizer sender,
     SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     OnResponseReceived(args.Hypothesis.Text);
 }
Example #25
        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            try
            {
                string hypothesis = args.Hypothesis.Text;

                // Update the textbox with the currently confirmed text, and the hypothesis combined.
                string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";

                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text  = textboxContent;
                    btnClearText.IsEnabled = true;
                });
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
        /// <summary>
        /// While the user is speaking, update the textbox with the partial sentence of what's being said for user feedback.
        /// </summary>
        /// <param name="sender">The recognizer that has generated the hypothesis</param>
        /// <param name="args">The hypothesis formed</param>
        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Update the textbox with the currently confirmed text, and the hypothesis combined.
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";

            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                dictationTextBox.Text = textboxContent;
                btnClearText.IsEnabled = true;
            });
        }
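Several snippets above append a hypothesis to a `dictatedTextBuilder` that is filled elsewhere. A hedged sketch of the companion ResultGenerated handler that would accumulate the confirmed phrases (assuming the same StringBuilder, dispatcher, and textbox names used above):

 // Requires: using System.Text; using Windows.Media.SpeechRecognition;
 private StringBuilder dictatedTextBuilder = new StringBuilder();

 private async void ContinuousRecognitionSession_ResultGenerated(
     SpeechContinuousRecognitionSession sender,
     SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     // Keep only results the recognizer is reasonably sure about.
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         dictatedTextBuilder.Append(args.Result.Text + " ");

         await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             dictationTextBox.Text = dictatedTextBuilder.ToString();
         });
     }
 }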
Example #27
 private void Service_HypothesisGenerated(object sender, SpeechRecognitionHypothesisGeneratedEventArgs e)
 {
     MajaConversation.SetUserText(e.Hypothesis.Text.ToLower());
 }
Example #28
 /// <summary>
 /// Called while speech is partially recognized
 /// </summary>
 private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         AnswerHypo = args.Hypothesis.Text;
     });
 }
Example #29
        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;
            string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";

            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                DictationTextBox.Text = textboxContent;
            });
        }
 private async void Recognizer_HypothesisGenerated
     (SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         var text = args.Hypothesis.Text;
         _recognizedText.Text = text;
     });
 }
Example #31
 private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await textBox.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
     {
         Text = args.Hypothesis.Text;
     });
 }
 private void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     Debug.WriteLine($"VoiceCommand: HypothesisGenerated: {args.Hypothesis.Text}");
 }
Example #33
 private async void speechHypothesisCallback(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => spokenText.Text = args.Hypothesis.Text);
 }
Example #35
 private async void UISpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
     {
         UISpeechText.Text = args.Hypothesis.Text;
     });
 }
 private async void SpeechRecognizerHypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await
         Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
             CoreDispatcherPriority.Normal,
             () =>
             {
                 SpokenTextIsVisible = true;
                 SpokenText = args.Hypothesis.Text;
             });
 }
 private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
     {
         if (timer.IsEnabled == false)
             timer.Start();
         hypotesis = args.Hypothesis.Text;
         Text = args.Hypothesis.Text;
     });
 }
Example #38
 private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
 {
     await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
     {
         if (timer.IsEnabled == false)
         {
             timer.Start();
         }
         hypotesis = args.Hypothesis.Text;
         Text      = args.Hypothesis.Text;
     });
 }