Example #1
    /// <summary>
    /// RecoServiceClient_OnMessageReceived event handler:
    /// This event handler gets fired every time a new message comes back via WebSocket.
    /// </summary>
    /// <param name="result"></param>
    private void RecoServiceClient_OnMessageReceived(SpeechServiceResult result)
    {
        try
        {
            if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis)
            {
                UpdateUICanvasLabel(result.Result.Text, FontStyle.Italic);
                spawnController.SetCurrentText(result.Result.Text);
            }
            else if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
            {
                if (isRecognizing)
                {
                    StopRecording();
                }

                UpdateUICanvasLabel(result.Result.DisplayText, FontStyle.Normal);
                spawnController.SetRecognizedText(result.Result.DisplayText);


                Debug.Log("* RECOGNITION STATUS: " + result.Result.RecognitionStatus);
                Debug.Log("* FINAL RESULT: " + result.Result.DisplayText);
            }
        }
        catch (Exception ex)
        {
            string msg = String.Format("Error: Something went wrong when posting speech recognition results. See error details below:{0}{1}{2}{3}",
                                       Environment.NewLine, ex.ToString(), Environment.NewLine, ex.Message);
            Debug.LogError(msg);
            UpdateUICanvasLabel(msg, FontStyle.Normal);
        }
    }
Example #2
        private void CallContext(SpeechServiceResult result)
        {
            Debug.Log("Voice Command Manager -> CallContext: " + result.Path);

            if (result.Path != SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
            {
                Debug.Log("Voice Command Manager -> CallContext: Incomplete message received");
                //Message is not complete
                return;
            }

            if (Context == null)
            {
                Debug.Log("Voice Command Manager -> CallContext: No context provided");
                //no context to assess
                return;
            }

            Debug.Log("Voice Command Manager -> CallContext: Text is " + result.Result.DisplayText);

            foreach (var c in Context.commands)
            {
                //TODO: equality testing - how close does a spoken word need to be to count as a match?
                if (result.Result.DisplayText.ToLower().Equals(c.phrase))
                {
                    c.command?.Invoke();
                }
            }
        }
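
The loop above assumes that Context exposes a commands collection whose entries each carry a phrase and an invokable command. Those types are not shown in this example; the sketch below is one plausible shape for them, and the class names, the MonoBehaviour base, and the use of UnityEvent are assumptions made purely for illustration.

    using System;
    using System.Collections.Generic;
    using UnityEngine;
    using UnityEngine.Events;

    // Hypothetical command entry: a lower-case trigger phrase paired with a callback.
    [Serializable]
    public class VoiceCommand
    {
        public string phrase;       // compared against result.Result.DisplayText.ToLower()
        public UnityEvent command;  // invoked via c.command?.Invoke() when the phrase matches
    }

    // Hypothetical context component holding the commands the manager iterates over.
    public class VoiceCommandContext : MonoBehaviour
    {
        public List<VoiceCommand> commands = new List<VoiceCommand>();
    }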
Example #3
 private static void RecoServiceClient_OnMessageReceived(SpeechServiceResult result)
 {
     // Let's ignore all hypotheses and other messages for now and only report back on the final phrase
     if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
     {
         Console.WriteLine("*================================================================================");
         Console.WriteLine("* RECOGNITION STATUS: " + result.Result.RecognitionStatus);
         Console.WriteLine("* FINAL RESULT: " + result.Result.DisplayText);
         Console.WriteLine("*================================================================================" + Environment.NewLine);
     }
 }
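
Example #3 only shows the callback; none of these examples show how it gets attached to the client. The self-contained sketch below illustrates the subscription pattern with stand-in types: MockSpeechClient, FakeSpeechResult, and SimulateFinalPhrase are invented here so the snippet compiles on its own, while the real client and result types come from the speech sample library.

    using System;

    // Stand-in result type: only the member the console handler prints is sketched here.
    public class FakeSpeechResult
    {
        public string DisplayText { get; set; }
    }

    // Stand-in client: the real sample client is not shown in these examples,
    // but the handlers above imply an OnMessageReceived event of this general shape.
    public class MockSpeechClient
    {
        public event Action<FakeSpeechResult> OnMessageReceived;

        // Raises the event so the subscription can be exercised locally.
        public void SimulateFinalPhrase(string text) =>
            OnMessageReceived?.Invoke(new FakeSpeechResult { DisplayText = text });
    }

    class Program
    {
        static void Main()
        {
            var client = new MockSpeechClient();

            // Subscription mirrors client.OnMessageReceived += RecoServiceClient_OnMessageReceived;
            client.OnMessageReceived += result =>
                Console.WriteLine("* FINAL RESULT: " + result.DisplayText);

            client.SimulateFinalPhrase("hello world");
        }
    }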
Example #4
    /// <summary>
    /// RecoServiceClient_OnMessageReceived event handler:
    /// This event handler gets fired every time a new message comes back via WebSocket.
    /// </summary>
    /// <param name="result"></param>
    private void RecoServiceClient_OnMessageReceived(SpeechServiceResult result)
    {
        if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis)
        {
            DisplayLabel.text      = result.Result.Text;
            DisplayLabel.fontStyle = FontStyle.Italic;
        }
        else if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
        {
            DisplayLabel.text      = result.Result.DisplayText;
            DisplayLabel.fontStyle = FontStyle.Normal;

            Debug.Log("* RECOGNITION STATUS: " + result.Result.RecognitionStatus);
            Debug.Log("* FINAL RESULT: " + result.Result.DisplayText);
        }
    }
Example #5
        /// <summary>
        /// Speech recognition completed handler.
        /// </summary>
        /// <param name="speechResult">
        /// Service result.
        /// </param>
        private void OnSpeechRecognitionCompleted(SpeechServiceResult speechResult)
        {
            Debug.Assert(speechResult != null, "speechResult is null");

            this.RecognizingProgress.Visibility = Visibility.Collapsed;

            if (speechResult.Status == Status.Success)
            {
                this.SetRecognizedTextListBox(speechResult.SpeechResult.Items);
            }
            else
            {
                MessageBox.Show(
                    speechResult.Exception == null ? "Error recognizing the speech." : speechResult.Exception.Message,
                    "Error", MessageBoxButton.OK);
            }
        }
Example #6
 private async void RecoServiceClient_OnMessageReceived(SpeechServiceResult result)
 {
     // Show interim hypotheses as they arrive, then report the final phrase once recognition completes
     if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis)
     {
         await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => {
             lblResult.Text  = "SPEECH HYPOTHESIS RETURNED: " + Environment.NewLine;
             lblResult.Text += result.Result.Text;
         });
     }
     else if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
     {
         await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => {
             lblResult.Text  = "RECOGNITION STATUS: " + result.Result.RecognitionStatus + Environment.NewLine;
             lblResult.Text += "FINAL RESULT: " + result.Result.DisplayText + Environment.NewLine;
         });
     }
 }
Example #7
        /// <summary>
        /// Speech Grammars Received handler.
        /// </summary>
        /// <param name="result">
        /// Service Result.
        /// </param>
        private void OnSpeechGrammarsReceived(SpeechServiceResult result)
        {
            Debug.Assert(result != null, "result is null");

            this.RecognizingProgress.Visibility     = Visibility.Collapsed;
            this.RetrievingGrammarsLabel.Visibility = Visibility.Collapsed;
            this.SetButtonStates(true, false, false, false);

            this.SpeechDomainsList.Visibility = Visibility.Visible;
            if (result.Status == Status.Success)
            {
                this.availableGrammars = result.SpeechResult.Items;
                this.SetSpeechGrammarsListBox();
            }
            else
            {
                MessageBox.Show("Error receiving available speech grammars.", "Error", MessageBoxButton.OK);
                // this.NoGrammarsLabel.Visibility = Visibility.Visible;
            }
        }
Example #8
    /// <summary>
    /// RecoServiceClient_OnMessageReceived event handler:
    /// This event handler gets fired every time a new message comes back via WebSocket.
    /// </summary>
    /// <param name="result"></param>
    private void RecoServiceClient_OnMessageReceived(SpeechServiceResult result)
    {
        try
        {
            if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis)
            {
                UpdateUICanvasLabel(result.Result.Text, FontStyle.Italic);
            }
            else if (result.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
            {
                if (isRecognizing)
                {
                    StopRecording();
                }

                UpdateUICanvasLabel(result.Result.DisplayText, FontStyle.Normal);

                // LUIS integration after speech recognition
                if (luis != null)
                {
                    luis.PredictAndHandleAsync(result.Result.DisplayText);
                }

                Debug.Log("* RECOGNITION STATUS: " + result.Result.RecognitionStatus);
                Debug.Log("* FINAL RESULT: " + result.Result.DisplayText);
            }
            else
            {
                string msg = String.Format("Other result. See details below:{0}Path: {1}{2}Status: {3}",
                                           Environment.NewLine, result.Path.ToString(), Environment.NewLine, result.Result.RecognitionStatus);
                Debug.LogError(msg);
            }
        }
        catch (Exception ex)
        {
            string msg = String.Format("Error: Something went wrong when posting speech recognition results. See error details below:{0}{1}{2}{3}",
                                       Environment.NewLine, ex.ToString(), Environment.NewLine, ex.Message);
            Debug.LogError(msg);
            UpdateUICanvasLabel(msg, FontStyle.Normal);
        }
    }
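
In the last example, luis.PredictAndHandleAsync(result.Result.DisplayText) is not awaited, so an exception thrown by the prediction would bypass the surrounding try/catch. Assuming the method returns a Task (the LUIS wrapper itself is not shown in these examples), one lightweight option is to observe that task explicitly. The TaskExtensions.Forget helper below is a minimal sketch of that idea and is not part of the sample.

    using System;
    using System.Threading.Tasks;

    // Hypothetical helper: observes a fire-and-forget task so a faulted prediction
    // gets logged instead of being silently dropped.
    public static class TaskExtensions
    {
        public static void Forget(this Task task, Action<Exception> logError)
        {
            task.ContinueWith(
                t => logError(t.Exception),
                TaskContinuationOptions.OnlyOnFaulted);
        }
    }

With a helper like this, the call in the handler could read luis.PredictAndHandleAsync(result.Result.DisplayText).Forget(e => Debug.LogError(e.ToString())), again assuming PredictAndHandleAsync returns a Task.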