Example #1
        /// <summary>
        /// Writes the response result.
        /// </summary>
        /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No phrase response is available.");
            }
            else
            {
                this.WriteLine("********* Final n-BEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.WriteLine(
                        "[{0}] Confidence={1}, Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);
                }
                // THIS IS THE END
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    Console.WriteLine(e.PhraseResponse.Results[i].DisplayText);
                    InitializeConnection(e.PhraseResponse.Results[i].DisplayText);
                }

                // Background Mic Image
                string imagePath = path2 + @"\..\..\images\";
                Console.WriteLine(imagePath);
                ImageBrush imgBrush = new ImageBrush();
                // RelativeOrAbsolute lets the URI resolve whether imagePath is relative or absolute.
                imgBrush.ImageSource = new BitmapImage(new Uri(imagePath + @"mic_standard.png", UriKind.RelativeOrAbsolute));
                _startButton.Background = imgBrush;

                _meinText.Text = e.PhraseResponse.Results[0].DisplayText;
                this.WriteLine();
            }
        }
        /// <summary>
        ///     Called when a final response is received.
        /// </summary>
        void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            bool isFinalDictationMessage = m_recoMode == SpeechRecognitionMode.LongDictation &&
                                           (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                                            e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout);

            if (m_isMicrophoneReco && ((m_recoMode == SpeechRecognitionMode.ShortPhrase) || isFinalDictationMessage))
            {
                // we got the final result, so we can end the mic reco.  No need to do this
                // for dataReco, since we already called endAudio() on it as soon as we were done
                // sending all the data.
                m_micClient.EndMicAndRecognition();
            }

            if (!isFinalDictationMessage)
            {
                Console.WriteLine("********* Final NBEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    Console.WriteLine("[{0}] Confidence={1} Text=\"{2}\"",
                                      i, e.PhraseResponse.Results[i].Confidence,
                                      e.PhraseResponse.Results[i].DisplayText);
                }
                Console.WriteLine();
            }
        }
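
For context, a minimal sketch of how a handler like this is typically wired up; the factory call mirrors Example #24 below, while the field assignments, language, and key literal are placeholder assumptions:

        // Hypothetical wiring; the mode, language, and key shown here are assumptions.
        private void InitializeMicClient()
        {
            m_recoMode         = SpeechRecognitionMode.LongDictation;
            m_isMicrophoneReco = true;
            m_micClient        = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                m_recoMode, "en-US", "<your_speech_api_key>");
            m_micClient.OnResponseReceived += OnResponseReceivedHandler;
            m_micClient.StartMicAndRecognition();
        }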
Example #3
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No phrase response is available.");
            }
            else
            {
                this.WriteLine("********* Final n-BEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.WriteLine(
                        "[{0}] Confidence={1}, Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);


                    if (e.PhraseResponse.Results[i].Confidence == Confidence.High)
                    {
                        sendText.Text = e.PhraseResponse.Results[i].DisplayText.Replace(".", "");
                        sendButton_Click(this, new EventArgs());
                    }
                }

                this.WriteLine("\n");
            }
        }
Example #4
        /// <summary>
        /// Writes the response result.
        /// </summary>
        /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No phrase response is available.");
            }
            else
            {
                //this.WriteLine("********* Final n-BEST Results *********");

                DateTime CurrentDateTime = DateTime.Now;
                TimeSpan span            = CurrentDateTime.Subtract(startDateTime);
                String[] cultureNames    = { "en-US", "en-GB", "fr-FR", "de-DE", "ru-RU" };
                var      culture         = new CultureInfo("en-GB");

                //this.WriteLine("{0}: {1}", "en-GB", localDate.ToString(culture));
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    //this.WriteLine("[{0}] Confidence={1}, Text=\"{2}\"", i, e.PhraseResponse.Results[i].Confidence, e.PhraseResponse.Results[i].DisplayText);
                    this.WriteLine("{0} | {2}", Math.Round(span.TotalMilliseconds), CurrentDateTime.ToString(culture), e.PhraseResponse.Results[i].DisplayText);
                }

                //this.WriteLine();
            }
        }
Example #5
 private void OnShortResponseReceived(object sender, SpeechResponseEventArgs e)
 {
     HasError     = false;
     ErrorMessage = "";
     Result.Add(e.PhraseResponse);
     waitHandle.Set();
 }
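
A hedged sketch of the blocking caller side of this wait-handle pattern (the field declarations and the WaitForResponse helper are assumptions for illustration; only OnShortResponseReceived above appears in the source):

 // Hypothetical consumer; field names and types are assumptions.
 private readonly ManualResetEvent waitHandle = new ManualResetEvent(false);
 private readonly List<RecognitionResult> Result = new List<RecognitionResult>();

 private RecognitionResult WaitForResponse(TimeSpan timeout)
 {
     // Block until OnShortResponseReceived signals that a response has arrived.
     if (!waitHandle.WaitOne(timeout))
     {
         HasError     = true;
         ErrorMessage = "Timed out waiting for a speech response.";
         return null;
     }
     return Result.Count > 0 ? Result[Result.Count - 1] : null;
 }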
Example #6
 // Prints the contents received by the response handler
 private void WriteResponseResult(SpeechResponseEventArgs e)
 {
     if (e.PhraseResponse.Results.Length == 0)
     {
         //codeText.Text += "No phrase response is available.";
     }
     else
     {
         //codeText.Text += "********* Final n-BEST Results *********";
         for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
         {
             // The content below can simply be appended (+=) to another textbox.
             //callerStatement.Text += e.PhraseResponse.Results[i].DisplayText; // e.PhraseResponse.Results[i].Confidence +
             string text     = e.PhraseResponse.Results[i].DisplayText;
             var    client   = LanguageServiceClient.Create();
             var    response = client.AnnotateText(
                 new Document { Content = text, Type = Document.Types.Type.PlainText },
                 new Features { ExtractSyntax = true });
             CorrectSentences(response.Sentences, response.Tokens);
         }
         //codeText.Text += "\n";
     }
 }
Example #7
        private void ResponseReceived(object sender, SpeechResponseEventArgs e)
        {
            if (Glob.speechthreadcontrol)
            {
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    // Each iteration overwrites 'result', so this keeps the last result's text.
                    result = e.PhraseResponse.Results[i].DisplayText;
                }

                try
                {
                    TextWriter txt = new StreamWriter("D:\\Output\\Output.txt", true);
                    txt.Write(result);
                    txt.Close();
                }
                catch (IOException) { /* ignore write failures */ }

                try
                {
                    TextWriter txt = new StreamWriter("D:\\Output\\Outputwithtime.txt", true);
                    txt.Write(" " + DateTime.Now.ToString("h:mm:ss") + " " + result);
                    txt.Close();
                }
                catch (IOException) { /* ignore write failures */ }
            }
            else
            {
                // Results.ToString() would only return the array's type name; use the last result's text instead.
                result = e.PhraseResponse.Results.Length > 0
                    ? e.PhraseResponse.Results[e.PhraseResponse.Results.Length - 1].DisplayText
                    : string.Empty;
            }
        }
        /// <summary>
        ///     Called when a final response is received.
        /// </summary>
        void OnMicDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            WriteLine("--- OnMicDictationResponseReceivedHandler ---");
            if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
            {
                Dispatcher.Invoke((Action)(() =>
                {
                    _FinalResponseEvent.Set();

                    // we got the final result, so we can end the mic reco.  No need to do this
                    // for dataReco, since we already called endAudio() on it as soon as we were done
                    // sending all the data.
                    _micClient.EndMicAndRecognition();

                    // BUGBUG: Work around for the issue when cached _micClient cannot be re-used for recognition.
                    _micClient.Dispose();
                    _micClient = null;

                    _startButton.IsEnabled = true;
                    _radioGroup.IsEnabled = true;
                }));
            }
            WriteResponseResult(e);
        }
        private void OnDataDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length < 1)
            {
                return;
            }

            // Only the first (top-ranked) result is used; append its text to the running transcript.
            string textAudio = e.PhraseResponse.Results[0].DisplayText;

            textAudioGlobal += textAudio;
        }
Example #10
        /// <summary>
        /// Writes the response result.
        /// </summary>
        /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No phrase response is available.");
            }
            else
            {
                this.WriteLine("********* Final n-BEST Results *********");
                using (StreamWriter w = File.AppendText(@"<file_path_to_output_file.txt>"))
                {
                    for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                    {
                        this.WriteLine(
                            "[{0}] Confidence={1}, Text=\"{2}\"",
                            i,
                            e.PhraseResponse.Results[i].Confidence,
                            e.PhraseResponse.Results[i].DisplayText);
                        w.WriteLine(e.PhraseResponse.Results[i].DisplayText);
                    }
                }
            }
        }
Example #11
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                //this.WriteLine("Please anwser the question");
                //if (missedResonseCount > 1)
                //{
                //    this.voice.Speak("I'm sorry. I could not hear you properly.");
                //}
                //missedResonseCount++;
                Send("dummy");
                //this.micClient.StartMicAndRecognition();
            }
            else
            {
                missedResonseCount = 0;
                this.WriteLine("********* Final n-BEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.WriteLine(
                        "[{0}] Confidence={1}, Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);
                }

                var firstGuess = e.PhraseResponse.Results.FirstOrDefault().DisplayText.Replace(".", "");

                Send(firstGuess);
                this.WriteLine("\n");
            }
        }
Example #12
        private void OnMicShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            Dispatcher.Invoke((Action)(() =>
            {
                Debug.WriteLine("--- OnMicShortPhraseResponseReceivedHandler ---");

                // we got the final result, so we can end the mic reco.  No need to do this
                // for dataReco, since we already called endAudio() on it as soon as we were done
                // sending all the data.
                this.clientUserSpeech.EndMicAndRecognition();

                btnMic.IsEnabled = true;
                btnSend.Visibility = Visibility.Visible;
                dockText.Width = MinTextWidth; // this should be moved to MVVM

                var text = WriteResponseResult(e);
                if (text == null)
                {
                    HandleCouldNotUnderstand();
                }
                else
                {
                    SendTextAsync(text);
                }
            }));
        }
Example #13
 private void WriteResponseResult(SpeechResponseEventArgs e)
 {
     Console.WriteLine(e.PhraseResponse.RecognitionStatus);
     if (e.PhraseResponse.Results.Length == 0)
     {
         WriteLine("No phrase resonse is available.");
         startListening();
     }
     else
     {
         WriteLine("********* Final n-BEST Results *********");
         for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
         {
             WriteLine("[{0}] Confidence={1}, Text=\"{2}\"",
                       i, e.PhraseResponse.Results[i].Confidence,
                       e.PhraseResponse.Results[i].DisplayText);
             if (i == 0)
             {
                 message = e.PhraseResponse.Results[i].DisplayText;
             }
         }
         if (this.speechToTextCallBack != null)
         {
             this.speechToTextCallBack(message);
         }
         WriteLine();
     }
 }
Example #14
        private void OnResponseReceived(object sender, SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.RecognitionSuccess)
            {
                void add(RecognizedPhrase phrase)
                {
                    this._phrases.Add(phrase.LexicalForm);
                    this.Log(phrase.LexicalForm);
                }

                // ToList() never returns null, so no null-conditional operator is needed here.
                e.PhraseResponse.Results.ToList().ForEach(add);
            }
            else
            {
                this.Log(e.PhraseResponse.RecognitionStatus);
            }

            if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
            {
                this.WriteFile();
                this.OnFinish?.Invoke(this, new EventArgs());
            }
        }
Example #16
        /// <summary>
        /// Writes the response result.
        /// </summary>
        /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No phrase response is available.");
            }
            else
            {
                this.WriteLine("********* Final n-BEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.WriteLine(
                        "[{0}] Confidence={1}, Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);
                    //uart
                    port.Open();
                    port.Write(e.PhraseResponse.Results[i].DisplayText);
                    port.Close();
                }

                this.WriteLine();
            }
        }
Example #17
        private void OnMicDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            this.WriteLine("--- OnMicDictationResponseReceivedHandler ---");
            if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
            {
                // we got the final result, so we can end the mic reco.  No need to do this
                // for dataReco, since we already called endAudio() on it as soon as we were done
                // sending all the data.
                this.micClient.EndMicAndRecognition();

                if (btnSpeech.InvokeRequired)
                {
                    // Marshal back onto the UI thread that owns btnSpeech.
                    btnSpeech.Invoke((MethodInvoker) delegate
                    {
                        btnSpeech.Enabled = true;
                    });
                }
                else
                {
                    btnSpeech.Enabled = true;
                }
            }

            this.WriteResponseResult(e);

            if (e.PhraseResponse.Results.Length > 0)
            {
                this.checkSpeechCommand(e.PhraseResponse.Results[0].DisplayText);
            }
        }
Example #18
        /// <summary>
        /// Called when a final response is received.
        /// </summary>
        /// <param name="sender">The source of the event.</param>
        /// <param name="e">An object that contains the event data.</param>
        private void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            // get the current (oldest) recognition task from the queue
            if (!this.pendingRecognitionTasks.TryPeek(out var currentRecognitionTask))
            {
                // This probably means that we have just received an end-of-dictation response which normally
                // arrives after a successful recognition result, so we would have already completed the
                // recognition task. Hence we just ignore the response.
                return;
            }

            // update the in-progress recognition task
            currentRecognitionTask.AppendResult(e.PhraseResponse);

            if (currentRecognitionTask.IsDoneSpeaking)
            {
                // current recognition task is no longer in progress so finalize and remove it
                currentRecognitionTask.IsFinalized = true;
                this.PostWithOriginatingTimeConsistencyCheck(this.Out, currentRecognitionTask.BuildSpeechRecognitionResult(), currentRecognitionTask.SpeechEndTime);
                this.pendingRecognitionTasks.TryDequeue(out _);
            }

            // Post the raw result from the underlying recognition engine
            var originatingTime = currentRecognitionTask.SpeechStartTime.Add(e.PhraseResponse.Offset).Add(e.PhraseResponse.Duration);

            this.PostWithOriginatingTimeConsistencyCheck(this.SpeechResponseEvent, e, originatingTime);
        }
        private void OnMicDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            this.WriteLine("--- OnMicDictationResponseReceivedHandler ---");
            if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
            {
                Dispatcher.Invoke(
                    (Action)(() =>
                {
                    // we got the final result, so we can end the mic reco.  No need to do this
                    // for dataReco, since we already called endAudio() on it as soon as we were done
                    // sending all the data.
                    this.micClient.EndMicAndRecognition();

                    buttonStartSpeech.IsEnabled = true;
                }));
            }

            if (e.PhraseResponse.Results.Length > 0)
            {
                AnswerText(e.PhraseResponse.Results[0].DisplayText);
            }

            this.WriteResponseResult(e);
        }
        /// <summary>
        /// Writes the response result.
        /// </summary>
        /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No speech was received.");
            }
            else
            {
                this.WriteLine("********* Final suggested text *********");

                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    // {1} (Confidence) is passed but deliberately omitted from the output.
                    this.WriteLine(
                        "[Suggestion {0}], Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);
                }
                if (IsMicrophoneClientDictation || IsDataClientDictation)
                {
                    this.WriteLine("Continuing to listen for speech...");
                }
                else if (IsMicrophoneClientShortPhrase || IsDataClientShortPhrase)
                {
                    this.WriteLine("Done.");
                }
                this.WriteLine();
            }
        }
Example #21
        //Writes the response result.
        private async Task EchoResponseAsync(SpeechResponseEventArgs e)
        {
            WriteLine("Speech To Text Result:");
            //handle the case when there are no results.
            //common situation is when there is a pause from user and audio captured has no speech in it
            if (e.PhraseResponse.Results.Length == 0)
            {
                WriteLine("No phrase response is available.");
                WriteLine();
            }
            else
            {
                //speech to text usually returns an array of results ranked from highest to lowest confidence
                //we will print all of the results
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    WriteLine(
                        "[{0}] Confidence={1}, Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);
                }
                WriteLine();

                //send transcribed text to bot and get the response
                var result = await this.GetBotReplyAsync(e.PhraseResponse.Results[0].DisplayText);

                //Play audio from text to speech API
                await PlaySpeechAudioAsync(result);
            }
        }
Example #22
        private void DataClient_OnResponseReceived(object sender, SpeechResponseEventArgs e)
        {
            try
            {
                var firstResult = e.PhraseResponse.Results.First();
                // _parts.Add(firstResult.LexicalForm);

                var content = new Content();
                content.MeetingId  = currentMeetingId;
                content.EmployeeId = Guid.Parse("BF49B343-97C2-4544-AC07-8964C269280D");
                content.Sequence   = 159;
                content.Line       = firstResult.LexicalForm;
                content.CategoryId = Guid.Parse("61027F10-21A9-4C91-B4C1-7E7E99A29853");

                // NOTE: a connection string with credentials should come from configuration, not source code.
                using (var db = new AzureDb("Server=lofndb.database.windows.net;Database=lofn2;User Id=lofn;Password=Passw0rd; "))
                {
                    db.Contents.Add(content);

                    db.SaveChanges();
                    // TODO: Handle transaction and failures.
                }
            }
            catch (Exception)
            {
                // Rethrow, preserving the original stack trace.
                throw;
            }
        }
Example #23
        /// <summary>
        /// Writes the response result.
        /// </summary>
        /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
        private void WriteResponseResult(SpeechResponseEventArgs e)
        {
            if (e.PhraseResponse.Results.Length == 0)
            {
                this.WriteLine("No phrase response is available.");
            }
            else
            {
                this.WriteLine("********* Final n-BEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.WriteLine(
                        "[{0}] Confidence={1}, Text=\"{2}\"",
                        i,
                        e.PhraseResponse.Results[i].Confidence,
                        e.PhraseResponse.Results[i].DisplayText);
                }

                using (System.IO.StreamWriter file =
                           new System.IO.StreamWriter(fileName, true))
                {
                    file.WriteLine(e.PhraseResponse.Results[0].DisplayText);
                }
                this.WriteLine();
            }
        }
Example #24
 private void OnResponseReceived(object sender, SpeechResponseEventArgs e)
 {
     Console.WriteLine(e.PhraseResponse.RecognitionStatus);
     if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.InitialSilenceTimeout ||
         e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
     {
         Task.Run(() =>
         {
             lock (speechClientLocker)
             {
                 speechClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(SpeechRecognitionMode.LongDictation, "en-US", cloudCreds.SpeechAPIKey);
                 speechClient.OnPartialResponseReceived += OnPartialResponseReceived;
                 speechClient.OnResponseReceived        += OnResponseReceived;
                 speechClient.StartMicAndRecognition();
             }
         });
     }
     else
     {
         var result = e.PhraseResponse.Results?.OrderByDescending(i => i.Confidence).Select(i => i.DisplayText).FirstOrDefault();
         if (!string.IsNullOrEmpty(result))
         {
             ResponseReceived?.Invoke(result);
         }
     }
 }
 private void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
 {
     if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.RecognitionSuccess)
     {
         // Order descending so the highest-confidence result is taken (mirrors the handler above).
         string phraseResponse = e.PhraseResponse.Results.OrderByDescending(r => r.Confidence).FirstOrDefault().DisplayText;
         this._callback.Invoke(phraseResponse);
     }
 }
Example #26
 /// <summary>
 /// Called when a final response is received.
 /// </summary>
 /// <param name="sender">The sender.</param>
 /// <param name="e">The <see cref="SpeechResponseEventArgs"/> instance containing the event data.</param>
 private void OnMicShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
 {
     // we got the final result, so we can end the mic reco.  No need to do this
     // for dataReco, since we already called endAudio() on it as soon as we were done
     // sending all the data.
     _micClient.EndMicAndRecognition();
     this.WriteResponseResult(e);
 }
Example #27
 private void OnMicShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
 {
     Dispatcher.Invoke((Action)(() =>
     {
         // active = false;
         this.micClient.EndMicAndRecognition();
     }));
 }
Example #28
        //This event handler gets called when full response audio is sent and transcribed
        private void OnMicShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            thinking.PlaySync();

            this.EchoResponseAsync(e).Wait();

            StartMicrophone();
        }
 private void OnDataDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
 {
     if (e?.PhraseResponse?.Results != null && e.PhraseResponse.Results.Any())
     {
         var context = GlobalHost.ConnectionManager.GetHubContext<ChatHub>();
         context.Clients.Client(_connectionId).audioRecognized(e.PhraseResponse.Results[0]?.DisplayText);
     }
 }
Example #30
 private void OnMicShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
 {
     this.BeginInvoke((MethodInvoker) delegate() {
         //this.WriteLine("--- OnMicShortPhraseResponseReceivedHandler ---");
         micClient.EndMicAndRecognition();
         btnSpeech.Enabled = true;
     });
 }
Example #31
 public static void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
 {
     for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
     {
         Console.Write("{0} ", e.PhraseResponse.Results[i].DisplayText);
     }
     Console.WriteLine();
 }
Example #32
        private void OnResponseReceived(object sender, SpeechResponseEventArgs e)
        {
            Dispatcher.Invoke(() =>
            {
                _micClient.EndMicAndRecognition();
                RecordButton.IsEnabled = true;
                RecordButton.Content = "Record";
            });

            var log = "";

            for (var i = 0; i < e.PhraseResponse.Results.Length; i++)
            {
                log +=
                    $"{i}. Confidence: {e.PhraseResponse.Results[i].Confidence}, Text: \"{e.PhraseResponse.Results[i].DisplayText}\"\r\n";
            }

            WriteToLog(log);
        }
        void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            if (ignoreNextString)
            {
                return;
            }

            if (recording)
            {
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.Dispatcher.Invoke((Action)(() =>
                    {
                        this.fullText += e.PhraseResponse.Results[i].DisplayText + " ";
                        userInput.Text = fullText;
                        pingPong = 0;
                    }));
                }
            }
        }