/// <summary>
/// Writes the speech-to-text result, forwards the transcribed text to the bot,
/// plays the spoken reply, and re-arms the microphone for the next utterance.
/// </summary>
private async Task EchoResponseAsync(SpeechRecognitionResultEventArgs e)
{
    WriteLine("Speech To Text Result:");

    // No recognized speech — common when the user pauses and the captured
    // audio contains no phrase.
    if (e.Result.Text.Length == 0)
    {
        WriteLine("No phrase response is available.");
        WriteLine();
        return;
    }

    WriteLine("Text=\"{0}\"", e.Result.Text);
    WriteLine();

    // Hand the transcript to the bot, then speak its reply via text-to-speech.
    string botReply = await this.GetBotReplyAsync(e.Result.Text);
    await PlaySpeechAudioAsync(botReply);

    // Resume listening for the next utterance.
    StartMicrophone();
}
/// <summary>
/// Handles intermediate (hypothesis) results raised while recognition is
/// still in progress; logs any non-empty partial transcript.
/// </summary>
private void Recognizer_IntermediateResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    var partial = e.Result.Text;
    if (!string.IsNullOrEmpty(partial))
    {
        Log("中间结果: " + partial);
    }
}
/// <summary>
/// Mirrors the current recognition hypothesis into the UI, upper-cased.
/// Marshals the update onto the UI thread via the dispatcher.
/// </summary>
private void Recognizer_IntermediateResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    Dispatcher.Invoke(() =>
    {
        var hypothesis = e.Result.RecognizedText;
        CurrentMessageBlock.Text = hypothesis.ToUpper();
    });
}
/// <summary>
/// Prints sufficiently long intermediate recognition hypotheses to the console.
/// </summary>
/// <remarks>
/// The handler was declared <c>async</c> but contained no <c>await</c>
/// (compiler warning CS1998); the modifier is unnecessary and has been
/// removed. The signature remains delegate-compatible with the event.
/// </remarks>
private void _speechClient_IntermediateResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    // Only surface hypotheses longer than 10 characters to reduce console noise.
    if (e.Result.Text.Length > 10)
    {
        Console.WriteLine(e.Result.Text);
    }
}
/// <summary>
/// Final-result handler: when recognition succeeded and produced text,
/// appends the utterance to the chat transcript and forwards it to the bot.
/// async void is acceptable here as a top-level event handler.
/// </summary>
private async void OnFinalSpeechRecognitionResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    await RunOnUi(async () =>
    {
        bool hasText = e.Result.Status == SpeechRecognitionStatus.Success
                       && e.Result.Text?.Length > 0;
        if (!hasText)
        {
            return;
        }

        AddChatMessage(MessageSource.User, e.Result.Text);
        await _botClient.SendMessageToBot(e.Result.Text);
    });
}
/// <summary>
/// Final recognition result handler: logs the recognized phrase and hands it
/// off to downstream speech-to-text processing.
/// </summary>
private void Recognizer_FinalResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    var phrase = e.Result.Text;
    if (string.IsNullOrEmpty(phrase))
    {
        return;
    }

    Log("最终结果: " + phrase);
    ProcessSttResult(phrase);
}
/// <summary>
/// Final-result handler: sends the recognized utterance to LUIS and executes
/// the returned intent payload. async void is acceptable for a top-level
/// event handler.
/// </summary>
private async void Recognizer_FinalResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    var utterance = e.Result.Text;
    if (string.IsNullOrEmpty(utterance))
    {
        return;
    }

    // Query LUIS for intent/entities, then act on the JSON result.
    string jsonLuis = await GetLuisResultAsync(utterance);
    ExecuteLuisResult(jsonLuis);
}
/// <summary>
/// Final-result handler: promotes the finished utterance (upper-cased) into
/// the "last message" display and clears the in-progress hypothesis line.
/// Marshalled onto the UI thread via the dispatcher.
/// </summary>
private void Recognizer_FinalResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    Dispatcher.Invoke(() =>
    {
        var recognized = e.Result.RecognizedText;
        if (string.IsNullOrEmpty(recognized))
        {
            return;
        }

        LastMessageBlock.Text = recognized.ToUpper();
        CurrentMessageBlock.Text = string.Empty;
    });
}
/// <summary>
/// Final-result handler: traces the event to the console and, when a phrase
/// was actually recognized, relays it over the web socket. In single-shot
/// mode the recognizer is stopped and returned to idle first.
/// </summary>
private void OnFinalResultReceivedHandler(object sender, SpeechRecognitionResultEventArgs e)
{
    Console.WriteLine(string.Format(CultureInfo.InvariantCulture, "Speech recognition: Final result: {0} ", e.ToString()));

    bool recognized = e.Result.RecognitionStatus == RecognitionStatus.Recognized
                      && !string.IsNullOrEmpty(e.Result.Text);
    if (!recognized)
    {
        return;
    }

    // One-shot mode: tear down recognition before publishing the result.
    if (this.Status == SRClientStatus.WorkingOnce)
    {
        this.StopRecognition();
        this.Status = SRClientStatus.Idle;
    }

    this._sendMessage(WebSocketMessageType.FinalResult, e.Result.Text);
}
/// <summary>
/// Logs the final recognition result to the log pane matching the recognizer
/// type (Bing basic model vs. CRIS custom model) and updates the
/// corresponding "current text" display.
/// </summary>
private void FinalResultEventHandler(SpeechRecognitionResultEventArgs e, RecoType rt)
{
    bool isBasic = rt == RecoType.Basic;
    TextBox log = isBasic ? this.bingLogText : this.crisLogText;
    this.SetCurrentText(isBasic ? this.bingCurrentText : this.crisCurrentText, e.Result.Text);

    this.WriteLine(log);
    this.WriteLine(log, " --- Final result received --- ");
    this.WriteLine(log, e.Result.Text);
}
/// <summary>
/// Final-result handler: resolves the recognition outcome to a display string
/// ("[Silence]" on initial-silence timeout, empty on other failures), echoes
/// it to the console, and appends it to output.txt.
/// </summary>
/// <remarks>
/// The handler was declared <c>async</c> but contained no <c>await</c>
/// (compiler warning CS1998); the modifier has been removed and the signature
/// stays delegate-compatible. Dead commented-out code was deleted.
/// </remarks>
private void _speechClient_FinalResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    string result = "";
    if (e.Result.RecognitionStatus == RecognitionStatus.Recognized)
    {
        result = e.Result.Text;
    }
    else if (e.Result.RecognitionStatus == RecognitionStatus.InitialSilenceTimeout)
    {
        result = "[Silence]";
    }

    Console.WriteLine(result);

    // NOTE(review): the file is reopened for every final result; fine at
    // speech cadence, but consider a shared writer if throughput grows.
    using (StreamWriter w = File.AppendText("output.txt"))
    {
        w.WriteLine(result);
    }
}
/// <summary>
/// Logs the final recognition result (including its status) to the log pane
/// for the recognizer type (base vs. custom model) and updates the matching
/// "current text" display. The text line is skipped when nothing was
/// recognized.
/// </summary>
private void FinalResultEventHandler(SpeechRecognitionResultEventArgs e, RecoType rt)
{
    TextBox log;
    if (rt == RecoType.Base)
    {
        log = this.baseModelLogText;
        this.SetCurrentText(this.baseModelCurrentText, e.Result.Text);
    }
    else
    {
        log = this.customModelLogText;
        this.SetCurrentText(this.customModelCurrentText, e.Result.Text);
    }

    this.WriteLine(log);
    // String interpolation invokes ToString() implicitly; the explicit call
    // in the original was redundant (output is identical for enums).
    this.WriteLine(log, $" --- Final result received. Status: {e.Result.RecognitionStatus}. --- ");
    if (!string.IsNullOrEmpty(e.Result.Text))
    {
        this.WriteLine(log, e.Result.Text);
    }
}
/// <summary>
/// Intermediate-result handler that halts continuous recognition as soon as a
/// hypothesis arrives. (Despite the original summary, it performs no logging.)
/// </summary>
/// <remarks>
/// <c>StopContinuousRecognitionAsync</c> returns a <c>Task</c> that was
/// previously dropped implicitly; it is now discarded explicitly so the
/// fire-and-forget intent is visible to readers and analyzers.
/// </remarks>
private void IntermediateResultEventHandler(SpeechRecognitionResultEventArgs e)
{
    // NOTE(review): stopping recognition on the first intermediate result is
    // unusual — confirm this handler should not log or await completion instead.
    _ = recognizer.StopContinuousRecognitionAsync();
}
/// <summary>
/// Intermediate-result handler: traces the event to the console and relays
/// the partial transcript over the web socket.
/// </summary>
private void OnIntermediatedResultReceived(object sender, SpeechRecognitionResultEventArgs e)
{
    var trace = String.Format(CultureInfo.InvariantCulture, "Speech recognition: Intermediate result: {0} ", e.ToString());
    Console.WriteLine(trace);
    this._sendMessage(WebSocketMessageType.PartialResult, e.Result.Text);
}
/// <summary>
/// Logs an intermediate recognition hypothesis to the log pane for the given
/// recognizer type (base vs. custom model).
/// </summary>
private void IntermediateResultEventHandler(SpeechRecognitionResultEventArgs e, RecoType rt)
{
    TextBox log;
    if (rt == RecoType.Base)
    {
        log = this.baseModelLogText;
    }
    else
    {
        log = this.customModelLogText;
    }

    this.WriteLine(log, "Intermediate result: {0} ", e.Result.Text);
}
/// <summary>
/// Writes each recognized phrase to the console, prefixed with a tag for
/// easy filtering.
/// </summary>
private static void OnResult(object sender, SpeechRecognitionResultEventArgs e)
{
    var line = "[RecognizedText] " + e.Result.RecognizedText;
    Console.WriteLine(line);
}
/// <summary>
/// Intermediate-result handler: on success, publishes the current hypothesis
/// text into the view model (empty string when no text is available).
/// async void is acceptable for a top-level event handler.
/// </summary>
/// <remarks>
/// The original read <c>e?.Result?.Text</c> after already dereferencing
/// <c>e.Result.Status</c> unconditionally — the null-conditional chain was
/// unreachable dead defence and has been simplified to a plain access.
/// </remarks>
private async void OnIntermediateSpeechRecognitionResultReceived(object sender, SpeechRecognitionResultEventArgs e) =>
    await RunOnUi(() =>
    {
        if (e.Result.Status == SpeechRecognitionStatus.Success)
        {
            _model["HypothesisText"] = e.Result.Text ?? string.Empty;
        }
    });
/// <summary>
/// Final-result handler: plays the "thinking" audio cue, then echoes the bot
/// response for the recognized phrase.
/// </summary>
/// <remarks>
/// Previously this blocked on <c>EchoResponseAsync(e).Wait()</c> — a classic
/// sync-over-async pattern that can deadlock on a UI synchronization context
/// and wraps failures in <c>AggregateException</c>. The handler is now
/// <c>async void</c> (acceptable for a top-level event handler) and awaits
/// the call; the delegate signature is unchanged.
/// </remarks>
private async void FinalResultEventHandler(SpeechRecognitionResultEventArgs e)
{
    // Audible feedback while the bot reply is produced (synchronous playback).
    thinking.PlaySync();
    await this.EchoResponseAsync(e);
}