// Receives the result of the speech-recognition activity started with
// SpeechRecognitionAndroidService.SpeakRequestCode and raises SpeechRecognized
// with the top match, or an empty-text event when nothing was recognized.
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    if (requestCode != SpeechRecognitionAndroidService.SpeakRequestCode)
    {
        return;
    }

    if (resultCode == Result.Ok)
    {
        // BUG FIX: `data` can be null (recognizer cancelled) and
        // GetStringArrayListExtra can return null when the extra is absent,
        // so guard both before dereferencing `matches`.
        var matches = data?.GetStringArrayListExtra(RecognizerIntent.ExtraResults);
        if (matches != null && matches.Count != 0)
        {
            Console.WriteLine(matches[0]);
            SpeechRecognized?.Invoke(
                this, new SpeechRecognizedEvent(recognizedText: matches[0]));
        }
        else
        {
            ReturnEmptyResponse();
        }
    }
    else
    {
        ReturnEmptyResponse();
    }
}
/// <summary>
/// Raises the <c>SpeechRecognized</c> event with the recognized text,
/// if anyone is subscribed.
/// </summary>
/// <param name="recognizedText">The text produced by the recognizer.</param>
private void InvokeSpeechRecognized(string recognizedText)
{
    // Null-conditional invoke reads the delegate field once, avoiding the
    // check-then-invoke race of the manual `if (X != null) X.Invoke(...)` form.
    SpeechRecognized?.Invoke(this, recognizedText);
}
// Dispatches a recognized phrase to the matching VoiceCommand and re-raises
// SpeechRecognized with an ODSpeechRecognizedEventArgs describing the action.
// NOTE(review): the order of SayResponseAsync relative to the IsListening /
// _isGivingFeedback mutations differs per case and is preserved as-is.
protected void RecEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    // Work on a copy so the mutations below never touch the shared command list.
    VoiceCommand voiceCommand = _listCommands.FirstOrDefault(x => x.Commands.Contains(e.Result.Text))?.Copy();
    if (voiceCommand == null)
    {
        return;
    }
    voiceCommand.DoSayResponse = false;
    // Low-confidence results are downgraded to a "didn't get that" prompt
    // instead of executing the (possibly wrong) matched command.
    if (e.Result.Confidence < ConfidenceThreshold)
    {
        voiceCommand.ActionToPerform = VoiceCommandAction.DidntGetThat;
        voiceCommand.Response = "I didn't get that";
    }
    switch (voiceCommand.ActionToPerform)
    {
        case VoiceCommandAction.GiveFeedback:
            // Feedback mode is entered before speaking the response.
            if (IsListening)
            {
                _isGivingFeedback = true;
                SayResponseAsync(voiceCommand.Response);
            }
            break;
        case VoiceCommandAction.StopGivingFeedback:
            // Feedback mode is left only after the response has been started.
            if (IsListening)
            {
                SayResponseAsync(voiceCommand.Response);
                _isGivingFeedback = false;
            }
            break;
        case VoiceCommandAction.DidntGetThat:
            if (IsListening)
            {
                SayResponseAsync(voiceCommand.Response);
            }
            break;
        case VoiceCommandAction.StartListening:
            // Start/stop commands are honored regardless of the current state.
            IsListening = true;
            SayResponseAsync(voiceCommand.Response);
            break;
        case VoiceCommandAction.StopListening:
            IsListening = false;
            SayResponseAsync(voiceCommand.Response);
            break;
        default:
            // Ordinary commands let the event subscriber decide whether to speak.
            voiceCommand.DoSayResponse = true;
            break;
    }
    // When not listening, only the StopListening acknowledgement is forwarded.
    if (!IsListening && voiceCommand.ActionToPerform != VoiceCommandAction.StopListening)
    {
        return;
    }
    SpeechRecognized?.Invoke(this, new ODSpeechRecognizedEventArgs(voiceCommand));
}
// Forwards a raw recognizer hit as a RecognitionEventArgs, preserving the
// original sender. Events without a result are ignored.
void _rg_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    var result = e.Result;
    if (result is null)
    {
        return;
    }

    SpeechRecognized?.Invoke(sender, new RecognitionEventArgs(result.Confidence, result.Text));
}
// Completion callback for an asynchronous recognition pass: raises
// SpeechNotRecognized when nothing was heard, otherwise wraps the outcome
// for the current answer field in an AnswerSelectedEventArgs.
private void HandleSpeechCompleted(object sender, RecognizeCompletedEventArgs eventArgs)
{
    if (eventArgs.Result is null)
    {
        SpeechNotRecognized?.Invoke(this, new EventArgs());
        return;
    }

    SpeechRecognized?.Invoke(this, new AnswerSelectedEventArgs
    {
        Result = true,
        FieldName = currentAnswer.FieldName,
        SelectedAnswer = GetProperResult(eventArgs),
    });
}
// Called by the recognizer when a recognition has occurred; rejected
// recognitions (SpeechRecognitionRejected) never reach this method.
// Re-raises the event to this grammar's subscribers.
internal void OnRecognitionInternal(SpeechRecognizedEventArgs eventArgs)
{
    // Sanity check: the incoming result must belong to this grammar instance.
    Debug.Assert(eventArgs.Result.Grammar == this);

    var handler = SpeechRecognized;
    handler?.Invoke(this, eventArgs);
}
// Signals subscribers that recognition produced no usable text by raising
// SpeechRecognized with an empty-string result.
private void ReturnEmptyResponse()
{
    var emptyResult = new SpeechRecognizedEvent(recognizedText: "");
    SpeechRecognized?.Invoke(this, emptyResult);
}
/// <summary>
/// Downloads a YouTube video, extracts its audio, and runs Azure continuous
/// speech translation from <paramref name="fromLanguage"/> into each of
/// <paramref name="toLanguages"/>, raising the Speech* events as the
/// recognizer reports progress, recognitions, and session changes.
/// </summary>
/// <param name="youTubeVideo">The video to download and translate.</param>
/// <param name="fromLanguage">BCP-47 code of the spoken language.</param>
/// <param name="toLanguages">Target translation languages.</param>
public async Task TranslateAsync(YouTubeVideo youTubeVideo, string fromLanguage, IEnumerable<string> toLanguages)
{
    // Per-call scratch directory (GUID) so concurrent translations never collide.
    var outputPath = Path.Combine("Output", Guid.NewGuid().ToString());
    var downloadFilePath = Path.Combine(outputPath, "input.mp4");

    // StringBuilders for data to be passed to event subscribers.
    var tsb = new StringBuilder();   // translated subtitle entry
    var osb = new StringBuilder();   // original-language subtitle entry
    var info = new StringBuilder();  // diagnostic / progress messages

    var config = SpeechTranslationConfig.FromSubscription(
        configuration["AzureSpeechTranslation:SubscriptionKey"],
        configuration["AzureSpeechTranslation:Region"]);
    config.SpeechRecognitionLanguage = fromLanguage;
    foreach (var language in toLanguages)
    {
        config.AddTargetLanguage(language);
    }

    try
    {
        var vidBytes = await youTubeVideo.GetBytesAsync();

        // Before saving the video, create the directory.
        CreateOutputDirectory(outputPath);
        await File.WriteAllBytesAsync(downloadFilePath, vidBytes);

        // Extract the audio from the video to work on it.
        var wavAudioFile = await ExtractingWavAudioAsync(downloadFilePath);

        // RunContinuationsAsynchronously keeps the recognizer's callback thread
        // from running our continuation inline when the source is completed.
        var stopTranslation = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);
        var lineCount = 1;

        using (var audioInput = AudioConfig.FromWavFileInput(wavAudioFile))
        using (var recognizer = new TranslationRecognizer(config, audioInput))
        {
            // Subscribes to events.
            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.TranslatedSpeech)
                {
                    // Emit one SRT-style cue per target language.
                    foreach (var element in e.Result.Translations)
                    {
                        var fromTime = TimeSpan.FromTicks(e.Result.OffsetInTicks);
                        var toTime = fromTime.Add(e.Result.Duration);
                        osb.AppendLine($"{lineCount}");
                        osb.AppendLine($"{fromTime.ToString(@"hh\:mm\:ss\.fff")} --> {toTime.ToString(@"hh\:mm\:ss\.fff")}");
                        osb.AppendLine(e.Result.Text);
                        osb.AppendLine();
                        tsb.AppendLine($"{lineCount}");
                        tsb.AppendLine($"{fromTime.ToString(@"hh\:mm\:ss\.fff")} --> {toTime.ToString(@"hh\:mm\:ss\.fff")}");
                        tsb.AppendLine(element.Value);
                        tsb.AppendLine();
                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, element.Key, osb.ToString(), tsb.ToString());
                        osb.Clear();
                        tsb.Clear();
                        SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                    }
                    lineCount++;
                }
                else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    info.AppendLine($"RECOGNIZED: Text={e.Result.Text}");
                    info.AppendLine($"    Speech not translated.");
                    var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                    info.Clear();
                    SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                }
                else if (e.Result.Reason == ResultReason.NoMatch)
                {
                    info.AppendLine($"NOMATCH: Speech could not be recognized.");
                    var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                    info.Clear();
                    SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                }
            };

            recognizer.Canceled += (s, e) =>
            {
                info.AppendLine($"CANCELED: Reason={e.Reason}");
                if (e.Reason == CancellationReason.Error)
                {
                    info.AppendLine($"CANCELED: ErrorCode={e.ErrorCode}");
                    info.AppendLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                    info.AppendLine($"CANCELED: Did you update the subscription info?");
                }
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechCanceled?.Invoke(this, speechServicesEventArgs);
                // Cancellation also ends the wait below.
                stopTranslation.TrySetResult(0);
            };

            recognizer.SpeechStartDetected += (s, e) =>
            {
                info.AppendLine("Speech start detected event.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechStartDetected?.Invoke(this, speechServicesEventArgs);
            };

            recognizer.SpeechEndDetected += (s, e) =>
            {
                info.AppendLine("Speech end detected event.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechEndDetected?.Invoke(this, speechServicesEventArgs);
            };

            recognizer.SessionStarted += (s, e) =>
            {
                info.AppendLine("Start translation...");
                info.AppendLine("Session started event.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechSessionStarted?.Invoke(this, speechServicesEventArgs);
            };

            recognizer.SessionStopped += (s, e) =>
            {
                info.AppendLine("Session stopped event.");
                info.AppendLine("Stop translation.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechSessionStopped?.Invoke(this, speechServicesEventArgs);
                stopTranslation.TrySetResult(0);
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // BUG FIX: the original used Task.WaitAny, which blocks a thread-pool
            // thread inside an async method (starvation / deadlock risk).
            // Awaiting the completion source is the correct non-blocking wait.
            await stopTranslation.Task.ConfigureAwait(false);

            // Stops translation.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }
    finally
    {
        // Housekeeping: remove the scratch directory even when the download,
        // extraction, or recognition throws (the original leaked it on failure).
        if (Directory.Exists(outputPath))
        {
            Directory.Delete(outputPath, true);
        }
    }
}
// Raises SpeechRecognized for the given sentence. Virtual so subclasses can
// intercept or augment the notification.
protected virtual void FireSpeechRecognized(string sentence)
{
    var args = new SpeechRecognitionEventArgs(sentence);
    SpeechRecognized?.Invoke(this, args);
}
// Standard event-raiser: forwards the prepared event data to all subscribers.
protected void OnSpeechRecognized(SpeechRecognizedEvent e)
{
    var handlers = SpeechRecognized;
    handlers?.Invoke(this, e);
}
// Recognizer callback: logs the recognized text to the console and forwards
// it to OnRecognized subscribers.
private void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    var text = e.Result.Text;
    Console.WriteLine("SpeechRecognized:" + text);
    OnRecognized?.Invoke(text);
}
// Pass-through handler that re-raises the recognizer's event with this
// object as the sender (the incoming sender is intentionally discarded).
private void SpeechRecognizedProxy(object sender, SpeechRecognizedEventArgs e)
{
    var subscribers = SpeechRecognized;
    if (subscribers != null)
    {
        subscribers(this, e);
    }
}