/// <summary>
/// Plays an audible cue (unless muted), shows the command prompt, and performs a single
/// speech-recognition pass from the default microphone, storing the result in
/// <c>VoiceResult</c> (empty string when recognition did not succeed).
/// </summary>
/// <param name="command">Prompt text shown to the user while listening.</param>
public async Task RecordSpeechFromMicrophoneAsync(string command)
{
    string recognizedText = string.Empty;

    if (!this.mute)
    {
        // Audible cue so the user knows the app is about to listen.
        ElementSoundPlayer.State = ElementSoundPlayerState.On;
        ElementSoundPlayer.Volume = 0.5;
        ElementSoundPlayer.Play(ElementSoundKind.Invoke);
        //BackgroundMediaPlayer.Current.SetUriSource(new Uri("ms-winsoundevent:Notification.Mail"));
    }

    BackgroundMediaPlayer.Current.Play();

    // FIX: the returned Task was silently dropped (CS4014). The prompt is shown
    // fire-and-forget so listening can start while the dialog is visible; the discard
    // makes that intent explicit. NOTE(review): if exceptions from the dialog must be
    // observed, await this call instead — confirm the intended UX.
    _ = Helper.MessageBoxLongAsync(command, "Voice");

    using (SpeechRecognizer recognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer())
    {
        await recognizer.CompileConstraintsAsync();
        SpeechRecognitionResult result = await recognizer.RecognizeAsync();
        if (result.Status == SpeechRecognitionResultStatus.Success)
        {
            recognizedText = result.Text;
        }
    }

    VoiceResult = recognizedText;
}
/// <summary>
/// Performs one recognition pass and yields the recognized text when the speaker pauses.
/// Signals <c>ListenSubject</c> true while listening and false when finished.
/// </summary>
/// <returns>A cold observable producing the recognized phrase.</returns>
public override IObservable<string> ListenUntilPause() => Observable.FromAsync(async ct =>
{
    // FIX: the recognizer was never disposed, leaking the native recognition session
    // on every subscription. NOTE(review): the cancellation token `ct` is still not
    // forwarded to RecognizeAsync (as in the original) — confirm whether cancellation
    // should abort recognition.
    using (var speech = new WinSpeechRecognizer())
    {
        await speech.CompileConstraintsAsync();
        this.ListenSubject.OnNext(true);
        try
        {
            var result = await speech.RecognizeAsync();
            return result.Text;
        }
        finally
        {
            // FIX: always clear the listening indicator, even when recognition throws.
            this.ListenSubject.OnNext(false);
        }
    }
});
/// <summary>
/// Runs a single recognition pass and raises either the recognized-text callback on
/// success or the error callback otherwise.
/// </summary>
/// <remarks>
/// async void is retained: this is a fire-and-forget entry point, so exceptions thrown
/// here cannot be observed by callers.
/// </remarks>
public async void StartRecognizingSpeech()
{
    var recognition = await speechRecognizer.RecognizeAsync();

    if (recognition.Status != SpeechRecognitionResultStatus.Success)
    {
        this.OnSpeechRecognitionError();
        return;
    }

    this.OnSpeechRecognized(recognition.Text);
}
/// <summary>
/// Performs a recognition pass and emits each space-delimited word of the recognized
/// phrase as a separate element. The subscription's cleanup action clears the listening
/// indicator and disposes the recognizer.
/// </summary>
/// <returns>An observable sequence of recognized words.</returns>
public override IObservable<string> ContinuousDictation()
{
    return Observable.Create<string>(async ob =>
    {
        var recognizer = new WinSpeechRecognizer();
        await recognizer.CompileConstraintsAsync();

        this.ListenSubject.OnNext(true);
        var recognition = await recognizer.RecognizeAsync();

        // Emit word-by-word rather than the whole phrase at once.
        foreach (var token in recognition.Text.Split(' '))
        {
            ob.OnNext(token);
        }

        // Cleanup runs when the subscriber disposes.
        return () =>
        {
            this.ListenSubject.OnNext(false);
            recognizer.Dispose();
        };
    });
}
/// <summary>
/// Performs one speech-recognition pass using the en-US language model.
/// </summary>
/// <returns>The recognized text on success; otherwise null.</returns>
public static async Task<string> GetText()
{
    var language = new Windows.Globalization.Language("en-US");
    using (var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(language))
    {
        await speechRecognizer.CompileConstraintsAsync();

        // FIX: removed a stray empty statement (";;") after the subscription.
        speechRecognizer.StateChanged += SpeechRecognizerStateChangedHandler;
        try
        {
            var result = await speechRecognizer.RecognizeAsync();
            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                return result.Text;
            }
            // TODO: inspect result confidence and other factors before rejecting.
        }
        finally
        {
            // FIX: detach the handler so the event source does not outlive this call
            // holding a reference to the subscriber.
            speechRecognizer.StateChanged -= SpeechRecognizerStateChangedHandler;
        }
    }
    return null;
}
// Single-shot listening entry point for the ISpeechRecognition contract.
// async void is dictated by the interface signature; exceptions that escape the catch
// blocks below are unobservable by callers.
async void ISpeechRecognition.StartListening()
{
    _inactive = false;

    // Start recognition.
    try
    {
        if (_speechModule.TextToSpeech.IsSpeaking)
        {
            // Skip listening while TTS output is in progress (the recognizer would
            // otherwise pick up the engine's own speech); just log and return.
            _speechModule.LanguageModel.AI.Engine.Debugger.Log(
                Galatea.Diagnostics.DebuggerLogLevel.Diagnostic,
                "TTS is speaking; Listening paused...");
        }
        else
        {
            //// Get out of this f*****g loop
            //if (_isListening) return;
            //_isListening = true;

            // Start Listening
            int ruleId = -1;
            SpeechRecognitionStatus status = SpeechRecognitionStatus.Empty;

            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            // If successful, display the recognition result.
            if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
            {
                // NOTE(review): ruleId/status are only updated when the recognized text
                // is EMPTY — a success with actual text leaves ruleId == -1 and
                // status == Empty. This looks inverted; confirm against the consumer of
                // the Recognized event before changing it.
                if (string.IsNullOrEmpty(speechRecognitionResult.Text))
                {
                    ruleId = 0;
                    status = speechRecognitionResult.Status.Convert();
                }
            }
            else
            {
                //resultTextBlock.Visibility = Visibility.Visible;
                //resultTextBlock.Text = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
            }

            // Fire Event — raised regardless of recognition status.
            Recognized?.Invoke(this, new SpeechRecognizedEventArgs(ruleId, speechRecognitionResult.Text, null, status));

            //_isListening = false;
        }
    }
    catch (TaskCanceledException exception)
    {
        // TaskCanceledException will be thrown if you exit the scenario while the recognizer is actively
        // processing speech. Since this happens here when we navigate out of the scenario, don't try to
        // show a message dialog for this exception.
        System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
        System.Diagnostics.Debug.WriteLine(exception.ToString());
    }
    catch (System.InvalidOperationException exception)
    {
        // No idea why it keeps throwing this Exception
        _speechModule.LanguageModel.AI.Engine.Debugger.Log(Galatea.Diagnostics.DebuggerLogLevel.Error, exception.Message);
        _speechModule.LanguageModel.AI.Engine.Debugger.Log(Galatea.Diagnostics.DebuggerLogLevel.StackTrace, exception.StackTrace);
    }
    catch (Exception exception)
    {
        string msg;

        // Handle the speech privacy policy error.
        if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
        {
            msg = Galatea.Globalization.RoboticsResources.SpeechRecognition_PrivacySettings_NotAccepted;
            throw new TeaSpeechException(msg, exception);
        }
        //else
        //{
        //    msg = exception.Message;
        //}

        //var messageDialog = new Windows.UI.Popups.MessageDialog(msg, "Exception");
        //await messageDialog.ShowAsync();

        // Rethrow preserving the original stack trace (bare `throw`).
        throw;
    }
}