/// <summary>
/// Listens for user speech with the built-in recognition UI and displays the
/// recognized text back on screen.
/// </summary>
/// <param name="sender">Button that triggered this event</param>
/// <param name="e">State information about the routed event</param>
private async void RecognizeWithUI_Click(object sender, RoutedEventArgs e)
{
    // Prompt the user with the localized instruction text.
    resultTextBlock.Text = speechResourceMap.GetValue("SRGSListeningPromptText", speechContext).ValueAsString;

    try
    {
        // Keep the operation so it can be cancelled if we navigate away.
        recognitionOperation = speechRecognizer.RecognizeWithUIAsync();
        SpeechRecognitionResult result = await recognitionOperation;
        HandleRecognitionResult(result);
    }
    catch (TaskCanceledException tce)
    {
        // Thrown when exiting the scenario while recognition is in flight; safe to ignore.
        System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
        System.Diagnostics.Debug.WriteLine(tce.ToString());
    }
    catch (Exception ex)
    {
        if ((uint)ex.HResult == HResultPrivacyStatementDeclined)
        {
            // The user has not accepted the speech privacy policy.
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = "The privacy statement was declined.";
        }
        else
        {
            var dialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
            await dialog.ShowAsync();
        }
    }
}
/// <summary>
/// Runs one web-search recognition pass with the system overlay UI, raising the
/// start event first and the have-result event with the recognized text.
/// </summary>
public async void StartOverlayRecognization()
{
    OnstartEvent(new EventArgs());

    // Build the recognizer and watch for audio-quality problems.
    speechRecognizer = InitSpeechRecognizer();
    speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

    // Web-search topic grammar plus the UI prompts shown by the overlay.
    var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
    speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'weather for London'";
    speechRecognizer.Constraints.Add(webSearchGrammar);

    await speechRecognizer.CompileConstraintsAsync();

    // Listen once and forward the text to subscribers.
    SpeechRecognitionResult recognized = await speechRecognizer.RecognizeWithUIAsync();
    speechToTextEventArgs.SpeechResult = recognized.Text;
    OnHaveResultEvent(speechToTextEventArgs);
}
/// <summary>
/// Listens for a spoken calculation, records it as a notebook line, evaluates
/// it, and speaks the answer back through the media element.
/// </summary>
private async void initSpeeach(object sender, Windows.UI.Xaml.RoutedEventArgs e)
{
    // Prepare the recognizer and its UI prompts.
    await recoWithUI.CompileConstraintsAsync();
    recoWithUI.UIOptions.AudiblePrompt = "What do you want to calculate?";
    recoWithUI.UIOptions.ExampleText = "salary equals 12 times 15";

    var result = await recoWithUI.RecognizeWithUIAsync();
    if (result.Text == "")
    {
        return; // Nothing was recognized.
    }

    // Record the spoken expression as a new notebook line.
    App.Model.OpenNotebook.Lines.Add(new Line
    {
        LineNumber = App.Model.OpenNotebook.Lines.Count + 1,
        Expression = result.Text
    });

    // Evaluate the expression and read the answer aloud.
    double ans = App.Model.OpenNotebook.Solver.EvaluateNested(result.Text);
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync("The answer is " + ans.ToString());
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}
/// <summary>
/// Shows the built-in recognition UI with the given prompt/example and returns
/// the recognized text, or string.Empty when the user cancels.
/// </summary>
/// <param name="prompt">Audible prompt played/shown by the UI.</param>
/// <param name="example">Example phrase shown by the UI.</param>
/// <returns>The recognized text; empty when the user cancelled.</returns>
/// <exception cref="Exception">Recognition failed, was cancelled, or speech is disabled in settings.</exception>
public async Task <string> ListenAsync(string prompt, string example)
{
    if (_SpeechRecognizer == null)
    {
        await LoadRecognizerAsync();
    }
    try
    {
        _SpeechRecognizer.UIOptions.ShowConfirmation = false;
        _SpeechRecognizer.UIOptions.AudiblePrompt = prompt;
        _SpeechRecognizer.UIOptions.ExampleText = example;
        var result = await _SpeechRecognizer.RecognizeWithUIAsync();
        switch (result.Status)
        {
            case SpeechRecognitionResultStatus.Success:
                return result.Text;
            case SpeechRecognitionResultStatus.UserCanceled:
                return string.Empty;
            default:
                throw new Exception("Speech recognition failed. Status: " + result.Status.ToString());
        }
    }
    catch (TaskCanceledException e)
    {
        throw new Exception("Cancelled", e);
    }
    // BUG FIX: HResult is an int, so `e.HResult.Equals(0x80045509)` boxed a uint
    // and always returned false — the filter never matched. Compare as uint.
    // 0x80045509 = speech privacy statement declined / speech disabled in settings.
    catch (Exception e) when ((uint)e.HResult == 0x80045509)
    {
        throw new Exception("Disabled in settings", e);
    }
    // The former trailing `catch { throw; }` was a no-op and has been removed.
}
/// <summary>
/// Starts one UI-based recognition pass if the recognizer is idle and we are
/// not already listening; forwards a successful result to ResultGenerated.
/// </summary>
public async void Recognize()
{
    // Bail out (and clear the flag) unless the recognizer is idle and free.
    if (_isListening || _speechRecognizer.State != SpeechRecognizerState.Idle)
    {
        _isListening = false;
        return;
    }

    try
    {
        StatusBlock.Text = "Listening...";
        _isListening = true;
        _speechRecognizer.UIOptions.IsReadBackEnabled = false;

        // Keep the operation handle so it can be cancelled elsewhere.
        recognitionOperation = _speechRecognizer.RecognizeWithUIAsync();
        SpeechRecognitionResult recognized = await recognitionOperation;
        _isListening = false;

        if (recognized.Status == SpeechRecognitionResultStatus.Success)
        {
            ResultGenerated(recognized);
        }
        else
        {
            //TODO: handle failure;
        }
    }
    catch (Exception ex)
    {
        var dialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
        await dialog.ShowAsync();
        _isListening = false;
    }
}
/// <summary>
/// Synchronously captures one utterance via the built-in speech UI and returns
/// the recognized text.
/// NOTE: this blocks the calling thread on async work (task.Wait()); calling it
/// from a UI thread risks a deadlock.
/// </summary>
/// <returns>The recognized text.</returns>
public static string SpeechToText()
{
    Task <string> task = Task.Run <string>(async() =>
    {
        SpeechRecognizer speechRecognizer = new SpeechRecognizer();
        await speechRecognizer.CompileConstraintsAsync();
        speechRecognizer.UIOptions.ShowConfirmation = false;
        speechRecognizer.UIOptions.IsReadBackEnabled = false;
        try
        {
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
            return speechRecognitionResult.Text;
        }
        catch (Exception ex)
        {
            // -2147199735 == 0x80045509: speech privacy policy not accepted.
            if (ex.HResult == -2147199735)
            {
                // FIX: corrected "Your need" typo in the user-facing message.
                MessageBox.Show("You need to enable speech recognition on your computer.\n\n" +
                                "Open Settings > Privacy > Speech, inking, & typing > click 'Get to know me'.", "Error");
            }
            // BUG FIX: rethrow with `throw;` to preserve the original stack trace
            // (`throw ex;` resets it).
            throw;
        }
    });
    task.Wait();
    return task.Result;
}
}// end of speech-synthesis method above

/// <summary>
/// Runs one foreground recognition pass with the built-in UI and returns the
/// recognized text, or the exception message on failure.
/// </summary>
public async Task <string> BegiRecongnize()
{
    string text = "";
    try
    {
        using (SpeechRecognizer recognizer = new SpeechRecognizer())
        {
            SpeechRecognitionCompilationResult compilation = await recognizer.CompileConstraintsAsync();
            if (compilation.Status == SpeechRecognitionResultStatus.Success)
            {
                recognizer.UIOptions.IsReadBackEnabled = false;
                recognizer.UIOptions.ShowConfirmation = false;
                recognizer.UIOptions.AudiblePrompt = "我在听,请说...";

                SpeechRecognitionResult recognition = await recognizer.RecognizeWithUIAsync();
                if (recognition.Status == SpeechRecognitionResultStatus.Success)
                {
                    text = recognition.Text;
                }
            }
        }
    }
    catch (Exception ex)
    {
        // Best-effort: surface the error message as the returned text.
        text = ex.Message;
    }
    return text;
}// foreground speech recognition
/// <summary>
/// Listens for user speech with the built-in recognition UI and displays the
/// recognized text back on the screen.
/// </summary>
/// <param name="sender">Button that triggered this event</param>
/// <param name="e">State information about the routed event</param>
private async void RecognizeWithUI_Click(object sender, RoutedEventArgs e)
{
    // Prompt the user before listening.
    resultTextBlock.Text = "Speak to choose colors for the circle, background, and border. Try saying 'blue background, red border, green circle'.";

    try
    {
        SpeechRecognitionResult result = await speechRecognizer.RecognizeWithUIAsync();
        HandleRecognitionResult(result);
    }
    catch (ObjectDisposedException ode)
    {
        // Thrown when exiting the scenario while the recognizer is still
        // processing speech; safe to ignore here.
        System.Diagnostics.Debug.WriteLine("ObjectDisposedException caught while recognition in progress (can be ignored):");
        System.Diagnostics.Debug.WriteLine(ode.ToString());
    }
    catch (Exception ex)
    {
        if ((uint)ex.HResult == HResultPrivacyStatementDeclined)
        {
            // The user has not accepted the speech privacy policy.
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = "The privacy statement was declined.";
        }
        else
        {
            var dialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
            await dialog.ShowAsync();
        }
    }
}
/// <summary>
/// Recognizes one of a fixed set of German phrases using the built-in UI.
/// Lazily creates and compiles the recognizer on first use.
/// </summary>
/// <returns>The recognition result, or null on error.</returns>
private async Task <SpeechRecognitionResult> RecognizeSpeech()
{
    try
    {
        if (recognizer == null)
        {
            recognizer = new SpeechRecognizer();
            string[] possibleAnswers =
            {
                "Welche Orte gibt es in meiner Nähe?",
                "Welche sind diese?",
                "Welcher ist dieser?",
                "Welche Orte?",
                "Welche Orte genau?"
            };
            var listConstraint = new SpeechRecognitionListConstraint(possibleAnswers, "Answer");
            recognizer.UIOptions.ExampleText = @"Bsp. 'Welche Orte gibt es in meiner Nähe?'";
            recognizer.Constraints.Add(listConstraint);
            await recognizer.CompileConstraintsAsync();
        }
        SpeechRecognitionResult result = await recognizer.RecognizeWithUIAsync();
        return result;
    }
    catch (Exception exception)
    {
        const uint HResultPrivacyStatementDeclined = 0x80045509;
        if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog("You must accept the speech privacy policy");
            // BUG FIX: ShowAsync().GetResults() throws InvalidOperationException when
            // the operation has not completed; await the dialog instead (we are in an
            // async method already).
            await messageDialog.ShowAsync();
        }
        else
        {
            Debug.WriteLine("Error: " + exception.Message);
        }
    }
    return null;
}
/// <summary>
/// Speech recognition: shows the built-in UI and returns the recognized text.
/// </summary>
/// <returns>The recognized text.</returns>
/// <exception cref="Exception">Recognition completed with a non-success status.</exception>
public async Task <string> RecognizeAsync()
{
    // (Re)initialize the recognizer if never set up or a prior attempt faulted.
    if (_initialization == null || _initialization.IsFaulted)
    {
        _initialization = InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
    }
    await _initialization;

    // Cancel any in-flight recognition before starting a new one.
    CancelRecognitionOperation();

    try
    {
        _recognitionOperation = _speechRecognizer.RecognizeWithUIAsync();
        SpeechRecognitionResult result = await _recognitionOperation;

        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            throw new Exception($"Speech recognition failed. Status: {result.Status}");
        }
        return result.Text;
    }
    catch (Exception ex) when ((uint)ex.HResult == HResultPrivacyStatementDeclined)
    {
        // The user has not accepted the speech privacy statement; tell them, then rethrow.
        var messageDialog = new MessageDialog("您没有同意语音识别隐私声明,请同意后重试");
        await messageDialog.ShowAsync();
        throw;
    }
}
/// <summary>
/// Fire-and-forget speech capture: runs an en-US recognition pass on a worker
/// task and raises Recorded with the text (empty string on any failure).
/// </summary>
public void RecordSpeachToText()
{
    Task.Factory.StartNew(async() =>
    {
        try
        {
            // en-US recognizer; throws COMException if the language pack is missing.
            var speechRecognizer = new SpeechRecognizer(new Windows.Globalization.Language("en-US"));
            await speechRecognizer.CompileConstraintsAsync();
            var result = await speechRecognizer.RecognizeWithUIAsync();
            Recorded?.Invoke(result.Text);
        }
        catch (System.Runtime.InteropServices.COMException)
        {
            // The en-US language pack is not installed.
            Device.BeginInvokeOnMainThread(async() =>
            {
                var dialog = new Windows.UI.Popups.MessageDialog("Please download en-US Language-Pack", "Language not found");
                await dialog.ShowAsync();
                Recorded?.Invoke("");
            });
        }
        catch
        {
            // Most likely microphone permission was denied.
            Device.BeginInvokeOnMainThread(async() =>
            {
                var dialog = new Windows.UI.Popups.MessageDialog("No permission to record", "Permission denied");
                await dialog.ShowAsync();
                Recorded?.Invoke("");
            });
        }
    });
}
/// <summary>
/// Listens once via the built-in UI, then either navigates home (when the user
/// says "ホーム") or shows the text and selects the matching list item.
/// </summary>
private async System.Threading.Tasks.Task VoiceRecognition()
{
    try
    {
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
        String result = speechRecognitionResult.Text;
        if (result != "")
        {
            if (result == "ホーム")
            {
                // "Home": navigate back to the start page.
                webview.Navigate(StartURL);
            }
            else
            {
                // Show the text and select the matching list entry, if any.
                textBox.Text = result;
                foreach (var item in listView.Items)
                {
                    if ((String)item == result)
                    {
                        listView.SelectedItem = item;
                    }
                }
            }
        }
    }
    catch (Exception ex)
    {
        // BUG FIX: was an empty catch that silently swallowed every error.
        // Keep the best-effort behavior but record what went wrong.
        System.Diagnostics.Debug.WriteLine("VoiceRecognition failed: " + ex);
    }
}
/// <summary>
/// Recognizes one of the known navigation commands with the built-in UI.
/// The recognizer is created and compiled lazily on first use.
/// </summary>
/// <returns>The raw recognition result.</returns>
private async Task <SpeechRecognitionResult> SpeechRecognizeAsync()
{
    if (_speechRecognizer == null)
    {
        // Constrain recognition to the fixed command vocabulary.
        _speechRecognizer = new SpeechRecognizer();
        var commands = new[] { "order", "product", "manage", "capture", "home" };
        _speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(commands));
        await _speechRecognizer.CompileConstraintsAsync();
    }

    // Listen once and hand back the result unmodified.
    return await _speechRecognizer.RecognizeWithUIAsync();
}
/// <summary>
/// On tap: listens once and, on success, recolors the grid background to the
/// spoken color.
/// </summary>
private async void GrdMain_Tapped(object sender, TappedRoutedEventArgs e)
{
    SpeechRecognitionResult result = await _recognizer.RecognizeWithUIAsync();
    if (result.Status != SpeechRecognitionResultStatus.Success)
    {
        return;
    }
    GrdMain.Background = new SolidColorBrush(GetColor(result.Text));
}
/// <summary>
/// One-shot dictation: compile the default grammar, listen with the built-in
/// UI, and put the recognized text into the search box.
/// </summary>
private async void btnSpeechRecognition_Tapped(object sender, TappedRoutedEventArgs e)
{
    var recognizer = new SpeechRecognizer();
    await recognizer.CompileConstraintsAsync();

    var recognition = await recognizer.RecognizeWithUIAsync();
    txtSearch.Text = recognition.Text;
}
/// <summary>
/// Shows the speech recognition UI and returns the result, or null when
/// recognition fails for any reason (best-effort contract preserved).
/// </summary>
public async Task <SpeechRecognitionResult> ShowSpeechUIAsync()
{
    try
    {
        SpeechRecognitionResult recognitionResult = await speechRecognizerUI.RecognizeWithUIAsync();
        return recognitionResult;
    }
    catch (Exception ex)
    {
        // FIX: was a silent empty catch; still return null, but log the failure
        // so problems are diagnosable.
        System.Diagnostics.Debug.WriteLine("ShowSpeechUIAsync failed: " + ex);
    }
    return null;
}
/// <summary>
/// Dictates one utterance via the built-in UI and shows it in the search box.
/// </summary>
private async void btnSpeechRecognition_Tapped(object sender, TappedRoutedEventArgs e)
{
    // Fresh recognizer with the default dictation grammar.
    var recognizer = new SpeechRecognizer();
    await recognizer.CompileConstraintsAsync();

    // Listen once, then display what was heard.
    var heard = await recognizer.RecognizeWithUIAsync();
    txtSearch.Text = heard.Text;
}
/// <summary>
/// General dictation: listens once with the built-in UI and shows the text.
/// </summary>
async private void OnRecognize(object sender, RoutedEventArgs e)
{
    var recognizer = new SpeechRecognizer();
    recognizer.UIOptions.ExampleText = "Say something, I'm giving up on you.";
    await recognizer.CompileConstraintsAsync();

    var dictation = await recognizer.RecognizeWithUIAsync();
    txt_dictation.Text = dictation.Text;
}
/// <summary>
/// Listens for user speech with the built-in recognition UI and displays the
/// recognized text (with its constraint tag and confidence) on screen.
/// </summary>
/// <param name="sender">Button that triggered this event</param>
/// <param name="e">State information about the routed event</param>
private async void RecognizeWithUIListConstraint_Click(object sender, RoutedEventArgs e)
{
    heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Collapsed;

    try
    {
        // Keep the operation so it can be cancelled if we navigate away.
        recognitionOperation = speechRecognizer.RecognizeWithUIAsync();
        SpeechRecognitionResult result = await recognitionOperation;

        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = string.Format("Speech Recognition Failed, Status: {0}", result.Status.ToString());
        }
        else
        {
            // Only attempt to retrieve the tag if we didn't hit the garbage rule.
            string tag = result.Constraint != null ? result.Constraint.Tag : "unknown";
            heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = string.Format("Heard: '{0}', (Tag: '{1}', Confidence: {2})", result.Text, tag, result.Confidence.ToString());
        }
    }
    catch (TaskCanceledException tce)
    {
        // Thrown when exiting the scenario while the recognizer is still busy;
        // safe to ignore since it happens on navigation.
        System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
        System.Diagnostics.Debug.WriteLine(tce.ToString());
    }
    catch (Exception ex)
    {
        // Handle the speech privacy policy error.
        if ((uint)ex.HResult == HResultPrivacyStatementDeclined)
        {
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = "The privacy statement was declined.";
        }
        else
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
            await messageDialog.ShowAsync();
        }
    }
}
/// <summary>
/// Dictates one utterance and echoes it back in a message dialog.
/// </summary>
private async void button_Click_1(object sender, RoutedEventArgs e)
{
    // Default dictation grammar.
    var recognizer = new SpeechRecognizer();
    await recognizer.CompileConstraintsAsync();

    SpeechRecognitionResult recognition = await recognizer.RecognizeWithUIAsync();

    // Echo the recognized text back to the user.
    var dialog = new MessageDialog(recognition.Text, "Text spoken");
    await dialog.ShowAsync();
}
/// <summary>
/// Listens for a conversion (or "repeat") command using the matching grammar
/// file and passes a successful result to the view model. The button stays
/// disabled while converting and is re-enabled on failure.
/// </summary>
private async void ConvertButton_Click(object sender, RoutedEventArgs e)
{
    // Disable button so it cannot be tapped twice.
    ConvertButton.IsEnabled = false;

    // Stop any speech synthesis playback while we listen.
    if (answerElement != null)
    {
        answerElement.Stop();
    }

    // Pick the grammar file based on whether a "repeat" command is expected.
    Uri grammarUri = viewModel.CheckForRepeat
        ? new Uri("ms-appx:///Grammars/Repeat.grxml")
        : new Uri("ms-appx:///Grammars/Convert.grxml");
    StorageFile file = await StorageFile.GetFileFromApplicationUriAsync(grammarUri);

    var speechRecognizer = new SpeechRecognizer();
    speechRecognizer.Constraints.Add(new SpeechRecognitionGrammarFileConstraint(file));
    await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.UIOptions.ShowConfirmation = false;

    try
    {
        var result = await speechRecognizer.RecognizeWithUIAsync();
        if (result.Status == SpeechRecognitionResultStatus.Success)
        {
            viewModel.Convert(result);
        }
        else
        {
            ConvertButton.IsEnabled = true;
        }
    }
    catch (Exception)
    {
        // Catch this so we don't crash when receiving a phone call.
        ConvertButton.IsEnabled = true;
    }
}
/// <summary>
/// Recognizes one utterance (with or without the system UI) and returns the
/// text of the highest-confidence alternate, or string.Empty on failure.
/// </summary>
/// <param name="withUI">True to use the built-in recognition UI.</param>
public async Task <string> GetTextFromSpeechAsync(bool withUI = false)
{
    if (_recognizer == null)
    {
        await InitializeRecognizerAsync();
    }

    // Listen once, with or without the system UI.
    SpeechRecognitionResult recognition = withUI
        ? await _recognizer.RecognizeWithUIAsync()
        : await _recognizer.RecognizeAsync();

    if (recognition.Status != SpeechRecognitionResultStatus.Success ||
        recognition.Confidence == SpeechRecognitionConfidence.Rejected)
    {
        return string.Empty;
    }

    Debug.WriteLine($"[Speech to Text]: result: {recognition.Text}, {recognition.RawConfidence.ToString()}, {recognition.Confidence.ToString()}");

    var alternativeResults = recognition.GetAlternates(MaxRecognitionResultAlternates);
    foreach (var r in alternativeResults)
    {
        Debug.WriteLine($"[Speech to Text]: alternative: {r.Text}, {r.RawConfidence.ToString()}, {r.Confidence.ToString()}");
    }

    // Prefer the best available confidence tier: High, then Medium, then Low.
    foreach (var tier in new[] { SpeechRecognitionConfidence.High, SpeechRecognitionConfidence.Medium, SpeechRecognitionConfidence.Low })
    {
        var candidate = alternativeResults.Where(r => r.Confidence == tier).FirstOrDefault();
        if (candidate != null)
        {
            return candidate.Text;
        }
    }
    return string.Empty;
}
/// <summary>
/// Captures one utterance from the microphone — silently for gamepad input,
/// with the system UI otherwise — and returns the recognized text (empty on
/// any failure).
/// </summary>
/// <param name="isGamepad">True to recognize without showing the system UI.</param>
async Task <string> RecordSpeechFromMicrophoneAsync(bool isGamepad)
{
    string recognizedText = string.Empty;
    try
    {
        using (SpeechRecognizer recognizer = new SpeechRecognizer())
        {
            await recognizer.CompileConstraintsAsync();

            // Gamepad input skips the system UI; otherwise show it.
            SpeechRecognitionResult result = isGamepad
                ? await recognizer.RecognizeAsync()
                : await recognizer.RecognizeWithUIAsync();

            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                recognizedText = result.Text;
            }
        }
    }
    catch (Exception err)
    {
        // SPERR_SPEECH_PRIVACY_POLICY_NOT_ACCEPTED: the user has not accepted
        // the speech recognition privacy policy.
        const int privacyPolicyHResult = unchecked ((int)0x80045509);
        if (err.HResult == privacyPolicyHResult)
        {
            var dialog = new MessageDialog("You will need to accept the speech privacy policy in order to use speech recognition in this app.");
            await dialog.ShowAsync();
        }
        // Other errors are deliberately ignored: best-effort, return empty text.
    }
    return recognizedText;
}
/// <summary>
/// Recognizes one of a small fixed vocabulary ("one", "two", "three") using a
/// list constraint and the built-in UI, then shows the recognized word.
/// </summary>
async private void OnRecognizeFromList(object sender, RoutedEventArgs e)
{
    // List constraints are a good fit when listening for a small fixed vocabulary.
    var recognizer = new SpeechRecognizer();
    recognizer.UIOptions.ExampleText = "To test this say one, two, or three";

    // BUG FIX: the constraint list used to be built by splitting the example
    // sentence on ',', which produced phrases like "To test this say one" and
    // " or three" instead of the intended words. Constrain to the words themselves.
    var list = new SpeechRecognitionListConstraint(new[] { "one", "two", "three" });
    recognizer.Constraints.Add(list);
    await recognizer.CompileConstraintsAsync();

    var result = await recognizer.RecognizeWithUIAsync();
    txt_dictation.Text = result.Text;
}
/// <summary>
/// Dictates one en-US utterance with the built-in UI and forwards the text to
/// the LUIS service for intent parsing.
/// </summary>
public async void StartRecognizing_Click(object sender, RoutedEventArgs e)
{
    // en-US recognizer with the default dictation grammar.
    var recognizer = new SpeechRecognizer(new Windows.Globalization.Language("en-US"));
    await recognizer.CompileConstraintsAsync();

    // Listen once, then hand the text to LUIS.
    SpeechRecognitionResult recognition = await recognizer.RecognizeWithUIAsync();
    CallLUISService(recognition.Text);
}
/// <summary>
/// Speaks a question, then listens for a "Yes"/"No" answer with the built-in
/// UI. Returns true only when "Yes" was recognized.
/// </summary>
/// <param name="question">The question to speak before listening.</param>
async Task<bool> answerYN(string question)
{
    var language = SpeechRecognizer.SystemSpeechLanguage;
    speakString(question);

    // Constrain recognition to a simple yes/no vocabulary.
    string[] yn = { "Yes", "No" };
    SpeechRecognizer recognizer = new SpeechRecognizer();
    recognizer.Constraints.Add(new SpeechRecognitionListConstraint(yn, "yesOrNo"));
    await recognizer.CompileConstraintsAsync();

    SpeechRecognitionResult answer = await recognizer.RecognizeWithUIAsync();
    return answer.Text == "Yes";
}
/// <summary>
/// Listens for one utterance, appends it to the output, and posts it as a user
/// message into the active bot conversation.
/// </summary>
private async void ClickMe_Click(object sender, RoutedEventArgs e)
{
    await InitializeRecognizerAsync();

    var result = await _recognizer.RecognizeWithUIAsync();
    this.Output.Text += result.Text + "\n";

    // Forward the recognized text to the conversation.
    Message userMessage = new Message
    {
        FromProperty = fromUser,
        Text = result.Text
    };
    await client.Conversations.PostMessageAsync(conversation.ConversationId, userMessage);
}
/// <summary>
/// Voice sign-in: prompts the user to repeat a randomly chosen passphrase and,
/// if the phrase matches and the employee code is correct, navigates to the
/// target page; otherwise marks the attempt as a failed connection.
/// </summary>
async private void OnSayPrompt(object sender, RoutedEventArgs e)
{
    try
    {
        var button = sender as Button;
        button.Content = "...";
        bool failed_signin = true;

        // Pick one of the stored passphrases at random as the challenge prompt.
        Random random = new Random();
        int seed_value = random.Next(0, 1000);
        int selection = seed_value % 4;
        var tuple = passphrases[selection];
        var prompt = (string)tuple.Phrase;

        // Constrain recognition to the known passphrases.
        var recognizer = new SpeechRecognizer();
        var phrases = passphrases.Select(i => (string)i.Phrase).ToList();
        recognizer.Constraints.Add(new SpeechRecognitionListConstraint(phrases));
        recognizer.StateChanged += Recognizer_StateChanged;
        recognizer.UIOptions.ExampleText = $"Repeat the phrase '{prompt}'";
        await recognizer.CompileConstraintsAsync();

        var result = await recognizer.RecognizeWithUIAsync();
        if (result.Status == SpeechRecognitionResultStatus.Success)
        {
            // FIX: compare with OrdinalIgnoreCase instead of ToLower()==ToLower();
            // culture-sensitive lowercasing can mis-compare (e.g. Turkish 'I').
            if (string.Equals(result.Text, prompt, StringComparison.OrdinalIgnoreCase))
            {
                if (txt_employeecode2.Password == "employee")
                {
                    failed_signin = false;
                    Frame.Navigate(_target_type);
                }
            }
        }
        if (failed_signin)
        {
            button.Content = "Failed connection";
        }
    }
    catch (Exception ex)
    {
        await new MessageDialog(ex.Message).ShowAsync();
    }
}
/// <summary>
/// Toggles the listening indicators, runs one UI-based recognition pass with
/// hypothesis updates, and shows the final text.
/// </summary>
private async void UISpeechListeningClick(object sender, RoutedEventArgs e)
{
    SpeechListeningChanged(true, UISpeechListening, UISpeechLoading);

    // Recognizer configured for quiet operation: no read-back, no confirmation.
    var recognizer = new SpeechRecognizer();
    recognizer.HypothesisGenerated += UISpeechRecognizer_HypothesisGenerated;
    recognizer.UIOptions.IsReadBackEnabled = false;
    recognizer.UIOptions.ShowConfirmation = false;
    await recognizer.CompileConstraintsAsync();

    SpeechRecognitionResult recognition = await recognizer.RecognizeWithUIAsync();
    UISpeechText.Text = recognition.Text;

    SpeechListeningChanged(false, UISpeechListening, UISpeechLoading);
}
/// <summary>
/// Runs one UI-based recognition pass and returns the text, or string.Empty
/// when recognition did not succeed. The recognizer is disposed afterwards.
/// </summary>
async Task <string> GetSpeechTextAsync()
{
    string text = string.Empty;
    using (SpeechRecognizer recognizer = new SpeechRecognizer())
    {
        await recognizer.CompileConstraintsAsync();
        SpeechRecognitionResult result = await recognizer.RecognizeWithUIAsync();
        if (result.Status == SpeechRecognitionResultStatus.Success)
        {
            text = result.Text;
        }
    }
    return text;
}
/// <summary>
/// Speech to text: listens once with the built-in UI and returns the text.
/// </summary>
/// <returns>The recognized text, or empty on a non-success status.</returns>
public static async Task <string> SpeechToTextAsync()
{
    var recognizedText = string.Empty;

    // Give the speaker at most 3 seconds to start talking.
    _recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(3);
    await _recognizer.CompileConstraintsAsync();

    SpeechRecognitionResult result = await _recognizer.RecognizeWithUIAsync();
    if (result.Status == SpeechRecognitionResultStatus.Success)
    {
        recognizedText = result.Text;
    }
    return recognizedText;
}
/// <summary>
/// Switches the buttons into "listening" mode and starts a UI-based
/// recognition pass in the language selected in the topic-language list.
/// </summary>
private async void StartWithUi()
{
    // Flip button visibility into the listening state.
    _startListening.Visibility = Visibility.Collapsed;
    _startListeningWithUi.Visibility = Visibility.Collapsed;
    _stopListening.Visibility = Visibility.Visible;

    // Build a recognizer for the currently selected language and wire up telemetry.
    var selectedLanguage = (Windows.Globalization.Language)_topicLanguages.SelectedItem;
    recognizer = new SpeechRecognizer(selectedLanguage);
    recognizer.HypothesisGenerated += Recognizer_HypothesisGenerated;
    recognizer.RecognitionQualityDegrading += Recognizer_RecognitionQualityDegrading;
    recognizer.StateChanged += Recognizer_StateChanged;

    await recognizer.CompileConstraintsAsync();
    await recognizer.RecognizeWithUIAsync();
}
/// <summary>
/// Loops until cancellation: waits for the trigger phrase "Captains Log",
/// then takes a dictation entry via the system UI and stores it as a log entry.
/// </summary>
private async void StartVoiceRecognition()
{
    await SpeakText( "Say Captains Log at any time to create a log entry." );

    speechRecognizerCaptainsLogCommand = new SpeechRecognizer();

    // FIX: the trigger constraint used to be added and recompiled on EVERY loop
    // iteration, accumulating duplicate constraints in the recognizer. Add and
    // compile it once, before the loop.
    ISpeechRecognitionConstraint commandConstraint =
        new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
    speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
    await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();

    while ( !cancellationSource.IsCancellationRequested )
    {
        // Listen for user to say "Captains Log".
        SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();
        if ( commandResult.Status != SpeechRecognitionResultStatus.Success
            || commandResult.Confidence == SpeechRecognitionConfidence.Rejected
            || cancellationSource.IsCancellationRequested )
        {
            continue;
        }

        // Trigger phrase recognized: take the user's dictation entry.
        // FIX: `using` guarantees the dictation recognizer is disposed on every
        // path, including exceptions (previously it leaked if recognition threw).
        using ( var captainsLogDictationRecognizer = new SpeechRecognizer() )
        {
            ISpeechRecognitionConstraint dictationConstraint =
                new SpeechRecognitionTopicConstraint( SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );
            captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );
            await captainsLogDictationRecognizer.CompileConstraintsAsync();

            captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
            captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
            captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
            captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;

            SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();
            if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
                || dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
                || string.IsNullOrWhiteSpace( dictationResult.Text )
                || cancellationSource.IsCancellationRequested )
            {
                continue;
            }

            // Recognized the user's dictation entry.
            AddLogEntry( dictationResult.Text );
        }
    }
    speechRecognizerCaptainsLogCommand.Dispose();
}
/// <summary>
/// Windows Phone only: dictates a fragment with the built-in UI and appends it
/// to the outgoing text message. No-op on other platforms.
/// </summary>
private async void Talk()
{
#if WINDOWS_PHONE_APP
    // Default dictation grammar.
    var recognizer = new SpeechRecognizer();
    await recognizer.CompileConstraintsAsync();

    // Listen once and append what was heard to the message being composed.
    var recognition = await recognizer.RecognizeWithUIAsync();
    TextMessage += recognition.Text;
#endif
}
/// <summary>
/// Runs one speech recognition pass, optionally constrained by a grammar built
/// from <paramref name="constraints"/>, and maps the recognized text back onto a
/// RecognizedSpeech result (text, keyword, index, action, status, confidence).
///
/// Constraint syntax (inferred from the parsing below — confirm against callers):
/// items separated by ';', synonym words separated by ',', with an optional
/// "keyword=" prefix per item, e.g. "go=forward,ahead;stop=halt". The special
/// form "{table:NAME}" builds the word list from the keys of
/// MainPage.Instance.mainDictionary[NAME] and maps the match to its value.
/// </summary>
/// <param name="constraints">Constraint spec as above, or null/blank for free dictation.</param>
/// <param name="ui">True to use the system recognition UI, false for silent recognition.</param>
/// <returns>A RecognizedSpeech; result.error = -3 when a table constraint is empty.</returns>
public async Task<RecognizedSpeech> Recognize(string constraints, bool ui)
{
    SpeechRecognitionGrammarFileConstraint grammarFileConstraint = null;
    var result = new RecognizedSpeech();
    bool isTable = false;
    Dictionary<string, string> dictionary = null;
    if (!string.IsNullOrWhiteSpace(constraints))
    {
        isTable = constraints.StartsWith("{table:");
        if (isTable)
        {
            // "{table:NAME}" — pull the word list from the named dictionary.
            var name = constraints.Substring(7);
            var i = name.IndexOf("}", StringComparison.CurrentCultureIgnoreCase);
            name = name.Substring(0, i);
            var constraintBuilder = new StringBuilder();
            dictionary = MainPage.Instance.mainDictionary[name];
            Debug.WriteLine("table "+name+" count=" + dictionary.Count);
            // Keys become the spoken vocabulary; commas inside keys would break
            // the item format below, so they are flattened to spaces.
            foreach (var key in dictionary.Keys)
            {
                constraintBuilder.Append(key.Replace(","," "));
                constraintBuilder.Append(",");
            }
            if (constraintBuilder.Length < 2)
            {
                // Empty table: nothing to recognize against.
                result.error = -3;
                return result;
            }
            // Drop the trailing comma, then neutralize characters that clash
            // with the item separator or the grammar XML.
            // NOTE(review): the second .Replace("&"," and ") is a duplicate of
            // the first and has no effect — looks like a copy/paste slip.
            constraints = constraintBuilder.ToString(0, constraintBuilder.Length - 1);
            constraints = constraints.Replace(";", "-").Replace("&"," and ").Replace("&"," and ");
        }
        //build grammar constraints: expand the spec into SRGS <item> elements
        //and substitute them into the grammar template file.
        var grammarFileTemplate = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///GrammarConstraintTemplate.grxml"));
        const string wordTemplate = "<item>{0}</item>";
        const string itemTemplate = "<item><one-of>{0}</one-of><tag>out=\"{1}\";</tag></item>";
        var itemBuilder = new StringBuilder();
        var items = constraints.Split(';');
        string keyword = null;
        foreach (var itemPart in items)
        {
            var item = itemPart;
            // Optional "keyword=" prefix names the semantic tag for this item.
            // NOTE(review): keyword is not reset per item, so an item without
            // '=' reuses the previous item's keyword — confirm this is intended.
            var equals = item.IndexOf('=');
            if (equals > -1)
            {
                keyword = item.Substring(0, equals);
                item = item.Substring(equals + 1);
            }
            // Each comma-separated word becomes an alternative.
            var words = item.Split(',');
            var wordBuilder = new StringBuilder();
            foreach (var word in words)
            {
                wordBuilder.AppendFormat(wordTemplate, word);
            }
            if (!string.IsNullOrWhiteSpace(keyword))
            {
                itemBuilder.AppendFormat(itemTemplate, wordBuilder, keyword);
            }
            else
            {
                itemBuilder.Append(wordBuilder);
            }
        }
        // Write the filled-in grammar to local storage and wrap it as a constraint.
        var localFolder = ApplicationData.Current.LocalFolder;
        var grammarTemplate = await FileIO.ReadTextAsync(grammarFileTemplate);
        var grammarFile = await localFolder.CreateFileAsync("GrammarConstraint.grxml", CreationCollisionOption.ReplaceExisting);
        var finalGrammarText = string.Format(grammarTemplate, itemBuilder);
        await FileIO.WriteTextAsync(grammarFile, finalGrammarText);
        grammarFileConstraint = new SpeechRecognitionGrammarFileConstraint(grammarFile, "constraints");
    }
    // Stop any recognition still in flight before replacing the recognizer.
    if (isRecognizing && recognizer != null)
    {
        await recognizer.StopRecognitionAsync();
    }
    recognizer = new SpeechRecognizer();
    //if (recognizer != null)
    //{
    //}
    //else
    //{
    //    //recognizer.Constraints?.Clear();
    //    //await recognizer.CompileConstraintsAsync();
    //}
    if (grammarFileConstraint != null)
    {
        recognizer.Constraints.Add(grammarFileConstraint);
    }
    SpeechRecognitionResult recognize = null;
    try
    {
        // Status events bracket the compile/listen sequence so the UI can react.
        isRecognizing = false;
        SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = SpeechStatus.None });
        await recognizer.CompileConstraintsAsync();
        isRecognizing = true;
        SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = SpeechStatus.Listening });
        recognize = await (ui ? recognizer.RecognizeWithUIAsync() : recognizer.RecognizeAsync());
    }
    catch (Exception e)
    {
        // Best-effort: report what we have. `recognize` is always null here
        // (the await above threw), so the status assignment never runs;
        // confidence = 5 is an out-of-band "error" marker for callers.
        Debug.WriteLine(e.GetType() + ":" + e.Message);
        if (recognize != null)
        {
            result.status = recognize.Status;
        }
        result.confidence = 5;
        return result;
    }
    finally
    {
        isRecognizing = false;
        SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = isUserStopped ? SpeechStatus.Stopped : SpeechStatus.None });
    }
    result.status = isUserStopped ? SpeechRecognitionResultStatus.UserCanceled : recognize.Status;
    if (constraints == null)
    {
        // Free dictation: return the raw text.
        result.text = recognize.Text;
        return result;
    }
    // Map the recognized text back to the constraint item it came from.
    result.confidence = (int) recognize.Confidence;
    var text = recognize.Text.ToUpperInvariant();
    var items2 = constraints.Split(';');
    string keyword2 = null;
    var index = 1;
    foreach (var itemPart in items2)
    {
        var item = itemPart;
        var equals = item.IndexOf('=');
        if (equals > -1)
        {
            keyword2 = item.Substring(0, equals);
            item = item.Substring(equals + 1);
        }
        var words = item.Split(',');
        var innerIndex = 1;
        foreach (var word in words)
        {
            if (word.ToUpperInvariant().Equals(text))
            {
                // Prefer the item keyword over the literal word; for table
                // constraints also resolve the mapped action value.
                result.text = keyword2 ?? word;
                if (isTable)
                {
                    result.action = dictionary[result.text];
                }
                // Single-item specs report the word position, multi-item specs
                // report the item position.
                result.index = items2.Length == 1 ? innerIndex : index;
                return result;
            }
            innerIndex++;
        }
        index++;
    }
    // No constraint word matched; fall back to the raw recognized text.
    result.text = recognize.Text;
    return result;
}
/// <summary>
/// Voice input: listens once with the built-in UI and puts the text into the
/// input box; shows a dialog with an error/"not recognized" message otherwise.
/// </summary>
private async void VoiceInput_Click(object sender, RoutedEventArgs e)
{
    string feedback = "";
    try
    {
        var recognizer = new SpeechRecognizer();
        SpeechRecognitionCompilationResult compilation = await recognizer.CompileConstraintsAsync();
        if (compilation.Status == SpeechRecognitionResultStatus.Success)
        {
            var recognition = await recognizer.RecognizeWithUIAsync();
            if (recognition.Confidence == SpeechRecognitionConfidence.Rejected)
            {
                feedback = "语音识别不到";
            }
            else
            {
                InputText.Text = recognition.Text;
            }
        }
    }
    catch (Exception ex)
    {
        feedback = "异常信息:" + ex.Message + ex.HResult;
    }

    if (feedback != "")
    {
        await new MessageDialog(feedback).ShowAsync();
    }
}