private async void btnTalk_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    try
    {
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        // If successful, display the recognition result.
        if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
        {
            txtSource.Text = speechRecognitionResult.Text;
        }
    }
    catch (Exception exception)
    {
        if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
        {
            //this.resultTextBlock.Visibility = Visibility.Visible;
            lblResult.Text = "Sorry, speech recognition could not be used. The speech privacy statement was not accepted.";
        }
        else
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
            await messageDialog.ShowAsync();
        }
    }
}
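// The handler above references HResultPrivacyStatementDeclined without defining it.
// A minimal sketch of that constant, assuming the SPERR_SPEECH_PRIVACY_POLICY_NOT_ACCEPTED
// value (0x80045509) that a later snippet in this listing uses for the same check:
private const uint HResultPrivacyStatementDeclined = 0x80045509;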
private async void StartRecognizing_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Confirm the recognized text with the user before saving it.
    ContentDialog notifySave = new ContentDialog()
    {
        Title = "Save this note?",
        Content = speechRecognitionResult.Text,
        PrimaryButtonText = "Save Note",
        SecondaryButtonText = "Cancel"
    };

    ContentDialogResult result = await notifySave.ShowAsync();
    if (result == ContentDialogResult.Primary)
    {
        tbNote.Text = speechRecognitionResult.Text;
    }
    else
    {
        // User pressed Cancel or the back arrow; discard the result.
    }

    // Alternatively, show the recognition result in a message dialog:
    //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
    //await messageDialog.ShowAsync();
} // end StartRecognizing_Click
private async void RecognizeWithDictationGrammar_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

    // Start recognition.
    try
    {
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        // If successful, display the recognition result.
        if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
        {
            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
            this.resultTextBlock.Text = speechRecognitionResult.Text;
        }
    }
    catch (Exception exception)
    {
        if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
        {
            this.resultTextBlock.Visibility = Visibility.Visible;
            this.resultTextBlock.Text = "The privacy statement was declined.";
        }
        else
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
            await messageDialog.ShowAsync();
        }
    }
}
/// <summary>
/// Generate text from voice.
/// </summary>
/// <returns>Generated text</returns>
public async Task<string> VoiceToText()
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    return speechRecognitionResult.Text;
}
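// A minimal usage sketch for the VoiceToText helper above; the handler name
// (btnDictate_Click) and TextBox name (txtNotes) are hypothetical:
private async void btnDictate_Click(object sender, RoutedEventArgs e)
{
    txtNotes.Text = await VoiceToText();
}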
private async void btnMicFrench_Click(object sender, RoutedEventArgs e)
{
    try
    {
        // A language can be passed as a constructor parameter; without it the recognizer uses the system language.
        Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language("fr"));

        await speechRecognizer.CompileConstraintsAsync();

        Windows.Media.SpeechRecognition.SpeechRecognitionResult resultado = await speechRecognizer.RecognizeWithUIAsync();
        txtDescripcionFrances.Text = resultado.Text;
    }
    catch (Exception)
    {
        // Recognition failed or was cancelled; leave the text box unchanged.
    }
}
private async void btnMicEspaniol_Click(object sender, RoutedEventArgs e)
{
    try
    {
        // A language can be passed as a constructor parameter; without it the recognizer uses the system language.
        Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language("es-MX"));

        await speechRecognizer.CompileConstraintsAsync();

        Windows.Media.SpeechRecognition.SpeechRecognitionResult resultado = await speechRecognizer.RecognizeWithUIAsync();
        txtpalespanol.Text = resultado.Text;
    }
    catch (Exception)
    {
        throw;
    }
}
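// The constructors above throw if the requested language pack is not installed.
// A minimal sketch, assuming only the Windows.Media.SpeechRecognition API, that
// checks whether a language is available for dictation before constructing a recognizer:
private static bool IsDictationLanguageInstalled(string languageTag)
{
    foreach (var language in Windows.Media.SpeechRecognition.SpeechRecognizer.SupportedTopicLanguages)
    {
        if (language.LanguageTag == languageTag)
        {
            return true;
        }
    }
    return false;
}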
private async void RecognizeWithListConstraint_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // You could create any IEnumerable dynamically.
    string[] responses = { "Yes", "No" };

    // Add a list constraint to the recognizer.
    var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");

    speechRecognizer.UIOptions.ExampleText = @"Ex. ""Yes"", ""No""";
    speechRecognizer.Constraints.Add(listConstraint);

    // Compile the constraint.
    await speechRecognizer.CompileConstraintsAsync();

    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

    // Start recognition.
    try
    {
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        // If successful, display the recognition result.
        if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
        {
            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
            this.resultTextBlock.Text = speechRecognitionResult.Text;
        }
    }
    catch (Exception exception)
    {
        if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
        {
            this.resultTextBlock.Visibility = Visibility.Visible;
            this.resultTextBlock.Text = "The privacy statement was declined.";
        }
        else
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
            await messageDialog.ShowAsync();
        }
    }
}
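// When several constraints are attached, the tag passed to the constraint's
// constructor ("yesOrNo" above) identifies which one produced the result.
// A minimal sketch of that check:
private static bool MatchedYesOrNo(Windows.Media.SpeechRecognition.SpeechRecognitionResult result)
{
    // result.Constraint can be null, e.g. when the utterance was rejected.
    return result.Constraint != null && result.Constraint.Tag == "yesOrNo";
}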
private async void RecognizeWithWebSearchGrammar_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Add a web search grammar to the recognizer.
    var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

    speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
    speechRecognizer.UIOptions.ExampleText = @"Ex. ""weather for London""";
    speechRecognizer.Constraints.Add(webSearchGrammar);

    // Compile the constraint.
    await speechRecognizer.CompileConstraintsAsync();

    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

    // Start recognition.
    try
    {
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        // If successful, display the recognition result.
        if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
        {
            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
            this.resultTextBlock.Text = speechRecognitionResult.Text;
        }
    }
    catch (Exception exception)
    {
        if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
        {
            this.resultTextBlock.Visibility = Visibility.Visible;
            this.resultTextBlock.Text = "The privacy statement was declined.";
        }
        else
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
            await messageDialog.ShowAsync();
        }
    }
}
private static async Task<string> Listen()
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Alternatively, show the recognition result in a message dialog:
    //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
    //await messageDialog.ShowAsync();

    var whatWasSaid = speechRecognitionResult.Text;
    return whatWasSaid;
}
// Voice recognition
private async void StartRecognizing_Click(object sender, RoutedEventArgs e)
{
    if (await SpeechRecognition.RequestMicrophonePermission())
    {
        // Create an instance of SpeechRecognizer.
        var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

        // Compile the dictation grammar that is loaded by default.
        await speechRecognizer.CompileConstraintsAsync();

        // Start recognition.
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        // Do something with the recognition result.
        HomePageViewModel.Current.QueryWord(speechRecognitionResult.Text);

        //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
        //await messageDialog.ShowAsync();
    }
}
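// SpeechRecognition.RequestMicrophonePermission() is not shown in the snippet above.
// A minimal sketch of what such a helper might look like, assuming the common UWP
// pattern of probing microphone access by initializing an audio-only MediaCapture:
public static async Task<bool> RequestMicrophonePermission()
{
    try
    {
        var settings = new Windows.Media.Capture.MediaCaptureInitializationSettings
        {
            StreamingCaptureMode = Windows.Media.Capture.StreamingCaptureMode.Audio,
            MediaCategory = Windows.Media.Capture.MediaCategory.Speech
        };
        var capture = new Windows.Media.Capture.MediaCapture();
        await capture.InitializeAsync(settings);
    }
    catch (UnauthorizedAccessException)
    {
        // The user has denied microphone access for this app.
        return false;
    }
    return true;
}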
private async void startDetect()
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    string[] responses = { "start", "quit" };

    // Add a list constraint to the recognizer.
    var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "startOrQuit");
    speechRecognizer.Constraints.Add(listConstraint);

    // Compile the constraint.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    //textBlock1.Text = "Say Start";

    // Recognize with UI.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Recognize without UI:
    //Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

    if (speechRecognitionResult.Text == "start")
    {
        //textBlock2.Text = "Start detected";
        await Task.Delay(2000);
        startRecAsync();
    }

    if (speechRecognitionResult.Text == "quit")
    {
        CoreApplication.Exit();
    }
}
public static async Task<string> GetTextFromSpeech()
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    return speechRecognitionResult.Text;
}
public async Task<string> Reconnaissance()
{
    try
    {
        // Create an instance of SpeechRecognizer.
        var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

        // Compile the dictation grammar that is loaded by default.
        await speechRecognizer.CompileConstraintsAsync();

        // Start recognition.
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
        {
            return speechRecognitionResult.Text;
        }

        return String.Empty;
    }
    catch (Exception)
    {
        return String.Empty;
    }
}
private async void VoiceIconTapped(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Put the recognition result into the input box.
    //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
    //await messageDialog.ShowAsync();
    this.UserInput.Text = speechRecognitionResult.Text;
}
private async void Click_Record(object sender, RoutedEventArgs e)
{
    // Get the app's local storage folder.
    Windows.Storage.StorageFolder storageFolder = Windows.Storage.ApplicationData.Current.LocalFolder;

    // Create a new file named "chickenOrFish.txt", replacing it if it already exists.
    Windows.Storage.StorageFile sampleFile = await storageFolder.CreateFileAsync("chickenOrFish.txt", Windows.Storage.CreationCollisionOption.ReplaceExisting);
    //await Windows.Storage.FileIO.WriteTextAsync(sampleFile, "Hello sir what would you like for breakfast chicken please good morning sir what would you like for lunch I want fish noodles thanks");

    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Store the recognized text as string script0.
    string script0 = speechRecognitionResult.Text;

    // Write script0 to the text file "chickenOrFish.txt".
    await Windows.Storage.FileIO.WriteTextAsync(sampleFile, script0);

    // Show the recognized text (content first, then the dialog title).
    var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Heard you say:");
    await messageDialog.ShowAsync();
}
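// A minimal sketch for reading the saved transcript back on a later run,
// assuming the same "chickenOrFish.txt" file name used above:
private static async Task<string> ReadSavedTranscript()
{
    Windows.Storage.StorageFolder storageFolder = Windows.Storage.ApplicationData.Current.LocalFolder;
    Windows.Storage.StorageFile sampleFile = await storageFolder.GetFileAsync("chickenOrFish.txt");
    return await Windows.Storage.FileIO.ReadTextAsync(sampleFile);
}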
private async void VoiceSearchButton_OnClick(object sender, RoutedEventArgs e)
{
    try
    {
        // Create an instance of SpeechRecognizer.
        var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

        // Listen for audio input issues.
        //speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

        // Add a web search grammar to the recognizer.
        var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

        speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
        speechRecognizer.UIOptions.ExampleText = @"Ex. 'Play Rahman songs'";
        speechRecognizer.Constraints.Add(webSearchGrammar);

        // Compile the constraint.
        await speechRecognizer.CompileConstraintsAsync();

        // Start recognition.
        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

        // Do something with the recognition result.
        var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
        await messageDialog.ShowAsync();
    }
    catch (Exception err)
    {
        // This value maps to the SPERR_SPEECH_PRIVACY_POLICY_NOT_ACCEPTED error,
        // returned when the speech recognition privacy policy has not been accepted.
        const int privacyPolicyHResult = unchecked((int)0x80045509);

        // Check whether the error is for the speech recognition privacy policy.
        if (err.HResult == privacyPolicyHResult)
        {
            var messageDialog = new Windows.UI.Popups.MessageDialog("You will need to accept the speech privacy policy in order to use speech recognition in this app.", "Error");
            await messageDialog.ShowAsync();
        }
    }
}
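// A minimal sketch of the RecognitionQualityDegrading handler commented out above,
// logging the audio problem the recognizer detected:
private void speechRecognizer_RecognitionQualityDegrading(
    Windows.Media.SpeechRecognition.SpeechRecognizer sender,
    Windows.Media.SpeechRecognition.SpeechRecognitionQualityDegradingEventArgs args)
{
    // args.Problem is a SpeechRecognitionAudioProblem value such as TooQuiet or TooNoisy.
    System.Diagnostics.Debug.WriteLine("Audio problem: " + args.Problem);
}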
private async void Button_Click(object sender, Windows.UI.Xaml.RoutedEventArgs e)
{
    TextStatus.Text = "Listening...";

    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    TextCommand.Text = speechRecognitionResult.Text;
    await SendToBot(TextCommand.Text);
}
private async void UxStartSpeechRecognition_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar that is loaded by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Map the recognized word to a numeric result ("eins" and "null" are German for one and zero).
    switch (speechRecognitionResult.Text.ToLower())
    {
        case "eins":
        case "1":
            this.Result = 1;
            break;

        case "null":
        case "0":
            this.Result = 0;
            break;

        default:
            this.Result = -1;
            break;
    }

    // Confirm the recognition result with the user.
    if (this.Result != -1)
    {
        var messageDialog = new Windows.UI.Popups.MessageDialog("Press OK to continue.", $"Recognized '{speechRecognitionResult.Text}'");
        await messageDialog.ShowAsync();
    }
    else
    {
        var messageDialog = new Windows.UI.Popups.MessageDialog($"Recognized '{speechRecognitionResult.Text}'.", "Invalid input, please try again.");
        await messageDialog.ShowAsync();
    }
}
private async void MediaElement_MediaEnded(object sender, RoutedEventArgs e)
{
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Pick the expected responses for the current conversation step.
    string[] responses = { "George", "John", "Tony", "Jason", "Antony", "Gabriel" };
    if (msg == 0)
    {
        con = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");
    }
    else if (msg == 1)
    {
        responses = new string[] { "Hello", "What time is it", "I was created at Hackathon", "Jarvis call my girlfriend", "Who are your Creators", "Bye" };
        con = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");
    }
    else if (msg == 2)
    {
        responses = new string[] { "Good morning" };
        con = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");
    }

    speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
    speechRecognizer.UIOptions.ExampleText = @"George";

    if (msg == 0 || msg == 1 || msg == 2)
    {
        speechRecognizer.Constraints.Add(con);
    }

    // Compile the constraints.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Searching for contacts.
    if (msg == 0)
    {
        if (speechRecognitionResult.Text != "")
        {
            Name = speechRecognitionResult.Text;
            //start2();
            findContact();
            return;
        }
        else
        {
            await Task.Delay(2000);
            Start();
        }
    }
    else if (msg == 1)
    {
        if (speechRecognitionResult.Text != "")
        {
            Message = speechRecognitionResult.Text;
            jarvis();
        }
        else
        {
            await Task.Delay(2000);
            Start2();
        }
    }
    else if (msg == 2)
    {
        if (speechRecognitionResult.Text != "")
        {
            Message = speechRecognitionResult.Text;
            ComposeEmail(contactt, Message);
        }
        else
        {
            await Task.Delay(2000);
            Start4();
        }
    }
}