private async void DefaultRecognizing_OnClick(object sender, RoutedEventArgs e)
{
    // No custom constraints: use the default dictation grammar.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
    await speechRecognizer.CompileConstraintsAsync();

    var result = await speechRecognizer.RecognizeWithUIAsync();

    var dialog = new MessageDialog(result.Text, "Text spoken");
    await dialog.ShowAsync();
}
private async void StartRecognition(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
    //SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

    // Do something with the recognition result.
    var messageDialog = new MessageDialog(speechRecognitionResult.Text, "Text spoken");
    await messageDialog.ShowAsync();
}
private async void ListConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
{
    // Programmatic list constraint (SpeechRecognitionListConstraint).
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    string[] responses = { "Yes", "No" };
    var list = new SpeechRecognitionListConstraint(responses, "yesOrNo");
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'yes', 'no'";
    speechRecognizer.Constraints.Add(list);

    await speechRecognizer.CompileConstraintsAsync();

    var result = await speechRecognizer.RecognizeWithUIAsync();

    var dialog = new MessageDialog(result.Text, "Text spoken");
    await dialog.ShowAsync();
}
private async void TopicConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
{
    // Web search grammar (SpeechRecognitionTopicConstraint).
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Listen for audio input issues (a sample handler is sketched after the web-search snippet below).
    speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

    var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
    speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'weather for London'";
    speechRecognizer.Constraints.Add(webSearchGrammar);

    await speechRecognizer.CompileConstraintsAsync();

    var result = await speechRecognizer.RecognizeWithUIAsync();

    var dialog = new MessageDialog(result.Text, "Text spoken");
    await dialog.ShowAsync();
}
private async void GrammarFileConstraintRecognizing_OnClick(object sender, RoutedEventArgs e)
{
    // SRGS grammar file (SpeechRecognitionGrammarFileConstraint).
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    var storageFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Sample.grxml"));
    var grammarFileConstraint = new SpeechRecognitionGrammarFileConstraint(storageFile, "colors");
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'blue background', 'green text'";
    speechRecognizer.Constraints.Add(grammarFileConstraint);

    await speechRecognizer.CompileConstraintsAsync();

    var result = await speechRecognizer.RecognizeWithUIAsync();

    var dialog = new MessageDialog(result.Text, "Text spoken");
    await dialog.ShowAsync();
}
private async void BtnSpeechRecogWeatherSearchAsync_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Listen for audio input issues.
    //speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

    // Add a web search grammar to the recognizer.
    var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");
    speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'weather for London'";
    speechRecognizer.Constraints.Add(webSearchGrammar);

    // Compile the constraint.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Do something with the recognition result.
    var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
    await messageDialog.ShowAsync();
}
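Both web-search snippets reference a speechRecognizer_RecognitionQualityDegrading handler that is never shown. A minimal sketch of one possible handler follows; the event and argument types come from Windows.Media.SpeechRecognition, but the body (writing the reported problem to the debug output) is only an assumption.

// Hypothetical handler for the RecognitionQualityDegrading subscriptions above.
// The event reports audio input problems (e.g. too quiet, too noisy) via args.Problem.
private void speechRecognizer_RecognitionQualityDegrading(
    Windows.Media.SpeechRecognition.SpeechRecognizer sender,
    Windows.Media.SpeechRecognition.SpeechRecognitionQualityDegradingEventArgs args)
{
    // How the app reacts is up to you; logging is just an example.
    System.Diagnostics.Debug.WriteLine("Audio problem: " + args.Problem.ToString());
}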
public async void CallAssitant(TextBlock speechText)
{
    // Assumes class-level members declared elsewhere: a SpeechRecognizer field named
    // speechRecognizer and a string member named TextSaid (see the sketch after this snippet).

    // Compile the dictation grammar by default.
    await speechRecognizer.CompileConstraintsAsync();

    //recognitionOperation = speechRecognizer.RecognizeAsync();
    //SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;
    //// Start recognition.
    //if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
    //{
    //    TextSaid = "\n" + speechRecognitionResult.Text;
    //}

    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    TextSaid = "\n" + speechRecognitionResult.Text;
    speechText.Text = speechText.Text + TextSaid;

    // The block above is commented out because I am trying to live without a dialog box.
}
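The CallAssitant snippet relies on a speechRecognizer field and a TextSaid member that are not shown. A minimal sketch of those declarations follows; the names come from the snippet, but the initialization is an assumption.

// Hypothetical class-level members assumed by CallAssitant.
private Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer =
    new Windows.Media.SpeechRecognition.SpeechRecognizer();
private string TextSaid = string.Empty;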
// Start speech recognition.
private async void micro_Click(object sender, RoutedEventArgs e)
{
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    string[] responses = commandesHttp[0].ToArray();

    // Only match against known voice commands.
    var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "commandeHttp");
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'Yana comment vas tu ?'";
    speechRecognizer.Constraints.Add(listConstraint);
    await speechRecognizer.CompileConstraintsAsync();

    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    allmessages.Add(new Message
    {
        TextMessage = speechRecognitionResult.Text,
        Time = DateTime.Now.ToString(),
        Status = "Sent",
        tofrom = true
    });

    var index = Array.FindIndex(responses, row => row.Contains(speechRecognitionResult.Text));
    string reponse = "";

    // Check whether the command is an HTTP command or a socket command.
    try
    {
        if (index < commandesHttp[1].Count)
        {
            reponse = await getReponseHttp(commandesHttp[1][index]);
        }
        else
        {
            getReponseSocket(speechRecognitionResult.Text);
        }
    }
    catch
    {
        reponse = "Une erreur est survenue";
    }
}
async void clickStart(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // Compile the dictation grammar by default.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

    // Do something with the recognition result.
    var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
    await messageDialog.ShowAsync();

    //Task.Factory.StartNew(async () =>
    //{
    //    try
    //    {
    //        Speech.Initialize();
    //        //await Speech.StartRecognition();
    //    }
    //    catch (Exception ex)
    //    {
    //        throw ex;
    //    }
    //});
}
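Several of the snippets above leave a RecognizeAsync call commented out as the UI-less alternative to RecognizeWithUIAsync. A minimal sketch of that path, including the status check the CallAssitant snippet also comments out; the helper name RecognizeWithoutUIAsync is made up for illustration and not a platform API.

// Minimal sketch: recognition without the built-in speech UI, using the default dictation grammar.
private async System.Threading.Tasks.Task<string> RecognizeWithoutUIAsync()
{
    var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
    await speechRecognizer.CompileConstraintsAsync();

    // RecognizeAsync listens without showing the system speech dialog.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult result =
        await speechRecognizer.RecognizeAsync();

    // Only use the text when recognition actually succeeded.
    if (result.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
    {
        return result.Text;
    }
    return string.Empty;
}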