/// <summary>
/// When activating the scenario, ensure we have permission from the user to access their microphone, and
/// provide an appropriate path for the user to enable access to the microphone if they haven't
/// given explicit permission for it.
/// Construct a speech recognizer using the default dictation grammar.
/// </summary>
/// <param name="e">The navigation event details</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    // Prompt the user for microphone access. The system shows the prompt at most
    // once; a rejection is remembered and will not re-prompt.
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (permissionGained)
    {
        // enable the recognition buttons
        btnRecognizeWithUI.IsEnabled = true;
        btnRecognizeWithoutUI.IsEnabled = true;
    }
    else
    {
        this.resultTextBlock.Visibility = Visibility.Visible;
        this.resultTextBlock.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }

    // Create an instance of SpeechRecognizer.
    this.speechRecognizer = new SpeechRecognizer();

    // Provide feedback to the user about the state of the recognizer.
    speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

    // Compile the dictation grammar that is loaded by default.
    // BUGFIX: the compilation result was previously discarded. If compilation fails,
    // the recognition buttons must not stay enabled, or subsequent recognition calls
    // will fail at runtime. This mirrors the result check used by the sibling
    // continuous-recognition scenarios.
    SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
    {
        btnRecognizeWithUI.IsEnabled = false;
        btnRecognizeWithoutUI.IsEnabled = false;

        // Let the user know that the grammar didn't compile properly.
        this.resultTextBlock.Visibility = Visibility.Visible;
        this.resultTextBlock.Text = "Unable to compile grammar.";
    }
}
/// <summary>
/// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
/// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
/// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
/// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
/// audio input.
/// </summary>
/// <param name="e">Unused navigation parameters</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    // Speech events are raised off the UI thread, so capture the UI dispatcher now.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    // The microphone prompt is shown at most once; a rejected request is remembered
    // and will not re-prompt the user.
    bool hasMicPermission = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!hasMicPermission)
    {
        resultTextBlock.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }
    else
    {
        btnContinuousRecognize.IsEnabled = true;
    }

    // Point the resource context at the system speech language so localized
    // speech strings resolve correctly.
    Language systemLanguage = SpeechRecognizer.SystemSpeechLanguage;
    speechContext = ResourceContext.GetForCurrentView();
    speechContext.Languages = new string[] { systemLanguage.LanguageTag };
    speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");

    PopulateLanguageDropdown();

    // Re-initialize the recognizer on every entrance: it is disposed on scenario
    // exit, but the xaml page itself may be reused between openings.
    await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
}
/// <summary>
/// When activating the scenario, ensure we have permission from the user to access their microphone, and
/// provide an appropriate path for the user to enable access to the microphone if they haven't
/// given explicit permission for it.
/// </summary>
/// <param name="e">The navigation event details</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    // Speech status messages arrive on a non-UI thread; keep the dispatcher for marshaling.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    bool microphoneAvailable = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!microphoneAvailable)
    {
        // Without microphone access none of this scenario's functionality can work,
        // so surface the problem and disable all related controls.
        resultTextBlock.Visibility = Visibility.Visible;
        resultTextBlock.Text = "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
        btnRecognizeWithUI.IsEnabled = false;
        btnRecognizeWithoutUI.IsEnabled = false;
        cbLanguageSelection.IsEnabled = false;
        return;
    }

    // Enable the recognition buttons.
    btnRecognizeWithUI.IsEnabled = true;
    btnRecognizeWithoutUI.IsEnabled = true;

    // Resolve localized speech resources against the system speech language.
    Language defaultLanguage = SpeechRecognizer.SystemSpeechLanguage;
    speechContext = ResourceContext.GetForCurrentView();
    speechContext.Languages = new string[] { defaultLanguage.LanguageTag };
    speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");

    PopulateLanguageDropdown();
    await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
}
/// <summary>
/// When activating the scenario, ensure we have permission from the user to access their microphone, and
/// provide an appropriate path for the user to enable access to the microphone if they haven't
/// given explicit permission for it.
/// Construct a recognizer with a simple list of recognized terms.
/// </summary>
/// <param name="e">The navigation event details</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    // Prompt the user for microphone access. The system shows the prompt at most
    // once; a rejection is remembered and will not re-prompt.
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (permissionGained)
    {
        // enable the recognition buttons
        btnRecognizeWithUI.IsEnabled = true;
        btnRecognizeWithoutUI.IsEnabled = true;
    }
    else
    {
        this.resultTextBlock.Visibility = Visibility.Visible;
        this.resultTextBlock.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }

    // Create an instance of SpeechRecognizer.
    speechRecognizer = new SpeechRecognizer();

    // Provide feedback to the user about the state of the recognizer.
    speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

    // You could create any IEnumerable dynamically.
    string[] responses = { "Yes", "No" };

    // Add a list constraint to the recognizer.
    var listConstraint = new SpeechRecognitionListConstraint(responses, "yesOrNo");

    speechRecognizer.UIOptions.ExampleText = @"Ex. ""Yes"", ""No""";
    speechRecognizer.Constraints.Add(listConstraint);

    // Compile the constraint.
    // BUGFIX: the compilation result was previously discarded. If compilation fails,
    // the recognition buttons must not stay enabled, or subsequent recognition calls
    // will fail at runtime. This mirrors the result check used by the sibling
    // continuous-recognition scenarios.
    SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
    {
        btnRecognizeWithUI.IsEnabled = false;
        btnRecognizeWithoutUI.IsEnabled = false;

        // Let the user know that the grammar didn't compile properly.
        this.resultTextBlock.Visibility = Visibility.Visible;
        this.resultTextBlock.Text = "Unable to compile grammar.";
    }
}
/// <summary>
/// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
/// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
/// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
/// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
/// audio input.
/// </summary>
/// <param name="e">Unused navigation parameters</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    // Speech events fire on a background thread; capture the UI dispatcher for updates.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    // The microphone prompt appears at most once; a rejection is remembered and
    // will not re-prompt.
    bool allowedToRecord = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!allowedToRecord)
    {
        this.resultTextBlock.Visibility = Visibility.Visible;
        this.resultTextBlock.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }
    else
    {
        btnContinuousRecognize.IsEnabled = true;
    }

    // Initialize resource map to retrieve localized speech strings.
    Language defaultSpeechLanguage = SpeechRecognizer.SystemSpeechLanguage;
    speechContext = ResourceContext.GetForCurrentView();
    speechContext.Languages = new string[] { defaultSpeechLanguage.LanguageTag };
    speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");

    PopulateLanguageDropdown();
    await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
}
/// <summary>
/// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
/// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
/// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
/// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
/// audio input.
/// </summary>
/// <param name="e">Unused navigation parameters</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    // Speech events arrive on a background thread; keep the UI dispatcher handy.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    // The permission prompt is shown at most once; a rejection is remembered
    // and will not re-prompt.
    bool micAccessGranted = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!micAccessGranted)
    {
        // Without microphone access, surface the problem and disable the controls.
        this.dictationTextBox.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
        btnContinuousRecognize.IsEnabled = false;
        cbLanguageSelection.IsEnabled = false;
        return;
    }

    btnContinuousRecognize.IsEnabled = true;
    PopulateLanguageDropdown();

    // The recognizer is disposed on scenario exit, so rebuild it on each entrance;
    // the xaml page may not get destroyed between openings of the scenario.
    await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
}
/// <summary>
/// When activating the scenario, ensure we have permission from the user to access their microphone, and
/// provide an appropriate path for the user to enable access to the microphone if they haven't
/// given explicit permission for it.
/// </summary>
/// <param name="e">The navigation event details</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    bool microphoneAccessible = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!microphoneAccessible)
    {
        // Tell the user how to re-enable microphone access for the app.
        resultTextBlock.Visibility = Visibility.Visible;
        resultTextBlock.Text = "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
    }
    else
    {
        // Enable the recognition button.
        btnMicrophone.IsEnabled = true;
    }
}
/// <summary>
/// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
/// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
/// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
/// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
/// audio input.
/// </summary>
/// <param name="e">Unused navigation parameters</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    // Speech events come back on a worker thread; hold on to the UI dispatcher.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    // Ask for microphone access. The OS shows the prompt only once and remembers
    // a rejection without re-prompting.
    bool canUseMicrophone = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!canUseMicrophone)
    {
        this.dictationTextBox.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }
    else
    {
        btnContinuousRecognize.IsEnabled = true;
    }

    this.speechRecognizer = new SpeechRecognizer();

    // Surface recognizer state changes so the UI can show an audio indicator that
    // helps the user understand whether they're being heard.
    speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

    // Optimize for freeform dictated speech via the dictation topic constraint.
    var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
    speechRecognizer.Constraints.Add(topicConstraint);

    SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
    {
        rootPage.NotifyUser("Grammar Compilation Failed: " + compilationResult.Status.ToString(), NotifyType.ErrorMessage);
        btnContinuousRecognize.IsEnabled = false;
    }

    // Continuous recognition events: Completed fires when various error states occur;
    // ResultGenerated fires when recognized phrases occur or the garbage rule is hit;
    // HypothesisGenerated fires during recognition so incremental feedback can be
    // shown based on what the user's currently saying.
    speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
    speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
}
/// <summary>
/// When activating the scenario, ensure we have permission from the user to access their microphone, and
/// provide an appropriate path for the user to enable access to the microphone if they haven't
/// given explicit permission for it.
/// </summary>
/// <param name="e">The navigation event details</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    // Speech status messages arrive off the UI thread; cache the dispatcher so they
    // can be shown on the UI.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    bool micPermission = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!micPermission)
    {
        resultTextBlock.Visibility = Visibility.Visible;
        resultTextBlock.Text = "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
    }
    else
    {
        // Enable the recognition buttons.
        btnRecognizeWithUI.IsEnabled = true;
        btnRecognizeWithoutUI.IsEnabled = true;
    }

    await InitializeRecognizer();
}
/// <summary>
/// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
/// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
/// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
/// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
/// audio input.
/// </summary>
/// <param name="e">Unused navigation parameters</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    // Prompt the user for permission to access the microphone. This request will only happen
    // once, it will not re-prompt if the user rejects the permission.
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (permissionGained)
    {
        btnContinuousRecognize.IsEnabled = true;
    }
    else
    {
        resultTextBlock.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }

    // Initialize the recognizer. Since the recognizer is disposed on scenario exit, we need to make sure we re-initialize it on scenario
    // entrance, as the xaml page may not get destroyed between openings of the scenario.
    // BUGFIX: this call was previously fire-and-forget (no await), so initialization could
    // still be running when the handler returned and any exception it threw would go
    // unobserved. Sibling scenarios await the same call.
    await InitializeRecognizer();
}
/// <summary>
/// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
/// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
/// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
/// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
/// audio input.
/// </summary>
/// <param name="e">Unused navigation parameters</param>
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    rootPage = MainPage.Current;

    // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    // Prompt the user for permission to access the microphone. This request will only happen
    // once, it will not re-prompt if the user rejects the permission.
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (permissionGained)
    {
        btnContinuousRecognize.IsEnabled = true;
    }
    else
    {
        this.resultTextBlock.Visibility = Visibility.Visible;
        this.resultTextBlock.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
    }

    // BUGFIX: the recognizer can survive re-entry to this page (hence the null guard),
    // but previously the StateChanged/session handlers were re-subscribed and the
    // command-list constraints re-added and re-compiled on EVERY navigation, so each
    // utterance would fire duplicated handlers. All one-time setup now runs only when
    // the recognizer is actually created.
    // NOTE(review): assumes the scenario-exit path either disposes and nulls
    // speechRecognizer or leaves it fully wired — confirm against OnNavigatedFrom.
    if (this.speechRecognizer == null)
    {
        this.speechRecognizer = new SpeechRecognizer();

        // Provide feedback to the user about the state of the recognizer. This can be used to
        // provide visual feedback in the form of an audio indicator to help the user understand
        // whether they're being heard.
        speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

        // Build a command-list grammar. Commands should ideally be drawn from a resource file
        // for localization, and be grouped into tags for alternate forms of the same command.
        speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<string>() { "Go Home" }, "Home"));
        speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<string>() { "Go to Contoso Studio" }, "GoToContosoStudio"));
        speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<string>() { "Show Message", "Open Message" }, "Message"));
        speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<string>() { "Send Email", "Create Email" }, "Email"));
        speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<string>() { "Call Nita Farley", "Call Nita" }, "CallNita"));
        speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<string>() { "Call Wayne Sigmon ", "Call Wayne" }, "CallWayne"));

        SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            // Disable the recognition buttons.
            btnContinuousRecognize.IsEnabled = false;

            // Let the user know that the grammar didn't compile properly.
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = "Unable to compile grammar.";
        }

        // Handle continuous recognition events. Completed fires when various error states occur.
        // ResultGenerated fires when some recognized phrases occur, or the garbage rule is hit.
        speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
        speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
    }
}