Example #1: continuous speech recognition from the default microphone (Microsoft.CognitiveServices.Speech SpeechRecognizer)
        private async Task FromMicrophone()
        {
            // Set the recognition language (for example "en-US") from the stored language code.
            SourceLanguageConfig language = SourceLanguageConfig.FromLanguage(_language);

            // With no AudioConfig supplied, the recognizer listens to the default microphone.
            _activeRecognizer = new Microsoft.CognitiveServices.Speech.SpeechRecognizer(_config, language);

            // Recognized delivers final results; Canceled reports cancellation and errors.
            _activeRecognizer.Recognized += OnRecognized;
            _activeRecognizer.Canceled   += OnCanceled;

            // Keep recognizing until StopContinuousRecognitionAsync is called.
            await _activeRecognizer.StartContinuousRecognitionAsync();
        }
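
Example #1 wires up OnRecognized and OnCanceled but does not show them. The sketch below is one plausible shape for those handlers and a matching stop method, assuming a _transcript StringBuilder added for illustration and the usual using directives (System.Text, System.Diagnostics, Microsoft.CognitiveServices.Speech); it is not part of the original example.

        // Assumed field: collects the recognized text across utterances.
        private readonly StringBuilder _transcript = new StringBuilder();

        private void OnRecognized(object sender, SpeechRecognitionEventArgs e)
        {
            // Recognized fires once per utterance with the final text.
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                _transcript.AppendLine(e.Result.Text);
            }
        }

        private void OnCanceled(object sender, SpeechRecognitionCanceledEventArgs e)
        {
            // Canceled reports errors (invalid key, no network) as well as normal cancellation.
            Debug.WriteLine($"Canceled: {e.Reason}, {e.ErrorCode}, {e.ErrorDetails}");
        }

        private async Task StopRecognition()
        {
            if (_activeRecognizer != null)
            {
                await _activeRecognizer.StopContinuousRecognitionAsync();
                _activeRecognizer.Dispose();
                _activeRecognizer = null;
            }
        }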
Example #2: UWP button click handler that starts continuous dictation with the legacy SpeechFactory API (pre-1.0 Speech SDK preview)
        public async void AzureRecognize_Click(object sender, RoutedEventArgs e)
        {
            btnAzureRecognize.IsEnabled = false;

            try
            {
                // Creates a speech recognizer using microphone as audio input. The default language is "en-us".
                var factory = SpeechFactory.FromSubscription(AzureSubscriptonInfo.SpeechServicesKey, AzureSubscriptonInfo.SpeechServicesRegion);
                azureRecognizer = factory.CreateSpeechRecognizer();

                // Raised when the service detects the start or end of speech in the audio stream.
                azureRecognizer.OnSpeechDetectedEvent += async (s, recognitionEvent) =>
                {
                    String output = $"Recognition event. Event: {recognitionEvent.EventType.ToString()}.\n";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => {
                        rootPage.NotifyUser(output, NotifyType.StatusMessage);
                    });
                };

                // Raised with intermediate (hypothesis) results while the user is still speaking.
                azureRecognizer.IntermediateResultReceived += async (s, speechRecognitionResult) => {
                    string hypothesis = speechRecognitionResult.Result.Text;

                    // Update the textbox with the currently confirmed text, and the hypothesis combined.
                    string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        dictationTextBox.Text  = textboxContent;
                        btnClearText.IsEnabled = true;
                    });
                };

                // Raised once per utterance with the final recognition result.
                azureRecognizer.FinalResultReceived += (s, speechRecognitionResult) => {
                    if (speechRecognitionResult.Result.RecognitionStatus == RecognitionStatus.Recognized)
                    {
                        AppendTextToDictationOutput(speechRecognitionResult.Result.Text);
                    }
                };

                // Raised when recognition fails, for example because of an invalid key or a network problem.
                azureRecognizer.RecognitionErrorRaised += async (s, recognitionErrorRaised) => {
                    String output = $"An error occurred. Status: {recognitionErrorRaised.Status.ToString()}, FailureReason: {recognitionErrorRaised.FailureReason}\n";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => {
                        rootPage.NotifyUser(output, NotifyType.ErrorMessage);
                    });
                };

                // Raised when a recognition session starts or stops.
                azureRecognizer.OnSessionEvent += async (s, sessionEvent) => {
                    String output = $"Session event. Event: {sessionEvent.EventType.ToString()}.\n";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => {
                        rootPage.NotifyUser(output, NotifyType.StatusMessage);
                    });
                };

                // Start continuous recognition; results are delivered through the events wired up above.
                await azureRecognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
            }
            catch (Exception exception)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                await messageDialog.ShowAsync();
            }

            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                btnAzureRecognize.IsEnabled = true;
            });
        }
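
Example #2 references a dictatedTextBuilder field and an AppendTextToDictationOutput helper that are not shown. The sketch below is an assumption about how they might look in this UWP page (dictatedTextBuilder as a System.Text.StringBuilder, the helper marshalling back to the UI thread), not the project's actual implementation.

        // Assumed field: accumulates the confirmed dictation text.
        private StringBuilder dictatedTextBuilder = new StringBuilder();

        private async void AppendTextToDictationOutput(string text)
        {
            dictatedTextBuilder.Append(text + " ");

            // Touch the TextBox only on the UI thread.
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                dictationTextBox.Text  = dictatedTextBuilder.ToString();
                btnClearText.IsEnabled = true;
            });
        }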