Example #1
        public async Task<string> RecognizeSpeechAsync(string audioUrl)
        {
            // Convert the remote audio file to WAV before recognition.
            var audioName = await CloudConvert.ConvertAudioToWavAsync(audioUrl);

            var config        = SpeechConfig.FromSubscription(Settings.SubscriptionKey, Settings.SubscriptionRegion);
            var textConverted = "";

            using (var audioInput = AudioConfig.FromWavFileInput(audioName))
            using (var recognizer = new Microsoft.CognitiveServices.Speech.SpeechRecognizer(config, audioInput))
            {
                // Recognize a single utterance from the WAV file.
                var result = await recognizer.RecognizeOnceAsync();

                switch (result.Reason)
                {
                    case ResultReason.NoMatch:
                        textConverted = "Sorry, I couldn't understand what you said.";
                        break;

                    case ResultReason.RecognizedSpeech:
                        textConverted = result.Text;
                        break;

                    default:
                        break;
                }
            }

            // Remove the temporary WAV file once recognition is done.
            File.Delete(audioName);
            return textConverted;
        }
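A minimal usage sketch; the `speechService` instance and the URL are assumptions, not part of the example:

        // Hypothetical caller: transcribe a remote audio file and print the text.
        // `speechService` is an assumed instance of the class hosting RecognizeSpeechAsync.
        var text = await speechService.RecognizeSpeechAsync("https://example.com/clip.mp3");
        Console.WriteLine(text);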
Example #2
        private async Task FromMicrophone()
        {
            // Pin the recognition language before creating the recognizer.
            SourceLanguageConfig language = SourceLanguageConfig.FromLanguage(_language);

            _activeRecognizer = new Microsoft.CognitiveServices.Speech.SpeechRecognizer(_config, language);

            // Wire up the handlers before starting continuous recognition.
            _activeRecognizer.Recognized += OnRecognized;
            _activeRecognizer.Canceled   += OnCanceled;

            await _activeRecognizer.StartContinuousRecognitionAsync();
        }
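The `OnRecognized` and `OnCanceled` handlers wired above are not shown in the example. A minimal sketch matching the SDK's event signatures could look like this (the handler bodies are assumptions):

        private void OnRecognized(object sender, SpeechRecognitionEventArgs e)
        {
            // Only final recognitions carry usable text.
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                Console.WriteLine($"Recognized: {e.Result.Text}");
            }
        }

        private void OnCanceled(object sender, SpeechRecognitionCanceledEventArgs e)
        {
            // e.Reason distinguishes errors from normal cancellation; e.ErrorDetails has the message.
            Console.WriteLine($"Canceled: {e.Reason} ({e.ErrorDetails})");
        }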
Example #3
        public bool StopRecognizer()
        {
            Microsoft.CognitiveServices.Speech.SpeechRecognizer recognizer = _activeRecognizer;

            if (recognizer == null)
            {
                return true;
            }

            // Blocks until the recognizer has fully stopped.
            recognizer.StopContinuousRecognitionAsync().Wait();

            _activeRecognizer = null;

            return true;
        }
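Calling `.Wait()` on the stop task blocks the calling thread and can deadlock on a UI synchronization context. An async variant is a safer default; a sketch, assuming the same `_activeRecognizer` field:

        public async Task<bool> StopRecognizerAsync()
        {
            var recognizer = _activeRecognizer;

            if (recognizer != null)
            {
                await recognizer.StopContinuousRecognitionAsync();
                _activeRecognizer = null;
            }

            return true;
        }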
Example #4
        public async void AzureRecognize_Click(object sender, RoutedEventArgs e)
        {
            btnAzureRecognize.IsEnabled = false;

            try
            {
                // Creates a speech recognizer using the microphone as audio input. The default language is "en-us".
                // Note: this snippet targets the pre-1.0 Speech SDK surface (SpeechFactory and the *ResultReceived events).
                var factory = SpeechFactory.FromSubscription(AzureSubscriptonInfo.SpeechServicesKey, AzureSubscriptonInfo.SpeechServicesRegion);
                azureRecognizer = factory.CreateSpeechRecognizer();

                azureRecognizer.OnSpeechDetectedEvent += async (s, recognitionEvent) =>
                {
                    string output = $"Recognition event. Event: {recognitionEvent.EventType}.\n";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        rootPage.NotifyUser(output, NotifyType.StatusMessage);
                    });
                };

                azureRecognizer.IntermediateResultReceived += async (s, speechRecognitionResult) =>
                {
                    string hypothesis = speechRecognitionResult.Result.Text;

                    // Update the textbox with the currently confirmed text and the hypothesis combined.
                    string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ...";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        dictationTextBox.Text  = textboxContent;
                        btnClearText.IsEnabled = true;
                    });
                };

                azureRecognizer.FinalResultReceived += (s, speechRecognitionResult) =>
                {
                    if (speechRecognitionResult.Result.RecognitionStatus == RecognitionStatus.Recognized)
                    {
                        AppendTextToDictationOutput(speechRecognitionResult.Result.Text);
                    }
                };

                azureRecognizer.RecognitionErrorRaised += async (s, recognitionErrorRaised) =>
                {
                    string output = $"An error occurred. Status: {recognitionErrorRaised.Status}, FailureReason: {recognitionErrorRaised.FailureReason}\n";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        rootPage.NotifyUser(output, NotifyType.ErrorMessage);
                    });
                };

                azureRecognizer.OnSessionEvent += async (s, sessionEvent) =>
                {
                    string output = $"Session event. Event: {sessionEvent.EventType}.\n";
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        rootPage.NotifyUser(output, NotifyType.StatusMessage);
                    });
                };

                await azureRecognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
            }
            catch (Exception exception)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                await messageDialog.ShowAsync();
            }

            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                btnAzureRecognize.IsEnabled = true;
            });
        }
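This example uses the pre-1.0 Speech SDK API (`SpeechFactory`, `IntermediateResultReceived`, `FinalResultReceived`). Roughly equivalent wiring against the 1.x surface might look like this; a sketch, where `key` and `region` are placeholders and the handler bodies are assumptions:

        var config     = SpeechConfig.FromSubscription(key, region);
        var recognizer = new Microsoft.CognitiveServices.Speech.SpeechRecognizer(config);

        // Recognizing fires with intermediate hypotheses; Recognized fires with final results.
        recognizer.Recognizing += (s, e) => Console.WriteLine($"Hypothesis: {e.Result.Text}");
        recognizer.Recognized  += (s, e) =>
        {
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                Console.WriteLine($"Final: {e.Result.Text}");
            }
        };
        recognizer.Canceled += (s, e) => Console.WriteLine($"Canceled: {e.Reason} ({e.ErrorDetails})");

        await recognizer.StartContinuousRecognitionAsync();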
Example #5
        public async Task RecognizeSpeechAsync()
        {
            var config = SpeechConfig.FromSubscription("541650d764734a37a947aa914ba310cc", "westeurope");

            using (Microsoft.CognitiveServices.Speech.SpeechRecognizer recognizer = new Microsoft.CognitiveServices.Speech.SpeechRecognizer(config))
            {
                var result = await recognizer.RecognizeOnceAsync();

                // Only act on successful recognitions that start with the wake word.
                if (result.Reason == ResultReason.RecognizedSpeech && result.Text.Contains("Mirror"))
                {
                    // Strip the wake word: keep everything after the first space.
                    int i = result.Text.IndexOf(" ") + 1;
                    string question = result.Text.Substring(i);

                    var wa = new WAEngine
                    {
                        APIKey = "3YYV6P-R5W8R7TY35"
                    };
                    WALogger.LogLevel        = WALogLevel.None;
                    WALogger.ConsoleLogLevel = WALogLevel.Verbose;

                    var query = new WAQuery
                    {
                        Input  = question,
                        Format = WAQueryFormat.PlainText
                    };
                    speech.Visible = true;
                    speech.Text    = question;
                    query.PodStates.Add("test");
                    query.AppID = wa.APIKey;

                    var queryResult = wa.RunQuery(query);
                    foreach (var pod in queryResult.Pods)
                    {
                        foreach (var subpod in pod.SubPods)
                        {
                            string titlu = pod.Title;

                            // Speak and display the answer for any pod with a relevant title.
                            if (titlu.Contains("Result") ||
                                titlu.Contains("Basic information") ||
                                titlu.Contains("Estimates for") ||
                                titlu.Contains("Biological") ||
                                titlu.Contains("Definitions"))
                            {
                                using (var synth = new System.Speech.Synthesis.SpeechSynthesizer())
                                {
                                    // Configure the audio output and speak the plain-text answer.
                                    synth.SetOutputToDefaultAudioDevice();
                                    synth.Speak(subpod.PlainText);
                                }
                                raspuns.Visible = true;
                                raspuns.Text    = subpod.PlainText;
                            }
                        }
                    }
                }
            }

            // Keep listening: await the next pass (the original recursive call was not awaited).
            await RecognizeSpeechAsync();
        }
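The tail call at the end restarts listening after every utterance. With that recursive call removed, an explicit loop expresses the same intent without unbounded re-entry; a sketch, where the driver method and cancellation token are assumptions:

        // Hypothetical driver: recognize utterances until cancelled.
        public async Task RunRecognitionLoopAsync(CancellationToken token)
        {
            while (!token.IsCancellationRequested)
            {
                await RecognizeSpeechAsync();
            }
        }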