Example #1
        private async Task InitializeItemRecognizer()
        {
            CleanupItemRecognizer();

            try
            {
                this.itemRecognizer = new SpeechRecognizer(lang);
                this.itemRecognizer.StateChanged += ItemRecognizerStateChanged;

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                this.itemRecognizer.Constraints.Add(dictationConstraint);

                var result = await itemRecognizer.CompileConstraintsAsync();

                if (result.Status != SpeechRecognitionResultStatus.Success)
                {
                    this.SetDebugMessage("Grammar Compilation Failed: " + result.Status.ToString());
                }

                this.itemRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
                this.itemRecognizer.ContinuousRecognitionSession.ResultGenerated += OnItemRecognizerResult;
            }
            catch (Exception ex)
            {
                // TODO: handle speech initialization errors
                this.SetDebugMessage(ex.Message);
            }
        }
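Example #1 wires up the continuous-recognition events but never starts the session. A minimal start/stop sketch (assuming the same itemRecognizer field from the example) could look like this:

        private async Task StartItemRecognitionAsync()
        {
            // The session must be started explicitly after constraints compile;
            // ResultGenerated then fires for each recognized phrase.
            if (itemRecognizer.State == SpeechRecognizerState.Idle)
            {
                await itemRecognizer.ContinuousRecognitionSession.StartAsync();
            }
        }

        private async Task StopItemRecognitionAsync()
        {
            if (itemRecognizer.State != SpeechRecognizerState.Idle)
            {
                await itemRecognizer.ContinuousRecognitionSession.StopAsync();
            }
        }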
Example #2
        private async void InitializeSpeechRecognizer()
        {
            try
            {
                if (speechRecognizer != null)
                {
                    // Calling RecognizeAsync() here would start a brand-new operation;
                    // there is nothing pending to cancel, so just dispose the old recognizer.
                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }
                speechRecognizer = new SpeechRecognizer();
                var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
                speechRecognizer.Constraints.Add(topicConstraint);
                await speechRecognizer.CompileConstraintsAsync();

                this.Operation = await speechRecognizer.RecognizeAsync();

                if (Operation.Status == SpeechRecognitionResultStatus.Success)
                {
                    ResultGenerated(Operation.Text);
                    speechRecognizer.Dispose();
                    speechRecognizer = null;
                }
            }
            catch (Exception)
            {
                // TODO: surface or log speech initialization errors instead of swallowing them.
            }
        }
Example #3
    private async void Setup(Language language)
    {
        if (_recogniser != null)
        {
            _recogniser.ContinuousRecognitionSession.Completed       -= Recogniser_Completed;
            _recogniser.ContinuousRecognitionSession.ResultGenerated -= Recogniser_ResultGenerated;
            _recogniser.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
            _recogniser.Dispose();
            _recogniser = null;
        }
        _recogniser = new SpeechRecognizer(language);
        SpeechRecognitionTopicConstraint constraint = new SpeechRecognitionTopicConstraint(
            SpeechRecognitionScenario.Dictation, "dictation");

        _recogniser.Constraints.Add(constraint);
        SpeechRecognitionCompilationResult result = await _recogniser.CompileConstraintsAsync();

        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            await ShowDialogAsync($"Grammar Compilation Failed: {result.Status.ToString()}");
        }
        _recogniser.ContinuousRecognitionSession.Completed       += Recogniser_Completed;
        _recogniser.ContinuousRecognitionSession.ResultGenerated += Recogniser_ResultGenerated;
        _recogniser.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
    }
Example #4
        public static async void RecognizeSpeech()
        {
            SpeechRecognizer recognizer = new SpeechRecognizer();

            recognizer.Timeouts.BabbleTimeout         = System.TimeSpan.FromSeconds(120.0);
            recognizer.Timeouts.EndSilenceTimeout     = System.TimeSpan.FromSeconds(120.0);
            recognizer.Timeouts.InitialSilenceTimeout = System.TimeSpan.FromSeconds(120.0);
            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Message");

            recognizer.Constraints.Add(topicConstraint);
            await recognizer.CompileConstraintsAsync();

            try {
                SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                // Use result.GetAlternates for more precision
                if (result.Confidence != SpeechRecognitionConfidence.Rejected)
                {
                    if (result.Text != "")
                    {
                        // Dictation results typically end with punctuation; strip the final character.
                        string speechResult = result.Text.Remove(result.Text.Length - 1);
                        int    num;
                        bool   isNumber = Int32.TryParse(speechResult, out num);
                        if (isNumber)
                        {
                            MainPage.SetAudioTempCommand(speechResult);
                        }
                        else
                        {
                            UiUtils.ShowNotification("Your message could not be parsed as number. Please specify a number!");
                        }
                    }
                    else
                    {
                        UiUtils.ShowNotification("Your message could not be parsed. Please repeat!");
                    }
                }
                else
                {
                    UiUtils.ShowNotification("Sorry, could not get that. Can you repeat?");
                }
            }
            catch (Exception ex)
            {
                const int privacyPolicyHResult = unchecked ((int)0x80045509);
                const int networkNotAvailable  = unchecked ((int)0x80045504);

                if (ex.HResult == privacyPolicyHResult)
                {
                    UiUtils.ShowNotification("You will need to accept the speech privacy policy in order to use speech recognition in this app. Consider activating `Get to know me` in 'Settings->Privacy->Speech, inking & typing`");
                }
                else if (ex.HResult == networkNotAvailable)
                {
                    UiUtils.ShowNotification("The network connection is not available");
                }
                else
                {
                    var t = ex.Message;
                }
            }
        }
Example #5
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// </summary>
        /// <param name="e">The navigation event details</param>
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                // enable the recognition buttons
                btnRecognizeWithUI.IsEnabled    = true;
                btnRecognizeWithoutUI.IsEnabled = true;
            }
            else
            {
                this.resultTextBlock.Visibility = Visibility.Visible;
                this.resultTextBlock.Text       = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
            }

            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();
        }
Example #6
		private async void StartVoiceRecognition()
		{
			await SpeakText( "Say Captains Log at any time to create a log entry." );

			speechRecognizerCaptainsLogCommand = new SpeechRecognizer();

			// Listen for user to say "Captains Log". Add and compile the constraint once,
			// outside the loop, so the constraint list doesn't grow on every iteration.
			ISpeechRecognitionConstraint commandConstraint =
				new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
			speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
			await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();

			while ( !cancellationSource.IsCancellationRequested )
			{
				SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();

				if ( commandResult.Status != SpeechRecognitionResultStatus.Success
					|| commandResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| cancellationSource.IsCancellationRequested )
				{
					continue;
				}
				// Recognized user saying "Captains Log"

				// Listen for the user's dictation entry
				var captainsLogDictationRecognizer = new SpeechRecognizer();

				ISpeechRecognitionConstraint dictationConstraint = 
					new SpeechRecognitionTopicConstraint( 
						SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );

				captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );

				await captainsLogDictationRecognizer.CompileConstraintsAsync();

				captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
				captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
				captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
				captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;

				SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();

				if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
					|| dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| string.IsNullOrWhiteSpace( dictationResult.Text )
					|| cancellationSource.IsCancellationRequested )
				{
					captainsLogDictationRecognizer.Dispose();

					continue;
				}
				// Recognized user's dictation entry

				AddLogEntry( dictationResult.Text );

				captainsLogDictationRecognizer.Dispose();
			}

			speechRecognizerCaptainsLogCommand.Dispose();
		}
Example #7
        public async Task <bool> InitializeRecognizerAsync()
        {
            Debug.WriteLine("[Speech to Text]: initializing Speech Recognizer...");
            if (_recognizer != null)
            {
                return(true);
            }

            _recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
            // Set UI text
            _recognizer.UIOptions.AudiblePrompt = "What you want to do...";

            // This requires internet connection
            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            _recognizer.Constraints.Add(topicConstraint);

            SpeechRecognitionCompilationResult result = await _recognizer.CompileConstraintsAsync();   // Required

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("[Speech to Text]: Grammar Compilation Failed: " + result.Status.ToString());
                return(false);
            }

            _recognizer.ContinuousRecognitionSession.ResultGenerated += (s, e) => { Debug.WriteLine($"[Speech to Text]: recognizer results: {e.Result.Text}, {e.Result.RawConfidence.ToString()}, {e.Result.Confidence.ToString()}"); };
            Debug.WriteLine("[Speech to Text]: done initializing Speech Recognizer");
            return(true);
        }
Example #8
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            MicrophoneAccessStatus status = await AudioCapturePermissions.RequestMicrophoneAccessAsync();
            if (status != MicrophoneAccessStatus.Allowed)
            {
                string prompt = status == MicrophoneAccessStatus.NoCaptureDevices ?
                    "No audio capture device was detected. Please check your device and try again." :
                    "This app has not been allowed to access the microphone. Enable it under Settings -> Privacy -> Microphone.";
                var messageDialog = new MessageDialog(prompt);
                await messageDialog.ShowAsync();
                throw new Exception($"Request microphone access failed. Status: {status}");
            }

            Dispose();

            // Create an instance of SpeechRecognizer.
            _speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // Add a web search topic constraint to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
            _speechRecognizer.Constraints.Add(webSearchGrammar);

            // RecognizeWithUIAsync allows developers to customize the prompts.    
            _speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for";
            _speechRecognizer.UIOptions.ExampleText = "For example: \"Hello, beautiful\"";

            // Compile the constraint.
            SpeechRecognitionCompilationResult compilationResult = await _speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                throw new Exception($"Unable to compile grammar. Status: {compilationResult.Status}");
        }
Example #9
        public async Task InitializeSpeechRecognizerAsync()
        {
            if (this.speechRecognizer != null)
            {
                this.DisposeSpeechRecognizer();
            }

            this.dictatedTextBuilder = new StringBuilder();
            this.speechRecognizer    = new SpeechRecognizer();

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                await new MessageDialog("CompileConstraintsAsync returned " + result.Status, "Error initializing SpeechRecognizer").ShowAsync();
                return;
            }

            this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            this.speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            this.speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
Example #10
        /// <summary>
        /// Tries to initialize the SpeechRecognizer object
        /// </summary>
        /// <returns>true if SpeechRecognizer is successfully initialized, false otherwise</returns>
        private async Task <bool> TryInitSpeech()
        {
            bool retVal = false;

            try
            {
                await TryDisposeSpeech();

                speechRecognizer = new SpeechRecognizer();

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, DICTATION);
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    retVal = true;
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text   = SPEECH_RECOGNITION_FAILED;
                retVal = false;
            }

            return(retVal);
        }
Example #11
 public MainPage()
 {
     this.InitializeComponent();
     var recognizer = new SpeechRecognizer();
     var topicconstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Development");
     recognizer.Constraints.Add(topicconstraint);
     // Constructors cannot await; the compilation result is discarded here.
     var result = recognizer.CompileConstraintsAsync();
 }
Example #12
        public async Task Initialize()
        {
            if (!this.permissionGained)
            {
                await WindowManagerService.Current.MainDispatcher.RunAsync(CoreDispatcherPriority.Normal, async () => this.permissionGained = await AudioCapturePermissions.RequestMicrophonePermission());
            }

            try
            {
                if (this.speechRecognizer != null)
                {
                    this.speechRecognizer.StateChanged -= this.SpeechRecognizer_StateChanged;
                    this.speechRecognizer.ContinuousRecognitionSession.Completed       -= this.ContinuousRecognitionSession_Completed;
                    this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= this.ContinuousRecognitionSession_ResultGenerated;
                    this.speechRecognizer.HypothesisGenerated -= this.SpeechRecognizer_HypothesisGenerated;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                var recognizerLanguage = new Language(App.Settings.SpeechLocale); // SpeechRecognizer.SystemSpeechLanguage
                this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
                // of an audio indicator to help the user understand whether they're being heard.
                this.speechRecognizer.StateChanged += this.SpeechRecognizer_StateChanged;

                // Apply the dictation topic constraint to optimize for dictated free-form speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                this.speechRecognizer.Constraints.Add(dictationConstraint);
                var result = await this.speechRecognizer.CompileConstraintsAsync();

                if (result.Status != SpeechRecognitionResultStatus.Success)
                {
                    await this.dialogService.ShowError(result.Status.ToString(), "Grammar Compilation Failed", "OK", null);
                }

                // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
                // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
                // allows us to provide incremental feedback based on what the user's currently saying.
                this.speechRecognizer.ContinuousRecognitionSession.Completed       += this.ContinuousRecognitionSession_Completed;
                this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated += this.ContinuousRecognitionSession_ResultGenerated;
                this.speechRecognizer.HypothesisGenerated += this.SpeechRecognizer_HypothesisGenerated;
                await this.StartRecognizing(true);
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    throw new Exception("Speech Language pack for selected language not installed.", ex);
                }
                else
                {
                    throw;
                }
            }
        }
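The comment in Example #12 describes the three continuous-recognition events. A minimal sketch of what the wired handlers might look like (names taken from the example, bodies assumed):

        private void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Filter low-confidence results before using the recognized text.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                Debug.WriteLine(args.Result.Text);
            }
        }

        private void ContinuousRecognitionSession_Completed(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionCompletedEventArgs args)
        {
            // Fires when the session ends, including error states; args.Status says why.
            Debug.WriteLine($"Session completed: {args.Status}");
        }

        private void SpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            // Incremental feedback while the user is still speaking.
            Debug.WriteLine($"Hypothesis: {args.Hypothesis.Text}");
        }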
Example #13
        public MainPage()
        {
            this.InitializeComponent();
            var recognizer      = new SpeechRecognizer();
            var topicconstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Development");

            recognizer.Constraints.Add(topicconstraint);
            var result = recognizer.CompileConstraintsAsync();
        }
Example #14
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// </summary>
        /// <param name="e">The navigation event details</param>
        private async Task InitSpeech()
        {
            // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                // Enable the recognition buttons.
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    // cleanup prior to re-initializing this scenario.
                    //speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer();

                // Provide feedback to the user about the state of the recognizer.
                //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition buttons.
                    button.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }
            }
            else
            {
                // "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
Example #15
        public async Task <bool> InitializeRecognizerAsync()
        {
            Debug.WriteLine("[Speech to Text]: initializing Speech Recognizer...");
            var language = new Windows.Globalization.Language(_languageName);

            _recognizer = new SpeechRecognizer(language);
            // Set timeout settings.
            _recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(_recognizerInitialSilenceTimeOutInSeconds);
            _recognizer.Timeouts.BabbleTimeout         = TimeSpan.FromSeconds(_recognizerBabbleTimeoutInSeconds);
            _recognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(_recognizerEndSilenceTimeoutInSeconds);
            // Set UI text
            _recognizer.UIOptions.AudiblePrompt = "Say what you want to do...";

            if (!this.IsOffline())
            {
                // This requires internet connection
                SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
                _recognizer.Constraints.Add(topicConstraint);
            }
            else
            {
                // In case of network issue
                string[] responses =
                {
                    "I would like to rent a bike",
                    "I want to rent a bike",
                    "I'd like to rent a bike",
                    "rent a bike",
                    "I would like to rent a bicycle",
                    "I want to rent a bicycle",
                    "I'd like to rent a bicycle",
                    "rent a bicycle"
                };

                // Add a list constraint to the recognizer.
                var listConstraint = new SpeechRecognitionListConstraint(responses, "rentBikePhrases");
                _recognizer.Constraints.Add(listConstraint);
            }

            SpeechRecognitionCompilationResult result = await _recognizer.CompileConstraintsAsync();   // Required

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("[Speech to Text]: Grammar Compilation Failed: " + result.Status.ToString());
                return(false);
            }

            _recognizer.HypothesisGenerated += Recognizer_HypothesisGenerated;
            _recognizer.StateChanged        += Recognizer_StateChanged;
            _recognizer.ContinuousRecognitionSession.ResultGenerated += (s, e) => { Debug.WriteLine($"[Speech to Text]: recognizer results: {e.Result.Text}, {e.Result.RawConfidence.ToString()}, {e.Result.Confidence.ToString()}"); };
            Debug.WriteLine("[Speech to Text]: done initializing Speech Recognizer");
            return(true);
        }
Example #16
        /// <summary>
        /// Initializes a SpeechRecognition object, configuring the recognizer and grammar.
        /// </summary>
        /// <returns>True if the initialization process succeeded.</returns>
        public async Task <bool> Initialize()
        {
            speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);

            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.Constraints.Add(webSearchGrammar);

            var compilationResult = await speechRecognizer.CompileConstraintsAsync();

            return(compilationResult.Status == SpeechRecognitionResultStatus.Success);
        }
Example #17
        public async Task InitializeSpeechRecognizerAsync()
        {
            speechRecognizerUI = new SpeechRecognizer();

            speechRecognizerUI.UIOptions.IsReadBackEnabled = true;
            speechRecognizerUI.UIOptions.ShowConfirmation  = true;

            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Dear Diary");

            speechRecognizerUI.Constraints.Add(topicConstraint);
            await speechRecognizerUI.CompileConstraintsAsync();
        }
Example #18
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// </summary>
        /// <param name="e">The navigation event details</param>
        private async Task InitSpeech()
        {
            // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
            if (permissionGained)
            {
                // Enable the recognition buttons.
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    // cleanup prior to re-initializing this scenario.
                    //speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer();

                // Provide feedback to the user about the state of the recognizer.
                //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition buttons.
                    button.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }

            }
            else
            {
                // "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
Example #19
        async private void OnRecognizeNoUI(object sender, RoutedEventArgs e)
        {
            var recognizer = new SpeechRecognizer();

            var topic = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            recognizer.Constraints.Add(topic);
            await recognizer.CompileConstraintsAsync();

            var result = await recognizer.RecognizeAsync();

            txt_dictation.Text = result.Text;
        }
Example #20
        private async void VoiceButton_Tapped(object sender, TappedRoutedEventArgs e)
        {
            var speechRecognizer = new SpeechRecognizer();
            var topicConstraint  = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Example #21
        private async void InitializeRecognizer()
        {
            var grammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "SmartSounder");

            _recognizer.Constraints.Add(grammar);
            var result = await _recognizer.CompileConstraintsAsync();

            _recognizer.ContinuousRecognitionSession.AutoStopSilenceTimeout = new TimeSpan(0, 0, 2);
            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                throw new Exception("Grammar compilation failed: " + result.Status);
            }
        }
Example #22
        public async void RecognizeSpeech()
        {
            var speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);

            var srcpres = await speechRecognizer.CompileConstraintsAsync();

            if (srcpres.Status != SpeechRecognitionResultStatus.Success)
            {
                Console.WriteLine("Failed to compile constraints");
                exitEvent.Set();
                return;
            }

            while (true)
            {
                var res = await speechRecognizer.RecognizeAsync();

                switch (res.Status)
                {
                case SpeechRecognitionResultStatus.Success:
                    break;

                default:
                    Console.WriteLine($"Failed ({res.Status.ToString()}), try again");
                    continue;
                }

                switch (res.Confidence)
                {
                case SpeechRecognitionConfidence.Low:
                case SpeechRecognitionConfidence.Rejected:
                    Console.WriteLine("Not enough confidence...");
                    continue;
                }

                UiBuiltins.Notification(
                    header: "Text spoken",
                    message: res.Text
                    );

                if (res.Text == ExitKeyword)
                {
                    exitEvent.Set();
                    break;
                }
            }
        }
Example #23
        /// <summary>
        /// Runs the service.
        /// </summary>
        /// <param name="state">The state<see cref="object"/></param>
        private static async void Run(object state)
        {
            try
            {
                // Restart the listener only if nothing has happened for more than 30 seconds.
                if (lastListenCylce > DateTime.Now.AddSeconds(-30))
                {
                    return;
                }

                if (recognizer != null)
                {
                    try
                    {
                        await recognizer.StopRecognitionAsync();
                    }
                    catch (Exception ex)
                    {
                        Log(ex);
                    }
                }

                recognizer = new SpeechRecognizer(new Language("de-DE"));
                recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(2);
                recognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(0.5);
                recognizer.StateChanged += RecognizerStateChanged;
                recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;

                var textGrammar = new SpeechRecognitionListConstraint(new List <string> {
                    "Licht an", "Licht aus"
                });
                var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
                recognizer.Constraints.Add(textGrammar);
                recognizer.Constraints.Add(webSearchGrammar);
                SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    Log(LogLevel.Debug, "Speechrecognition compile result: " + compilationResult.ToString());
                    await Listen();
                }
                else
                {
                    Log(LogLevel.Debug, "Speechrecognition compile result: " + compilationResult.ToString());
                }
            }
            catch (Exception ex)
            {
                Log(ex);
            }
        }
Example #24
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                throw;
            }

            try
            {
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                //// RecognizeWithUIAsync allows developers to customize the prompts.
                //speechRecognizer.UIOptions.AudiblePrompt = "Dictate a phrase or sentence...";
                //speechRecognizer.UIOptions.ExampleText = speechResourceMap.GetValue("DictationUIOptionsExampleText", speechContext).ValueAsString;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    //// Disable the recognition buttons.
                    //btnRecognizeWithUI.IsEnabled = false;
                    //btnRecognizeWithoutUI.IsEnabled = false;

                    //// Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                throw;
            }
        }
Example #25
        /*
         *  Starts continuous recognition sessions for prolonged audio input from the user
         */
        public static void InitContinuousSpeechRecognition()
        {
            m_recognizer = new SpeechRecognizer();
            m_recognizer.Timeouts.BabbleTimeout         = System.TimeSpan.FromSeconds(120.0);
            m_recognizer.Timeouts.EndSilenceTimeout     = System.TimeSpan.FromSeconds(120.0);
            m_recognizer.Timeouts.InitialSilenceTimeout = System.TimeSpan.FromSeconds(120.0);
            Debug.WriteLine("print1");
            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            m_recognizer.Constraints.Add(topicConstraint);

            IAsyncOperation <SpeechRecognitionCompilationResult> asyncResult = m_recognizer.CompileConstraintsAsync();

            asyncResult.Completed += CompileConstraintsCompletedHandler;
            Debug.WriteLine("print2");
        }
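A possible shape for the CompileConstraintsCompletedHandler wired above (a sketch; only the delegate signature is fixed by IAsyncOperation<T>.Completed, the body is assumed):

        private static void CompileConstraintsCompletedHandler(
            IAsyncOperation<SpeechRecognitionCompilationResult> asyncInfo, AsyncStatus asyncStatus)
        {
            if (asyncStatus == AsyncStatus.Completed &&
                asyncInfo.GetResults().Status == SpeechRecognitionResultStatus.Success)
            {
                // Start the continuous session once compilation has succeeded.
                _ = m_recognizer.ContinuousRecognitionSession.StartAsync();
            }
        }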
Example #26
        private async void btnSearch_Click(object sender, RoutedEventArgs e)
        {
            this.txtCortanaMessages.Text = "I'm listening...";
            Windows.Globalization.Language langFR = new Windows.Globalization.Language("fr-FR");
            SpeechRecognizer recognizer           = new SpeechRecognizer(langFR);

            SpeechRecognitionTopicConstraint topicConstraint
                = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            recognizer.Constraints.Add(topicConstraint);
            await recognizer.CompileConstraintsAsync(); // Required

            var recognition = recognizer.RecognizeAsync();

            recognition.Completed += this.Recognition_Completed;
        }
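The Recognition_Completed handler attached above would follow the AsyncOperationCompletedHandler pattern; a minimal sketch (the body is assumed):

        private void Recognition_Completed(
            IAsyncOperation<SpeechRecognitionResult> asyncInfo, AsyncStatus asyncStatus)
        {
            if (asyncStatus == AsyncStatus.Completed)
            {
                SpeechRecognitionResult result = asyncInfo.GetResults();
                // Marshal back to the UI thread before touching controls in a real app.
                System.Diagnostics.Debug.WriteLine(result.Text);
            }
        }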
Example #27
        public MainPage()
        {
            InitializeComponent();
            NavigationCacheMode = NavigationCacheMode.Required;

            _speechRecognizer = new SpeechRecognizer();
            _speechRecognizer.StateChanged += SpeechRecognizerOnStateChanged;
            _speechRecognizer.RecognitionQualityDegrading += SpeechRecognizerOnRecognitionQualityDegrading;

            // Add a web search topic constraint to the recognizer.
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            _speechRecognizer.Constraints.Add(topicConstraint);

            // Kick off constraint compilation; the result cannot be awaited in a constructor.
            _speechRecognizer.CompileConstraintsAsync();
        }
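Because the constructor cannot await, the compilation status above goes unobserved. One way to handle it (a sketch, assuming a Loaded event handler is wired on the page):

        private async void MainPage_Loaded(object sender, RoutedEventArgs e)
        {
            // Awaiting here lets compilation failures be observed and reported.
            var result = await _speechRecognizer.CompileConstraintsAsync();
            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine($"Grammar compilation failed: {result.Status}");
            }
        }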
Example #28
        async private void OnRecognizeFromWeb(object sender, RoutedEventArgs e)
        {
            var recognizer = new SpeechRecognizer();

            recognizer.UIOptions.ExampleText       = "You can say 'Something'";
            recognizer.UIOptions.AudiblePrompt     = "Say something I'm giving up on you.";
            recognizer.UIOptions.IsReadBackEnabled = false;
            recognizer.UIOptions.ShowConfirmation  = false;
            var topic = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            recognizer.Constraints.Add(topic);
            await recognizer.CompileConstraintsAsync();

            var result = await recognizer.RecognizeWithUIAsync();

            txt_dictation.Text = result.Text;
        }
Example #29
		private async void Init(Windows.Globalization.Language language)
		{
			ListenButton.IsEnabled = false;
			bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
			if (!permissionGained)
			{
				await new MessageDialog("Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.").ShowAsync();
			}

			var recognizer = new SpeechRecognizer(language);
			var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
			recognizer.Constraints.Add(topicConstraint);
			var compilationResult = await recognizer.CompileConstraintsAsync();

			_SpeechRecognizer = recognizer;
			ListenButton.IsEnabled = true;
		}
Example #30
 public async Task LoadRecognizerAsync()
 {
     var permission = await Template10.Utils.AudioUtils.RequestMicrophonePermission();
     if (permission && _SpeechRecognizer == null)
     {
         _SpeechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
         var constraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
         _SpeechRecognizer.Constraints.Add(constraint);
         var compilation = await _SpeechRecognizer.CompileConstraintsAsync();
         if (compilation.Status != SpeechRecognitionResultStatus.Success)
             throw new Exception(compilation.Status.ToString());
     }
     else if (!permission)
     {
         throw new Exception("RequestMicrophonePermission returned false");
     }
 }
Example #31
        /// <summary>
        /// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
        /// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
        /// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
        /// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
        /// audio input.
        /// </summary>
        /// <param name="e">Unused navigation parameters</param>
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            rootPage = MainPage.Current;

            // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            // Prompt the user for permission to access the microphone. This request will only happen
            // once, it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                btnContinuousRecognize.IsEnabled = true;
            }
            else
            {
                this.dictationTextBox.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
            }

            this.speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                rootPage.NotifyUser("Grammar Compilation Failed: " + result.Status.ToString(), NotifyType.ErrorMessage);
                btnContinuousRecognize.IsEnabled = false;
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
Example #32
        private static async void InitializeSpeechRecognizer(Language language)
        {
            speechRecognizer = new SpeechRecognizer(language);
            var grammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Dictation");

            speechRecognizer.Constraints.Add(grammar);

            SpeechRecognitionCompilationResult result =
                await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated +=
                ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.ContinuousRecognitionSession.Completed +=
                ContinuousRecognitionSession_Completed;
            speechRecognizer.HypothesisGenerated +=
                SpeechRecognizer_HypothesisGenerated;
        }
Example #33
        private async Task CompileDictationConstraint()
        {
#if VERBOSE_DEBUG
            Debug.WriteLine("SpeechManager: Compiling dictation constraint");
#endif

            SpeechRecognizer.Constraints.Clear();

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
            SpeechRecognizer.Constraints.Add(dictationConstraint);
            var result = await SpeechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("SpeechRecognizer.CompileConstraintsAsync failed for dictation");
            }
        }
Example #34
        private async void Init(Windows.Globalization.Language language)
        {
            ListenButton.IsEnabled = false;
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (!permissionGained)
            {
                await new MessageDialog("Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.").ShowAsync();
            }

            var recognizer      = new SpeechRecognizer(language);
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            recognizer.Constraints.Add(topicConstraint);
            var compilationResult = await recognizer.CompileConstraintsAsync();

            _SpeechRecognizer      = recognizer;
            ListenButton.IsEnabled = true;
        }
Example #35
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer()
        {
            // await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
            // dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Grammar Compilation Failed: " + result.Status.ToString());
                // rootPage.NotifyUser("Grammar Compilation Failed: " + result.Status.ToString(), NotifyType.ErrorMessage);
                //btnContinuousRecognize.IsEnabled = false;
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
Example #36
        //------------------------------------------------------------------------------------------------------------------------
        private async void Page_Loaded(object sender, RoutedEventArgs e)
        {
            // Use the connection string acquired from the Device Explorer.
            deviceClient = DeviceClient.CreateFromConnectionString("HostName=demoyodiwohub......", TransportType.Http1);

#if GROVE_ENABLED
            rotarywatcher = new RotaryWatcher(GrovePi.Pin.AnalogPin2);
            rotarywatcher.OnNewValueAcquiredCb = OnSensedValue;
            rotarywatcher.Watch();
            buttonwatcher = new ButtonWatcher(GrovePi.Pin.DigitalPin3);
            buttonwatcher.OnNewValueAcquiredCb = OnSensedValue;
            //buttonwatcher.Watch();
            lightwatcher = new LightWatcher(GrovePi.Pin.DigitalPin4);
            lightwatcher.OnNewValueAcquiredCb = OnSensedValue;
            //lightwatcher.Watch();
            led = new Led(GrovePi.Pin.DigitalPin5);
            lcd = new LCD();
#endif

            //start speech recognition
            try
            {
                speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
                speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Apply the dictation topic constraint to optimize for dictated freeform speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                var result = await speechRecognizer.CompileConstraintsAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    //start recogniser
                    try { recognHeartBeat(); } catch { }
                }
            }
            catch { }

            //receive events from the Azure IOT hub
            ReceiveDataFromAzure();
        }
Example #37
        private async void VoiceButton_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                // Get the top user-preferred language and its display name.
                var topUserLanguage = Windows.System.UserProfile.GlobalizationPreferences.Languages[0];
                var language = new Windows.Globalization.Language(topUserLanguage);

                firstStopAttemptDone = false;
                listening = true;
                using (speechRecognizer = new SpeechRecognizer(language))
                {

                    var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, WEB_SEARCH);
                    speechRecognizer.Constraints.Add(dictationConstraint);
                    SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                    // setting timeouts
                    speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(4.0);
                    speechRecognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(4.0);
                    speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.0);

                    speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                        return;

                    VisualStateManager.GoToState(this, VISUAL_STATE_LISTENING, true);
                    this.IsReadOnly = true;
                    this.Text = LISTENING_TEXT;

                    SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();
                    if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        Text = speechRecognitionResult.Text;
                    else
                        Text = SPEECH_RECOGNITION_FAILED;

                   
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text = string.Empty;
            }
            finally
            {
                timer.Stop();
                hypotesis = string.Empty;
                VisualStateManager.GoToState(this, VISUAL_STATE_NOT_LISTENING, true);
                this.IsReadOnly = false;
                listening = false;
            }
        }
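
The click handler above subscribes to HypothesisGenerated, but the handler itself is not shown. A plausible shape for it, assuming it mirrors the partial text into the control while the user is still speaking (this body is a sketch, not the original code; hypotesis is the field used in the finally block):

        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            hypotesis = args.Hypothesis.Text;
            // Hypotheses arrive off the UI thread; marshal back before touching the Text property.
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                Text = hypotesis + "...";
            });
        }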
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();


            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                Status.Text = "エラー";
            }
        }
Example #39
 private async void VoiceRecognizer()
 {
     voiceRecognizer = new SpeechRecognizer();
     SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "development");
     voiceRecognizer.Constraints.Add(topicConstraint);
     SpeechRecognitionCompilationResult result = await voiceRecognizer.CompileConstraintsAsync();
     if (result.Status != SpeechRecognitionResultStatus.Success)
     {
         return; // Grammar failed to compile; nothing to recognize.
     }
     SpeechRecognitionResult speechRecognitionResult = await voiceRecognizer.RecognizeAsync();
     //voiceRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
     //voiceRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
     //await voiceRecognizer.ContinuousRecognitionSession.StartAsync();
     if (pname == "Lorenzo")
     {
         if (speechRecognitionResult.Text.Contains("expensive") || speechRecognitionResult.Text.Contains("expense"))
         {
             //speechText.Text = "So much expensive";
             ReadVoice(Error.Not_Found);
             //pageView.Navigate(new Uri("http://www.americanas.com.br/produto/113151382/carro-eletrico-sport-car-vermelho-6v"));
         }
         else
         {
             ReadVoice(Error.Not_Found);
         }
     }
     else
     {
         ReadVoice(Error.Not_Found);
     }
 }
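
ReadVoice(Error.Not_Found) is called above without its definition. A minimal sketch of such a helper using the UWP SpeechSynthesizer, assuming an Error enum and a MediaElement named mediaElement in the page's XAML (all assumptions, not the original code):

 private async void ReadVoice(Error error)
 {
     // Assumed mapping from the error code to a spoken message.
     string message = (error == Error.Not_Found) ? "Sorry, I could not find that." : "Something went wrong.";
     using (var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
     {
         var stream = await synthesizer.SynthesizeTextToStreamAsync(message);
         mediaElement.SetSource(stream, stream.ContentType); // mediaElement: assumed MediaElement in XAML
     }
 }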
Example #40
        private async Task InitSpeech()
        {
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await Template10.Utils.AudioUtils.RequestMicrophonePermission();
            if (permissionGained)
            {
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                speechRecognizer = new SpeechRecognizer();

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                    button.IsEnabled = false;

            }
            else
            {
                Text = "Permission to access the microphone was denied by the user.";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
        private async void InitializeSpeechRecognizer()
        {
            try
            {
                if (speechRecognizer != null)
                {
                    // Dispose is enough for cleanup; calling RecognizeAsync() here would start
                    // (and then cancel) a brand-new recognition rather than stopping an old one.
                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }
                speechRecognizer = new SpeechRecognizer();
                var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
                speechRecognizer.Constraints.Add(topicConstraint);
                await speechRecognizer.CompileConstraintsAsync();

                this.Operation = await speechRecognizer.RecognizeAsync();
                if (Operation.Status == SpeechRecognitionResultStatus.Success)
                {
                    ResultGenerated(Operation.Text);
                    speechRecognizer.Dispose();
                    speechRecognizer = null;
                }
            }
            catch (Exception)
            {
                // Recognition can fail (e.g. no microphone access); leave the recognizer unset.
            }
        }
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Add a web search topic constraint to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // RecognizeWithUIAsync allows developers to customize the prompts.    
            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText = speechResourceMap.GetValue("WebSearchUIOptionsExampleText", speechContext).ValueAsString;
            
            // Compile the constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = "Unable to compile grammar.";
            }
        }
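
The initializer above configures UIOptions, but the recognition call itself is not shown. A minimal sketch of how it would typically be invoked, assuming a click handler for the btnRecognizeWithUI button referenced above (the handler body is an assumption):

        private async void btnRecognizeWithUI_Click(object sender, RoutedEventArgs e)
        {
            // Shows the system speech UI with the audible prompt and example text configured above.
            SpeechRecognitionResult result = await speechRecognizer.RecognizeWithUIAsync();
            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = result.Text;
            }
        }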
Example #43
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                speechRecognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
                speechRecognizer.Dispose();
                speechRecognizer = null;
            }
            speechRecognizer = new SpeechRecognizer(recognizerLanguage);
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                checkError.Visibility = Visibility.Visible;
                errorCheck.Visibility = Visibility.Visible;
                errorCheck.Text = "Recognition Failed!";
                return; // Don't wire up session events on a recognizer whose grammar failed to compile.
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
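
The comments above describe the three events, but the handlers themselves are not shown. Plausible shapes for the two session handlers, assuming recognized text is appended to a TextBox named dictationTextBox (names and bodies are assumptions):

        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Low-confidence / rejected results typically hit the garbage rule and are skipped.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
                    () => dictationTextBox.Text += args.Result.Text + " ");
            }
        }

        private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionSessionCompletedEventArgs args)
        {
            if (args.Status != SpeechRecognitionResultStatus.Success)
            {
                // e.g. TimeoutExceeded after prolonged silence.
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
                    () => errorCheck.Text = "Recognition stopped: " + args.Status);
            }
        }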
Example #44
        // Initialize Speech Recognizer
        private async void InitializeSpeechRecognizer()
        {
            // Initialize name recognizer
            nameRecognizer = new SpeechRecognizer();

            // Create list constraint
            SpeechRecognitionListConstraint listConstraint = new SpeechRecognitionListConstraint(listOfNames);

            // Add list constraint and compile
            nameRecognizer.Constraints.Add(listConstraint);
            SpeechRecognitionCompilationResult nameResult = await nameRecognizer.CompileConstraintsAsync();

            if (nameResult.Status != SpeechRecognitionResultStatus.Success)
            {
                ListenerStatus.Text = "Unable to initialize Name listener.";
                return;
            }

            // Initialize item recognizer
            itemRecognizer = new SpeechRecognizer();

            // Create topic constraint
            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Short Form");

            // Add topic constraint and compile
            itemRecognizer.Constraints.Add(topicConstraint);
            SpeechRecognitionCompilationResult itemResult = await itemRecognizer.CompileConstraintsAsync();

            if (itemResult.Status != SpeechRecognitionResultStatus.Success)
            {
                ListenerStatus.Text = "Unable to initialize Item listener.";
                return;
            }

            listeningIsEnabled = true;

            ListenerStatus.Text = "Listeners initialized correctly.";
        }
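
A sketch of how the two recognizers initialized above might be used together: first match a name against the list constraint, then capture free-form item text with the web-search constraint (the method name and flow are assumptions):

        private async void StartListening()
        {
            if (!listeningIsEnabled) return;

            SpeechRecognitionResult nameResult = await nameRecognizer.RecognizeAsync();
            if (nameResult.Status != SpeechRecognitionResultStatus.Success ||
                nameResult.Confidence == SpeechRecognitionConfidence.Rejected)
                return;

            SpeechRecognitionResult itemResult = await itemRecognizer.RecognizeAsync();
            if (itemResult.Status == SpeechRecognitionResultStatus.Success)
            {
                ListenerStatus.Text = nameResult.Text + ": " + itemResult.Text;
            }
        }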
Example #45
        //protected async override void OnNavigatedTo(NavigationEventArgs e)
        protected async Task GotoNavigation()
        {
            // Prompt the user for permission to access the microphone. This request will only happen
            // once; it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
            if (permissionGained)
            {

                //btnContinuousRecognize.IsEnabled = true;
            }
            else
            {
                this.dictationTextBox.Text = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
            }

            // var lang = new Windows.Globalization.Language("en-US"); // or e.g. "zh-CN"

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            //object outValue;
            //CoreApplication.Properties.TryGetValue("speechRecognizer", out outValue);
            //SpeechRecognizer t = (SpeechRecognizer)outValue;
            // t.Constraints.Add(dictationConstraint);

            speechRecognizer.Constraints.Add(dictationConstraint);
            //SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
            try
            {
                await speechRecognizer.CompileConstraintsAsync();
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
            }
            //if (result.Status != SpeechRecognitionResultStatus.Success)
            //{
            //    // btnContinuousRecognize.IsEnabled = false;
            //}

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
            await startDictate();
        }
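
startDictate() is awaited above without its definition. A minimal sketch, assuming it just starts the continuous session and reports failures into the dictation box (the body is an assumption):

        private async Task startDictate()
        {
            try
            {
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
            }
            catch (Exception ex)
            {
                // Starting can fail, e.g. if the speech privacy policy has not been accepted.
                dictationTextBox.Text = "Unable to start dictation: " + ex.Message;
            }
        }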
Example #46
        private async Task InitSpeech()
        {
            button.IsEnabled = true;

            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            speechRecognizer = new SpeechRecognizer();

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, DICTATION);
            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                button.IsEnabled = false;

            await Task.Yield();
        }
Example #47
        /// <summary>
        /// Tries to initialize the SpeechRecognizer object.
        /// </summary>
        /// <returns>true if the SpeechRecognizer is successfully initialized, false otherwise</returns>
        private async Task<bool> TryInitSpeech()
        {
            bool retVal = false;

            try
            {
                await TryDisposeSpeech();

                speechRecognizer = new SpeechRecognizer();

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, DICTATION);
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                    retVal = true;
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text = SPEECH_RECOGNITION_FAILED;
                retVal = false;
            }

            return retVal;
        }
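
TryDisposeSpeech() is the counterpart called at the top of TryInitSpeech but is not shown. A matching sketch (assumed): detach the handler and dispose so initialization starts from a clean state.

        private Task TryDisposeSpeech()
        {
            if (speechRecognizer != null)
            {
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
                speechRecognizer.Dispose();
                speechRecognizer = null;
            }
            return Task.CompletedTask;
        }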
Example #48
        private async void InitializeSpeechRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }
            speechRecognizer = new SpeechRecognizer();
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            var operation = await speechRecognizer.RecognizeAsync();
            if (!this.Completed && operation.Status == SpeechRecognitionResultStatus.Success)
            {
                this.Completed = true;
                ResultGenerated(operation.Text);
                speechRecognizer.Dispose();
                speechRecognizer = null;
            }
        }
        private async void btnSearch_Click(object sender, RoutedEventArgs e)
        {
            this.txtCortanaMessages.Text = "Je vous écoute...";
            Windows.Globalization.Language langFR = new Windows.Globalization.Language("fr-FR");
            SpeechRecognizer recognizer = new SpeechRecognizer(langFR);

            SpeechRecognitionTopicConstraint topicConstraint
                    = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            recognizer.Constraints.Add(topicConstraint);
            await recognizer.CompileConstraintsAsync(); // Required

            var recognition = recognizer.RecognizeAsync();
            recognition.Completed = this.Recognition_Completed; // A WinRT async operation's Completed handler can only be set once, so use '=' rather than '+='.
        }
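
Recognition_Completed is attached above without its definition. A plausible handler for this fire-and-forget pattern (assumed, including the fallback message): read the result off the operation, then marshal back to the UI thread.

        private async void Recognition_Completed(IAsyncOperation<SpeechRecognitionResult> asyncInfo, AsyncStatus asyncStatus)
        {
            if (asyncStatus != AsyncStatus.Completed) return;

            SpeechRecognitionResult result = asyncInfo.GetResults();
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                this.txtCortanaMessages.Text = result.Status == SpeechRecognitionResultStatus.Success
                    ? result.Text
                    : "Je n'ai pas compris."; // "I didn't understand."
            });
        }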
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                rootPage.NotifyUser("Grammar Compilation Failed: " + result.Status.ToString(), NotifyType.ErrorMessage);
                btnContinuousRecognize.IsEnabled = false;
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
Example #51
        private async void VoiceButton_Tapped(object sender, TappedRoutedEventArgs e)
        {
            var speechRecognizer = new SpeechRecognizer();
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Example #52
        public async void Run(IBackgroundTaskInstance taskInstance)
        {
            BackgroundTaskDeferral deferral = taskInstance.GetDeferral(); // Keeps the background task alive while the recognition loop runs.

            using (MopidyClient client = new MopidyClient())
            {
                await client.Open();
                await client.Play("spotify:track:1hKdDCpiI9mqz1jVHRKG0E");

                var speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);

                var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
                speechRecognizer.Constraints.Add(webSearchGrammar);

                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    while (true)
                    {
                        var recognitionOperation = speechRecognizer.RecognizeAsync();
                        SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;

                        if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        {
                            if (speechRecognitionResult.Text.StartsWith("play", StringComparison.OrdinalIgnoreCase))
                            {
                                string playSearchString = speechRecognitionResult.Text.Substring(4).Trim();

                                string uri;
                                if (playSearchString.StartsWith("artist", StringComparison.OrdinalIgnoreCase))
                                {
                                    uri = await client.SearchArtist(playSearchString.Substring(6).Trim());
                                }
                                else
                                {
                                    uri = await client.Search(playSearchString);
                                }

                                if (uri != null)
                                {
                                    await client.Play(uri);
                                }
                            }
                            else if (speechRecognitionResult.Text.StartsWith("stop", StringComparison.OrdinalIgnoreCase))
                            {
                                await client.Stop();
                            }
                            else if (speechRecognitionResult.Text.StartsWith("louder", StringComparison.OrdinalIgnoreCase))
                            {
                                int volume = await client.GetVolume();
                                volume = Math.Min(volume + 10, 100);
                                await client.SetVolume(volume);
                            }
                            else if (speechRecognitionResult.Text.StartsWith("quieter", StringComparison.OrdinalIgnoreCase))
                            {
                                int volume = await client.GetVolume();
                                volume = Math.Max(volume - 10, 0);
                                await client.SetVolume(volume);
                            }
                            else if (speechRecognitionResult.Text.StartsWith("mute", StringComparison.OrdinalIgnoreCase))
                            {
                                await client.SetVolume(0);
                            }
                        }
                        else
                        {
                            // Background tasks have no UI; log the failure status instead.
                            System.Diagnostics.Debug.WriteLine(string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status));
                        }

                    }
                }
            }
        }
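
As written, the loop above never exits and the deferral is never completed. One way to allow a clean shutdown would be to honor task cancellation; a sketch under that assumption (the flag and handler are not in the original):

        private volatile bool _cancelRequested;

        private void OnCanceled(IBackgroundTaskInstance sender, BackgroundTaskCancellationReason reason)
        {
            _cancelRequested = true;
        }

        // In Run: register taskInstance.Canceled += OnCanceled before the loop, change
        // while (true) to while (!_cancelRequested), and call deferral.Complete() at the end.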