Example #1
        private async void InitRecognitionEngine()
        {
            try
            {
                speechRecognizer = new SpeechRecognizer(new Language(languageTag));
            }
            catch
            {
                speechRecognizer = new SpeechRecognizer();
            }

            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(GetCommandsText(), "tag1"));

            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                return;
            }

            SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();
            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                phoneNumber = result.Text;
            }

            // Legacy desktop alternative using System.Speech.Recognition:
            //    var cultureInfo = new CultureInfo("ru-RU");
            //    //var cultureInfo = new CultureInfo("en-US");
            //    Thread.CurrentThread.CurrentCulture = cultureInfo;
            //    Thread.CurrentThread.CurrentUICulture = cultureInfo;
            //
            //    // Recognizer cultures include: en-GB, en-US, de-DE, es-ES, fr-FR, ja-JP, zh-CN, zh-TW.
            //
            //    var commands = GetCommandsText();
            //    var choices = new Choices(commands);
            //    var builder = new GrammarBuilder(choices);
            //    builder.Culture = cultureInfo;
            //
            //    recognitionEngine = new SpeechRecognitionEngine();
            //    recognitionEngine.SetInputToDefaultAudioDevice();
            //    recognitionEngine.UnloadAllGrammars();
            //    recognitionEngine.LoadGrammar(new Grammar(builder));
            //    //recognitionEngine.LoadGrammar(new DictationGrammar()); // accepts any text
            //
            //    recognitionEngine.SpeechRecognized += recognitionEngine_SpeechRecognized;
            //    recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #2
        public static async Task <SpeechRecognitionResult> ListenOnceAsync()
        {
            if (!IsStarted)
            {
                try
                {
                    IsStarted  = true;
                    recognizer = new SpeechRecognizer();
                    // compile the speech constraints and start listening
                    await recognizer.CompileConstraintsAsync();

                    // keep listening until the result isn't an empty string since sometimes it rings up false positives
                    SpeechRecognitionResult result = null;
                    while (result == null || StringUtils.IsBlank(result.Text))
                    {
                        result = await recognizer.RecognizeAsync();
                    }
                    return(result);
                }
                catch (Exception)
                {
                    return(null);
                }
            }
            else
            {
                throw new Exception("Can't Listen when already started!");
            }
        }
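A minimal caller sketch for the method above; since the snippet never resets IsStarted, a second call always throws, so a hypothetical reset step (IsStarted = false once the session ends) is assumed here:

        SpeechRecognitionResult heard = await ListenOnceAsync();
        if (heard != null && heard.Status == SpeechRecognitionResultStatus.Success)
        {
            Debug.WriteLine("Heard: " + heard.Text);
        }
        // Hypothetical: IsStarted = false; would be needed before listening again.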
Example #3
        public async void StartOverlayRecognization()
        {
            OnstartEvent(new EventArgs());
            // Create an instance of SpeechRecognizer.
            speechRecognizer = InitSpeechRecognizer();

            // Listen for audio input issues.
            speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London'";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            speechToTextEventArgs.SpeechResult = speechRecognitionResult.Text;
            OnHaveResultEvent(speechToTextEventArgs);

            //// Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");

            //await messageDialog.ShowAsync();
        }
Example #4
		private async void StartVoiceRecognition()
		{
			await SpeakText( "Say Captains Log at any time to create a log entry." );

			speechRecognizerCaptainsLogCommand = new SpeechRecognizer();

			// Add and compile the command constraint once; re-adding it on every loop
			// iteration would accumulate duplicate constraints on the recognizer.
			ISpeechRecognitionConstraint commandConstraint = 
				new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
			speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
			await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();

			while ( !cancellationSource.IsCancellationRequested )
			{
				// Listen for user to say "Captains Log"
				SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();

				if ( commandResult.Status != SpeechRecognitionResultStatus.Success
					|| commandResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| cancellationSource.IsCancellationRequested )
				{
					continue;
				}
				// Recognized user saying "Captains Log"

				// Listen for the user's dictation entry
				var captainsLogDictationRecognizer = new SpeechRecognizer();

				ISpeechRecognitionConstraint dictationConstraint = 
					new SpeechRecognitionTopicConstraint( 
						SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );

				captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );

				await captainsLogDictationRecognizer.CompileConstraintsAsync();

				captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
				captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
				captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
				captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;

				SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();

				if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
					|| dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| string.IsNullOrWhiteSpace( dictationResult.Text )
					|| cancellationSource.IsCancellationRequested )
				{
					captainsLogDictationRecognizer.Dispose();

					continue;
				}
				// Recognized user's dictation entry

				AddLogEntry( dictationResult.Text );

				captainsLogDictationRecognizer.Dispose();
			}

			speechRecognizerCaptainsLogCommand.Dispose();
		}
Example #5
        public async Task <bool> RegisterCortanaCommands(Dictionary <string, Action> commands)
        {
            try
            {
                cortanaCommands  = commands;
                SpeechRecognizer = new SpeechRecognizer();
                var constraint = new SpeechRecognitionListConstraint(cortanaCommands.Keys);
                SpeechRecognizer.Constraints.Clear();
                SpeechRecognizer.Constraints.Add(constraint);
                var result = await SpeechRecognizer.CompileConstraintsAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    // Subscribe before starting the session so no early results are missed.
                    SpeechRecognizer.ContinuousRecognitionSession.ResultGenerated += (s, e) =>
                    {
                        if (e.Result.RawConfidence >= 0.5f)
                        {
                            Action handler;
                            if (cortanaCommands.TryGetValue(e.Result.Text, out handler))
                            {
                                Application.InvokeOnMain(handler);
                            }
                        }
                    };
                    await SpeechRecognizer.ContinuousRecognitionSession.StartAsync();

                    return(true);
                }
                return(false);
            }
            catch (Exception exc)
            {
                LogSharp.Warn("RegisterCortanaCommands: " + exc);
                return(false);
            }
        }
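A possible call site for RegisterCortanaCommands; the phrases and handlers below are invented for illustration:

        var registered = await RegisterCortanaCommands(new Dictionary<string, Action>
        {
            { "open settings", () => Debug.WriteLine("open settings") },
            { "go back",       () => Debug.WriteLine("go back") }
        });
        if (!registered)
        {
            // Recognition is unavailable (e.g. constraint compilation failed); fall back to touch input.
        }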
Example #6
        private async Task CompilePhrases()
        {
#if VERBOSE_DEBUG
            Debug.WriteLine("SpeechManager: Compiling command phrase constraints");
#endif

            try
            {
                SpeechRecognizer.Constraints.Clear();

                AvailablePhrases.ForEach(p =>
                {
                    string phraseNoSpaces = p.Replace(" ", String.Empty);
                    SpeechRecognizer.Constraints.Add(
                        new SpeechRecognitionListConstraint(
                            new List <string>() { p },
                            phraseNoSpaces));
                });

                var result = await SpeechRecognizer.CompileConstraintsAsync();

                if (result.Status != SpeechRecognitionResultStatus.Success)
                {
                    Debug.WriteLine("SpeechManager: CompileConstraintsAsync failed for phrases");
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.ToString());
            }
        }
Example #7
        /// <summary>
        /// Initializes the speech recognizer for voice recognition.
        /// Pass n == 0 to dispose the existing recognizer instead.
        /// </summary>
        public async void InitSpeechRecognizer(int n)
        {
            if (n == 0)
            {
                Rec.Dispose();
                return;
            }
            Rec = new SpeechRecognizer();
            Rec.ContinuousRecognitionSession.ResultGenerated += Rec_ResultGenerated;

            StorageFile Store = await Package.Current.InstalledLocation.GetFileAsync(@"GrammarFile.xml");

            SpeechRecognitionGrammarFileConstraint constraint = new SpeechRecognitionGrammarFileConstraint(Store);

            Rec.Constraints.Add(constraint);
            SpeechRecognitionCompilationResult result = await Rec.CompileConstraintsAsync();

            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                status.Text = "Speech Recognition started.";
                tts(status.Text);
                Rec.UIOptions.AudiblePrompt = "Speech Recognition started.";
                await Rec.ContinuousRecognitionSession.StartAsync();
            }
        }
Example #8
        public async Task InitializeRecognizer()
        {
            if (_speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                DisposeSpeechRecognizer();
            }

            _speechRecognizer = new SpeechRecognizer(new Language("de-DE"));
            _speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

            //_speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(10);
            //_speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(10);
            //_speechRecognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(10);

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "maja");

            _speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await _speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                ShowMessage("Grammar Compilation Failed: ");
            }
        }
Example #9
        private async void initializeSpeechRecognizer()
        {
            // Initialize recognizer
            recognizer = new SpeechRecognizer();

            // Set event handlers
            recognizer.StateChanged += RecognizerStateChanged;
            recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;

            // Load grammar file constraint
            string      fileName           = SRGS_FILE;
            StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(fileName);

            SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);

            // Add the grammar constraint
            recognizer.Constraints.Add(grammarConstraint);

            // Compile the grammar
            SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

            Debug.WriteLine("Status: " + compilationResult.Status.ToString());

            // If compilation succeeded, start the continuous recognition session.
            if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Result: " + compilationResult.ToString());

                await recognizer.ContinuousRecognitionSession.StartAsync();
            }
            else
            {
                Debug.WriteLine("Status: " + compilationResult.Status);
            }
        }
Example #10
        public static async Task InitializeRecognizer(Language recognizerLanguage)
        {
            try
            {
                // Load the grammar file packaged with the app.
                StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(GrammarPath);

                // Initialize the SpeechRecognizer and add the grammar.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Create a grammar file constraint and add it to the recognizer.
                SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);
                speechRecognizer.Constraints.Add(grammarConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile them.
                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
                    speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(SpeechTimespan);
                }
            }
            catch (Exception ex)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                await messageDialog.ShowAsync();

                throw;
            }
        }
Example #11
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// Construct a recognizer with a simple list of recognized terms.
        /// </summary>
        /// <param name="e">The navigation event details</param>
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                // enable the recognition buttons
                btnRecognizeWithUI.IsEnabled    = true;
                btnRecognizeWithoutUI.IsEnabled = true;
            }
            else
            {
                this.resultTextBlock.Visibility = Visibility.Visible;
                this.resultTextBlock.Text       = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
            }

            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // You could create any IEnumerable dynamically.
            string[] responses = { "Yes", "No" };

            // Add a list constraint to the recognizer.
            var listConstraint = new SpeechRecognitionListConstraint(responses, "yesOrNo");

            speechRecognizer.UIOptions.ExampleText = @"Ex. ""Yes"", ""No""";
            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();
        }
Example #12
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// Construct a speech recognizer using the default dictation grammar.
        /// </summary>
        /// <param name="e">The navigation event details</param>
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                // enable the recognition buttons
                btnRecognizeWithUI.IsEnabled    = true;
                btnRecognizeWithoutUI.IsEnabled = true;
            }
            else
            {
                this.resultTextBlock.Visibility = Visibility.Visible;
                this.resultTextBlock.Text       = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
            }

            // Create an instance of SpeechRecognizer.
            this.speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Compile the dictation grammar that is loaded by default.
            await speechRecognizer.CompileConstraintsAsync();
        }
Example #13
        public async Task <string> RecordSpeechFromMicrophoneAsync()
        {
            string recognizedText = string.Empty;

            using (SpeechRecognizer recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage))
            {
                recognizer.Constraints.Add(new SpeechRecognitionListConstraint(acceptedUserInput));
                await recognizer.CompileConstraintsAsync();

                SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                StringBuilder stringBuilder = new StringBuilder();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    if (result.Confidence == SpeechRecognitionConfidence.High)
                    {
                        stringBuilder.Append(result.Text);
                    }
                    else
                    {
                        IReadOnlyList <SpeechRecognitionResult> alternatives =
                            result.GetAlternates(1);

                        if (alternatives.First().RawConfidence > 0.5)
                        {
                            stringBuilder.Append(alternatives.First().Text);
                        }
                    }

                    recognizedText = stringBuilder.ToString();
                }
            }
            return(recognizedText);
        }
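A sketch of calling this helper, assuming acceptedUserInput is a phrase list such as { "yes", "no" }:

        string answer = await RecordSpeechFromMicrophoneAsync();
        if (!string.IsNullOrEmpty(answer))
        {
            Debug.WriteLine("User said: " + answer);
        }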
Example #14
        public async void StartRecognization()
        {
            OnstartEvent(new EventArgs());
            // Create an instance of SpeechRecognizer.
            speechRecognizer = InitSpeechRecognizer();

            // Listen for audio input issues.
            speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London'";
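            // Note: the UIOptions above only affect RecognizeWithUIAsync; the continuous
            // session started below shows no system UI, so these prompt settings go unused here.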
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;

            if (speechRecognizer.State == SpeechRecognizerState.Idle)
            {
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
            }
        }
Example #15
        public override async Task InitializeAsync()
        {
            if (speechRecognizer == null)
            {
                try
                {
                    var recognizer = new SpeechRecognizer(ConvertAILangToSystem(config.Language));

                    // INFO: Dictation is default Constraint
                    //var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                    //recognizer.Constraints.Add(webSearchGrammar);

                    await recognizer.CompileConstraintsAsync();

                    lock (speechRecognizerLock)
                    {
                        if (speechRecognizer == null)
                        {
                            speechRecognizer = recognizer;
                        }
                    }
                }
                catch (Exception e)
                {
                    if ((uint)e.HResult == HRESULT_LANG_NOT_SUPPORTED)
                    {
                        throw new AIServiceException(string.Format("Specified language {0} not supported or not installed on device", config.Language.code), e);
                    }
                    throw;
                }
            }
        }
Example #16
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                btnContinuousRecognize.IsEnabled = false;
            }
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
Example #17
        private async void StartSpeechRecognizer()
        {
            // Compile the loaded GrammarFiles
            SpeechRecognitionCompilationResult compilationResult = await _recognizer.CompileConstraintsAsync();

            // If compilation succeeded, start the continuous recognition session.
            if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Result: " + compilationResult.ToString());

                SpeechContinuousRecognitionSession session = _recognizer.ContinuousRecognitionSession;
                try
                {
                    await session.StartAsync();
                }
                catch (Exception e)
                {
                    //TODO this needs to report to the user that something failed.
                    //also potentially write to a log somewhere.
                    Debug.WriteLine(e.Message);
                }
            }
            else
            {
                //TODO this needs to report to the user that something failed.
                //also potentially write to a log somewhere.
                Debug.WriteLine("Status: " + compilationResult.Status);
            }
        }
Example #18
        /// <summary>
        /// Starts a speech recognition session that can recognize the topics on the checklist
        /// and possibly related words, if that functionality is available.
        /// Also adds the right callbacks to the speech recognizer.
        /// </summary>
        private async void startSpeechRecognition()
        {
            var constraint = new SpeechRecognitionListConstraint(app.getKeywords());

            if (speechRecognizer?.State == SpeechRecognizerState.Capturing)
            {
                await speechRecognizer.ContinuousRecognitionSession.StopAsync();
            }

            speechRecognizer = new SpeechRecognizer();
            speechRecognizer.Constraints.Add(constraint);
            var speechCompilationResult = await speechRecognizer.CompileConstraintsAsync();
            if (speechCompilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Constraint compilation failed: " + speechCompilationResult.Status);
                return;
            }

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += speechResultCallback;

            if (speechRecognizer.State == SpeechRecognizerState.Idle)
            {
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();

                Debug.WriteLine("Started speech recognition session");
            }
            else
            {
                Debug.WriteLine("Speech recognizer is not idle, attemptint to reboot");
                await speechRecognizer.ContinuousRecognitionSession.StopAsync();

                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
            }
        }
Example #19
        // start a speech recognition session and capture the results
        private async void speechButton_Click(object sender, RoutedEventArgs e)
        {
            speechButton.Content   = "Listening...";
            speechButton.IsEnabled = false;

            // Create an instance of SpeechRecognizer.
            using (var speechRecognizer = new SpeechRecognizer())
            {
                // Compile the dictation grammar by default.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

                // output result
                speechOutputBox.Text   = speechRecognitionResult.Text;
                speechButton.Content   = "Speak";
                speechButton.IsEnabled = true;
                //speechInputSubmitted(speechRecognitionResult);

                bool correctGuess = false;
                try
                {
                    correctGuess = mapLearner.guess(speechRecognitionResult);
                }
                catch (ArgumentNullException) { }
                finally
                {
                    AnswerSubmitted(correctGuess);
                }
            }
        }
Example #20
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);
            speechRecognizer.Timeouts.EndSilenceTimeout = new TimeSpan(0, 0, 0, 8);
            speechRecognizer.Timeouts.BabbleTimeout     = new TimeSpan(0, 0, 0, 3);

            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        }
Example #21
        /// <summary>
        /// Tries to initialize the SpeechRecognizer object
        /// </summary>
        /// <returns>true if SpeechRecognizer is successfully initialized, false otherwise</returns>
        private async Task <bool> TryInitSpeech()
        {
            bool retVal = false;

            try
            {
                await TryDisposeSpeech();

                speechRecognizer = new SpeechRecognizer();

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, DICTATION);
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    retVal = true;
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text   = SPEECH_RECOGNITION_FAILED;
                retVal = false;
            }

            return(retVal);
        }
Example #22
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async Task InitializeRecognizer()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Compile the dictation topic constraint, which optimizes for dictated speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled    = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
Example #23
        private async Task InitializeTomaNota(Language recognizerLanguage)
        {
            if (speechRecognizerNotas != null)
            {
                // if we're coming from a previous run, clean up first
                speechRecognizerNotas.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizerNotas.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizerNotas.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizerNotas.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;

                this.speechRecognizerNotas.Dispose();
                this.speechRecognizerNotas = null;
            }

            this.speechRecognizerNotas = new SpeechRecognizer(recognizerLanguage);

            speechRecognizerNotas.StateChanged += SpeechRecognizer_StateChanged; // feedback to the user

            // instead of a grammar, apply the "Dictation" scenario
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizerNotas.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizerNotas.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(result.Status.ToString(), "Excepción inicializando la toma de notas: "); // "Exception initializing note taking"
                await messageDialog.ShowAsync();
            }

            // subscribe to the events
            speechRecognizerNotas.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;       // the session ended without success
            speechRecognizerNotas.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated; // either it understood, or garbage arrived
            speechRecognizerNotas.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;                                  // fed with partial results as they arrive, to give feedback
        }
Example #24
        private async void InitializeSpeechRecognizer()
        {
            // Initialize SpeechRecognizer Object
            MyRecognizer = new SpeechRecognizer();

            // Register Event Handlers
            MyRecognizer.StateChanged += MyRecognizer_StateChanged;
            MyRecognizer.ContinuousRecognitionSession.ResultGenerated += MyRecognizer_ResultGenerated;

            // Create Grammar File Object from the predefined mygrammar.xml
            StorageFile GrammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(@"mygrammar.xml");

            // Add Grammar Constraint from Grammar File
            SpeechRecognitionGrammarFileConstraint GrammarConstraint = new SpeechRecognitionGrammarFileConstraint(GrammarContentFile);

            MyRecognizer.Constraints.Add(GrammarConstraint);

            // Compile Grammar
            SpeechRecognitionCompilationResult CompilationResult = await MyRecognizer.CompileConstraintsAsync();

            // Write Debug Information
            Debug.WriteLine("Status: " + CompilationResult.Status.ToString());

            // If Compilation Successful, Start Continuous Recognition Session
            if (CompilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                await MyRecognizer.ContinuousRecognitionSession.StartAsync();
            }
        }
Example #25
        private async Task InitializeRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();
            //set of responses
            string[] responses = { "hey sanya", "what's up sanya" };

            //list constraint to the recognizer
            var listConstraint = new SpeechRecognitionListConstraint(responses, "AssitantName");

            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the list constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
Example #26
        /// <summary>
        /// Starts the speech recognition
        /// </summary>
        public async void Start()
        {
            if (m_IsDisposed)
            {
                throw new ObjectDisposedException(nameof(DateTimeProvider));
            }

            var hasPermission = await HasMicrophonePermission();

            if (!hasPermission)
            {
                throw new UnauthorizedAccessException("No access to microphone!");
            }

            var grammarFile = await Package.Current.InstalledLocation.GetFileAsync(GrammerFile);

            var grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarFile);

            m_Recognizer.Constraints.Add(grammarConstraint);

            var compilationResult = await m_Recognizer.CompileConstraintsAsync();

            if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                await m_Recognizer.ContinuousRecognitionSession.StartAsync();
            }
        }
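Because Start() is async void, the exceptions it throws after its first await cannot be caught at the call site. A minimal sketch of a Task-returning variant that makes them observable (the body would be identical to Start() above; speechService is a hypothetical instance of this class):

        public async Task StartAsync()
        {
            // ... same body as Start() above ...
        }

        // Caller:
        // try { await speechService.StartAsync(); }
        // catch (UnauthorizedAccessException) { /* point the user to Settings -> Privacy -> Microphone */ }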
Example #27
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async void InitializeRecognizer()
        {
            // Initialize the SRGS-compliant XML file.
            // For more information about grammars for Windows apps and how to
            // define and use SRGS-compliant grammars in your app, see
            // https://msdn.microsoft.com/en-us/library/dn596121.aspx

            StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(@"SRGSColors.xml");

            // Initialize the SpeechRecognizer and add the grammar.
            recognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            recognizer.StateChanged += SpeechRecognizer_StateChanged;

            SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);

            recognizer.Constraints.Add(grammarConstraint);
            SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                return;
            }

            // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
            recognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
        }
Example #28
        /// <summary>
        /// Initializes the speech recognizer.
        /// </summary>
        public async void Initialize()
        {
            // Local recognizer
            triggerRecognizer = new SpeechRecognizer();

            var list = new SpeechRecognitionListConstraint(activationPhrases);

            triggerRecognizer.Constraints.Add(list);
            await triggerRecognizer.CompileConstraintsAsync();

            triggerRecognizer.ContinuousRecognitionSession.Completed += localSessionCompleted;

            triggerRecognizer.ContinuousRecognitionSession.ResultGenerated +=
                LocalSessionResult;

            //triggerRecognizer.HypothesisGenerated += CommandHypothesisGenerated;

            // Command recognizer (web)
            speechRecognizer = new SpeechRecognizer();
            var result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated +=
                CommandResultGenerated;

            speechRecognizer.HypothesisGenerated += CommandHypothesisGenerated;

            speechRecognizer.ContinuousRecognitionSession.Completed +=
                CommandSessionCompleted;

            await StartTriggerRecognizer();

            OnResponseReceived(initText);
        }
Example #29
        private async void InitSpeechRecognition()
        {
            try
            {

                if (speechRecognizerContinuous == null)
                {
                    speechRecognizerContinuous = new SpeechRecognizer();
                    speechRecognizerContinuous.Constraints.Add(
                        new SpeechRecognitionListConstraint(
                            new List<String>() { "Start Listening" }, "start"));
                    SpeechRecognitionCompilationResult contCompilationResult =
                        await speechRecognizerContinuous.CompileConstraintsAsync();

                    if (contCompilationResult.Status != SpeechRecognitionResultStatus.Success)
                    {
                        throw new Exception("Speech constraint compilation failed.");
                    }
                    speechRecognizerContinuous.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                }

                await speechRecognizerContinuous.ContinuousRecognitionSession.StartAsync();
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
            }
        }
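The matching shutdown path isn't shown in this example; a sketch, assuming the continuous session is still running:

        private async void StopSpeechRecognition()
        {
            if (speechRecognizerContinuous != null
                && speechRecognizerContinuous.State != SpeechRecognizerState.Idle)
            {
                await speechRecognizerContinuous.ContinuousRecognitionSession.StopAsync();
            }
        }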
Example #30
        private async Task <SpeechRecognitionResult> SpeechRecognizeAsync()
        {
            if (_speechRecognizer == null)
            {
                // Create an instance of SpeechRecognizer.
                _speechRecognizer = new SpeechRecognizer();

                var songs = new[] { "order", "product", "manage", "capture", "home" };

                // The collection of phrases we expect the user to say one of.

                // Create an instance of the constraint.
                // Pass the collection and an optional tag to identify.
                var playConstraint = new SpeechRecognitionListConstraint(songs);

                // Add it into the recognizer
                _speechRecognizer.Constraints.Add(playConstraint);

                // Then add the constraint for pausing and resuming.

                //var pauseConstraint = new SpeechRecognitionListConstraint(new[] { "Pause", "Resume" }, "pauseAndResume");
                //_speechRecognizer.Constraints.Add(pauseConstraint);

                // Compile the dictation grammar by default.
                await _speechRecognizer.CompileConstraintsAsync();
            }

            // Start recognition and return the result.
            return(await _speechRecognizer.RecognizeWithUIAsync());
        }
Example #31
        public async Task <string> BackGroundRec()
        {
            string Result = "";

            try
            {
                using (SpeechRecognizer recognizer = new SpeechRecognizer())
                {
                    SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

                    if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                    {
                        recognizer.UIOptions.IsReadBackEnabled = false;
                        recognizer.UIOptions.ShowConfirmation  = false;
                        recognizer.UIOptions.AudiblePrompt     = "我在听,请说..."; // "I'm listening, please speak..."
                        //SpeechRecognitionResult recognitionResult = await recognizer.RecognizeWithUIAsync();
                        SpeechRecognitionResult recognitionResult = await recognizer.RecognizeAsync();

                        if (recognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        {
                            Result = recognitionResult.Text;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Result = ex.Message;
            }
            return(Result);
        } // Background (always-listening) recognition
Example #32
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async Task InitializeRecognizer()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Add a web search topic constraint to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.Constraints.Add(webSearchGrammar);

            // RecognizeWithUIAsync allows developers to customize the prompts.
            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. ""weather for London""";

            // Compile the constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled    = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
Example #33
        public async Task InitializeSpeechRecognizerAsync()
        {
            if (this.speechRecognizer != null)
            {
                this.DisposeSpeechRecognizer();
            }

            this.dictatedTextBuilder = new StringBuilder();
            this.speechRecognizer    = new SpeechRecognizer();

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                await new MessageDialog("CompileConstraintsAsync returned " + result.Status, "Error initializing SpeechRecognizer").ShowAsync();
                return;
            }

            this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            this.speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            this.speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
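DisposeSpeechRecognizer() is called above but not shown; a sketch that mirrors the initialization, following the cleanup pattern used in the other examples here:

        private void DisposeSpeechRecognizer()
        {
            this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
            this.speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
            this.speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
            this.speechRecognizer.Dispose();
            this.speechRecognizer = null;
        }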
Example #34
 private SpeechRecognitionService()
 {
     _recognizer = new SpeechRecognizer();
     _recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch,
         "webSearch"));
     _recognizer.CompileConstraintsAsync().AsTask().Wait();
     _recognizer.ContinuousRecognitionSession.ResultGenerated += RecognitionFound;
 }
Example #35
 public MainPage()
 {
     this.InitializeComponent();
     var recognizer = new SpeechRecognizer();
     var topicconstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Development");
     recognizer.Constraints.Add(topicconstraint);
     // Constructors can't await; compilation is just kicked off here and should
     // complete before recognition is started elsewhere.
     var result = recognizer.CompileConstraintsAsync();
 }
Example #36
        private async void listenIn() {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();
            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>() { "note finished" }));

            SpeechRecognitionCompilationResult comResult = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += Con_Result;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Example #37
        private async void listenIn()
        {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();
            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>() { "sponge in", "sponge out", "instrument in", "needle in","needle out", "instrument out", "going to close" }));

            SpeechRecognitionCompilationResult comResult = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += Con_Result;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Example #38
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// </summary>
        /// <param name="e">The navigation event details</param>
        private async Task InitSpeech()
        {
            // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
            if (permissionGained)
            {
                // Enable the recognition buttons.
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    // cleanup prior to re-initializing this scenario.
                    //speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer();

                // Provide feedback to the user about the state of the recognizer.
                //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition buttons.
                    button.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }

            }
            else
            {
                // "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
Example #39
        public MainWindow()
        {
            InitializeComponent();

            recognizer = new SpeechRecognizer();
            List<String> constraints = new List<string>();
            //recognizer.Constraints.Add(new SpeechRecognitionListConstraint(constraints));
            IAsyncOperation<SpeechRecognitionCompilationResult> op = recognizer.CompileConstraintsAsync();
            resultGenerated = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(UpdateTextBox);
            recognizer.ContinuousRecognitionSession.ResultGenerated += resultGenerated;
            OnStateChanged = new TypedEventHandler<SpeechRecognizer, SpeechRecognizerStateChangedEventArgs>(onStateChanged);
            recognizer.StateChanged += OnStateChanged;
            op.Completed += HandleCompilationCompleted;
        }
Example #40
 async Task<bool> answerYN(string question)
 {
     var language = SpeechRecognizer.SystemSpeechLanguage;
     speakString(question);
     string[] yn = {"Yes", "No"};
     SpeechRecognizer speechRecognizer = new SpeechRecognizer();
     SpeechRecognitionListConstraint list = new SpeechRecognitionListConstraint(yn, "yesOrNo");
     speechRecognizer.Constraints.Add(list);
     await speechRecognizer.CompileConstraintsAsync();
     SpeechRecognitionResult answerResult = await speechRecognizer.RecognizeWithUIAsync();
     return answerResult.Text == "Yes";
 }
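Illustrative call (the question text is made up):

     bool confirmed = await answerYN("Do you want to save the log entry?");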
Example #41
        public async void StartListening(object sender, EventArgs e)
        {
            try
            {
                //args = e;
                speechRecognizer = new SpeechRecognizer();
                StorageFolder folder = ApplicationData.Current.LocalFolder;
                var uri = new System.Uri("ms-appx:///Assets/TestGrammar.xml");
                var file = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(uri);
                speechRecognizer.Constraints.Clear();
                speechRecognizer.Constraints.Add(new SpeechRecognitionGrammarFileConstraint(file));
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                    throw new Exception("Grammar compilation failed");

                

                speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

                Debug.WriteLine("Listener initialized");
                isListening = true;
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
                uri = new System.Uri("ms-appx:///Assets/ResponseTemplates.xml");
                file = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(uri);
                var t = new DialogueManager(file);
                var qq = t.GenerateResponse(new Dictionary<string, string>() { { "ACTION", "DESTINATION" }, { "DESTINATION", "COFFEE_SHOP" } }, ref args);
                Debug.WriteLine(qq);
                await Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                    () =>
                    {
                        Speak(qq);
                    });
            }
            catch (Exception ex)
            {
                Debug.WriteLine("StartListening failed: " + ex.Message);
                isListening = false;
            }

            //return "I was returned";
        }
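A minimal sketch of the ContinuousRecognitionSession_Completed handler subscribed above; the original body is not shown, and restarting after a silence timeout is an assumed policy, not taken from the source.

        // Sketch only: the original handler body is not shown in this example.
        private async void ContinuousRecognitionSession_Completed(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionCompletedEventArgs args)
        {
            Debug.WriteLine("Continuous session completed: " + args.Status);
            if (isListening && args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
            {
                // Assumed policy: keep the assistant listening after the built-in timeout.
                await sender.StartAsync();
            }
        }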
Example #42
 public async Task LoadRecognizerAsync()
 {
     var permission = await Template10.Utils.AudioUtils.RequestMicrophonePermission();
     if (permission && _SpeechRecognizer == null)
     {
         _SpeechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
         var constraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
         _SpeechRecognizer.Constraints.Add(constraint);
         var compilation = await _SpeechRecognizer.CompileConstraintsAsync();
         if (compilation.Status != SpeechRecognitionResultStatus.Success)
             throw new Exception(compilation.Status.ToString());
     }
     else if (!permission)
     {
         throw new Exception("RequestMicrophonePermission returned false");
     }
 }
Example #43
        private async void Init(Windows.Globalization.Language language)
        {
            ListenButton.IsEnabled = false;
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
            if (!permissionGained)
            {
                MessageDialog("Permission to access capture resources was not given by the user; reset the application setting in Settings -> Privacy -> Microphone.");
                return;
            }

            var recognizer = new SpeechRecognizer(language);
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            recognizer.Constraints.Add(topicConstraint);
            var compilationResult = await recognizer.CompileConstraintsAsync();

            _SpeechRecognizer = recognizer;
            // Only enable the button if the grammar compiled successfully.
            ListenButton.IsEnabled = compilationResult.Status == SpeechRecognitionResultStatus.Success;
        }
        public async static Task<SpeechRecognizer>  InitRecognizer()
        {
            try
            {
                if (null != recognizer)
                {                   
                    recognizer.Dispose();
                    recognizer = null;
                }
                recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
                recognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("account page", speechContext).ValueAsString,
                        speechResourceMap.GetValue("audit page", speechContext).ValueAsString,
                        speechResourceMap.GetValue("finace page", speechContext).ValueAsString,
                        speechResourceMap.GetValue("transfer page", speechContext).ValueAsString
                        }, "goto"));


                SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    recognizer.Dispose();
                    recognizer = null;
                }

                //string uiOptionsText = string.Format("Try saying '{0}', '{1}' or '{2}'",
                //        speechResourceMap.GetValue("account page", speechContext).ValueAsString,
                //        speechResourceMap.GetValue("audit page", speechContext).ValueAsString,
                //        speechResourceMap.GetValue("audit page", speechContext).ValueAsString);
                //recognizer.UIOptions.ExampleText = uiOptionsText;
                return recognizer;
            }
            catch (Exception e)
            {
                Debug.WriteLine("InitRecognizer failed: " + e.Message);
                return null;
            }
        }
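A hypothetical call site for InitRecognizer (not in the original source), running a single recognition against the "goto" list; the method name is illustrative only.

        // Illustrative usage only; the calling context is assumed.
        private static async Task ListenForNavigationAsync()
        {
            var recognizer = await InitRecognizer();
            if (recognizer == null)
                return;

            SpeechRecognitionResult result = await recognizer.RecognizeAsync();
            if (result.Status == SpeechRecognitionResultStatus.Success)
                Debug.WriteLine("Navigate to: " + result.Text);
        }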
Example #45
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            base.OnNavigatedTo(e);

            MediaElementCtrl.MediaEnded += MediaElementCtrl_MediaEnded;

            this.dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
            this.speechRecognizer = new SpeechRecognizer();

            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;

            #region TTS
            try
            {
                _voice = (from voiceInformation
                            in Windows.Media.SpeechSynthesis.SpeechSynthesizer.AllVoices
                          select voiceInformation).First();

                _speechSynthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
                _speechSynthesizer.Voice = _voice;
            }
            catch (Exception exception)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                await messageDialog.ShowAsync();
            }
            #endregion

            StartConversation();

            //#if DEBUG
            //            _questions.Last().Value = "1";
            //            EndConversation();
            //            return;
            //#endif
        }
Example #46
        private async void VoiceButton_Tapped(object sender, TappedRoutedEventArgs e)
        {
            var speechRecognizer = new SpeechRecognizer();
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Example #47
        private async void InitializeSpeechRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }
            speechRecognizer = new SpeechRecognizer();
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            // RecognizeAsync returns a SpeechRecognitionResult once the user stops speaking.
            var result = await speechRecognizer.RecognizeAsync();
            if (!this.Completed && result.Status == SpeechRecognitionResultStatus.Success)
            {
                this.Completed = true;
                ResultGenerated(result.Text);
                speechRecognizer.Dispose();
                speechRecognizer = null;
            }
        }
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                Status.Text = "エラー"; // "Error"
            }
        }
Example #49
        private async Task InitContinuousRecognition()
        {
            try
            {
                if (speechRecognizerContinuous == null)
                {
                    speechRecognizerContinuous = new SpeechRecognizer();
                    speechRecognizerContinuous.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>() { "Take a Picture", "Reset", "How Old" }, "start"));
                    SpeechRecognitionCompilationResult contCompilationResult = await speechRecognizerContinuous.CompileConstraintsAsync();

                    if (contCompilationResult.Status != SpeechRecognitionResultStatus.Success)
                    {
                        throw new Exception("Grammar compilation failed: " + contCompilationResult.Status);
                    }
                    speechRecognizerContinuous.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                }

                await speechRecognizerContinuous.ContinuousRecognitionSession.StartAsync();
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
        }
Example #50
 public async Task FindByVoiceAsync()
 {
     IsListening = true;
     _speechRecognizer = new SpeechRecognizer();
     var wordsFromJson = _agendaService.GetWordsFromJson().Where(w => w.Length >= 3).Distinct().ToList();
     _speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(wordsFromJson, "keyword"));
     await _speechRecognizer.CompileConstraintsAsync();
     _speechRecognizer.ContinuousRecognitionSession.ResultGenerated +=
         ContinuousRecognitionSession_ResultGenerated;
     _speechRecognizer.HypothesisGenerated += SpeechRecognizerHypothesisGenerated;
     await _speechRecognizer.ContinuousRecognitionSession.StartAsync();
 }
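A companion sketch (assumed, not in the original source) for shutting the session down again; it mirrors FindByVoiceAsync by stopping the session, detaching the handlers, and disposing the recognizer.

 // Assumed companion to FindByVoiceAsync; not part of the original example.
 public async Task StopListeningAsync()
 {
     if (_speechRecognizer != null)
     {
         await _speechRecognizer.ContinuousRecognitionSession.StopAsync();
         _speechRecognizer.ContinuousRecognitionSession.ResultGenerated -=
             ContinuousRecognitionSession_ResultGenerated;
         _speechRecognizer.HypothesisGenerated -= SpeechRecognizerHypothesisGenerated;
         _speechRecognizer.Dispose();
         _speechRecognizer = null;
         IsListening = false;
     }
 }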
Example #51
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            base.OnNavigatedTo(e);

            // Note: to keep the amount of demo code to a minimum,
            // this example does not currently handle multitasking or OnNavigatedFrom events.

            // Get UI dispatcher for accessing the UI from the background thread
            // Callbacks from the speech recognition API will happen in a background thread.
            _dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            // Initialize the speech recognizer
            _speechRecognizer = new SpeechRecognizer();

            // Compile the speech recognizer constraints using the default
            await _speechRecognizer.CompileConstraintsAsync();

            // Register for the callback when a result of the Continuous Recognition Session was generated.
            // In this method, we need to save the result text and confidence score.
            // If the user briefly pauses speaking, there will be multiple callbacks to this method during
            // the 7 seconds, each containing the current fragment of text.
            _speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

            // Register for the callback when the Continuous Recognition Session completes.
            // In the best case, this happens because our app stopped it after 7 seconds.
            // However, it can also happen if there is a timeout (the user doesn't speak) or if there
            // is a configuration issue.
            _speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;

        }
Example #52
        private async void lineRecog()
        {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();

            // Compile the default dictionary
            SpeechRecognitionCompilationResult compilationResult =
                await speechRecognizer.CompileConstraintsAsync();

            // Start recognizing
            // Note: you can also use RecognizeWithUIAsync()
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();
            result = speechRecognitionResult.Text;
        }
Example #53
        private async Task InitSpeech()
        {
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await Template10.Utils.AudioUtils.RequestMicrophonePermission();
            if (permissionGained)
            {
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                speechRecognizer = new SpeechRecognizer();

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                    button.IsEnabled = false;

            }
            else
            {
                Text = "Permission to access the microphone was denied by the user";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
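A sketch of the SpeechRecognizer_HypothesisGenerated handler wired up above; the original body is not shown. Hypotheses arrive on a background thread, so the partial text is marshalled to the UI through the dispatcher captured earlier.

        // Sketch only: the original handler body is not shown in this example.
        private async void SpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;
            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                // Show the in-progress phrase while the user is still speaking.
                Text = hypothesis + " ...";
            });
        }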
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                // Initialize the SRGS-compliant XML file.
                // For more information about grammars for Windows apps and how to
                // define and use SRGS-compliant grammars in your app, see
                // https://msdn.microsoft.com/en-us/library/dn596121.aspx

                // determine the language code being used.
                string languageTag = recognizerLanguage.LanguageTag;
                string fileName = String.Format("SRGS\\{0}\\SRGSColors.xml", languageTag);
                StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(fileName);

                resultTextBlock.Text = speechResourceMap.GetValue("SRGSHelpText", speechContext).ValueAsString;

                // Initialize the SpeechRecognizer and add the grammar.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Provide feedback to the user about the state of the recognizer. This can be used to provide
                // visual feedback to help the user understand whether they're being heard.
                speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);
                speechRecognizer.Constraints.Add(grammarConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile them.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition button.
                    btnContinuousRecognize.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    resultTextBlock.Text = "Unable to compile grammar.";
                }
                else
                {

                    // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
                    speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);

                    // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
                    // some recognized phrases occur, or the garbage rule is hit.
                    speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
                    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;


                    btnContinuousRecognize.IsEnabled = true;

                    resultTextBlock.Text = speechResourceMap.GetValue("SRGSHelpText", speechContext).ValueAsString;
                    resultTextBlock.Visibility = Visibility.Visible;
                }
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    btnContinuousRecognize.IsEnabled = false;

                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text = "Speech Language pack for selected language not installed.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }

        }
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Add a web search topic constraint to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // RecognizeWithUIAsync allows developers to customize the prompts.    
            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText = speechResourceMap.GetValue("WebSearchUIOptionsExampleText", speechContext).ValueAsString;
            
            // Compile the constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraint was in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = "Unable to compile grammar.";
            }
        }
Example #56
        /// <summary>
        /// Initializes speech recognition and begins listening.
        /// </summary>
        /// <param name="system">
        /// The <see cref="CelestialSystem"/> used to build voice commands and return results.
        /// </param>
        /// <returns>
        /// A <see cref="Task"/> that yields the result of the operation.
        /// </returns>
        public async Task<SpeechRecognitionResultStatus> InitializeAsync(CelestialSystem system)
        {
            // Validate
            if (isInitialized) { throw new InvalidOperationException("Already initialized."); }
            if (system == null) throw new ArgumentNullException("system");

            // Store
            this.system = system;

            // Create recognizer
            recognizer = new SpeechRecognizer();

            // Configure to never stop listening
            recognizer.ContinuousRecognitionSession.AutoStopSilenceTimeout = TimeSpan.MaxValue;

            // Subscribe to events
            recognizer.StateChanged += RecognizerStateChanged;
            recognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
            recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;

            // Load constraint
            var constraint = await LoadDynamicConstraintAsync();

            // Add constraint to recognizer
            recognizer.Constraints.Add(constraint);

            // Compile
            var compileResult = await recognizer.CompileConstraintsAsync();

            Debug.WriteLine("Grammar Compiled: " + compileResult.Status.ToString());

            // We're initialized now
            isInitialized = true;

            // If successful start recognition
            if (compileResult.Status == SpeechRecognitionResultStatus.Success)
            {
                await recognizer.ContinuousRecognitionSession.StartAsync(SpeechContinuousRecognitionMode.Default);
            }

            // Return the result
            return compileResult.Status;
        }
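A possible shape for the LoadDynamicConstraintAsync helper awaited above; the original is not shown, and the CelestialSystem.Bodies collection and the phrase format are assumptions made for illustration.

        // Sketch only; requires System.Linq. CelestialSystem.Bodies is a
        // hypothetical member, not confirmed by the original source.
        private Task<ISpeechRecognitionConstraint> LoadDynamicConstraintAsync()
        {
            List<string> phrases = system.Bodies.Select(b => "go to " + b.Name).ToList();
            ISpeechRecognitionConstraint constraint =
                new SpeechRecognitionListConstraint(phrases, "navigation");
            return Task.FromResult(constraint);
        }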
Example #57
 private async void VoiceRecognizer()
 {
     voiceRecognizer = new SpeechRecognizer();
     SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "development");
     voiceRecognizer.Constraints.Add(topicConstraint);
     SpeechRecognitionCompilationResult result = await voiceRecognizer.CompileConstraintsAsync();
     SpeechRecognitionResult speechRecognitionResult = await voiceRecognizer.RecognizeAsync();
     //voiceRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
     //voiceRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
     //await voiceRecognizer.ContinuousRecognitionSession.StartAsync();
     if (pname == "Lorenzo")
     {
         if (speechRecognitionResult.Text.Contains("expensive") || speechRecognitionResult.Text.Contains("expense"))
         {
             //speechText.Text = "So much expensive";
             ReadVoice(Error.Not_Found);
             //pageView.Navigate(new Uri("http://www.americanas.com.br/produto/113151382/carro-eletrico-sport-car-vermelho-6v"));
         }
         else
         {
             ReadVoice(Error.Not_Found);
         }
     }
     else
     {
         ReadVoice(Error.Not_Found);
     }
 }
Example #58
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }
            try
            {
                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Provide feedback to the user about the state of the recognizer.
                speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Add a list constraint to the recognizer.
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("ListGrammarGoHome", speechContext).ValueAsString
                        }, "Home"));
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("ListGrammarGoToContosoStudio", speechContext).ValueAsString
                        }, "GoToContosoStudio"));
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("ListGrammarShowMessage", speechContext).ValueAsString,
                        speechResourceMap.GetValue("ListGrammarOpenMessage", speechContext).ValueAsString
                        }, "Message"));
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("ListGrammarSendEmail", speechContext).ValueAsString,
                        speechResourceMap.GetValue("ListGrammarCreateEmail", speechContext).ValueAsString
                        }, "Email"));
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("ListGrammarCallNitaFarley", speechContext).ValueAsString,
                        speechResourceMap.GetValue("ListGrammarCallNita", speechContext).ValueAsString
                        }, "CallNita"));
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("ListGrammarCallWayneSigmon", speechContext).ValueAsString,
                        speechResourceMap.GetValue("ListGrammarCallWayne", speechContext).ValueAsString
                        }, "CallWayne"));

                // RecognizeWithUIAsync allows developers to customize the prompts.
                string uiOptionsText = string.Format("Try saying '{0}', '{1}' or '{2}'",
                    speechResourceMap.GetValue("ListGrammarGoHome", speechContext).ValueAsString,
                    speechResourceMap.GetValue("ListGrammarGoToContosoStudio", speechContext).ValueAsString,
                    speechResourceMap.GetValue("ListGrammarShowMessage", speechContext).ValueAsString);
                speechRecognizer.UIOptions.ExampleText = uiOptionsText;
                helpTextBlock.Text = string.Format("{0}\n{1}", 
                    speechResourceMap.GetValue("ListGrammarHelpText", speechContext).ValueAsString,
                    uiOptionsText);

                // Compile the constraint.
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile them.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition buttons.
                    btnRecognizeWithUI.IsEnabled = false;
                    btnRecognizeWithoutUI.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text = "Unable to compile grammar.";
                }
                else
                {
                    btnRecognizeWithUI.IsEnabled = true;
                    btnRecognizeWithoutUI.IsEnabled = true;

                    resultTextBlock.Visibility = Visibility.Collapsed;
                }
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    btnRecognizeWithUI.IsEnabled = false;
                    btnRecognizeWithoutUI.IsEnabled = false;

                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text = "Speech Language pack for selected language not installed.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #59
        private async Task InitializeRecognizer()
        {
            bool permissionGained = await RequestMicrophonePermission();
            if (!permissionGained)
            {
                stat.Text = "No mic permission";
                return;
            }
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();
            StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(@"grammar.xml");
            SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);
            speechRecognizer.Constraints.Add(grammarConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                stat.Text = "Error:" + compilationResult.Status.ToString();
                return;
            }

            // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
            speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
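A sketch of the ContinuousRecognitionSession_ResultGenerated handler subscribed above; the original body is not shown. Results arrive on a background thread, so the UI update is dispatched, assuming this code lives in a Page with a Dispatcher and the stat TextBlock used above.

        // Sketch only: the original handler body is not shown in this example.
        private async void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Ignore phrases the engine itself rejected.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Rejected)
                return;

            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                stat.Text = args.Result.Text;
            });
        }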
        public override async Task InitializeAsync()
        {
            if (speechRecognizer == null)
            {
                try
                {
                    var recognizer = new SpeechRecognizer(ConvertAILangToSystem(config.Language));

                    // INFO: Dictation is default Constraint
                    //var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                    //recognizer.Constraints.Add(webSearchGrammar);

                    await recognizer.CompileConstraintsAsync();

                    lock (speechRecognizerLock)
                    {
                        if (speechRecognizer == null)
                        {
                            speechRecognizer = recognizer;
                        }
                    }
                }
                catch (Exception e)
                {
                    if ((uint)e.HResult == HRESULT_LANG_NOT_SUPPORTED)
                    {
                        throw new AIServiceException(string.Format("Specified language {0} not supported or not installed on device", config.Language.code), e);
                    }
                    throw;
                }

            }
        }