Example #1
        /// <summary>
        /// Tries to initialize the SpeechRecognizer object.
        /// </summary>
        /// <returns>true if the SpeechRecognizer is successfully initialized, false otherwise</returns>
        private async Task <bool> TryInitSpeech()
        {
            bool retVal = false;

            try
            {
                await TryDisposeSpeech();

                speechRecognizer = new SpeechRecognizer();

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, DICTATION);
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    retVal = true;
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text   = SPEECH_RECOGNITION_FAILED;
                retVal = false;
            }

            return(retVal);
        }
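TryDisposeSpeech() is awaited above but not shown. A minimal sketch of what it might look like, assuming the same speechRecognizer field and HypothesisGenerated handler used in this example:

        // Hypothetical counterpart to TryInitSpeech(); the original is not shown.
        private Task TryDisposeSpeech()
        {
            try
            {
                if (speechRecognizer != null)
                {
                    // Detach the handler first so no events fire on a disposed object.
                    speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
                    speechRecognizer.Dispose();
                    speechRecognizer = null;
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
            }

            return Task.CompletedTask; // keeps the awaitable signature used by the caller
        }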
Example #2
        private async Task InitializeTomaNota(Language recognizerLanguage)
        {
            if (speechRecognizerNotas != null)
            {
                // if coming from a previous run, clean up first
                speechRecognizerNotas.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizerNotas.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizerNotas.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizerNotas.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;

                this.speechRecognizerNotas.Dispose();
                this.speechRecognizerNotas = null;
            }

            this.speechRecognizerNotas = new SpeechRecognizer(recognizerLanguage);

            speechRecognizerNotas.StateChanged += SpeechRecognizer_StateChanged; // feedback to the user

            // instead of a grammar, apply the "Dictation" use case
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizerNotas.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizerNotas.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(result.Status.ToString(), "Exception initializing note taking:");
                await messageDialog.ShowAsync();
            }

            // subscribe to the events
            speechRecognizerNotas.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;       // fires when the session ends without success
            speechRecognizerNotas.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated; // either it understood, or garbage arrived
            speechRecognizerNotas.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;                                  // fed incrementally as speech arrives, to give feedback
        }
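None of the handlers subscribed above are included in this example. Hedged sketches of what they typically look like in the UWP dictation pattern, assuming a page with a dictationTextBox control (an assumption, not the original project's UI):

        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Marshal back to the UI thread to show incremental feedback while the user speaks.
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                dictationTextBox.Text = hypothesis;
            });
        }

        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Only keep phrases the recognizer is reasonably sure about.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text += args.Result.Text + " ";
                });
            }
        }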
Example #3
        private async void InitializeSpeechRecognizer()
        {
            // Initialize the SpeechRecognizer object
            MyRecognizer = new SpeechRecognizer();

            // Register Event Handlers
            MyRecognizer.StateChanged += MyRecognizer_StateChanged;
            MyRecognizer.ContinuousRecognitionSession.ResultGenerated += MyRecognizer_ResultGenerated;

            // Create the grammar file object from the predefined mygrammar.xml
            StorageFile GrammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(@"mygrammar.xml");

            // Add Grammar Constraint from Grammar File
            SpeechRecognitionGrammarFileConstraint GrammarConstraint = new SpeechRecognitionGrammarFileConstraint(GrammarContentFile);

            MyRecognizer.Constraints.Add(GrammarConstraint);

            // Compile Grammar
            SpeechRecognitionCompilationResult CompilationResult = await MyRecognizer.CompileConstraintsAsync();

            // Write Debug Information
            Debug.WriteLine("Status: " + CompilationResult.Status.ToString());

            // If Compilation Successful, Start Continuous Recognition Session
            if (CompilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                await MyRecognizer.ContinuousRecognitionSession.StartAsync();
            }
        }
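The MyRecognizer_ResultGenerated handler is not shown. For a grammar-file constraint like mygrammar.xml, the result carries semantic tags alongside the raw text; a hedged sketch:

        // Hypothetical handler for the SRGS example above (not the original implementation).
        private void MyRecognizer_ResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            Debug.WriteLine("Heard: " + args.Result.Text);

            // SRGS rules can attach semantic values; enumerate whatever the grammar produced.
            foreach (var property in args.Result.SemanticInterpretation.Properties)
            {
                Debug.WriteLine(property.Key + " = " + string.Join(", ", property.Value));
            }
        }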
Example #4
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async void InitializeRecognizer()
        {
            // Initialize the SRGS-compliant XML file.
            // For more information about grammars for Windows apps and how to
            // define and use SRGS-compliant grammars in your app, see
            // https://msdn.microsoft.com/en-us/library/dn596121.aspx

            StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(@"SRGSColors.xml");

            // Initialize the SpeechRecognizer and add the grammar.
            recognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            recognizer.StateChanged += SpeechRecognizer_StateChanged;

            SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);

            recognizer.Constraints.Add(grammarConstraint);
            SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                return;
            }

            // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
            recognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
        }
Example #5
        private async Task InitializeRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();
            // Set of responses to recognize.
            string[] responses = { "hey sanya", "what's up sanya" };

            // Add a list constraint to the recognizer.
            var listConstraint = new SpeechRecognitionListConstraint(responses, "AssitantName");

            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the list constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();


            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
Example #6
        /// <summary>
        /// Initializes the speech recognizer for voice recognition;
        /// passing n == 0 disposes the current recognizer instead.
        /// </summary>
        public async void InitSpeechRecognizer(int n)
        {
            if (n == 0)
            {
                Rec.Dispose();
                return;
            }
            Rec = new SpeechRecognizer();
            Rec.ContinuousRecognitionSession.ResultGenerated += Rec_ResultGenerated;

            StorageFile Store = await Package.Current.InstalledLocation.GetFileAsync(@"GrammarFile.xml");

            SpeechRecognitionGrammarFileConstraint constraint = new SpeechRecognitionGrammarFileConstraint(Store);

            Rec.Constraints.Add(constraint);
            SpeechRecognitionCompilationResult result = await Rec.CompileConstraintsAsync();

            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                status.Text = "Speech Recognition started.";
                tts(status.Text);
                Rec.UIOptions.AudiblePrompt = "Speech Recognition started.";
                await Rec.ContinuousRecognitionSession.StartAsync();
            }
        }
Example #7
        public static async Task InitializeRecognizer(Language recognizerLanguage)
        {
            try
            {
                // Load the grammar file from the app package.
                StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(GrammarPath);

                // Initialize the SpeechRecognizer and add the grammar.
                speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Add the grammar file constraint to the recognizer.
                SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);
                speechRecognizer.Constraints.Add(grammarConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile them.
                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
                    speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(SpeechTimespan);
                }
            }
            catch (Exception ex)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                await messageDialog.ShowAsync();

                throw;
            }
        }
Example #8
        private async Task InitializeRecognizer()
        {
            bool permissionGained = await RequestMicrophonePermission();

            if (!permissionGained)
            {
                stat.Text = "No mic permission";
                return;
            }
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();
            StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(@"grammar.xml");

            SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);

            speechRecognizer.Constraints.Add(grammarConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                stat.Text = "Error: " + compilationResult.Status;
                return;
            }

            // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
            speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
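RequestMicrophonePermission() is not part of this snippet. A plausible implementation probes audio capture the same way Example #26 below does; the method body here is an assumption, not the original helper:

        private static async Task<bool> RequestMicrophonePermission()
        {
            try
            {
                var settings = new Windows.Media.Capture.MediaCaptureInitializationSettings
                {
                    StreamingCaptureMode = Windows.Media.Capture.StreamingCaptureMode.Audio,
                    MediaCategory        = Windows.Media.Capture.MediaCategory.Communications
                };

                // Initializing an audio-only capture triggers the consent prompt if needed.
                var mediaCapture = new Windows.Media.Capture.MediaCapture();
                await mediaCapture.InitializeAsync(settings);
                return true;
            }
            catch (Exception)
            {
                // Access denied or no microphone present.
                return false;
            }
        }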
Example #9
    private async void Setup(Language language)
    {
        if (_recogniser != null)
        {
            _recogniser.ContinuousRecognitionSession.Completed       -= Recogniser_Completed;
            _recogniser.ContinuousRecognitionSession.ResultGenerated -= Recogniser_ResultGenerated;
            _recogniser.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
            _recogniser.Dispose();
            _recogniser = null;
        }
        _recogniser = new SpeechRecognizer(language);
        SpeechRecognitionTopicConstraint constraint = new SpeechRecognitionTopicConstraint(
            SpeechRecognitionScenario.Dictation, "dictation");

        _recogniser.Constraints.Add(constraint);
        SpeechRecognitionCompilationResult result = await _recogniser.CompileConstraintsAsync();

        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            await ShowDialogAsync($"Grammar Compilation Failed: {result.Status.ToString()}");
        }
        _recogniser.ContinuousRecognitionSession.Completed       += Recogniser_Completed;
        _recogniser.ContinuousRecognitionSession.ResultGenerated += Recogniser_ResultGenerated;
        _recogniser.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
    }
Example #10
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async Task InitializeRecognizer()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // You could create any IEnumerable dynamically.
            string[] responses = { "Yes", "No" };

            // Add a list constraint to the recognizer.
            var listConstraint = new SpeechRecognitionListConstraint(responses, "yesOrNo");

            speechRecognizer.Constraints.Add(listConstraint);

            // RecognizeWithUIAsync allows developers to customize the prompts.
            speechRecognizer.UIOptions.ExampleText = @"Ex. ""Yes"", ""No""";

            // Compile the constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled    = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
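Neither recognition button handler is shown for this yes/no example. A hedged sketch of the with-UI path, which is what the UIOptions.ExampleText above feeds into (the handler name matches the button, but is an assumption):

        private async void btnRecognizeWithUI_Click(object sender, RoutedEventArgs e)
        {
            // Shows the system speech UI, including the example text configured above.
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
            {
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = speechRecognitionResult.Text; // "Yes" or "No"
            }
        }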
Example #11
File: App.xaml.cs Project: P0W/AR
        private async void T1_Tick(object sender, object e)
        {
            if (rec.State == SpeechRecognizerState.Idle)
            {
                if (grammar == null)
                {
                    grammar = await rec.CompileConstraintsAsync();
                }

                if (dictatedText == null)
                {
                    dictatedText = new StringBuilder();
                }
                else
                {
                    dictatedText.Clear();
                }

                originalEditorText = "Hello World";

                await rec.ContinuousRecognitionSession.StartAsync();

                // t1.Stop();
            }
            else if (rec.State == SpeechRecognizerState.Processing)
            {
                //  t1.Start();
            }
            else
            {
                await rec.ContinuousRecognitionSession.CancelAsync();
            }
        }
Example #12
        public async Task <bool> InitializeRecognizerAsync()
        {
            Debug.WriteLine("[Speech to Text]: initializing Speech Recognizer...");
            if (_recognizer != null)
            {
                return(true);
            }

            _recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
            // Set UI text
            _recognizer.UIOptions.AudiblePrompt = "What you want to do...";

            // The dictation topic constraint requires an internet connection
            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            _recognizer.Constraints.Add(topicConstraint);

            SpeechRecognitionCompilationResult result = await _recognizer.CompileConstraintsAsync();   // Required

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("[Speech to Text]: Grammar Compilation Failed: " + result.Status.ToString());
                return(false);
            }

            _recognizer.ContinuousRecognitionSession.ResultGenerated += (s, e) => { Debug.WriteLine($"[Speech to Text]: recognizer results: {e.Result.Text}, {e.Result.RawConfidence.ToString()}, {e.Result.Confidence.ToString()}"); };
            Debug.WriteLine("[Speech to Text]: done initializing Speech Recognizer");
            return(true);
        }
Example #13
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                btnContinuousRecognize.IsEnabled = false;
            }
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
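The SpeechRecognizer_StateChanged handler is referenced throughout these examples but never shown. A hedged sketch, assuming a hypothetical statusTextBlock for display:

        private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
        {
            // States include Idle, Capturing, Processing, SoundStarted and SoundEnded,
            // which is enough to tell the user whether they're being heard.
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                statusTextBlock.Text = "Recognizer state: " + args.State;
            });
        }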
Example #14
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async Task InitializeRecognizer()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Add a web search topic constraint to the recognizer.
            var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.Constraints.Add(webSearchGrammar);

            // RecognizeWithUIAsync allows developers to customize the prompts.
            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. ""weather for London""";

            // Compile the constraint.
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled    = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
Example #15
        // Initialize Speech Recognizer and start async recognition
        private async void initializeSpeechRecognizer()
        {
            // Initialize recognizer
            recognizer = new SpeechRecognizer();

            // Set event handlers
            recognizer.StateChanged += RecognizerStateChanged;
            recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;

            // Load Grammar file constraint
            string fileName = SRGS_FILE;
            StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(fileName);

            SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);

            // Add to grammar constraint
            recognizer.Constraints.Add(grammarConstraint);

            // Compile grammar
            SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

            Debug.WriteLine("Status: " + compilationResult.Status.ToString());

            // If compilation succeeded, log the status and start the continuous recognition session.
            if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Result: " + compilationResult.Status);

                await recognizer.ContinuousRecognitionSession.StartAsync();
            }
            else
            {
                Debug.WriteLine("Status: " + compilationResult.Status);
            }
        }
Example #16
        /// <summary>
        /// Update the keyword list.
        /// </summary>
        /// <param name="keywords">Keywords to add.</param>
        private async void UpdateKeywordlist(string[] keywords)
        {
            if (this.recognizer != null)
            {
                if (this.IsRunning)
                {
                    throw new Exception("You cannot change the grammar while the service is running. Stop it before.");
                }

                // Clear previous constraints
                if (this.recognizer.Constraints.Count > 0)
                {
                    this.recognizer.Constraints.Clear();
                }

                try
                {
                    // Add new list
                    this.recognizer.Constraints.Add(new SpeechRecognitionListConstraint(keywords));
                }
                catch (Exception ex)
                {
                    System.Diagnostics.Debug.WriteLine("Keyword recognizer: " + ex.Message);
                }

                // Compile the constraint.
                SpeechRecognitionCompilationResult result = await this.recognizer.CompileConstraintsAsync();

                if (result.Status != SpeechRecognitionResultStatus.Success)
                {
                    this.IsRunning = false;
                    System.Diagnostics.Debug.WriteLine("Unable to compile grammar.");
                }
            }
        }
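A hedged usage sketch for UpdateKeywordlist, honoring the guard above that forbids changing the grammar while the service runs. The keywordService instance and its Start/Stop members are assumptions, not part of the original class:

        private void ReconfigureKeywords()
        {
            keywordService.Stop();   // hypothetical stop method; IsRunning must be false
            keywordService.UpdateKeywordlist(new[] { "lights on", "lights off", "play music" });
            keywordService.Start();  // hypothetical start method resuming recognition
        }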
Example #17
        public async Task InitializeRecognizer()
        {
            if (_speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                DisposeSpeechRecognizer();
            }

            _speechRecognizer = new SpeechRecognizer(new Language("de-DE"));
            _speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

            //_speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(10);
            //_speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(10);
            //_speechRecognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(10);

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "maja");

            _speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await _speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                ShowMessage("Grammar Compilation Failed: " + result.Status);
            }
        }
Example #18
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer();

            // Apply the dictation topic constraint to optimize for dictated freeform speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                dictationTextBox.Text = "Dictation Failed";
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
            // allows us to provide incremental feedback based on what the user's currently saying.
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
Example #19
        public async Task InitializeSpeechRecognizerAsync()
        {
            if (this.speechRecognizer != null)
            {
                this.DisposeSpeechRecognizer();
            }

            this.dictatedTextBuilder = new StringBuilder();
            this.speechRecognizer    = new SpeechRecognizer();

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                await new MessageDialog("CompileConstraintsAsync returned " + result.Status, "Error initializing SpeechRecognizer").ShowAsync();
                return;
            }

            this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            this.speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            this.speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        }
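Hedged sketches of the continuous-recognition handlers wired above, built around the dictatedTextBuilder field this method creates (not the original implementations):

        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Accumulate finalized phrases; HypothesisGenerated covers the in-flight text.
            dictatedTextBuilder.Append(args.Result.Text + " ");
        }

        private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
        {
            if (args.Status != SpeechRecognitionResultStatus.Success)
            {
                // Timeouts, network loss, etc. end the session; report but keep the text gathered so far.
                await new MessageDialog("Session ended: " + args.Status, "Continuous recognition").ShowAsync();
            }
        }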
Example #20
        /// <summary>
        /// Creates a SpeechRecognizer instance and initializes the grammar.
        /// </summary>
        private async Task InitializeRecognizer()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // Compile the dictation topic constraint, which optimizes for dictated speech.
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

            // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognizeWithUI.IsEnabled    = false;
                btnRecognizeWithoutUI.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text       = "Unable to compile grammar.";
            }
        }
Example #21
        } // foreground speech recognition

        public async Task <string> BackGroundRec()
        {
            string Result = "";

            try
            {
                using (SpeechRecognizer recognizer = new SpeechRecognizer())
                {
                    SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

                    if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                    {
                        recognizer.UIOptions.IsReadBackEnabled = false;
                        recognizer.UIOptions.ShowConfirmation  = false;
                        recognizer.UIOptions.AudiblePrompt     = "I'm listening, please speak...";
                        //SpeechRecognitionResult recognitionResult = await recognizer.RecognizeWithUIAsync();
                        SpeechRecognitionResult recognitionResult = await recognizer.RecognizeAsync();

                        if (recognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        {
                            Result = recognitionResult.Text;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Result = ex.Message;
            }
            return(Result);
        } // background (always-on) speech recognition
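A hedged call-site sketch: unlike the commented-out RecognizeWithUIAsync, RecognizeAsync above listens without showing any system UI and returns once a phrase or a silence timeout is reached. The button handler name is an assumption:

        private async void btnListen_Click(object sender, RoutedEventArgs e)
        {
            string heard = await BackGroundRec();
            Debug.WriteLine("Recognized: " + heard);
        }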
Example #22
        private async void StartSpeechRecognizer()
        {
            // Compile the loaded GrammarFiles
            SpeechRecognitionCompilationResult compilationResult = await _recognizer.CompileConstraintsAsync();

            // If compilation succeeded, log the status and start the continuous recognition session.
            if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("Result: " + compilationResult.Status);

                SpeechContinuousRecognitionSession session = _recognizer.ContinuousRecognitionSession;
                try
                {
                    await session.StartAsync();
                }
                catch (Exception e)
                {
                    //TODO this needs to report to the user that something failed.
                    //also potentially write to a log somewhere.
                    Debug.WriteLine(e.Message);
                }
            }
            else
            {
                //TODO this needs to report to the user that something failed.
                //also potentially write to a log somewhere.
                Debug.WriteLine("Status: " + compilationResult.Status);
            }
        }
Example #23
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);
            speechRecognizer.Timeouts.EndSilenceTimeout = new TimeSpan(0, 0, 0, 8);
            speechRecognizer.Timeouts.BabbleTimeout     = new TimeSpan(0, 0, 0, 3);

            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        }
Example #24
        private async Task InitializeSpeechRecognizer()
        {
            /* if (speechRecognizer != null)
             * {
             *   speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
             *   this.speechRecognizer.Dispose();
             *   this.speechRecognizer = null;
             * }
             */
            try
            {   //Create an instance of speech recognizer
                speechRecognizer = new SpeechRecognizer();

                //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                //Add grammar file constraint to the recognizer.
                //  var storageFile = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///SRGSmusic.grxml"));
                //  var grammarfileConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(storageFile, "music");
                string[] responses      = { "Play the song", "Introduce yourself", "Who are your creators", "Which day is it", "What is the temperature" };
                var      listConstraint = new SpeechRecognitionListConstraint(responses, "Action");
                //speechRecognizer.Constraints.Add(grammarfileConstraint);
                //resultTextBlock.Text = "Example play, pause";
                speechRecognizer.Constraints.Add(listConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                resultTextBlock.Text = "Made it this far";
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition button.
                    btnContinuousRecognize.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    resultTextBlock.Text = "Unable to compile grammar.";
                }
                else
                {
                    resultTextBlock.Text = "Compilation Successful!";
                    // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
                    //speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);

                    btnContinuousRecognize.IsEnabled = true;
                }
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    btnContinuousRecognize.IsEnabled = false;

                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text       = "Speech Language pack for selected language not installed.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #25
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List <string>()
                {
                    speechResourceMap.GetValue("CheckTime", speechContext).ValueAsString
                }, "Time"));

                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List <string>()
                {
                    speechResourceMap.GetValue("CheckDate", speechContext).ValueAsString
                }, "Date"));

                SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

                if (result.Status != SpeechRecognitionResultStatus.Success)
                {
                    //Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }
                else
                {
                    //resultTextBlock.Visibility = Visibility.Collapsed;

                    // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
                    // some recognized phrases occur, or the garbage rule is hit.
                    //speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
                    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                }
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Speech Language pack for selected language not installed.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #26
        public async Task Init()
        {
            //TEST
            {
                bool isMicAvailable = true;
                try
                {
                    //var audioDevices = await Windows.Devices.Enumeration.DeviceInformation.FindAllAsync(Windows.Devices.Enumeration.DeviceClass.AudioCapture);
                    //var audioId = audioDevices.ElementAt(0);

                    var mediaCapture = new Windows.Media.Capture.MediaCapture();
                    var settings     = new Windows.Media.Capture.MediaCaptureInitializationSettings();
                    settings.StreamingCaptureMode =
                        Windows.Media.Capture.StreamingCaptureMode.Audio;
                    settings.MediaCategory = Windows.Media.Capture.MediaCategory.Communications;

                    //var _capture = new Windows.Media.Capture.MediaCapture();
                    //var _stream = new InMemoryRandomAccessStream();
                    //await _capture.InitializeAsync(settings);
                    //await _capture.StartRecordToStreamAsync(MediaEncodingProfile.CreateWav(AudioEncodingQuality.Medium), _stream);


                    await mediaCapture.InitializeAsync(settings);
                }
                catch (Exception)
                {
                    isMicAvailable = false;
                }
                if (!isMicAvailable)
                {
                    // Send the user to the Settings app to enable microphone access.
                    await Windows.System.Launcher.LaunchUriAsync(new Uri("ms-settings:privacy-microphone"));
                }
            }
            // Setup
            {
                var language = new Windows.Globalization.Language("en-US");
                recognizer_ = new SpeechRecognizer(language);

                //this.dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

                recognizer_.ContinuousRecognitionSession.ResultGenerated +=
                    ContinuousRecognitionSession_ResultGenerated;

                recognizer_.ContinuousRecognitionSession.Completed +=
                    ContinuousRecognitionSession_Completed;

                recognizer_.HypothesisGenerated +=
                    SpeechRecognizer_HypothesisGenerated;

                SpeechRecognitionCompilationResult result = await recognizer_.CompileConstraintsAsync();

                System.Diagnostics.Debug.WriteLine(" compile res:" + result.Status.ToString());
            }
        }
Example #27
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;

                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);
                // Build a command-list grammar. Commands should ideally be drawn from a resource file for localization, and
                // be grouped into tags for alternate forms of the same command.
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List <string>()
                {
                    speechResourceMap.GetValue("ListGrammarTakeNote", speechContext).ValueAsString
                }, "Note"));
                speechRecognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List <string>()
                {
                    speechResourceMap.GetValue("ListGrammarSaveTrip", speechContext).ValueAsString
                }, "Trip"));
                // Update the help text in the UI to show localized examples
                string uiOptionsText = string.Format(listeningTip.Text,
                                                     speechResourceMap.GetValue("ListGrammarTakeNote", speechContext).ValueAsString,
                                                     speechResourceMap.GetValue("ListGrammarSaveTrip", speechContext).ValueAsString);
                listeningTip.Text = uiOptionsText;

                SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
                    // some recognized phrases occur, or the garbage rule is hit.
                    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                }
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Speech Language pack for selected language not installed.");
                    await messageDialog.ShowAsync();
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #28
        /// <summary>
        /// Initialize Speech Recognizer and compile constraints.
        /// </summary>
        /// <returns>Awaitable task.</returns>
        private async Task InitializeRecognizer()
        {
            if (speechRecognizer != null)
            {
                // cleanup prior to re-initializing this scenario.
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                speechRecognizer.Dispose();
                speechRecognizer = null;
            }

            speechRecognizer = new SpeechRecognizer();

            // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
            // of an audio indicator to help the user understand whether they're being heard.
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

            // It's not valid to pause a list grammar recognizer and recompile the constraints without at least one
            // constraint in place, so create a permanent constraint.
            var goHomeConstraint = new SpeechRecognitionListConstraint(new List <string>()
            {
                "Go Home"
            }, "gohome");

            // These speech recognition constraints will be added and removed from the recognizer.
            emailConstraint = new SpeechRecognitionListConstraint(new List <string>()
            {
                "Send email"
            }, "email");
            phoneConstraint = new SpeechRecognitionListConstraint(new List <string>()
            {
                "Call phone"
            }, "phone");

            // Add some of the constraints initially, so we don't start with an empty list of constraints.
            speechRecognizer.Constraints.Add(goHomeConstraint);
            speechRecognizer.Constraints.Add(emailConstraint);

            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                // Disable the recognition buttons.
                btnRecognize.IsEnabled = false;

                // Let the user know that the grammar didn't compile properly.
                resultTextBlock.Text = "Unable to compile grammar.";
            }

            // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
            // some recognized phrases occur, or the garbage rule is hit.
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        }
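A hedged sketch of swapping the removable constraints above: the continuous session must be paused before recompiling, and the permanent "Go Home" constraint keeps the grammar non-empty throughout. The method name is hypothetical:

        private async Task SwapToPhoneConstraintAsync()
        {
            // Pause recognition so the constraint set can be recompiled safely.
            await speechRecognizer.ContinuousRecognitionSession.PauseAsync();

            speechRecognizer.Constraints.Remove(emailConstraint);
            speechRecognizer.Constraints.Add(phoneConstraint);
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                speechRecognizer.ContinuousRecognitionSession.Resume();
            }
        }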
Example #29
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            this.speechRecognizer = new SpeechRecognizer();
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
            await this.speechRecognizer.RecognizeAsync();
        }
Example #30
        private async Task <bool> InitializeRecognizer(Language recognizerLanguage)
        {
            if (speechRecognizer != null)
            {
                speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }

            try
            {
                // The grammar-file constraint below is left commented out; a list constraint is added via AddFrenchConstraint instead.
                //StorageFile grammarContentFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri(@"ms-appx:///Voice/SRGS/Sonos.xml"));
                //SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);
                // Initialize the SpeechRecognizer and add the grammar.
                speechRecognizer = new SpeechRecognizer(new Language("fr-FR"));

                AddFrenchConstraint();
                speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile them.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    Debug.WriteLine(compilationResult.Status.ToString());
                    return(false);
                }
                else
                {
                    // Set EndSilenceTimeout to give users more time to complete speaking a phrase.
                    speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(0.8);
                    // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
                    // some recognized phrases occur, or the garbage rule is hit.
                    speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
                    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                    return(true);
                }
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    Debug.WriteLine("Speech Language pack for selected language not installed.");
                }
                else
                {
                    Debug.WriteLine(ex.Message);
                }
                Otto.Message.ShowMessage("VOICE : " + ex.Message);
                return(false);
            }
        }
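AddFrenchConstraint() is not shown. Given the fr-FR recognizer and the commented-out grammar-file alternative, a plausible sketch using a list constraint (the command phrases are invented for illustration):

        private void AddFrenchConstraint()
        {
            var commands = new List<string>
            {
                "allume la lumière",   // hypothetical commands for the Sonos scenario
                "éteins la lumière",
                "monte le volume"
            };
            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(commands, "sonos"));
        }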