Example #1
        private async void InitRecognitionEngine()
        {
            try
            {
                speechRecognizer = new SpeechRecognizer(new Language(languageTag));
            }
            catch
            {
                speechRecognizer = new SpeechRecognizer();
            }

            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(GetCommandsText(), "tag1"));

            //var op = speechRecognizer.CompileConstraintsAsync();
            //op.AsTask().Wait();
            ////var a = op.GetResults();

            //var op2 = speechRecognizer.RecognizeAsync();
            //op2.AsTask().Wait();
            //SpeechRecognitionResult result = op2.GetResults();
            //if (result.Status == SpeechRecognitionResultStatus.Success)
            //{
            //}

            SpeechRecognitionCompilationResult compilation = await speechRecognizer.CompileConstraintsAsync();
            if (compilation.Status != SpeechRecognitionResultStatus.Success)
            {
                return;
            }

            SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();
            //if (result.Status == SpeechRecognitionResultStatus.Success)
            //    phoneNumber = result.Text;

            //    var cultureInfo = new CultureInfo("ru-RU");
            //    //var cultureInfo = new CultureInfo("en-US");
            //    Thread.CurrentThread.CurrentCulture = cultureInfo;
            //    Thread.CurrentThread.CurrentUICulture = cultureInfo;

            //    /*
            //    •en-GB. English (United Kingdom)
            //    •en-US. English (United States)
            //    •de-DE. German (Germany)
            //    •es-ES. Spanish (Spain)
            //    •fr-FR. French (France)
            //    •ja-JP. Japanese (Japan)
            //    •zh-CN. Chinese (China)
            //    •zh-TW. Chinese (Taiwan)
            //    */

            //    var commands = GetCommandsText();
            //    var choices = new Choices(commands);
            //    var builder = new GrammarBuilder(choices);
            //    builder.Culture = cultureInfo;

            //    recognitionEngine = new SpeechRecognitionEngine();// (cultureInfo);
            //    recognitionEngine.SetInputToDefaultAudioDevice();
            //    recognitionEngine.UnloadAllGrammars();
            //    recognitionEngine.LoadGrammar(new Grammar(builder));
            //    //recognitionEngine.LoadGrammar(new DictationGrammar()); // any text (free dictation)

            //    recognitionEngine.SpeechRecognized += recognitionEngine_SpeechRecognized;
            //    recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #2
        private async void InitializeSpeechRecognizer()
        {
            try
            {
                if (speechRecognizer != null)
                {
                    // Note: calling RecognizeAsync() here would start a brand-new recognition
                    // pass just to cancel it; disposing the recognizer is enough to clean up.
                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }
                speechRecognizer = new SpeechRecognizer();
                var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
                speechRecognizer.Constraints.Add(topicConstraint);
                await speechRecognizer.CompileConstraintsAsync();

                this.Operation = await speechRecognizer.RecognizeAsync();

                if (Operation.Status == SpeechRecognitionResultStatus.Success)
                {
                    ResultGenerated(Operation.Text);
                    // RecognizeAsync() has already completed here; starting a new operation just to cancel it is unnecessary.
                    speechRecognizer.Dispose();
                    speechRecognizer = null;
                }
            }
            catch (Exception)
            {
            }
        }
Example #3
        private async void btnSpeechRecognize_Click(object sender, RoutedEventArgs e)
        {
            string phoneNumber = string.Empty;

            using (SpeechRecognizer recognizer = new SpeechRecognizer())
            {
                recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint
                                               (SpeechRecognitionScenario.FormFilling, "Phone"));
                await recognizer.CompileConstraintsAsync();

                recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(5);
                recognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(20);
                try
                {
                    SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                    if (result.Status == SpeechRecognitionResultStatus.Success)
                    {
                        tbPhoneNumber.Text = result.Text;
                    }
                    else
                    {
                        tbPhoneNumber.Text = result.Status.ToString();
                    }
                }
                catch (UnauthorizedAccessException ex)
                {
                    await new MessageDialog(ex.Message).ShowAsync();
                    //TODO: Redirect user to Settings to grant permission to the app
                }
            }
        }
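The TODO above can be completed by deep-linking into the Settings app. A minimal sketch, assuming the ms-settings:privacy-speechtyping URI used by the official UWP speech samples (the exact URI and helper name are assumptions):

        // Hypothetical helper for the TODO above: open the "Speech, inking & typing"
        // privacy page so the user can grant speech permission (URI is an assumption).
        private async Task RedirectToPrivacySettingsAsync()
        {
            await Windows.System.Launcher.LaunchUriAsync(new Uri("ms-settings:privacy-speechtyping"));
        }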
Example #4
        //Trigger this function to start speech recognition
        private async void StartRecognition()
        {
            Status.Fill = new SolidColorBrush(Color.FromArgb(255, 0, 200, 70));
            isListening = true;
            await userInput.CompileConstraintsAsync();

            SpeechRecognitionResult theResult = await userInput.RecognizeAsync();

            textBlock.Text = theResult.Text;
            SpeechSynthesisStream feedbackStream = await feedback.SynthesizeTextToStreamAsync("Ok!");

            //Uncomment this if you want to play feedback on the computer.
            //media.SetSource(feedbackStream, feedbackStream.ContentType);
            if (theResult.Text.Contains("red") || theResult.Text.Contains("angry") || theResult.Text.Contains("manchester") || theResult.Text.Contains("rose"))
            {
                changeColor("r");
            }
            else if (theResult.Text.Contains("blue") || theResult.Text.Contains("sad") || theResult.Text.Contains("chelsea") || theResult.Text.Contains("sea") || theResult.Text.Contains("ocean"))
            {
                changeColor("b");
            }
            else if (theResult.Text.Contains("green") || theResult.Text.Contains("forest") || theResult.Text.Contains("leaf") || theResult.Text.Contains("jealousy") || theResult.Text.Contains("tree") || theResult.Text.Contains("hulk") || theResult.Text.Contains("gamora"))
            {
                changeColor("g");
            }
            else if (theResult.Text.Contains("yellow") || theResult.Text.Contains("pikachu") || theResult.Text.Contains("cheese") || theResult.Text.Contains("gold") || theResult.Text.Contains("sponge") || theResult.Text.Contains("banana"))
            {
                changeColor("y");
            }
            else if (theResult.Text.Contains("white") || theResult.Text.Contains("bright") || theResult.Text.Contains("cloud") || theResult.Text.Contains("paper"))
            {
                changeColor("w");
            }
            else if (theResult.Text.Contains("purple") || theResult.Text.Contains("thanos") || theResult.Text.Contains("junk"))
            {
                changeColor("p");
            }
            else if (theResult.Text.Contains("rainbow") || theResult.Text.Contains("fade") || theResult.Text.Contains("multi"))
            {
                changeColor("x");
            }
            else if (theResult.Text.Contains("disco") || theResult.Text.Contains("random") || theResult.Text.Contains("colourful") || theResult.Text.Contains("beat"))
            {
                changeColor("d");
            }
            else if (theResult.Text.Contains("iron") || theResult.Text.Contains("marvel") || theResult.Text.Contains("flash"))
            {
                changeColor("k");
            }
            else if (theResult.Text.Contains("razor") || theResult.Text.Contains("razer") || theResult.Text.Contains("samsung"))
            {
                changeColor("!");
            }
            else if (theResult.Text.Contains("turn off") || theResult.Text.Contains("dark") || theResult.Text.Contains("black") || theResult.Text.Contains("empty"))
            {
                changeColor("e");
            }
            isListening = false;
            Status.Fill = new SolidColorBrush(Color.FromArgb(255, 200, 0, 0));
        }
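The branch chain above grows by one line per trigger word and fixes match priority by position. A table-driven sketch that preserves that ordering (ApplyColorCommand is a hypothetical name; changeColor and the trigger/code pairs come from the example itself; the table is abbreviated):

        // Sketch: keyword table in the same priority order as the chain above.
        private static readonly (string Keyword, string Code)[] colorTriggers =
        {
            ("red", "r"), ("angry", "r"), ("manchester", "r"), ("rose", "r"),
            ("blue", "b"), ("sad", "b"), ("chelsea", "b"), ("sea", "b"), ("ocean", "b"),
            // ... remaining trigger/code pairs from the chain above ...
            ("turn off", "e"), ("dark", "e"), ("black", "e"), ("empty", "e")
        };

        private void ApplyColorCommand(string spokenText)
        {
            foreach (var (keyword, code) in colorTriggers)
            {
                if (spokenText.Contains(keyword))
                {
                    changeColor(code);
                    return;
                }
            }
        }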
Example #5
        private static async Task <SpeechViewModel> EnableSpeechRecognition()
        {
            SpeechViewModel svm = new SpeechViewModel
            {
                PromptMessage = "Speak."
            };

            // Replace with your own Cognitive Services subscription key and region.
            SpeechFactory factory = SpeechFactory.FromSubscription("<subscription-key>", "westus");

            // Creates a SpeechRecognizer to accept audio input from the user
            SpeechRecognizer recognizer = factory.CreateSpeechRecognizer();

            // Accepts audio input from the user to recognize speech
            SpeechRecognitionResult result = await recognizer.RecognizeAsync();

            // Acts on recognized speech from audio input
            if (result.RecognitionStatus != RecognitionStatus.Recognized)
            {
                Console.WriteLine($"Recognition status: {result.RecognitionStatus.ToString()}");
                if (result.RecognitionStatus == RecognitionStatus.Canceled)
                {
                    svm.ResultMessage = $"There was an error, reason: {result.RecognitionFailureReason}";
                }
                else
                {
                    svm.ResultMessage = "No speech could be recognized.\n";
                }
            }
            else
            {
                svm.ResultMessage = $"{result.Text}";
            }

            return(svm);
        }
Example #6
 private async void OnListenAsync(object sender, RoutedEventArgs e)
 {
     buttonOnListen.IsEnabled = false;
     // Start recognition.
     try
     {
         recognitionOperation = speechRecognizer.RecognizeAsync();
         SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;
         // If successful, display the recognition result.
         if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
         {
             // Access to the recognized text through speechRecognitionResult.Text;
         }
         else
         {
             // Handle speech recognition failure
         }
     }
     catch (TaskCanceledException exception)
     {
         // TaskCanceledException will be thrown if you exit the scenario while the recognizer is actively
         // processing speech. Since this happens here when we navigate out of the scenario, don't try to 
         // show a message dialog for this exception.
         System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
         System.Diagnostics.Debug.WriteLine(exception.ToString());
     }
     catch (Exception exception)
     {
         var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
         await messageDialog.ShowAsync();
     }
     buttonOnListen.IsEnabled = true;
 }
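This handler assumes recognizer state held in fields; a minimal sketch of the assumed declarations (names are taken from the snippet, requires Windows.Foundation and Windows.Media.SpeechRecognition):

 // Assumed fields for the handler above.
 private SpeechRecognizer speechRecognizer;
 private IAsyncOperation<SpeechRecognitionResult> recognitionOperation;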
Example #7
        public async Task <string> RecordSpeechFromMicrophoneAsync()
        {
            string recognizedText = string.Empty;

            using (SpeechRecognizer recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage))
            {
                recognizer.Constraints.Add(new SpeechRecognitionListConstraint(acceptedUserInput));
                await recognizer.CompileConstraintsAsync();

                SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                StringBuilder stringBuilder = new StringBuilder();

                if (result.Status == SpeechRecognitionResultStatus.Success)
                {
                    if (result.Confidence == SpeechRecognitionConfidence.High)
                    {
                        stringBuilder.Append(result.Text);
                    }
                    else
                    {
                        IReadOnlyList <SpeechRecognitionResult> alternatives =
                            result.GetAlternates(1);

                        if (alternatives.First().RawConfidence > 0.5)
                        {
                            stringBuilder.Append(alternatives.First().Text);
                        }
                    }

                    recognizedText = stringBuilder.ToString();
                }
            }
            return(recognizedText);
        }
Example #8
        private async Task <SpeechRecognitionResult> Ask(string question, int timeout = 0, IEnumerable <string> responses = null)
        {
            await Say(question);

            _speechRecognizer.UIOptions.AudiblePrompt = question;
            if (timeout != 0)
            {
                _speechRecognizer.Timeouts.InitialSilenceTimeout = new TimeSpan(0, 0, timeout);
            }
            else
            {
                // Set this timeout as Default
                _speechRecognizer.Timeouts.InitialSilenceTimeout = new TimeSpan(0, 0, 30);
            }
            if (responses != null)
            {
                var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "answers");
                _speechRecognizer.Constraints.Clear();
                _speechRecognizer.Constraints.Add(listConstraint);
                await _speechRecognizer.CompileConstraintsAsync();
            }
            else if (_speechRecognizer.Constraints.Any())
            {
                _speechRecognizer.Constraints.Clear();
                await _speechRecognizer.CompileConstraintsAsync();
            }
            var result = await _speechRecognizer.RecognizeAsync();

            return(result);
        }
Example #9
        public static async void RecognizeSpeech()
        {
            SpeechRecognizer recognizer = new SpeechRecognizer();

            recognizer.Timeouts.BabbleTimeout         = System.TimeSpan.FromSeconds(120.0);
            recognizer.Timeouts.EndSilenceTimeout     = System.TimeSpan.FromSeconds(120.0);
            recognizer.Timeouts.InitialSilenceTimeout = System.TimeSpan.FromSeconds(120.0);
            SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Message");

            recognizer.Constraints.Add(topicConstraint);
            await recognizer.CompileConstraintsAsync();

            try {
                SpeechRecognitionResult result = await recognizer.RecognizeAsync();

                //use result.GetAlternates for more precision
                if (result.Confidence != SpeechRecognitionConfidence.Rejected)
                {
                    if (result.Text != "")
                    {
                        string speechResult = result.Text.Remove(result.Text.Length - 1); // strip the trailing period that dictation appends
                        int    num;
                        bool   isNumber = Int32.TryParse(speechResult, out num);
                        if (isNumber)
                        {
                            MainPage.SetAudioTempCommand(speechResult);
                        }
                        else
                        {
                            UiUtils.ShowNotification("Your message could not be parsed as number. Please specify a number!");
                        }
                    }
                    else
                    {
                        UiUtils.ShowNotification("Your message could not be parsed. Please repeat!");
                    }
                }
                else
                {
                    UiUtils.ShowNotification("Sorry, could not get that. Can you repeat?");
                }
            }
            catch (Exception ex)
            {
                const int privacyPolicyHResult = unchecked ((int)0x80045509);
                const int networkNotAvailable  = unchecked ((int)0x80045504);

                if (ex.HResult == privacyPolicyHResult)
                {
                    UiUtils.ShowNotification("You will need to accept the speech privacy policy in order to use speech recognition in this app. Consider activating `Get to know me` in 'Settings->Privacy->Speech, inking & typing`");
                }
                else if (ex.HResult == networkNotAvailable)
                {
                    UiUtils.ShowNotification("The network connection is not available");
                }
                else
                {
                    var t = ex.Message;
                }
            }
        }
Example #10
        private async void MainPage_Loaded(object sender, RoutedEventArgs e)
        {
            sR = new SpeechRecognizer();
            //set time

            //load grammar file
            var gf = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///grammar.xml"));

            //add the constraint to the speech recognizer
            sR.Constraints.Add(new SpeechRecognitionGrammarFileConstraint(gf));

            var result = await sR.CompileConstraintsAsync();

            Debug.WriteLine(" not start");
            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                Debug.WriteLine("start");
                while (true)
                {
                    SpeechRecognitionResult srr = await sR.RecognizeAsync();

                    // use the semantic interpretation result to get the command

                    string myCommand = srr.SemanticInterpretation.Properties["command"].Single();
                    string ruleID    = srr.RulePath[0];

                    var md = new Windows.UI.Popups.MessageDialog(myCommand, "User said this " + ruleID);
                    //  var md = new Windows.UI.Popups.MessageDialog(srr.Text, "User said this ");
                    await md.ShowAsync();
                }
            }
        }
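Indexing SemanticInterpretation.Properties["command"] throws if the matched rule produced no such tag (for example when a garbage rule fires). A defensive version of the loop body, as a sketch under the same grammar file (requires System.Linq for Single()):

                    // Sketch: guard against a missing "command" semantic property
                    // instead of indexing the dictionary directly.
                    if (srr.Confidence != SpeechRecognitionConfidence.Rejected &&
                        srr.SemanticInterpretation.Properties.TryGetValue("command", out IReadOnlyList<string> values))
                    {
                        string myCommand = values.Single();
                        // ... show the dialog as above ...
                    }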
Example #11
        // start a speech recognition session and capture the results
        private async void speechButton_Click(object sender, RoutedEventArgs e)
        {
            speechButton.Content   = "Listening...";
            speechButton.IsEnabled = false;

            // Create an instance of SpeechRecognizer.
            using (var speechRecognizer = new SpeechRecognizer())
            {
                // Compile the dictation grammar by default.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

                // output result
                speechOutputBox.Text   = speechRecognitionResult.Text;
                speechButton.Content   = "Speak";
                speechButton.IsEnabled = true;
                //speechInputSubmitted(speechRecognitionResult);

                bool correctGuess = false;
                try
                {
                    correctGuess = mapLearner.guess(speechRecognitionResult);
                }
                catch (ArgumentNullException) { }
                finally
                {
                    AnswerSubmitted(correctGuess);
                }
            }
        }
Example #12
        }// foreground speech recognition

        public async Task <string> BackGroundRec()
        {
            string Result = "";

            try
            {
                using (SpeechRecognizer recognizer = new SpeechRecognizer())
                {
                    SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

                    if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                    {
                        // Note: UIOptions only affect RecognizeWithUIAsync(); RecognizeAsync() ignores them.
                        recognizer.UIOptions.IsReadBackEnabled = false;
                        recognizer.UIOptions.ShowConfirmation  = false;
                        recognizer.UIOptions.AudiblePrompt     = "我在听,请说..."; // "I'm listening, please speak..."
                        //SpeechRecognitionResult recognitionResult = await recognizer.RecognizeWithUIAsync();
                        SpeechRecognitionResult recognitionResult = await recognizer.RecognizeAsync();

                        if (recognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        {
                            Result = recognitionResult.Text;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Result = ex.Message;
            }
            return(Result);
        }// background always-on speech recognition
Example #13
        private async Task StartRecognizeAsync()
        {
            try
            {
                var speechRecognizer = new SpeechRecognizer();
                speechRecognizer.Grammars.AddGrammarFromList(
                    "answer",
                    _words);

                while (!_pleaseFinish)
                {
                    var result = await speechRecognizer.RecognizeAsync();

                    if (result.TextConfidence != SpeechRecognitionConfidence.Rejected)
                    {
                        ProcessResult(result);
                    }
                    else
                    {
                        Debug.WriteLine("No text!");
                    }
                }

            }
            finally
            {
                _isRunning = false;
            }
        }
Example #14
		private async void StartVoiceRecognition()
		{
			await SpeakText( "Say Captains Log at any time to create a log entry." );

			speechRecognizerCaptainsLogCommand = new SpeechRecognizer();

			while ( !cancellationSource.IsCancellationRequested )
			{
				// Listen for user to say "Captains Log"
				ISpeechRecognitionConstraint commandConstraint = 
					new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
				speechRecognizerCaptainsLogCommand.Constraints.Clear(); // avoid piling up duplicate constraints on every loop pass
				speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
				await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();

				SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();

				if ( commandResult.Status != SpeechRecognitionResultStatus.Success
					|| commandResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| cancellationSource.IsCancellationRequested )
				{
					continue;
				}
				// Recognized user saying "Captains Log"

				// Listen for the user's dictation entry
				var captainsLogDictationRecognizer = new SpeechRecognizer();

				ISpeechRecognitionConstraint dictationConstraint = 
					new SpeechRecognitionTopicConstraint( 
						SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );

				captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );

				await captainsLogDictationRecognizer.CompileConstraintsAsync();

				captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
				captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
				captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
				captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;

				SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();

				if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
					|| dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| string.IsNullOrWhiteSpace( dictationResult.Text )
					|| cancellationSource.IsCancellationRequested )
				{
					captainsLogDictationRecognizer.Dispose();

					continue;
				}
				// Recognized user's dictation entry

				AddLogEntry( dictationResult.Text );

				captainsLogDictationRecognizer.Dispose();
			}

			speechRecognizerCaptainsLogCommand.Dispose();
		}
Example #16
        private async void startRecognition_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                startRecognition.IsEnabled = false;

                // Compile the dictation grammar that is loaded by default, and start recognition.
                await speechRecognizer.CompileConstraintsAsync();

                SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

                if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    resultTextBox.Text = speechRecognitionResult.Text;
                }
            }
            catch (Exception ex)
            {
                MessageDialog dialog = new MessageDialog(LocalizableStrings.SPEECH_RECOGNITION_DISABLED);
                await dialog.ShowAsync();

                status.Log(ex.Message);
            }
            finally
            {
                startRecognition.IsEnabled = true;
            }
        }
Example #17
        private async void Button_Click_2(object sender, RoutedEventArgs e)
        {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();
            speechRecognizer.Grammars.AddGrammarFromList("color", new List<string>
            {
                "红色",
                "白色",
                "蓝色",
                "绿色"
            });
            try
            {
                var result = await speechRecognizer.RecognizeAsync();
                if (result.TextConfidence == SpeechRecognitionConfidence.Rejected)
                {
                    MessageBox.Show("语音识别不到");
                }
                else
                {
                    MessageBox.Show(result.Text);
                }
            }
            catch (Exception err)
            {
                MessageBox.Show("请检查是否接收语音隐私协议" + err.Message + err.HResult);
            }

                
        }
Example #18
        private async void ListenForPatrick()
        {
            IAsyncOperation <SpeechRecognitionResult> recognitionOperation;
            SpeechRecognitionResult speechRecognitionResult;

            while (true)
            {
                if (speechRecognizer != null)
                {
                    recognitionOperation    = speechRecognizer.RecognizeAsync();
                    speechRecognitionResult = await recognitionOperation;

                    if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                    {
                        // BUG: Sometimes hits success repetitively and does not listen for input
                        Debug.WriteLine(speechRecognitionResult.Text);
                        if (speechRecognitionResult.Text.Length > 0)
                        {
                            ProcessSpeech(speechRecognitionResult.Text);
                        }
                    }
                }
                else
                {
                    await System.Threading.Tasks.Task.Delay(250);
                }

                Blink.Begin();
            }
        }
Example #19
        public static async Task <SpeechRecognitionResult> ListenOnceAsync()
        {
            if (!IsStarted)
            {
                try
                {
                    IsStarted  = true;
                    recognizer = new SpeechRecognizer();
                    // compile the speech constraints and start listening
                    await recognizer.CompileConstraintsAsync();

                    // keep listening until the result isn't an empty string since sometimes it rings up false positives
                    SpeechRecognitionResult result = null;
                    while (result == null || StringUtils.IsBlank(result.Text))
                    {
                        result = await recognizer.RecognizeAsync();
                    }
                    return(result);
                }
                catch (Exception)
                {
                    return(null);
                }
                finally
                {
                    // reset so a later call can listen again once this session ends
                    IsStarted = false;
                }
            }
            else
            {
                throw new Exception("Can't Listen when already started!");
            }
        }
Example #20
        private void ButtonBase_OnClick(object sender, RoutedEventArgs e)
        {
            // Start recognition.
            IAsyncOperation <SpeechRecognitionResult> speechRecognitionResult = _speechRecognizer.RecognizeAsync();

            //IAsyncOperation<SpeechRecognitionResult> speechRecognitionResult = _speechRecognizer.RecognizeWithUIAsync();
            speechRecognitionResult.Completed += Completed;
        }
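The Completed delegate wired above follows the standard WinRT completion signature; a sketch of what the handler might look like (the body is an assumption):

        // Sketch of the Completed handler assumed above.
        private void Completed(IAsyncOperation<SpeechRecognitionResult> asyncInfo, AsyncStatus asyncStatus)
        {
            if (asyncStatus == AsyncStatus.Completed)
            {
                SpeechRecognitionResult result = asyncInfo.GetResults();
                // Marshal back to the UI thread before updating any controls.
                System.Diagnostics.Debug.WriteLine(result.Text);
            }
        }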
Example #21
        private async void startListenBtn_Click(object sender, RoutedEventArgs e)
        {
            var result = await _speechRecognizer.RecognizeAsync();

            if (result.Status == SpeechRecognitionResultStatus.Success)
            {
                HandleResult(result);
            }
        }
Example #22
        public async Task <SpeechRecognitionResult> RecognizeSpeech()
        {
            var speechRecognizer = new SpeechRecognizer();
            await speechRecognizer.CompileConstraintsAsync();

            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            return(speechRecognitionResult);
        }
Example #23
        /// <summary>
        /// Uses the recognizer constructed earlier to listen for speech from the user before displaying
        /// it back on the screen. Uses developer-provided UI for user feedback.
        /// </summary>
        /// <param name="sender">Button that triggered this event</param>
        /// <param name="e">State information about the routed event</param>
        private async void RecognizeWithoutUIDictationGrammar_Click(object sender, RoutedEventArgs e)
        {
            heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Collapsed;

            // Disable the UI while recognition is occurring, and provide feedback to the user about current state.
            btnRecognizeWithUI.IsEnabled     = false;
            btnRecognizeWithoutUI.IsEnabled  = false;
            cbLanguageSelection.IsEnabled    = false;
            hlOpenPrivacySettings.Visibility = Visibility.Collapsed;
            listenWithoutUIButtonText.Text   = " listening for speech...";

            // Start recognition.
            try
            {
                recognitionOperation = speechRecognizer.RecognizeAsync();
                SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;
                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text            = speechRecognitionResult.Text;
                }
                else
                {
                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text       = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
                }
            }
            catch (TaskCanceledException exception)
            {
                // TaskCanceledException will be thrown if you exit the scenario while the recognizer is actively
                // processing speech. Since this happens here when we navigate out of the scenario, don't try to
                // show a message dialog for this exception.
                System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
                System.Diagnostics.Debug.WriteLine(exception.ToString());
            }
            catch (Exception exception)
            {
                // Handle the speech privacy policy error.
                if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
                {
                    hlOpenPrivacySettings.Visibility = Visibility.Visible;
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }

            // Reset UI state.
            listenWithoutUIButtonText.Text  = " without UI";
            cbLanguageSelection.IsEnabled   = true;
            btnRecognizeWithUI.IsEnabled    = true;
            btnRecognizeWithoutUI.IsEnabled = true;
        }
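HResultPrivacyStatementDeclined is not defined in this snippet; based on the privacy-policy HRESULT handled in Example #9, the assumed declaration is:

        // Assumed declaration; 0x80045509 matches the privacy-policy HRESULT used in Example #9.
        private static readonly uint HResultPrivacyStatementDeclined = 0x80045509;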
Example #24
    private async Task <SpeechRecognitionResult> RecognizeSpeechAsync()
    {
        SpeechRecognitionResult result = null;

        if (speechRecognizer != null)
        {
            result = await speechRecognizer.RecognizeAsync();
        }

        return(result);
    }
Example #25
        private async void Button_Click(object sender, RoutedEventArgs e)
        {
            await InitSpeech();

            // Disable the UI while recognition is occurring, and provide feedback to the user about current state.
            button.IsEnabled = false;
            text.IsReadOnly  = true;

            text.Text = " listening for speech...";

            // Start recognition.
            try
            {
                //IAsyncOperation<SpeechRecognitionResult> recognitionOperation = speechRecognizer.RecognizeAsync();
                SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

                //SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;
                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    text.Text = speechRecognitionResult.Text;
                }
                else
                {
                    text.Text = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
                }
            }
            catch (TaskCanceledException exception)
            {
                // TaskCanceledException will be thrown if you exit the scenario while the recognizer is actively
                // processing speech. Since this happens here when we navigate out of the scenario, don't try to
                // show a message dialog for this exception.
                System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
                System.Diagnostics.Debug.WriteLine(exception.ToString());
            }
            catch (Exception exception)
            {
                //// Handle the speech privacy policy error.
                //if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
                //{
                //    hlOpenPrivacySettings.Visibility = Visibility.Visible;
                //}
                //else
                //{
                //    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                //    await messageDialog.ShowAsync();
                //}
            }

            // Reset UI state.
            button.IsEnabled = true;
            text.IsReadOnly  = false;
        }
Example #26
        private async Task InitializeIntentRecognizer()
        {
            string spokenWord = string.Empty;

            try
            {
                // Initialize recognizer
                using (var intentRecognizer = new SpeechRecognizer())
                {
                    var compilationResult = await intentRecognizer.CompileConstraintsAsync();

                    // If successful, display the recognition result.
                    if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                    {
                        // change default of 5 seconds
                        intentRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(10);
                        // change default of 0.5 seconds
                        intentRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(5);
                        SpeechRecognitionResult result = await intentRecognizer.RecognizeAsync();

                        if (result.Status == SpeechRecognitionResultStatus.Success)
                        {
                            spokenWord = result.Text;
                        }
                    }
                }

                if (!string.IsNullOrEmpty(spokenWord))
                {
                    // EscapeDataString (not EscapeUriString) is the right choice for a query-string value.
                    var result = await client.GetStringAsync(baseURL + "&sessionId=" + Guid.NewGuid().ToString() +
                                                             "&query=" + Uri.EscapeDataString(spokenWord));

                    var results = JObject.Parse(result);
                    var output  = (string)results["result"]["fulfillment"]["speech"];

                    await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => {
                        PlayResponse(output);
                    });
                }
            }
            catch (Exception ex)
            {
                //log
            }
            finally
            {
                //restart the main recognition session to listen for the trigger word
                await recognizer.ContinuousRecognitionSession.StartAsync();
            }
        }
Example #27
        /// <summary>
        /// Starts the Speech Recognition session
        /// </summary>
        /// <param name="e"></param>
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            dispatcher = Windows.ApplicationModel.Core.CoreApplication.GetCurrentView().CoreWindow.Dispatcher;

            speechRecognizer = new SpeechRecognizer();
            await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.HypothesisGenerated += speechHypothesisCallback;
            speechRecognizer.StateChanged        += onSpeechRecognitionEnded;

            await speechRecognizer.RecognizeAsync();
        }
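The HypothesisGenerated handler registered above is not shown; a minimal sketch of its expected shape (the body is an assumption), useful for displaying partial text while the user is still speaking:

        // Sketch of the hypothesis callback wired above.
        private async void speechHypothesisCallback(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string partialText = args.Hypothesis.Text;
            await dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
                () => System.Diagnostics.Debug.WriteLine(partialText));
        }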
Example #28
        async Task RecordSpeechFromMicrophoneAsync(VoiceInformation voiceInformation)
        {
            if (!await AudioCapturePermissions.RequestMicrophonePermission())
            {
                return;
            }

            if (voiceInformation == null)
            {
                return;
            }

            if (!await DoRecognition())
            {
                await SpeakAndListen(listenOnly : true);
            }

            async Task <bool> DoRecognition()
            {
                using (SpeechRecognizer speechRecognizer = new SpeechRecognizer(new Windows.Globalization.Language(voiceInformation.Language)))
                {
                    SpeechRecognitionConstraints.ToList().ForEach(c => speechRecognizer.Constraints.Add(c));

                    speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(SpeechRecognitionConstants.InitialSilenceTimeout);
                    speechRecognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(SpeechRecognitionConstants.EndSilenceTimeout);

                    await speechRecognizer.CompileConstraintsAsync();

                    SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();

                    if (
                        result.Status == SpeechRecognitionResultStatus.Success &&
                        new HashSet <SpeechRecognitionConfidence>
                    {
                        SpeechRecognitionConfidence.High,
                        SpeechRecognitionConfidence.Medium,
                        SpeechRecognitionConfidence.Low
                    }.Contains(result.Confidence) &&
                        uiNotificationService.CanGoBack
                        )
                    {
                        await GoBack();

                        return(true);
                    }
                    else
                    {
                        return(false);
                    }
                }
            }
        }
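AudioCapturePermissions.RequestMicrophonePermission is an external helper; one plausible implementation (modeled on the official UWP speech samples; treat it as a sketch) triggers the microphone consent prompt by initializing an audio-only MediaCapture:

        // Sketch: initialize an audio-only MediaCapture to force the microphone consent prompt.
        public static async Task<bool> RequestMicrophonePermission()
        {
            try
            {
                var settings = new Windows.Media.Capture.MediaCaptureInitializationSettings
                {
                    StreamingCaptureMode = Windows.Media.Capture.StreamingCaptureMode.Audio,
                    MediaCategory       = Windows.Media.Capture.MediaCategory.Speech
                };
                var capture = new Windows.Media.Capture.MediaCapture();
                await capture.InitializeAsync(settings);
            }
            catch (UnauthorizedAccessException)
            {
                return false; // the user denied microphone access
            }
            return true;
        }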
Example #29
        private async void VoiceButton_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                listening = true;

                // if SpeechRecognizer initialization failed, there is nothing else to do
                if (await TryInitSpeech() == false)
                {
                    return;
                }

                VisualStateManager.GoToState(this, VISUAL_STATE_LISTENING, true);
                this.IsReadOnly = true;
                this.Text       = LISTENING_TEXT;

                SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

                if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    // remove the last character of the recognized text if it is a period (text boxes are rarely filled like a sentence with a trailing period)
                    if (speechRecognitionResult.Text.Length > 1 && speechRecognitionResult.Text.EndsWith("."))
                    {
                        Text = speechRecognitionResult.Text.Remove(speechRecognitionResult.Text.Length - 1);
                    }
                    else
                    {
                        Text = speechRecognitionResult.Text;
                    }
                }
                else
                {
                    Text = SPEECH_RECOGNITION_FAILED;
                }

                hypotesis = string.Empty;
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text = SPEECH_RECOGNITION_FAILED;
            }
            finally
            {
                timer.Stop();
                await TryDisposeSpeech();

                VisualStateManager.GoToState(this, VISUAL_STATE_NOT_LISTENING, true);
                this.IsReadOnly = false;
                listening       = false;
            }
        }
Example #30
        private async void OnRecognizeNoUI(object sender, RoutedEventArgs e)
        {
            var recognizer = new SpeechRecognizer();

            var topic = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");

            recognizer.Constraints.Add(topic);
            await recognizer.CompileConstraintsAsync();

            var result = await recognizer.RecognizeAsync();

            txt_dictation.Text = result.Text;
        }
Example #31
        public async void RecognizeSpeech()
        {
            var speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);

            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");

            speechRecognizer.Constraints.Add(dictationConstraint);

            var srcpres = await speechRecognizer.CompileConstraintsAsync();

            if (srcpres.Status != SpeechRecognitionResultStatus.Success)
            {
                Console.WriteLine("Failed to compile constraints");
                exitEvent.Set();
                return;
            }

            while (true)
            {
                var res = await speechRecognizer.RecognizeAsync();

                switch (res.Status)
                {
                case SpeechRecognitionResultStatus.Success:
                    break;

                default:
                    Console.WriteLine($"Failed ({res.Status.ToString()}), try again");
                    continue;
                }

                switch (res.Confidence)
                {
                case SpeechRecognitionConfidence.Low:
                case SpeechRecognitionConfidence.Rejected:
                    Console.WriteLine("Not enough confidence...");
                    continue;
                }

                UiBuiltins.Notification(
                    header: "Text spoken",
                    message: res.Text
                    );

                if (res.Text == ExitKeyword)
                {
                    exitEvent.Set();
                    break;
                }
            }
        }
Example #32
        public async Task RunVoiceRecognition()
        {
            lock (_speechRecognizer)
            {
                if (_running)
                {
                    Debug.WriteLine("Skipping voice recognition: Already running.");
                    return;
                }
                _running = true;
            }

            try
            {
                if (_commandCallback.Count == 0)
                {
                    Debug.WriteLine("No voice commands available");
                    new TextToSpeech("No voice commands available").Play();
                    return;
                }

                _speechRecognizer.Constraints.Clear();
                _speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(_commandCallback.Keys));
                await _speechRecognizer.CompileConstraintsAsync();

                SetLed(GpioPinValue.High);
                new TextToSpeech("Listening").Play();
                SpeechRecognitionResult result = await _speechRecognizer.RecognizeAsync();

                SetLed(GpioPinValue.Low);

                if (result.Status != SpeechRecognitionResultStatus.Success || String.IsNullOrEmpty(result.Text))
                {
                    Debug.WriteLine($"Recognition failed: {result.Status} - {result.Text}");
                    ShowHelp("Sorry, didn't catch that.");
                }
                else if (result.Text == "Help")
                {
                    ShowHelp();
                }
                else if (_commandCallback.TryGetValue(result.Text, out RoutedEventHandler callback))
                {
                    callback?.Invoke(this, null);
                }
            }
            finally
            {
                _running = false;
            }
        }
Example #33
        private async void SpeechListeningClick(object sender, RoutedEventArgs e)
        {
            SpeechListeningChanged(true, SpeechListening, SpeechLoading);

            var speechRecognizer = new SpeechRecognizer();

            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
            await speechRecognizer.CompileConstraintsAsync();

            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            SpeechText.Text = speechRecognitionResult.Text;
            SpeechListeningChanged(false, SpeechListening, SpeechLoading);
        }
Example #34
        public async Task <string> GetTextFromSpeechAsync(bool withUI = false)
        {
            if (_recognizer == null)
            {
                await InitializeRecognizerAsync();
            }

            SpeechRecognitionResult recognition = null;

            if (withUI)
            {
                recognition = await _recognizer.RecognizeWithUIAsync();
            }
            else
            {
                recognition = await _recognizer.RecognizeAsync();
            }

            if (recognition.Status == SpeechRecognitionResultStatus.Success &&
                recognition.Confidence != SpeechRecognitionConfidence.Rejected)
            {
                Debug.WriteLine($"[Speech to Text]: result: {recognition.Text}, {recognition.RawConfidence.ToString()}, {recognition.Confidence.ToString()}");
                var alternativeResults = recognition.GetAlternates(MaxRecognitionResultAlternates);

                foreach (var r in alternativeResults)
                {
                    Debug.WriteLine($"[Speech to Text]: alternative: {r.Text}, {r.RawConfidence.ToString()}, {r.Confidence.ToString()}");
                }

                var topResult = alternativeResults.Where(r => r.Confidence == SpeechRecognitionConfidence.High).FirstOrDefault();
                if (topResult != null)
                {
                    return(topResult.Text);
                }

                topResult = alternativeResults.Where(r => r.Confidence == SpeechRecognitionConfidence.Medium).FirstOrDefault();
                if (topResult != null)
                {
                    return(topResult.Text);
                }

                topResult = alternativeResults.Where(r => r.Confidence == SpeechRecognitionConfidence.Low).FirstOrDefault();
                if (topResult != null)
                {
                    return(topResult.Text);
                }
            }

            return(string.Empty);
        }
Example #35
        public static async Task<string> RecognizeVoiceCommand()
        {
            try
            {             
                speechRecognizer = await ResourceHelper.InitRecognizer() ;
                if(null == speechRecognizer)
                {
                    _command = ResourceHelper.GetString("Sys Err");//"系统异常";
                    return _command;
                }
                
                recognitionOperation = speechRecognizer.RecognizeAsync();

                SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;

                // If successful, display the recognition result. A cancelled task should do nothing.

                if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    if (speechRecognitionResult.Confidence == SpeechRecognitionConfidence.Rejected)
                    {
                        _command = ResourceHelper.GetString("invalid");//"对不起,无法识别您的命令";
                    }
                    else
                    {
                        string tag = "unknown";
                        if (speechRecognitionResult.Constraint != null)
                        {
                            // Only attempt to retrieve the tag if we didn't hit the garbage rule.
                            tag = speechRecognitionResult.Constraint.Tag;
                        }

                        _command = speechRecognitionResult.Text;
                    }
                }
                return _command;
            }
            catch (Exception e)
            {
                return e.Message;
            }                     
        }
Example #36
        public async void speech(string option)
        {
            var r = "";

            try
            {
                var _recognizer = new SpeechRecognizer();

                var _recOperation = _recognizer.RecognizeAsync();
                var recoResult = await _recOperation;

                r = recoResult.Text;
            }
            catch (Exception e)
            {
                r = "Exception" + e.ToString();
            }

            DispatchCommandResult(new PluginResult(PluginResult.Status.OK, "Everything went as planned, this is a result that is passed to the success handler." + r.ToString()));
        }
Example #37
        /// <summary>
        /// Move to a new state.
        /// </summary>
        private async Task SetState(SpeechDialogBoxState state)
        {
            // Do not interrupt while speaking.
            while (this.state == SpeechDialogBoxState.Speaking)
            {
                await Task.Delay(200);
            }

            this.state = state;
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, new DispatchedHandler(
               async () =>
               {
                   // Hide all.
                   this.DefaultState.Visibility = Visibility.Collapsed;
                   this.TypingState.Visibility = Visibility.Collapsed;
                   this.ListeningState.Visibility = Visibility.Collapsed;
                   this.ThinkingState.Visibility = Visibility.Collapsed;

                   switch (this.state)
                   {
                       case SpeechDialogBoxState.Default:
                           this.DefaultState.Visibility = Visibility.Visible;
                           break;
                       case SpeechDialogBoxState.Typing:
                           this.TypingState.Visibility = Visibility.Visible;
                           break;
                       case SpeechDialogBoxState.Listening:
                           this.ListeningState.Visibility = Visibility.Visible;
                           this.MediaElement.Source = new Uri("ms-appx:///Assets//Listening.wav");
                           SpeechRecognizer recognizer = new SpeechRecognizer();

                           foreach (var constraint in this.Constraints)
                           {
                               recognizer.Constraints.Add(constraint);
                           }

                           await recognizer.CompileConstraintsAsync();

                           var reco = recognizer.RecognizeAsync();
                           reco.Completed += this.SpeechRecognition_Completed;
                           break;
                       case SpeechDialogBoxState.Thinking:
                           this.ThinkingState.Visibility = Visibility.Visible;
                           break;
                       default:
                           break;
                   }
               }));
        }
Example #38
        private async void InitializeSpeechRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }
            speechRecognizer = new SpeechRecognizer();
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            var operation = await speechRecognizer.RecognizeAsync();
            if (!this.Completed && operation.Status == SpeechRecognitionResultStatus.Success)
            {
                this.Completed = true;
                ResultGenerated(operation.Text);
                // recognition has already completed; no Cancel() call is needed here
                speechRecognizer.Dispose();
                speechRecognizer = null;
            }
        }
Example #39
        private async void btnSearch_Click(object sender, RoutedEventArgs e)
        {
            this.txtCortanaMessages.Text = "Je vous écoute...";
            Windows.Globalization.Language langFR = new Windows.Globalization.Language("fr-FR");
            SpeechRecognizer recognizer = new SpeechRecognizer(langFR);

            SpeechRecognitionTopicConstraint topicConstraint
                    = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");

            recognizer.Constraints.Add(topicConstraint);
            await recognizer.CompileConstraintsAsync(); // Required

            var recognition = recognizer.RecognizeAsync();
            recognition.Completed += this.Recognition_Completed;
        }
Example #40
 private async void VoiceRecognizer()
 {
     voiceRecognizer = new SpeechRecognizer();
     SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "development");
     voiceRecognizer.Constraints.Add(topicConstraint);
     SpeechRecognitionCompilationResult result = await voiceRecognizer.CompileConstraintsAsync();
     SpeechRecognitionResult speechRecognitionResult = await voiceRecognizer.RecognizeAsync();
     //voiceRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
     //voiceRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
     //await voiceRecognizer.ContinuousRecognitionSession.StartAsync();
     // Note: every branch below currently plays the same Not_Found prompt;
     // the intended responses are left commented out in the source.
     if (pname == "Lorenzo")
     {
         if (speechRecognitionResult.Text.Contains("expensive") || speechRecognitionResult.Text.Contains("expense"))
         {
             //speechText.Text = "So much expensive";
             ReadVoice(Error.Not_Found);
             //pageView.Navigate(new Uri("http://www.americanas.com.br/produto/113151382/carro-eletrico-sport-car-vermelho-6v"));
         }
         else
         {
             ReadVoice(Error.Not_Found);
         }
     }
     else
     {
         ReadVoice(Error.Not_Found);
     }
 }
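예제 #40 leaves the continuous-recognition wiring commented out. For reference, a minimal sketch of that variant, using the handler names assumed by the commented lines, could look like this:

 private async void StartContinuousRecognition()
 {
     voiceRecognizer = new SpeechRecognizer();
     await voiceRecognizer.CompileConstraintsAsync();

     voiceRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
     voiceRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
     await voiceRecognizer.ContinuousRecognitionSession.StartAsync();
 }

 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     // Fires once per recognized utterance while the session is running.
     Debug.WriteLine(args.Result.Text);
 }

 private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     // Restarting on timeout is a common pattern; this is an assumption, not part of the original.
     if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded)
         await sender.StartAsync();
 }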
예제 #41
        public async Task<RecognizedSpeech> Recognize(string constraints, bool ui)
        {
            SpeechRecognitionGrammarFileConstraint grammarFileConstraint = null;
            var result = new RecognizedSpeech();
            bool isTable = false;
            Dictionary<string, string> dictionary = null;

            if (!string.IsNullOrWhiteSpace(constraints))
            {
                isTable = constraints.StartsWith("{table:");

                if (isTable)
                {
                    var name = constraints.Substring(7);
                    var i = name.IndexOf("}", StringComparison.Ordinal);
                    name = name.Substring(0, i);

                    var constraintBuilder = new StringBuilder();
                    dictionary = MainPage.Instance.mainDictionary[name];

                    Debug.WriteLine("table "+name+" count=" + dictionary.Count);

                    foreach (var key in dictionary.Keys)
                    {
                        constraintBuilder.Append(key.Replace(","," "));
                        constraintBuilder.Append(",");
                    }

                    if (constraintBuilder.Length < 2)
                    {
                        result.error = -3;
                        return result;
                    }

                    constraints = constraintBuilder.ToString(0, constraintBuilder.Length - 1);
                    // Replace HTML entities first, then neutralize the ';' item separator.
                    constraints = constraints.Replace("&amp;", " and ").Replace("&", " and ").Replace(";", "-");
                }

                //build grammar constraints
                var grammarFileTemplate =
                    await
                        StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///GrammarConstraintTemplate.grxml"));

                const string wordTemplate = "<item>{0}</item>";
                const string itemTemplate = "<item><one-of>{0}</one-of><tag>out=\"{1}\";</tag></item>";

                var itemBuilder = new StringBuilder();
                var items = constraints.Split(';');
                string keyword = null;
                foreach (var itemPart in items)
                {
                    var item = itemPart;

                    var equals = item.IndexOf('=');
                    if (equals > -1)
                    {
                        keyword = item.Substring(0, equals);
                        item = item.Substring(equals + 1);
                    }

                    var words = item.Split(',');
                    var wordBuilder = new StringBuilder();
                    foreach (var word in words)
                    {
                        wordBuilder.AppendFormat(wordTemplate, word);
                    }

                    if (!string.IsNullOrWhiteSpace(keyword))
                    {
                        itemBuilder.AppendFormat(itemTemplate, wordBuilder, keyword);
                    }
                    else
                    {
                        itemBuilder.Append(wordBuilder);
                    }
                }

                var localFolder = ApplicationData.Current.LocalFolder;

                var grammarTemplate = await FileIO.ReadTextAsync(grammarFileTemplate);
                var grammarFile =
                    await
                        localFolder.CreateFileAsync("GrammarConstraint.grxml", CreationCollisionOption.ReplaceExisting);
                var finalGrammarText = string.Format(grammarTemplate, itemBuilder);
                await FileIO.WriteTextAsync(grammarFile, finalGrammarText);

                grammarFileConstraint = new SpeechRecognitionGrammarFileConstraint(grammarFile, "constraints");
            }

            if (isRecognizing && recognizer != null)
            {
                await recognizer.StopRecognitionAsync();
            }

            recognizer = new SpeechRecognizer();

            if (grammarFileConstraint != null)
            {
                recognizer.Constraints.Add(grammarFileConstraint);
            }

            SpeechRecognitionResult recognize = null;

            try
            {
                isRecognizing = false;
                SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = SpeechStatus.None });

                await recognizer.CompileConstraintsAsync();

                isRecognizing = true;
                SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = SpeechStatus.Listening });

                recognize = await (ui ? recognizer.RecognizeWithUIAsync() : recognizer.RecognizeAsync());
            }
            catch (Exception e)
            {
                Debug.WriteLine(e.GetType() + ":" + e.Message);

                // recognize is still null if either awaited call above threw, so its
                // status is unavailable; flag the failure with the sentinel confidence value.
                result.confidence = 5;
                return result;
            }
            finally
            {
                isRecognizing = false;
                SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = isUserStopped ? SpeechStatus.Stopped : SpeechStatus.None });
            }

            result.status = isUserStopped ? SpeechRecognitionResultStatus.UserCanceled : recognize.Status;

            if (string.IsNullOrWhiteSpace(constraints))
            {
                result.text = recognize.Text;
                return result;
            }

            result.confidence = (int) recognize.Confidence;

            var text = recognize.Text.ToUpperInvariant();

            var items2 = constraints.Split(';');
            string keyword2 = null;
            var index = 1;
            foreach (var itemPart in items2)
            {
                var item = itemPart;

                var equals = item.IndexOf('=');
                if (equals > -1)
                {
                    keyword2 = item.Substring(0, equals);
                    item = item.Substring(equals + 1);
                }

                var words = item.Split(',');
                var innerIndex = 1;
                foreach (var word in words)
                {
                    if (word.ToUpperInvariant().Equals(text))
                    {
                        result.text = keyword2 ?? word;
                        if (isTable)
                        {
                            result.action = dictionary[result.text];
                        }

                        result.index = items2.Length == 1 ? innerIndex : index;
                        return result;
                    }

                    innerIndex++;
                }

                index++;
            }

            result.text = recognize.Text;
            return result;
        }
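예제 #41 formats a single {0} placeholder in GrammarConstraintTemplate.grxml with the generated <item> elements. The template file itself is not included; assuming a standard SRGS grammar with one public root rule and the semantics-ms tag format implied by the out="..." tags above, it could look roughly like this (shown here as a C# string for reference):

        // Hypothetical content of GrammarConstraintTemplate.grxml; the real file is not in the snippet.
        const string GrammarTemplate = @"<?xml version=""1.0"" encoding=""utf-8""?>
<grammar xml:lang=""en-US"" root=""commands"" version=""1.0""
         tag-format=""semantics-ms/1.0""
         xmlns=""http://www.w3.org/2001/06/grammar"">
  <rule id=""commands"" scope=""public"">
    <one-of>
      {0}
    </one-of>
  </rule>
</grammar>";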
예제 #42
 private async void Button_Click_4(object sender, RoutedEventArgs e)
 {
     // Note: unlike the other examples, this snippet appears to target the older
     // Windows Phone 8 Windows.Phone.Speech.Recognition API (Grammars.AddGrammarFromUri,
     // MessageBox), not the UWP Windows.Media.SpeechRecognition API.
     SpeechRecognizer speechRecognizer = new SpeechRecognizer();
     speechRecognizer.Grammars.AddGrammarFromUri("music", new Uri("ms-appx:///SRGSGrammar1.xml"));
     try
     {
         var result = await speechRecognizer.RecognizeAsync();
         if (result.TextConfidence == SpeechRecognitionConfidence.Rejected)
         {
             MessageBox.Show("语音识别不到");
         }
         else
         {
             string music = "";
             if (result.Semantics.Keys.Contains("music"))
             {
                 music = result.Semantics["music"].Value.ToString();
             }
             MessageBox.Show(result.Text + "|" + music);
         }
     }
     catch (Exception err)
     {
         MessageBox.Show("请检查是否接收语音隐私协议" + err.Message + err.HResult);
     }
 }
예제 #44
        public async void Run(IBackgroundTaskInstance taskInstance)
        {
            BackgroundTaskDeferral deferral = taskInstance.GetDeferral(); // This must be retrieved prior to subscribing to events below which use it

            using (MopidyClient client = new MopidyClient())
            {
                await client.Open();
                await client.Play("spotify:track:1hKdDCpiI9mqz1jVHRKG0E");

                var speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);

                var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
                speechRecognizer.Constraints.Add(webSearchGrammar);

                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    // Listen in a loop for the lifetime of the background task;
                    // the deferral is intentionally never completed.
                    while (true)
                    {
                        var recognitionOperation = speechRecognizer.RecognizeAsync();
                        SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;

                        if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        {
                            if (speechRecognitionResult.Text.StartsWith("play", StringComparison.OrdinalIgnoreCase))
                            {
                                string playSearchString = speechRecognitionResult.Text.Substring(4).Trim();

                                string uri;
                                if (playSearchString.StartsWith("artist", StringComparison.OrdinalIgnoreCase))
                                {
                                    uri = await client.SearchArtist(playSearchString.Substring(6).Trim());
                                }
                                else
                                {
                                    uri = await client.Search(playSearchString);
                                }

                                if (uri != null)
                                {
                                    await client.Play(uri);
                                }
                            }
                            else if (speechRecognitionResult.Text.StartsWith("stop", StringComparison.OrdinalIgnoreCase))
                            {
                                await client.Stop();
                            }
                            else if (speechRecognitionResult.Text.StartsWith("louder", StringComparison.OrdinalIgnoreCase))
                            {
                                int volume = await client.GetVolume();
                                volume = Math.Min(volume + 10, 100);
                                await client.SetVolume(volume);
                            }
                            else if (speechRecognitionResult.Text.StartsWith("quieter", StringComparison.OrdinalIgnoreCase))
                            {
                                int volume = await client.GetVolume();
                                volume = Math.Max(volume - 10, 0);
                                await client.SetVolume(volume);
                            }
                            else if (speechRecognitionResult.Text.StartsWith("mute", StringComparison.OrdinalIgnoreCase))
                            {
                                await client.SetVolume(0);
                            }
                        }
                        else
                        {
                            //resultTextBlock.Visibility = Visibility.Visible;
                            //resultTextBlock.Text = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
                        }

                    }
                }
            }
        }
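MopidyClient is not shown with 예제 #44. Judging only from the calls made above, a client surface along these lines would be sufficient (an inferred sketch, not the actual implementation):

        // Inferred from usage in Run(); names and signatures are assumptions.
        public interface IMopidyClient : IDisposable
        {
            Task Open();                              // connect to the Mopidy server
            Task Play(string uri);                    // start playback of a track URI
            Task Stop();                              // stop playback
            Task<string> Search(string query);        // free-text search; returns a playable URI or null
            Task<string> SearchArtist(string artist); // artist-restricted search
            Task<int> GetVolume();                    // current volume (0-100)
            Task SetVolume(int volume);               // set volume (0-100)
        }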
예제 #45
        private async void lineRecog()
        {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();

            // Compile the default dictation grammar
            SpeechRecognitionCompilationResult compilationResult =
                                                    await speechRecognizer.CompileConstraintsAsync();
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                return;

            // Start recognizing
            // Note: you can also use RecognizeWithUIAsync()
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();
            result = speechRecognitionResult.Text;
        }
예제 #46
        private async void VoiceButton_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                // Get the top user-preferred language and its display name.
                var topUserLanguage = Windows.System.UserProfile.GlobalizationPreferences.Languages[0];
                var language = new Windows.Globalization.Language(topUserLanguage);

                firstStopAttemptDone = false;
                listening = true;
                using (speechRecognizer = new SpeechRecognizer(language))
                {

                    var webSearchConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, WEB_SEARCH);
                    speechRecognizer.Constraints.Add(webSearchConstraint);
                    SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                    // setting timeouts
                    speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(4.0);
                    speechRecognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(4.0);
                    speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.0);

                    speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                        return;

                    VisualStateManager.GoToState(this, VISUAL_STATE_LISTENING, true);
                    this.IsReadOnly = true;
                    this.Text = LISTENING_TEXT;

                    SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();
                    if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                        Text = speechRecognitionResult.Text;
                    else
                        Text = SPEECH_RECOGNITION_FAILED;
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
                Text = string.Empty;
            }
            finally
            {
                timer.Stop();
                hypotesis = string.Empty;
                VisualStateManager.GoToState(this, VISUAL_STATE_NOT_LISTENING, true);
                this.IsReadOnly = false;
                listening = false;
            }
        }
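The SpeechRecognizer_HypothesisGenerated handler referenced in 예제 #46 is not shown. A minimal sketch, assuming the partial text is written into the control from the UI thread (the hypotesis field and timer seen in the finally block suggest the original does something similar), might be:

        private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string partialText = args.Hypothesis.Text;

            // The event is raised off the UI thread, so marshal the update back through the dispatcher.
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                this.Text = partialText + "...";
            });
        }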