Example #1
        public Task OnSearchStop()
        {
            // Detach the continuous-recognition handler, then tear down the recognizer.
            this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= OnSpeechResult;

            speechRecognizer.Dispose();
            speechRecognizer = null;
            return Task.CompletedTask;
        }
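For context, a minimal sketch of the matching start-side handler that this teardown implies, assuming the same speechRecognizer field and OnSpeechResult handler (the exact grammar setup is an assumption; see also Example #34, where the recognizer is constructed before the session starts):

        public async Task OnSearchStart()
        {
            // Hypothetical counterpart to OnSearchStop above.
            // Compile the default dictation grammar before starting a continuous session.
            await this.speechRecognizer.CompileConstraintsAsync();

            // Receive results until the session is stopped and the recognizer disposed.
            this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated += OnSpeechResult;
            await this.speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }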
Example #2
        private async void Button_Click_1(object sender, RoutedEventArgs e)
        {
            await SayWithTheVoice(
                $"Hello {txtName.Text}, I am Sam, The Tip of the Sword, and the better looking AI.  You're looking fine today.  How can I help?",
                "Mark");


            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();

            if (speechRecognitionResult.Text.Contains("coffee"))
            {
                await SayWithTheVoice("I'm sorry, I don't make coffee", "Mark");
            }
            else if (speechRecognitionResult.Text.Contains("chocolate"))
            {
                await SayWithTheVoice("Coming right up!", "Mark");
            }
            else
            {
                await SayWithTheVoice("I'm confused", "Mark");
            }
        }
Example #3
        private async void btnTalk_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar that is loaded by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    txtSource.Text = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
                {
                    //this.resultTextBlock.Visibility = Visibility.Visible;
                    lblResult.Text = "Özür dilerim, konuşma tanımayı kullanmak mümkün değildi. Konuşma gizlilik bildirimini kabul edilmedi.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #4
        private async void OnTimer(object state)
        {
            var startTime   = (DateTime)state;
            var runningTime = Math.Round((DateTime.Now - startTime).TotalSeconds, 0);

            using (Windows.Media.SpeechRecognition.SpeechRecognizer recognizer =
                       new Windows.Media.SpeechRecognition.SpeechRecognizer())
            {
                //recognizer.Constraints.Add(new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint
                //    (Windows.Media.SpeechRecognition.SpeechRecognitionScenario.FormFilling, "Phone"));
                await recognizer.CompileConstraintsAsync();

                recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(5);
                recognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(20);

                Windows.Media.SpeechRecognition.SpeechRecognitionResult aresult = await recognizer.RecognizeAsync();

                if (aresult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        ExtendedExecutionSessionStatus.Text += aresult.Text + Environment.NewLine;
                    });
                }
            }
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                ExtendedExecutionSessionStatus.Text += $"Extended execution has been active for {runningTime} seconds" + Environment.NewLine;
            });
        }
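OnTimer expects the session start time as its timer state; a minimal sketch of how it might be scheduled, assuming System.Threading.Timer and a 30-second period (both the timer type and the interval are assumptions, not shown in the original):

        private System.Threading.Timer periodicTimer;

        private void StartPeriodicRecognition()
        {
            // Hypothetical setup: invoke OnTimer every 30 seconds, passing the start time as state.
            this.periodicTimer = new System.Threading.Timer(OnTimer, DateTime.Now, TimeSpan.Zero, TimeSpan.FromSeconds(30));
        }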
        private async void btnTalk_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar that is loaded by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    txtSource.Text = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
                {
                    //this.resultTextBlock.Visibility = Visibility.Visible;
                    lblResult.Text = "I'm sorry, I was not able to use speech recognition. The speech privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #6
        private async void startDetect()
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            string[] responses = { "start", "quit" };
            // Add a list constraint to the recognizer.
            var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "startOrQuit");

            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            //textBlock1.Text = "Say Start";
            //Recognise with UI
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            //Recognise without UI
            //Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            if (speechRecognitionResult.Text == "start")
            {
                //textBlock2.Text = "Start detected";
                await Task.Delay(2000);

                startRecAsync();
            }
            if (speechRecognitionResult.Text == "quit")
            {
                CoreApplication.Exit();
            }
        }

        private async void StartRecognizing_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
            ContentDialog confirmSave = new ContentDialog()
            {
                Title = "Save this note?",
                Content = speechRecognitionResult.Text,
                PrimaryButtonText = "Save Note",
                SecondaryButtonText = "Cancel"
            };

            ContentDialogResult result = await confirmSave.ShowAsync();
            if (result == ContentDialogResult.Primary)
            {
                tbNote.Text = speechRecognitionResult.Text;
            }
            else
            {
                // User pressed Cancel or the back arrow; discard the result.
            }
            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();
        } // end StartRecognizing_Click
		private async void RecognizeWithDictationGrammar_Click(object sender, RoutedEventArgs e)
		{
			// Create an instance of SpeechRecognizer.
			var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

			// Compile the dictation grammar that is loaded by default.
			await speechRecognizer.CompileConstraintsAsync();

			this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

			// Start recognition.
			try
			{
				Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
				// If successful, display the recognition result.
				if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
				{
					this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
					this.resultTextBlock.Text = speechRecognitionResult.Text;
				}
			}
			catch (Exception exception)
			{
				if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
				{
					this.resultTextBlock.Visibility = Visibility.Visible;
					this.resultTextBlock.Text = "The privacy statement was declined.";
				}
				else
				{
					var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
					await messageDialog.ShowAsync();
				}
			}
		}
Example #9
        public Interact()
        {
            this.InitializeComponent();


            recon = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            mediaElement.MediaEnded += MediaElement_MediaEnded;

            Start2();
        }
Example #10
        private async Task SpeakToMachine(object sender, RoutedEventArgs e)
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            objTextBox.Text = speechRecognitionResult.Text;
            OnClick(sender, e);
        }
Example #11
        /*private async void OnTextChanging(object sender, TextBoxTextChangingEventArgs e)
         * {
         *  var synth = new SpeechSynthesizer();
         *  var textboxObj = (TextBox)sender;
         *  Windows.Media.SpeechSynthesis.SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(textboxObj.Text);
         *  mediaElement.SetSource(stream, stream.ContentType);
         *  mediaElement.Play();
         *
         *
         *
         * }*/

        private async Task SpeakToComputer(object sender, RoutedEventArgs e)
        {
            Debug.WriteLine("HEELEMOQHNOQOQWGWQGI\n");
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            objTextBox.Text = speechRecognitionResult.Text;
        }
Example #12
        public static async Task <string> GetTextFromSpeech()
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            return speechRecognitionResult.Text;
        }
        /// <summary>
        /// Generate Text from Voice
        /// </summary>
        /// <returns>Generated text</returns>
        public async Task<string> VoiceToText()
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            return speechRecognitionResult.Text;
        }
Example #14
        // As of this time, UWP only offers microphone input to SpeechRecognizer, not file input
        public static async System.Threading.Tasks.Task <string> MicrophoneToTextAsync()
        {
            Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            Log.WriteLine("Text:" + speechRecognitionResult.Text);
            return speechRecognitionResult.Text;
        }
		private async void InitializeSpeechRecognizer()
		{
			// Create an instance of SpeechRecognizer.
			this.speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

			// Add a grammar file constraint to the recognizer.
			var storageFile = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Colors.grxml"));
			var grammarFileConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionGrammarFileConstraint(storageFile, "colors");

			this.speechRecognizer.UIOptions.ExampleText = @"Ex. ""blue background"", ""green text""";
			this.speechRecognizer.Constraints.Add(grammarFileConstraint);

			// Compile the constraint.
			await this.speechRecognizer.CompileConstraintsAsync();
		}
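Once a grammar-file constraint compiles, the recognition result can expose semantic properties defined by the grammar's tags; a minimal sketch, assuming hypothetical "background" and "text" keys, since Colors.grxml itself is not shown here:

        private async Task RecognizeColorCommandAsync()
        {
            Windows.Media.SpeechRecognition.SpeechRecognitionResult result =
                await this.speechRecognizer.RecognizeWithUIAsync();

            // SemanticInterpretation.Properties holds values produced by the grammar's tags;
            // the key names below stand in for whatever rules Colors.grxml actually defines.
            var properties = result.SemanticInterpretation.Properties;
            if (properties.ContainsKey("background"))
            {
                System.Diagnostics.Debug.WriteLine("Background color: " + properties["background"][0]);
            }
            if (properties.ContainsKey("text"))
            {
                System.Diagnostics.Debug.WriteLine("Text color: " + properties["text"][0]);
            }
        }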
        private async void VoiceIconTapped(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();
            this.UserInput.Text = speechRecognitionResult.Text;
        }
        public static async System.Threading.Tasks.Task InitialiseSpeechRecognition()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Add a dictation topic constraint to the recognizer.
            var dictationGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.Dictation, "dictation");


            speechRecognizer.UIOptions.AudiblePrompt = "Please give us your feedback...";
            speechRecognizer.UIOptions.ExampleText = @"This game is awesome, rate me 5 stars";
            speechRecognizer.UIOptions.IsReadBackEnabled = false;
            speechRecognizer.Constraints.Add(dictationGrammar);

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();
        }
Example #19
        public static async System.Threading.Tasks.Task InitialiseSpeechRecognition()
        {
            // Create an instance of SpeechRecognizer.
            speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Add a dictation topic constraint to the recognizer.
            var dictationGrammar = new Windows.Media.SpeechRecognition.
                                   SpeechRecognitionTopicConstraint(
                Windows.Media.SpeechRecognition.SpeechRecognitionScenario.Dictation,
                "dictation");


            speechRecognizer.Constraints.Add(dictationGrammar);

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();
        }
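A minimal usage sketch for the initialiser above, assuming the static speechRecognizer field it populates (the RecognizeOnceAsync name is hypothetical):

        public static async System.Threading.Tasks.Task<string> RecognizeOnceAsync()
        {
            // Run a single dictation pass on the recognizer prepared by InitialiseSpeechRecognition().
            Windows.Media.SpeechRecognition.SpeechRecognitionResult result =
                await speechRecognizer.RecognizeAsync();

            return result.Text;
        }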
Example #20
        private async void Button_Click(object sender, Windows.UI.Xaml.RoutedEventArgs e)
        {
            TextStatus.Text = "Listening....";

            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            TextCommand.Text = speechRecognitionResult.Text;

            await SendToBot(TextCommand.Text);
        }
Example #21
        private async void btnMicFrench_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer =
                    new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language("fr")); // A language can be passed as a parameter; otherwise the recognizer uses the system language.
                await speechRecognizer.CompileConstraintsAsync();

                Windows.Media.SpeechRecognition.SpeechRecognitionResult resultado =
                    await speechRecognizer.RecognizeWithUIAsync();

                txtDescripcionFrances.Text = resultado.Text;
            }
            catch (Exception)
            {
                // Recognition failures (for example, a declined privacy statement) are silently ignored here.
            }
        }
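Constructing a recognizer for a specific language fails if that language is not installed, so it can be worth probing support first; a minimal sketch using the SpeechRecognizer.SupportedTopicLanguages property (the fallback to the system language is an assumption):

        private static Windows.Media.SpeechRecognition.SpeechRecognizer CreateRecognizerFor(string languageTag)
        {
            var language = new Windows.Globalization.Language(languageTag);

            // SupportedTopicLanguages lists the languages installed for topic (dictation/web-search) recognition.
            foreach (var supported in Windows.Media.SpeechRecognition.SpeechRecognizer.SupportedTopicLanguages)
            {
                if (string.Equals(supported.LanguageTag, language.LanguageTag, StringComparison.OrdinalIgnoreCase))
                {
                    return new Windows.Media.SpeechRecognition.SpeechRecognizer(language);
                }
            }

            // Assumed fallback: use the default (system speech) language instead.
            return new Windows.Media.SpeechRecognition.SpeechRecognizer();
        }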
Example #22
        private async void RecognizeWithListConstraint_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // You could create any IEnumerable dynamically.
            string[] responses = { "Yes", "No" };

            // Add a list constraint to the recognizer.
            var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");

            speechRecognizer.UIOptions.ExampleText = @"Ex. ""Yes"", ""No""";
            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text            = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
                {
                    this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text       = "The privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #23
        private async void VoiceSearchButton_OnClick(object sender, RoutedEventArgs e)
        {
            try
            {
                // Create an instance of SpeechRecognizer.
                var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

                // Listen for audio input issues.
                //speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

                // Add a web search grammar to the recognizer.
                var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");


                speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
                speechRecognizer.UIOptions.ExampleText   = @"Ex. 'Play Rahman songs'";
                speechRecognizer.Constraints.Add(webSearchGrammar);

                // Compile the constraint.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                //await speechRecognizer.RecognizeWithUIAsync();

                // Do something with the recognition result.
                var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
                await messageDialog.ShowAsync();
            }
            catch (Exception err)
            {
                // Define a variable that holds the error for the speech recognition privacy policy.
                // This value maps to the SPERR_SPEECH_PRIVACY_POLICY_NOT_ACCEPTED error,
                // returned when the speech recognition privacy policy has not been accepted.
                const int privacyPolicyHResult = unchecked ((int)0x80045509);

                // Check whether the error is for the speech recognition privacy policy.
                if (err.HResult == privacyPolicyHResult)
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog("You will need to accept the speech privacy policy in order to use speech recognition in this app.", "Error");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #24
        private async void btnMicEspaniol_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer =
                    new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language("es-MX")); // A language can be passed as a parameter; otherwise the recognizer uses the system language.
                await speechRecognizer.CompileConstraintsAsync();

                Windows.Media.SpeechRecognition.SpeechRecognitionResult resultado =
                    await speechRecognizer.RecognizeWithUIAsync();

                txtpalespanol.Text = resultado.Text;
            }
            catch (Exception)
            {
                // Rethrow so the caller can surface the recognition failure.
                throw;
            }
        }
        private async void RecognizeWithWebSearchGrammar_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. ""weather for London""";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text            = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
                {
                    this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text       = "The privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Example #27
        // Voice recognition
        private async void StartRecognizing_Click(object sender, RoutedEventArgs e)
        {
            if (await SpeechRecognition.RequestMicrophonePermission())
            {
                // Create an instance of SpeechRecognizer.
                var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

                // Compile the dictation grammar by default.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // Do something with the recognition result.
                HomePageViewModel.Current.QueryWord(speechRecognitionResult.Text);
                //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
                //await messageDialog.ShowAsync();
            }
        }
Example #29
        private static async Task <string> Listen()
        {
// Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult =
                await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();

            var whatWasSaid = speechRecognitionResult.Text;

            return whatWasSaid;
        }
Example #30
        private async void UxStartSpeechRecognition_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            switch (speechRecognitionResult.Text.ToLower())
            {
            case "eins":
            case "1":
                this.Result = 1;
                break;

            case "null":
            case "0":
                this.Result = 0;
                break;

            default:
                this.Result = -1;
                break;
            }

            // Report the recognition result, reusing the value computed above.
            if (this.Result != -1)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog("Press OK to continue.", $"'{speechRecognitionResult.Text}' recognized");
                await messageDialog.ShowAsync();
            }
            else
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog($"'{speechRecognitionResult.Text}' was recognized.", "Invalid input, please try again.");
                await messageDialog.ShowAsync();
            }
        }
Example #31
        public async Task <string> Reconnaissance()
        {
            try
            {
                // Create an instance of SpeechRecognizer.
                var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
                // Compile the dictation grammar by default.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    return speechRecognitionResult.Text;
                }
                return String.Empty;
            }
            catch (Exception)
            {
                // Recognition failed (for example, the privacy statement was declined); return an empty result.
                return String.Empty;
            }
        }
Example #32
        private async void Click_Record(object sender, RoutedEventArgs e)
        {
            // Get the app's local storage folder.
            Windows.Storage.StorageFolder storageFolder = Windows.Storage.ApplicationData.Current.LocalFolder;
            // Create a new file named "chickenOrFish.txt", replacing it if it already exists.
            Windows.Storage.StorageFile sampleFile = await storageFolder.CreateFileAsync("chickenOrFish.txt", Windows.Storage.CreationCollisionOption.ReplaceExisting);

            //await Windows.Storage.FileIO.WriteTextAsync(sampleFile, "Hello sir what would you like for breakfast chicken please good morning sir what would you like for lunch I want fish noodles thanks");
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // store recognized text as string script0.
            string script0 = speechRecognitionResult.Text;
            // write script0 to the text file "chickenOrFish.txt".
            await Windows.Storage.FileIO.WriteTextAsync(sampleFile, script0);

            var messageDialog = new Windows.UI.Popups.MessageDialog("Heard you say: ", speechRecognitionResult.Text);
        }
Example #33
        private static void SpeechRecognizer_HypothesisGenerated(Windows.Media.SpeechRecognition.SpeechRecognizer sender, Windows.Media.SpeechRecognition.SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            // Log interim (hypothesis) text as the recognizer refines its result.
            Log.WriteLine(args.Hypothesis.Text);
        }
Example #34
        public async Task Initialise()
        {
            speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            await OnSearchStart();
        }
Example #35
        private async void startRecAsync()
        {
            startRec.IsEnabled = false;
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            //speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            Random rnd           = new Random();
            int    colors_number = rnd.Next(1, 7);

            string correct_colors = "";

            for (int i = 0; i < colors_number; i++)
            {
                int temp_rnd = rnd.Next(1, 5);
                if (temp_rnd == 1)
                {
                    correct_colors = correct_colors + "green ";
                    green.Fill     = new SolidColorBrush(Windows.UI.Colors.Green);
                    await Task.Delay(1000);

                    green.Fill = new SolidColorBrush(Windows.UI.Colors.DarkGreen);
                    await Task.Delay(200);
                }
                if (temp_rnd == 2)
                {
                    correct_colors = correct_colors + "red ";
                    red.Fill       = new SolidColorBrush(Windows.UI.Colors.Red);
                    await Task.Delay(1000);

                    red.Fill = new SolidColorBrush(Windows.UI.Colors.DarkRed);
                    await Task.Delay(200);
                }
                if (temp_rnd == 3)
                {
                    correct_colors = correct_colors + "blue ";
                    blue.Fill      = new SolidColorBrush(Windows.UI.Colors.Blue);
                    await Task.Delay(1000);

                    blue.Fill = new SolidColorBrush(Windows.UI.Colors.DarkBlue);
                    await Task.Delay(200);
                }
                if (temp_rnd == 4)
                {
                    correct_colors = correct_colors + "yellow ";
                    yellow.Fill    = new SolidColorBrush(Windows.UI.Colors.Yellow);
                    await Task.Delay(1000);

                    yellow.Fill = new SolidColorBrush(Windows.UI.Colors.DarkOrange);
                    await Task.Delay(200);
                }
            }
            string new_correct_colors = correct_colors.TrimEnd(" ");

            //indicates that speech recognition is on
            elli.Fill = new SolidColorBrush(Windows.UI.Colors.Red);
            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            //Recognition with UI
            //Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.

            textBlock1.Text = speechRecognitionResult.Text;
            elli.Fill       = new SolidColorBrush(Windows.UI.Colors.SteelBlue);

            if (new_correct_colors == speechRecognitionResult.Text)
            {
                textBlock2.Text = "Good work " + Thumbsup;
            }
            else
            {
                textBlock2.Text = "Wrong! Try again " + Disappointed;
            }
            await Task.Delay(3000);

            startRec.IsEnabled = true;
            startDetect();
        }
Example #36
        private async void MediaElement_MediaEnded(object sender, RoutedEventArgs e)
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();


            string[] responses = { "George", "John", "Tony", "Jason", "Antony", "Gabriel" };



            if (msg == 0)
            {
                con = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "contactNames");
            }
            else if (msg == 1)
            {
                responses = new string[] { "Hello", "What time is it", "I was created at Hackathon"
                                           , "Jarvis call my girlfriend", "Who are your Creators", "Bye" };
                con = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "commands");
            }
            else if (msg == 2)
            {
                responses = new string[] { "Good morning" };
                con       = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "greeting");
            }



            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"George";
            if (msg == 0 || msg == 1 || msg == 2)
            {
                speechRecognizer.Constraints.Add(con);
            }
            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Search contacts.
            if (msg == 0)
            {
                if (speechRecognitionResult.Text != "")
                {
                    Name = speechRecognitionResult.Text;
                    //start2();
                    findContact();
                    return;
                }
                else
                {
                    await Task.Delay(2000);

                    Start();
                }
            }
            else if (msg == 1)
            {
                if (speechRecognitionResult.Text != "")
                {
                    Message = speechRecognitionResult.Text;

                    jarvis();
                }
                else
                {
                    await Task.Delay(2000);

                    Start2();
                }
            }
            else if (msg == 2)
            {
                if (speechRecognitionResult.Text.Contains(""))
                {
                    Message = speechRecognitionResult.Text;
                    ComposeEmail(contactt, Message);
                }
                else
                {
                    await Task.Delay(2000);

                    Start4();
                }
            }
        }