private async void RecognizeWithSRGSGrammarFileConstraintOnce_Click(object sender, RoutedEventArgs e)
        {
            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await this.speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text            = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
                {
                    this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text       = "The privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }

            this.InitializeSpeechRecognizer();
        }
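
Note: these handlers compare against App.HResultPrivacyStatementDeclined, which is never defined in the excerpts. A minimal sketch of the assumed App.xaml.cs definition, using the SPERR_SPEECH_PRIVACY_POLICY_NOT_ACCEPTED value (0x80045509) that Code Example #22 cites:

        // Assumed definition (not shown in these examples): the HResult produced when
        // the user has declined the speech recognition privacy statement.
        internal const uint HResultPrivacyStatementDeclined = 0x80045509;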
Code Example #2
        private async void Button_Click_1(object sender, RoutedEventArgs e)
        {
            await SayWithTheVoice(
                $"Hello {txtName.Text}, I am Sam, The Tip of the Sword, and the better looking AI.  You're looking fine today.  How can I help?",
                "Mark");


            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();

            if (speechRecognitionResult.Text.Contains("coffee"))
            {
                await SayWithTheVoice("I'm sorry, I don't make coffee", "Mark");
            }
            else if (speechRecognitionResult.Text.Contains("chocolate"))
            {
                await SayWithTheVoice("Coming right up!", "Mark");
            }
            else
            {
                await SayWithTheVoice("I'm confused", "Mark");
            }
        }
Code Example #3
        protected override void OnActivated(IActivatedEventArgs args)
        {
            InitializeApp();

            // Was the app activated by a voice command?
            if (args.Kind == Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                var commandArgs = args as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // If so, get the name of the voice command and the values for the semantic properties from the grammar file
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                var    interpretation   = speechRecognitionResult.SemanticInterpretation.Properties;
                IReadOnlyList<string> dictatedSearchTerms;
                interpretation.TryGetValue("dictatedSearchTerms", out dictatedSearchTerms);

                switch (voiceCommandName)
                {
                case "NearMeSearch":
                    MainViewModel.SearchNearMeCommand.Execute(null);
                    break;

                case "PlaceSearch":
                    MainViewModel.SearchTerm = dictatedSearchTerms[0];
                    MainViewModel.SearchTrailsCommand.Execute(null);
                    break;

                default:
                    // There is no match for the voice command name.
                    break;
                }
            }
        }
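
Note: voice-command activation like the handler above only fires if a Voice Command Definition (VCD) file was registered beforehand. A hedged sketch of that registration, typically placed in OnLaunched; "VoiceCommands.xml" is an assumed file name, not taken from this project:

        protected override async void OnLaunched(LaunchActivatedEventArgs e)
        {
            // Normal frame setup elided.

            // Register the VCD file so the commands handled in OnActivated exist.
            try
            {
                Windows.Storage.StorageFile vcdFile =
                    await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(
                        new Uri("ms-appx:///VoiceCommands.xml"));
                await Windows.ApplicationModel.VoiceCommands.VoiceCommandDefinitionManager
                    .InstallCommandDefinitionsFromStorageFileAsync(vcdFile);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine("VCD installation failed: " + ex.Message);
            }
        }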
Code Example #4
        protected override void OnActivated(IActivatedEventArgs args)
        {
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                var rootFrame   = EnsureRootFrame();
                var commandArgs = args as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;
                string navigationTarget = speechRecognitionResult.SemanticInterpretation.Properties["NavigationTarget"][0];

                switch (voiceCommandName)
                {
                case "cortanaitems":
                    rootFrame.Navigate(typeof(CortanaListItems), null);
                    break;

                case "cortanaappointments":
                    rootFrame.Navigate(typeof(CortanaCalendar), null);
                    break;

                case "cortanasearch":
                    rootFrame.Navigate(typeof(CortanaSearch), null);
                    break;

                default:
                    rootFrame.Navigate(typeof(MainPage), null);
                    break;
                }
            }
            base.OnActivated(args);
        }
Code Example #5
        private async void OnTimer(object state)
        {
            var startTime   = (DateTime)state;
            var runningTime = Math.Round((DateTime.Now - startTime).TotalSeconds, 0);

            using (Windows.Media.SpeechRecognition.SpeechRecognizer recognizer =
                       new Windows.Media.SpeechRecognition.SpeechRecognizer())
            {
                //recognizer.Constraints.Add(new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint
                //    (Windows.Media.SpeechRecognition.SpeechRecognitionScenario.FormFilling, "Phone"));
                await recognizer.CompileConstraintsAsync();

                recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(5);
                recognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(20);

                Windows.Media.SpeechRecognition.SpeechRecognitionResult aresult = await recognizer.RecognizeAsync();

                if (aresult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        ExtendedExecutionSessionStatus.Text += aresult.Text + Environment.NewLine;
                    });
                }
            }
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                ExtendedExecutionSessionStatus.Text += $"Extended execution has been active for {runningTime} seconds" + Environment.NewLine;
            });
        }
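
Note: OnTimer above is a System.Threading.Timer callback running under extended execution. A hedged sketch of the setup that would drive it, following the standard ExtendedExecutionSession pattern (member names and intervals are assumptions):

        private System.Threading.Timer periodicTimer;

        private async Task BeginExtendedExecutionAsync()
        {
            var session = new Windows.ApplicationModel.ExtendedExecution.ExtendedExecutionSession
            {
                Reason      = Windows.ApplicationModel.ExtendedExecution.ExtendedExecutionReason.Unspecified,
                Description = "Periodic speech recognition"
            };

            var result = await session.RequestExtensionAsync();
            if (result == Windows.ApplicationModel.ExtendedExecution.ExtendedExecutionResult.Allowed)
            {
                // Invoke OnTimer every 30 seconds, passing the start time as the state object.
                periodicTimer = new System.Threading.Timer(OnTimer, DateTime.Now,
                    TimeSpan.FromSeconds(10), TimeSpan.FromSeconds(30));
            }
        }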
Code Example #6
        private async void startDetect()
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            string[] responses = { "start", "quit" };
            // Add a list constraint to the recognizer.
            var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "startOrQuit");

            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            //textBlock1.Text = "Say Start";
            //Recognise with UI
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            //Recognise without UI
            //Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            if (speechRecognitionResult.Text == "start")
            {
                //textBlock2.Text = "Start detected";
                await Task.Delay(2000);

                startRecAsync();
            }
            if (speechRecognitionResult.Text == "quit")
            {
                CoreApplication.Exit();
            }
        }
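
Note: comparing speechRecognitionResult.Text against exact strings is brittle. A minimal helper sketch (not in the source) that uses the Confidence property from the same API to gate matches:

        private static bool IsConfidentMatch(
            Windows.Media.SpeechRecognition.SpeechRecognitionResult result, string expected)
        {
            // Only accept medium- or high-confidence recognitions of the expected phrase.
            return result.Confidence != Windows.Media.SpeechRecognition.SpeechRecognitionConfidence.Rejected &&
                   result.Confidence != Windows.Media.SpeechRecognition.SpeechRecognitionConfidence.Low &&
                   string.Equals(result.Text, expected, StringComparison.OrdinalIgnoreCase);
        }

With such a helper, the checks above could read `if (IsConfidentMatch(speechRecognitionResult, "start"))`.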
Code Example #7
        private async void btnTalk_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar that is loaded by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    txtSource.Text = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
                {
                    //this.resultTextBlock.Visibility = Visibility.Visible;
                    lblResult.Text = "I'm sorry, I was not able to use speech recognition. The speech privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Code Example #8
        /// <summary>
        /// Invoked when the application is activated.
        /// </summary>
        /// <param name="e">Details about the launch request and process.</param>
        protected override void OnActivated(IActivatedEventArgs e)
        {
            // Was the app activated by a voice command?
            if (e.Kind != Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                return;
            }

            var commandArgs = e as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;

            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

            // The commandMode is either "voice" or "text", and it indicates how the voice command was entered by the user.
            // We should respect "text" mode by providing feedback in a silent form.
            string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

            // If so, get the name of the voice command, the actual text spoken, and the value of Command/Navigate@Target.
            string voiceCommandName = speechRecognitionResult.RulePath[0];
            string textSpoken       = speechRecognitionResult.Text;
            string navigationTarget = this.SemanticInterpretation("NavigationTarget", speechRecognitionResult);

            Type   navigateToPageType        = typeof(MainPage);
            string navigationParameterString = string.Empty;

            switch (voiceCommandName)
            {
            case "showASection":
            case "goToASection":
                string newspaperSection = this.SemanticInterpretation("newspaperSection", speechRecognitionResult);
                navigateToPageType        = typeof(ShowASectionPage);
                navigationParameterString = string.Format("{0}|{1}", commandMode, newspaperSection);
                break;

            case "message":
            case "text":
                string contact = this.SemanticInterpretation("contact", speechRecognitionResult);
                string msgText = this.SemanticInterpretation("msgText", speechRecognitionResult);
                navigateToPageType        = typeof(MessagePage);
                navigationParameterString = string.Format("{0}|{1}|{2}", commandMode, contact, msgText);
                break;

            case "playAMovie":
                string movieSearch = this.SemanticInterpretation("movieSearch", speechRecognitionResult);
                navigateToPageType        = typeof(PlayAMoviePage);
                navigationParameterString = string.Format("{0}|{1}", commandMode, movieSearch);
                break;

            default:
                // There is no match for the voice command name.
                break;
            }

            this.EnsureRootFrame(e.PreviousExecutionState);
            if (!this.rootFrame.Navigate(navigateToPageType, navigationParameterString))
            {
                throw new Exception("Failed to create voice command page");
            }
        }
Code Example #9
File: App.xaml.cs Project: lee12180/winiot-uwp
        /// <summary>
        /// Invoked when the application is activated by some means other than normal launching.
        /// </summary>
        /// <param name="args">Event data for the event.</param>
        protected override async void OnActivated(IActivatedEventArgs args)
        {
            Debug.WriteLine("OnActivated()");
            //isActivating = true;
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // The arguments can represent many different activation types. Cast them so we
                // can extract the parameters we care about.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken. See AdventureWorksCommands.xml for
                // the <Command> tags this can be filled with.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                //Action(textSpoken);

                /*
                 * switch (voiceCommandName)
                 * {
                 *  case "lockDevice":
                 *      Action(textSpoken);
                 *      break;
                 *
                 *  default:
                 *      // If we can't determine what page to launch, go to the default entry point.
                 *
                 *      break;
                 * }
                 */
            }
            else if (args.Kind == ActivationKind.Protocol)
            {
                //var commandArgs = args as ProtocolActivatedEventArgs;
                //Windows.Foundation.WwwFormUrlDecoder decoder = new Windows.Foundation.WwwFormUrlDecoder(commandArgs.Uri.Query);
                //var param = decoder.GetFirstValueByName("LaunchContext");
                //EndDevice test = JsonConvert.DeserializeObject<EndDevice>(param);

                //XBeeAction.PowerOff(Convert.ToUInt64(test.MacAddress), test.EndPointId);

                //var protocolEventArgs = args as ProtocolActivatedEventArgs;

                //switch (protocolEventArgs.Uri.Scheme)
                //{
                //    case "main-launchapplist":
                //        NavigationService.Navigate("IOTOIApp.ViewModels.AppListViewModel", "");
                //        break;
                //}
            }
            else
            {
                await ActivationService.ActivateAsync(args);
            }
        }
Code Example #10
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);
            if (args.Kind != Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                return;
            }

            var commandArgs = args as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;

            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

            // Get the name of the voice command and the text spoken
            string voiceCommandName = speechRecognitionResult.RulePath[0];

            switch (voiceCommandName)
            {
            case "toggleLight":
                // Access the value of the {lightColor} and {action} phrase in the voice command
                string lightColor = speechRecognitionResult.SemanticInterpretation.Properties["lightColor"][0];
                string action     = speechRecognitionResult.SemanticInterpretation.Properties["action"][0];

                // Used to pass the data from here to the MainPage.
                LightsControll.SetLightControll(lightColor, action);
                break;

            default:
                break;
            }


            Frame rootFrame = Window.Current.Content as Frame;

            // Do not repeat app initialization when the Window already has content,
            // just ensure that the window is active
            if (rootFrame == null)
            {
                // Create a Frame to act as the navigation context and navigate to the first page
                rootFrame = new Frame();

                rootFrame.NavigationFailed += OnNavigationFailed;
                // Place the frame in the current Window
                Window.Current.Content = rootFrame;
            }

            if (rootFrame.Content == null)
            {
                // When the navigation stack isn't restored navigate to the first page,
                // configuring the new page by passing required information as a navigation
                // parameter
                rootFrame.Navigate(typeof(MainPage));
            }
            // Ensure the current window is active
            Window.Current.Activate();
        }
Code Example #11
        private string SemanticInterpretation(string key, Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult)
        {
            if (speechRecognitionResult.SemanticInterpretation.Properties.ContainsKey(key))
            {
                return(speechRecognitionResult.SemanticInterpretation.Properties[key][0]);
            }
            else
            {
                return("unknown");
            }
        }
Code Example #12
        private async Task SpeakToMachine(object sender, RoutedEventArgs e)
        {
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            objTextBox.Text = speechRecognitionResult.Text;
            OnClick(sender, e);
        }
Code Example #13
        /*private async void OnTextChanging(object sender, TextBoxTextChangingEventArgs e)
         * {
         *  var synth = new SpeechSynthesizer();
         *  var textboxObj = (TextBox)sender;
         *  Windows.Media.SpeechSynthesis.SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(textboxObj.Text);
         *  mediaElement.SetSource(stream, stream.ContentType);
         *  mediaElement.Play();
         *
         *
         *
         * }*/

        private async Task SpeakToComputer(object sender, RoutedEventArgs e)
        {
            Debug.WriteLine("HEELEMOQHNOQOQWGWQGI\n");
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            objTextBox.Text = speechRecognitionResult.Text;
        }
Code Example #14
        public static void HandleSpeechCommand(IActivatedEventArgs args)
        {
            var commandArgs = args as
                              Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;

            Windows.Media.SpeechRecognition.SpeechRecognitionResult
                speechRecognitionResult = commandArgs.Result;

            string textSpoken = speechRecognitionResult.Text;

            CortanaInterop.CortanaText = textSpoken;
        }
Code Example #15
File: Popup.cs Project: slycoderr/ArmyBuilder
        public static async Task<string> GetTextFromSpeech()
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            return(speechRecognitionResult.Text);
        }
Code Example #16
File: SpeechToText.cs Project: BSalita/Woundify
        // As of this time, UWP only offers microphone input to SpeechRecognizer, not file input
        public static async System.Threading.Tasks.Task<string> MicrophoneToTextAsync()
        {
            Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();

            Log.WriteLine("Text:" + speechRecognitionResult.Text);
            return(speechRecognitionResult.Text);
        }
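
Note: this example subscribes to HypothesisGenerated but the handler is not shown. A hedged sketch of a plausible implementation; the event supplies partial text while the user is still speaking:

        private static void SpeechRecognizer_HypothesisGenerated(
            Windows.Media.SpeechRecognition.SpeechRecognizer sender,
            Windows.Media.SpeechRecognition.SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            // Partial recognition text, updated as speech is processed.
            Log.WriteLine("Hypothesis:" + args.Hypothesis.Text);
        }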
Code Example #17
        private async void VoiceIconTapped(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();
            this.UserInput.Text = speechRecognitionResult.Text;
        }
Code Example #18
File: App.xaml.cs Project: watermelonpizza/LightCTRL
        /// <summary>
        /// Invoked when the phone is activated by non normal means, including speech
        /// </summary>
        /// <param name="args">Arguments detailing how the phone was activated</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            if (args.Kind == Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                var commandArgs = args as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                Frame rootFrame = new Frame();
                Window.Current.Content = rootFrame;
                rootFrame.Navigate(typeof(VoiceCommandPage), speechRecognitionResult);

                // Ensure the current window is active
                Window.Current.Activate();
            }
        }
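
Note: the navigation above passes the SpeechRecognitionResult as the page parameter. A hedged sketch of the receiving side (a VoiceCommandPage.xaml.cs implementation is assumed; it is not shown in the source):

        protected override void OnNavigatedTo(Windows.UI.Xaml.Navigation.NavigationEventArgs e)
        {
            base.OnNavigatedTo(e);

            var speechRecognitionResult = e.Parameter as Windows.Media.SpeechRecognition.SpeechRecognitionResult;
            if (speechRecognitionResult != null)
            {
                // Dispatch on the matched rule, as the OnActivated examples do.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
            }
        }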
Code Example #19
        private async void RecognizeWithSRGSGrammarFileConstraintContinuously_Click(object sender, RoutedEventArgs e)
        {
            this.recognizeOnceButton.IsEnabled          = false;
            this.recognizeContinuouslyButton.Visibility = Visibility.Collapsed;
            this.stopRecognizingTextBlock.Visibility    = this.listeningTextBlock.Visibility = Visibility.Visible;
            this.heardYouSayTextBlock.Visibility        = this.resultTextBlock.Visibility = Visibility.Collapsed;

            // Start recognition.
            while (true)
            {
                try
                {
                    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await this.speechRecognizer.RecognizeAsync();

                    // If successful, display the recognition result.
                    if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                    {
                        this.resultTextBlock.Visibility = Visibility.Visible;
                        this.resultTextBlock.Text       = speechRecognitionResult.Text;
                        if (speechRecognitionResult.Text == "stop recognizing")
                        {
                            break;
                        }
                    }
                }
                catch (Exception exception)
                {
                    if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
                    {
                        var messageDialog = new Windows.UI.Popups.MessageDialog("and accept the privacy statement", "Tap \"with UI\" ");
                        await messageDialog.ShowAsync();
                    }
                    else
                    {
                        var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                        await messageDialog.ShowAsync();
                    }
                    break;
                }
            }

            this.recognizeOnceButton.IsEnabled          = true;
            this.recognizeContinuouslyButton.Visibility = Visibility.Visible;
            this.stopRecognizingTextBlock.Visibility    = this.listeningTextBlock.Visibility = Visibility.Collapsed;
            this.heardYouSayTextBlock.Visibility        = this.resultTextBlock.Visibility = Visibility.Collapsed;
        }
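
Note: the loop above restarts RecognizeAsync after every utterance. The same API also offers an event-driven alternative; a hedged sketch using SpeechContinuousRecognitionSession (not the author's code):

        private async Task StartContinuousRecognitionAsync(
            Windows.Media.SpeechRecognition.SpeechRecognizer recognizer)
        {
            await recognizer.CompileConstraintsAsync();

            recognizer.ContinuousRecognitionSession.ResultGenerated += (session, args) =>
            {
                System.Diagnostics.Debug.WriteLine("Heard: " + args.Result.Text);
            };

            // Runs until StopAsync/CancelAsync is called or the session times out.
            await recognizer.ContinuousRecognitionSession.StartAsync();
        }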
Code Example #20
        private async void btnMicFrench_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer =
                    new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language("fr")); // se le puede pasar parámetro de idiioma, ahoria agarra el del sisytem
                await speechRecognizer.CompileConstraintsAsync();

                Windows.Media.SpeechRecognition.SpeechRecognitionResult resultado =
                    await speechRecognizer.RecognizeWithUIAsync();

                txtDescripcionFrances.Text = resultado.Text;
            }
            catch (Exception)
            {
                // Recognition failures (for example, a declined privacy statement) are silently ignored here.
            }
        }
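
Note: constructing a SpeechRecognizer for "fr" throws if no French speech pack is installed. A minimal guard sketch (not in the source) using the recognizer's static language list; Code Example #24 ("es-MX") has the same precondition:

        private static bool IsTopicLanguageInstalled(string languageTag)
        {
            // SupportedTopicLanguages lists the languages available on this machine
            // for dictation and web-search grammars.
            foreach (var language in Windows.Media.SpeechRecognition.SpeechRecognizer.SupportedTopicLanguages)
            {
                if (language.LanguageTag.StartsWith(languageTag, StringComparison.OrdinalIgnoreCase))
                {
                    return true;
                }
            }
            return false;
        }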
Code Example #21
        private async void Button_Click(object sender, Windows.UI.Xaml.RoutedEventArgs e)
        {
            TextStatus.Text = "Listening....";

            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            TextCommand.Text = speechRecognitionResult.Text;

            await SendToBot(TextCommand.Text);
        }
Code Example #22
        private async void VoiceSearchButton_OnClick(object sender, RoutedEventArgs e)
        {
            try
            {
                // Create an instance of SpeechRecognizer.
                var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

                // Listen for audio input issues.
                //speechRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

                // Add a web search grammar to the recognizer.
                var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");


                speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
                speechRecognizer.UIOptions.ExampleText   = @"Ex. 'Play Rahman songs'";
                speechRecognizer.Constraints.Add(webSearchGrammar);

                // Compile the constraint.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                //await speechRecognizer.RecognizeWithUIAsync();

                // Do something with the recognition result.
                var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
                await messageDialog.ShowAsync();
            }
            catch (Exception err)
            {
                // Define a variable that holds the error for the speech recognition privacy policy.
                // This value maps to the SPERR_SPEECH_PRIVACY_POLICY_NOT_ACCEPTED error,
                // as described in the Windows.Phone.Speech.Recognition error codes section later on.
                const int privacyPolicyHResult = unchecked ((int)0x80045509);

                // Check whether the error is for the speech recognition privacy policy.
                if (err.HResult == privacyPolicyHResult)
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog("You will need to accept the speech privacy policy in order to use speech recognition in this app.", "Error");
                    await messageDialog.ShowAsync();
                }
            }
        }
Code Example #23
        private async void RecognizeWithListConstraint_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // You could create any IEnumerable dynamically.
            string[] responses = { "Yes", "No" };

            // Add a list constraint to the recognizer.
            var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");

            speechRecognizer.UIOptions.ExampleText = @"Ex. ""Yes"", ""No""";
            speechRecognizer.Constraints.Add(listConstraint);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text            = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
                {
                    this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text       = "The privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Code Example #24
        private async void btnMicEspaniol_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognizer speechRecognizer =
                    new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language("es-MX")); // se le puede pasar parámetro de idiioma, ahoria agarra el del sisytem
                await speechRecognizer.CompileConstraintsAsync();

                Windows.Media.SpeechRecognition.SpeechRecognitionResult resultado =
                    await speechRecognizer.RecognizeWithUIAsync();

                txtpalespanol.Text = resultado.Text;
            }
            catch (Exception)
            {
                throw;
            }
        }
Code Example #25
        // Speech recognition
        private async void StartRecognizing_Click(object sender, RoutedEventArgs e)
        {
            if (await SpeechRecognition.RequestMicrophonePermission())
            {
                // Create an instance of SpeechRecognizer.
                var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

                // Compile the dictation grammar by default.
                await speechRecognizer.CompileConstraintsAsync();

                // Start recognition.
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // Do something with the recognition result.
                HomePageViewModel.Current.QueryWord(speechRecognitionResult.Text);
                //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
                //await messageDialog.ShowAsync();
            }
        }
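
Note: RequestMicrophonePermission above is a project helper that is not shown. A hedged sketch of the usual implementation, which initializes an audio-only MediaCapture to trigger the microphone consent prompt:

        public static async Task<bool> RequestMicrophonePermission()
        {
            try
            {
                var settings = new Windows.Media.Capture.MediaCaptureInitializationSettings
                {
                    StreamingCaptureMode = Windows.Media.Capture.StreamingCaptureMode.Audio,
                    MediaCategory        = Windows.Media.Capture.MediaCategory.Speech
                };

                var capture = new Windows.Media.Capture.MediaCapture();
                await capture.InitializeAsync(settings);
                return true;
            }
            catch (UnauthorizedAccessException)
            {
                // The user has denied microphone access for this app.
                return false;
            }
        }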
Code Example #26
        private async void RecognizeWithWebSearchGrammar_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. ""weather for London""";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint.
            await speechRecognizer.CompileConstraintsAsync();

            this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Collapsed;

            // Start recognition.
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

                // If successful, display the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    this.heardYouSayTextBlock.Visibility = this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text            = speechRecognitionResult.Text;
                }
            }
            catch (Exception exception)
            {
                if ((uint)exception.HResult == App.HResultPrivacyStatementDeclined)
                {
                    this.resultTextBlock.Visibility = Visibility.Visible;
                    this.resultTextBlock.Text       = "The privacy statement was declined.";
                }
                else
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
Code Example #27
        private static async Task<string> Listen()
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult =
                await speechRecognizer.RecognizeWithUIAsync();

            // Do something with the recognition result.
            //var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
            //await messageDialog.ShowAsync();

            var whatWasSaid = speechRecognitionResult.Text;

            return(whatWasSaid);
        }
Code Example #28
        protected override void OnActivated(IActivatedEventArgs args)
        {
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                var commandArgs = args as VoiceCommandActivatedEventArgs;
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                string voiceCommandName = speechRecognitionResult.RulePath[0];
                if (voiceCommandName == "Search")
                {
                    string item   = speechRecognitionResult.SemanticInterpretation.Properties["Sid"][0];
                    int    result = StudentList.FindItem(int.Parse(item));
                    if (result != -1)
                    {
                        var record = Item1.dataGrid.GetRecordAtRowIndex(result + 1);
                        Item1.dataGrid.SelectedItem = record;
                        Item1.dataGrid.ScrollInView(new Syncfusion.UI.Xaml.ScrollAxis.RowColumnIndex(result + 1, 3));
                    }
                }
            }
        }
Code Example #29
        private async void UxStartSpeechRecognition_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the dictation grammar by default.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            switch (speechRecognitionResult.Text.ToLower())
            {
            case "eins":
            case "1":
                this.Result = 1;
                break;

            case "null":
            case "0":
                this.Result = 0;
                break;

            default:
                this.Result = -1;
                break;
            }

            // Do something with the recognition result. The switch above already
            // classified the input, so only the stored result needs checking here.
            if (this.Result != -1)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog($"Ok drücken zum Fortfahren", $"'{speechRecognitionResult.Text}' erkannt");
                await messageDialog.ShowAsync();
            }
            else
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog($"'{speechRecognitionResult.Text}' erkannt.", "Ungültige Eingabe, bitte nochmal versuchen.");
                await messageDialog.ShowAsync();
            }
        }
Code Example #30
        protected override async void OnActivated(IActivatedEventArgs args)
        {
            // Was the app activated by a voice command?
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // Need to put this here to make sure the main page is displayed.
                Frame rootFrame = Window.Current.Content as Frame;
                if (rootFrame == null)
                {
                    rootFrame              = new Frame();
                    rootFrame.CacheSize    = 1;
                    Window.Current.Content = rootFrame;
                    rootFrame.Navigate(typeof(MainPage));
                }
                Window.Current.Activate();

                var commandArgs = args as VoiceCommandActivatedEventArgs;
                if (commandArgs != null)
                {
                    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                    // What command was issued?
                    string voiceCommandName = speechRecognitionResult.RulePath[0];

                    if (voiceCommandName == "ShowMeAll")
                    {
                        // TODO - find a better way to access the list name from the phrase list.
                        // Get the name of the list the user wants.
                        string textSpoken = speechRecognitionResult.Text;
                        string listName   = textSpoken.Substring(textSpoken.LastIndexOf(" ", StringComparison.Ordinal)).Trim();

                        // Pull back the list items and display them on the UI
                        // TODO - need a better way to update the Main Page than this.
                        var announcements = await SharePoint.GetListItems(listName);

                        await Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => MainPage.Instance.UpdateListView(announcements));
                    }
                }
            }
        }
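
Note: the TODO in Code Example #30 concerns the VCD phrase list that feeds the "ShowMeAll" command. A hedged sketch of updating such a phrase list at runtime; "CommandSet_en-us" and "listName" are assumed VCD names, not taken from the source:

        private static async Task UpdateListNamePhrasesAsync(System.Collections.Generic.IEnumerable<string> listNames)
        {
            Windows.ApplicationModel.VoiceCommands.VoiceCommandDefinition definition;
            if (Windows.ApplicationModel.VoiceCommands.VoiceCommandDefinitionManager
                .InstalledCommandDefinitions.TryGetValue("CommandSet_en-us", out definition))
            {
                // Replace the contents of the {listName} phrase list so newly
                // added list names become recognizable.
                await definition.SetPhraseListAsync("listName", listNames);
            }
        }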