// Click handler: runs a one-shot, UI-driven speech recognition session using the
// built-in web-search topic grammar and shows the recognized text in a dialog.
// async void is acceptable here only because this is a top-level event handler.
private async void BtnSpeechRecogWeatherSearchAsync_Click(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Add a web search grammar to the recognizer.
            var webSearchGrammar = new Windows.Media.SpeechRecognition.SpeechRecognitionTopicConstraint(Windows.Media.SpeechRecognition.SpeechRecognitionScenario.WebSearch, "webSearch");

            // Prompt/example text shown by the system recognition UI.
            speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
            speechRecognizer.UIOptions.ExampleText   = @"Ex. 'weather for London'";
            speechRecognizer.Constraints.Add(webSearchGrammar);

            // Compile the constraint; recognition cannot start until this completes.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition with the system-provided UI.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // BUG FIX: only show the result when recognition actually succeeded —
            // the user may have cancelled the UI or audio capture may have failed,
            // in which case Text is not meaningful.
            if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
                await messageDialog.ShowAsync();
            }
        }
示例#2
0
        /// <summary>
        /// Handles activation via Cortana voice command or search, then ensures
        /// the current window is active.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected async override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // If the app was launched via a Voice Command, this corresponds to the "show trip to <location>" command.
            // Protocol activation occurs when a tile is clicked within Cortana (via the background task)
            if (args.Kind == ActivationKind.VoiceCommand || args.Kind == ActivationKind.Search)
            {
                await AuthenticateUser();

                // BUG FIX: for ActivationKind.Search the args are NOT a
                // VoiceCommandActivatedEventArgs, so the old unconditional "as"
                // cast produced null and dereferencing commandArgs.Result threw
                // a NullReferenceException. Pattern-match instead.
                if (args is VoiceCommandActivatedEventArgs commandArgs)
                {
                    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                    // Get the name of the voice command and the text spoken. See AdventureWorksCommands.xml for
                    // the <Command> tags this can be filled with.
                    string voiceCommandName = speechRecognitionResult.RulePath[0];
                    string textSpoken       = speechRecognitionResult.Text;

                    // The commandMode is either "voice" or "text", and it indicates how the voice command
                    // was entered by the user.
                    // Apps should respect "text" mode by providing feedback in silent form.
                    string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);
                }
            }
            // Repeat basic initialization whether or not the app is already active.
            Frame rootFrame = Window.Current.Content as Frame;

            // Ensure the current window is active
            Window.Current.Activate();
        }
示例#3
0
文件: App.xaml.cs 项目: Eynorey/zmo
        /// <summary>
        /// Handles Cortana voice-command activation: stores the spoken text in
        /// roaming settings for the "zensurNeuCortana" command, then launches.
        /// </summary>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            // Only activation triggered by a voice command is handled here.
            if (args.Kind != Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                return;
            }

            var commandArgs = args as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;
            Windows.Media.SpeechRecognition.SpeechRecognitionResult recoResult = commandArgs.Result;

            // Name of the matched voice command plus its semantic properties.
            string commandName = recoResult.RulePath[0];
            var    properties  = recoResult.SemanticInterpretation.Properties;

            if (commandName == "zensurNeuCortana")
            {
                // Stash the spoken text so the app can pick it up after launch.
                ApplicationData.Current.RoamingSettings.Values["cortanaText"] = recoResult.Text;
            }

            // Launch regardless of whether the command matched a known case
            // (the original switch called Launch() from every branch).
            Launch();
        }
示例#4
0
        /// <summary>
        /// Dispatches a Cortana voice-command activation to the matching
        /// navigation command; unknown command names are ignored.
        /// </summary>
        /// <param name="commandArgs">The voice-command activation arguments.</param>
        public void HandleVoiceRequest(VoiceCommandActivatedEventArgs commandArgs)
        {
            Windows.Media.SpeechRecognition.SpeechRecognitionResult recoResult = commandArgs.Result;

            // Name of the matched <Command> (see AdventureWorksCommands.xml)
            // and the raw recognized text.
            string commandName = recoResult.RulePath[0];
            string spokenText  = recoResult.Text;

            // "voice" or "text" — how the user entered the command; apps should
            // answer "text" mode silently.
            string commandMode = this.SemanticInterpretation("commandMode", recoResult);

            if (commandName == "openBookmarks")
            {
                new NavigateToBookmarksCommand().Execute(null);
            }
            else if (commandName == "openPrivateMessages")
            {
                new NavigateToPrivateMessageListPageCommand().Execute(null);
            }
            else if (commandName == "lowtaxIsAJerk")
            {
                new NavigateToNewPrivateMessagePageLowtaxCommand().Execute(null);
            }
        }
        /// <summary>
        /// Enables or disables speech commanding based on the "action" semantic
        /// property of a recognition result, raising Activated when the state
        /// actually changes.
        /// </summary>
        private void ProcessSpeechStatusChangeCommand(SR.SpeechRecognitionResult result)
        {
            bool activationStatus = IsActive;

            // IDIOM FIX: TryGetValue replaces the Keys.Contains(...) check
            // followed by the indexer (a double lookup).
            if (result.SemanticInterpretation.Properties.TryGetValue("action", out var actions))
            {
                var intent = actions[0];
                Debug.WriteLine($"--> Voice reco status change request: {intent}");

                switch (intent)
                {
                case "ActivateSpeechReco":
                    IsActive = true;
                    break;

                case "StopSpeechReco":
                    IsActive = false;
                    break;

                default:
                    // Unknown action: leave the current state untouched.
                    break;
                }

                if (activationStatus != IsActive)
                {
                    _commandingConstraint.IsEnabled = IsActive;

                    // Raise the event; ROBUSTNESS: null-conditional invoke guards
                    // against the no-subscribers case.
                    Activated?.Invoke(this, IsActive);
                }
            }
        }
示例#6
0
        /// <summary>
        /// Click handler: runs a one-shot dictation session with the system UI
        /// and shows the recognized text in a message dialog.
        /// </summary>
        async void clickStart(object sender, RoutedEventArgs e)
        {
            // Create an instance of SpeechRecognizer.
            var speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

            // Compile the (default) dictation grammar; required before recognizing.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition with the system-provided UI.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // BUG FIX: only show the result when recognition succeeded — the
            // user may have cancelled the recognition dialog.
            if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Text spoken");
                await messageDialog.ShowAsync();
            }
        }
示例#7
0
        /// <summary>
        /// Handles non-launch activations: toast notifications (when the app is
        /// not already running) and Cortana voice commands.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected async override Task OnActivateApplicationAsync(IActivatedEventArgs args)
        {
            if (args.Kind == ActivationKind.ToastNotification && args.PreviousExecutionState != ApplicationExecutionState.Running)
            {
                // Handle a toast notification here.
                // Dev center, local toast, and Azure notification hub all activate with
                // ActivationKind.ToastNotification, so you may have to parse the toast
                // data to determine where it came from and what action to take.
                // BUG FIX: the launch task was fired and forgotten; await it so failures
                // propagate and activation completes in order (this also removes the
                // CS1998 "async method lacks await" warning).
                // NOTE(review): "args as LaunchActivatedEventArgs" yields null for a
                // toast activation — confirm OnLaunchApplicationAsync tolerates null.
                await OnLaunchApplicationAsync(args as LaunchActivatedEventArgs);
            }
            else if (args.Kind == ActivationKind.VoiceCommand)
            {
                // Event args can represent many different activation types.
                // Cast it so we can get the parameters we care about out.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken.
                // See VoiceCommands.xml for supported voice commands.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                // commandMode indicates whether the command was entered using speech or text.
                // Apps should respect text mode by providing silent (text) feedback.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);
                NavigationService.Navigate(PageTokens.JogosPage, "hoje");
            }
        }
        /// <summary>
        /// Runs one UI-driven recognition pass and routes the recognized text:
        /// "ホーム" (home) navigates the web view to StartURL; any other text is
        /// placed in the text box and, when it matches a list item, selected.
        /// </summary>
        private async System.Threading.Tasks.Task VoiceRecognition()
        {
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult
                    = await speechRecognizer.RecognizeWithUIAsync();

                // Put the recognized text into a local for routing below.
                String result = speechRecognitionResult.Text;

                // Search the list and select the item matching the spoken text.
                if (!string.IsNullOrEmpty(result))
                {
                    if (result == "ホーム")
                    {
                        webview.Navigate(StartURL);
                    }
                    else
                    {
                        textBox.Text = result;
                        foreach (var item in listView.Items)
                        {
                            if ((String)item == result)
                            {
                                listView.SelectedItem = item;
                            }
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                // ROBUSTNESS: keep the deliberate best-effort behavior (speech
                // recognition can fail for transient reasons), but log the failure
                // instead of silently swallowing every exception.
                System.Diagnostics.Debug.WriteLine("VoiceRecognition failed: " + ex.Message);
            }
        }
示例#9
0
        /// <summary>
        /// Handles voice-command activation: passes the recognized (or semantic
        /// "name") text to ReadText — presumably text-to-speech, confirm — and,
        /// for the "voicecommand" command, hosts a new frame navigated to
        /// MainPage when the window has no content yet.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                VoiceCommandActivatedEventArgs voiceCommandArgs = args as VoiceCommandActivatedEventArgs;
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = voiceCommandArgs.Result;
                // Raw spoken text and the name of the matched voice command.
                String text             = speechRecognitionResult.Text;
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                switch (voiceCommandName)
                {
                case "voicecommand":
                    // Prefer the semantic "name" value over the raw text when present.
                    // NOTE(review): if SemanticInterpretation follows the common
                    // pattern of returning "unknown" (never null) for missing keys,
                    // this null check never skips — confirm against that helper.
                    string newtxt = this.SemanticInterpretation("name", speechRecognitionResult);
                    if (newtxt != null)
                    {
                        text = newtxt;
                    }
                    ReadText(new MediaElement(), text);
                    Frame rootFrame = Window.Current.Content as Frame;

                    // Do not repeat app initialization when the Window already has content,
                    // just ensure that the window is active
                    if (rootFrame == null)
                    {
                        // Create a Frame to act as the navigation context and navigate to the first page
                        rootFrame = new Frame();


                        rootFrame.NavigationFailed += OnNavigationFailed;
                        rootFrame.Navigate(typeof(MainPage), speechRecognitionResult);
                        // Place the frame in the current Window
                        Window.Current.Content = rootFrame;
                    }
                    else
                    {
                        // NOTE(review): this assigns Window.Current.Content back to
                        // itself (rootFrame was read from Window.Current.Content),
                        // so the branch is effectively a no-op — confirm intent.
                        Window.Current.Content = rootFrame;
                    }

                    // Since we're expecting to always show a details page, navigate even if
                    // a content frame is in place (unlike OnLaunched).
                    // Navigate to either the main trip list page, or if a valid voice command
                    // was provided, to the details page for that trip.


                    // Ensure the current window is active
                    Window.Current.Activate();
                    break;

                default:
                    // Unknown command: just pass the recognized text to ReadText.
                    ReadText(new MediaElement(), text);
                    break;
                }
            }
            else if (args.Kind == ActivationKind.Protocol)
            {
                // NOTE(review): protocol activation is currently unhandled — confirm
                // whether this is intentional.
            }
        }
示例#10
0
 /// <summary>
 /// Raises SpeechCommandTriggered when the recognition result carries an
 /// "action" semantic property; results without one are ignored.
 /// </summary>
 private void ProcessSpeechCommand(SR.SpeechRecognitionResult result)
 {
     // IDIOM FIX: TryGetValue replaces the Keys.Contains(...) check followed
     // by the indexer (a double lookup).
     if (result.SemanticInterpretation.Properties.TryGetValue("action", out var actions))
     {
         var intentArgs = new SpeechIntentArgs(actions[0]);
         Debug.WriteLine($"--> Voice reco intent: {intentArgs.Intent}");

         // Raise the event; ROBUSTNESS: null-conditional invoke guards against
         // the no-subscribers case.
         SpeechCommandTriggered?.Invoke(this, intentArgs);
     }
 }
示例#11
0
        /// <summary>
        /// Click handler: runs one UI-driven recognition pass and appends the
        /// recognized text (space-separated) to the comment text box.
        /// </summary>
        private async void StartVoiceRecognition_Click(object sender, RoutedEventArgs e)
        {
            // Remember what is already in the comment box.
            string existingText = textBox_comment.Text;

            // Start recognition with the system speech UI.
            var recognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            // Append the recognized text to the existing comment.
            textBox_comment.Text = existingText + " " + recognitionResult.Text;
        }
 /// <summary>
 /// Returns the first semantic-interpretation value stored under
 /// <paramref name="key"/>, or "unknown" when the key is absent.
 /// </summary>
 /// <param name="key">Semantic property name (e.g. "commandMode").</param>
 /// <param name="speechRecognitionResult">The recognition result to inspect.</param>
 /// <returns>The first value for the key, or "unknown".</returns>
 private string SemanticInterpretation(string key, Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult)
 {
     // IDIOM FIX: TryGetValue replaces the ContainsKey check followed by the
     // indexer (a double lookup).
     if (speechRecognitionResult.SemanticInterpretation.Properties.TryGetValue(key, out var values))
     {
         return values[0];
     }

     return "unknown";
 }
示例#13
0
        /// <summary>
        /// Handles voice-command and protocol activations, then activates the window.
        /// NOTE(review): navigationToPageType is assigned in every branch but never
        /// read — no Navigate call is made, so the selected page is never shown.
        /// Confirm whether rootFrame.Navigate(navigationToPageType) was intended
        /// before Window.Current.Activate().
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected override async void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);
            // Debug aid: surface how the app was activated.
            var message = new MessageDialog("Activated by: " + args.Kind.ToString());
            await message.ShowAsync();

            Type navigationToPageType;

            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // Event args can represent many activation types; cast to reach
                // the voice-command parameters.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Matched command name and raw spoken text.
                string voiceCommand = speechRecognitionResult.RulePath[0];
                string textSpoken   = speechRecognitionResult.Text;

                // "voice" or "text": how the user entered the command.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

                switch (commandMode)
                {
                default:
                    navigationToPageType = typeof(DetailsPage);
                    break;
                }
            }
            else if (args.Kind == ActivationKind.Protocol)
            {
                var commandArgs = args as ProtocolActivatedEventArgs;

                // NOTE(review): decoder is built from the URI query but never read
                // — confirm whether query parameters were meant to be used.
                WwwFormUrlDecoder decoder = new WwwFormUrlDecoder(commandArgs.Uri.Query);

                navigationToPageType = typeof(DetailsPage);
            }
            else
            {
                // Any other activation falls back to the main page type.
                navigationToPageType = typeof(MainPage);
            }

            // Host a navigation frame if the window has no content yet.
            if (!(Window.Current.Content is Frame rootFrame))
            {
                rootFrame = new Frame();
                rootFrame.NavigationFailed += RootFrame_NavigationFailed;
                Window.Current.Content      = rootFrame;
            }

            // Ensure the current window is active.
            Window.Current.Activate();
        }
        /// <summary>
        /// Entry point for an application activated by some means other than
        /// normal launching (voice commands, URI, share target, and so on).
        ///
        /// NOTE:
        /// A previous version of the VCD file might remain in place if you modify
        /// it and update the app through the store, so activations can include
        /// commands from older versions of your VCD. Handle these gracefully.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Only voice-command activation is handled by this app.
            if (args.Kind != ActivationKind.VoiceCommand)
            {
                return;
            }

            // Event args can represent many activation types; cast to reach the
            // voice-command parameters.
            var commandArgs = args as VoiceCommandActivatedEventArgs;
            Windows.Media.SpeechRecognition.SpeechRecognitionResult recoResult = commandArgs.Result;

            // Matched command name and raw spoken text (see VoiceCommands.xml).
            string commandName = recoResult.RulePath[0];
            string spokenText  = recoResult.Text;

            // "voice" or "text": how the command was entered. Text mode should be
            // answered silently.
            string commandMode = this.SemanticInterpretation("commandMode", recoResult);

            Frame rootFrame = Window.Current.Content as Frame;

            if (rootFrame == null)
            {
                // No content yet: create the navigation frame and host it in the
                // current window.
                rootFrame = new Frame();
                rootFrame.NavigationFailed += OnNavigationFailed;
                Window.Current.Content = rootFrame;
            }

            rootFrame.Navigate(typeof(MainPage), commandName);

            Window.Current.Activate();
        }
示例#15
0
        /// <summary>
        /// Handles Cortana voice-command activation and navigates to PivotPage
        /// with the matched command name as parameter.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            // Was the app activated by a voice command?
            if (args.Kind == Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                var commandArgs = args as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // If so, get the name of the voice command, the actual text spoken, and the value of Command/Navigate@Target.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;
                // NOTE(review): assumes the VCD always supplies NavigationTarget;
                // otherwise this indexer throws — confirm against the VCD file.
                string navigationTarget = speechRecognitionResult.SemanticInterpretation.Properties["NavigationTarget"][0];

                // BUG FIX: when the app is cold-started by the voice command the
                // window has no content yet, so Window.Current.Content is null and
                // the old unconditional rootFrame.Navigate call threw a
                // NullReferenceException. Create and host a frame when missing.
                Frame rootFrame = Window.Current.Content as Frame;
                if (rootFrame == null)
                {
                    rootFrame = new Frame();
                    Window.Current.Content = rootFrame;
                }

                rootFrame.Navigate(typeof(PivotPage), voiceCommandName);

                // Ensure the current window is active (harmless when it already is).
                Window.Current.Activate();
            }
        }
示例#16
0
        /// <summary>
        /// Entry point for an application activated by some means other than normal launching.
        /// This includes voice commands, URI, share target from another app, and so on.
        ///
        /// NOTE:
        /// A previous version of the VCD file might remain in place
        /// if you modify it and update the app through the store.
        /// Activations might include commands from older versions of your VCD.
        /// Try to handle these commands gracefully.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Voice command activation.
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // Event args can represent many different activation types.
                // Cast it so we can get the parameters we care about out.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken.
                // See VoiceCommands.xml for supported voice commands.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                // commandMode indicates whether the command was entered using speech or text.
                // Apps should respect text mode by providing silent (text) feedback.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

                // CLEANUP: removed the unused local "Type navigationToPageType"
                // (it was never assigned or read).
                switch (voiceCommandName)
                {
                case "readyToCode":
                    // Placeholder: no action implemented for this command yet.
                    break;

                case "doneCoding":
                    // Placeholder: no action implemented for this command yet.
                    break;

                default:
                    // Unknown/legacy command from an older VCD: ignore gracefully.
                    break;
                }
            }
            // Protocol activation occurs when a card is clicked within Cortana (using a background task).

            // Ensure the current window is active
            Window.Current.Activate();
        }
示例#17
0
        /// <summary>
        /// Runs one UI-driven recognition pass and returns the recognized text,
        /// or the empty string on cancellation, non-success status, or failure.
        /// </summary>
        /// <returns>The recognized text, or <see cref="string.Empty"/>.</returns>
        public async Task <string> StartListeningAsync()
        {
            try
            {
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await this.Recognizer.RecognizeWithUIAsync();

                // If successful, return the recognition result.
                if (speechRecognitionResult.Status == Windows.Media.SpeechRecognition.SpeechRecognitionResultStatus.Success)
                {
                    return speechRecognitionResult.Text;
                }

                return string.Empty;
            }
            catch (Exception ex)
            {
                // ROBUSTNESS: keep the best-effort empty-string contract, but log
                // the failure instead of silently discarding the unused exception.
                System.Diagnostics.Debug.WriteLine("StartListeningAsync failed: " + ex.Message);
                return string.Empty;
            }
        }
示例#18
0
        /// <summary>
        /// Handles Cortana voice-command activation: resolves the spoken command
        /// to a target page and navigates there on the root frame.
        /// </summary>
        /// <param name="e">Details about the activation method.</param>
        protected override void OnActivated(IActivatedEventArgs e)
        {
            if (e.Kind != Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                return;
            }

            var commandArgs = e as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;

            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

            // Command mode can be either text entry or speech entry.
            string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

            // Get the name of the voice command, the actual text spoken, and the value of Command/Navigate@Target.
            string voiceCommandName = speechRecognitionResult.RulePath[0];
            string textSpoken       = speechRecognitionResult.Text;
            string navigationTarget = this.SemanticInterpretation("NavigationTarget", speechRecognitionResult);

            Type   navigateToPageType        = typeof(MainPage);
            string navigationParameterString = string.Empty;

            switch (voiceCommandName)
            {
            case "CallToAction":

                navigateToPageType        = typeof(MainPage);
                navigationParameterString = "";
                break;
            }

            this.EnsureRootFrame(e.PreviousExecutionState);
            if (!this.rootFrame.Navigate(navigateToPageType, navigationParameterString))
            {
                // IDIOM FIX: throw a specific exception type instead of the base
                // Exception; existing catch(Exception) handlers still catch it.
                throw new InvalidOperationException("Sprachbefehle konnte nicht entgegen genommen werden");
            }
        }
示例#19
0
        /// <summary>
        /// Runs one UI-driven recognition pass, stores the recognized text (with a
        /// leading newline) in TextSaid, and appends it to
        /// <paramref name="speechText"/>.
        /// NOTE(review): async void means callers cannot await completion or
        /// observe exceptions; consider returning Task in a future revision.
        /// (Name kept as "CallAssitant" — renaming would break existing callers.)
        /// </summary>
        /// <param name="speechText">Text block to append the recognized text to.</param>
        public async void CallAssitant(TextBlock speechText)
        {
            // Compile the default dictation grammar; required before recognizing.
            await speechRecognizer.CompileConstraintsAsync();

            // Start recognition with the system UI.
            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();

            TextSaid        = "\n" + speechRecognitionResult.Text;
            speechText.Text = speechText.Text + TextSaid;
        }
示例#20
0
        /// <summary>
        /// Handles Cortana voice-command activation and dispatches the "switch"
        /// and "volume" commands to handleCommand.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected async override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // If the app was launched via a Voice Command, this corresponds to the "show trip to <location>" command.
            // Protocol activation occurs when a tile is clicked within Cortana (via the background task)
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // The arguments can represent many different activation types. Cast it so we can get the
                // parameters we care about out.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken. See AdventureWorksCommands.xml for
                // the <Command> tags this can be filled with.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                Debug.WriteLine("Command: " + voiceCommandName);
                Debug.WriteLine("Text spoken: " + textSpoken);

                switch (voiceCommandName)
                {
                // CONSISTENCY: the two known commands had identical bodies, so
                // they now fall through to a single handleCommand call.
                case "switch":
                case "volume":
                    handleCommand(commandArgs.Result.SemanticInterpretation.Properties, voiceCommandName);
                    break;

                default:
                    // BUG FIX: the old message ended at "Unknown command: " and
                    // never included the actual command name.
                    Debug.WriteLine("Unknown command: " + voiceCommandName);
                    break;
                }
            }
        }
        /// <summary>
        /// Entry point for an application activated by some means other than normal launching.
        /// This includes voice commands, URI, share target from another app, and so on.
        ///
        /// NOTE:
        /// A previous version of the VCD file might remain in place
        /// if you modify it and update the app through the store.
        /// Activations might include commands from older versions of your VCD.
        /// Try to handle these commands gracefully.
        /// </summary>
        /// <param name="args">Details about the activation method.</param>
        protected override async void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Page type to navigate to; chosen per activation kind below.
            Type navigationToPageType;

            // Navigation payload handed to the target page; stays null unless a
            // voice command was recognized.
            VoiceCommandObjects.VoiceCommand navCommand = null;

            // Voice command activation.
            if (args.Kind == Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                // Event args can represent many different activation types.
                // Cast it so we can get the parameters we care about out.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult
                    speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken.
                // See VoiceCommands.xml for supported voice commands.
                string voiceCommand = speechRecognitionResult.RulePath[0];
                string textSpoken   = speechRecognitionResult.Text;

                // commandMode indicates whether the command was entered using speech or text.
                // Apps should respect text mode by providing silent (text) feedback.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

                switch (voiceCommand)
                {
                case "addNewNote":

                    // Create a navigation command object to pass to the page.
                    navCommand                  = new VoiceCommandObjects.VoiceCommand();
                    navCommand.CommandMode      = commandMode;
                    navCommand.VoiceCommandName = voiceCommand;
                    navCommand.TextSpoken       = textSpoken;

                    // Set the page to navigate to for this voice command.
                    // App is a single page app at this time.
                    navigationToPageType = typeof(MainPage);
                    break;

                case "addNewNoteForPerson":

                    // Create a navigation command object to pass to the page.
                    // Access the value of the {person} phrase in the voice command
                    string noteOwner = this.SemanticInterpretation("person", speechRecognitionResult);
                    navCommand                  = new VoiceCommandObjects.VoiceCommand();
                    navCommand.CommandMode      = commandMode;
                    navCommand.VoiceCommandName = voiceCommand;
                    navCommand.TextSpoken       = textSpoken;
                    navCommand.NoteOwner        = noteOwner;

                    // Set the page to navigate to for this voice command.
                    // App is a single page app at this time.
                    navigationToPageType = typeof(MainPage);
                    break;

                default:
                    // If we can't determine what page to launch, go to the default entry point.
                    navigationToPageType = typeof(MainPage);
                    break;
                }
            }
            // Protocol activation occurs when a card is clicked within Cortana (using a background task).
            else if (args.Kind == ActivationKind.Protocol)
            {
                // No background service at this time.
                navigationToPageType = typeof(MainPage);
            }
            else
            {
                // If we were launched via any other mechanism, fall back to the main page view.
                // Otherwise, we'll hang at a splash screen.
                navigationToPageType = typeof(MainPage);
            }

            // Repeat the same basic initialization as OnLaunched() above,
            // taking into account whether or not the app is already active.
            Frame rootFrame = Window.Current.Content as Frame;

            // Do not repeat app initialization when the Window already has content,
            // just ensure that the window is active
            if (rootFrame == null)
            {
                // Create a Frame to act as the navigation context and navigate to the first page
                rootFrame = new Frame();

                rootFrame.NavigationFailed += OnNavigationFailed;

                // Load model/settings only on a cold start (not when resuming from
                // Running or Suspended). NOTE(review): presumably this is heavy
                // one-time initialization — confirm it is safe to skip on resume.
                if (args.PreviousExecutionState != ApplicationExecutionState.Running &&
                    args.PreviousExecutionState != ApplicationExecutionState.Suspended)
                {
                    await LoadModelAndSettingsAsync();
                }

                // Place the frame in the current Window
                Window.Current.Content = rootFrame;
            }

            // Since we're expecting to always show the home page, navigate even if
            // a content frame is in place (unlike OnLaunched).
            rootFrame.Navigate(navigationToPageType, navCommand);

            // Ensure the current window is active
            Window.Current.Activate();
        }
示例#22
0
        /// <summary>
        /// Entry point for activation via means other than a normal launch; here we
        /// handle Cortana voice-command activation (see VoiceCommands.xml) and then
        /// perform the same root-frame setup as OnLaunched().
        /// </summary>
        /// <param name="args">Details about the activation, including the speech
        /// recognition result when activated by a voice command.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Voice command activation.
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // Event args can represent many different activation types.
                // Cast it so we can get the parameters we care about out.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken.
                // See VoiceCommands.xml for supported voice commands.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                // commandMode indicates whether the command was entered using speech or text.
                // Apps should respect text mode by providing silent (text) feedback.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

                switch (voiceCommandName)
                {
                case "howIsMood":
                    // Access the value of the {mood} phrase in the voice command.
                    // NOTE(review): the value is not consumed yet — navigation for
                    // this command has not been implemented in this sample.
                    string mood = this.SemanticInterpretation("mood", speechRecognitionResult);
                    break;

                default:
                    // If we can't determine what page to launch, go to the default entry point.
                    break;
                }
            }


            // Repeat the same basic initialization as OnLaunched() above, taking into account whether
            // or not the app is already active.
            Frame rootFrame = Window.Current.Content as Frame;

            // Do not repeat app initialization when the Window already has content,
            // just ensure that the window is active.
            if (rootFrame == null)
            {
                // Create a frame to act as the navigation context and navigate to the first page.
                rootFrame = new Frame();
                rootFrame.NavigationFailed += OnNavigationFailed;

                // Place the frame in the current window.
                Window.Current.Content = rootFrame;
            }

            // Ensure the current window is active
            Window.Current.Activate();
        }
示例#23
0
        /// <summary>
        /// Handles activation through non-standard launch paths: toast notification
        /// clicks and Cortana voice commands. On first activation it also builds the
        /// root frame, registers for push notifications and background execution,
        /// and configures the window chrome (status bar / title bar).
        /// </summary>
        /// <param name="e">Details about the activation, including its kind,
        /// previous execution state, and any toast or voice-command arguments.</param>
        protected async override void OnActivated(IActivatedEventArgs e)
        {
            // NOTE(review): unlike the other OnActivated overrides in this file,
            // base.OnActivated(e) is never called here — confirm that is intentional.

            // Get the root frame
            Frame rootFrame = Window.Current.Content as Frame;

            // Do not repeat app initialization when the Window already has content,
            // just ensure that the window is active
            if (rootFrame == null)
            {
                // Create a Frame to act as the navigation context and navigate to the first page
                rootFrame = new Frame();

                rootFrame.NavigationFailed += OnNavigationFailed;
                //rootFrame.Navigated += OnNavigated;

                if (e.PreviousExecutionState == ApplicationExecutionState.Terminated)
                {
                    //TODO: Load state from previously suspended application
                }

                // Place the frame in the current Window
                Window.Current.Content = rootFrame;


                // Register the app's notification channel with the Store engagement service.
                // NOTE(review): this async call is fire-and-forget (not awaited), so any
                // failure is silently dropped — confirm that is acceptable.
                StoreServicesEngagementManager engagementManager = StoreServicesEngagementManager.GetDefault();
                engagementManager.RegisterNotificationChannelAsync();



                await BackgroundExecutionManager.RequestAccessAsync();

                // Replace any previously registered background tasks with a single
                // time-triggered task (every 1440 minutes, i.e. once a day) that only
                // runs while the user is not present.
                unregisterBackgroundTasks();
                var backgroundTask = RegisterBackgroundTask("tasks.Class1", "Class1", new TimeTrigger(1440, false), new SystemCondition(SystemConditionType.UserNotPresent));



                // Apply the preferred launch size and minimum window size.
                var appView = Windows.UI.ViewManagement.ApplicationView.GetForCurrentView();
                setLaunchViewSize(appView);
                setMinAppSize(appView);

                // On device families with a status bar (e.g. phones), color it to
                // match the app's accent/chrome theme resources.
                if (Windows.Foundation.Metadata.ApiInformation.IsTypePresent("Windows.UI.ViewManagement.StatusBar"))
                {
                    var statusBar = Windows.UI.ViewManagement.StatusBar.GetForCurrentView();
                    statusBar.BackgroundOpacity = 1;
                    statusBar.ForegroundColor   = ((SolidColorBrush)Application.Current.Resources["SystemControlForegroundAccentBrush"]).Color;
                    statusBar.BackgroundColor   = ((SolidColorBrush)Application.Current.Resources["SystemControlBackgroundChromeMediumLowBrush"]).Color;
                }

                var qualifiers = Windows.ApplicationModel.Resources.Core.ResourceContext.GetForCurrentView().QualifierValues;

                // On desktop, extend the view into the title bar and make the caption
                // buttons transparent with accent-colored glyphs.
                if (qualifiers.ContainsKey("DeviceFamily") && qualifiers["DeviceFamily"] == "Desktop")
                {
                    var accentColorBrush = new SolidColorBrush((Color)Application.Current.Resources["SystemAccentColor"]).Color;
                    ApplicationViewTitleBar formattableTitleBar = appView.TitleBar;
                    formattableTitleBar.ButtonBackgroundColor         = Colors.Transparent;
                    formattableTitleBar.ButtonInactiveBackgroundColor = Colors.Transparent;
                    formattableTitleBar.ButtonForegroundColor         = accentColorBrush;
                    CoreApplicationViewTitleBar coreTitleBar = CoreApplication.GetCurrentView().TitleBar;
                    coreTitleBar.ExtendViewIntoTitleBar = true;
                    appView.SetDesiredBoundsMode(ApplicationViewBoundsMode.UseCoreWindow);
                }
            }



            // Wrap the root frame in the app's navigation service on first use.
            if (rootFrame.Content == null)
            {
                NavService = new Navigation(ref rootFrame);
            }


            // Handle toast activation
            if (e is ToastNotificationActivatedEventArgs)
            {
                var toastActivationArgs = e as ToastNotificationActivatedEventArgs;

                // Parse the query string
                string args = toastActivationArgs.Argument;

                // See what action is being requested
                if (args == "comeBack" || args == "Yes")
                {
                    logger.Log("Launched app from encouraging toast");
                    App.NavService.NavigateTo(typeof(MainPage), "comeBack");
                }
            }



            // TODO: Handle other types of activation

            if (e.Kind == ActivationKind.VoiceCommand)
            {
                // Event args can represent many different activation types.
                // Cast it so we can get the parameters we care about out.
                var commandArgs = e as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken.
                // See VoiceCommands.xml for supported voice commands.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                // commandMode indicates whether the command was entered using speech or text.
                // Apps should respect text mode by providing silent (text) feedback.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

                switch (voiceCommandName)
                {
                case "createNewGoal":
                    // Pull the goal name and target out of the recognized phrase and
                    // pass them to the "add new goal" page.
                    const string nullValue   = "(null)";
                    string       goalName    = this.SemanticInterpretation("goalName", speechRecognitionResult);
                    string       target      = this.SemanticInterpretation("target", speechRecognitionResult);
                    string[]     goalDetails = new string[] { goalName, target };



                    // Create a navigation command object to pass to the page.
                    NavService.NavigateTo(typeof(addNewGoalPage), goalDetails);
                    // Trim the most recent back-stack entry unless it is the main
                    // page, so Back does not return to a stale page.
                    if (rootFrame.BackStackDepth > 0)
                    {
                        if (rootFrame.BackStack.Last().SourcePageType != typeof(MainPage))
                        {
                            int backStackSize = rootFrame.BackStackDepth;
                            rootFrame.BackStack.RemoveAt(backStackSize - 1);
                        }
                    }

                    break;

                case "showGoalInProgress":

                    // goalName is reused here: switch sections share one scope in C#.
                    goalName = this.SemanticInterpretation("goalInProgress", speechRecognitionResult);

                    try
                    {
                        // Navigate to the goal's page only if a goal with that name exists.
                        int itemCount = goal.listOfGoals.Where(item => item.name == goalName).Count();
                        if (itemCount > 0)
                        {
                            var goalInContext = goal.listOfGoals.Where(item => item.name == goalName).First();
                            NavService.NavigateTo(typeof(selectedGoalPage), goalInContext);
                        }
                    }
                    catch (Exception)
                    {
                        // If the lookup fails for any reason, fall back to the main
                        // page with an error context.
                        NavService.NavigateTo(typeof(MainPage), "cortanaFailed");
                    }


                    break;

                default:
                    NavService.NavigateTo(typeof(MainPage), "cortanaFailed");
                    break;
                }
            }

            // Ensure the current window is active
            Window.Current.Activate();
        }
示例#24
0
        /// <summary>
        /// Starts a single speech-recognition session using the iOS Speech framework
        /// (SFSpeechRecognizer) fed by the shared AVAudioEngine. Completes when a
        /// final result arrives, an error occurs, or a silence timeout elapses.
        /// </summary>
        /// <returns>An async operation yielding the best recognition result.</returns>
        public IAsyncOperation <SpeechRecognitionResult> RecognizeAsync()
        {
            // Fires if nothing is recognized at all within the initial window
            // (at least 5 seconds).
            _initialSilenceTimeout          = new Timer();
            _initialSilenceTimeout.Interval = Math.Max(Timeouts.InitialSilenceTimeout.TotalMilliseconds, 5000);
            _initialSilenceTimeout.Elapsed += OnTimeout;

            // Fires when the user stops speaking; restarted on every partial result
            // (at least 150 ms).
            _endSilenceTimeout          = new Timer();
            _endSilenceTimeout.Interval = Math.Max(Timeouts.EndSilenceTimeout.TotalMilliseconds, 150);
            _endSilenceTimeout.Elapsed += OnTimeout;

            // Cancel the previous task if it's running.
            _recognitionTask?.Cancel();
            _recognitionTask = null;

            // Switch the shared audio session into record/measurement mode.
            // NOTE(review): the NSError results are assigned but never inspected, so
            // audio-session failures go unnoticed — confirm that is acceptable.
            var     audioSession = AVAudioSession.SharedInstance();
            NSError err;

            err = audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
            err = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            // Configure request to get partial results
            _recognitionRequest = new SFSpeechAudioBufferRecognitionRequest
            {
                ShouldReportPartialResults = true,
                TaskHint = SFSpeechRecognitionTaskHint.Dictation
            };

            var inputNode = _audioEngine.InputNode;

            if (inputNode == null)
            {
                throw new InvalidProgramException("Audio engine has no input node");
            }

            // Completion source bridged to the returned IAsyncOperation.
            var tcs = new TaskCompletionSource <SpeechRecognitionResult>();

            // Keep a reference to the task so that it can be cancelled.
            _recognitionTask = _speechRecognizer.GetRecognitionTask(_recognitionRequest, (result, error) =>
            {
                var isFinal   = false;
                var bestMatch = default(SpeechRecognitionResult);

                if (result != null)
                {
                    // Got a (partial or final) hypothesis: stop the initial-silence
                    // timer and restart the end-silence timer.
                    _initialSilenceTimeout.Stop();
                    _endSilenceTimeout.Stop();
                    _endSilenceTimeout.Start();

                    // Convert the native transcription(s) into our result type.
                    bestMatch = new SpeechRecognitionResult()
                    {
                        Text       = result.BestTranscription.FormattedString,
                        Alternates = result.Transcriptions?
                                     .Select(t => new SpeechRecognitionResult()
                        {
                            Text = t.FormattedString
                        })
                                     .ToList()
                    };
                    isFinal = result.Final;

                    OnHypothesisGenerated(bestMatch.Text);
                }

                if (error != null || isFinal)
                {
                    // Session is over (final result or failure): stop the timers,
                    // tear down the microphone tap, and restore the playback session.
                    _initialSilenceTimeout.Stop();
                    _endSilenceTimeout.Stop();

                    _audioEngine.Stop();

                    inputNode.RemoveTapOnBus(0);
                    inputNode.Reset();

                    audioSession = AVAudioSession.SharedInstance();
                    err          = audioSession.SetCategory(AVAudioSessionCategory.Playback);
                    audioSession.SetMode(AVAudioSession.ModeDefault, out err);
                    err = audioSession.SetActive(false, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

                    _recognitionTask = null;

                    OnStateChanged(SpeechRecognizerState.Idle);

                    if (bestMatch != null)
                    {
                        tcs.TrySetResult(bestMatch);
                    }
                    else
                    {
                        // No usable result: surface the native error to the awaiter.
                        tcs.TrySetException(new Exception($"Error during speech recognition: {error.LocalizedDescription}"));
                    }
                }
            });

            // Tap the microphone input and stream audio buffers into the request.
            var recordingFormat = new AVAudioFormat(sampleRate: 44100, channels: 1);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) => {
                _recognitionRequest?.Append(buffer);
            });

            _initialSilenceTimeout.Start();

            _audioEngine.Prepare();
            _audioEngine.StartAndReturnError(out err);

            OnStateChanged(SpeechRecognizerState.Capturing);

            return(tcs.Task.AsAsyncOperation());
        }
        /// <summary>
        /// OnActivated is the entry point for an application when it is launched via
        /// means other than normal user interaction. This includes Voice Commands, URI activation,
        /// being used as a share target from another app, etc. Here, we're going to handle the
        /// Voice Command activation from Cortana.
        ///
        /// Note: Be aware that an older VCD could still be in place for your application if you
        /// modify it and update your app via the store. You should be aware that you could get
        /// activations that include commands in older versions of your VCD, and you should try
        /// to handle them gracefully.
        /// </summary>
        /// <param name="args">Details about the activation method, including the activation
        /// phrase (for voice commands) and the semantic interpretation, parameters, etc.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            Type navigationToPageType;

            // Voice-command payload for the target page; stays null for plain launches.
            ViewModel.TripVoiceCommand?navigationCommand = null;

            // If the app was launched via a Voice Command, this corresponds to the "show trip to <location>" command.
            // Protocol activation occurs when a tile is clicked within Cortana (via the background task)
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // The arguments can represent many different activation types. Cast it so we can get the
                // parameters we care about out.
                // NOTE(review): the 'as' cast is dereferenced without a null check; it
                // cannot fail when Kind == VoiceCommand, but a pattern match would be safer.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // Get the name of the voice command and the text spoken. See AdventureWorksCommands.xml for
                // the <Command> tags this can be filled with.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                string textSpoken       = speechRecognitionResult.Text;

                // The commandMode is either "voice" or "text", and it indicates how the voice command
                // was entered by the user.
                // Apps should respect "text" mode by providing feedback in silent form.
                string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

                switch (voiceCommandName)
                {
                case "showTripToDestination":
                    // Access the value of the {destination} phrase in the voice command
                    string destination = this.SemanticInterpretation("destination", speechRecognitionResult);

                    // Create a navigation command object to pass to the page. Any object can be passed in,
                    // here we're using a simple struct.
                    navigationCommand = new ViewModel.TripVoiceCommand(
                        voiceCommandName,
                        commandMode,
                        textSpoken,
                        destination);

                    // Set the page to navigate to for this voice command.
                    navigationToPageType = typeof(View.TripDetails);
                    break;

                default:
                    // If we can't determine what page to launch, go to the default entry point.
                    navigationToPageType = typeof(View.TripListView);
                    break;
                }
            }
            else if (args.Kind == ActivationKind.Protocol)
            {
                // Extract the launch context. In this case, we're just using the destination from the phrase set (passed
                // along in the background task inside Cortana), which makes no attempt to be unique. A unique id or
                // identifier is ideal for more complex scenarios. We let the destination page check if the
                // destination trip still exists, and navigate back to the trip list if it doesn't.
                var commandArgs = args as ProtocolActivatedEventArgs;
                Windows.Foundation.WwwFormUrlDecoder decoder = new Windows.Foundation.WwwFormUrlDecoder(commandArgs.Uri.Query);
                var destination = decoder.GetFirstValueByName("LaunchContext");

                navigationCommand = new ViewModel.TripVoiceCommand(
                    "protocolLaunch",
                    "text",
                    "destination",
                    destination);

                navigationToPageType = typeof(View.TripDetails);
            }
            else
            {
                // If we were launched via any other mechanism, fall back to the main page view.
                // Otherwise, we'll hang at a splash screen.
                navigationToPageType = typeof(View.TripListView);
            }

            // Repeat the same basic initialization as OnLaunched() above, taking into account whether
            // or not the app is already active.
            Frame rootFrame = Window.Current.Content as Frame;

            // Do not repeat app initialization when the Window already has content,
            // just ensure that the window is active
            if (rootFrame == null)
            {
                // Create a Frame to act as the navigation context and navigate to the first page
                rootFrame             = new Frame();
                App.NavigationService = new NavigationService(rootFrame);

                rootFrame.NavigationFailed += OnNavigationFailed;

                // Place the frame in the current Window
                Window.Current.Content = rootFrame;
            }

            // Since we're expecting to always show a details page, navigate even if
            // a content frame is in place (unlike OnLaunched).
            // Navigate to either the main trip list page, or if a valid voice command
            // was provided, to the details page for that trip.
            rootFrame.Navigate(navigationToPageType, navigationCommand);

            // Ensure the current window is active
            Window.Current.Activate();
        }
示例#26
0
        protected override async void OnActivated(IActivatedEventArgs args)
        {
            try
            {
                if (NetworkInterface.GetIsNetworkAvailable())
                {
                    base.OnActivated(args);

                    EnsureInstancedMainVM();

                    // for switching to VideoPage
                    var    selectedItem     = new YoutubeVideo();
                    string voiceCommandName = "";
                    string selectedItemType = "Video";

                    // If the app was launched via a Voice Command, this corresponds to the "show trip to <location>" command.
                    // Protocol activation occurs when a tile is clicked within Cortana (via the background task)
                    if (args.Kind == ActivationKind.VoiceCommand)
                    {
                        // The arguments can represent many different activation types. Cast it so we can get the
                        // parameters we care about out.
                        var commandArgs = args as VoiceCommandActivatedEventArgs;

                        Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                        // Get the name of the voice command and the text spoken. See AdventureWorksCommands.xml for
                        // the <Command> tags this can be filled with.
                        voiceCommandName = speechRecognitionResult.RulePath[0];
                        string textSpoken = speechRecognitionResult.Text;

                        // The commandMode is either "voice" or "text", and it indicates how the voice command
                        // was entered by the user.
                        // Apps should respect "text" mode by providing feedback in silent form.
                        string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);
                        var    player      = MainPageViewModel.MediaElement;
                        double volume      = 0;
                        int    currentIdx  = 0;
                        string searchQuery = "";

                        switch (voiceCommandName)
                        {
                        case "searchVideo":
                            // Access the value of the {searchQuery} phrase in the voice command
                            searchQuery = this.SemanticInterpretation("searchQuery", speechRecognitionResult);

                            // set the view model's search string
                            MainPageViewModel.SearchQuery = searchQuery;

                            MainPageViewModel.YouTubeItems.Clear();

                            MainPageViewModel.Header = "Videos";

                            foreach (var ytItems in await MainPageViewModel.YouTubeService.ListItems(searchQuery, MainPageViewModel.MaxResults, "video"))
                            {
                                MainPageViewModel.YouTubeItems.Add(ytItems);
                            }
                            break;

                        case "searchChannel":
                            // Access the value of the {searchQuery} phrase in the voice command
                            searchQuery = this.SemanticInterpretation("searchQuery", speechRecognitionResult);

                            // set the view model's search string
                            MainPageViewModel.SearchQuery = searchQuery;

                            MainPageViewModel.YouTubeItems.Clear();

                            MainPageViewModel.Header = "Channels";

                            foreach (var ytItems in await MainPageViewModel.YouTubeService.ListItems(searchQuery, MainPageViewModel.MaxResults, "channel"))
                            {
                                MainPageViewModel.YouTubeItems.Add(ytItems);
                            }
                            break;

                        case "searchPlaylist":
                            // Access the value of the {searchQuery} phrase in the voice command
                            searchQuery = this.SemanticInterpretation("searchQuery", speechRecognitionResult);

                            // set the view model's search string
                            MainPageViewModel.SearchQuery = searchQuery;

                            MainPageViewModel.YouTubeItems.Clear();

                            MainPageViewModel.Header = "Playlists";

                            foreach (var ytItems in await MainPageViewModel.YouTubeService.ListItems(searchQuery, MainPageViewModel.MaxResults, "playlist"))
                            {
                                MainPageViewModel.YouTubeItems.Add(ytItems);
                            }
                            break;

                        case "selectItem":
                            // Access the value of the {searchQuery} phrase in the voice command
                            var selected = this.SemanticInterpretation("selected", speechRecognitionResult);

                            switch (selected)
                            {
                            case "first":
                                selected = "1";
                                break;

                            case "second":
                                selected = "2";
                                break;

                            case "third":
                                selected = "3";
                                break;

                            case "fourth":
                                selected = "4";
                                break;

                            case "fifth":
                                selected = "5";
                                break;
                            }

                            selectedItem = MainPageViewModel.YouTubeItems.ElementAtOrDefault(int.Parse(selected) - 1);

                            // switch for searchQueryType
                            switch (selectedItem.Type)
                            {
                            //case "Video":
                            //    selectedItemType = "Video";
                            //    break;

                            case "Channel":
                                selectedItemType = "Channel";
                                break;

                            case "Playlist":
                                selectedItemType = "Playlist";
                                break;
                            }
                            break;

                        case "pauseVideo":
                            player.Pause();
                            break;

                        case "resumeVideo":
                            player.Play();
                            break;

                        case "stopVideo":
                            player.Stop();
                            break;

                        case "volumeUp":
                            volume         = double.Parse(this.SemanticInterpretation("vNumber", speechRecognitionResult)) / 100.0;
                            player.Volume += volume;
                            if (player.Volume > 100)
                            {
                                player.Volume = 100;
                            }
                            break;

                        case "volumeDown":
                            volume         = double.Parse(this.SemanticInterpretation("vNumber", speechRecognitionResult)) / 100.0;
                            player.Volume -= volume;
                            if (player.Volume < 0)
                            {
                                player.Volume = 0;
                            }
                            break;

                        case "skip":
                            if (player.Position + new TimeSpan(0, 0, int.Parse(this.SemanticInterpretation("number", speechRecognitionResult))) <= player.NaturalDuration.TimeSpan)
                            {
                                player.Position += new TimeSpan(0, 0, int.Parse(this.SemanticInterpretation("number", speechRecognitionResult)));
                            }
                            else
                            {
                                player.Position = player.NaturalDuration.TimeSpan;
                            }
                            break;

                        case "goBack":
                            var zeroTimeSpan = new TimeSpan(0);
                            if (player.Position - new TimeSpan(0, 0, int.Parse(this.SemanticInterpretation("number", speechRecognitionResult))) >= zeroTimeSpan)
                            {
                                player.Position -= new TimeSpan(0, 0, int.Parse(this.SemanticInterpretation("number", speechRecognitionResult)));
                            }
                            else
                            {
                                player.Position = zeroTimeSpan;
                            }
                            break;

                        case "mute":
                            player.IsMuted = true;
                            break;

                        case "unmute":
                            player.IsMuted = false;
                            break;

                        case "nextVideo":
                            currentIdx   = MainPageViewModel.CurrentElementInList + 1;
                            selectedItem = MainPageViewModel.YouTubeItems.ElementAtOrDefault(currentIdx);
                            break;

                        case "prevVideo":
                            currentIdx   = MainPageViewModel.CurrentElementInList - 1;
                            selectedItem = MainPageViewModel.YouTubeItems.ElementAtOrDefault(currentIdx);
                            break;

                        case "exit":
                            Application.Current.Exit();
                            break;

                        default:
                            // If we can't determine what page to launch, go to the default entry point.
                            Debug.WriteLine("default");
                            break;
                        }
                    }

                    // Repeat the same basic initialization as OnLaunched() above, taking into account whether
                    // or not the app is already active.
                    Frame rootFrame = Window.Current.Content as Frame;

                    // Do not repeat app initialization when the Window already has content,
                    // just ensure that the window is active
                    if (rootFrame == null)
                    {
                        // Create a Frame to act as the navigation context and navigate to the first page
                        rootFrame = new Frame();

                        rootFrame.NavigationFailed += OnNavigationFailed;

                        // Place the frame in the current Window
                        Window.Current.Content = rootFrame;
                    }

                    if (voiceCommandName == "searchVideo" || voiceCommandName == "searchChannel" || voiceCommandName == "searchPlaylist")
                    {
                        rootFrame.Navigate(typeof(MainPage));
                    }
                    else if (voiceCommandName == "selectItem" || voiceCommandName == "nextVideo" || voiceCommandName == "prevVideo")
                    {
                        if (selectedItemType != "Video")
                        {
                            rootFrame.Navigate(typeof(MainPage), selectedItem);
                        }
                        else
                        {
                            rootFrame.Navigate(typeof(VideoPage), selectedItem);
                        }
                    }


                    // Ensure the current window is active
                    Window.Current.Activate();
                }
                else
                {
                    MessageDialog msg = new MessageDialog("You're not connected to Internet!");
                    await msg.ShowAsync();
                }
            }
            catch (Exception e)
            {
                //string ex = $"App.xaml {e.Message}";
                //await new MessageDialog(ex).ShowAsync();
            }
        }
        /// <summary>
        /// Entry point when the application is activated by something other than a
        /// normal launch: Voice Commands, URI activation, share-target, etc. Only
        /// Voice Command activation from Cortana is handled here.
        ///
        /// Note: an older VCD may still be registered if the app was updated through
        /// the store, so activations may carry commands from previous VCD versions;
        /// unknown commands are routed to the default entry point below.
        /// </summary>
        /// <param name="args">Details about the activation method, including the
        /// activation phrase (for voice commands) and its semantic interpretation.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Any other activation mechanism (share target, file types, ...) is not
            // handled by this application.
            if (args.Kind != ActivationKind.VoiceCommand)
            {
                return;
            }

            // Narrow the activation arguments to the voice-command variant so the
            // speech recognition result becomes accessible.
            var voiceArgs = args as VoiceCommandActivatedEventArgs;

            Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = voiceArgs.Result;

            // The command name corresponds to a <Command> tag in the VCD file.
            string commandName = speechRecognitionResult.RulePath[0];
            string spokenText  = speechRecognitionResult.Text;

            // The commandMode is either "voice" or "text" and indicates how the user
            // entered the command. "text" mode should be answered silently.
            string commandMode = this.SemanticInterpretation("commandMode", speechRecognitionResult);

            // Resolve the page to show and (optionally) the command object to pass it.
            Type navigationToPageType;

            ViewModel.TripVoiceCommand? navigationCommand = null;

            if (commandName == "showTripToDestination")
            {
                // The value of the {destination} phrase captured by the recognizer.
                string destination = this.SemanticInterpretation("destination", speechRecognitionResult);

                // Any object could be passed to the page; a simple struct is enough here.
                navigationCommand = new ViewModel.TripVoiceCommand(
                    commandName,
                    commandMode,
                    spokenText,
                    destination);

                navigationToPageType = typeof(View.TripDetails);
            }
            else
            {
                // Unrecognized command: go to the default entry point.
                navigationToPageType = typeof(View.TripListView);
            }

            // Mirror the initialization performed by OnLaunched(), taking into account
            // that the app may already be running.
            Frame rootFrame = Window.Current.Content as Frame;

            // When the Window already has content, skip re-initialization and reuse
            // the existing frame.
            if (rootFrame == null)
            {
                // First activation: create the navigation frame and host it.
                rootFrame             = new Frame();
                App.NavigationService = new NavigationService(rootFrame);

                rootFrame.NavigationFailed += OnNavigationFailed;

                Window.Current.Content = rootFrame;
            }

            // Unlike OnLaunched, always navigate even when a content frame is in
            // place: a voice activation always targets a specific page — either the
            // trip list, or the details page for a valid voice command.
            rootFrame.Navigate(navigationToPageType, navigationCommand);

            // Ensure the current window is active.
            Window.Current.Activate();
        }
示例#28
0
        /// <summary>
        /// Handles activation via Cortana voice commands and protocol (URI) launches.
        /// All commands require network access; when offline a dialog is shown and
        /// the activation is abandoned.
        /// </summary>
        /// <param name="args">Activation details: kind, the voice recognition result
        /// (for voice commands) or the launch URI (for protocol activation).</param>
        protected override async void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Every command below hits the network; refuse to proceed offline.
            if (!IsInternet())
            {
                await new MessageDialog("Seems you are not connected to the Internet").ShowAsync();
                return;
            }

            if (args.Kind == ActivationKind.VoiceCommand)
            {
                VoiceCommandActivatedEventArgs voiceCommandArgs = args as VoiceCommandActivatedEventArgs;
                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = voiceCommandArgs.Result;

                // The command name corresponds to a <Command> tag in the VCD file.
                string voiceCommandName = speechRecognitionResult.RulePath[0];
                Frame  rootFrame        = Window.Current.Content as Frame;
                var    localSettings    = Windows.Storage.ApplicationData.Current.LocalSettings;

                if (rootFrame == null)
                {
                    rootFrame = new Frame();

                    // Fix: subscribe only when the frame is first created. Previously
                    // the handler was attached on every activation, accumulating
                    // duplicate subscriptions on a long-lived frame.
                    rootFrame.NavigationFailed += OnNavigationFailed;
                }

                if (localSettings.Values["LoggedIn"] == null)
                {
                    // Not logged in: speak a hint and route to the login page.
                    string readtext = "You need to login first";
                    ReadSpeech(new MediaElement(), readtext);
                    rootFrame.Navigate(typeof(MainPage), speechRecognitionResult);
                }
                else
                {
                    // Dispatch to the page matching the recognized command; unknown
                    // commands fall through and simply (re)activate the window.
                    switch (voiceCommandName)
                    {
                    case "simpleOpeningApp":
                        rootFrame.Navigate(typeof(Navigation.NavigationPage), speechRecognitionResult);
                        break;

                    case "voicecommand2":
                        rootFrame.Navigate(typeof(Navigation.NavigationPage));
                        break;

                    case "toCustomLocationApp":
                        Arguments parameters = await ToCustomLocation(voiceCommandArgs);

                        rootFrame.Navigate(typeof(BookingPage), parameters);
                        break;

                    case "bookcheapestApp":
                        Arguments data = await ProcessBookCheapest(voiceCommandArgs);

                        rootFrame.Navigate(typeof(BookingPage), data);
                        break;

                    case "costEstimateApp":
                        rootFrame.Navigate(typeof(EstimationList), speechRecognitionResult);
                        break;

                    case "bookFromXToYApp":
                        rootFrame.Navigate(typeof(BookingPage), ProcessBookFromXToY(speechRecognitionResult));
                        break;
                    }
                }

                Window.Current.Content = rootFrame;
                Window.Current.Activate();
            }
            else if (args.Kind == ActivationKind.Protocol)
            {
                var   protocolArgs = args as ProtocolActivatedEventArgs;
                var   queryArgs    = new WwwFormUrlDecoder(protocolArgs.Uri.Query);
                Frame rootFrame    = Window.Current.Content as Frame;

                // Do not repeat app initialization when the Window already has content,
                // just ensure that the window is active.
                if (rootFrame == null)
                {
                    // Create a Frame to act as the navigation context.
                    rootFrame = new Frame();

                    rootFrame.NavigationFailed += OnNavigationFailed;

                    if (args.PreviousExecutionState == ApplicationExecutionState.Terminated)
                    {
                        //TODO: Load state from previously suspended application
                    }

                    // Place the frame in the current Window.
                    Window.Current.Content = rootFrame;
                }

                // Fix: navigation and activation used to happen only when the frame
                // was freshly created, so a protocol launch into a running app did
                // nothing. Always navigate to the cabs page and activate the window.
                rootFrame.Navigate(typeof(CabsPage), queryArgs);
                Window.Current.Activate();
            }
        }
示例#29
0
        /// <summary>
        /// Cortana entry point: forwards a recognized voice command, together with
        /// its semantic interpretation properties, to the voice-command service.
        /// </summary>
        /// <param name="args">Details about the activation method. Only
        /// VoiceCommand activation is supported by this application.</param>
        /// <exception cref="InvalidOperationException">Thrown when the app is
        /// activated by any mechanism other than a voice command, since falling
        /// through would leave the app hanging at the splash screen.</exception>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            // Voice-command activation corresponds to e.g. the "show trip to <location>"
            // command; protocol activation occurs when a tile is clicked within Cortana.
            if (args.Kind == ActivationKind.VoiceCommand)
            {
                // Cast the generic activation arguments to the voice-command variant
                // so the recognition result becomes accessible.
                var commandArgs = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

                // The command name corresponds to a <Command> tag in the VCD file.
                string voiceCommandName = speechRecognitionResult.RulePath[0];

                // Hand the command and its semantic properties to the service that
                // performs the actual dispatch/navigation work.
                _vcService.Call(voiceCommandName, speechRecognitionResult.SemanticInterpretation.Properties);
            }
            else
            {
                // Launched via any other mechanism: fail fast with a specific,
                // descriptive exception instead of a bare System.Exception, so the
                // failure is diagnosable rather than a silent hang at the splash screen.
                throw new InvalidOperationException(
                    $"Unsupported activation kind: {args.Kind}. Only voice-command activation is handled.");
            }

            // Ensure the current window is active.
            Window.Current.Activate();
        }
示例#30
0
        /// <summary>
        /// Handles Cortana voice-command activation for the lamp: switching it on or
        /// off and changing its colour. Unknown commands still open the lamp page,
        /// but without a command object.
        /// </summary>
        /// <param name="args">Details about the activation method, including the
        /// recognition result for voice commands.</param>
        protected override void OnActivated(IActivatedEventArgs args)
        {
            base.OnActivated(args);

            Type navigationToPageType;

            ViewModel.LampVoiceCommand navigationCommand = null;

            if (args.Kind == ActivationKind.VoiceCommand)
            {
                var activation = args as VoiceCommandActivatedEventArgs;

                Windows.Media.SpeechRecognition.SpeechRecognitionResult recognition = activation.Result;

                string commandName = recognition.RulePath[0];
                string spokenText  = recognition.Text;

                // The commandMode is either "voice" or "text" and indicates how the
                // user entered the command. "text" mode should be answered silently.
                string commandMode = this.SemanticInterpretation("commandMode", recognition);

                if (commandName == "offLight" || commandName == "onLight")
                {
                    // Plain on/off: no additional phrase to extract.
                    navigationCommand = new ViewModel.LampVoiceCommand()
                    {
                        VoiceCommand = commandName,
                        CommandMode  = commandMode,
                        TextSpoken   = spokenText
                    };

                    navigationToPageType = typeof(View.Lamp);
                }
                else if (commandName == "changeColor")
                {
                    // Pull the {color} phrase out of the recognition result.
                    string color = this.SemanticInterpretation("color", recognition);

                    navigationCommand = new ViewModel.LampVoiceCommand()
                    {
                        VoiceCommand = commandName,
                        CommandMode  = commandMode,
                        TextSpoken   = spokenText,
                        Color        = color
                    };

                    navigationToPageType = typeof(View.Lamp);
                }
                else
                {
                    // Unrecognized command: fall back to the default entry point.
                    navigationToPageType = typeof(View.Lamp);
                }

                // Mirror the initialization performed by OnLaunched(), taking into
                // account that the app may already be running.
                Frame rootFrame = Window.Current.Content as Frame;

                // When the Window already has content, reuse the existing frame.
                if (rootFrame == null)
                {
                    // First activation: create the navigation frame and host it.
                    rootFrame = new Frame();

                    rootFrame.NavigationFailed += OnNavigationFailed;

                    Window.Current.Content = rootFrame;
                }

                // Unlike OnLaunched, always navigate even when a content frame is in
                // place: a voice command always targets a specific page.
                rootFrame.Navigate(navigationToPageType, navigationCommand);

                // Ensure the current window is active.
                Window.Current.Activate();
            }
        }