Example #1
        private static async void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                string resultGeneratedText = args.Result.Text + " ";
                dictatedTextBuilder.Append(resultGeneratedText);
                recognizedText += resultGeneratedText;

                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    RtfTextHelper.Text = dictatedTextBuilder.ToString();
                });
            }
            else
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    RtfTextHelper.Text = dictatedTextBuilder.ToString();
                });
            }
        }
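Example #1 only shows the ResultGenerated side; the recognizer, dispatcher and text builder are created elsewhere. A minimal setup sketch such a handler could be attached to might look like this (field and method names are assumptions, not part of the original; uses Windows.Media.SpeechRecognition, Windows.UI.Core, System.Text and System.Threading.Tasks):

        // Assumed fields; Example #1 only shows the ResultGenerated handler.
        private static SpeechRecognizer speechRecognizer;
        private static CoreDispatcher dispatcher;
        private static readonly StringBuilder dictatedTextBuilder = new StringBuilder();
        private static string recognizedText = string.Empty;

        private static async Task StartDictationAsync()
        {
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            speechRecognizer = new SpeechRecognizer();

            // A topic constraint puts the recognizer into free-form dictation mode.
            speechRecognizer.Constraints.Add(
                new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation"));

            var compilation = await speechRecognizer.CompileConstraintsAsync();
            if (compilation.Status != SpeechRecognitionResultStatus.Success)
            {
                return;
            }

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated +=
                ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }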
Example #2
        async void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (recognizer_ == null || recognizer_.ContinuousRecognitionSession != sender)
            {
                return;
            }

            dictatedTextBuilder_.Append(args.Result.Text + "|");

            System.Diagnostics.Debug.WriteLine("ResultGenerated:" + args.Result.Confidence + " " + args.Result.Text);

            lock (eventQue_)
            {
                var confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Rejected;
                switch (args.Result.Confidence)
                {
                case SpeechRecognitionConfidence.High: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.High; break;

                case SpeechRecognitionConfidence.Low: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Low; break;

                case SpeechRecognitionConfidence.Medium: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Medium; break;

                case SpeechRecognitionConfidence.Rejected: confidenceLevel = UnityEngine.Windows.Speech.ConfidenceLevel.Rejected; break;
                }
                eventQue_.Enqueue(new RecoEvent_
                {
                    eventType       = RecoEvent_.EventType.DictationResult,
                    text            = args.Result.Text,
                    confidenceLevel = confidenceLevel,
                });
            }
        }
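Example #2 only fills the queue; it is presumably drained on Unity's main thread. A sketch of such a consumer, assuming a delegate field (here onDictationResult, not shown in the original) that receives the events:

        // Hypothetical consumer, e.g. called from Update(); drains the queue and
        // dispatches outside the lock so the recognizer thread is never blocked.
        void DrainRecognitionEvents()
        {
            while (true)
            {
                RecoEvent_ ev;
                lock (eventQue_)
                {
                    if (eventQue_.Count == 0)
                    {
                        return;
                    }
                    ev = eventQue_.Dequeue();
                }

                if (ev.eventType == RecoEvent_.EventType.DictationResult)
                {
                    onDictationResult?.Invoke(ev.text, ev.confidenceLevel); // assumed delegate field
                }
            }
        }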
Example #3
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender,
                                                                  SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var text = string.Empty;
            var tag  = string.Empty;

            if (args.Result != null)
            {
                text = args.Result.Text;

                if (args.Result.Constraint != null)
                {
                    tag = args.Result.Constraint.Tag;
                }
            }

            Debug.WriteLine($"ContinuousRecognitionSession_ResultGenerated {text} {tag} {args.Result.Confidence.ToString()}");

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their
            // grammar based on testing.
            if (!string.IsNullOrEmpty(tag) &&
                (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High))
            {
                OnPhraseRecognized(1, text, tag);
            }
            else
            {
                OnPhraseRecognized(-1, text, tag);
            }
        }
Example #4
 private void respondToSpeechRecognition(
     SpeechContinuousRecognitionSession sender,
     SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Status == SpeechRecognitionResultStatus.Success)
     {
         System.Diagnostics.Debug.WriteLine($"Recognized speech: {args.Result.Text}");
         System.Diagnostics.Debug.WriteLine($"Recognition confidence: {args.Result.Confidence}");
         if (args.Result.Confidence == SpeechRecognitionConfidence.Rejected)
         {
             return;
         }
         else if (args.Result.Confidence == SpeechRecognitionConfidence.Low)
         {
             onCommandHypothesized(new CommandHypothesisEventArgs(convertSpeechToCommand(args.Result), args.Result.Text));
         }
         else
         {
             var command = convertSpeechToCommand(args.Result);
             if (command.Type == CommandType.Move)
             {
                 var moveCommand = command as MoveCommand;
                 if (moveCommand.Position.HasValue && !moveCommand.PositionUsedNatoAlphabet)
                 {
                     if (isPositionAmbiguous(moveCommand.Position.Value))
                     {
                         onCommandHypothesized(new CommandHypothesisEventArgs(command, args.Result.Text));
                         return;
                     }
                 }
                 if (!moveCommand.DestinationUsedNatoAlphabet)
                 {
                     if (isPositionAmbiguous(moveCommand.Destination))
                     {
                         onCommandHypothesized(new CommandHypothesisEventArgs(command, args.Result.Text));
                         return;
                     }
                 }
             }
             else if (command.Type == CommandType.ConfirmPiece)
             {
                 var confirmPieceCommand = command as ConfirmPieceCommand;
                 if (!confirmPieceCommand.PositionUsedNatoAlphabet)
                 {
                     if (isPositionAmbiguous(confirmPieceCommand.Position))
                     {
                         onCommandHypothesized(new CommandHypothesisEventArgs(command, args.Result.Text));
                         return;
                     }
                 }
             }
              onCommandRecognized(new CommandEventArgs(command));
         }
     }
     else
     {
         System.Diagnostics.Debug.WriteLine($"Received continuous speech result of {args.Result.Status}");
     }
 }
Example #5
 private async void ContinuousRecognitionSession_ResultGenerated(
     SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
     {
         Logger.Log("Waiting ...");
     });
 }
 /// <summary>
 /// Recognition event handler.
 /// </summary>
 /// <param name="sender"></param>
 /// <param name="args"></param>
 private void RecognitionFound(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Status == SpeechRecognitionResultStatus.Success)
     {
         var command = FindCommand(args.Result.Text);
         OnRecognitionCommandFound(args.Result, command);
     }
 }
 private async void Con_Result(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Text == "note finished")
     {
         await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("note recorded"));
         await sender.StopAsync();
         //SetListening(true);
     }
 }
        private async void UpdateTextBox(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {

            System.Diagnostics.Debug.WriteLine("updated box with recognizer results");
            await Dispatcher.InvokeAsync(() =>
            {
                customPhrase.Text = args.Result.Text;
            });
        }
 private async void Con_Result(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     switch (args.Result.Text) {
         case "sponge in":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             spongeIn++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber(spongInC, spongeIn));
             break;
         case "sponge out":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             spongeOut++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber( spongOutC,spongeOut));
             break;
         case "instrument in":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             instruIn++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber( instInC, instruIn));
             break;
         case "needle in":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             needleIn++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber( needleInC,needleIn));
             break;
         case "instrument out":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             instruOut++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber( instOutC,instruOut));
             break;
         case "needle out":
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Noted"));
             needleOut++;
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => SetNumber( needleOutC,needleOut));
             break;
         case "going to close":
             if (spongeIn != spongeOut)
             {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I'm concerned. There is "+ (int) Math.Abs(spongeIn-spongeOut)+" sponge not accounted for. "+spongeIn+" In, "+ spongeOut+" out"));
                 
             } else if (needleIn != needleOut)
             {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I'm concerned. There is " + (int) Math.Abs(needleIn - needleOut) + " needle not accounted for. " + needleIn + " in, " + needleOut + " out."));
                 
             }
             else if (instruIn != instruOut)
             {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I'm concerned. There is " + (int) Math.Abs(instruIn - instruOut) + " instrument not accounted for. " + instruIn + " in, " + instruOut + " out."));
                 
             }
             else {
                 await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("I have counted " + spongeIn + " in sponge, " + spongeOut + " out sponge, " + instruIn + " in instrument, " + instruOut + " out instrument, " + needleIn + " in needle, " + needleOut + " out needle. Count seems OK"));
                 instruIn = instruOut = needleIn = needleOut = spongeIn = spongeOut = 0;
                 await sender.StopAsync();
             }
             break;
     }
 }
 /// <summary>
 /// Initializes a new instance of the <see cref="PhraseRecognizedEventArgs"/> class.
 /// </summary>
 /// <param name="person">The Person who the note is addressed to.</param>
 /// <param name="phrase">The phrase provided by the speech recognizer.</param>
 /// <param name="verb">The command verb associated with the recognized phrase.</param>
 /// <param name="speechRecognitionArgs">Event data from the speech recognizer.</param>
 public PhraseRecognizedEventArgs(
     Person person,
     string phrase,
     CommandVerb verb,
     SpeechContinuousRecognitionResultGeneratedEventArgs speechRecognitionArgs)
 {
     PhraseTargetPerson = person;
     PhraseText = phrase;
     Verb = verb;
     IsDictation = speechRecognitionArgs.Result.Constraint == null ? false : Verb == CommandVerb.Dictation;
 }
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     Debug.WriteLine(args.Result.Text);
     await
           Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
               CoreDispatcherPriority.Normal, () =>
               {
                     SpeechResult = args.Result.Text;
                     ProcessCommands(args.Result);
               });
 }
Example #12
 /// <summary>
 /// Initializes a new instance of the <see cref="PhraseRecognizedEventArgs"/> class.
 /// </summary>
 /// <param name="person">The Person who the note is addressed to.</param>
 /// <param name="phrase">The phrase provided by the speech recognizer.</param>
 /// <param name="verb">The command verb associated with the recognized phrase.</param>
 /// <param name="speechRecognitionArgs">Event data from the speech recognizer.</param>
 public PhraseRecognizedEventArgs(
     Person person,
     string phrase,
     CommandVerb verb,
     SpeechContinuousRecognitionResultGeneratedEventArgs speechRecognitionArgs)
 {
     PhraseTargetPerson = person;
     PhraseText         = phrase;
     Verb        = verb;
     IsDictation = speechRecognitionArgs.Result.Constraint == null ? false : Verb == CommandVerb.Dictation;
 }
Example #13
        private void FindResults(SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var results = _agendaService.FindSessionsByKeyword(args.Result.Text);
            var list    = results.Where(r => r.Value > 0).OrderByDescending(r => r.Value).Take(10);

            CodecampSessions = new ObservableCollection<Session>();
            foreach (var keyValuePair in list)
            {
                CodecampSessions.Add(keyValuePair.Key);
            }
        }
Example #14
 private void VoiceHandler(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs e)
 {
     if (e.Result.Text == "Connect")
     {
         Debug.WriteLine("Voice Command: " + e.Result.Text);
         if (_ezb == null)
         {
             ConnectBot();
         }
     }
 }
 /// <summary>
 /// Initializes a new instance of the <see cref="PhraseRecognizedEventArgs"/> class.
 /// </summary>
 /// <param name="phrase">The phrase provided by the speech recognizer.</param>
 /// <param name="commandContext">The command context associated with the recognized phrase.</param>
 /// <param name="speechRecognitionArgs">Event data from the speech recognizer.</param>
 public PhraseRecognizedEventArgs(
     string phrase,
     CommandContext commandContext,
     SpeechContinuousRecognitionResultGeneratedEventArgs speechRecognitionArgs)
 {
     PhraseText     = phrase;
     CommandContext = commandContext;
     IsDictation    =
         speechRecognitionArgs.Result.Constraint == null
         ? false
         : CommandContext.Command == Command.Dictation;
 }
Example #16
 private async void HandleSpeechResult(
     SpeechContinuousRecognitionSession session,
     SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Text == searchTriggerPhrase)
     {
         // Simulate a search button click on the UI thread
         await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             this.SearchButton_Click(null, null);
         });
     }
 }
Example #17
        /// <summary>
        /// Method that runs when the local recognizer generates a result.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="args"></param>
        private async void LocalSessionResult(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            Debug.WriteLine("Received command");
            if (args.Result.Confidence != SpeechRecognitionConfidence.Rejected)
            {
                Debug.WriteLine("Received command: OK");
                await StartCommandRecognizer();

                OnResponseReceived("TriggerSuccess");
            }
        }
Example #18
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         if (args.Result.Text == "Start Listening")
         {
             //await Windows.Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
             //{
             //    SetListening(true);
             //});
         }
     }
 }
Example #19
 /// <summary>
 /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
 /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
 /// low confidence).
 /// </summary>
 /// <param name="sender">The Recognition session that generated this result</param>
 /// <param name="args">Details about the recognized speech</param>
 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender,
                                                           SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High ||
         args.Result.Confidence == SpeechRecognitionConfidence.Low)
     {
         main.HandleVoiceCommand(args.Result.SemanticInterpretation.Properties);
     }
     else if (args.Result.Confidence == SpeechRecognitionConfidence.Rejected)
     {
         // TODO HANDLE ERROR
     }
 }
 private async void Recogniser_ResultGenerated(
     SpeechContinuousRecognitionSession sender,
     SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         _builder.Append($"{args.Result.Text} ");
         await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             Result?.Invoke(_builder.ToString());
         });
     }
 }
        /// <summary>
        /// Runs when a final result is created.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="args"></param>
        private async void CommandResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            try
            {
                string text = args.Result.Text;
                Debug.WriteLine("-> " + text);

                await this.StopCommandRecognizer();

                string[] tokens = text.Split();
                var      type   = this.ValidateCommand(text);

                if (text.Contains("what can I say"))
                {
                    this.OnResponseReceived("Try: \"Go to room 2011\"");
                    //VoiceFeedback("Try take me to room 2011");
                    await Task.Delay(3000);
                }
                else
                {
                    string dest = null;
                    try
                    {
                        dest = tokens.Last();
                    }
                    catch (Exception)
                    {
                        // Fail silently
                    }

                    this.OnCommandReceived(new VoiceCommand()
                    {
                        CommandType = type,
                        CommandText = text,
                        Data        = dest
                    });
                }

                await this.StartTriggerRecognizer();
            }
            catch (Exception ex)
            {
                // Command not in expected format
                Debug.WriteLine("Something Broke!\n {0}", ex.Message);
                await this.FailedCommand();
            }
        }
Example #22
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        void OnContinuousRecognitionSessionResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Status != SpeechRecognitionResultStatus.Success)
            {
                return;
            }

            // Unpack event arg data.
            bool   hasConstraint = args.Result.Constraint != null;
            var    confidence    = args.Result.Confidence;
            string phrase        = args.Result.Text;

            // The garbage rule doesn't have a tag associated with it, and
            // the other rules return a string matching the tag provided
            // when the grammar was compiled.
            string tag = hasConstraint ? args.Result.Constraint.Tag : "unknown";

            if (tag == "unknown")
            {
                return;
            }

            if (hasConstraint && args.Result.Constraint.Type == SpeechRecognitionConstraintType.List)
            {
                // The List constraint type represents speech from
                // a compiled grammar of commands.
                var command = _intentInterpreter.GetPhraseIntent(phrase);

                // You may decide to use per-phrase confidence levels in order to
                // tune the behavior of your grammar based on testing.
                if (confidence == SpeechRecognitionConfidence.Medium ||
                    confidence == SpeechRecognitionConfidence.High)
                {
                    OnPhraseRecognized(new PhraseRecognizedEventArgs(phrase,
                                                                     command,
                                                                     args));
                }
            }
            else if (hasConstraint && args.Result.Constraint.Type == SpeechRecognitionConstraintType.Topic)
            {
                // The Topic constraint type represents speech from dictation.
                OnPhraseRecognized(new PhraseRecognizedEventArgs(phrase,
                                                                 Command.Dictation,
                                                                 args));
            }
        }
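The handler above distinguishes commands (List) from dictation (Topic) by constraint type, which assumes both constraint kinds were registered before compilation. A sketch of that registration (phrases and names here are assumptions, not from the original):

        async Task RegisterConstraintsAsync(SpeechRecognizer recognizer)
        {
            // List constraint: compiled command grammar, surfaces as SpeechRecognitionConstraintType.List.
            recognizer.Constraints.Add(new SpeechRecognitionListConstraint(
                new[] { "take a note", "stop listening" }, "commands"));   // example phrases, assumed

            // Topic constraint: free dictation, surfaces as SpeechRecognitionConstraintType.Topic.
            recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint(
                SpeechRecognitionScenario.Dictation, "dictation"));

            var result = await recognizer.CompileConstraintsAsync();
            if (result.Status != SpeechRecognitionResultStatus.Success)
            {
                throw new InvalidOperationException("Grammar compilation failed: " + result.Status);
            }
        }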
        /// <summary>
        /// Triggers cortana with simulated key combo if constraint is heard.
        /// </summary>
        /// <param name="session">backgroundListener's continuous recognition session.</param>
        /// <param name="args">Result arguments</param>
        private void blResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Constraint != null)
            {
                switch (args.Result.Text)
                {
                case "Open Netflix now":
                    Process.Start(@"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe", "http://www.netflix.com");
                    break;

                default:
                    InputSimulator.SimulateModifiedKeyStroke(VirtualKeyCode.LWIN, VirtualKeyCode.VK_S);
                    break;
                }
            }
        }
        private void RecognizerResultGenerated(
            SpeechContinuousRecognitionSession session,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // only act if the speech is recognised with high confidence
            if (!args.Result.IsRecognisedWithHighConfidence())
            {
                return;
            }

            // interpret key individual parts of the grammar specification
            string command   = args.Result.SemanticInterpretation.GetInterpretation("command");
            string direction = args.Result.SemanticInterpretation.GetInterpretation("direction");

            // write to debug
            Debug.WriteLine($"Command: {command}, Direction: {direction}");
        }
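IsRecognisedWithHighConfidence and GetInterpretation are helper extensions that the snippet does not show; one plausible implementation over SpeechRecognitionResult (requires System.Linq) could be:

    public static class SpeechRecognitionResultExtensions
    {
        public static bool IsRecognisedWithHighConfidence(this SpeechRecognitionResult result)
        {
            return result.Confidence == SpeechRecognitionConfidence.High;
        }

        public static string GetInterpretation(this SpeechRecognitionResult result, string key)
        {
            // SemanticInterpretation.Properties maps SRGS rule names to the values the
            // grammar produced; each entry is a read-only list of strings.
            IReadOnlyList<string> values;
            return result.SemanticInterpretation.Properties.TryGetValue(key, out values)
                ? values.FirstOrDefault()
                : null;
        }
    }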
Example #25
        void OnSpeechResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            switch (args.Result.Text)
            {
            case "scan":
                this.Scan();
                break;

            case "reset":
                this.Reset();
                break;

            default:
                break;
            }
        }
Example #26
        async void OnResultGenerated(SpeechContinuousRecognitionSession sender,
                                     SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                           () =>
            {
                this.HypothesisedSpeech = string.Empty;

                if (string.IsNullOrEmpty(this.FullSpeech))
                {
                    this.FullSpeech = args.Result.Text;
                }
                else
                {
                    this.FullSpeech += $" {args.Result.Text}";
                }
            });
        }
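Example #26 clears a HypothesisedSpeech property, which implies a companion HypothesisGenerated handler elsewhere in the class. A sketch of what that handler might look like (the handler itself is an assumption; property and dispatcher usage follow the snippet):

        async void OnHypothesisGenerated(SpeechRecognizer sender,
                                         SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                           () =>
            {
                // Show the in-progress guess; OnResultGenerated clears it again
                // once the final result for the utterance arrives.
                this.HypothesisedSpeech = args.Hypothesis.Text;
            });
        }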
Example #27
        async void OnSpeechResult(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if ((args.Result.Confidence == SpeechRecognitionConfidence.High) ||
                (args.Result.Confidence == SpeechRecognitionConfidence.Medium))
            {
                if (args.Result?.RulePath?.FirstOrDefault() == "filter")
                {
                    var filter =
                        args.Result.SemanticInterpretation.Properties["emotion"].FirstOrDefault();

                    if (!string.IsNullOrEmpty(filter))
                    {
                        await this.Dispatcher.RunAsync(
                            Windows.UI.Core.CoreDispatcherPriority.Normal,
                            async() =>
                        {
                            await this.photoControl.ShowFilteredGridAsync(filter);
                        }
                            );
                    }
                }
                else if (args.Result.Text.ToLower() == "cheese")
                {
                    await this.Dispatcher.RunAsync(
                        Windows.UI.Core.CoreDispatcherPriority.Normal,
                        async() =>
                    {
                        var photoResult = await this.photoControl.TakePhotoAsync();

                        if (photoResult != null)
                        {
                            await this.AddFaceBasedTagsToPhotoAsync(photoResult);
                            await this.AddEmotionBasedTagsToPhotoAsync(photoResult);
                            await this.SpeakAsync("That's lovely, you look great!");
                        }
                    }
                        );
                }
            }
        }
Example #28
        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender,
                                                                  SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            //Just in case
            if (args.Result.Text == "")
            {
                return;
            }

            //Invoking delegates in case of specified tags
            if (args.Result.Constraint.Tag == "login")
            {
                //Check if method exists and invoke it
                Login?.Invoke();
            }
            else if (args.Result.Constraint.Tag == "logout")
            {
                Logout?.Invoke();
            }
        }
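The Login and Logout members invoked above are assumed to be plain notification events declared on the same class, for example:

        // Assumed declarations; consumers subscribe to react to the voice commands.
        public event Action Login;
        public event Action Logout;

        // Example subscription:
        // voiceService.Login  += () => ShowLoginPage();
        // voiceService.Logout += () => ShowLogoutPage();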
Example #29
 private async void Con_Result(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Text.Contains("yes"))
     {
         if (marked)
         {
             queCounter++;
             marked = false;
         }
         if (queCounter == 1)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick1.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == 2)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick2.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == 3)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick3.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == 4)
         {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack(questionList[queCounter]));
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick4.Visibility = Visibility.Visible);
             queCounter++;
         }
         else if (queCounter == questionList.Count()) {
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => tick5.Visibility = Visibility.Visible);
             await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => talkBack("Sign in checklist complete, you can track equipment now."));
             await sender.StopAsync();
         }
     }
 }
        //Handle appropriate voice commands
        private void RecognizerResultGenerated(SpeechContinuousRecognitionSession session,
                                               SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Output debug strings
//            Debug.WriteLine(args.Result.Status);
//            Debug.WriteLine(args.Result.Text);

            //This is how we will tell which module grammar the command came from
//            Debug.WriteLine("Grammar File Constraint Tag: " + args.Result.Constraint.Tag);
//
//            Debug.WriteLine(args.Result.Confidence.ToString());
//            Debug.WriteLine("Confidence 0 = High, 1 = Medium, 2 = Low");
//
//            int count = args.Result.SemanticInterpretation.Properties.Count;
//            Debug.WriteLine("Count: " + count);
//            Debug.WriteLine("Tag: " + args.Result.Constraint.Tag);

            //Receive the commands and pass it to the appropriate module to handle
            IVoiceController commandsModule = activeModules[args.Result.Constraint.Tag];

            commandsModule.EnqueueCommand(args.Result);
        }
 /// <summary>
 /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
 /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
 /// low confidence).
 /// </summary>
 /// <param name="sender">The Recognition session that generated this result</param>
 /// <param name="args">Details about the recognized speech</param>
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their 
     // grammar based on testing.
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             HandleRecognitionResult(args.Result);
         });
     }
     // Prompt the user if recognition failed or recognition confidence is low.
     else if (args.Result.Confidence == SpeechRecognitionConfidence.Rejected ||
     args.Result.Confidence == SpeechRecognitionConfidence.Low)
     {
         // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
         // is not the primary input mechanism for the application.
         await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
         {
             resultTextBlock.Text = speechResourceMap.GetValue("SRGSGarbagePromptText", speechContext).ValueAsString;
         });
     }
 }
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                dictatedTextBuilder.Append(args.Result.Text + " ");

                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Collapsed;

                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    btnClearText.IsEnabled = true;
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    string discardedText = args.Result.Text;
                    if (!string.IsNullOrEmpty(discardedText))
                    {
                        discardedText = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");

                        discardedTextBlock.Text = "Discarded due to low/rejected Confidence: " + discardedText;
                        discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
                    }
                });
            }
        }
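Continuous dictation sessions also end on their own, for example after a silence timeout. A sketch of a matching Completed handler using the same field names as the snippet above (the handler itself is not part of the excerpt):

        private async void ContinuousRecognitionSession_Completed(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionCompletedEventArgs args)
        {
            if (args.Status != SpeechRecognitionResultStatus.Success)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    // Keep whatever was committed so far and surface why the session stopped.
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    discardedTextBlock.Text = "Dictation stopped: " + args.Status;
                    discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
                });
            }
        }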
Example #33
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                dictatedTextBuilder.Append(args.Result.Text + " ");

                await Dispatcher.InvokeAsync(() => {
                    DictationTextBox.Text = dictatedTextBuilder.ToString();
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await Dispatcher.InvokeAsync(() => {
                    DictationTextBox.Text = dictatedTextBuilder.ToString();
                    string discardedText  = args.Result.Text;
                    if (!string.IsNullOrEmpty(discardedText))
                    {
                        discardedText         = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");
                        DictationTextBox.Text = "Discarded due to low/rejected Confidence: " + discardedText;
                    }
                });
            }
        }
Example #34
        private void RecognizerResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Output debug strings
            Debug.WriteLine(args.Result.Status);
            Debug.WriteLine(args.Result.Text);

            int count = args.Result.SemanticInterpretation.Properties.Count;

            Debug.WriteLine("Count: " + count);
            Debug.WriteLine("Tag: " + args.Result.Constraint.Tag);

            // Read tags
            var bodyName = args.Result.GetTag(TAG_BODYNAME);

            Debug.WriteLine("Body: " + bodyName);

            // Try to find the body
            var body = system.Bodies.Where(b => b.Name == bodyName).FirstOrDefault();

            // Notify
            if (ResultRecognized != null)
            {
                if (body != null)
                {
                    var bodies = new List<CelestialBody>() { body };
                    ResultRecognized(this, new CelestialSpeechResult(bodies));
                }
            }
        }
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (!this.Completed)
            {

                try
                {
                    //sender.StopAsync();
                    //this.Completed = true;
                    sender.CancelAsync();
                }
                catch (Exception)
                {

                }
                await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        var mensaje = args.Result.Text.Replace(".", string.Empty);
                        if (mensaje.Contains("Text")) mensaje = "Texto";
                        var vm = new ImageSearchViewModel();
                        // mensaje = "Texto";
                        vm.Search(mensaje);
                        if (vm.SearchResult.Count > 0)
                        {
                            Frame.Navigate(typeof (SearchListView), vm);
                        }
                        else
                        {
                            var auth = new Authentication("713034f5c7994f089c1d5a70c1a12ede", "54c4cd393679455d90a48250cde0cfa4");
                            var token = auth.GetAccessToken();
                            var requestUri = "https://speech.platform.bing.com/synthesize";

                            var sb = new StringBuilder();
                            sb.AppendFormat("No se han encontrado resultados de búsqueda.");
                            
                            var cortana = new Synthesize(new Synthesize.InputOptions()
                            {
                                RequestUri = new Uri(requestUri),
                                Text = sb.ToString(),
                                VoiceType = Gender.Female,
                                Locale = "es-es",
                                VoiceName = "Microsoft Server Speech Text to Speech Voice (en-US, ZiraRUS)",
                                OutputFormat = AudioOutputFormat.Riff16Khz16BitMonoPcm,
                                AuthorizationToken = "Bearer " + token.access_token,
                            });

                            cortana.OnAudioAvailable += PlayAudio;
                            cortana.OnError += ErrorHandler;
                            cortana.Speak(CancellationToken.None);
                            cortana = null;
                        }
                    });
            }
        }
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The garbage rule will not have a tag associated with it, the other rules will return a string matching the tag provided
            // when generating the grammar.
            string tag = "unknown";
            if (args.Result.Constraint != null)
            {
                tag = args.Result.Constraint.Tag;
            }

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their 
            // grammar based on testing.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High ||
                args.Result.Confidence == SpeechRecognitionConfidence.Low)
            {
                if (tag == "Note")
                {
                    this.isNote = true;
                    await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () =>
                    {
                        this.defaultViewModel.Trip.Notes = speechResourceMap.GetValue("NotesTip", speechContext).ValueAsString;
                        try
                        {
                            await speechRecognizerNote.ContinuousRecognitionSession.StartAsync();
                        }
                        catch (Exception ex)
                        {
                            var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                            await messageDialog.ShowAsync();
                        }
                    });

                }
                else if (tag == "Trip")
                {
                    this.isNote = false;
                    await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        if (defaultViewModel.Trip.Notes == speechResourceMap.GetValue("NotesTip", speechContext).ValueAsString)
                            defaultViewModel.Trip.Notes = this.originalNote;
                        defaultViewModel.SaveTrip();
                    });
                }
            }
            else
            {
                if (this.isNote && !string.IsNullOrEmpty(this.hypothesis))
                {
                    await this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        this.defaultViewModel.Trip.Notes = this.hypothesis;
                    });
                }
            }
        }
Example #37
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            try
            {
                //Debug.WriteLine(Enum.GetName(typeof(SpeechRecognitionConfidence), args.Result.Confidence));

                if (!_commandExecuting)
                {
                    Debug.WriteLine(args.Result.Text);

                    _commandExecuting = true;
                    if (args.Result.Text.Contains("call") && args.Result.Text.Contains("MUMMY"))
                    {
                        _notAudioCommand = true;
                        SpeechSynthesizer synt = new SpeechSynthesizer();
                        SpeechSynthesisStream syntStream =
                            await synt.SynthesizeTextToStreamAsync("Calling your mum. Just a moment.");

                        await
                           Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                               () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });

                        await SendCommandToServer("CallMummy");
                    }

                    if (args.Result.Text.Contains("light") &&
                        args.Result.Text.Contains("ON") && args.Result.Text.Contains("turn"))
                    {
                       
                        SpeechSynthesizer synt = new SpeechSynthesizer();
                        SpeechSynthesisStream syntStream =
                            await synt.SynthesizeTextToStreamAsync("Success! Lights are active!");
                        await
                            Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                  async () =>
                                {

                                    mediaElement.SetSource(syntStream, syntStream.ContentType);
                                    await WriteBlecOmmand("l1on\n");
                                    await WriteBlecOmmand("l2on\n");
                                });

                        _notAudioCommand = true;
                    }

                    if (args.Result.Text.Contains("light") &&
                        args.Result.Text.Contains("turn") && args.Result.Text.Contains("OFF"))
                    {
                       
                        SpeechSynthesizer synt = new SpeechSynthesizer();
                        SpeechSynthesisStream syntStream =
                            await synt.SynthesizeTextToStreamAsync("Success! No lights active!");
                        await
                            Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                 async () =>
                                {
                                    mediaElement.SetSource(syntStream, syntStream.ContentType);
                                    await WriteBlecOmmand("l1off\n");
                                    await WriteBlecOmmand("l2off\n");
                                });

                        _notAudioCommand = true;
                    }

                    if (args.Result.Text.Contains("call") && args.Result.Text.Contains("DADDY"))
                    {
                        _notAudioCommand = true;
                        SpeechSynthesizer synt = new SpeechSynthesizer();
                        SpeechSynthesisStream syntStream =
                            await synt.SynthesizeTextToStreamAsync("Calling your dad. Just a moment. ");

                        await
                           Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                               () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });

                        await SendCommandToServer("CallDaddy");
                    }

                    //if (args.Result.Text.Contains("emergency") && args.Result.Text.Contains("call"))
                    //{
                    //    _notAudioCommand = true;
                    //    SpeechSynthesizer synt = new SpeechSynthesizer();
                    //    SpeechSynthesisStream syntStream =
                    //        await
                    //            synt.SynthesizeTextToStreamAsync(
                    //                "Calling nine one one. It will take only a moment sweety -  if you can, try to get help and take your phone with you. If not stay calm and wait for the operator to respond.");
                    //    await
                    //        Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                    //            () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });
                    //}

                    if (!_audioRunning)
                    {
                        _notAudioCommand = true;
                        if (args.Result.Text.Contains("one") && args.Result.Text.Contains("soundtrack") &&
                                       args.Result.Text.Contains("play"))
                        {
                            await SendCommandToServer("PlaySong1");

                            _audioRunning = true;

                            SpeechSynthesizer synt = new SpeechSynthesizer();
                            SpeechSynthesisStream syntStream =
                                await synt.SynthesizeTextToStreamAsync("Playing soundtrack one");
                            await
                                Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                    () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });
                        }

                        if (args.Result.Text.Contains("two") && args.Result.Text.Contains("soundtrack") &&
                            args.Result.Text.Contains("play"))
                        {
                            await SendCommandToServer("PlaySong2");

                            _audioRunning = true;

                            SpeechSynthesizer synt = new SpeechSynthesizer();
                            SpeechSynthesisStream syntStream =
                                await synt.SynthesizeTextToStreamAsync("Playing soundtrack two");
                            await
                                Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                    () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });
                        }

                        if (args.Result.Text.Contains("three") && args.Result.Text.Contains("soundtrack") &&
                            args.Result.Text.Contains("play"))
                        {
                            _audioRunning = true;

                            await SendCommandToServer("PlaySong3");

                            SpeechSynthesizer synt = new SpeechSynthesizer();
                            SpeechSynthesisStream syntStream =
                                await synt.SynthesizeTextToStreamAsync("Playing soundtrack three");
                            await
                                Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                    () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });
                        }

                        if (args.Result.Text.Contains("four") && args.Result.Text.Contains("soundtrack") &&
                            args.Result.Text.Contains("play"))
                        {
                            _audioRunning = true;

                            await SendCommandToServer("PlaySong4");

                            SpeechSynthesizer synt = new SpeechSynthesizer();
                            SpeechSynthesisStream syntStream =
                                await synt.SynthesizeTextToStreamAsync("Playing soundtrack four");
                            await
                                Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                    () => { mediaElement.SetSource(syntStream, syntStream.ContentType); });
                        }
                    }

                    if (args.Result.Text.Contains("iron") && args.Result.Text.Contains("man") &&
                       (args.Result.Text.Contains("green") || args.Result.Text.Contains("red") || args.Result.Text.Contains("blue") || args.Result.Text.Contains("off")))
                    {

                        _notAudioCommand = true;

                        SpeechSynthesizer synt = new SpeechSynthesizer();
                        SpeechSynthesisStream syntStream =
                            await synt.SynthesizeTextToStreamAsync("Success! Color changed!");
                        await
                            Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                async () =>
                                {
                                    mediaElement.SetSource(syntStream, syntStream.ContentType);
                                    if (args.Result.Text.Contains("red"))
                                    {
                                        await WriteBlecOmmand("boff\n");
                                        await WriteBlecOmmand("bred\n");
                                    }

                                    if (args.Result.Text.Contains("green"))
                                    {
                                        await WriteBlecOmmand("boff\n");
                                        await WriteBlecOmmand("bgreen\n");
                                    }

                                    if (args.Result.Text.Contains("blue"))
                                    {
                                        await WriteBlecOmmand("boff\n");
                                        await WriteBlecOmmand("bblue\n");
                                    }
                                    if (args.Result.Text.Contains("off"))
                                    {
                                        await WriteBlecOmmand("boff\n");

                                    }
                                });
                    }

                    if (args.Result.Text.Contains("clock") &&
                        (args.Result.Text.Contains("on") || args.Result.Text.Contains("off")))
                    {
                        _notAudioCommand = true;
                        SpeechSynthesizer synt = new SpeechSynthesizer();
                        SpeechSynthesisStream syntStream =
                            await synt.SynthesizeTextToStreamAsync("Success! Time information panel status changed.");

                        await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                             {
                                 mediaElement.SetSource(syntStream, syntStream.ContentType);
                             });

                        if (args.Result.Text.Contains("off"))
                        {
                            await SendCommandToServer("DimmClock");
                        }

                        if (args.Result.Text.Contains("on"))
                        {
                            await SendCommandToServer("UndimmClock");
                        }

                    }

                    if (args.Result.Text.Contains("audio") &&
                      (args.Result.Text.Contains("pause") || args.Result.Text.Contains("stop") || args.Result.Text.Contains("continue")))
                    {
                        _notAudioCommand = false;

                        if (_audioRunning)
                        {
                            string commandText = default(string);

                            if (args.Result.Text.Contains("pause"))
                            {
                                commandText = "Media waiting state!";
                                await SendCommandToServer("PauseSong");
                                _audioRunning = true;
                            }

                            if (args.Result.Text.Contains("continue"))
                            {
                                commandText = "Media resuming!";
                                await SendCommandToServer("ResumeSong");
                                _audioRunning = true;
                            }

                            if (args.Result.Text.Contains("stop"))
                            {
                                commandText = "Media off!";
                                await SendCommandToServer("StopSong");
                                _audioRunning = false;
                                _notAudioCommand = true;
                            }

                            SpeechSynthesizer synt = new SpeechSynthesizer();
                            SpeechSynthesisStream syntStream =
                                await synt.SynthesizeTextToStreamAsync(commandText);

                            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                mediaElement.SetSource(syntStream, syntStream.ContentType);
                            });
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }

          
        }
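The handler above repeats the same synthesize-then-play sequence for each voice command. A minimal helper along the following lines could factor that pattern out; it is only a sketch, assuming the same mediaElement and Dispatcher members used above (plus the usual Windows.Media.SpeechSynthesis and Windows.UI.Core usings), and is not part of the original project.

        // Sketch only: speaks a confirmation phrase through the page's MediaElement.
        private async Task SpeakConfirmationAsync(string text)
        {
            using (var synthesizer = new SpeechSynthesizer())
            {
                // Synthesize the phrase to an audio stream off the UI thread.
                SpeechSynthesisStream stream = await synthesizer.SynthesizeTextToStreamAsync(text);

                // MediaElement belongs to the UI thread, so marshal playback there.
                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    mediaElement.SetSource(stream, stream.ContentType);
                    // Play() is called explicitly here; the original appears to rely on MediaElement.AutoPlay.
                    mediaElement.Play();
                });
            }
        }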
Example #38
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            string s = args.Result.Text;
            //Send the Data
            SendDataToHost(s);

            if (args.Result.Status == SpeechRecognitionResultStatus.Success)
            {
                dictatedTextBuilder.Append(s + " ");
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    btnClearText.IsEnabled = true;
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                });
            }

        }
 /// <summary>
 /// Recognition event handler.
 /// </summary>
 /// <param name="sender">The recognition session that generated this result</param>
 /// <param name="args">Details about the recognized speech</param>
 private void RecognitionFound(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Status == SpeechRecognitionResultStatus.Success)
     {
         var command = FindCommand(args.Result.Text);
         OnRecognitionCommandFound(args.Result, command);
     }
 }
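FindCommand is not included in this snippet. The sketch below is purely hypothetical: it illustrates the kind of phrase-to-command lookup the handler above could rely on, with VoiceCommand and _knownCommands as assumed names.

 // Hypothetical sketch of FindCommand; types and field names are illustrative only.
 private VoiceCommand FindCommand(string recognizedText)
 {
     // Assumed dictionary mapping known phrases to command objects.
     foreach (var pair in _knownCommands)
     {
         if (recognizedText.IndexOf(pair.Key, StringComparison.OrdinalIgnoreCase) >= 0)
         {
             return pair.Value;
         }
     }
     return null; // OnRecognitionCommandFound can treat null as "no match".
 }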
Example #40
0
File: Ear.cs Project: marcodiniz/TinBot
        private void ContinuousRecognitionSessionOnResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var result = args.Result;
            if (result.Status == SpeechRecognitionResultStatus.Success &&
                result.Confidence == SpeechRecognitionConfidence.High)
            {
                var tags = result.Constraint.Tag.Split(';');
                var actionName = tags[new Random().Next(tags.Length)];
                var action = TinBotData.ActionsLib[actionName];

                if (action != null)
                    ActionRequested?.Invoke(this, action);
            }
            else
            {
                ActionRequested?.Invoke(this, new SavedAction("LPulseRed"));
            }
        }
Example #41
0
        /// <summary>
        /// Handles successfully recognized speech commands.
        /// </summary>
        /// <param name="sender">The session of <see cref="SpeechContinuousRecognitionSession"/> that generated the result</param>
        /// <param name="args">The generated result arguments</param>
        private void RecognizerResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var rssProvider = m_Container.Resolve<RssProvider>();

            var command = args.Result.SemanticInterpretation.Properties.ContainsKey("command") ?
                           args.Result.SemanticInterpretation.Properties["command"][0].ToString() :
                           "";
            var subreddit = args.Result.SemanticInterpretation.Properties.ContainsKey("subreddit") ?
                             args.Result.SemanticInterpretation.Properties["subreddit"][0].ToString() :
                             "";
            var page = args.Result.SemanticInterpretation.Properties.ContainsKey("page") ?
                             args.Result.SemanticInterpretation.Properties["page"][0].ToString() :
                             "";

            Debug.WriteLine(string.Format("Command: {0}, SubReddit: {1}, Page: {2}", command, subreddit, page));

            if (!string.IsNullOrWhiteSpace(subreddit) && (rssProvider != null))
            {
                rssProvider.Subreddit = subreddit;
            }


            if (!string.IsNullOrWhiteSpace(command))
            {
                switch (command)
                {
                    case "on":
                        DispatcherHelper.RunOnUIThread(() =>
                        {
                            var frame = m_Container.Resolve<Window>().Content as Frame;
                            frame.Navigate(typeof(MainPage));
                        });
                        break;

                    case "off":
                        DispatcherHelper.RunOnUIThread(() =>
                        {
                            var frame = m_Container.Resolve<Window>().Content as Frame;
                            frame.Navigate(typeof(BlankPage));
                        });
                        break;

                    default:
                        break;
                }
            }

            var navigationPage = ParseNavigationPage(page);
            if ((navigationPage != null))
            {
                DispatcherHelper.RunOnUIThread(() =>
                {
                    var frame = m_Container.Resolve<Window>().Content as Frame;
                    frame.Navigate(navigationPage);
                });
            }
        }
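ParseNavigationPage is not shown in this example. A hypothetical sketch of such a helper follows; the page keywords are illustrative assumptions, while MainPage and BlankPage are the types already navigated to above.

        // Hypothetical sketch: maps a recognized page keyword to a page type, or null when unknown.
        private static Type ParseNavigationPage(string page)
        {
            switch (page?.ToLowerInvariant())
            {
                case "main":  return typeof(MainPage);
                case "blank": return typeof(BlankPage);
                default:      return null; // Caller skips navigation when null is returned.
            }
        }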
Example #42
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            SpeechRecognitionResult tmpRes = args.Result;
            if (tmpRes != null && tmpRes.Status.Equals(SpeechRecognitionResultStatus.Success))

            {
                if (tmpRes.Confidence == SpeechRecognitionConfidence.Rejected)
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        listenText.Text = "didn't get cha.";
                    });
                else
                    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        listenText.Text = tmpRes.Text;
                        if (!slideshow)
                        {
                            if (listenText.Text.Equals("Light on") || listenText.Text.Equals("on") || listenText.Text.Equals("light") || listenText.Text.Equals("bright"))
                            {
                                lightPinValue = GpioPinValue.Low;
                                lightPin.Write(lightPinValue);
                                LED.Fill = redBrush;
                                lightOn = true;
                            }
                            if (listenText.Text.Equals("Light off") || listenText.Text.Equals("off") || listenText.Text.Equals("dark"))
                            {
                                lightPinValue = GpioPinValue.High;
                                lightPin.Write(lightPinValue);
                                LED.Fill = grayBrush;
                                lightOn = false;
                            }
                            if (listenText.Text.Equals("next") || listenText.Text.Equals("forward"))
                            {
                                triggerPinValue = GpioPinValue.Low;
                                timer.Interval = TimeSpan.FromMilliseconds(200);
                                timer.Tick += Timer_Tick;
                                triggerPin.Write(triggerPinValue);
                                timer.Start();
                                slideCounter++;
                            }
                            if (listenText.Text.Equals("previous") || listenText.Text.Equals("back"))
                            {
                                triggerPinValue = GpioPinValue.Low;
                                timer.Interval = TimeSpan.FromMilliseconds(700);
                                timer.Tick += Timer_Tick;
                                triggerPin.Write(triggerPinValue);
                                timer.Start();
                                slideCounter--;
                            }
                            if (listenText.Text.Equals("start slideshow") || listenText.Text.Equals("slideshow"))
                            {
                                //triggerPinValue = GpioPinValue.Low;
                                //triggerPin.Write(triggerPinValue);
                                timer = new DispatcherTimer();
                                timer.Interval = TimeSpan.FromMilliseconds(100);
                                timer.Tick += Slideshow_Tick;
                                timer.Start();
                                slideshow = true;
                            }
                        }
                        else //slideshow mode
                        {
                            if (listenText.Text.Equals("stop slideshow") || listenText.Text.Equals("stop"))
                            {

                                timer.Stop();
                                timer.Tick -= Slideshow_Tick;
                                slideshow = false;
                                timer = new DispatcherTimer(); //??
                                //if (timer.Tick != null)
                                {

                                }
                            }
                        }
                    });
            }
        }
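Timer_Tick is not included in this snippet. The sketch below assumes it simply ends the pulse the handler starts: it drives the trigger pin back high and stops the one-shot timer. That behaviour is an assumption, not taken from the original project.

        // Assumed sketch of Timer_Tick: releases the trigger pulse started above.
        private void Timer_Tick(object sender, object e)
        {
            timer.Stop();
            timer.Tick -= Timer_Tick;          // Detach so repeated commands don't stack handlers.
            triggerPinValue = GpioPinValue.High;
            triggerPin.Write(triggerPinValue); // Idle state: trigger released.
        }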
Example #43
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            //if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
            //  args.Result.Confidence == SpeechRecognitionConfidence.High)
            //{
            dictatedTextBuilder.Append(args.Result.Text + " ");

            if (_currentQuestion.Key == "subject")
            {
                var words = args.Result.Text.Split(' ');
                for (int i = 0; i < words.Length; i++)
                {
                    if (words[i] == "un" || words[i] == "une")
                    {
                        _currentQuestion.Value = words[i + 1];
                        break;
                    }
                }

                for (int i = words.Length - 1; i >= 0; i--)
                {
                    if (words[i].StartsWith("personne"))
                    {
                        var q = _questions.Single(e => e.Key == QuestionsType.nbpers.ToString());
                        var nb = q.Finder.Resolve(args.Result.Text);

                        if (nb != null)
                        {
                            q.Value = nb;
                            q.HasBeenAsked = true;
                        }

                        break;
                    }
                }

                if (args.Result.Text.Contains("soyons fou") || args.Result.Text.Contains("soyons fous") || args.Result.Text.Contains("soyons-fou"))
                    _isCrazy = true;

            }
            else
            {
                if (args.Result.Text.Contains("quelle est la différence"))
                {
                    Speak(_currentQuestion.Help);
                    return;
                }
                else
                    _currentQuestion.Value = _currentQuestion.Finder.Resolve(args.Result.Text);
            }

            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        DictationTextBox.Text = dictatedTextBuilder.ToString();
                    });

            if (_currentQuestion.Value == null)
            {
                AskQuestionAgain();
                return;
            }

            _currentQuestion.HasBeenAsked = true;
            var nextQ = _questions.Where(e => !e.HasBeenAsked).FirstOrDefault();

            if (nextQ == null)
            {
                EndConversation();
            }
            else
                AskQuestion(nextQ);

            //}
            //else
            //{
            //    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            //    {
            //        DictationTextBox.Text = dictatedTextBuilder.ToString();
            //    });
            //}
        }
Example #44
0
 /// <summary>
 /// Intermediate speech recognition result received.
 /// Store the confidence so that we can calculate the average at the end.
 /// Also store the text that was recognized in this result segment.
 /// </summary>
 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     Debug.WriteLine("Recognized: " + args.Result.RawConfidence.ToString(CultureInfo.CurrentCulture) + ", " + args.Result.Text);
     _averageScore += args.Result.RawConfidence;
     _numSegments++;
     _recognizedText.AppendLine(args.Result.Text);
 }
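The comment above says the confidence is stored so the average can be computed at the end. That closing step is not shown; a sketch of a companion Completed handler, assuming the same _averageScore, _numSegments, and _recognizedText fields, could look like this:

 // Sketch of an assumed session-Completed handler: averages the accumulated RawConfidence values.
 private void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args)
 {
     double average = _numSegments > 0 ? _averageScore / _numSegments : 0.0;
     Debug.WriteLine($"Average confidence over {_numSegments} segments: {average:F2}");
     Debug.WriteLine("Full recognized text: " + _recognizedText.ToString());
 }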
Example #45
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                switch (args.Result.Text)
                {
                    case "product":
                        await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            captureElement();
                        });
                        break;
                    case "Reset":
                        await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            resetSetup();
                        });
                        break;
                    case "How Old":
                        await Media.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            captureElement_Old();
                        });
                        break;
                    default:
                        break;
                }
            }

        }
Example #46
0
        private void FindResults(SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            var results = _agendaService.FindSessionsByKeyword(args.Result.Text);
            var list = results.Where(r => r.Value > 0).OrderByDescending(r => r.Value).Take(10);

            CodecampSessions = new ObservableCollection<Session>();
            foreach (var keyValuePair in list)
            {
                CodecampSessions.Add(keyValuePair.Key);
            }
        }
Example #47
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs arguments)
        {
            Debug.WriteLine("User Input: " + arguments.Result.Text);

            var recoResult = arguments.Result;
            var prop = recoResult.SemanticInterpretation.Properties as IReadOnlyDictionary<string, IReadOnlyList<string>>;
            var dict = new Dictionary<string, string>();
            foreach (var key in prop.Keys)
            {
                var item = prop[key];
                dict.Add(key, item[0]);
            }
            var uri = new System.Uri("ms-appx:///Assets/ResponseTemplates.xml");
            var file = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(uri);
            DialogueManager dm = new DialogueManager(file);
            args.Name = "COFFEE_SHOP";
            string response = dm.GenerateResponse(dict, ref args);
            hasSpoken = true;
            if (response != null)
                VoiceResponse(response);
            //Debug.WriteLine(args.ToString());
            //ParseUtteranceMeaning(prop);
        }
    async void OnSpeechResult(
      SpeechContinuousRecognitionSession sender,
      SpeechContinuousRecognitionResultGeneratedEventArgs args)
    {
      if ((args.Result.Confidence == SpeechRecognitionConfidence.High) ||
          (args.Result.Confidence == SpeechRecognitionConfidence.Medium))
      {
        if (args.Result?.RulePath?.FirstOrDefault() == "filter")
        {
          var filter =
            args.Result.SemanticInterpretation.Properties["emotion"].FirstOrDefault();

          if (!string.IsNullOrEmpty(filter))
          {
            await this.Dispatcher.RunAsync(
              Windows.UI.Core.CoreDispatcherPriority.Normal,
              async () =>
              {
                await this.photoControl.ShowFilteredGridAsync(filter);
              }
            );
          }
        }
        else if (args.Result.Text.ToLower() == "cheese")
        {
          await this.Dispatcher.RunAsync(
            Windows.UI.Core.CoreDispatcherPriority.Normal,
            async () =>
            {
              var photoResult = await this.photoControl.TakePhotoAsync();

              if (photoResult != null)
              {
                await this.AddFaceBasedTagsToPhotoAsync(photoResult);
                await this.AddEmotionBasedTagsToPhotoAsync(photoResult);
                await this.SpeakAsync("That's lovely, you look great!");
              }
            }
          );
        }
      }
    }
 /// <summary>
 /// Occurs when speech recognizer has a result.
 /// </summary>
 /// <param name="sender">The sender.</param>
 /// <param name="args">The args.</param>
 private void OnContinuousRecognitionSessionResult(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (this.OnKeywordRecognized != null)
     {
         string text       = args.Result.Text;
         var    confidence = args.Result.Confidence;
         this.OnKeywordRecognized(new KeywordRecognizerResult(text, (ConfidenceLevel)confidence));
     }
 }
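The direct cast above works only if the numeric values of SpeechRecognitionConfidence happen to line up with the consumer-side ConfidenceLevel enum. When that cannot be guaranteed, an explicit conversion is safer; the sketch below assumes ConfidenceLevel exposes High, Medium, Low, and Rejected members.

 // Sketch: explicit mapping instead of relying on matching enum values (assumed members).
 private static ConfidenceLevel ToConfidenceLevel(SpeechRecognitionConfidence confidence)
 {
     switch (confidence)
     {
         case SpeechRecognitionConfidence.High:   return ConfidenceLevel.High;
         case SpeechRecognitionConfidence.Medium: return ConfidenceLevel.Medium;
         case SpeechRecognitionConfidence.Low:    return ConfidenceLevel.Low;
         default:                                 return ConfidenceLevel.Rejected;
     }
 }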
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The garbage rule will not have a tag associated with it, the other rules will return a string matching the tag provided
            // when generating the grammar.
            string tag = "unknown";
            if (args.Result.Constraint != null)
            {
                tag = args.Result.Constraint.Tag;
            }

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their 
            // grammar based on testing.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    switch (tag)
                    {
                        case "Check":
                            CheckRoom();
                            break;
                        //case "Book":
                        //    BookRoom();
                        //    break;
                        case "Set":
                            SetRoom();
                            break;
                        case "apple":
                            if (settingRoomMode)
                                Activate("Apple");
                            settingRoomMode = false;
                            break;
                        case "raspberry":
                            if (settingRoomMode)
                                Activate("Raspberry");
                            settingRoomMode = false;
                            break;
                        case "melon":
                            if (settingRoomMode)
                                Activate("Melon");
                            settingRoomMode = false;
                            break;
                        case "Hello":
                            TakePicture();
                            break;

                        default:
                            break;
                    }
                });

            }
        }
Example #51
0
        private async void RecognizerResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            Log.i(args.Result.Status.ToString());
            Log.i(args.Result.Confidence.ToString());
            Log.i(string.IsNullOrEmpty(args.Result.Text) ? "[NOTEXT]" : args.Result.Text);

            int confidence = (int)args.Result.Confidence;
            if (args.Result.Status == SpeechRecognitionResultStatus.Success && confidence < 2)
            {
                string text = args.Result.Text;

                if (!string.IsNullOrEmpty(text))
                {
                    text = text.ToUpper();

                    if (text == "SHOW" || text == "NEWS" || text == "DETAIL")
                    {
                        await EnsureOnUI(() => this.News.ViewDetail());
                    }
                    else if (text == "HIDE" || text == "CLOSE" || text == "TIME" || text == "BACK" || text == "ESCAPE")
                    {
                        await EnsureOnUI(() => { this.News.HideDetail(); this.Weather.HideDetail(); });
                    }
                    else if (text == "WEATHER")
                    {
                        await EnsureOnUI(() => this.Weather.ViewDetail());
                    }
                    else if (text == "STOP" || text == "PAUSE")
                    {
                        await EnsureOnUI(() => Playback.Instance.Pause());
                    }
                    else if (text == "LOUDER")
                    {
                        await EnsureOnUI(() => Playback.Instance.Louder());
                    }
                    else if (text == "QUIETER")
                    {
                        await EnsureOnUI(() => Playback.Instance.Quieter());
                    }
                    else if (text == "RADIO")
                    {
                        var radio = new Configuration.Configuration().Radios.FirstOrDefault();
                        if (radio != null)
                            await EnsureOnUI(()=>Playback.Instance.LoadAndPlay(radio));
                    }
                    else
                    {
                        var radio = new Configuration.Configuration().Radios.Where(A => A.PhoneticName == text).FirstOrDefault();
                        if (radio != null)
                            await EnsureOnUI(()=>Playback.Instance.LoadAndPlay(radio));
                    }
                }
            }

        }
Example #52
0
 private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
         async () =>
         {
             var cmd = args.Result.SemanticInterpretation.Properties["cmd"][0].ToString();
             var param = "";
             if (args.Result.SemanticInterpretation.Properties.ContainsKey("param"))
             {
                 param = args.Result.SemanticInterpretation.Properties["param"][0].ToString();
             }
             if (param=="")
             {
                 if (cmd == "forw" || cmd == "back") param = "50";
                 if (cmd == "left" || cmd == "right") param = "90";
             }
             stat.Text = cmd+" "+param;
             await Exec(cmd, double.Parse(param));
             // "Recognized, conf="+args.Result.Confidence.ToString();
         });
 }
Example #53
0
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                dictatedTextBuilder.Append(args.Result.Text + " ");

                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    //  discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Collapsed;
                    var recoString = dictatedTextBuilder.ToString();

                    if (recoString.Contains("clear"))
                    {
                        commandBox.Text = "";
                        dictatedTextBuilder.Clear();
                    }
                    else
                    {
                        commandBox.Text = recoString;
                    }

                    // btnClearText.IsEnabled = true;
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    var recoString = dictatedTextBuilder.ToString();
                    if (recoString.Contains("clear"))
                    {
                        commandBox.Text = "";
                        dictatedTextBuilder.Clear();
                    }
                    else
                    {
                        commandBox.Text = recoString;
                    }
                    string discardedText = args.Result.Text;
                    if (!string.IsNullOrEmpty(discardedText))
                    {
                        discardedText = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");

                        //discardedTextBlock.Text = "Discarded due to low/rejected Confidence: " + discardedText;
                        // discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
                    }
                });
            }
        }
 /// <summary>
 /// Triggers cortana with simulated key combo if constraint is heard.
 /// </summary>
 /// <param name="session">backgroundListener's continuous recognition session.</param>
 /// <param name="args">Result arguments</param>
 private void blResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Constraint != null)
     {
         switch (args.Result.Text)
         {
             case "Open Netflix now":
                 Process.Start(@"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe", @"http:\\www.netflix.com");
                 break;
             default:
                 InputSimulator.SimulateModifiedKeyStroke(VirtualKeyCode.LWIN, VirtualKeyCode.VK_S);
                 break;
         }
     }
 }
Example #55
0
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High || args.Result.Confidence == SpeechRecognitionConfidence.Low)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, async() =>
                {
                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text       = args.Result.Text;
                    if (args.Result.Text == "product")
                    {
                        this.Frame.Navigate(typeof(addProduct), null);
                        backButton();
                    }

                    else if (args.Result.Text == "manage")
                    {
                        // this.Frame.Navigate(typeof(myProduct), null);
                        backButton();
                    }

                    else if (args.Result.Text == "order")
                    {
                        //this.Frame.Navigate(typeof(myProduct), null);
                        backButton();
                    }

                    else if (args.Result.Text == "back" || args.Result.Text == "home")
                    {
                        this.Frame.Navigate(typeof(MainPage), null);
                        backButton();
                    }


                    else if (args.Result.Text == "help")
                    {
                        if (this.speechRecognizer != null)
                        {
                            if (isListening)
                            {
                                await this.speechRecognizer.ContinuousRecognitionSession.CancelAsync();
                                isListening = false;
                            }
                            speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                            speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                            speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                            this.speechRecognizer.Dispose();
                            this.speechRecognizer = null;
                        }
                        string help = "welcome to the hopins store. To add new product say product, to manage your product say manage, to manage your order say order, to repeat the instruction, say help.";
                        play(help);
                    }

                    else if (args.Result.Text == "exit")
                    {
                        CoreApplication.Exit();
                    }
                    else if (args.Result.Text == "capture")
                    {
                        if (this.speechRecognizer != null)
                        {
                            if (isListening)
                            {
                                await this.speechRecognizer.ContinuousRecognitionSession.CancelAsync();
                                isListening = false;
                            }
                            speechRecognizer.ContinuousRecognitionSession.Completed       -= ContinuousRecognitionSession_Completed;
                            speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
                            speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
                            this.speechRecognizer.Dispose();
                            this.speechRecognizer = null;
                        }
                        string help = "image captured, if you want to capture another photo, say capture. If you are finish, say next";
                        play(help);
                        CapturePhoto_Click(this, new RoutedEventArgs());
                    }
                    else
                    {
                        resultTextBlock.Text = "I dont understand what you said";
                    }
                });
            }

            else
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Visibility = Visibility.Visible;
                    resultTextBlock.Text       = "sorry, I didnt catch that";
                });
            }
        }
Example #56
0
        // Recognizer generated results
        private async void RecognizerResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Output debug strings
            Debug.WriteLine(args.Result.Status);
            Debug.WriteLine(args.Result.Text);

            int count = args.Result.SemanticInterpretation.Properties.Count;

            Debug.WriteLine("Count: " + count);
            Debug.WriteLine("Tag: " + args.Result.Constraint.Tag);

            // Check for different tags and initialize the variables
            String target = args.Result.SemanticInterpretation.Properties.ContainsKey(TAG_TARGET) ?
                            args.Result.SemanticInterpretation.Properties[TAG_TARGET][0].ToString() :
                            "";

            String cmd = args.Result.SemanticInterpretation.Properties.ContainsKey(TAG_CMD) ?
                         args.Result.SemanticInterpretation.Properties[TAG_CMD][0].ToString() :
                         "";

            String device = args.Result.SemanticInterpretation.Properties.ContainsKey(TAG_DEVICE) ?
                            args.Result.SemanticInterpretation.Properties[TAG_DEVICE][0].ToString() :
                            "";

            // Whether state is on or off
            isOn = cmd.Equals(STATE_ON);

            Debug.WriteLine("Target: " + target + ", Command: " + cmd + ", Device: " + device);
            await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                StatusBlock.Text = ("Target: " + target + ", Command: " + cmd + ", Device: " + device);
            });

            // First check which device the user refers to
            if (device.Equals(DEVICE_LED))
            {
                // Check what color is specified
                if (target.Equals(COLOR_RED))
                {
                    Debug.WriteLine("FIRST LED " + (isOn ? STATE_ON : STATE_OFF));

                    // Turn on the Red LED
                    WriteGPIOPin(redPin, isOn ? GpioPinValue.High : GpioPinValue.Low);

                    await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    {
                        Ellipse1.Fill = (isOn ? blueBrush : whiteBrush);
                    });
                }
                else if (target.Equals(COLOR_GREEN))
                {
                    Debug.WriteLine("SECOND LED " + (isOn ? STATE_ON : STATE_OFF));

                    // Turn on the Green LED
                    WriteGPIOPin(greenPin, isOn ? GpioPinValue.High : GpioPinValue.Low);

                    await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    {
                        Ellipse2.Fill = (isOn ? blueBrush : whiteBrush);
                    });
                }
                else if (target.Equals(COLOR_BOTH))
                {
                    Debug.WriteLine("BOTH LED " + (isOn ? STATE_ON : STATE_OFF));

                    // Turn on the Green LED
                    WriteGPIOPin(greenPin, isOn ? GpioPinValue.High : GpioPinValue.Low);
                    // Turn on the Red LED
                    WriteGPIOPin(redPin, isOn ? GpioPinValue.High : GpioPinValue.Low);

                    await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    {
                        Ellipse1.Fill = (isOn ? blueBrush : whiteBrush);
                        Ellipse2.Fill = (isOn ? blueBrush : whiteBrush);
                    });
                }
                else
                {
                    Debug.WriteLine("Unknown Target");
                }
            }
            else if (device.Equals(DEVICE_LIGHT))
            {
                // Check target location
                if (target.Equals(TARGET_BEDROOM))
                {
                    Debug.WriteLine("BEDROOM LIGHT " + (isOn ? STATE_ON : STATE_OFF));

                    // Turn on the bedroom light
                    WriteGPIOPin(bedroomLightPin, isOn ? GpioPinValue.High : GpioPinValue.Low);
                }
                else if (target.Equals(TARGET_PORCH))
                {
                    Debug.WriteLine("PORCH LIGHT " + (isOn ? STATE_ON : STATE_OFF));

                    // Insert code to control Porch light
                }
                else
                {
                    Debug.WriteLine("Unknown Target");
                }
            }
            else
            {
                Debug.WriteLine("Unknown Device");
            }

            /*foreach (KeyValuePair<String, IReadOnlyList<string>> child in args.Result.SemanticInterpretation.Properties)
             * {
             *  Debug.WriteLine(child.Key + " = " + child.Value.ToString());
             *
             *  foreach (String val in child.Value)
             *  {
             *      Debug.WriteLine("Value = " + val);
             *  }
             * }*/
        }
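WriteGPIOPin is a project helper that is not shown here. A minimal sketch under that assumption, using the standard Windows.Devices.Gpio API, might look like this:

        // Sketch only: thin wrapper over GpioPin.Write, guarding against pins that failed to open.
        private void WriteGPIOPin(GpioPin pin, GpioPinValue value)
        {
            if (pin == null)
            {
                Debug.WriteLine("GPIO pin not initialized; skipping write.");
                return;
            }
            pin.Write(value);
        }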
Example #57
0
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.
            //if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
            //    args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                AppendTextToDictationOutput(args.Result.Text);
            }
            //else
            //{
            //    // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
            //    // is not the primary input mechanism for the application.
            //    // Here, just remove any hypothesis text by resetting it to the last known good.
            //    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            //    {
            //        dictationTextBox.Text = dictatedTextBuilder.ToString();
            //        string discardedText = args.Result.Text;
            //        if (!string.IsNullOrEmpty(discardedText))
            //        {
            //            discardedText = discardedText.Length <= 25 ? discardedText : (discardedText.Substring(0, 25) + "...");

            //            discardedTextBlock.Text = "Discarded due to low/rejected Confidence: " + discardedText;
            //            discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Visible;
            //        }
            //    });
            //}
        }
        /// <summary>
        /// Handle events fired when a result is generated. This may include a garbage rule that fires when general room noise
        /// or side-talk is captured (this will have a confidence of Rejected typically, but may occasionally match a rule with
        /// low confidence).
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // The garbage rule will not have a tag associated with it, the other rules will return a string matching the tag provided
            // when generating the grammar.
            string tag = "unknown";
            if (args.Result.Constraint != null)
            {
                tag = args.Result.Constraint.Tag;
            }

            // Developers may decide to use per-phrase confidence levels in order to tune the behavior of their 
            // grammar based on testing.
            if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
                args.Result.Confidence == SpeechRecognitionConfidence.High)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Text = string.Format("Heard: '{0}', (Tag: '{1}', Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString());
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    resultTextBlock.Text = string.Format("Sorry, I didn't catch that. (Heard: '{0}', Tag: {1}, Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString());
                });
            }
        }
 private void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
 {
     if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
         args.Result.Confidence == SpeechRecognitionConfidence.High)
     {
         speechToTextEventArgs.SpeechResult = args.Result.Text;
         OnHaveResultEvent(speechToTextEventArgs);
     }
 }
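OnHaveResultEvent and the event it raises are not part of this snippet. A hedged sketch of what that plumbing is typically shaped like follows; the event name and the SpeechToTextEventArgs type are assumptions inferred from the field used above.

 // Assumed shape of the event plumbing used above; names are illustrative, not from the original.
 public event EventHandler<SpeechToTextEventArgs> HaveResult;

 protected virtual void OnHaveResultEvent(SpeechToTextEventArgs e)
 {
     // Raise the event only when at least one subscriber is attached.
     HaveResult?.Invoke(this, e);
 }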
Example #60
0
        /// <summary>
        /// Handle events fired when a result is generated. Check for high to medium confidence, and then append the
        /// string to the end of the stringbuffer, and replace the content of the textbox with the string buffer, to
        /// remove any hypothesis text that may be present.
        /// </summary>
        /// <param name="sender">The Recognition session that generated this result</param>
        /// <param name="args">Details about the recognized speech</param>
        private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // We may choose to discard content that has low confidence, as that could indicate that we're picking up
            // noise via the microphone, or someone could be talking out of earshot.

            // Speak it out

            string s = args.Result.Text;
            string lang = selectedLang.LanguageTag + ":";

            // we add the translator here. And then translate what heard to English
            //Translator Trans = new Translator(s, ConstantParam.from, ConstantParam.to);
            //Translator Trans = new Translator(s, SRLang.LanguageTag, SSLang.LanguageTag);
            //string translatedS = Trans.GetTranslatedString();

            //// Second translator added to verify the end to end scenario
            //Translator trans1 = new Translator(translatedS, ConstantParam.from1, ConstantParam.to1);
            //string translatedS1 = trans1.GetTranslatedString();

            //Make the Connection
            // ConnectHost();

            //Send the Data
            SendDataToHost(lang + s);

            //SpeechSynthesisStream stream = await synthesizer.SynthesizeTextToStreamAsync(translatedS);

            // if (args.Result.Confidence==SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High)
            if (args.Result.Status == SpeechRecognitionResultStatus.Success)
            {
                dictatedTextBuilder.Append(s + " ");
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                    btnClearText.IsEnabled = true;
                    // we comment out PLAY here as we will play on another server
                    //media.SetSource(stream, stream.ContentType);
                    //media.Play();
                });
            }
            else
            {
                // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech
                // is not the primary input mechanism for the application.
                // Here, just remove any hypothesis text by resetting it to the last known good.
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    dictationTextBox.Text = dictatedTextBuilder.ToString();
                });
            }

        }