Example #1
        /* Constructor for page */
        public MainPage()
        {
            // Initialize the component model
            InitializeComponent();

            // Set the data context for the page bindings
            DataContext = App.ViewModel;

            // Create speech recognizer 
            _Recognizer = new SpeechRecognizerUI();

            // Bind up shake gesture
            ShakeGesturesHelper.Instance.ShakeGesture += new EventHandler<ShakeGestureEventArgs>(Instance_ShakeGesture);
            ShakeGesturesHelper.Instance.MinimumRequiredMovesForShake = 4;
            ShakeGesturesHelper.Instance.Active = true;

            // Create demo recognizer and set grammar
            _DemoRecognizer = new SpeechRecognizerUI();
            _DemoRecognizer.Recognizer.Grammars.AddGrammarFromList("Demo", App.GrammerList);

            // Create speech synthesizer
            _Synthesizer = new SpeechSynthesizer();

            // Create signalr connection
            _Connection = new HubConnection("http://sagevoice.azurewebsites.net/");
            _Connection.StateChanged += change => ReportChange(change);

            // Create hub proxy
            _Hub = _Connection.CreateHubProxy("erpTicker");
            _Hub.On<string>("addResponse", data => OnResponse(data));
        }
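For context, a minimal sketch of the instance fields this constructor appears to rely on; the names come from the code above, but the exact types (in particular the ASP.NET SignalR client types HubConnection and IHubProxy) are inferred, not confirmed by the source:

        // Assumed field declarations, inferred from usage in the constructor above.
        private SpeechRecognizerUI _Recognizer;     // plain dictation recognizer
        private SpeechRecognizerUI _DemoRecognizer; // recognizer with the "Demo" list grammar
        private SpeechSynthesizer _Synthesizer;     // text-to-speech
        private HubConnection _Connection;          // SignalR connection (Microsoft.AspNet.SignalR.Client)
        private IHubProxy _Hub;                     // proxy for the "erpTicker" hub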
Example #2
        public static void Initialize()
        {
            try
            {
                if (Speech.initialized)
                {
                    return;
                }
                Speech.recognizer = new SpeechRecognizer();
                Speech.synthesizer = new SpeechSynthesizer();
                Speech.recognizerUI = new SpeechRecognizerUI();

                IEnumerable<VoiceInformation> DeVoices = from voice in InstalledVoices.All
                                                         where voice.Gender == VoiceGender.Female
                                                         && voice.Language == "de-DE"
                                                         select voice;
                Speech.synthesizer.SetVoice(DeVoices.ElementAt(0));
                Speech.initialized = true;
                IsolatedStorageSettingsHelper.SetSpeechPackageState(true);
            }
            catch (Exception)
            {
                IsolatedStorageSettingsHelper.SetSpeechPackageState(false);
                throw; // rethrow the original exception instead of replacing it with an empty one
            }
        }
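Examples #2 and #35 call into a static Speech helper whose members are not shown on this page; a plausible minimal shape, inferred from usage (Initialize() above would live inside this class, and the visibility is an assumption):

        // Assumed members of the Speech helper class; inferred, not from the source.
        public static class Speech
        {
            public static bool initialized;
            public static SpeechRecognizer recognizer;
            public static SpeechSynthesizer synthesizer;
            public static SpeechRecognizerUI recognizerUI;
        }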
Example #3
        public async void UseVoice()
        {
            var speech = new SpeechSynthesizer();

            await speech.SpeakTextAsync("Do you want to search or see your booked flights?");

            var optionsUI = new SpeechRecognizerUI();

            optionsUI.Recognizer.Grammars.AddGrammarFromList("options", new[] { "search", "see booked flights" });

            optionsUI.Settings.ListenText       = "What do you want to do?";
            optionsUI.Settings.ExampleText      = "Search, See booked flights";
            optionsUI.Settings.ShowConfirmation = true;
            optionsUI.Settings.ReadoutEnabled   = true;

            var optionsResult = await optionsUI.RecognizeWithUIAsync();

            if (optionsResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded &&
                optionsResult.RecognitionResult.TextConfidence != SpeechRecognitionConfidence.Rejected)
            {
                if (optionsResult.RecognitionResult.Text == "search")
                {
                    Search();
                }
                else
                {
                    Booked();
                }
            }
        }
Example #4
        private void InitializeVoiceRecognition()
        {
            speechRecognizerWithUI = new SpeechRecognizerUI();
            List <string> searchTerms = ExtractSearchTerms();

            speechRecognizerWithUI.Recognizer.Grammars.AddGrammarFromList("SearchTerms", searchTerms);
        }
Example #5
 public SimpleSpeechRecognition()
 {
     InitializeComponent();
     mainViewModel = Resources["MainViewModel"] as SimpleSpeechRecognitionMainViewModel;
     recognizerUI = new SpeechRecognizerUI();
     mainViewModel.SpeechRecognitionText = "認識ボタンを押して音声認識を開始します。"; // "Press the recognize button to start speech recognition."
 }
Example #6
 public async Task<SpeechRecognitionUIResult> Listen()
 {
     SpeechRecognitionUIResult speechRecognitionResult;
     _speechRecognizerUI = new SpeechRecognizerUI();
     speechRecognitionResult = await _speechRecognizerUI.RecognizeWithUIAsync();
     return speechRecognitionResult;
 }
Example #7
        private async void SpeakNote_Click(object sender, RoutedEventArgs e)
        {
            // Initialize speech recognition
            var sr = new SpeechRecognizerUI();
            sr.Settings.ListenText = "Notiz erfassen";
            sr.Settings.ExampleText = "Geburtstagsgeschenk kaufen";
            sr.Settings.ReadoutEnabled = true;
            sr.Settings.ShowConfirmation = false;

            // Query the speech recognition result
            var result = await sr.RecognizeWithUIAsync();
            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                // success - output the recognized text and confidence
                string spokenText = result.RecognitionResult.Text;
                string confidence = result.RecognitionResult.TextConfidence.ToString();

                SpokenText.Text = spokenText;
                Status.Text = confidence;
            }
            else
            {
                // not successful - output the status
                Status.Text = result.ResultStatus.ToString();
            }
        }
Example #8
        private async void SpeakNote_Click(object sender, RoutedEventArgs e)
        {
            // Initialize speech recognition
            var sr = new SpeechRecognizerUI();

            sr.Settings.ListenText       = "Notiz erfassen";
            sr.Settings.ExampleText      = "Geburtstagsgeschenk kaufen";
            sr.Settings.ReadoutEnabled   = true;
            sr.Settings.ShowConfirmation = false;

            // Query the speech recognition result
            var result = await sr.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                // success - output the recognized text and confidence
                string spokenText = result.RecognitionResult.Text;
                string confidence = result.RecognitionResult.TextConfidence.ToString();

                SpokenText.Text = spokenText;
                Status.Text     = confidence;
            }
            else
            {
                // not successful - output the status
                Status.Text = result.ResultStatus.ToString();
            }
        }
Example #9
        private async void Button_Click_3(object sender, RoutedEventArgs e)
        {
            SpeechRecognizerUI speechRecognizerUI = new SpeechRecognizerUI();
            // Grammar of Chinese numerals: 一 (one), 二 (two), 三 (three), 四 (four)
            speechRecognizerUI.Recognizer.Grammars.AddGrammarFromList("Number", new List<string>
            {
                "一",
                "二",
                "三",
                "四"
            });
            try
            {
                var result = await speechRecognizerUI.RecognizeWithUIAsync();
                if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    MessageBox.Show(result.RecognitionResult.Text);
                }
                else
                {
                    MessageBox.Show("语音识别不到"); // "Speech was not recognized"
                }
            }
            catch (Exception err)
            {
                MessageBox.Show(err.Message + err.HResult);
            }
        }
Example #10
        private async void AskQuestion_Click(object sender, RoutedEventArgs e)
        {
            // Initialize speech recognition
            var sr = new SpeechRecognizerUI();

            sr.Settings.ListenText       = "Welcher Tag ist heute?";
            sr.Settings.ReadoutEnabled   = true;
            sr.Settings.ShowConfirmation = false;

            // Restrict the recognizable words to workdays
            var weekdays = new[] { "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag" };

            sr.Recognizer.Grammars.AddGrammarFromList("Weekdays", weekdays);

            // Query the speech recognition result
            var result = await sr.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                // success - output the recognized text and confidence
                string spokenText = result.RecognitionResult.Text;
                string confidence = result.RecognitionResult.TextConfidence.ToString();

                SpokenText.Text = spokenText;
                Status.Text     = confidence;
            }
            else
            {
                // not successful - output the status
                Status.Text = result.ResultStatus.ToString();
            }
        }
Example #11
        private async Task <SendRepeatCancel> AskSendRepeatCancelQuestion()
        {
            var recognizer = new SpeechRecognizerUI();

            string[] options     = { Resources.Send, Resources.Repeat, Resources.Cancel };
            string   exampleText = String.Format("{0}, {1}, {2}", options[0], options[1], options[2]);

            recognizer.Recognizer.Grammars.AddGrammarFromList("SendRepeatCancel", options);
            recognizer.Settings.ExampleText = exampleText;


            SpeechSynthesizer synth = new SpeechSynthesizer();
            await synth.SpeakTextAsync(exampleText);

            var result = await recognizer.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                if (result.RecognitionResult.Text == Resources.Send)
                {
                    return(SendRepeatCancel.Send);
                }
                else if (result.RecognitionResult.Text == Resources.Repeat)
                {
                    return(SendRepeatCancel.Repeat);
                }
            }

            return(SendRepeatCancel.Cancel); // In every other case.
        }
Example #12
 // Create and configure the SpeechRecognizerUI object.
 private void ConfigureRecognizer()
 {
     recoWithUI = new SpeechRecognizerUI();
     recoWithUI.Settings.ListenText       = "Speak your voice reminder.";
     recoWithUI.Settings.ReadoutEnabled   = true;
     recoWithUI.Settings.ShowConfirmation = true;
 }
Example #13
        private async void FindFrequency_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                SpeechRecognizerUI speechRecognition = new SpeechRecognizerUI();
                speechRecognition.Settings.ListenText  = "Enter Sentence!";
                speechRecognition.Settings.ExampleText = "plot will show frequency of letters(lower case)";
                SpeechSynthesizer synth = new SpeechSynthesizer();
                await synth.SpeakTextAsync("Say something");

                SpeechRecognitionUIResult recoResult = await speechRecognition.RecognizeWithUIAsync();

                if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    int[] alpha = Enumerable.Repeat(0, 26).ToArray();

                    // ToLower returns a new string; the original code discarded it
                    string text = recoResult.RecognitionResult.Text.ToLower();
                    for (int i = 0; i < text.Length; i++)
                    {
                        // Count only the 26 basic Latin letters so the index stays in range
                        if (text[i] >= 'a' && text[i] <= 'z')
                        {
                            alpha[text[i] - 'a']++;
                        }
                    }
                    // One data point per letter of the alphabet (was: per character of the text)
                    for (int i = 0; i < alpha.Length; i++)
                    {
                        DataPoint model = new DataPoint((char)(i + 'a'), alpha[i]);
                        CollectionCoordinates.Add(model);
                    }
                }
            }
            catch (Exception r)
            {
            }
        }
Example #14
        private async void AskQuestion_Click(object sender, RoutedEventArgs e)
        {
            // Initialize speech recognition
            var sr = new SpeechRecognizerUI();
            sr.Settings.ListenText = "Welcher Tag ist heute?";
            sr.Settings.ReadoutEnabled = true;
            sr.Settings.ShowConfirmation = false;

            // Restrict the recognizable words to workdays
            var weekdays = new[] { "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag" };
            sr.Recognizer.Grammars.AddGrammarFromList("Weekdays", weekdays);

            // Query the speech recognition result
            var result = await sr.RecognizeWithUIAsync();
            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                // success - output the recognized text and confidence
                string spokenText = result.RecognitionResult.Text;
                string confidence = result.RecognitionResult.TextConfidence.ToString();

                SpokenText.Text = spokenText;
                Status.Text = confidence;
            }
            else
            {
                // not successful - output the status
                Status.Text = result.ResultStatus.ToString();
            }
        }
Example #15
        public async Task<TextDictationResult> GetDictatedText()
        {
            var retval = new TextDictationResult();

            var recognizer = new SpeechRecognizerUI();

            var result = await recognizer.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                retval.SuccesfulRecognition = true;
                retval.Text = result.RecognitionResult.Text;

                var userConfirms = await AskSendRepeatCancelQuestion();

                if (userConfirms == SendRepeatCancel.Send)
                    retval.UserCancelled = false;
                else if (userConfirms == SendRepeatCancel.Repeat)
                    return await GetDictatedText();
                else if (userConfirms == SendRepeatCancel.Cancel)
                    retval.UserCancelled = true;
            }
            else
            {
                retval.SuccesfulRecognition = false;
                retval.UserCancelled = false;
                retval.Text = String.Empty;
            }

            return retval;
        }
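The TextDictationResult and SendRepeatCancel types used here (and in Examples #11, #43 and #44) are not shown on this page; a minimal sketch of the shapes the calls imply, with the misspelled SuccesfulRecognition property kept verbatim from the usage:

        // Assumed types, reconstructed from how the examples use them.
        public enum SendRepeatCancel { Send, Repeat, Cancel }

        public class TextDictationResult
        {
            public bool SuccesfulRecognition { get; set; } // spelling as used above
            public bool UserCancelled { get; set; }
            public string Text { get; set; }
        }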
Example #16
        private async void SpeakButton_Click(object sender, EventArgs e)
        {
            try
            {
                await speechSynthesizer.SpeakTextAsync("Say the item name");

                this.recoWithUI = new SpeechRecognizerUI();
                recoWithUI.Recognizer.Grammars.AddGrammarFromPredefinedType("webSearch", SpeechPredefinedGrammar.WebSearch);
                SpeechRecognitionUIResult recoResultName = await recoWithUI.RecognizeWithUIAsync();

                Name.Text = recoResultName.ResultStatus == SpeechRecognitionUIStatus.Succeeded ? recoResultName.RecognitionResult.Text : "Unknown";

                if (recoResultName.ResultStatus != SpeechRecognitionUIStatus.Cancelled)
                {
                    await speechSynthesizer.SpeakTextAsync("Say the item price");

                    this.recoWithUI = new SpeechRecognizerUI();
                    SpeechRecognitionUIResult recoResultPrice = await recoWithUI.RecognizeWithUIAsync();

                    Amount.Text = GetOnlyNumberFromSpeech(recoResultPrice);
                }
            }
            catch
            {
            }
        }
Example #17
        private async void Sp(object sender, RoutedEventArgs e)
        {
            try
            {
                if (sender == null)
                {
                }
                if (e == null)
                {
                }

                _recoWithUi = new SpeechRecognizerUI();
                _recoWithUi.Settings.ReadoutEnabled   = false;
                _recoWithUi.Settings.ShowConfirmation = false;
                _recoWithUi.Settings.ExampleText      = "";
                string[] b = _vita.GetAllCommands();
                _recoWithUi.Recognizer.Grammars.AddGrammarFromList("frenchNumbers", b);

                // Note: despite the variable name, this selects the installed Russian ("ru-RU") recognizer
                IEnumerable <SpeechRecognizerInformation> frenchRecognizers = from recognizerInfo in InstalledSpeechRecognizers.All
                                                                              where recognizerInfo.Language == "ru-RU"
                                                                              select recognizerInfo;

                _recoWithUi.Recognizer.SetRecognizer(frenchRecognizers.ElementAt(0));
                SpeechRecognitionUIResult recoResult = await _recoWithUi.RecognizeWithUIAsync();

                //SpeechSynthesizer synth = new SpeechSynthesizer();
                //await synth.SpeakTextAsync(recoResult.RecognitionResult.Text);
                MoonPadTcpClient.Send(recoResult.RecognitionResult.Text);
                _fl = 1;
            }
            catch (Exception ex)
            {
                _fl = 1;
            }
        }
Example #18
        /// <summary>
        /// When the user clicks on the button "let_sHear", the device hears its environment.
        /// </summary>
        private async void let_sHear(/*object sender, RoutedEventArgs e*/)
        {
            SpeechRecognitionUIResult recoResult;

            this.recoWithUI = new SpeechRecognizerUI();   // Creates an instance of SpeechRecognizerUI.
            recoResult = await recoWithUI.RecognizeWithUIAsync();   // Starts recognition (loads the dictation grammar by default).

            if (recoResult.RecognitionResult != null && recoResult.RecognitionResult.Text != null)
                toSay = recoResult.RecognitionResult.Text;

        }
Example #19
        private async void Listen()
        {
            this.recoWithUI = new SpeechRecognizerUI();

            SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();

            if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                MessageBox.Show(string.Format("You said {0}.",
                                              recoResult.RecognitionResult.Text));
            }
        }
Example #20
        //private async Task UpdatePhraseListsAsync()
        //{
        //    foreach (VoiceCommandSet cs in VoiceCommandService.InstalledCommandSets.Values)
        //    {
        //        List<string> updatedListOfPhrases = GetPhrasesForUpdatedSiteToSearchPhraseList(cs.Language.ToLower());
        //        await cs.UpdatePhraseListAsync("siteToSearch", updatedListOfPhrases);
        //    }
        //}

        public async Task <string> RecognizeTextFromWebSearchGrammar(string exampleText)
        {
            string text = null;

            try
            {
                SpeechRecognizerUI sr = new SpeechRecognizerUI();
                sr.Recognizer.Grammars.AddGrammarFromPredefinedType("web", SpeechPredefinedGrammar.WebSearch);
                sr.Settings.ListenText       = "Listening...";
                sr.Settings.ExampleText      = exampleText;
                sr.Settings.ReadoutEnabled   = false;
                sr.Settings.ShowConfirmation = false;

                SpeechRecognitionUIResult result = await sr.RecognizeWithUIAsync();

                if (result != null)
                {
                    if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                    {
                        if (result.RecognitionResult != null &&
                            result.RecognitionResult.TextConfidence != SpeechRecognitionConfidence.Rejected)
                        {
                            text = result.RecognitionResult.Text;
                        }
                    }
                    else
                    {
                        if (result.ResultStatus == SpeechRecognitionUIStatus.PrivacyPolicyDeclined)
                        {
                            Execute.BeginOnUIThread(() =>
                            {
                                var toast = new ToastPrompt()
                                {
                                    Title           = "Privacy policy declined",
                                    Message         = "You must accept the privacy policy to use speech recognition.",
                                    TextOrientation = Orientation.Vertical,
                                    TextWrapping    = TextWrapping.Wrap,
                                    Background      = new SolidColorBrush(Colors.Red),
                                };

                                toast.Show();
                            });
                        }
                    }
                }
            }
            catch
            {
            }

            return(text);
        }
Example #21
 private async void SelectRecognizerLanguage(object sender, System.Windows.Controls.SelectionChangedEventArgs e)
 {
     if (e.AddedItems != null && e.AddedItems.Count != 0)
     {
         _speechRecognizer = new SpeechRecognizerUI();
         _speechRecognizer.Recognizer.SetRecognizer((SpeechRecognizerInformation)e.AddedItems[0]);
         SpeechRecognitionUIResult _result = await _speechRecognizer.RecognizeWithUIAsync();
         if (_result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
         {
             SpeechToTextBox.Text = _result.RecognitionResult.Text;
         }
     }
 }
Example #22
 private async void speakToDevice(object sender, System.Windows.RoutedEventArgs e)
 {
     _speechRecognizer = new SpeechRecognizerUI();
     _speechRecognizer.Settings.ShowConfirmation = false;
     _speechRecognizer.Settings.ReadoutEnabled = false;
     _speechRecognizer.Settings.ExampleText = "Now say a thing!";
     _speechRecognizer.Settings.ListenText = "I can hear you...";
     SpeechRecognitionUIResult _result = await _speechRecognizer.RecognizeWithUIAsync();
     if (_result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
     {
         SpeechToTextBox.Text = _result.RecognitionResult.Text;
     }
 }
Example #23
        private async void appBarButton_Click(object sender, EventArgs e)
        {
            //-- Speech recognition objects
            var recognizer = new SpeechRecognizerUI();
            recognizer.Settings.ShowConfirmation = true;
            recognizer.Settings.ReadoutEnabled = false;

            //-- Start listening for speech
            var result = await recognizer.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                //-- Put the recognized text into the TextBox holding the text to translate
                this.txtOriginal.Text = result.RecognitionResult.Text;
                //-- Build the query URL
                string uri = "https://translate.yandex.net/api/v1.5/tr/translate?key=trnsl.1.1.20130705T161953Z.6bf2c28742bb8e29.605b3c4e4d852751bafc35a06b04713c60ac2d63&lang=en&text=" + this.txtOriginal.Text + "&text=" + this.txtOriginal.Text;
                //-- Call the API
                WebClient clienteProxy = new WebClient();
                clienteProxy.DownloadStringCompleted += (s, a) =>
                {
                    if (a.Error == null && !a.Cancelled)
                    {
                        var api_result = a.Result;
                        //-- Parse the results into an XML document
                        var doc = XDocument.Parse(api_result);
                        var query = from c in doc.Descendants("Translation")
                                    select new Translation()
                                    {
                                        text = (string)c.Element("text")
                                    };
                        var results = query.ToList();

                        //-- Get the text and assign it to the object
                        Translation texto = results.FirstOrDefault();
                        //-- Put the translated text into the TextBox that will hold the translation
                        this.txtTraduccion.Text = texto.text;

                        //-- Call the method so the phone speaks the translated text
                        this.SpeakText();

                    }
                };
                clienteProxy.DownloadStringAsync(new Uri(uri, UriKind.Absolute));
            }
        }
Example #24
 private void LoadLanguages(object sender, System.Windows.RoutedEventArgs e)
 {
     _speechRecognizer = new SpeechRecognizerUI();
     allLanguages = new List<SpeechRecognizerInformation>();
     foreach (SpeechRecognizerInformation sri in InstalledSpeechRecognizers.All)
     {
         allLanguages.Add(sri);
         if (sri.Language == "es-ES")
         {
             _speechRecognizer.Recognizer.SetRecognizer(sri); 
         }
     }
     LanguagesListBox.ItemsSource = allLanguages;
 }
Example #25
 private async void MicButton_Tap(object sender, System.Windows.Input.GestureEventArgs e)
 {
     SpeechRecognizerUI sr = new SpeechRecognizerUI();
     sr.Settings.ListenText = "Let's go to the beach";
     sr.Settings.ReadoutEnabled = false;
     sr.Settings.ShowConfirmation = true;
     SpeechRecognitionUIResult result = await sr.RecognizeWithUIAsync();
     if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
     {
         ShareStatusTask task = new ShareStatusTask();
         task.Status = result.RecognitionResult.Text;
         task.Show();
     }
 }
Example #26
        private async void RetrieveMemoSpeech()
        {
            this.SpeechUI = new SpeechRecognizerUI();
            SpeechRecognitionUIResult recoResult = await SpeechUI.RecognizeWithUIAsync();

            String phoneID = Phone.ID;
            String message = recoResult.RecognitionResult.Text;

            MessageBox.Show(string.Format("You said {0}.", recoResult.RecognitionResult.Text));

            // Call the API
            // Handle the callback

            Memos.Add(new MemoItem());
        }
Example #27
        private async void MicButton_Tap(object sender, System.Windows.Input.GestureEventArgs e)
        {
            SpeechRecognizerUI sr = new SpeechRecognizerUI();
            sr.Settings.ListenText = "Will I be rich?";
            sr.Settings.ReadoutEnabled = false;
            sr.Settings.ShowConfirmation = true;
            SpeechRecognitionUIResult result = await sr.RecognizeWithUIAsync();
            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                string resultText = result.RecognitionResult.Text;
                resultText = resultText.Replace('.', '?'); // Replace returns a new string
                ConfidenceText.Text = resultText;
                Speak();
            }
        }
Example #28
        private async void Button_Click(object sender, RoutedEventArgs e)
        {
            var recognizerUI = new SpeechRecognizerUI();

            recognizerUI.Recognizer.Grammars.AddGrammarFromList("animal", new List <string>()
            {
                "いぬ", "さる", "ねこ"                               // "dog", "monkey", "cat"
            });                                                   // recognition candidates
            recognizerUI.Settings.ListenText  = "好きな動物は以下のうちどれ?"; // title on the voice search screen ("Which of these animals do you like?")
            recognizerUI.Settings.ExampleText = "・いぬ\n・さる\n・ねこ";  // details on the voice search screen

            var result = await recognizerUI.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                MessageBox.Show(result.RecognitionResult.Text);
            }
        }
Example #29
        private void CheckIfMusicPlayingAndCanStopIt()
        {
            try
            {
                _speechRecognizer = new SpeechRecognizerUI();
                _speechRecognizer.Recognizer.GetRecognizer();
                _speechIsSupported = true;
            }
            catch (Exception ex)
            {
                _speechIsSupported = false;
                Log.ErrorException("CheckForSupportedSpeech", ex);
            }

            if (IsMusicPlaying())
            {
                if (App.SettingsWrapper.AppSettings.AllowStopMusic || _alreadyAskedAboutMusic)
                {
                    StopMusic();
                }
                else
                {
                    var message = new CustomMessageBox
                    {
                        Title              = "stop music?",
                        Message            = "We've noticed you're already listening to some music, mind if we stop it so you can play the game?",
                        LeftButtonContent  = "go ahead",
                        RightButtonContent = "no thanks",
                        Content            = Utils.CreateDontShowCheckBox("DontShowAllowStopMusicMessage")
                    };

                    message.Dismissed += (sender, args) =>
                    {
                        if (args.Result == CustomMessageBoxResult.LeftButton)
                        {
                            _alreadyAskedAboutMusic = true;
                            StopMusic();
                            StartNewGame();
                        }
                    };
                    message.Show();
                }
            }
        }
Example #30
        public async void SpeechToText_Click(object sender, RoutedEventArgs e)
        {
            // Speech recognition only supports Spanish from Spain, not from Mexico
            var Language = (from language in InstalledSpeechRecognizers.All
                            where language.Language == "es-ES"
                            select language).FirstOrDefault();

            SpeechRecognizerUI speechRecognition = new SpeechRecognizerUI();

            speechRecognition.Recognizer.SetRecognizer(Language);

            SpeechRecognitionUIResult recoResult = await speechRecognition.RecognizeWithUIAsync();

            if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                txtPregunta.Text = recoResult.RecognitionResult.Text.Replace(".", "");
                LaunchSearch();
            }
        }
Example #31
        private async void InitializeVoiceRecognition()
        {
            speechRecognizerWithUI = new SpeechRecognizerUI();
            List<string> searchTerms = ExtractSearchTerms();
            speechRecognizerWithUI.Recognizer.Grammars.AddGrammarFromList("SearchTerms", searchTerms);

            //speechRecognizer = new SpeechRecognizer();
            //speechRecognizer.Grammars.AddGrammarFromList("Commands", new List<string>() { "Back" });

            //while (true)
            //{
            //    //Causes crash on back from FAS. How to stop in deactivation?
            //    var result = await speechRecognizer.RecognizeAsync();

            //    if (result.TextConfidence == SpeechRecognitionConfidence.High && this.RootFrame.CanGoBack)
            //    {
            //        this.RootFrame.GoBack();
            //    }
            //}
        }
Example #32
        public async Task <string> GetMicrophoneSpeech(string SpeechListenText, string SpeechExampleText)
        {
            try
            {
                SpeechRecognizerUI sr = new SpeechRecognizerUI();
                sr.Settings.ListenText       = SpeechListenText;
                sr.Settings.ExampleText      = SpeechExampleText;
                sr.Settings.ReadoutEnabled   = false;
                sr.Settings.ShowConfirmation = false;

                SpeechRecognitionUIResult result = await sr.RecognizeWithUIAsync();

                if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    return(result.RecognitionResult.Text);
                }
            }
            catch { }
            // Recognition failed, was cancelled, or threw: signal failure to the caller
            throw new Exception("Speech recognition did not return a result.");
        }
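Because GetMicrophoneSpeech throws when recognition fails or is cancelled, a caller would wrap it in a try/catch. A hedged usage sketch; the handler name and SearchBox control are illustrative only, not part of the original project:

        // Hypothetical caller for the method above.
        private async void OnMicTapped(object sender, EventArgs e)
        {
            try
            {
                SearchBox.Text = await GetMicrophoneSpeech("Listening...", "e.g. 'pizza near me'");
            }
            catch
            {
                // Recognition failed or was cancelled; leave the box unchanged.
            }
        }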
Example #33
        public async void UseVoice()
        {
            var searchUI = new SpeechRecognizerUI();

            searchUI.Recognizer.Grammars.AddGrammarFromUri("search", new Uri("ms-appx:///resources/grammar.xml"));

            searchUI.Settings.ListenText  = "Search for?";
            searchUI.Settings.ExampleText = "Show flights from Auckland to Wellington";

            var searchResult = await searchUI.RecognizeWithUIAsync();

            if (searchResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded &&
                searchResult.RecognitionResult.TextConfidence != SpeechRecognitionConfidence.Rejected)
            {
                SelectedArrival   = Arrives.Single(c => c.Name == (string)searchResult.RecognitionResult.Semantics["arrives"].Value);
                SelectedDeparture = Departs.Single(c => c.Name == (string)searchResult.RecognitionResult.Semantics["departs"].Value);

                Search();
            }
        }
Example #34
        private async void PhoneApplicationPage_Loaded(object sender, RoutedEventArgs e)
        {
            this.recoWithUI = new SpeechRecognizerUI();

            // Start recognition (load the dictation grammar by default).
            SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();

            String result = await Outils.RecoInteract(recoResult.RecognitionResult.Text);

            if (!String.IsNullOrEmpty(result))
            {
                JObject res = JsonConvert.DeserializeObject <JObject>(result);
                if (res["result"] != null && !String.IsNullOrEmpty(res["result"].ToString()))
                {
                    SpeechSynthesizer synth = new SpeechSynthesizer();

                    await synth.SpeakTextAsync(res["result"].ToString());
                }
            }
        }
Example #35
        // Must be called before using static methods.
        public static void Initialize()
        {
            if (Speech.initialized)
            {
                return;
            }

            Speech.recognizer   = new SpeechRecognizer();
            Speech.synthesizer  = new SpeechSynthesizer();
            Speech.recognizerUI = new SpeechRecognizerUI();

            // Sets the en-US male voice.
            IEnumerable <VoiceInformation> enUSMaleVoices = from voice in InstalledVoices.All
                                                            where voice.Gender == VoiceGender.Male &&
                                                            voice.Language == "en-US"
                                                            select voice;

            Speech.synthesizer.SetVoice(enUSMaleVoices.ElementAt(0));

            Speech.initialized = true;
        }
Example #36
        private async Task RecognizeSpeech()
        {
            try
            {
                var localSpeechRecognizerUI = new SpeechRecognizerUI();

                localSpeechRecognizerUI.Settings.ListenText       = "Say your phrase...";
                localSpeechRecognizerUI.Settings.ExampleText      = "What's going on?";
                localSpeechRecognizerUI.Settings.ReadoutEnabled   = false;
                localSpeechRecognizerUI.Settings.ShowConfirmation = true;

                SpeechRecognitionUIResult recognitionResult = await localSpeechRecognizerUI.RecognizeWithUIAsync();

                Dispatcher.BeginInvoke(delegate { DetectedTextTextBox.Text = recognitionResult.RecognitionResult.Text; });
                await SayText(recognitionResult.RecognitionResult.Text);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                throw;
            }
        }
Example #37
        /// <summary>
        /// Method that performs speech recognition
        /// </summary>
        /// <param name="lan">The language to use</param>
        /// <returns>The spoken text</returns>
        private async Task<string> startRecognition(string lan)
        {
            string text = String.Empty;

            IEnumerable<SpeechRecognizerInformation> recognizers = from recognizerInfo in InstalledSpeechRecognizers.All
                                                                   where recognizerInfo.Language == lan
                                                                   select recognizerInfo;
            var recognizer = new SpeechRecognizerUI();

            recognizer.Recognizer.SetRecognizer(recognizers.ElementAt(0));
            recognizer.Settings.ShowConfirmation = true;
            recognizer.Settings.ReadoutEnabled = false;

            var result = await recognizer.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                text = result.RecognitionResult.Text;
            }

            return text;
        }
Example #38
        private async void SpeakButton_Click(object sender, EventArgs e)
        {
            try
            {
                await speechSynthesizer.SpeakTextAsync("Say the item name");
                this.recoWithUI = new SpeechRecognizerUI();
                recoWithUI.Recognizer.Grammars.AddGrammarFromPredefinedType("webSearch", SpeechPredefinedGrammar.WebSearch);
                SpeechRecognitionUIResult recoResultName = await recoWithUI.RecognizeWithUIAsync();
                Name.Text = recoResultName.ResultStatus == SpeechRecognitionUIStatus.Succeeded ? recoResultName.RecognitionResult.Text : "Unknown";

                if (recoResultName.ResultStatus != SpeechRecognitionUIStatus.Cancelled)
                {
                    await speechSynthesizer.SpeakTextAsync("Say the item price");
                    this.recoWithUI = new SpeechRecognizerUI();
                    SpeechRecognitionUIResult recoResultPrice = await recoWithUI.RecognizeWithUIAsync();
                    Amount.Text = GetOnlyNumberFromSpeech(recoResultPrice);
                }
            }
            catch
            {
            }
        }
Example #39
        private async void MicrophoneImage_Tap(object sender, System.Windows.Input.GestureEventArgs e)
        {
            this.speechRecognizer = new SpeechRecognizerUI();
            this.speechRecognizer.Recognizer.Grammars.Clear();
            this.speechRecognizer.Recognizer.Grammars.AddGrammarFromPredefinedType("search", SpeechPredefinedGrammar.WebSearch);
            await this.speechRecognizer.Recognizer.PreloadGrammarsAsync();

            try
            {
                // Use the built-in UI to prompt the user and get the result.
                SpeechRecognitionUIResult recognitionResult = await this.speechRecognizer.RecognizeWithUIAsync();

                if (recognitionResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    // Output the speech recognition result.
                    NewMessageTextBox.Text = recognitionResult.RecognitionResult.Text.Trim();
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #40
        public async Task OpenSpeechUI()
        {
            SpeechRecognizerUI recoWithUI;

            // Create an instance of SpeechRecognizerUI.
            recoWithUI = new SpeechRecognizerUI();
            var installed = InstalledSpeechRecognizers.All;

            if (installed.Any(o => o.Language == "en-US"))
            {
                recoWithUI.Recognizer.SetRecognizer(installed.Where(o => o.Language == "en-US").Single());


                // Uri searchGrammar = new Uri("ms-appx:///Assets/SRGSGrammar1.xml", UriKind.Absolute);

                // Add the SRGS grammar to the grammar set.
                //   recoWithUI.Recognizer.Grammars.AddGrammarFromUri("cities", searchGrammar);

                recoWithUI.Settings.ListenText  = "search for?";
                recoWithUI.Settings.ExampleText = " 'guides', 'guide', 'device' ";
                // Start recognition (load the dictation grammar by default).

                recoWithUI.Recognizer.Grammars.AddGrammarFromPredefinedType("typeName", SpeechPredefinedGrammar.Dictation);

                SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();

                // Do something with the recognition result.
                // MessageBox.Show(string.Format("You said {0}.", recoResult.RecognitionResult.Text),);


                //  DoSearch(recoResult.RecognitionResult.Text);
            }
            else
            {
                MessageBox.Show("not lang");
            }
        }
Example #41
        private async Task AskForColor()
        {
            try
            {

                SpeechRecognizerUI speechRecognizerUi = new SpeechRecognizerUI();
                SpeechRecognizer speechRecognizer = speechRecognizerUi.Recognizer;
                speechRecognizer.AudioCaptureStateChanged += speechRecognizer_AudioCaptureStateChanged;
                speechRecognizer.Grammars.AddGrammarFromList("colorList", _colorNames);

                PromptTextBlock.Text = "Which color?";
                ExampleTextBlock.Text = "'Red', 'Blue', 'Green', 'Yellow', 'Purple', 'Orange', 'Black', 'White'";
                await _speechSynthesizer.SpeakTextAsync("Which color?"); // await instead of fire-and-forget
                
                SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();
                // In this enum a larger value means lower confidence, so test Low/Rejected explicitly
                if (result.TextConfidence == SpeechRecognitionConfidence.Low ||
                    result.TextConfidence == SpeechRecognitionConfidence.Rejected)
                {
                    FillUi(result);
                    Dispatcher.BeginInvoke(() => PromptTextBlock.Text = "Recognition Confidence too low.");
                    await _speechSynthesizer.SpeakTextAsync("Recognition Confidence too low. Please try again.");
                    await AskForColor();
                }
                else
                {
                    SetColorFromCommand(result.Text);
                    PromptTextBlock.Text = "Color set to " + result.Text;
                    FillUi(result);
                    await _speechSynthesizer.SpeakTextAsync("Color set to " + result.Text);
                }

            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                //throw;
            }
        }
Example #42
        private async void SpeechToText_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                SpeechRecognizerUI speechRecognition = new SpeechRecognizerUI();
                speechRecognition.Settings.ListenText  = "Enter Observations!";
                speechRecognition.Settings.ExampleText = "Ex: X equals <value>, Y equals <value>";
                SpeechSynthesizer synth = new SpeechSynthesizer();
                await synth.SpeakTextAsync("X equals!?");

                SpeechRecognitionUIResult recoResult = await speechRecognition.RecognizeWithUIAsync();

                if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    xCoordinates.Add(Double.Parse(recoResult.RecognitionResult.Text));
                }
                double x_val = Double.Parse(recoResult.RecognitionResult.Text);
                await synth.SpeakTextAsync("Y equals!?");

                recoResult = await speechRecognition.RecognizeWithUIAsync();

                if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    yCoordinates.Add(Double.Parse(recoResult.RecognitionResult.Text));
                }
                double y_val = Double.Parse(recoResult.RecognitionResult.Text);
                coordinates.Add("X: " + x_val + "   Y: " + y_val);
                listCoordinates.ItemsSource = null;
                listCoordinates.ItemsSource = coordinates;
                DataPoint model = new DataPoint(x_val, y_val);
                CollectionCoordinates.Add(model);
            }
            catch (Exception h)
            {
                MessageBox.Show("Some error, Say Clearly!");
            }
        }
Example #43
        public async Task <TextDictationResult> GetDictatedText()
        {
            var retval = new TextDictationResult();

            var recognizer = new SpeechRecognizerUI();

            var result = await recognizer.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                retval.SuccesfulRecognition = true;
                retval.Text = result.RecognitionResult.Text;

                var userConfirms = await AskSendRepeatCancelQuestion();

                if (userConfirms == SendRepeatCancel.Send)
                {
                    retval.UserCancelled = false;
                }
                else if (userConfirms == SendRepeatCancel.Repeat)
                {
                    return(await GetDictatedText());
                }
                else if (userConfirms == SendRepeatCancel.Cancel)
                {
                    retval.UserCancelled = true;
                }
            }
            else
            {
                retval.SuccesfulRecognition = false;
                retval.UserCancelled        = false;
                retval.Text = String.Empty;
            }

            return(retval);
        }
Example #44
        private async Task<SendRepeatCancel> AskSendRepeatCancelQuestion()
        {
            var recognizer = new SpeechRecognizerUI();
            string[] options = { Resources.Send, Resources.Repeat, Resources.Cancel };
            string exampleText =  String.Format("{0}, {1}, {2}", options[0], options[1], options[2]);
            recognizer.Recognizer.Grammars.AddGrammarFromList("SendRepeatCancel", options);
            recognizer.Settings.ExampleText = exampleText;


            SpeechSynthesizer synth = new SpeechSynthesizer();
            await synth.SpeakTextAsync(exampleText);

            var result = await recognizer.RecognizeWithUIAsync();

            if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                if (result.RecognitionResult.Text == Resources.Send)
                    return SendRepeatCancel.Send;
                else if (result.RecognitionResult.Text == Resources.Repeat)
                    return SendRepeatCancel.Repeat;
            }

            return SendRepeatCancel.Cancel; // In every other case.
        }
Example #45
        public static async Task <string> GetResult(string exampleText)
        {
            String             text = "";
            SpeechRecognizerUI sr   = new SpeechRecognizerUI();

            sr.Recognizer.Grammars.AddGrammarFromPredefinedType("web", SpeechPredefinedGrammar.WebSearch);
            sr.Settings.ListenText       = "Listening...";
            sr.Settings.ExampleText      = exampleText;
            sr.Settings.ReadoutEnabled   = false;
            sr.Settings.ShowConfirmation = false;

            SpeechRecognitionUIResult result = await sr.RecognizeWithUIAsync();

            if (result != null &&
                result.ResultStatus == SpeechRecognitionUIStatus.Succeeded &&
                result.RecognitionResult != null &&
                result.RecognitionResult.TextConfidence != SpeechRecognitionConfidence.Rejected)
            {
                await Speak("Looking for " + result.RecognitionResult.Text);

                text = result.RecognitionResult.Text;
            }
            return(text);
        }
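GetResult awaits a Speak helper that is not shown on this page. A plausible implementation, assuming it simply wraps SpeechSynthesizer.SpeakTextAsync (an assumption, not the project's confirmed code):

        // Assumed helper used by GetResult above.
        private static async Task Speak(string text)
        {
            SpeechSynthesizer synth = new SpeechSynthesizer();
            await synth.SpeakTextAsync(text);
        }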
Example #46
        private async Task AskForColor()
        {
            try
            {
                SpeechRecognizerUI speechRecognizerUi = new SpeechRecognizerUI();
                SpeechRecognizer   speechRecognizer   = speechRecognizerUi.Recognizer;
                speechRecognizer.AudioCaptureStateChanged += speechRecognizer_AudioCaptureStateChanged;
                speechRecognizer.Grammars.AddGrammarFromList("colorList", _colorNames);

                PromptTextBlock.Text  = "Which color?";
                ExampleTextBlock.Text = "'Red', 'Blue', 'Green', 'Yellow', 'Purple', 'Orange', 'Black', 'White'";
                await _speechSynthesizer.SpeakTextAsync("Which color?"); // await instead of fire-and-forget

                SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();

                // In this enum a larger value means lower confidence, so test Low/Rejected explicitly
                if (result.TextConfidence == SpeechRecognitionConfidence.Low ||
                    result.TextConfidence == SpeechRecognitionConfidence.Rejected)
                {
                    FillUi(result);
                    Dispatcher.BeginInvoke(() => PromptTextBlock.Text = "Recognition Confidence too low.");
                    await _speechSynthesizer.SpeakTextAsync("Recognition Confidence too low. Please try again.");
                    await AskForColor();
                }
                else
                {
                    SetColorFromCommand(result.Text);
                    PromptTextBlock.Text = "Color set to " + result.Text;
                    FillUi(result);
                    await _speechSynthesizer.SpeakTextAsync("Color set to " + result.Text);
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                //throw;
            }
        }
Example #47
        private void ChangeFirstSelection(object sender, System.Windows.Controls.SelectionChangedEventArgs e)
        {
            if (e.AddedItems != null && e.AddedItems.Count == 1 && e.AddedItems[0] != null)
            {
                try
                {
                    if (_firstActiveReco == null)
                        _firstActiveReco = new SpeechRecognizerUI();

                    _selectedFirstLanguage = (LangDisplay)e.AddedItems[0];
                    //Change the first language buttons
                    //firstSpeakBtn.Content = _selectedFirstLanguage.SpeakString;
                    //firstLangSelectBtn.Content = _selectedFirstLanguage.LangSelf;
                    //Change the first language listener
                    foreach (SpeechRecognizerInformation sri in InstalledSpeechRecognizers.All)
                    {
                        if (sri.Language == _selectedFirstLanguage.LangCode)
                        {
                            _firstActiveReco.Recognizer.SetRecognizer(sri);
                            //firstConfirmBox.Language = sri.Language;
                        }
                    }

                    //Change the first language speaker
                    foreach (VoiceInformation vi in InstalledVoices.All)
                    {
                        if (vi.Language == _selectedFirstLanguage.LangCode)
                        {
                            _firstSpeechSynth = new SpeechSynthesizer();
                            _firstSpeechSynth.SetVoice(vi);
                        }
                    }

                    firstFlag.Source = new BitmapImage(new Uri(_selectedFirstLanguage.LangFlag, UriKind.Relative));
                    firstLangText.Text = _selectedFirstLanguage.LangSelf;
                    firstLangHeader.Text = _selectedFirstLanguage.LangSelf;
                }
                catch
                {
                }
            }

            // Close the selection UI
            HideFirstLangList.Begin();
        }
Example #48
        private async void speech_Click(object sender, EventArgs eventArgs)
        {
            string message   = "Excuse me, what did you say?!";
            string txtbxType = string.Empty;

            if (GTaskSettings.IsFree)
            {
                GTaskSettings.Upsell();
            }
            else
            {
                try
                {
                    //If no textbox is selected, there is no where to put the text
                    if (focusedTextbox == null)
                    {
                        MessageBox.Show("Please select the text box you want to use and try again.", "Which Text Box?", MessageBoxButton.OK);
                        return;
                    }

                    // Create an instance of SpeechRecognizerUI.
                    this.recoWithUI = new SpeechRecognizerUI();
                    recoWithUI.Settings.ReadoutEnabled   = false;
                    recoWithUI.Settings.ShowConfirmation = false;

                    if (focusedTextbox.Name == "txtbxTitle")
                    {
                        recoWithUI.Settings.ListenText  = "Listening for Task Title...";
                        recoWithUI.Settings.ExampleText = "Ex. 'Mow the lawn'";
                        txtbxType = "Title";
                    }
                    else
                    {
                        recoWithUI.Settings.ListenText  = "Listening for Tasks Notes...";
                        recoWithUI.Settings.ExampleText = "Ex. 'This needs to be done by Tuesday.'";
                        txtbxType = "Notes";
                    }

                    // Start recognition (load the dictation grammar by default).
                    SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();

                    // Do something with the recognition result.
                    string txtbxText       = focusedTextbox.Text;
                    string SpeakResult     = (recoResult.RecognitionResult == null) ? string.Empty : recoResult.RecognitionResult.Text;
                    string FinalText       = string.Empty;
                    int    SelectionStart  = focusedTextbox.SelectionStart;
                    int    SelectionLength = focusedTextbox.SelectionLength;
                    int    SelectionEnd    = SelectionStart + SelectionLength;

                    if (SpeakResult == string.Empty) //If nothing in speech result, don't do anything
                    {
                        return;
                    }

                    FinalText = SpeechHelper.FormatSpeech(SelectionStart, txtbxText, SelectionEnd, SpeakResult, txtbxType);

                    if (FinalText != String.Empty) //Results are returned
                    {
                        if (SelectionLength == 0)  //0 means it is an insert
                        {
                            focusedTextbox.Text = focusedTextbox.Text.Insert(SelectionStart, FinalText);
                            focusedTextbox.Select(SelectionStart + FinalText.Length, 0); //Set the cursor location to where the start was previously
                        }
                        else //greater than 0 means it is a replace
                        {
                            focusedTextbox.SelectedText = FinalText;
                            focusedTextbox.Select(SelectionStart + FinalText.Length, 0); //Set the cursor location to where the start was previously
                        }
                    }
                }
                catch
                {
                    if (GTaskSettings.MsgError)
                    {
                        MessageBox.Show(message);
                    }
                }
            }
        }
Example #49
        //Speech
        private async void speech_Click(object sender, EventArgs eventArgs)
        {
            string message   = "Excuse me, what did you say?!";
            string txtbxType = "Title";

            if (GTaskSettings.IsFree)
            {
                GTaskSettings.Upsell();
            }
            else
            {
                try
                {
                    // Create an instance of SpeechRecognizerUI.
                    this.recoWithUI = new SpeechRecognizerUI();
                    recoWithUI.Settings.ReadoutEnabled   = false;
                    recoWithUI.Settings.ShowConfirmation = false;

                    recoWithUI.Settings.ListenText  = "Listening for Task List Title...";
                    recoWithUI.Settings.ExampleText = "Ex. 'Grocery List'";

                    // Start recognition (load the dictation grammar by default).
                    SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();

                    // Do something with the recognition result.
                    string txtbxText       = txtbxTitle.Text;
                    string FinalText       = string.Empty;
                    int    SelectionStart  = txtbxTitle.SelectionStart;
                    int    SelectionLength = txtbxTitle.SelectionLength;
                    int    SelectionEnd    = SelectionStart + SelectionLength;
                    string SpeakResult     = (recoResult.RecognitionResult == null) ? string.Empty : recoResult.RecognitionResult.Text;

                    if (SpeakResult == string.Empty) //If nothing in speech result, don't do anything
                    {
                        return;
                    }

                    FinalText = SpeechHelper.FormatSpeech(SelectionStart, txtbxText, SelectionEnd, SpeakResult, txtbxType);

                    if (FinalText != String.Empty) //Results are returned
                    {
                        if (SelectionLength == 0)  //0 means it is an insert
                        {
                            txtbxTitle.Text = txtbxTitle.Text.Insert(SelectionStart, FinalText);
                            txtbxTitle.Select(SelectionStart + FinalText.Length, 0); //Set the cursor location to where the start was previously
                        }
                        else //greater than 0 means it is a replace
                        {
                            txtbxTitle.SelectedText = FinalText;
                            txtbxTitle.Select(SelectionStart + FinalText.Length, 0); //Place the cursor just after the replaced text
                        }
                    }
                }
                catch
                {
                    if (GTaskSettings.MsgError)
                    {
                        MessageBox.Show(message);
                    }
                }
            }
        }
        private async void Button_Tap_1(object sender, System.Windows.Input.GestureEventArgs e)
        {
            // Create an instance of SpeechRecognizerUI.
            var recoWithUI = new SpeechRecognizerUI();

            // Start recognition (load the dictation grammar by default).
            SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();

            // RecognitionResult is null when the user cancels, so bail out early.
            if (recoResult.ResultStatus != SpeechRecognitionUIStatus.Succeeded)
            {
                return;
            }

            // Do something with the recognition result.
            MessageBox.Show(string.Format("You said {0}.", recoResult.RecognitionResult.Text));
            NewItemTextBox.Text = recoResult.RecognitionResult.Text;
        }
        private async Task RecognizeSpeech()
        {
            try
            {
                var localSpeechRecognizerUI = new SpeechRecognizerUI();

                localSpeechRecognizerUI.Settings.ListenText = "Say your phrase...";
                localSpeechRecognizerUI.Settings.ExampleText = "What's going on?";
                localSpeechRecognizerUI.Settings.ReadoutEnabled = false;
                localSpeechRecognizerUI.Settings.ShowConfirmation = true;

                SpeechRecognitionUIResult recognitionResult = await localSpeechRecognizerUI.RecognizeWithUIAsync();

                // Only use the result when recognition actually succeeded.
                if (recognitionResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    Dispatcher.BeginInvoke(delegate { DetectedTextTextBox.Text = recognitionResult.RecognitionResult.Text; });
                    await SayText(recognitionResult.RecognitionResult.Text);
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                throw;
            }
        }
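SayText is not defined in this example. A minimal sketch, assuming it simply reads the recognized phrase back through a SpeechSynthesizer:

        private readonly SpeechSynthesizer _synthesizer = new SpeechSynthesizer();

        private async Task SayText(string text)
        {
            // Skip empty results so the synthesizer is not invoked for nothing.
            if (!string.IsNullOrEmpty(text))
            {
                await _synthesizer.SpeakTextAsync(text);
            }
        }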
        private void InitializeSpeechRecognition()
        {
            SpeechRecognizerWithUI = new SpeechRecognizerUI();
        }
        private async void RecordVoiceClick(object sender, EventArgs e) {
            var recorder = new SpeechRecognizerUI();

            recorder.Recognizer.Grammars.AddGrammarFromPredefinedType("default", SpeechPredefinedGrammar.Dictation);
            //recorder.Recognizer.Grammars.AddGrammarFromList("ponyshit", ReadGrammarFile());

            var result = await recorder.RecognizeWithUIAsync();
            if (result.ResultStatus != SpeechRecognitionUIStatus.Succeeded) return;
            SendBox.Text = GetSpeech(result.RecognitionResult);
            SendTextClick(recorder, e);
        }
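GetSpeech is not shown either. A plausible sketch that filters out rejected results instead of pasting low-confidence text into the send box:

        private static string GetSpeech(SpeechRecognitionResult result)
        {
            // Treat rejected recognitions as empty input.
            return result.TextConfidence == SpeechRecognitionConfidence.Rejected
                ? string.Empty
                : result.Text;
        }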
        private async void recognitionMethod(object sender, System.Windows.Input.GestureEventArgs e)
        {
            SpeechRecognizerUI recoWithUI = new SpeechRecognizerUI();
            recoWithUI.Recognizer.Grammars.AddGrammarFromList("answer", new string[] { "Help", "Ajuda", "Record", "Gravar", "Where", "Onde", "Change mode", "Mudar modo", "Quick start", "Começo rápido", "Promoções", "Promotions" });
            recoWithUI.Recognizer.AudioCaptureStateChanged += myRecognizer_AudioCaptureStateChanged;
            SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();
            if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
            {
                if (recoResult.RecognitionResult.Text == "Where" || recoResult.RecognitionResult.Text == "Onde")
                {
                    holdWhere(sender, e);
                }
                else if (recoResult.RecognitionResult.Text == "Change mode" || recoResult.RecognitionResult.Text == "Mudar modo")
                {
                    holdMode(sender, e);
                }
                else if (recoResult.RecognitionResult.Text == "Quick start")
                {
                    playSound("In that page you can touch for a time in the screen and make a mesage to another person and to finish the message you need to touch in the screen for a time again. After that, you will be at a confirmation page, which you have itens and to select these itens, you need to pass the finger from top to bottom and when you listen the item that you wana to select you need to double tap the screen. If you are on map, you can change to menu screen making a slice from left to right on screnn and you can select and chose the item passing the finger from top to bottom and if you wana to select the item you make a double tap on screen.");
                }
                else if (recoResult.RecognitionResult.Text == "Promotions")
                {
                    playSound("We have a promotion near here for 10% dicount Pizza Park, it is 0.5 km from here. If you wana to have the discount, only say you are a Smart Audio user.");
                }
                else if (recoResult.RecognitionResult.Text == "Help")
                {
                    playSound("Actions that can be made: Record, Where, Change Mode, Quick Start and Promotions");
                }
                else if (recoResult.RecognitionResult.Text == "Ajuda")
                {
                    playSound("Ações que podem ser feitas: Gravar, Onde, Mudar modo");
                }
                else if (recoResult.RecognitionResult.Text == "Gravar" || recoResult.RecognitionResult.Text == "Record")
                {
                    map_Hold(sender, e);
                }
            }
            else if (recoResult.ResultStatus == SpeechRecognitionUIStatus.Cancelled)
            {
                sound.play("map");
            }

        }
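The if/else ladder above grows with every phrase pair. A sketch of the same dispatch as a phrase-to-action map (handler names taken from the example; this is a refactoring suggestion, not the original code):

        private void DispatchCommand(string text, object sender, System.Windows.Input.GestureEventArgs e)
        {
            // Each English phrase and its Portuguese synonym point at the same handler.
            var commands = new Dictionary<string, Action>(StringComparer.OrdinalIgnoreCase)
            {
                { "Where",       () => holdWhere(sender, e) },
                { "Onde",        () => holdWhere(sender, e) },
                { "Change mode", () => holdMode(sender, e) },
                { "Mudar modo",  () => holdMode(sender, e) },
                { "Record",      () => map_Hold(sender, e) },
                { "Gravar",      () => map_Hold(sender, e) },
            };

            Action action;
            if (commands.TryGetValue(text, out action))
            {
                action();
            }
        }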
        private async void Listen()
        {
            if (!busy)
            {
                busy = true;
                // Initialize the SpeechRecognizerUI object.
                recoWithUI = new SpeechRecognizerUI();

                // Query for all installed speech recognizers.
                IEnumerable<SpeechRecognizerInformation> recognizers = from recognizerInfo in InstalledSpeechRecognizers.All
                                                                       select recognizerInfo;

                // Set the recognizer to the top entry in the query result.
                recoWithUI.Recognizer.SetRecognizer(recognizers.ElementAt(0));

                // Create a string array of category titles.
                string[] settings = (from MarketCat cat in listOfCats.ToList()
                                     select cat.Title).ToArray();

                // Create a list grammar from the string array and add it to the grammar set.
                recoWithUI.Recognizer.Grammars.AddGrammarFromList("categories", settings);

                string listenText = "";
                string exampleText = "";
                for (int i = 0; i < settings.Length; i++)
                {
                    listenText += settings[i];
                    exampleText += " " + settings[i];
                }
                // Display text to prompt the user's input.
                recoWithUI.Settings.ListenText = "Category Between: ";

                recoWithUI.Settings.ReadoutEnabled = false;
                // Give an example of ideal speech input.
                recoWithUI.Settings.ExampleText = exampleText;

                // Load the grammar set and start recognition.
                SpeechRecognitionUIResult recoResult = await recoWithUI.RecognizeWithUIAsync();
                string action = recoResult.RecognitionResult.Text;
                SpeechSynthesizer ss = new SpeechSynthesizer();
                VoiceInformation vi = InstalledVoices.All.Where(v => v.Language == "en-EN" && v.Gender == VoiceGender.Male).FirstOrDefault();
                ss.SetVoice(vi);
                await ss.SpeakTextAsync("I'm looking for " + action + "!");
                MarketCat chooseCat = (from MarketCat cat in listOfCats.ToList()
                                       where cat.Title == action
                                       select cat).FirstOrDefault();
                chooseAppsPreview = CreateAppsPreview(chooseCat);
                CategorieTitle.Text = chooseCat.Title;
                AppsPreviewContent.Children.Clear();
                AppsPreviewContent.Children.Add(chooseAppsPreview);
                chooseAppsPreview.load();

                chooseAppsPreview.CompletedEvent += (e, o) =>
                {
                    sayAppName();
                };
               
                busy = false;
            }
        }
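Since FirstOrDefault returns null when no installed voice matches, a small defensive helper like the following (a sketch, not part of the original sample) avoids ever passing null to SetVoice:

        private static VoiceInformation PickVoice(string language, VoiceGender gender)
        {
            // Fall back to the phone's default voice when no match is installed.
            return InstalledVoices.All.FirstOrDefault(v => v.Language == language && v.Gender == gender)
                ?? InstalledVoices.Default;
        }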
        void MainPage_Loaded(object sender, RoutedEventArgs e)
        {
            _firstActiveReco = new SpeechRecognizerUI();
            _firstSpeechSynth = new SpeechSynthesizer();
            _secondActiveReco = new SpeechRecognizerUI();
            _secondSpeechSynth = new SpeechSynthesizer();
            GetDeviceID();
        }
Exemple #57
        private async void RecognizeCommand()
        {
            if (this.isRecoEnabled)
            {
                isRecoEnabled = false;
                btnStartRecognition.Content = "I wanna more fun!";
                if (recoOperation != null && recoOperation.Status == AsyncStatus.Started)
                {
                    recoOperation.Cancel();
                }
                return;
            }
            else
            {
                isRecoEnabled = true;
                btnStartRecognition.Content = "Stop joking!";
            }


            using (SpeechRecognizerUI recognizerUI = new SpeechRecognizerUI())
            {
                while (this.isRecoEnabled)
                {
                    try
                    {
                        recoOperation = recognizer.RecognizeAsync();
                        var recoResult = await this.recoOperation;

                        if (recoResult.TextConfidence < SpeechRecognitionConfidence.Medium)
                        {
                            using (var synthesizer = new SpeechSynthesizer())
                            {
                                await synthesizer.SpeakTextAsync("Ha-ha-ha. Say it louder!");
                            }
                        }
                        else
                        {
                            SemanticProperty genre;

                            if (recoResult.Semantics.TryGetValue("genre", out genre))
                            {
                                string filePath      = string.Empty;
                                object arg           = null;
                                string displayFormat = string.Empty;

                                switch (genre.Value.ToString())
                                {
                                case "joke":
                                    filePath = "JokeTemplate.xml";
                                    arg      = jokeCollection.ElementAt(indexForJoke);
                                    indexForJoke++;
                                    indexForJoke %= NUM_OF_JOKES;
                                    break;

                                case "tongue-twister":
                                    filePath = "TongueTwisterTemplate.xml";
                                    arg      = tongueTwisterCollection.ElementAt(indexForTwister);
                                    indexForTwister++;
                                    indexForTwister %= NUM_OF_TWISTERS;
                                    break;

                                default:
                                    break;
                                }
                                ReproduceSpeech(filePath, arg);
                            }
                        }
                    }
                    catch (System.Threading.Tasks.TaskCanceledException) { }
                    catch (Exception err)
                    {
                        // Handle the speech privacy policy error.
                        const int privacyPolicyHResult = unchecked((int)0x80045509);
                        if (err.HResult == privacyPolicyHResult)
                        {
                            MessageBox.Show("To run the app, you must first accept the speech privacy policy. To do so, navigate to Settings -> speech on your phone and check 'Enable Speech Recognition Service' ");
                            isRecoEnabled = false;
                            btnStartRecognition.Content = "Start speech recognition";
                        }
                        else
                        {
                            //textOfJoke.Text = "Error: " + err.Message;
                        }
                    }
                }
            }
        }
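The Semantics["genre"] lookup above implies that an SRGS grammar with semantic tags was loaded into `recognizer` somewhere off-page. A sketch of how such a grammar is typically attached; the key and file name are assumptions:

        recognizer = new SpeechRecognizer();
        // Hypothetical SRGS file whose <tag> rules populate Semantics["genre"]
        // with "joke" or "tongue-twister".
        recognizer.Grammars.AddGrammarFromUri(
            "genres", new Uri("ms-appx:///Grammars/GenresGrammar.grxml"));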
        private void ChangeSecondSelection(object sender, System.Windows.Controls.SelectionChangedEventArgs e)
        {
            if (e.AddedItems != null && e.AddedItems.Count == 1 && e.AddedItems[0] != null)
            {
                try
                {
                    _secondActiveReco = new SpeechRecognizerUI();
                    _selectedSecondLanguage = (LangDisplay)e.AddedItems[0];
                    //Change the first language buttons
                    //secondSpeakBtn.Content = _selectedSecondLanguage.SpeakString;
                    //secondLangSelectBtn.Content = _selectedSecondLanguage.LangSelf;
                    //Change the second language listener
                    // the language listener we want may not be installed
                    // so lets check to see if it is or isn't installed.
                    bool languageInstalled = false;
                    foreach (SpeechRecognizerInformation sri in InstalledSpeechRecognizers.All)
                    {
                        if (sri.Language == _selectedSecondLanguage.LangCode)
                        {
                            _secondActiveReco.Recognizer.SetRecognizer(sri);
                            languageInstalled = true;
                        }
                    }
                    if (!languageInstalled)
                        MessageBox.Show("You have not installed the speech recognition for that language. Please visit the speech section of your settings to install it.");
                    //Change the first language speaker
                    foreach (VoiceInformation vi in InstalledVoices.All)
                    {
                        if (vi.Language == _selectedSecondLanguage.LangCode)
                        {
                            _secondSpeechSynth = new SpeechSynthesizer();
                            _secondSpeechSynth.SetVoice(vi);
                        }
                    }
                    BitmapImage bit = new BitmapImage(new Uri(_selectedSecondLanguage.LangFlag, UriKind.Relative));
                    Image newSecondImage = new Image();

                    newSecondImage.Source = bit;
                    secondFlag.Child = newSecondImage;
                    secondLangText.Text = _selectedSecondLanguage.LangSelf;
                    secondLangHeader.Text = _selectedSecondLanguage.LangSelf;
                }
                catch { /* swallow lookup/initialization errors and keep the previous language selection */ }
            }
            HideSecondLangList.Begin();
        }
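The recognizer scan above can also be factored into a reusable lookup; a sketch (not in the original) that returns null when the language pack is not installed:

        private static SpeechRecognizerInformation FindRecognizer(string langCode)
        {
            // InstalledSpeechRecognizers.All lists every recognizer on the device.
            return InstalledSpeechRecognizers.All
                .FirstOrDefault(sri => sri.Language == langCode);
        }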
        private void InitializeVoiceRecognition()
        {
            speechRecognizerWithUI = new SpeechRecognizerUI();
            List<string> searchTerms = ExtractSearchTerms();
            speechRecognizerWithUI.Recognizer.Grammars.AddGrammarFromList("SearchTerms", searchTerms);
        }
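ExtractSearchTerms is not shown; a minimal sketch, assuming the terms come from a collection the page already holds (the `items` collection and its `Name` property are hypothetical):

        private List<string> ExtractSearchTerms()
        {
            // Distinct item names become the list-grammar phrases.
            return items.Select(item => item.Name)
                        .Distinct()
                        .ToList();
        }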
        /// <summary>
        /// Method to instantiate recognizer with appropriate grammar and perform recognition.
        /// </summary>
        private async void HandleSpeech()
        {
            if (_handlingSpeech)
            {
                return;
            }

            _handlingSpeech = true;
            try
            {
                SpeechRecognizerUI        recognizer = new SpeechRecognizerUI();
                SpeechRecognitionUIResult result     = null;

                if (this.InputScope != null && (this.InputScope.Names[0] as InputScopeName).NameValue.Equals(InputScopeNameValue.Search))
                {
                    recognizer.Recognizer.Grammars.AddGrammarFromPredefinedType("WebSearchGrammar", SpeechPredefinedGrammar.WebSearch);
                }

                try
                {
                    result = await recognizer.RecognizeWithUIAsync();
                }
                catch (OperationCanceledException)
                {
                    return;
                }
                catch (Exception ex)
                {
                    if ((uint)ex.HResult == 0x80045508)
                    {
                        // This can occur when speech recognition is interrupted by navigating away from
                        // the app. We'll just swallow the exception to work around it.
                        return;
                    }

                    MessageBox.Show("An error occured. \n" + ex.Message);
                    return;
                }

                // The SpeechRecognizerUI component will handle cases where the speech was not recognized and prompt
                // user to retry. This check is just to make sure that the speech recognition request was not
                // canceled by the back button, navigation, etc.
                if (result.ResultStatus == SpeechRecognitionUIStatus.Succeeded)
                {
                    // Raise SpeechRecognized event
                    var handler = SpeechRecognized;
                    SpeechRecognizedEventArgs eventArgs = new SpeechRecognizedEventArgs(result.RecognitionResult);

                    if (handler != null)
                    {
                        handler(this, eventArgs);

                        if (eventArgs.Canceled)
                        {
                            return;
                        }
                    }

                    // Update display
                    string originalText = this.Text;

                    if (_useSelectedTextReplacement)
                    {
                        string newText = originalText.Substring(0, _selectionStart) + result.RecognitionResult.Text + originalText.Substring(_selectionEnd + 1);
                        this.Text = newText;
                        this.Select(_selectionStart, result.RecognitionResult.Text.Length);
                    }
                    else
                    {
                        this.Text = result.RecognitionResult.Text;
                        this.Focus();
                        this.Select(_selectionStart, result.RecognitionResult.Text.Length);
                    }
                }
            }
            finally
            {
                _handlingSpeech = false;
            }
        }
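A usage sketch for the control above, assuming the event args expose the recognition result as `Result` (the args class is constructed from result.RecognitionResult in the code, but its property names are not shown): subscribing to SpeechRecognized and setting Canceled rejects a result before it replaces the text.

        speechTextBox.SpeechRecognized += (s, args) =>
        {
            // Reject empty results; Canceled = true keeps the existing text.
            if (string.IsNullOrWhiteSpace(args.Result.Text))
            {
                args.Canceled = true;
            }
        };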