Code Example #1
File: ShellViewModel.cs Project: kaki104/IoTSamples
        private async void Frame_Navigated(object sender, NavigationEventArgs e)
        {
            if (e != null)
            {
                var vm             = NavigationService.GetNameOfRegisteredPage(e.SourcePageType);
                var navigationItem = PrimaryItems?.FirstOrDefault(i => i.ViewModelName == vm);
                if (navigationItem == null)
                {
                    navigationItem = SecondaryItems?.FirstOrDefault(i => i.ViewModelName == vm);
                }

                if (navigationItem != null)
                {
                    ChangeSelected(_lastSelectedItem, navigationItem);
                    _lastSelectedItem = navigationItem;
                }

                // Initialize Sentence
                var init = Singleton<SentenceHelper>.Instance;

                // Check microphone permission
                bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

                if (permissionGained)
                {
                    var speechLanguage = SpeechRecognizer.SystemSpeechLanguage;
                    var langTag        = speechLanguage.LanguageTag;
                    var speechContext  = ResourceContext.GetForCurrentView();
                    speechContext.Languages = new[] { langTag };
                }
            }
        }
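
Every example on this page funnels through the same AudioCapturePermissions.RequestMicrophonePermission helper, which none of the snippets include. In the Microsoft UWP speech samples this helper is typically implemented by attempting to initialize an audio-only MediaCapture session; the sketch below is a reconstruction along those lines, not the exact code from any of these projects:

        using System;
        using System.Threading.Tasks;
        using Windows.Media.Capture;

        public static class AudioCapturePermissions
        {
            // HResult produced when no audio capture device is present.
            private static readonly int NoCaptureDevicesHResult = unchecked((int)0xC00DABE0);

            public static async Task<bool> RequestMicrophonePermission()
            {
                try
                {
                    // Requesting an audio-only capture stream triggers the system
                    // microphone consent prompt the first time it runs.
                    var settings = new MediaCaptureInitializationSettings
                    {
                        StreamingCaptureMode = StreamingCaptureMode.Audio,
                        MediaCategory        = MediaCategory.Speech
                    };
                    var capture = new MediaCapture();
                    await capture.InitializeAsync(settings);
                }
                catch (UnauthorizedAccessException)
                {
                    // The user denied access, or the Microphone capability is missing from the manifest.
                    return false;
                }
                catch (Exception exception) when (exception.HResult == NoCaptureDevicesHResult)
                {
                    // No microphone is attached to the device.
                    return false;
                }
                return true;
            }
        }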
Code Example #2
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            // Check microphone permission
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                var speechLanguage = SpeechRecognizer.SystemSpeechLanguage;
                var langTag        = speechLanguage.LanguageTag;
                var speechContext  = ResourceContext.GetForCurrentView();
                speechContext.Languages = new[] { langTag };

                var supportedLanguages = SpeechRecognizer.SupportedGrammarLanguages;
                var enUS = supportedLanguages.FirstOrDefault(p => p.LanguageTag == "en-US");
                if (enUS == null)
                {
                    enUS = SpeechRecognizer.SystemSpeechLanguage;
                }
                await InitializeRecognizerAsync(enUS);
            }

            _timer = new DispatcherTimer
            {
                Interval = TimeSpan.FromSeconds(10)
            };
            _timer.Tick += _timer_Tick;
        }
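
InitializeRecognizerAsync itself is not shown in this snippet. A minimal sketch of what such a helper usually does, assuming a _speechRecognizer field and a plain dictation grammar:

        private SpeechRecognizer _speechRecognizer;

        private async Task InitializeRecognizerAsync(Language recognizerLanguage)
        {
            // Dispose any previous recognizer before re-initializing.
            _speechRecognizer?.Dispose();
            _speechRecognizer = new SpeechRecognizer(recognizerLanguage);

            // A dictation topic constraint optimizes for free-form speech.
            _speechRecognizer.Constraints.Add(
                new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation"));

            SpeechRecognitionCompilationResult compilation = await _speechRecognizer.CompileConstraintsAsync();
            if (compilation.Status != SpeechRecognitionResultStatus.Success)
            {
                throw new InvalidOperationException("Grammar compilation failed: " + compilation.Status);
            }
        }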
Code Example #3
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            // Prompt the user for permission to access the microphone. This request will only happen
            // once, it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                // Initialize resource map to retrieve localized speech strings.
                Language speechLanguage = SpeechRecognizer.SystemSpeechLanguage;
                string   langTag        = speechLanguage.LanguageTag;
                speechContext           = ResourceContext.GetForCurrentView();
                speechContext.Languages = new string[] { langTag };

                speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");

                PopulateLanguageDropdown();
                await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);

                TurnRecognizer1();
            }
            else
            {
                this.resultTextBlock.Visibility = Visibility.Visible;
                this.resultTextBlock.Text       = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
                cbLanguageSelection.IsEnabled   = false;
                //luis
            }
        }
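
The speechResourceMap initialized above is normally used to fetch UI strings localized for the active speech language; a small hedged sketch (the resource key is illustrative):

        // Look up a prompt localized for the currently selected speech language.
        string prompt = speechResourceMap.GetValue("ListeningPromptText", speechContext).ValueAsString;
        resultTextBlock.Text = prompt;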
Code Example #4
        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// </summary>
        private async Task InitSpeech()
        {
            // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                // Enable the recognition buttons.
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    // cleanup prior to re-initializing this scenario.
                    //speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer();

                // Provide feedback to the user about the state of the recognizer.
                //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition buttons.
                    button.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }
            }
            else
            {
                // "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
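
The HypothesisGenerated event subscribed in this method fires on a background thread, which is why the UI dispatcher is saved first. A minimal sketch of the handler, assuming a resultTextBlock control like the one in the commented-out lines:

        private async void SpeechRecognizer_HypothesisGenerated(
            SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
        {
            string hypothesis = args.Hypothesis.Text;

            // Speech events arrive off the UI thread, so marshal through the saved dispatcher.
            await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
            {
                resultTextBlock.Text = hypothesis + "...";
            });
        }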
Code Example #5
        /// The methods provided in this section are simply used to allow
        /// NavigationHelper to respond to the page's navigation methods.
        ///
        /// Page specific logic should be placed in event handlers for the
        /// <see cref="Common.NavigationHelper.LoadState"/>
        /// and <see cref="Common.NavigationHelper.SaveState"/>.
        /// The navigation parameter is available in the LoadState method
        /// in addition to page state preserved during an earlier session.

        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            navigationHelper.OnNavigatedTo(e);

            // Prompt the user for permission to access the microphone. This request will only happen
            // once, it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            speechLanguage = SpeechRecognizer.SystemSpeechLanguage;
            string langTag = speechLanguage.LanguageTag;

            // Initialize resource map to retrieve localized speech strings.
            speechContext = ResourceContext.GetForCurrentView();
            IReadOnlyList<Language> supportedLanguages = SpeechRecognizer.SupportedGrammarLanguages;

            if (supportedLanguages.Count > 1)
            {
                if (Windows.Globalization.ApplicationLanguages.PrimaryLanguageOverride == "zh-Hans-CN")
                {
                    speechContext.Languages = new string[] { "zh-Hans-CN" };
                    speechLanguage          = new Windows.Globalization.Language("zh-Hans-CN");
                }
                else
                {
                    speechContext.Languages = new string[] { "en-US" };
                    speechLanguage          = new Windows.Globalization.Language("en-US");
                }
            }
            else
            {
                speechContext.Languages = new string[] { langTag };
            }
            speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");
            // Initialize Command recognizer
            await InitializeRecognizer(speechLanguage);

            // Initialize RecognizerNote
            await InitializeRecognizerNote(speechLanguage);

            if (speechRecognizer.State == SpeechRecognizerState.Idle && permissionGained)
            {
                try
                {
                    await speechRecognizer.ContinuousRecognitionSession.StartAsync();
                }
                catch (Exception ex)
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
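
Results for the continuous session started above arrive via ContinuousRecognitionSession.ResultGenerated. A hedged sketch of such a handler; the event wiring, dispatcher field, and resultTextBlock control are assumptions:

        private async void ContinuousRecognitionSession_ResultGenerated(
            SpeechContinuousRecognitionSession sender,
            SpeechContinuousRecognitionResultGeneratedEventArgs args)
        {
            // Only act on results the recognizer is reasonably confident about.
            if (args.Result.Confidence == SpeechRecognitionConfidence.High ||
                args.Result.Confidence == SpeechRecognitionConfidence.Medium)
            {
                await dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                    () => resultTextBlock.Text = args.Result.Text);
            }
        }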
Code Example #6
        async Task RecordSpeechFromMicrophoneAsync(VoiceInformation voiceInformation)
        {
            if (!await AudioCapturePermissions.RequestMicrophonePermission())
            {
                return;
            }

            if (voiceInformation == null)
            {
                return;
            }

            if (!await DoRecognition())
            {
                await SpeakAndListen(listenOnly: true);
            }

            async Task<bool> DoRecognition()
            {
                using (SpeechRecognizer speechRecognizer = new SpeechRecognizer(new Windows.Globalization.Language(voiceInformation.Language)))
                {
                    SpeechRecognitionConstraints.ToList().ForEach(c => speechRecognizer.Constraints.Add(c));

                    speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(SpeechRecognitionConstants.InitialSilenceTimeout);
                    speechRecognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(SpeechRecognitionConstants.EndSilenceTimeout);

                    await speechRecognizer.CompileConstraintsAsync();

                    SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();

                    if (result.Status == SpeechRecognitionResultStatus.Success &&
                        new HashSet<SpeechRecognitionConfidence>
                        {
                            SpeechRecognitionConfidence.High,
                            SpeechRecognitionConfidence.Medium,
                            SpeechRecognitionConfidence.Low
                        }.Contains(result.Confidence) &&
                        uiNotificationService.CanGoBack)
                    {
                        await GoBack();

                        return true;
                    }
                    else
                    {
                        return false;
                    }
                }
            }
        }
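
Since the HashSet above contains every confidence level except Rejected, the acceptance test can be stated more directly; an equivalent check:

        // Accept any successful result that was not outright rejected.
        bool accepted = result.Status == SpeechRecognitionResultStatus.Success &&
                        result.Confidence != SpeechRecognitionConfidence.Rejected;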
Code Example #7
File: MainPage.xaml.cs Project: ploiu/Bob
 private async void RequestMicrophoneAcessIfUserWantsVoiceDetection()
 {
     if (Utils.IsListeningSettingEnabled())
     {
         if (await AudioCapturePermissions.RequestMicrophonePermission())
         {
             SpeechRecognitionManager.StartListeningForMainPage(performActionFromCommandBoxText, this.CommandBox);
         }
         else
         {
             TextToSpeechEngine.SpeakText(this.media, "Sorry, but something went wrong with setting up your microphone. You cannot use me through speech, but you can still use the command bar at the bottom of the screen.");
         }
     }
 }
Code Example #8
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
            }
            else
            {
            }

            await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
        }
Code Example #9
        private async Task Initialize()
        {
            // Prompt the user for permission to access the microphone.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                this.mainPage.IsEnabled                = true;
                this.notificationControl.Visibility    = Visibility.Visible;
                this.targetLanguagesListView.IsEnabled = this.isAzureSpeechEndpoint;

                timer          = new DispatcherTimer();
                timer.Tick    += AutoStopRecognitionHandlerTimerTick;
                timer.Interval = new TimeSpan(0, 0, RecognitionTimeLimitInSeconds);

                recordingTimer          = new DispatcherTimer();
                recordingTimer.Tick    += RecordingTimerTickHandler;
                recordingTimer.Interval = new TimeSpan(0, 0, 1);

                this.speechToTextView.ShowNotificationEventHandler            += OnShowNotification;
                this.speechToTextWithTranslation.ShowNotificationEventHandler += OnShowNotification;
                this.speechToTextWithTranslation.Closed += (s, args) => {
                    SpeechExplorerState = oldSpeechExplorerState != SpeechExplorerState.SpeechToTextWithTranslation ? oldSpeechExplorerState : SpeechExplorerState.SpeechToText;
                };
            }
            else
            {
                this.mainPage.IsEnabled             = false;
                this.notificationControl.Visibility = Visibility.Collapsed;

                ContentDialog microphoneAccessDialog = new ContentDialog
                {
                    Title             = "Intelligent Kiosk can't access the microphone",
                    Content           = "To let kiosk use this device's microphone, go to Windows Settings -> Apps and turn on microphone permissions for Intelligent Kiosk.",
                    PrimaryButtonText = "Open Settings",
                    CloseButtonText   = "Close",
                    DefaultButton     = ContentDialogButton.Primary
                };

                ContentDialogResult result = await microphoneAccessDialog.ShowAsync();

                if (result == ContentDialogResult.Primary)
                {
                    await Windows.System.Launcher.LaunchUriAsync(new Uri("ms-settings:privacy-microphone"));
                }
            }
        }
Code Example #10
        private async Task InitializeAsync()
        {
            // If the user hasn't granted permission to use speech, or the app is running on a phone,
            // the voice button should not be shown.
            if (!await AudioCapturePermissions.RequestMicrophonePermission())
            {
                //if (await Template10.Utils.AudioUtils.RequestMicrophonePermission() == false || DeviceUtils.Current().DeviceDisposition() == DeviceUtils.DeviceDispositions.Phone
                //    || DeviceUtils.Current().DeviceDisposition() == DeviceUtils.DeviceDispositions.Continuum)
                VisualStateManager.GoToState(this, VISUAL_STATE_VOICE_DISABLED, true);
            }

            // If the textbox is read-only, voice recognition should not be available.
            if (this.IsReadOnly)
            {
                this.voiceButton.IsEnabled     = false;
                this.stopVoiceButton.IsEnabled = false;
            }
        }
Code Example #11
 /// <summary>
 /// When activating the scenario, ensure we have permission from the user to access their microphone, and
 /// provide an appropriate path for the user to enable access to the microphone if they haven't
 /// given explicit permission for it.
 /// </summary>
 /// <param name="e">The navigation event details</param>
 protected async override void OnNavigatedTo(NavigationEventArgs e)
 {
     // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
     dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
     bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
     if (permissionGained)
     {
         // Enable the recognition buttons.                
         await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
         buttonOnListen.IsEnabled = true;
     }
     else
     {
         // Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.
         buttonOnListen.IsEnabled = false;
     }
 }
Code Example #12
        public async void Page_Loaded(object sender, RoutedEventArgs e)
        {
            _dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            var permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (!permissionGained)
            {
                return;
            }

            await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);

            // For debugging: show a message about the detected language
            // Poruka("Jezik: " + SpeechRecognizer.SystemSpeechLanguage.DisplayName, "Jezik");
            await _speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Code Example #13
File: MainPage.xaml.cs Project: sarand2/medimotion
        /// <summary>
        /// Upon entering the scenario, ensure that we have permissions to use the Microphone. This may entail popping up
        /// a dialog to the user on Desktop systems. Only enable functionality once we've gained that permission in order to
        /// prevent errors from occurring when using the SpeechRecognizer. If speech is not a primary input mechanism, developers
        /// should consider disabling appropriate parts of the UI if the user does not have a recording device, or does not allow
        /// audio input.
        /// </summary>
        protected async void VoiceStartup()
        {
            // Prompt the user for permission to access the microphone. This request will only happen
            // once, it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                btnContinuousRecognize.IsEnabled = true;
                await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
            }
            else
            {
                this.commandBox.PlaceholderText  = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
                btnContinuousRecognize.IsEnabled = false;
                //  cbLanguageSelection.IsEnabled = false;
            }
        }
Code Example #14
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            // Prompt the user for permission to access the microphone. This request will only happen
            // once, it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                btnContinuousRecognize.IsEnabled = true;
                PopulateLanguageDropdown();
                await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
            }
            else
            {
                this.dictationTextBox.Text       = "Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.";
                btnContinuousRecognize.IsEnabled = false;
                cbLanguageSelection.IsEnabled    = false;
            }
        }
Code Example #15
        public async void initializeSpeechRec()
        {
            TIASiml.initiateSIML();
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (!permissionGained)
            {
                await new MessageDialog("Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.").ShowAsync();
                return; // Do not start recognition without microphone access.
            }

            this.dispatcher                = CoreWindow.GetForCurrentThread().Dispatcher;
            this.speechRecognizer          = new SpeechRecognizer();
            speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.Completed       += ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Code Example #16
        private async void Main_LoadedAsync(object sender, RoutedEventArgs e)
        {
            cbVoice.Items.Clear();
            foreach (var voice in SpeechSynthesizer.AllVoices)
            {
                cbVoice.Items.Add(voice.DisplayName);
                if (voice.DisplayName == SpeechSynthesizer.DefaultVoice.DisplayName)
                {
                    cbVoice.SelectedItem = cbVoice.Items.Last();
                }
            }
            isListening = false;

            // Prompt the user for permission to access the microphone. This request will only happen
            // once, it will not re-prompt if the user rejects the permission.
            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

            if (permissionGained)
            {
                btnListen.IsEnabled            = true;
                btnListen.Visibility           = Visibility.Visible;
                cbLanguageSelection.Visibility = Visibility.Visible;

                // Initialize resource map to retrieve localized speech strings.
                Language speechLanguage = SpeechRecognizer.SystemSpeechLanguage;
                string   langTag        = speechLanguage.LanguageTag;
                speechContext           = ResourceContext.GetForCurrentView();
                speechContext.Languages = new string[] { langTag };

                speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");

                PopulateLanguageDropdown();
                await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
            }
            else
            {
                btnListen.IsEnabled            = false;
                btnListen.Visibility           = Visibility.Collapsed;
                cbLanguageSelection.Visibility = Visibility.Collapsed;
            }
        }
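
The voice list built at the top of this handler pairs naturally with playback through SpeechSynthesizer. A minimal sketch of speaking with the voice selected in cbVoice, assuming a MediaElement named media on the page:

        // Speak a phrase using the voice chosen in the combo box.
        var synthesizer = new SpeechSynthesizer
        {
            Voice = SpeechSynthesizer.AllVoices.First(v => v.DisplayName == (string)cbVoice.SelectedItem)
        };

        SpeechSynthesisStream stream = await synthesizer.SynthesizeTextToStreamAsync("Hello, world.");
        media.SetSource(stream, stream.ContentType); // Plays when the MediaElement's AutoPlay is true.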
Code Example #17
        protected async override void OnNavigatedTo(NavigationEventArgs e) // runs when we navigate to this page
        {
            // MQTT
            MiMqtt = new mqtt();
            MiMqtt.cliente.MqttMsgPublishReceived += cliente_MqttMsgPublishReceivedAsync; // subscribe to the event

            // Presence
            miPresencia = new Presencia();

            // VOICE
            miEstado = Estado.Parado; // we start in the stopped state
            nextStep = Estado.Parado;

            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            // Check whether we have microphone permission; if we do, start the process (InitializeRecognizer)
            bool tengoPermiso = await AudioCapturePermissions.RequestMicrophonePermission();

            if (tengoPermiso)
            {
                // initialize speech
                inicializaHabla();

                // choose Spanish (valid for all the recognizers)
                Language speechLanguage = SpeechRecognizer.SystemSpeechLanguage;

                // initialize the compiled-grammar recognizer
                await InitializeRecognizer(speechLanguage);

                // and launch the state control
                await ControlEstado();
            }
            }
            else
            {
                await dime("No tengo acceso al micrófono; cerrando");

                MostrarTexto(txbEstado, "Sin acceso al micrófono");
            }
        }
Code Example #18
        public async Task <SpeechResult> RecognizeAsync(string listenText, string exampleText)
        {
            try
            {
                bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();

                if (!permissionGained)
                {
                    await this.messageBoxService.ShowAsync(
                        StringResources.Message_Warning,
                        StringResources.Speech_ReviewPermission);

                    return new SpeechResult(string.Empty, false);
                }

                SpeechRecognizer recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
                recognizer.UIOptions.IsReadBackEnabled = false;
                recognizer.UIOptions.ShowConfirmation  = false;
                // Surface the supplied prompt and example text in the system recognition UI.
                recognizer.UIOptions.AudiblePrompt = listenText;
                recognizer.UIOptions.ExampleText   = exampleText;

                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                recognizer.Constraints.Add(dictationConstraint);

                SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();

                if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    SpeechRecognitionResult result = await recognizer.RecognizeWithUIAsync();

                    return new SpeechResult(result.Text.Trim('.'), result.Status == SpeechRecognitionResultStatus.Success);
                }

                return new SpeechResult(string.Empty, false);
            }
            catch (Exception e)
            {
                return new SpeechResult(e.Message, false);
            }
        }
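
A call site for this method might look like the following, assuming SpeechResult exposes the text and success flag passed to its constructor as Text and Success ('speechService' and 'ProcessDictation' are illustrative names):

        SpeechResult speech = await speechService.RecognizeAsync(
            "What would you like to add?",        // prompt spoken/shown by the system UI
            "For example: 'buy milk tomorrow'");  // example text shown in the system UI

        if (speech.Success)
        {
            ProcessDictation(speech.Text);
        }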
Code Example #19
File: RecognitionService.cs Project: vamsitp/basher
        public async Task Initialize()
        {
            if (!this.permissionGained)
            {
                await WindowManagerService.Current.MainDispatcher.RunAsync(CoreDispatcherPriority.Normal, async () => this.permissionGained = await AudioCapturePermissions.RequestMicrophonePermission());
            }

            try
            {
                if (this.speechRecognizer != null)
                {
                    this.speechRecognizer.StateChanged -= this.SpeechRecognizer_StateChanged;
                    this.speechRecognizer.ContinuousRecognitionSession.Completed       -= this.ContinuousRecognitionSession_Completed;
                    this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= this.ContinuousRecognitionSession_ResultGenerated;
                    this.speechRecognizer.HypothesisGenerated -= this.SpeechRecognizer_HypothesisGenerated;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                var recognizerLanguage = new Language(App.Settings.SpeechLocale); // SpeechRecognizer.SystemSpeechLanguage
                this.speechRecognizer = new SpeechRecognizer(recognizerLanguage);

                // Provide feedback to the user about the state of the recognizer. This can be used to provide visual feedback in the form
                // of an audio indicator to help the user understand whether they're being heard.
                this.speechRecognizer.StateChanged += this.SpeechRecognizer_StateChanged;

                // Apply the dictation topic constraint to optimize for dictated free-form speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                this.speechRecognizer.Constraints.Add(dictationConstraint);
                var result = await this.speechRecognizer.CompileConstraintsAsync();

                if (result.Status != SpeechRecognitionResultStatus.Success)
                {
                    await this.dialogService.ShowError(result.Status.ToString(), "Grammar Compilation Failed", "OK", null);
                }

                // Handle continuous recognition events. Completed fires when various error states occur. ResultGenerated fires when
                // some recognized phrases occur, or the garbage rule is hit. HypothesisGenerated fires during recognition, and
                // allows us to provide incremental feedback based on what the user's currently saying.
                this.speechRecognizer.ContinuousRecognitionSession.Completed       += this.ContinuousRecognitionSession_Completed;
                this.speechRecognizer.ContinuousRecognitionSession.ResultGenerated += this.ContinuousRecognitionSession_ResultGenerated;
                this.speechRecognizer.HypothesisGenerated += this.SpeechRecognizer_HypothesisGenerated;
                await this.StartRecognizing(true);
            }
            catch (Exception ex)
            {
                if ((uint)ex.HResult == HResultRecognizerNotFound)
                {
                    throw new Exception("Speech Language pack for selected language not installed.", ex);
                }
                else
                {
                    throw;
                }
            }
        }
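
The StateChanged handler subscribed in this service is usually where listening feedback is surfaced. A hedged sketch, reusing the snippet's WindowManagerService dispatcher:

        private async void SpeechRecognizer_StateChanged(
            SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
        {
            // Report transitions (Idle, Capturing, Processing, ...) so the UI can
            // indicate whether the user is currently being heard.
            await WindowManagerService.Current.MainDispatcher.RunAsync(
                CoreDispatcherPriority.Normal,
                () => System.Diagnostics.Debug.WriteLine($"Recognizer state: {args.State}"));
        }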
Code Example #20
        async Task RecordSpeechFromMicrophoneAsync(VoiceInformation voiceInformation, Func<SpeechRecognitionResult, Task> doNext)
        {
            if (!await AudioCapturePermissions.RequestMicrophonePermission())
            {
                return;
            }

            if (voiceInformation == null)
            {
                return;
            }

            if (!await DoRecognition())
            {
                //media.StopMedia();
                //In some cases DoRecognition ends prematurely e.g. when
                //the user allows access to the microphone but there is no
                //microphone available so do not stop media.
                await SpeakAndListen();
            }

            async Task<bool> DoRecognition()
            {
                using (SpeechRecognizer speechRecognizer = new SpeechRecognizer(new Windows.Globalization.Language(voiceInformation.Language)))
                {
                    SpeechRecognitionConstraints.ToList().ForEach(c => speechRecognizer.Constraints.Add(c));

                    speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(SpeechRecognitionConstants.InitialSilenceTimeout);
                    speechRecognizer.Timeouts.EndSilenceTimeout     = TimeSpan.FromSeconds(SpeechRecognitionConstants.EndSilenceTimeout);

                    await speechRecognizer.CompileConstraintsAsync();

                    SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();

                    if (!(result.Status == SpeechRecognitionResultStatus.Success &&
                          new HashSet<SpeechRecognitionConfidence>
                          {
                              SpeechRecognitionConfidence.High,
                              SpeechRecognitionConfidence.Medium,
                              SpeechRecognitionConfidence.Low
                          }.Contains(result.Confidence)))
                    {
                        return false;
                    }

                    if (result.Constraint.Tag == SpeechRecognitionConstants.GOBACKTAG)
                    {
                        if (UiNotificationService.CanGoBack)
                        {
                            await GoBack();

                            return true;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Options constraint succeeded
                        await doNext(result);

                        return true;
                    }
                }
            }
        }
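
A caller of this overload supplies the continuation to run when the options constraint matches; an illustrative call site (HandleOptionAsync is a hypothetical handler):

        await RecordSpeechFromMicrophoneAsync(
            SpeechSynthesizer.DefaultVoice,
            async result =>
            {
                // Runs only when a non-go-back constraint matched with sufficient confidence.
                await HandleOptionAsync(result.Text);
            });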