/// <summary>
/// Runs the service.
/// </summary>
/// <param name="state">The state <see cref="object"/>.</param>
private static async void Run(object state)
{
    try
    {
        // Restart the listener only if nothing has happened for more than 30 seconds.
        if (lastListenCycle > DateTime.Now.AddSeconds(-30))
        {
            return;
        }

        // Tear down any previous recognizer before creating a new one.
        if (recognizer != null)
        {
            try
            {
                await recognizer.StopRecognitionAsync();
            }
            catch (Exception ex)
            {
                Log(ex);
            }
        }

        recognizer = new SpeechRecognizer(new Language("de-DE"));
        recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(2);
        recognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(0.5);
        recognizer.StateChanged += RecognizerStateChanged;
        recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;

        // A fixed phrase list ("Licht an" / "Licht aus") plus a web-search topic constraint as a fallback.
        var textGrammar = new SpeechRecognitionListConstraint(new List<string> { "Licht an", "Licht aus" });
        var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
        recognizer.Constraints.Add(textGrammar);
        recognizer.Constraints.Add(webSearchGrammar);

        SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();
        // Log the Status enum; ToString() on the result object only prints the type name.
        Log(LogLevel.Debug, "Speech recognition compile status: " + compilationResult.Status);

        if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
        {
            await Listen();
        }
    }
    catch (Exception ex)
    {
        Log(ex);
    }
}
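`Run` refers to several members that are not shown here: `recognizer`, `lastListenCycle`, `Log`, `Listen`, and the two event handlers. A minimal sketch of what the missing pieces might look like, assuming a static listener class; the member names and bodies are illustrative, only the `Windows.Media.SpeechRecognition` calls are real:

private static SpeechRecognizer recognizer;
private static DateTime lastListenCycle = DateTime.MinValue;

private static async Task Listen()
{
    // Start the continuous session; it keeps listening until it is stopped or fails.
    await recognizer.ContinuousRecognitionSession.StartAsync();
    lastListenCycle = DateTime.Now;
}

private static void RecognizerResultGenerated(SpeechContinuousRecognitionSession session, SpeechContinuousRecognitionResultGeneratedEventArgs args)
{
    // Record activity so the watchdog in Run does not tear down a live session.
    lastListenCycle = DateTime.Now;
    Log(LogLevel.Debug, "Heard: " + args.Result.Text + " (" + args.Result.Confidence + ")");
}

With members like these in place, `Run` matches the `System.Threading.Timer` callback signature and can be scheduled every few seconds as a watchdog that rebuilds the recognizer after 30 seconds without a result.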
// Initialize the speech recognizer and start continuous recognition.
private async void initializeSpeechRecognizer()
{
    // Initialize the recognizer.
    recognizer = new SpeechRecognizer();

    // Set event handlers.
    recognizer.StateChanged += RecognizerStateChanged;
    recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;

    // Load the SRGS grammar file constraint. (String.Format with no format arguments
    // was a no-op, so the constant is used directly.)
    StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(SRGS_FILE);
    SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);

    // Add the grammar constraint.
    recognizer.Constraints.Add(grammarConstraint);

    // Compile the grammar and log the outcome.
    SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();
    Debug.WriteLine("Status: " + compilationResult.Status);

    // If compilation succeeded, start the continuous recognition session.
    if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
    {
        await recognizer.ContinuousRecognitionSession.StartAsync();
    }
}
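This version assumes an `SRGS_FILE` constant naming a grammar file that ships inside the app package. A plausible declaration, with an illustrative file name that is not from the original; the .grxml file must be added to the project with Build Action set to Content so that `Package.Current.InstalledLocation.GetFileAsync` can resolve it, and the app manifest needs the Microphone capability:

private SpeechRecognizer recognizer;
private const string SRGS_FILE = "grammar.grxml"; // illustrative name, not from the original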
private async void StartSpeechRecognizer()
{
    // Compile the loaded grammar files.
    SpeechRecognitionCompilationResult compilationResult = await _recognizer.CompileConstraintsAsync();

    // If compilation succeeded, start the continuous recognition session.
    if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
    {
        SpeechContinuousRecognitionSession session = _recognizer.ContinuousRecognitionSession;
        try
        {
            await session.StartAsync();
        }
        catch (Exception e)
        {
            // TODO: report the failure to the user and write it to a log.
            // e.Message carries the error text; e.Data is just a key/value dictionary.
            Debug.WriteLine(e.Message);
        }
    }
    else
    {
        // TODO: report the failure to the user and write it to a log.
        Debug.WriteLine("Status: " + compilationResult.Status);
    }
}
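The first two snippets subscribe a `RecognizerStateChanged` handler that is never shown. A minimal sketch, assuming plain logging is all that is needed; the body is illustrative:

private void RecognizerStateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
{
    // Log state transitions (Idle, Capturing, Processing, ...) to make dropped sessions visible.
    Debug.WriteLine("Recognizer state: " + args.State);
}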