/// <summary>
/// Creates and configures a new speech recognition engine.
/// </summary>
/// <returns>A new speech recognition engine object.</returns>
private SpeechRecognitionEngine CreateSpeechRecognitionEngine()
{
    // Build the underlying engine for the configured language and grammars
    var engine = SystemSpeech.CreateSpeechRecognitionEngine(this.Configuration.Language, this.Configuration.Grammars);

    // Wire up handlers for all recognition, audio, and grammar events
    engine.SpeechDetected += this.OnSpeechDetected;
    engine.SpeechHypothesized += this.OnSpeechHypothesized;
    engine.SpeechRecognized += this.OnSpeechRecognized;
    engine.SpeechRecognitionRejected += this.OnSpeechRecognitionRejected;
    engine.AudioSignalProblemOccurred += this.OnAudioSignalProblemOccurred;
    engine.AudioStateChanged += this.OnAudioStateChanged;
    engine.RecognizeCompleted += this.OnRecognizeCompleted;
    engine.RecognizerUpdateReached += this.OnRecognizerUpdateReached;
    engine.AudioLevelUpdated += this.OnAudioLevelUpdated;
    engine.EmulateRecognizeCompleted += this.OnEmulateRecognizeCompleted;
    engine.LoadGrammarCompleted += this.OnLoadGrammarCompleted;

    // Describe the audio input format from the configured input format
    var inputFormat = this.Configuration.InputFormat;
    var formatInfo = new SpeechAudioFormatInfo(
        (EncodingFormat)inputFormat.FormatTag,
        (int)inputFormat.SamplesPerSec,
        inputFormat.BitsPerSample,
        inputFormat.Channels,
        (int)inputFormat.AvgBytesPerSec,
        inputFormat.BlockAlign,
        (inputFormat is WaveFormatEx) ? ((WaveFormatEx)inputFormat).ExtraInfo : null);

    // Feed the engine from the component's input audio stream in that format
    engine.SetInputToAudioStream(this.inputAudioStream, formatInfo);

    return engine;
}
/// <summary>
/// Creates a new speech recognition engine.
/// </summary>
/// <returns>A new speech recognition engine object.</returns>
private SpeechRecognitionEngine CreateSpeechRecognitionEngine()
{
    // Build the underlying engine for the configured language and grammars
    var engine = SystemSpeech.CreateSpeechRecognitionEngine(this.Configuration.Language, this.Configuration.Grammars);

    // Wire up the audio-state and recognition-completed handlers
    engine.AudioStateChanged += this.OnAudioStateChanged;
    engine.RecognizeCompleted += this.OnRecognizeCompleted;

    // Describe the audio input format from the configured input format
    var inputFormat = this.Configuration.InputFormat;
    var formatInfo = new SpeechAudioFormatInfo(
        (EncodingFormat)inputFormat.FormatTag,
        (int)inputFormat.SamplesPerSec,
        inputFormat.BitsPerSample,
        inputFormat.Channels,
        (int)inputFormat.AvgBytesPerSec,
        inputFormat.BlockAlign,
        (inputFormat is WaveFormatEx) ? ((WaveFormatEx)inputFormat).ExtraInfo : null);

    // Feed the engine from the component's input audio stream in that format
    engine.SetInputToAudioStream(this.inputAudioStream, formatInfo);

    // Apply the configured silence/babble timeouts to the engine
    engine.InitialSilenceTimeout = TimeSpan.FromMilliseconds(this.Configuration.InitialSilenceTimeoutMs);
    engine.BabbleTimeout = TimeSpan.FromMilliseconds(this.Configuration.BabbleTimeoutMs);
    engine.EndSilenceTimeout = TimeSpan.FromMilliseconds(this.Configuration.EndSilenceTimeoutMs);
    engine.EndSilenceTimeoutAmbiguous = TimeSpan.FromMilliseconds(this.Configuration.EndSilenceTimeoutAmbiguousMs);

    return engine;
}
/// <summary>
/// Creates a new speech recognition engine.
/// </summary>
/// <returns>A new speech recognition engine object.</returns>
private SpeechRecognitionEngine CreateSpeechRecognitionEngine()
{
    // Build the underlying engine for the configured language and grammars
    var engine = SystemSpeech.CreateSpeechRecognitionEngine(this.Configuration.Language, this.Configuration.Grammars);

    // Notify this component when grammar loading completes
    engine.LoadGrammarCompleted += this.OnLoadGrammarCompleted;

    return engine;
}
/// <inheritdoc/>
protected override void Receive(string text, Envelope e)
{
    // Blank input cannot yield intents; post an empty result immediately
    if (string.IsNullOrWhiteSpace(text))
    {
        this.Out.Post(new IntentData(), e.OriginatingTime);
        return;
    }

    // Run the recognizer against the supplied text
    var recognitionResult = this.speechRecognitionEngine.EmulateRecognize(text);

    // Post extracted intents when the result carries semantics; otherwise post an empty result
    if (recognitionResult != null && recognitionResult.Semantics != null)
    {
        var intents = SystemSpeech.BuildIntentData(recognitionResult.Semantics);
        this.Out.Post(intents, e.OriginatingTime);
    }
    else
    {
        this.Out.Post(new IntentData(), e.OriginatingTime);
    }
}
/// <summary>
/// Called when the final recognition result is received.
/// </summary>
/// <param name="sender">The source of the event.</param>
/// <param name="e">An object that contains the event data.</param>
private void OnSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    // Convention for intervals is to use the end time as the originating time,
    // so add the duration of the recognized audio to its start position.
    // NOTE(review): assumes e.Result.Audio is non-null — confirm this handler
    // is never invoked for emulated (text-only) recognition.
    var originatingTime = this.streamStartTime + e.Result.Audio.AudioPosition + e.Result.Audio.Duration;

    // Post the raw result from the underlying recognition engine
    this.PostWithOriginatingTimeConsistencyCheck(this.SpeechRecognized, e, originatingTime);

    if (e.Result.Alternates.Count > 0)
    {
        // Post the processed recognition result on the primary output
        var recognitionResult = this.BuildSpeechRecognitionResult(e.Result);
        this.PostWithOriginatingTimeConsistencyCheck(this.Out, recognitionResult, originatingTime);

        // Post any semantic intents extracted from the result
        if (e.Result.Semantics != null)
        {
            var intents = SystemSpeech.BuildIntentData(e.Result.Semantics);
            this.PostWithOriginatingTimeConsistencyCheck(this.IntentData, intents, originatingTime);
        }
    }
}