Exemplo n.º 1
0
        /// <summary>
        /// Creates a new speech recognition engine configured from this component's
        /// language, grammars and input audio format, with all event handlers attached.
        /// </summary>
        /// <returns>A new speech recognition engine object.</returns>
        private SpeechRecognitionEngine CreateSpeechRecognitionEngine()
        {
            // Create the speech recognition engine
            var recognizer = MicrosoftSpeech.CreateSpeechRecognitionEngine(this.Configuration.Language, this.Configuration.Grammars);

            // Attach the event handlers for speech recognition events
            recognizer.SpeechDetected += this.OnSpeechDetected;
            recognizer.SpeechHypothesized += this.OnSpeechHypothesized;
            recognizer.SpeechRecognized += this.OnSpeechRecognized;
            recognizer.SpeechRecognitionRejected += this.OnSpeechRecognitionRejected;
            recognizer.AudioSignalProblemOccurred += this.OnAudioSignalProblemOccurred;
            recognizer.AudioStateChanged += this.OnAudioStateChanged;
            recognizer.RecognizeCompleted += this.OnRecognizeCompleted;
            recognizer.RecognizerUpdateReached += this.OnRecognizerUpdateReached;
            recognizer.AudioLevelUpdated += this.OnAudioLevelUpdated;
            recognizer.EmulateRecognizeCompleted += this.OnEmulateRecognizeCompleted;
            recognizer.LoadGrammarCompleted += this.OnLoadGrammarCompleted;

            // Create the format info from the configuration input format.
            // Hoist the repeated property access, and use pattern matching instead of
            // the is-check-then-cast idiom (the extra data is only present on WaveFormatEx).
            var inputFormat = this.Configuration.InputFormat;
            SpeechAudioFormatInfo formatInfo = new SpeechAudioFormatInfo(
                (EncodingFormat)inputFormat.FormatTag,
                (int)inputFormat.SamplesPerSec,
                inputFormat.BitsPerSample,
                inputFormat.Channels,
                (int)inputFormat.AvgBytesPerSec,
                inputFormat.BlockAlign,
                (inputFormat is WaveFormatEx formatEx) ? formatEx.ExtraInfo : null);

            // Specify the input stream and audio format
            recognizer.SetInputToAudioStream(this.inputAudioStream, formatInfo);

            return recognizer;
        }
Exemplo n.º 2
0
        /// <summary>
        /// Creates a new speech recognition engine.
        /// </summary>
        /// <returns>A new speech recognition engine object.</returns>
        private SpeechRecognitionEngine CreateSpeechRecognitionEngine()
        {
            // Build the engine from the configured language and grammar set
            SpeechRecognitionEngine engine = MicrosoftSpeech.CreateSpeechRecognitionEngine(this.Configuration.Language, this.Configuration.Grammars);

            // Subscribe to grammar-load completion notifications
            engine.LoadGrammarCompleted += this.OnLoadGrammarCompleted;

            return engine;
        }
Exemplo n.º 3
0
 /// <inheritdoc/>
 protected override void Receive(string text, Envelope e)
 {
     if (!string.IsNullOrWhiteSpace(text))
     {
         // Run the recognizer against the incoming text and post any semantic
         // interpretation it produced, stamped with the message's originating time.
         var recognition = this.speechRecognitionEngine.EmulateRecognize(text);
         if (recognition?.Semantics != null)
         {
             this.Out.Post(MicrosoftSpeech.BuildIntentData(recognition.Semantics), e.OriginatingTime);
         }
     }
     else
     {
         // Blank input yields an empty intent result
         this.Out.Post(new IntentData(), e.OriginatingTime);
     }
 }
Exemplo n.º 4
0
        /// <summary>
        /// Called when the final recognition result received.
        /// </summary>
        /// <param name="sender">The source of the event.</param>
        /// <param name="e">An object that contains the event data.</param>
        private void OnSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            var recognition = e.Result;

            // Convention for intervals is to use the end time as the originating time,
            // so offset the stream start by the audio position plus the utterance duration.
            var originatingTime = this.streamStartTime + recognition.Audio.AudioPosition + recognition.Audio.Duration;

            // Post the raw result from the underlying recognition engine
            this.PostWithOriginatingTimeConsistencyCheck(this.SpeechRecognized, e, originatingTime);

            // Nothing further to post when the recognizer produced no alternates
            if (recognition.Alternates.Count == 0)
            {
                return;
            }

            // Post the processed recognition result
            this.PostWithOriginatingTimeConsistencyCheck(this.Out, this.BuildSpeechRecognitionResult(recognition), originatingTime);

            // Post intent data when a semantic interpretation is available
            if (recognition.Semantics != null)
            {
                this.PostWithOriginatingTimeConsistencyCheck(this.IntentData, MicrosoftSpeech.BuildIntentData(recognition.Semantics), originatingTime);
            }
        }