/// <summary>
/// Wires Kinect audio into a Microsoft.Speech recognizer.
/// Opens the first audio beam of <c>kinectSensor</c>, wraps it in a
/// <c>KinectAudioStream</c> (stored in the class-level <c>convertStream</c>),
/// and — if a Kinect-capable recognizer is installed — starts continuous
/// recognition of the commands "start" (semantic value START) and
/// "stop" (semantic value STOP) on the class-level <c>speechEngine</c>.
/// If no recognizer is found, the method silently does nothing
/// (the status-bar notification is currently commented out).
/// </summary>
static void InitializeSpeechRecognition()
{
    // Grab the audio stream from the sensor's first (and only exposed) beam.
    IReadOnlyList<AudioBeam> audioBeamList = kinectSensor.AudioSource.AudioBeams;
    System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

    // Create the convert stream: adapts the Kinect 32-bit IEEE float stream
    // to the 16-bit PCM format the speech engine expects.
    convertStream = new KinectAudioStream(audioStream);

    RecognizerInfo ri = TryGetKinectRecognizer();

    if (null != ri)
    {
        speechEngine = new SpeechRecognitionEngine(ri.Id);

        // Build a tiny command grammar: each phrase maps to a semantic tag
        // that the SpeechRecognized handler can switch on.
        var directions = new Choices();
        directions.Add(new SemanticResultValue("start", "START"));
        directions.Add(new SemanticResultValue("stop", "STOP"));

        // The grammar must use the recognizer's culture or loading fails.
        var gb = new GrammarBuilder { Culture = ri.Culture };
        gb.Append(directions);

        var g = new Grammar(gb);
        speechEngine.LoadGrammar(g);

        speechEngine.SpeechRecognized += SpeechRecognized;
        speechEngine.SpeechRecognitionRejected += SpeechRejected;

        // Let the convertStream know speech is going active so it starts
        // delivering converted samples.
        convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be
        // beneficial to turn off adaptation of the acoustic model; this
        // will prevent recognition accuracy from degrading over time.
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        // 16 kHz, 16-bit, mono PCM — the format KinectAudioStream converts to.
        speechEngine.SetInputToAudioStream(
            convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

        // Multiple: keep recognizing until RecognizeAsyncStop is called.
        speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
    else
    {
        // NOTE(review): user-visible "no recognizer" notification is disabled;
        // re-enable once statusBarText is available in this context.
        //statusBarText.Text = Properties.Resources.NoSpeechRecognizer;
    }
}