/// <summary>
/// Registers a grammar under the given name and asynchronously loads a
/// freshly compiled copy of it into every microphone's recognition engine.
/// </summary>
/// <param name="name">the name of the grammar</param>
/// <param name="xml">the grammar xml string</param>
public void AddGrammar(string name, string xml)
{
    grammars.Add(name, xml);

    foreach (var entry in mics)
    {
        // Each engine gets its own compiled copy of the grammar; a single
        // Grammar object cannot be shared across recognizers.
        var combined = new CombinedGrammar(name, xml);
        Microphone microphone = entry.Value;

        // Ask the engine for a safe update point before loading the new
        // grammar, in case it is currently recognizing.
        microphone.Sre.RequestRecognizerUpdate();
        microphone.Sre.LoadGrammarAsync(combined.compiled);
    }
}
/// <summary>
/// Adds a new microphone instance and wires up a speech recognition engine
/// fed by the client's audio stream.
/// </summary>
/// <param name="instance">The instance id of the microphone</param>
/// <param name="client">The UDP client supplying the microphone's audio stream</param>
/// <param name="status">The status of the microphone</param>
/// <param name="shouldBeOn">Whether the speech recognition engine should be turned on</param>
public void AddInputMic(string instance, UDPClient client, string status, bool shouldBeOn)
{
    SpeechRecognitionEngine sre = null;
    bool registered = false;
    try
    {
        sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));
        // 16 kHz, 16-bit, mono PCM — must match the format the UDP client delivers.
        sre.SetInputToAudioStream(client.AudioStream,
            new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
        sre.SpeechRecognized += RecognitionHandler;
        sre.SpeechRecognitionRejected += RecognitionRejectedHandler;

        // Always-enabled free-form dictation grammar, loaded alongside the
        // command grammars added below.
        var customDictationGrammar = new DictationGrammar("grammar:dictation")
        {
            Name = "dictation",
            Enabled = true
        };
        sre.LoadGrammar(customDictationGrammar);

        mics.Add(instance, new Microphone(sre, client, status, shouldBeOn, port));
        registered = true;

        // Load every grammar registered so far; each engine needs its own
        // compiled copy, so recompile per microphone.
        foreach (var g in grammars)
        {
            var gram = new CombinedGrammar(g.Key, g.Value);
            sre.LoadGrammarAsync(gram.compiled);
        }

        if (shouldBeOn)
        {
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
    }
    catch (IOException)
    {
        // Negotiating connection with the mic failed — best-effort, same as
        // before. Dispose the engine only if it was never handed off to a
        // Microphone entry, so the partially-built engine is not leaked.
        if (!registered)
        {
            sre?.Dispose();
        }
    }
}