Connects to an RTP stream and listens for data
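
These examples come from a speech service that creates one Microphone per remote audio source, each fed by a UDPClient that receives the RTP packets. The UDPClient class itself is not shown; the examples only rely on a constructor taking a port, StartClient/StopClient, and an AudioStream property, so a minimal sketch under those assumptions might look like the following (the RTP handling and buffering here are illustrative, not the project's actual implementation).

 using System.IO;
 using System.Net;
 using System.Net.Sockets;
 using System.Threading;

 // Assumed shape only: a socket listening on the given port, pushing
 // received RTP payloads into a stream the recognizer can read from.
 public class UDPClient
 {
     private readonly UdpClient socket;
     private readonly MemoryStream buffer = new MemoryStream();
     private Thread listener;
     private volatile bool running;

     // Consumed by SpeechRecognitionEngine.SetInputToAudioStream in example #3.
     public Stream AudioStream => buffer;

     public UDPClient(int port)
     {
         socket = new UdpClient(port);
     }

     public void StartClient()
     {
         running = true;
         listener = new Thread(() =>
         {
             var remote = new IPEndPoint(IPAddress.Any, 0);
             while (running)
             {
                 try
                 {
                     byte[] packet = socket.Receive(ref remote);
                     // Strip the fixed 12-byte RTP header; a real client would
                     // also need a blocking stream so reads and writes can overlap.
                     if (packet.Length > 12)
                         buffer.Write(packet, 12, packet.Length - 12);
                 }
                 catch (SocketException)
                 {
                     break; // socket closed by StopClient
                 }
             }
         });
         listener.Start();
     }

     public void StopClient()
     {
         running = false;
         socket.Close();
     }
 }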
Code example #1
 /// <summary>
 /// Constructor for a Microphone
 /// </summary>
 /// <param name="sre">The speech recognition engine associated with this microphone</param>
 /// <param name="client">The UDP client that receives this microphone's audio</param>
 /// <param name="status">The status of the microphone</param>
 /// <param name="shouldBeOn">Whether the speech recognition engine for this microphone should be on</param>
 /// <param name="port">The port this microphone is associated with</param>
 public Microphone(SpeechRecognitionEngine sre, UDPClient client, string status, bool shouldBeOn, int port)
 {
     this.client = client;
     this.sre = sre;
     this.status = status;
     this.shouldBeOn = shouldBeOn; // ShouldBeOn is read back in DependencyHelper below
     this.port = port;
 }
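
Example #2 below reads Sre, Client, ShouldBeOn, and Port back off each stored Microphone, so the class presumably exposes its constructor arguments as read-only properties; a sketch of that assumed surface:

 // Assumed accessors, inferred from mics[key].Sre / .Client / .ShouldBeOn / .Port in example #2.
 public SpeechRecognitionEngine Sre => sre;
 public UDPClient Client => client;
 public bool ShouldBeOn => shouldBeOn;
 public int Port => port;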
Code example #2
 /// <summary>
 /// Helper method for dependencies: brings each listed microphone up or down to match its reported status
 /// </summary>
 /// <param name="dep">The dependencies, enumerated as instance-id/status pairs</param>
 /// <param name="shouldBeOn">Should the SRE associated with this microphone be turned on or not</param>
 private async void DependencyHelper(dynamic dep, bool shouldBeOn)
 {
     foreach (var mic in dep)
     {
         // This microphone reports "up": make sure it has a listening client and a running recognizer.
         if ((string)mic.Value == "up")
         {
             if (mics.ContainsKey(mic.Key))
             {
                 // Known microphone: restart recognition if it stopped but should be on.
                 SpeechRecognitionEngine sre = mics[mic.Key].Sre;
                 if (sre.AudioState == AudioState.Stopped && mics[mic.Key].ShouldBeOn)
                     sre.RecognizeAsync(RecognizeMode.Multiple);
             }
             else
             {
                 // New microphone: open a UDP client on the next free port and register it.
                 UDPClient client = new UDPClient(port);
                 client.StartClient();
                 AddInputMic((string)mic.Key, client, (string)mic.Value, shouldBeOn);
                 port++;
             }
             // Ask the microphone service to stream its RTP audio to our ip/port.
             await Query("microphone", "invite", new { ip = ipAddress, port = mics[mic.Key].Port }, new string[1] { mic.Key });
         }
         else
         {
             // This microphone is down: stop its client, cancel any running recognition, and drop it.
             if (mics.ContainsKey(mic.Key))
             {
                 mics[(string)mic.Key].Client.StopClient();
                 SpeechRecognitionEngine sre = mics[mic.Key].Sre;
                 if (sre.AudioState != AudioState.Stopped && shouldBeOn)
                 {
                     sre.RecognizeAsyncCancel();
                     Logger.GetInstance().Debug("IT STOPPED!");
                 }
                 mics.Remove(mic.Key);
             }
         }
     }
 }
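
DependencyHelper only touches dep through mic.Key and mic.Value, so any enumeration of instance-id/status pairs will do. A hypothetical call (the instance ids are made up for illustration):

 // Hypothetical dependency payload: instance ids mapped to "up"/"down".
 dynamic dep = new Dictionary<string, string>
 {
     ["mic-kitchen"] = "up",   // started (or restarted) and invited to stream
     ["mic-hallway"] = "down"  // stopped and removed from mics
 };
 DependencyHelper(dep, shouldBeOn: true);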
Code example #3
 /// <summary>
 /// Adds a new microphone instance
 /// </summary>
 /// <param name="instance">The instance id of the microphone</param>
 /// <param name="client">The UDP client providing this microphone's audio stream</param>
 /// <param name="status">The status of the microphone</param>
 /// <param name="shouldBeOn">Whether the speech recognition engine should be turned on</param>
 public void AddInputMic(string instance, UDPClient client, string status, bool shouldBeOn)
 {
     try
     {
         // Feed the recognizer from the UDP audio stream: 16 kHz, 16-bit, mono PCM.
         var sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));
         sre.SetInputToAudioStream(client.AudioStream, new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
         sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(RecognitionHandler);
         sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(RecognitionRejectedHandler);
         // Load a free-form dictation grammar alongside the custom grammars below.
         DictationGrammar customDictationGrammar = new DictationGrammar("grammar:dictation");
         customDictationGrammar.Name = "dictation";
         customDictationGrammar.Enabled = true;
         sre.LoadGrammar(customDictationGrammar);
         mics.Add(instance, new Microphone(sre, client, status, shouldBeOn, port));
         foreach (var g in grammars)
         {
             var gram = new CombinedGrammar(g.Key, g.Value);
             sre.LoadGrammarAsync(gram.compiled);
         }
         if (shouldBeOn)
         {
             sre.RecognizeAsync(RecognizeMode.Multiple);
         }
     }
     catch (IOException)
     {
         // Negotiating the connection with the microphone failed.
     }
 }
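
CombinedGrammar is another project class that is not shown. From its use here (constructed from a name and a value out of grammars, with a compiled member passed to LoadGrammarAsync), it plausibly wraps an SRGS document; a minimal sketch under that assumption:

 using System.IO;
 using System.Speech.Recognition;
 using System.Speech.Recognition.SrgsGrammar;
 using System.Xml;

 // Hypothetical stand-in: assumes g.Value holds SRGS XML and that
 // 'compiled' is the Grammar handed to LoadGrammarAsync in example #3.
 public class CombinedGrammar
 {
     public readonly Grammar compiled;

     public CombinedGrammar(string name, string srgsXml)
     {
         using (var reader = XmlReader.Create(new StringReader(srgsXml)))
         {
             compiled = new Grammar(new SrgsDocument(reader)) { Name = name };
         }
     }
 }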