    public void AddSpeechEngine(Stream stream, string format, string device, string language, double confidence) {

      // 1. Fall back to the configured bot language when none is given
      language = language ?? ConfigManager.GetInstance().Find("bot.language", "fr-FR");

      // 2. Pick the audio format: 16 kHz, 16-bit stereo PCM by default,
      //    or the Kinect microphone array's 16 kHz, 16-bit mono PCM
      var info = new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo);
      if ("Kinect".Equals(format)) {
        info = new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);
      }

      // 3. Build the engine, load the cached grammars, bind the audio stream and start it
      SpeechEngine engine = new SpeechEngine(device, language, confidence);
      engine.Load(GrammarManager.GetInstance().Cache, false);
      engine.Init();
      engine.Engine.SetInputToAudioStream(stream, info);
      engine.Start();

      // 4. Index the running engine by its device name
      Engines.Add(device, engine);
    }
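
    // Usage sketch (illustrative, not part of the original source): registering
    // a Kinect microphone array with AddSpeechEngine above. KinectAudioSource
    // comes from the Kinect for Windows SDK v1.x; the device id "Kinect_0" and
    // the 0.6 confidence threshold are hypothetical placeholder values.
    public void AddKinectEngine(KinectAudioSource source) {
      // Start() yields a 16 kHz, 16-bit mono PCM stream, matching the
      // "Kinect" branch of the format selection above
      Stream stream = source.Start();
      AddSpeechEngine(stream, "Kinect", "Kinect_0", null, 0.6);
    }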
    public void SpeechRejected(SpeechEngine engine, RecognitionResult rr) {

      // 1. Drop the rejection when the global Listening state is off
      if (!IsListening) {
        Log("REJECTED not listening");
        return;
      }

      // 2. Only process rejections while a dynamic (DYN) grammar is active
      if (ContextManager.GetInstance().Dynamic() == null) {
        return;
      }

      // Build an XPath navigator over the SML produced from the semantics
      XPathNavigator xnav = rr.ConstructSmlFromSemantics().CreateNavigator();

      // 3. Forward to all addons
      var text = rr.Text;
      var options = new Dictionary<string, string>();

      // Buffer the rejected audio as a WAV stream for the addons
      using (var stream = new MemoryStream()) {
        rr.Audio.WriteToWaveStream(stream);
        AddOnManager.GetInstance().BeforeSpeechRejected(engine.Name, text, rr.Confidence, xnav, stream, options);
        AddOnManager.GetInstance().AfterSpeechRejected(engine.Name, text, rr.Confidence, xnav, stream, options);
      }
    }
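
    // Sketch (an assumption about the Confidence() helper used below, inferred
    // from the XPathNavigator it receives; not the original implementation):
    // reading the overall confidence attribute from the SML document built by
    // ConstructSmlFromSemantics().
    protected double SmlConfidence(XPathNavigator xnav) {
      // The SML root element carries a "confidence" attribute for the
      // whole utterance
      XPathNavigator attr = xnav.SelectSingleNode("/SML/@confidence");
      return attr == null ? 0.0 : attr.ValueAsDouble;
    }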
    public void SpeechRecognized(SpeechEngine engine, RecognitionResult rr) {

      // 1. Drop the result when the global Listening state is off
      if (!IsListening) {
        Log("RECOGNIZED not listening");
        return;
      }

      // Build an XPath navigator over the SML produced from the semantics
      XPathNavigator xnav = rr.ConstructSmlFromSemantics().CreateNavigator();

      // 2. Reject results below the engine's confidence threshold
      if (!Confidence(engine.Name, rr, xnav, engine.Confidence)) {
        return;
      }

      // 3. Record an engagement timestamp on the profile for valid audio
      AddOnManager.GetInstance().HandleProfile(engine.Name, "engaged", DateTime.Now);

      // 4. Forward to all addons
      var text    = rr.Text;
      var grammar = rr.Grammar.Name;
      var options = new Dictionary<string, string>();

      // Buffer the recognized audio as a WAV stream for the addons
      using (var stream = new MemoryStream()) {
        rr.Audio.WriteToWaveStream(stream);
        AddOnManager.GetInstance().BeforeSpeechRecognition(engine.Name, text, rr.Confidence, xnav, grammar, stream, options);
        AddOnManager.GetInstance().AfterSpeechRecognition(engine.Name, text, rr.Confidence, xnav, grammar, stream, options);
      }
    }
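
    // Wiring sketch (hypothetical, not from the original source): how the two
    // handlers above could be attached to the underlying System.Speech engine.
    // Assumes SpeechEngine.Engine exposes a SpeechRecognitionEngine, as the
    // SetInputToAudioStream call in AddSpeechEngine suggests.
    private void Subscribe(SpeechEngine engine) {
      engine.Engine.SpeechRecognitionRejected += (s, e) => SpeechRejected(engine, e.Result);
      engine.Engine.SpeechRecognized          += (s, e) => SpeechRecognized(engine, e.Result);
      // Keep recognizing utterances until the engine is stopped
      engine.Engine.RecognizeAsync(RecognizeMode.Multiple);
    }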