Example No. 1
 public GUI(DataFaucet<AudioBuffer> df, AggregateDump dump, GoogleASR asr, GoogleTranslate gt)
 {
     Speak = false;
     InitializeComponent();

     // Keep references to the pipeline components so the GUI can drive them later.
     this.faucet    = df;
     this.dump      = dump;
     this.speechRec = asr;
     this.translate = gt;

     // Note: this timer is created but never configured or started in this snippet.
     System.Timers.Timer myTimer = new System.Timers.Timer();
 }
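The constructor stores its arguments in instance fields that lie outside this snippet. A minimal sketch of the declarations the GUI class would need, with names inferred from the assignments above (an assumption, not the project's actual code):

 // Hypothetical field declarations inferred from the constructor body above.
 private DataFaucet<AudioBuffer> faucet;
 private AggregateDump dump;
 private GoogleASR speechRec;
 private GoogleTranslate translate;
 public bool Speak; // only the assignment is visible above; this could equally be a property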
Example No. 2
        static void Main(string[] args)
        {
            using (Pipeline pipeline = Pipeline.Create())
            {
                WaveFormat waveFormat = WaveFormat.Create16kHz1Channel16BitPcm();

                IProducer<AudioBuffer> audioInput = new AudioCapture(pipeline, new AudioCaptureConfiguration()
                {
                    OutputFormat = waveFormat
                });
                DataFaucet<AudioBuffer> df = new DataFaucet<AudioBuffer>(pipeline);
                audioInput.PipeTo(df);
                AggregateDump dump = new AggregateDump(pipeline);
                df.PipeTo(dump);
                GoogleASR gsr = new GoogleASR(pipeline, "en");                   //gsr for google speech recognition
                dump.PipeTo(gsr);
                GoogleTranslate gt = new GoogleTranslate(pipeline, "en", "de");  //gt for google translate
                gsr.PipeTo(gt);
                GoogleSpeak gs = new GoogleSpeak(pipeline, waveFormat, "de-DE"); //gs for google speak
                gt.PipeTo(gs);
                AudioOutput aOut = new AudioOutput(pipeline);                    //aOut for audio out
                gs.PipeTo(aOut);

                // Bridge recognition results to an external process over ActiveMQ
                // (the topic names suggest a Rasa/Python consumer).
                ActiveMQ rasa = new ActiveMQ(pipeline, "rasa.PSI", "rasa.PYTHON");
                gsr.PipeTo(rasa);

                GUI    gui    = new GUI(df, dump, gsr, gt);
                // Show the GUI on its own thread so the dialog does not block the pipeline.
                Thread thread = new Thread(() =>
                {
                    gui.ShowDialog();
                });
                thread.SetApartmentState(ApartmentState.STA); // UI frameworks such as Windows Forms expect an STA thread.
                thread.Start();

                // Run the pipeline in the background and keep it alive until a key is pressed.
                pipeline.RunAsync();

                Console.ReadKey(true);
            }
        }
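DataFaucet<T>, AggregateDump, GoogleASR, GoogleTranslate, GoogleSpeak, and ActiveMQ are project-specific components, not part of the Psi library itself, and their implementations are not shown here. Purely as an illustration, a pass-through component such as DataFaucet<T> could be built on Microsoft.Psi.Components.ConsumerProducer<TIn, TOut>; the Open flag and the drop-when-closed behavior below are assumptions suggested by the class name, not the project's actual code:

using Microsoft.Psi;
using Microsoft.Psi.Components;

// Hypothetical sketch: a pass-through component that forwards messages only
// while Open is true. Names and behavior are assumed, not taken from the project.
public class DataFaucet<T> : ConsumerProducer<T, T>
{
    public DataFaucet(Pipeline pipeline) : base(pipeline)
    {
    }

    // When Open is false, incoming messages are dropped instead of forwarded.
    public volatile bool Open = true;

    protected override void Receive(T data, Envelope envelope)
    {
        if (this.Open)
        {
            this.Out.Post(data, envelope.OriginatingTime);
        }
    }
}

A GUI holding a reference to such a component (as in Example No. 1) could then pause or resume the audio stream by toggling the Open flag.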