Example #1
File: C2SR.cs Project: ariva13/C2Program
        private void InitZone(int zoneNum)
        {
            // Set up the zone computer to transmit speech and receive audio.
            // Zones 1 and 2 use the original microphone script; later zones use
            // the new-microphone variant.
            if (zoneNum < 3)
            {
                Process.Start(@"..\..\scripts\pi_speak.bat", Convert.ToString(zoneNum));
            }
            else
            {
                Process.Start(@"..\..\scripts\pi_speak_new_mic.bat", Convert.ToString(zoneNum));
            }
            Process.Start(@"..\..\scripts\pi_listen.bat", Convert.ToString(zoneNum));

            // Set up the text-to-speech voice for this zone.
            voice[zoneNum - 1] = new C2Voice(zoneNum);

            // Each zone gets its own in-process recognition context rather than
            // the machine-wide shared recognizer.
            recoContext[zoneNum - 1] = new SpInProcRecoContext();

            // Receive this zone's audio over RTP and wrap it in a SAPI custom
            // stream so it can serve as the recognizer's input.
            rtpClient[zoneNum - 1] = new RTPReceiver(zonePortBase + zoneNum);
            rtpClient[zoneNum - 1].StartClient();
            SpCustomStream stream = new SpCustomStream();
            stream.BaseStream = rtpClient[zoneNum - 1].AudioStream;

            CreateGrammar(zoneNum);

            this.recoContext[zoneNum - 1].Recognizer.AudioInputStream = stream;

            // The retained-audio format matches each zone's capture hardware:
            // 24 kHz for the original microphones, 48 kHz for the new ones.
            if (zoneNum < 3)
            {
                recoContext[zoneNum - 1].RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT24kHz16BitMono;
            }
            else
            {
                recoContext[zoneNum - 1].RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT48kHz16BitMono;
            }

            recoContext[zoneNum - 1].Recognition += new SpeechLib._ISpeechRecoContextEvents_RecognitionEventHandler(InterpretCommand);

            recoContext[zoneNum - 1].Recognizer.SetPropertyNumber("AdaptationOn", 0); //turns adaptation off so it doesn't train to noise
        }
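
For context, here is a minimal sketch of how InitZone might be driven at startup. The zoneCount field, the array allocations, and the InitAllZones helper are assumptions for illustration; none of them appear in the excerpt above.

        // Hypothetical startup helper: zoneCount and the array types are
        // assumptions inferred from how InitZone indexes voice, recoContext,
        // and rtpClient.
        private void InitAllZones()
        {
            voice       = new C2Voice[zoneCount];
            recoContext = new SpInProcRecoContext[zoneCount];
            rtpClient   = new RTPReceiver[zoneCount];

            // Zone numbers are 1-based; InitZone subtracts 1 for array access.
            for (int zone = 1; zone <= zoneCount; zone++)
            {
                InitZone(zone);
            }
        }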
Example #2
        public C2SRold(Form1 form)
        {
            form1                      = form;
            gpio                       = new C2gpio(1, "");
            state                      = State.IDLE;
            voice                      = new C2Voice(1);
            C2attentionTimer           = new Timer(30000); //30-second timeout for C2 to stop listening
            C2attentionTimer.Elapsed  += new ElapsedEventHandler(C2attentionTimer_Elapsed);
            C2attentionTimer.AutoReset = false;

            missunderstandCount = 0;
            voice.Speak("C2 standing by and awaiting your instructions!");

            // Use a dedicated in-process recognition context rather than the
            // machine-wide shared recognizer.
            recoContext = new SpInProcRecoContext();

            // Receive the audio over RTP and wrap it in a SAPI custom stream
            // to serve as the recognizer's input.
            rtpClient = new RTPReceiver(1234);
            rtpClient.StartClient();
            SpCustomStream stream = new SpCustomStream();
            stream.BaseStream = rtpClient.AudioStream;

            // Load a static dictation grammar and activate it
            // (the current version of the program doesn't do this).
            Grammar = this.recoContext.CreateGrammar(0);
            Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
            Grammar.DictationSetState(SpeechRuleState.SGDSActive);
            this.recoContext.Recognizer.AudioInputStream = stream;
            recoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT24kHz16BitMono;
            recoContext.Recognition += new SpeechLib._ISpeechRecoContextEvents_RecognitionEventHandler(InterpretCommand);

            recoContext.Recognizer.SetPropertyNumber("AdaptationOn", 0); //turns adaptation off so it doesn't train to noise
        }
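
Both examples attach InterpretCommand to the Recognition event, but its body is not shown. A handler must match SpeechLib's _ISpeechRecoContextEvents_RecognitionEventHandler delegate; the sketch below shows that signature with an illustrative body, not the project's actual implementation.

        // Signature required by _ISpeechRecoContextEvents_RecognitionEventHandler.
        // The body is illustrative only; the real InterpretCommand is not part
        // of these excerpts.
        private void InterpretCommand(int StreamNumber, object StreamPosition,
                                      SpeechRecognitionType RecognitionType,
                                      ISpeechRecoResult Result)
        {
            // Pull the recognized phrase text out of the SAPI result object.
            string heard = Result.PhraseInfo.GetText(0, -1, true);

            // ... dispatch on 'heard' to drive the rest of the program ...
        }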