Example #1
        private void ProgramSpeechInit()
        {
            // rule-based recognition: create the speech recognition object
            this.ProgramRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.ProgramRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411") // "411" = LANGID 0x0411, Japanese
                {                                                        // this one can understand spoken Japanese
                    this.ProgramRule.Recognizer.Recognizer = recoperson; // you're the one who should listen
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n"); // "Japanese recognition is unavailable. Please install the MSSpeech_SR_ja-JP_TELE recognizer."
                Application.Exit();
            }

            // pick up the audio from the microphone, please
            this.ProgramRule.Recognizer.AudioInput = this.CreateMicrofon();

            // receive delegate callbacks for speech recognition events
            // recognition completed
            this.ProgramRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
            {
                string strText = isrr.PhraseInfo.GetText(0, -1, true);
                ProgramRun(strText);
            };
            // recognition failed
            this.ProgramRule.FalseRecognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
            {
                label1.Text = "??";
                show("what", 0);
            };

            // create the language model
            this.ProgramGrammarRule = this.ProgramRule.CreateGrammar(0);

            this.ProgramGrammarRule.Reset(0);
            // create the top-level rule of the language model
            this.ProgramGrammarRuleGrammarRule = this.ProgramGrammarRule.Rules.Add("ProgramRule",
                                                                                   SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // add the phrases to recognize
            ArrayList voicelist = list.GetProgramCommand();

            foreach (string voice in voicelist)
            {
                this.ProgramGrammarRuleGrammarRule.InitialState.AddWordTransition(null, voice);
            }
            this.ProgramGrammarRuleGrammarRule.InitialState.AddWordTransition(null, Program.skinini.GetValue("skininfo", "clock", "時計に戻して"));

            // commit the rules
            this.ProgramGrammarRule.Rules.Commit();
        }
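
Note: the method above builds and commits the grammar but never activates the top-level rule, so the engine will not start matching until that happens elsewhere in the class. A minimal sketch of the missing activation step, assuming the same ProgramGrammarRule field (the helper name is hypothetical; the rule name must match the "ProgramRule" string passed to Rules.Add):

        // Hypothetical helper: activate the committed top-level rule so the
        // engine starts matching microphone input against it.
        private void ProgramSpeechStart()
        {
            this.ProgramGrammarRule.CmdSetRuleState("ProgramRule", SpeechRuleState.SGDSActive);
        }

Example #2 below uses the same call (with SGDSInactive) to pause its always-on listener.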
Example #2
        // initialize speech recognition
        private void AlwaysSpeechInit()
        {
            // rule-based recognition: create the speech recognition object
            this.AlwaysRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.AlwaysRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411") // "411" = LANGID 0x0411, Japanese
                {                                                       // this one can understand spoken Japanese
                    this.AlwaysRule.Recognizer.Recognizer = recoperson; // you're the one who should listen
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
            }

            // pick up the audio from the microphone, please
            this.AlwaysRule.Recognizer.AudioInput = this.CreateMicrofon();

            // receive delegate callbacks for speech recognition events
            // recognition completed
            this.AlwaysRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
            {
                // stop speech recognition
                this.AlwaysGrammarRule.CmdSetRuleState("AlwaysRule", SpeechRuleState.SGDSInactive);
                // bring the window to the front
                this.Activate();
                // get the listening start time
                starttime = Environment.TickCount & int.MaxValue;
                // start listening
                label1_MouseUp(null, null);
            };
            // create the language model
            this.AlwaysGrammarRule = this.AlwaysRule.CreateGrammar(0);

            this.AlwaysGrammarRule.Reset(0);
            // create the top-level rule of the language model
            this.AlwaysGrammarRuleGrammarRule = this.AlwaysGrammarRule.Rules.Add("AlwaysRule",
                                                                                 SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // add the phrase to recognize
            this.AlwaysGrammarRuleGrammarRule.InitialState.AddWordTransition(null, Program.skinini.GetValue("skininfo", "response", "テスト"));

            // commit the rules
            this.AlwaysGrammarRule.Rules.Commit();
        }
Example #3
        // initialize speech recognition
        private void VoiceCommandInit()
        {
            // rule-based recognition: create the speech recognition object
            this.DialogRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.DialogRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411")                                  // "411" = LANGID 0x0411; this one understands Japanese
                {
                    this.DialogRule.Recognizer.Recognizer = recoperson; // you're the one who should listen
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
            }

            // pick up the audio from the microphone, please
            this.DialogRule.Recognizer.AudioInput = this.CreateMicrofon();

            // receive delegate callbacks for speech recognition events
            // recognition completed
            this.DialogRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr) {
                string strText = isrr.PhraseInfo.GetText(0, -1, true);
                this.SpeechTextBranch(strText);
            };

            // create the language model
            this.DialogGrammarRule = this.DialogRule.CreateGrammar(0);

            this.DialogGrammarRule.Reset(0);
            // create the top-level rule of the language model
            this.DialogGrammarRuleGrammarRule = this.DialogGrammarRule.Rules.Add("DialogRule",
                                                                                 SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // add the phrases to recognize
            this.DialogGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "音声入力");
            this.DialogGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "オーケー");
            this.DialogGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "キャンセル");

            // commit the rules
            this.DialogGrammarRule.Rules.Commit();
        }
Example #4
        public void initialize()
        {
            context = new SpInProcRecoContext();
            grammars.Clear();

            context.Recognition      += new _ISpeechRecoContextEvents_RecognitionEventHandler(context_Recognition);
            context.FalseRecognition += new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(context_FalseRecognition);
            context.Interference     += new _ISpeechRecoContextEvents_InterferenceEventHandler(context_Interference);
            //      grammar.DictationSetState(SpeechRuleState.SGDSActive);
            context.CmdMaxAlternates = 5;
            context.State            = SpeechRecoContextState.SRCS_Enabled;
            context.RetainedAudio    = SpeechRetainedAudioOptions.SRAORetainAudio;
            //if (get_property_value("PersistedBackgroundAdaptation") == 1)
            //Feedback.print("cpu: " + get_property_value("ResourceUsage") + "\r\n", Feedback.Status.debug);
            //Feedback.print("high: " + get_property_value("ResourceUsage") + "\r\n", Feedback.Status.debug);
            //Feedback.print("medium: " + get_property_value("ResourceUsage") + "\r\n", Feedback.Status.debug);
            //Feedback.print("low: " + get_property_value("ResourceUsage") + "\r\n", Feedback.Status.debug);
            //Feedback.print("adaptation: " + get_property_value("AdaptationOn") + "\r\n", Feedback.Status.debug);
            //set_property_value("PersistedBackgroundAdaptation", 0);
            //     set_property_value("AdaptationOn", 0);
            //    set_property_value("PersistedLanguageModelAdaptation", 0);
            initialize_audio();
        }
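
The three handlers wired above (context_Recognition, context_FalseRecognition, context_Interference) are not included in this snippet. Below are hedged sketches whose signatures match the SpeechLib delegates; the bodies are placeholders, not the original project's logic:

        void context_Recognition(int StreamNumber, object StreamPosition,
                                 SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            // Pull the full recognized phrase out of the result object.
            string text = Result.PhraseInfo.GetText(0, -1, true);
        }

        void context_FalseRecognition(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
        {
            // Fires on low-confidence recognition; Result may still carry a phrase.
        }

        void context_Interference(int StreamNumber, object StreamPosition, SpeechInterference Interference)
        {
            // Interference reports audio problems, e.g. SpeechInterference.SINoise or SITooQuiet.
        }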
Example #5
        private void InitZone(int zoneNum)
        {
            //Set up the zone computers to transmit speech and receive audio
            if (zoneNum < 3)
            {
//                Process.Start(@"C:\Users\Blake\Documents\Programming\CSharp\C2program\C2program\scripts\pi_speak.bat", Convert.ToString(zoneNum));
                Process.Start(@"..\..\scripts\pi_speak.bat", Convert.ToString(zoneNum));
            }
            else
            {
//                Process.Start(@"C:\Users\Blake\Documents\Programming\CSharp\C2program\C2program\scripts\pi_speak_new_mic.bat", Convert.ToString(zoneNum));
                Process.Start(@"..\..\scripts\pi_speak_new_mic.bat", Convert.ToString(zoneNum));
            }
//            Process.Start(@"C:\Users\Blake\Documents\Programming\CSharp\C2program\C2program\scripts\pi_listen.bat", Convert.ToString(zoneNum));
            Process.Start(@"..\..\scripts\pi_listen.bat", Convert.ToString(zoneNum));

            //Set up the voice for each zone
//            rtpServer[zoneNum - 1] = new RTPServer(zoneAddresses[zoneNum - 1], 1234);
//            rtpServer[zoneNum - 1].StartServer();
//            SpCustomStream vStream = new SpCustomStream();
//            vStream.BaseStream = rtpServer[zoneNum - 1].AudioStream;
            voice[zoneNum - 1] = new C2Voice(zoneNum);
//            voice[zoneNum - 1].Voice.AudioOutputStream = vStream;



            //recoContext = new SpSharedRecoContext();
            recoContext[zoneNum - 1] = new SpInProcRecoContext();

            //set up the socket stream first

            //            mySrStream = new C2SRStream("192.168.2.101", 1234);
            rtpClient[zoneNum - 1] = new RTPReceiver(zonePortBase + zoneNum);
            rtpClient[zoneNum - 1].StartClient();
            SpCustomStream stream = new SpCustomStream();

            //            stream.BaseStream = (System.Runtime.InteropServices.ComTypes.IStream)mySrStream;
            //            stream.BaseStream = (System.Runtime.InteropServices.ComTypes.IStream)rtpClient.AudioStream;
            stream.BaseStream = rtpClient[zoneNum - 1].AudioStream;
            //SpStream st = new SpStream();

            CreateGrammar(zoneNum);

            this.recoContext[zoneNum - 1].Recognizer.AudioInputStream = stream;
            //this.recoContext.Recognizer.AudioInputStream = (ISpeechBaseStream) stream.BaseStream;
            //this.recoContext.Recognizer.AudioInputStream = (ISpeechBaseStream)rtpClient.Stream;
            //RecoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT32kHz16BitMono;
            if (zoneNum < 3)
            {
                recoContext[zoneNum - 1].RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT24kHz16BitMono;
            }
            else
            {
                recoContext[zoneNum - 1].RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT48kHz16BitMono;
            }
            //RecoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT12kHz16BitMono;
            //RecoContext.EventInterests = SPSEMANTICFORMAT. SRERecognition + SRESoundEnd + SREStreamEnd + SREStreamStart + SRESoundEnd;
            recoContext[zoneNum - 1].Recognition += new SpeechLib._ISpeechRecoContextEvents_RecognitionEventHandler(InterpretCommand);
            //RecoContext.Recognition += new _ISpeechRecoContextEvents_

            recoContext[zoneNum - 1].Recognizer.SetPropertyNumber("AdaptationOn", 0); //turns adaptation off so it doesn't train to noise
        }
Example #6
        private void ControlSpeechInit()
        {
            // rule-based recognition: create the speech recognition object
            this.ControlRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.ControlRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411") // "411" = LANGID 0x0411, Japanese
                {                                                        // this one can understand spoken Japanese
                    this.ControlRule.Recognizer.Recognizer = recoperson; // you're the one who should listen
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
            }

            // pick up the audio from the microphone, please
            this.ControlRule.Recognizer.AudioInput = this.CreateMicrofon();

            // receive delegate callbacks for speech recognition events
            // recognition completed
            this.ControlRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
            {
                string strText = isrr.PhraseInfo.GetText(0, -1, true);
                SpeechTextBranch(strText);
            };
            // recognition failed
            this.ControlRule.FalseRecognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
            {
                label1.Text = "??";
                show(filelist.GetPath("what", 0));
            };

            // create the language model
            this.ControlGrammarRule = this.ControlRule.CreateGrammar(0);

            this.ControlGrammarRule.Reset(0);
            // create the top-level rule of the language model
            this.ControlGrammarRuleGrammarRule = this.ControlGrammarRule.Rules.Add("ControlRule",
                                                                                   SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // add the phrases to recognize
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "プログラムを実行したい");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "ツイートしたい");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "検索したい");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "席をはずす");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "バッテリー残量は");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "プログラムリスト更新");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "設定を開いて");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "時計に戻して");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "君の名前は");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "疲れた");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "終了");

            // commit the rules
            this.ControlGrammarRule.Rules.Commit();
        }
Example #7
        public C2SRold(Form1 form)
        {
            form1                      = form;
            gpio                       = new C2gpio(1, "");
            state                      = State.IDLE;
            voice                      = new C2Voice(1);
            C2attentionTimer           = new Timer(30000); //30-second timeout for C2 to stop listening
            C2attentionTimer.Elapsed  += new ElapsedEventHandler(C2attentionTimer_Elapsed);
            C2attentionTimer.AutoReset = false;

            missunderstandCount = 0;
            voice.Speak("C2 standing by and awaiting your instructions!");

            //recoContext = new SpSharedRecoContext();
            recoContext = new SpInProcRecoContext();

            //set up the socket stream first
            //IPEndPoint receiver = new IPEndPoint(new IPAddress(("192.168.2.101"), 1234);
//            UdpClient udpClient = new UdpClient("192.168.2.101", 1234);
            //UdpClient udpClient = new UdpClient(1234);
            //udpClient.Connect(receiver);
//            Socket socket = udpClient.Client;

            //TcpClient tcpClient = new TcpClient("192.168.2.101", 1234);
//            Socket socket = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
//            socket.Connect("192.168.2.101", 1234);
//            if (!socket.Connected)
//            {
//                form1.statusMsg = "socket was never connected!";
//                return;
//            }

            //SpMMAudioIn instream = new SpMMAudioIn();
//            ASRStreamClass myAsrStream = new ASRStreamClass();
//            mySrStream = new C2SRStream("192.168.2.101", 1234);
            rtpClient = new RTPReceiver(1234);
            rtpClient.StartClient();
            SpCustomStream stream = new SpCustomStream();

//            stream.BaseStream = (System.Runtime.InteropServices.ComTypes.IStream)mySrStream;
//            stream.BaseStream = (System.Runtime.InteropServices.ComTypes.IStream)rtpClient.AudioStream;
            stream.BaseStream = rtpClient.AudioStream;
            //SpStream st = new SpStream();
            //st.



            //m_GrammarID = 1;
            Grammar = this.recoContext.CreateGrammar(0);
            Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
            //our program doesn't do this
            Grammar.DictationSetState(SpeechRuleState.SGDSActive);
            //our program doesn't do this

            //            ISpeechGrammarRule CommandsRule;
            //            CommandsRule = Grammar.Rules.Add("CommandsRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
            //            CommandsRule.Clear();
            //            object dummy = 0;
            //            string sCommand = "see";
            //            CommandsRule.InitialState.AddWordTransition(null, sCommand, " ", SpeechGrammarWordType.SGLexical, null, 0, ref dummy, 0);
            //            Grammar.Rules.Commit();
            //            Grammar.CmdSetRuleState("CommandsRule", SpeechRuleState.SGDSActive);
            //stream.get
            this.recoContext.Recognizer.AudioInputStream = stream;
            //this.recoContext.Recognizer.AudioInputStream = (ISpeechBaseStream) stream.BaseStream;
            //this.recoContext.Recognizer.AudioInputStream = (ISpeechBaseStream)rtpClient.Stream;
            //RecoContext.EventInterests = SpeechRecoEvents.SREAllEvents;
            //RecoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT32kHz16BitMono;
            recoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT24kHz16BitMono;
            //RecoContext.EventInterests = SPSEMANTICFORMAT. SRERecognition + SRESoundEnd + SREStreamEnd + SREStreamStart + SRESoundEnd;
            recoContext.Recognition += new SpeechLib._ISpeechRecoContextEvents_RecognitionEventHandler(InterpretCommand);
            //RecoContext.Recognition += new _ISpeechRecoContextEvents_

            recoContext.Recognizer.SetPropertyNumber("AdaptationOn", 0);
        }
Example #8
    bool UseDictation;     // Declare boolean variable for storing the pronunciation dictation grammar setting

    public void main()
    {
        // Reset relevant VoiceAttack text variables
        VA.SetText("~~RecognitionError", null);
        VA.SetText("~~RecognizedText", null);
        VA.SetText("~~SAPIPhonemes", null);
        VA.SetText("~~SAPIPhonemesRaw", null);
        //VA.SetText("~~FalseRecognitionFlag", null);

        // Retrieve the desired word data contained within VoiceAttack text variable
        string ProcessText = null;                     // Initialize string variable for storing the text of interest

        if (VA.GetText("~~ProcessText") != null)       // Check if user provided valid text in input variable
        {
            ProcessText = VA.GetText("~~ProcessText"); // Store text of interest held by VA text variable
        }
        else
        {
            VA.SetText("~~RecognitionError", "Error in input text string (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return;                                                                // End code processing
        }

        // Retrieve path to speech grammar XML file from VoiceAttack
        GrammarPath = VA.GetText("~~GrammarFilePath");

        // Retrieve path to voice recognition input wav file from VoiceAttack
        AudioPath = VA.GetText("~~AudioFilePath");

        // Check if TTS engine is voicing the input for the speech recognition engine
        if (VA.GetBoolean("~~UserVoiceInput") == false)
        {
            //VA.WriteToLog("creating wav file");
            if (TextToWav(AudioPath, ProcessText) == false) // Create wav file with specified path that voices specified text (with text-to-speech) and check if the creation was NOT successful
            {
                return;                                     // Stop executing the code
            }
        }

        // Create speech recognizer and associated context
        SpInprocRecognizer  MyRecognizer = new SpInprocRecognizer();                              // Create new instance of SpInprocRecognizer
        SpInProcRecoContext RecoContext  = (SpInProcRecoContext)MyRecognizer.CreateRecoContext(); // Initialize the SpInProcRecoContext (in-process recognition context)

        try                                                                                       // Attempt the following code
        {
            // Open the created wav in a new FileStream
            FileStream = new SpFileStream();                                        // Create new instance of SpFileStream
            FileStream.Open(AudioPath, SpeechStreamFileMode.SSFMOpenForRead, true); // Open the specified file in the FileStream for reading with events enabled

            // Set the voice recognition input as the FileStream
            MyRecognizer.AudioInputStream = FileStream;             // This will internally "speak" the wav file for input into the voice recognition engine

            // Set up recognition event handling
            RecoContext.Recognition      += new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);           // Register for successful voice recognition events
            RecoContext.FalseRecognition += new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(RecoContext_FalseRecognition); // Register for failed (low confidence) voice recognition events
            if (VA.GetBoolean("~~ShowRecognitionHypothesis") == true)                                                                 // Check if user wants to show voice recognition hypothesis results
            {
                RecoContext.Hypothesis += new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);               // Register for voice recognition hypothesis events
            }
            RecoContext.EndStream += new _ISpeechRecoContextEvents_EndStreamEventHandler(RecoContext_EndStream);                      // Register for end of file stream events

            // Set up the grammar
            grammar      = RecoContext.CreateGrammar();                     // Initialize the grammar object
            UseDictation = (bool?)VA.GetBoolean("~~UseDictation") ?? false; // Set UseDictation based on the value from the VoiceAttack boolean variable
            if (UseDictation == true)                                       // Check if pronunciation dictation grammar should be used with speech recognition
            {
                //grammar.DictationLoad("", SpeechLoadOption.SLOStatic); // Load blank dictation topic into the grammar
                grammar.DictationLoad("Pronunciation", SpeechLoadOption.SLOStatic);    // Load pronunciation dictation topic into the grammar so that the raw (unfiltered) phonemes may be retrieved
                grammar.DictationSetState(SpeechRuleState.SGDSActive);                 // Activate dictation grammar
            }
            else
            {
                grammar.CmdLoadFromFile(GrammarPath, SpeechLoadOption.SLODynamic);           // Load custom XML grammar file
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);                    // Activate the loaded grammar
            }
            Application.Run();                                                               // Starts a standard application message loop on the current thread
        }
        catch                                                                                // Handle exceptions in above code
        {
            VA.SetText("~~RecognitionError", "Error during voice recognition setup (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return;                                                                          // Stop executing the code
        }
        finally                                                                              // Runs whether an exception is encountered or not
        {
            MyRecognizer = null;                                                             // Set to null in preparation for garbage collection
            FileStream.Close();                                                              // Close the input FileStream
            FileStream = null;                                                               // Set to null in preparation for garbage collection

            // Close up recognition event handling
            RecoContext.Recognition      -= new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);           // Unregister for successful voice recognition events
            RecoContext.FalseRecognition -= new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(RecoContext_FalseRecognition); // Unregister for failed (low confidence) voice recognition events
            if (VA.GetBoolean("~~ShowRecognitionHypothesis") == true)                                                                 // Check if user wanted to show voice recognition hypothesis results
            {
                RecoContext.Hypothesis -= new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);               // Unregister for voice recognition hypothesis events
            }
            RecoContext.EndStream -= new _ISpeechRecoContextEvents_EndStreamEventHandler(RecoContext_EndStream);                      // Unregister for end of file stream events
            RecoContext            = null;                                                                                            // Set to null in preparation for garbage collection
        }
        //VA.WriteToLog("voice recognition complete"); // Output info to event log
    }
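
A control-flow note on the snippet above: Application.Run() blocks on a message loop, so one of the registered handlers must end that loop once the wav file has been consumed, and RecoContext_EndStream (not shown) is the natural place. A minimal sketch, assuming the handler only needs to stop the loop (the real one presumably also hands results back to VoiceAttack first):

    public void RecoContext_EndStream(int StreamNumber, object StreamPosition, bool StreamReleased)
    {
        Application.ExitThread(); // End the message loop started by Application.Run() in main()
    }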
Example #9
        // THESE MAKE ABSOLUTELY NO DIFFERENCE WHATSOEVER TO AUDIOFILE INPUT ->
        // apparently.
        // CFGConfidenceRejectionThreshold
        // HighConfidenceThreshold
        // NormalConfidenceThreshold
        // LowConfidenceThreshold

//			int val = 0;
//			_recognizer.GetPropertyNumber("CFGConfidenceRejectionThreshold", ref val);	// default 60-
//			logfile.Log(". CFGConfidenceRejectionThreshold= " + val);
//			_recognizer.GetPropertyNumber("HighConfidenceThreshold", ref val);			// default 80+
//			logfile.Log(". HighConfidenceThreshold= " + val);
//			_recognizer.GetPropertyNumber("NormalConfidenceThreshold", ref val);		// default 50+
//			logfile.Log(". NormalConfidenceThreshold= " + val);
//			_recognizer.GetPropertyNumber("LowConfidenceThreshold", ref val);			// default 20+
//			logfile.Log(". LowConfidenceThreshold= " + val);
//			logfile.Log();
//
//			_recognizer.SetPropertyNumber("CFGConfidenceRejectionThreshold", 0); // tried 100 ... results are identical to 0.
//			_recognizer.GetPropertyNumber("CFGConfidenceRejectionThreshold", ref val);
//			logfile.Log(". CFGConfidenceRejectionThreshold= " + val);
//
//			_recognizer.SetPropertyNumber("HighConfidenceThreshold", 0);
//			_recognizer.GetPropertyNumber("HighConfidenceThreshold", ref val);
//			logfile.Log(". HighConfidenceThreshold= " + val);
//
//			_recognizer.SetPropertyNumber("NormalConfidenceThreshold", 0);
//			_recognizer.GetPropertyNumber("NormalConfidenceThreshold", ref val);
//			logfile.Log(". NormalConfidenceThreshold= " + val);
//
//			_recognizer.SetPropertyNumber("LowConfidenceThreshold", 0);
//			_recognizer.GetPropertyNumber("LowConfidenceThreshold", ref val);
//			logfile.Log(". LowConfidenceThreshold= " + val);
//			logfile.Log();


        internal void Start(string text)
        {
#if DEBUG
            logfile.Log();
            logfile.Log("Start()");
#endif
            // these don't all need to be cleared ->
            Expected.Clear();

            RatioWords_def             =
                RatioPhons_def         =
                    RatioWords_enh     =
                        RatioPhons_enh = 0.0;

            _ars_def.Clear();
            _ars_enh.Clear();

            Confidence_def       = 0f;
            Confidence_def_count = 0;


            if (SetRecognizer(FxeGeneratorF.That.GetRecognizer()))             // <- workaround. See FxeGeneratorF.GetRecognizer()
            {
                _text = text;
#if DEBUG
                logfile.Log(". _text= " + _text);
#endif
                if (_text == String.Empty)
                {
#if DEBUG
                    logfile.Log(". . DEFAULT - fire TtsStreamEnded event");
#endif
//					if (TtsStreamEnded != null)
                    TtsStreamEnded();
                }
                else
                {
#if DEBUG
                    logfile.Log(". . ENHANCED - call SpVoice.Speak()");
                    logfile.Log(". . _phoneConverter.LanguageId= " + _phoneConverter.LanguageId);
#endif
                    _voice.Speak(_text);                     // -> fire TtsEndStream when the TTS-stream ends.
                    _voice.WaitUntilDone(-1);
                }

#if DEBUG
                logfile.Log(". create (SpInProcRecoContext)_recoContext");
#endif
                _recoContext = (SpInProcRecoContext)_recognizer.CreateRecoContext();
#if DEBUG
                logfile.Log(". (SpInProcRecoContext)_recoContext CREATED");
#endif
                _recoContext.FalseRecognition += rc_FalseRecognition;
                _recoContext.Recognition      += rc_Recognition;
                _recoContext.EndStream        += rc_EndStream;
#if DEBUG
                _recoContext.Hypothesis   += rc_Hypothesis;
                _recoContext.StartStream  += rc_StartStream;
                _recoContext.SoundStart   += rc_SoundStart;
                _recoContext.SoundEnd     += rc_SoundEnd;
                _recoContext.PhraseStart  += rc_PhraseStart;
                _recoContext.Interference += rc_Interference;
//				_recoContext.AudioLevel       += rc_AudioLevel; // does not fire. SURPRISE!

//				_recoContext.EventInterests = SpeechRecoEvents.SREAllEvents;
                _recoContext.EventInterests = (SpeechRecoEvents)(int)SpeechRecoEvents.SREFalseRecognition
                                              + (int)SpeechRecoEvents.SRERecognition
                                              + (int)SpeechRecoEvents.SREStreamEnd
                                              + (int)SpeechRecoEvents.SREHypothesis
                                              + (int)SpeechRecoEvents.SREStreamStart
                                              + (int)SpeechRecoEvents.SRESoundStart
                                              + (int)SpeechRecoEvents.SRESoundEnd
                                              + (int)SpeechRecoEvents.SREPhraseStart
                                              + (int)SpeechRecoEvents.SREInterference;
//															  + (int)SpeechRecoEvents.SREAudioLevel; // does not fire. SURPRISE!
                logfile.Log(". _recoContext.EventInterests= " + _recoContext.EventInterests);
#else
                _recoContext.EventInterests = (SpeechRecoEvents)(int)SpeechRecoEvents.SREFalseRecognition
                                              + (int)SpeechRecoEvents.SRERecognition
                                              + (int)SpeechRecoEvents.SREStreamEnd;
#endif

/*				https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ee125206%28v%3dvs.85%29
 *                              enum SpeechRecoEvents
 *                              SREStreamEnd            = 1
 *                              SRESoundStart           = 2
 *                              SRESoundEnd             = 4
 *                              SREPhraseStart          = 8
 *                              SRERecognition          = 16
 *                              SREHypothesis           = 32
 *                              SREBookmark             = 64
 *                              SREPropertyNumChange    = 128
 *                              SREPropertyStringChange = 256
 *                              SREFalseRecognition     = 512
 *                              SREInterference         = 1024
 *                              SRERequestUI            = 2048
 *                              SREStateChange          = 4096
 *                              SREAdaptation           = 8192
 *                              SREStreamStart          = 16384
 *                              SRERecoOtherContext     = 32768
 *                              SREAudioLevel           = 65536
 *                              SREPrivate              = 262144
 *                              SREAllEvents            = 393215
 */
                _generato = Generator.Dictati;
                Generate();
#if DEBUG
                logfile.Log("Start() DONE");
                logfile.Log();
#endif
            }
        }
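
An aside on the EventInterests arithmetic above: summing the flags cast to int happens to work because each SpeechRecoEvents value is a distinct bit (see the enum table in the comment), so addition and bitwise OR coincide as long as no flag appears twice. The conventional spelling of the release-build interests would be:

                _recoContext.EventInterests = SpeechRecoEvents.SREFalseRecognition
                                            | SpeechRecoEvents.SRERecognition
                                            | SpeechRecoEvents.SREStreamEnd;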
Example #10
        private void button3_Click(object sender, EventArgs e)
        {
            if (this.recRule != null)
            {
                return;
            }
            //this.button3.Enabled = false;
            this.recRule = new SpInProcRecoContext();

            bool hit = false;

            foreach (SpObjectToken recPerson in this.recRule.Recognizer.GetRecognizers())
            {
                string lang = recPerson.GetAttribute("Language");
                if (lang == "411")
                {
                    this.recRule.Recognizer.Recognizer = recPerson;
                    hit = true;
                    break;
                }
            }

            if (!hit)
            {
                MessageBox.Show("日本語認識が利用できません。"); // "Japanese recognition is unavailable."
            }
            else
            {
                Console.WriteLine("マイク取得開始\n");

                Console.WriteLine(this.recRule.Recognizer.Status.ClsidEngine);
                this.recRule.Recognizer.AudioInput = this.CreateMicrofon();

                Console.WriteLine("マイク取得完了\n");
                Console.WriteLine("デリゲート登録\n");
                // recognition in progress (hypothesis)
                this.recRule.Hypothesis +=
                    delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult result) {
                    string strText = result.PhraseInfo.GetText(0, -1, true);
                    this._SourceViewLabel.Text = strText;
                };
                // recognition completed
                this.recRule.Recognition +=
                    delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr) {
                    string strText = isrr.PhraseInfo.GetText(0, -1, true);
                    this._ResultLabel.Text = strText;
                };
                // some data arrived on the stream (?)
                this.recRule.StartStream +=
                    delegate(int streamNumber, object streamPosition) {
                    this._SourceViewLabel.Text = "認識?";
                    this._ResultLabel.Text     = "認識?";
                };
                // recognition failed
                this.recRule.FalseRecognition +=
                    delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr) {
                    this._ResultLabel.Text = "--ERROR!--";
                };
                Console.WriteLine("デリゲート登録完了\n");
                Console.WriteLine("モデル作成\n");
                // create the language model
                this.recGrammerRule = this.recRule.CreateGrammar(0);
                Console.WriteLine("モデル作成完了\n");
                this.recGrammerRule.Reset(0);
                // create the top-level rule of the language model
                this.recGRGrammerRule = this.recGrammerRule.Rules.Add("TopLevelRule",
                                                                      SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);

                // every phrase to be recognized must be loaded here up front
                this.recGRGrammerRule.InitialState.AddWordTransition(null, "私は");

                // commit the rules
                this.recGrammerRule.Rules.Commit();

                // start speech recognition (set SpeechRuleState.SGDSActive via the top-level rule's name)
                this.recGrammerRule.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);

                Console.WriteLine("音声認識開始");
            }
        }
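
For symmetry with the guard at the top of button3_Click, a stop handler would deactivate the rule and let the context be rebuilt. A minimal sketch, assuming the same fields (this helper is hypothetical and not part of the original form):

        private void StopRecognition()
        {
            if (this.recGrammerRule != null)
            {
                // Deactivate the top-level rule so the engine stops matching input.
                this.recGrammerRule.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSInactive);
            }
            this.recRule = null; // allows button3_Click to reinitialize recognition
        }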