Example #1
        private void ProgramSpeechInit()
        {
            // Rule-based recognition: create the speech recognition object
            this.ProgramRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.ProgramRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411")
                {                                                        // This recognizer understands Japanese,
                    this.ProgramRule.Recognizer.Recognizer = recoperson; // so this is the one we want listening
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                // "Japanese recognition is unavailable. Please install the MSSpeech_SR_ja-JP_TELE recognizer."
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
                return; // Application.Exit does not abort this method, so return explicitly
            }

            // Pick up audio from the microphone
            this.ProgramRule.Recognizer.AudioInput = this.CreateMicrofon();

            // Receive callbacks from speech recognition events via delegates.
            // Recognition succeeded
            this.ProgramRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
            {
                string strText = isrr.PhraseInfo.GetText(0, -1, true);
                ProgramRun(strText);
            };
            // Recognition failed
            this.ProgramRule.FalseRecognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
            {
                label1.Text = "??";
                show("what", 0);
            };

            // Create the language model (grammar)
            this.ProgramGrammarRule = this.ProgramRule.CreateGrammar(0);

            this.ProgramGrammarRule.Reset(0);
            // Create the top-level rule of the language model.
            this.ProgramGrammarRuleGrammarRule = this.ProgramGrammarRule.Rules.Add("ProgramRule",
                                                                                   SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // Add the phrases to be recognized.
            ArrayList voicelist = list.GetProgramCommand();

            foreach (string voice in voicelist)
            {
                this.ProgramGrammarRuleGrammarRule.InitialState.AddWordTransition(null, voice);
            }
            this.ProgramGrammarRuleGrammarRule.InitialState.AddWordTransition(null, Program.skinini.GetValue("skininfo", "clock", "時計に戻して"));

            // Commit the rules.
            this.ProgramGrammarRule.Rules.Commit();
        }
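Note that ProgramSpeechInit commits the rule set but never activates it; presumably the caller does that when program-command mode starts (Examples #9 and #27 show the same pattern with activation). A one-line sketch of that missing step, under that assumption:

            this.ProgramGrammarRule.CmdSetRuleState("ProgramRule", SpeechRuleState.SGDSActive); // hypothetical call site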
Example #2
        /// <summary>
        /// Tries to load a SRGS grammar for SAPI 5.1 or earlier
        /// </summary>
        /// <returns>true if grammar was loaded successfully, false otherwise</returns>
        protected override bool LoadGrammar(FileInfo grammarFile)
        {
            if ((grammarFile == null) || !grammarFile.Exists)
            {
                return false;
            }
            hasGrammar = false;
            try
            {
                // set up the grammar
                grammar = recoContext.CreateGrammar(0);
                // set up the dictation grammar
                grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
                grammar.DictationSetState(SpeechRuleState.SGDSInactive);
                // load the command and control grammar
                grammar.CmdLoadFromFile(grammarFile.FullName, SpeechLoadOption.SLOStatic);
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSInactive);
                // activate one of the grammars if we don't want both at the same time
                //if (commandAndControl)
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);
                //else
                //	grammar.DictationSetState(SpeechRuleState.SGDSActive);
                //if (GrammarLoaded != null) GrammarLoaded(this);
                hasGrammar = true;
            }
            catch
            {
                hasGrammar = false;
            }

            return hasGrammar;
        }
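For context, a caller might exercise LoadGrammar like this (a sketch; the grammar path is an assumption, not taken from the snippet):

            FileInfo grammarFile = new FileInfo(@"C:\grammars\commands.xml"); // hypothetical path
            if (!LoadGrammar(grammarFile))
            {
                Console.WriteLine("Failed to load grammar: " + grammarFile.FullName);
            }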
Example #3
        public void SetPluginHost(IPluginHost Host)
        {
            this.FHost = Host;

            //Create outputs
            this.FHost.CreateStringOutput("Word", TSliceMode.Dynamic, TPinVisibility.True, out this.FPinOutWord);

            this.FHost.CreateValueOutput("Bang", 1, null, TSliceMode.Single, TPinVisibility.True, out this.FPinOutBang);
            this.FPinOutBang.SetSubType(0, 1, 0, 0, true, false, true);

            this.FHost.CreateStringOutput("Status", TSliceMode.Single, TPinVisibility.OnlyInspector, out this.FPinOutStatus);

            try
            {
                //Load the Speech context
                this.FContext = new SpeechLib.SpSharedRecoContext();
                this.FGrammar = this.FContext.CreateGrammar(0);

                //Unsubscribe first so the handler is never attached twice
                this.FContext.Recognition -= OnRecognition;
                this.FContext.Recognition += OnRecognition;
                this.FPinOutStatus.SetString(0, "OK");
            }
            catch (Exception ex)
            {
                this.FPinOutStatus.SetString(0, "Error: " + ex.Message);
            }

            this.OnSetPluginHost();
        }
Example #4
 public Meta_Grammar(Speech_Recognizer new_engine)
 {
     engine  = new_engine;
     grammar = engine.context.CreateGrammar(100);
     //grammar2 = engine.context.CreateGrammar(101);
     //grammar2.State = SpeechGrammarState.SGSEnabled;
     enabled = true;
 }
Example #5
 public CommandRecognizerMode(string name, ISpeechRecoGrammar grammar, int firstRuleId, int lastRuleId)
 {
     m_name = name;
     m_grammar = grammar;
     m_firstRuleId = firstRuleId;
     m_nextRuleId = firstRuleId;
     m_lastRuleId = lastRuleId;
 }
Example #6
        private SpeechRecognition()
        {
            ssrContex = new SpSharedRecoContext();
            isrg      = ssrContex.CreateGrammar(1);
            _ISpeechRecoContextEvents_RecognitionEventHandler recHandle =
                new _ISpeechRecoContextEvents_RecognitionEventHandler(ContexRecognition);

            ssrContex.Recognition += recHandle;
        }
Example #7
        private void startSpeech()
        {
            if (RecoContext == null)
            {
                RecoContext = new SpSharedRecoContext();
                grammar     = RecoContext.CreateGrammar(1);
                grammar.DictationLoad();
            }

            grammar.DictationSetState(SpeechRuleState.SGDSActive);    // Opens up the dictation possibility.
        }
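The matching teardown is the inverse call; a one-line sketch, assuming the same grammar field:

            grammar.DictationSetState(SpeechRuleState.SGDSInactive);  // Closes the dictation possibility again.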
Example #8
        public CommandRecognizer()
        {
            m_context = new SpSharedRecoContext();

            // We want to know when a phrase is recognized and also when one fails to be recognized
            m_context.EventInterests = SpeechRecoEvents.SRERecognition | SpeechRecoEvents.SREFalseRecognition;
            m_context.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(RecognitionHandler);
            m_context.FalseRecognition += new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(FailedRecognitionHandler);

            m_grammar = m_context.CreateGrammar(0);
            m_grammar.Reset(0);
        }
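The two handlers wired up above must match the SpeechLib delegate signatures shown in the other examples; a minimal sketch (the method bodies are assumptions):

        private void RecognitionHandler(int streamNumber, object streamPosition,
                                        SpeechRecognitionType recognitionType, ISpeechRecoResult result)
        {
            // Pull the recognized phrase out of the result
            string text = result.PhraseInfo.GetText(0, -1, true);
        }

        private void FailedRecognitionHandler(int streamNumber, object streamPosition, ISpeechRecoResult result)
        {
            // Low-confidence recognition; result may still hold a best guess
        }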
Example #9
        // Initialize speech recognition
        private void AlwaysSpeechInit()
        {
            // Rule-based recognition: create the speech recognition object
            this.AlwaysRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.AlwaysRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411")
                {                                                       // This recognizer understands Japanese,
                    this.AlwaysRule.Recognizer.Recognizer = recoperson; // so this is the one we want listening
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                // "Japanese recognition is unavailable. Please install the MSSpeech_SR_ja-JP_TELE recognizer."
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
                return; // Application.Exit does not abort this method, so return explicitly
            }

            // Pick up audio from the microphone
            this.AlwaysRule.Recognizer.AudioInput = this.CreateMicrofon();

            // Receive callbacks from speech recognition events via delegates.
            // Recognition succeeded
            this.AlwaysRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
            {
                // Stop speech recognition
                this.AlwaysGrammarRule.CmdSetRuleState("AlwaysRule", SpeechRuleState.SGDSInactive);
                // Bring the window to the foreground
                this.Activate();
                // Record the listening start time
                starttime = Environment.TickCount & int.MaxValue;
                // Start listening
                label1_MouseUp(null, null);
            };
            // Create the language model (grammar)
            this.AlwaysGrammarRule = this.AlwaysRule.CreateGrammar(0);

            this.AlwaysGrammarRule.Reset(0);
            // Create the top-level rule of the language model.
            this.AlwaysGrammarRuleGrammarRule = this.AlwaysGrammarRule.Rules.Add("AlwaysRule",
                                                                                 SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // Add the phrases to be recognized.
            this.AlwaysGrammarRuleGrammarRule.InitialState.AddWordTransition(null, Program.skinini.GetValue("skininfo", "response", "テスト"));

            // Commit the rules.
            this.AlwaysGrammarRule.Rules.Commit();
        }
Example #10
        /// <summary>
        /// Start listening
        /// </summary>
        public void BeginRec()
        {
            ssrContex = new SpSharedRecoContext();
            ssrContex.EventInterests = SpeechRecoEvents.SREAllEvents; // See the notes under "speech events"
            isrgammar = ssrContex.CreateGrammar(0);
            isrgammar.CmdLoadFromFile("D:\\SpeechGammar.xml", SpeechLoadOption.SLODynamic); // Load the rules (the path must point to a valid grammar XML file; the original passed "")
            isrgammar.CmdSetRuleState(isrgammar.Rules.Item(0).Name, SpeechRuleState.SGDSActive); // Activate the rule
            ssrContex.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(ContexRecognition);

            ssrContex.State = SpeechRecoContextState.SRCS_Enabled;
            isrgammar.DictationSetState(SpeechRuleState.SGDSActive);
            IsStart = true;
        }
Example #11
        //public void create_dictation(Action_Library library)
        //{
        //    grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
        //    grammar.DictationSetState(SpeechRuleState.SGDSActive);
        //    grammar.State = SpeechGrammarState.SGSDisabled;
        //    is_dictation = true;
        //    engine.grammars.Add((Decimal)0, this);
        //    //recognized += new Recognition_Event(library.Familiar_Grammar_recognized);
        //}

        //public void create(Speech_Recognizer engine, Familiar_Document document)
        //{
        //    grammar = engine.context.CreateGrammar(0);
        //    grammar.State = SpeechGrammarState.SGSDisabled;
        //    ISpeechGrammarRule rule = grammar.Rules.Add("root", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADefaultToActive, 0);
        //    //ISpeechGrammarRuleState state = grammar.Rules.Item(0).AddState();
        //    //ISpeechGrammarRuleState state2 = grammar.Rules.Item(0).AddState();

        //    object temp = "";
        //    rule.InitialState.AddWordTransition(null, "bob it", " ", SpeechGrammarWordType.SGLexical, "", 0, ref temp, 1.0f);
        //    //string error;
        //    grammar.Rules.Commit();//(out error);
        //    //foreach (Element_Base rule in document.rules)
        //    //{
        //    //    //grammar.Rules.Add(
        //    //    //choice.Add(create_choices(rule));
        //    //}
        //    grammar.State = SpeechGrammarState.SGSEnabled;
        //    //engine.context.
        //    grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);
        //}

        //    static protected Decimal next_id = 1;

        public Familiar_Grammar(Speech_Recognizer new_engine)
        {
            engine = new_engine;

            Decimal next_id = 1;

            while (engine.grammars.ContainsKey(next_id))
            {
                ++next_id;
            }

            grammar = engine.context.CreateGrammar(next_id);
            //      grammar.DictationSetState(SpeechRuleState.SGDSActive);
            engine.grammars.Add(next_id, this);
        }
Example #12
        // Initialize speech recognition
        private void VoiceCommandInit()
        {
            // Rule-based recognition: create the speech recognition object
            this.DialogRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.DialogRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411")                                  // This recognizer understands Japanese,
                {
                    this.DialogRule.Recognizer.Recognizer = recoperson; // so this is the one we want listening
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                // "Japanese recognition is unavailable. Please install the MSSpeech_SR_ja-JP_TELE recognizer."
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
                return; // Application.Exit does not abort this method, so return explicitly
            }

            // Pick up audio from the microphone
            this.DialogRule.Recognizer.AudioInput = this.CreateMicrofon();

            // Receive callbacks from speech recognition events via delegates.
            // Recognition succeeded
            this.DialogRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr) {
                string strText = isrr.PhraseInfo.GetText(0, -1, true);
                this.SpeechTextBranch(strText);
            };

            // Create the language model (grammar)
            this.DialogGrammarRule = this.DialogRule.CreateGrammar(0);

            this.DialogGrammarRule.Reset(0);
            // Create the top-level rule of the language model.
            this.DialogGrammarRuleGrammarRule = this.DialogGrammarRule.Rules.Add("DialogRule",
                                                                                 SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // Add the phrases to be recognized.
            this.DialogGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "音声入力");
            this.DialogGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "オーケー");
            this.DialogGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "キャンセル");

            // Commit the rules.
            this.DialogGrammarRule.Rules.Commit();
        }
Example #13
        /// <summary>
        /// Starts listening
        /// </summary>
        public void Start()
        {
            recoContext              = new SpSharedRecoContextClass();
            recoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(recoContext_Recognition);

            recoGrammar     = recoContext.CreateGrammar(0);
            recoGrammarRule = recoGrammar.Rules.Add("VoiceCommands", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);

            object propValue = "";

            for (int i = 0; i < commands.Count; i++)
            {
                recoGrammarRule.InitialState.AddWordTransition(null, commands[i].ToString(), " ", SpeechGrammarWordType.SGLexical, Commands[i].ToString(), i, ref propValue, 1.0F);
            }

            recoGrammar.Rules.Commit();
            recoGrammar.CmdSetRuleState("VoiceCommands", SpeechRuleState.SGDSActive);
        }
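The recoContext_Recognition handler registered above is not shown; a sketch of how the recognized command is typically read back out (the body is an assumption):

        private void recoContext_Recognition(int streamNumber, object streamPosition,
                                             SpeechRecognitionType recognitionType, ISpeechRecoResult result)
        {
            // The phrase text corresponds to one of the commands added via AddWordTransition
            string spokenCommand = result.PhraseInfo.GetText(0, -1, true);
        }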
Example #14
        public NamesFromSpeech(Form1 parent, List <string> names)
        {
            // SAPI 5.4 Overview: http://msdn.microsoft.com/en-us/library/ee125077%28v=vs.85%29.aspx
            // Some code from https://github.com/kring/Voodoo-Voice/blob/master/VoiceRecognition/CommandRecognizer.cs

            m_parent = parent;
            m_names  = new List <string>();

            m_context = new SpSharedRecoContext();
            m_context.EventInterests = SpeechRecoEvents.SRERecognition;
            m_context.Recognition   += context_Recognition;

            m_grammar = m_context.CreateGrammar();
            m_grammar.Reset();

            foreach (string name in names)
            {
                AddRuleForName(name);
            }

            CommitAndActivate();
        }
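AddRuleForName and CommitAndActivate are not SAPI calls; they are the author's own helpers. A plausible sketch under that assumption, using only the SpeechLib calls seen elsewhere on this page:

        private void AddRuleForName(string name)
        {
            // One dynamic top-level rule per name; the rule-ID scheme here is hypothetical
            ISpeechGrammarRule rule = m_grammar.Rules.Add(name,
                SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, m_names.Count + 1);
            object propValue = "";
            rule.InitialState.AddWordTransition(null, name, " ",
                SpeechGrammarWordType.SGLexical, name, m_names.Count, ref propValue, 1.0F);
            m_names.Add(name);
        }

        private void CommitAndActivate()
        {
            m_grammar.Rules.Commit();
            foreach (string name in m_names)
                m_grammar.CmdSetRuleState(name, SpeechRuleState.SGDSActive);
        }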
Example #15
        /// <summary>
        /// Starts listening
        /// </summary>
        public void Start()
        {
            recoContext = new SpSharedRecoContextClass();
            recoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(recoContext_Recognition);

            recoGrammar = recoContext.CreateGrammar(0);
            recoGrammarRule = recoGrammar.Rules.Add("VoiceCommands", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);

            object propValue = "";

            for (int i = 0; i < commands.Count; i++)
                recoGrammarRule.InitialState.AddWordTransition(null, commands[i].ToString(), " ", SpeechGrammarWordType.SGLexical, Commands[i].ToString(), i, ref propValue, 1.0F);

            recoGrammar.Rules.Commit();
            recoGrammar.CmdSetRuleState("VoiceCommands", SpeechRuleState.SGDSActive);
        }
Example #16
        //STARTS THE RECOGNITION ENGINE
        private void initSAPI()
        {
            try
            {
                objRecoContext = new SpeechLib.SpSharedRecoContext();
                objRecoContext.AudioLevel +=
                    new _ISpeechRecoContextEvents_AudioLevelEventHandler(RecoContext_VUMeter);
                objRecoContext.Recognition +=
                    new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);
                objRecoContext.EventInterests = SpeechLib.SpeechRecoEvents.SRERecognition |
                    SpeechLib.SpeechRecoEvents.SREAudioLevel;
                // objRecoContext.StartStream += new _ISpeechRecoContextEvents_StartStreamEventHandler(RecoContext_StartStream);

                //create grammar interface with ID = 0
                grammar = objRecoContext.CreateGrammar(0);
            }
            catch (Exception ex)
            {
                throw new Exception(ex.Message);
                //Label1.Text = "Exeption (Init SAPI)\n" + ex.ToString();
            }
        }
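RecoContext_VUMeter is wired to the AudioLevel event, whose delegate passes the current level as an int; a minimal sketch (the body is an assumption):

        private void RecoContext_VUMeter(int streamNumber, object streamPosition, int audioLevel)
        {
            // audioLevel is roughly 0-100, suitable for driving a VU-meter progress bar
        }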
Example #17
 /// <summary>
 /// Stops listening
 /// </summary>
 public void Stop()
 {
     recoContext     = null;
     recoGrammar     = null;
     recoGrammarRule = null;
 }
Example #18
        private void ControlSpeechInit()
        {
            // Rule-based recognition: create the speech recognition object
            this.ControlRule = new SpInProcRecoContext();
            bool hit = false;

            foreach (SpObjectToken recoperson in this.ControlRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
            {
                string language = recoperson.GetAttribute("Language");
                if (language == "411")
                {                                                        // This recognizer understands Japanese,
                    this.ControlRule.Recognizer.Recognizer = recoperson; // so this is the one we want listening
                    hit = true;
                    break;
                }
            }
            if (!hit)
            {
                // "Japanese recognition is unavailable. Please install the MSSpeech_SR_ja-JP_TELE recognizer."
                MessageBox.Show("日本語認識が利用できません。\r\n日本語音声認識 MSSpeech_SR_ja-JP_TELE をインストールしてください。\r\n");
                Application.Exit();
                return; // Application.Exit does not abort this method, so return explicitly
            }

            // Pick up audio from the microphone
            this.ControlRule.Recognizer.AudioInput = this.CreateMicrofon();

            // Receive callbacks from speech recognition events via delegates.
            // Recognition succeeded
            this.ControlRule.Recognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
            {
                string strText = isrr.PhraseInfo.GetText(0, -1, true);
                SpeechTextBranch(strText);
            };
            // Recognition failed
            this.ControlRule.FalseRecognition +=
                delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
            {
                label1.Text = "??";
                show(filelist.GetPath("what", 0));
            };

            // Create the language model (grammar)
            this.ControlGrammarRule = this.ControlRule.CreateGrammar(0);

            this.ControlGrammarRule.Reset(0);
            // Create the top-level rule of the language model.
            this.ControlGrammarRuleGrammarRule = this.ControlGrammarRule.Rules.Add("ControlRule",
                                                                                   SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
            // Add the phrases to be recognized.
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "プログラムを実行したい");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "ツイートしたい");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "検索したい");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "席をはずす");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "バッテリー残量は");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "プログラムリスト更新");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "設定を開いて");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "時計に戻して");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "君の名前は");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "疲れた");
            this.ControlGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "終了");

            // Commit the rules.
            this.ControlGrammarRule.Rules.Commit();
        }
Example #19
    bool UseDictation;     // Declare boolean variable for storing the pronunciation dictation grammar setting

    public void main()
    {
        // Reset relevant VoiceAttack text variables
        VA.SetText("~~RecognitionError", null);
        VA.SetText("~~RecognizedText", null);
        VA.SetText("~~SAPIPhonemes", null);
        VA.SetText("~~SAPIPhonemesRaw", null);
        //VA.SetText("~~FalseRecognitionFlag", null);

        // Retrieve the desired word data contained within VoiceAttack text variable
        string ProcessText = null;                     // Initialize string variable for storing the text of interest

        if (VA.GetText("~~ProcessText") != null)       // Check if user provided valid text in input variable
        {
            ProcessText = VA.GetText("~~ProcessText"); // Store text of interest held by VA text variable
        }
        else
        {
            VA.SetText("~~RecognitionError", "Error in input text string (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return;                                                                // End code processing
        }

        // Retrieve path to speech grammar XML file from VoiceAttack
        GrammarPath = VA.GetText("~~GrammarFilePath");

        // Retrieve path to voice recognition input wav file from VoiceAttack
        AudioPath = VA.GetText("~~AudioFilePath");

        // Check if TTS engine is voicing the input for the speech recognition engine
        if (VA.GetBoolean("~~UserVoiceInput") == false)
        {
            //VA.WriteToLog("creating wav file");
            if (TextToWav(AudioPath, ProcessText) == false) // Create wav file with specified path that voices specified text (with text-to-speech) and check if the creation was NOT successful
            {
                return;                                     // Stop executing the code
            }
        }

        // Create speech recognizer and associated context
        SpInprocRecognizer  MyRecognizer = new SpInprocRecognizer();                              // Create new instance of SpInprocRecognizer
        SpInProcRecoContext RecoContext  = (SpInProcRecoContext)MyRecognizer.CreateRecoContext(); // Initialize the SpInProcRecoContext (in-process recognition context)

        try                                                                                       // Attempt the following code
        {
            // Open the created wav in a new FileStream
            FileStream = new SpFileStream();                                        // Create new instance of SpFileStream
            FileStream.Open(AudioPath, SpeechStreamFileMode.SSFMOpenForRead, true); // Open the specified file in the FileStream for reading with events enabled

            // Set the voice recognition input as the FileStream
            MyRecognizer.AudioInputStream = FileStream;             // This will internally "speak" the wav file for input into the voice recognition engine

            // Set up recognition event handling
            RecoContext.Recognition      += new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);           // Register for successful voice recognition events
            RecoContext.FalseRecognition += new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(RecoContext_FalseRecognition); // Register for failed (low confidence) voice recognition events
            if (VA.GetBoolean("~~ShowRecognitionHypothesis") == true)                                                                 // Check if user wants to show voice recognition hypothesis results
            {
                RecoContext.Hypothesis += new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);               // Register for voice recognition hypothesis events
            }
            RecoContext.EndStream += new _ISpeechRecoContextEvents_EndStreamEventHandler(RecoContext_EndStream);                      // Register for end of file stream events

            // Set up the grammar
            grammar      = RecoContext.CreateGrammar();                     // Initialize the grammar object
            UseDictation = (bool?)VA.GetBoolean("~~UseDictation") ?? false; // Set UseDictation based on the value from a VoiceAttack boolean variable
            if (UseDictation == true)                                       // Check if pronunciation dictation grammar should be used with speech recognition
            {
                //grammar.DictationLoad("", SpeechLoadOption.SLOStatic); // Load blank dictation topic into the grammar
                grammar.DictationLoad("Pronunciation", SpeechLoadOption.SLOStatic);    // Load pronunciation dictation topic into the grammar so that the raw (unfiltered) phonemes may be retrieved
                grammar.DictationSetState(SpeechRuleState.SGDSActive);                 // Activate dictation grammar
            }
            else
            {
                grammar.CmdLoadFromFile(GrammarPath, SpeechLoadOption.SLODynamic);           // Load custom XML grammar file
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);                    // Activate the loaded grammar
            }
            Application.Run();                                                               // Starts a standard application message loop on the current thread
        }
        catch                                                                                // Handle exceptions in above code
        {
            VA.SetText("~~RecognitionError", "Error during voice recognition setup (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return;                                                                          // Stop executing the code
        }
        finally                                                                              // Runs whether an exception is encountered or not
        {
            MyRecognizer = null;                                                             // Set to null in preparation for garbage collection
            FileStream.Close();                                                              // Close the input FileStream
            FileStream = null;                                                               // Set to null in preparation for garbage collection

            // Close up recognition event handling
            RecoContext.Recognition      -= new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);           // Unregister for successful voice recognition events
            RecoContext.FalseRecognition -= new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(RecoContext_FalseRecognition); // Unregister for failed (low confidence) voice recognition events
            if (VA.GetBoolean("~~ShowRecognitionHypothesis") == true)                                                                 // Check if user wanted to show voice recognition hypothesis results
            {
                RecoContext.Hypothesis -= new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);               // Unregister for voice recognition hypothesis events
            }
            RecoContext.EndStream -= new _ISpeechRecoContextEvents_EndStreamEventHandler(RecoContext_EndStream);                      // Unregister for end of file stream events
            RecoContext            = null;                                                                                            // Set to null in preparation for garbage collection
        }
        //VA.WriteToLog("voice recognition complete"); // Output info to event log
    }
Example #20
        public bool RebuildGrammar(ISpeechRecoGrammar grammar, bool speechEnabled,
            SpSharedRecoContext objRecoContext, ISpeechGrammarRule ruleListItemsDefault)
        {
            Debug.WriteLine("RebuildGrammar " + this.state.ToString());

            if (!speechEnabled) {
                return false;
            }

            //grammar = objRecoContext.CreateGrammar(grammarId);
            ////ruleTopLevel = grammar.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
            object PropValue = "";

            switch (this.state) {
                case State.init:
                    //ruleListItemsNames = grammar.Rules.Add("ListItemsNameRule", SpeechRuleAttributes.SRADynamic, 2);
                    //ruleListItemsIDs = grammar.Rules.Add("ListItemsIDRule", SpeechRuleAttributes.SRADynamic, 3);
                    //ruleListItemsDefault = grammar.Rules.Add("ListItemsDefaultRule", SpeechRuleAttributes.SRADynamic, 2);

                    //SpeechLib.ISpeechGrammarRuleState stateName;
                    //SpeechLib.ISpeechGrammarRuleState stateID;
                    ////SpeechLib.ISpeechGrammarRuleState stateDefault;

                    //stateName = ruleTopLevel.AddState();
                    //stateID = ruleTopLevel.AddState();
                    ////stateDefault = ruleListItemsDefault.AddState();

                    //object PropValue1 = "";
                    //object PropValue2 = "";
                    ////PropValue = "";

                    //ruleTopLevel.InitialState.AddWordTransition(stateName, "name", " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue1, 1.0F);
                    //ruleTopLevel.InitialState.AddWordTransition(stateID, "id", " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue2, 1.0F);
                    ////ruleTopLevel.InitialState.AddWordTransition(stateDefault, " ", " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue, 1.0F);

                    //PropValue1 = "";
                    //PropValue2 = "";
                    ////PropValue = "";

                    //stateName.AddRuleTransition(null, ruleListItemsNames, "", 1, ref PropValue1, 0F);
                    //stateID.AddRuleTransition(null, ruleListItemsIDs, "", 1, ref PropValue2, 0F);
                    ////stateDefault.AddRuleTransition(null, ruleListItemsDefault, "", 1, ref PropValue, 0F);

                    try {
                        //ruleListItemsNames.Clear();
                        //ruleListItemsIDs.Clear();
                        ruleListItemsDefault.Clear();

                        //int i = 0;
                        //foreach (string patientName in patientenNameList) {
                        //    string word = patientName;
                        //    ruleListItemsNames.InitialState.AddWordTransition(null, word, " ", SpeechGrammarWordType.SGLexical, word, i, ref PropValue1, 1F);
                        //    i++;
                        //}

                        //i = 0;
                        //foreach (long patientID in patientIDList) {
                        //    long ID = patientID;
                        //    ruleListItemsIDs.InitialState.AddWordTransition(null, ID.ToString(), " ", SpeechGrammarWordType.SGLexical, ID.ToString(), i, ref PropValue2, 1F);
                        //    i++;
                        //}

                        int i = 0;
                        foreach (string patientName in patientenNameList) {
                            string word = "name " + patientName;
                            ruleListItemsDefault.InitialState.AddWordTransition(null, word, " ", SpeechGrammarWordType.SGLexical, word, i, ref PropValue, 1F);
                            i++;
                        }

                        foreach (long patientID in patientIDList) {
                            string word = "id " + patientID.ToString();
                            ruleListItemsDefault.InitialState.AddWordTransition(null, word, " ", SpeechGrammarWordType.SGLexical, word, i, ref PropValue, 1F);
                            i++;
                        }

                        grammar.Rules.Commit();

                        ////grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                case State.cognitied:
                    //ruleListItemsDefault = grammar.Rules.Add("ListItemsDefaultRule", SpeechRuleAttributes.SRADynamic, 2);

                    //SpeechLib.ISpeechGrammarRuleState stateCognitied;
                    //stateCognitied = ruleTopLevel.AddState();

                    //PropValue = "";

                    //ruleTopLevel.InitialState.AddWordTransition(stateCognitied, "", " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue, 1.0F);

                    //PropValue = "";

                    //stateCognitied.AddRuleTransition(null, ruleListItemsDefault, "", 1, ref PropValue, 0F);

                    try {
                        ruleListItemsDefault.Clear();

                        ruleListItemsDefault.InitialState.AddWordTransition(null, "master", " ", SpeechGrammarWordType.SGLexical, "master", 0, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "operation", " ", SpeechGrammarWordType.SGLexical, "operation", 1, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "visit", " ", SpeechGrammarWordType.SGLexical, "visit", 2, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "reset", " ", SpeechGrammarWordType.SGLexical, "reset", 3, ref PropValue, 1F);
                        grammar.Rules.Commit();

                        //grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                case State.master:
                    try {
                        ruleListItemsDefault.Clear();

                        ruleListItemsDefault.InitialState.AddWordTransition(null, "repeat", " ", SpeechGrammarWordType.SGLexical, "master", 0, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "back", " ", SpeechGrammarWordType.SGLexical, "operation", 1, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "reset", " ", SpeechGrammarWordType.SGLexical, "reset", 2, ref PropValue, 1F);
                        grammar.Rules.Commit();

                        //grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                case State.lastOperation:
                    try {
                        ruleListItemsDefault.Clear();

                        ruleListItemsDefault.InitialState.AddWordTransition(null, "repeat", " ", SpeechGrammarWordType.SGLexical, "master", 0, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "back", " ", SpeechGrammarWordType.SGLexical, "operation", 1, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "reset", " ", SpeechGrammarWordType.SGLexical, "reset", 2, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "more", " ", SpeechGrammarWordType.SGLexical, "reset", 3, ref PropValue, 1F);
                        grammar.Rules.Commit();

                        //grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                case State.lastVisit:
                    try {
                        ruleListItemsDefault.Clear();

                        ruleListItemsDefault.InitialState.AddWordTransition(null, "repeat", " ", SpeechGrammarWordType.SGLexical, "master", 0, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "back", " ", SpeechGrammarWordType.SGLexical, "operation", 1, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "reset", " ", SpeechGrammarWordType.SGLexical, "reset", 2, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "more", " ", SpeechGrammarWordType.SGLexical, "reset", 3, ref PropValue, 1F);
                        grammar.Rules.Commit();
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                case State.allOperation:
                    try {
                        ruleListItemsDefault.Clear();

                        ruleListItemsDefault.InitialState.AddWordTransition(null, "repeat", " ", SpeechGrammarWordType.SGLexical, "master", 0, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "back", " ", SpeechGrammarWordType.SGLexical, "operation", 1, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "reset", " ", SpeechGrammarWordType.SGLexical, "reset", 2, ref PropValue, 1F);
                        grammar.Rules.Commit();
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                case State.allVisit:
                    try {
                        ruleListItemsDefault.Clear();

                        ruleListItemsDefault.InitialState.AddWordTransition(null, "repeat", " ", SpeechGrammarWordType.SGLexical, "master", 0, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "back", " ", SpeechGrammarWordType.SGLexical, "operation", 1, ref PropValue, 1F);
                        ruleListItemsDefault.InitialState.AddWordTransition(null, "reset", " ", SpeechGrammarWordType.SGLexical, "reset", 2, ref PropValue, 1F);
                        grammar.Rules.Commit();
                    } catch (Exception e) {
                        System.Windows.Forms.MessageBox.Show(
                            "Exception caught when rebuilding dynamic listbox rule.\r\n\r\n"
                            + e.ToString(),
                            "Error");
                    }
                    break;

                default:

                    break;
            }

            return true;
        }
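In the cases above, the property-name argument passed to AddWordTransition often does not match the spoken word ("repeat" is tagged "master", "more" is tagged "reset"), which looks like copy-paste residue, and the five near-identical case bodies invite a helper. A possible refactor, sketched here rather than taken from the source:

        // Hypothetical helper: rebuild the default rule from a word list, then commit.
        private static void RebuildDefaultRule(ISpeechRecoGrammar grammar, ISpeechGrammarRule rule, params string[] words)
        {
            object propValue = "";
            rule.Clear();
            for (int i = 0; i < words.Length; i++)
            {
                // Tag each transition with the word itself so recognition results are unambiguous
                rule.InitialState.AddWordTransition(null, words[i], " ",
                    SpeechGrammarWordType.SGLexical, words[i], i, ref propValue, 1F);
            }
            grammar.Rules.Commit();
        }

A case body would then collapse to, for example, RebuildDefaultRule(grammar, ruleListItemsDefault, "repeat", "back", "reset");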
Example #21
        /// <summary>
        /// Generate() will be called only once if there is no typed-text; it
        /// should use dictation. Generate() will be called a second time if
        /// there is typed-text; the second pass should use both dictation and
        /// context-free-grammar (ie, Command and Control: a Rule that's based
        /// on the typed-text).
        /// </summary>
        void Generate()
        {
#if DEBUG
            logfile.Log();
            logfile.Log("Generate() _generato= " + _generato);
#endif
            _offset = 0;
            Confidence_def_count = 0;

            // was "2" but MS doc says not needed on its end.
            // and I don't see grammar id #2 defined on this end either.
            _recoGrammar = _recoContext.CreateGrammar();
//			_recoGrammar.DictationLoad(); // ("Pronunciation") <- causes orthemes to print as phonemes instead of words

            switch (_generato)
            {
            case Generator.Dictati:
                if (_recoGrammar.Rules.FindRule(RULE) != null)
                {
#if DEBUG
                    logfile.Log(". set Rule INACTIVE");
#endif
                    _recoGrammar.CmdSetRuleState(RULE, SpeechRuleState.SGDSInactive);
                }
#if DEBUG
                logfile.Log(". set Dictation ACTIVE");
#endif
                _recoGrammar.DictationSetState(SpeechRuleState.SGDSActive);
                break;

            case Generator.Dialogi:
#if DEBUG
                logfile.Log(". set Dictation INACTIVE");
#endif
                _recoGrammar.DictationSetState(SpeechRuleState.SGDSInactive);

                if (_recoGrammar.Rules.FindRule(RULE) == null)
                {
#if DEBUG
                    logfile.Log(". . add \"" + RULE + "\" Rule");
#endif
                    ISpeechGrammarRule rule = _recoGrammar.Rules.Add(RULE,
                                                                     SpeechRuleAttributes.SRATopLevel,
                                                                     1);
                    rule.InitialState.AddWordTransition(null,
                                                        _text,
                                                        " ",
                                                        SpeechGrammarWordType.SGLexical,
                                                        RULE,
                                                        1);
                    _recoGrammar.Rules.Commit();
                }
#if DEBUG
                logfile.Log(". set Rule ACTIVE");
#endif
                _recoGrammar.CmdSetRuleState(RULE, SpeechRuleState.SGDSActive);


//					logfile.Log(". max alternates(pre)= " + _recoContext.CmdMaxAlternates);
//					_recoContext.CmdMaxAlternates = 3;
//					logfile.Log(". max alternates(pos)= " + _recoContext.CmdMaxAlternates);
                break;
            }

#if DEBUG
            logfile.Log(". create (SpFileStream)_fs");
#endif
            _fs = new SpFileStream();
#if DEBUG
            logfile.Log(". (SpFileStream)_fs CREATED");
#endif
//			_fs.Format.Type = SpeechAudioFormatType.SAFT44kHz16BitMono;

#if DEBUG
            logfile.Log(". Open Wavefile _fs");
#endif
            _fs.Open(Wavefile);
#if DEBUG
            logfile.Log(". _fs.Format.Type= " + _fs.Format.Type);             // SpeechAudioFormatType.SAFT44kHz16BitMono
            SpWaveFormatEx data = _fs.Format.GetWaveFormatEx();
            logfile.Log(". . SamplesPerSec= " + data.SamplesPerSec);
            logfile.Log(". . BitsPerSample= " + data.BitsPerSample);
            logfile.Log(". . AvgBytesPerSec= " + data.AvgBytesPerSec);
            logfile.Log(". . Channels= " + data.Channels);
            logfile.Log(". . BlockAlign= " + data.BlockAlign);
            logfile.Log(". . FormatTag= " + data.FormatTag);
            logfile.Log(". . ExtraData= " + data.ExtraData);

            // filestream byte-data ->
//			int bytes, pos = 0;
//			object o = new byte[2];
//			while ((bytes = _fs.Read(out o, 2)) > 0)
//			{
//				var buffer = (byte[])o;
//				logfile.Log(pos + " : " + buffer[1] + " " + buffer[0]); // treat as little-endian shorts
//				pos += bytes;
//			}
//			_fs.Seek(0);


            logfile.Log(". assign _fs to _recognizer.AudioInputStream");
#endif
            _recognizer.AudioInputStream = _fs;             // <- start Recognition <--
#if DEBUG
            logfile.Log("Generate() DONE");
            logfile.Log();
#endif
        }
Example #22
        public C2SRold(Form1 form)
        {
            form1                      = form;
            gpio                       = new C2gpio(1, "");
            state                      = State.IDLE;
            voice                      = new C2Voice(1);
            C2attentionTimer           = new Timer(30000); //30 second time out for C2 to stop listening
            C2attentionTimer.Elapsed  += new ElapsedEventHandler(C2attentionTimer_Elapsed);
            C2attentionTimer.AutoReset = false;

            missunderstandCount = 0;
            voice.Speak("C2 standing by and awaiting your instructions!");

            //recoContext = new SpSharedRecoContext();
            recoContext = new SpInProcRecoContext();

            //set up the socket stream first
            //IPEndPoint receiver = new IPEndPoint(new IPAddress(("192.168.2.101"), 1234);
//            UdpClient udpClient = new UdpClient("192.168.2.101", 1234);
            //UdpClient udpClient = new UdpClient(1234);
            //udpClient.Connect(receiver);
//            Socket socket = udpClient.Client;

            //TcpClient tcpClient = new TcpClient("192.168.2.101", 1234);
//            Socket socket = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
//            socket.Connect("192.168.2.101", 1234);
//            if (!socket.Connected)
//            {
//                form1.statusMsg = "socket was never connected!";
//                return;
//            }

            //SpMMAudioIn instream = new SpMMAudioIn();
//            ASRStreamClass myAsrStream = new ASRStreamClass();
//            mySrStream = new C2SRStream("192.168.2.101", 1234);
            rtpClient = new RTPReceiver(1234);
            rtpClient.StartClient();
            SpCustomStream stream = new SpCustomStream();

//            stream.BaseStream = (System.Runtime.InteropServices.ComTypes.IStream)mySrStream;
//            stream.BaseStream = (System.Runtime.InteropServices.ComTypes.IStream)rtpClient.AudioStream;
            stream.BaseStream = rtpClient.AudioStream;
            //SpStream st = new SpStream();
            //st.



            //m_GrammarID = 1;
            Grammar = this.recoContext.CreateGrammar(0);
            Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
            //our program doesn't do this
            Grammar.DictationSetState(SpeechRuleState.SGDSActive);
            //our program doesn't do this

            //            ISpeechGrammarRule CommandsRule;
            //            CommandsRule = Grammar.Rules.Add("CommandsRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
            //            CommandsRule.Clear();
            //            object dummy = 0;
            //            string sCommand = "see";
            //            CommandsRule.InitialState.AddWordTransition(null, sCommand, " ", SpeechGrammarWordType.SGLexical, null, 0, ref dummy, 0);
            //            Grammar.Rules.Commit();
            //            Grammar.CmdSetRuleState("CommandsRule", SpeechRuleState.SGDSActive);
            //stream.get
            this.recoContext.Recognizer.AudioInputStream = stream;
            //this.recoContext.Recognizer.AudioInputStream = (ISpeechBaseStream) stream.BaseStream;
            //this.recoContext.Recognizer.AudioInputStream = (ISpeechBaseStream)rtpClient.Stream;
            //RecoContext.EventInterests = SpeechRecoEvents.SREAllEvents;
            //RecoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT32kHz16BitMono;
            recoContext.RetainedAudioFormat.Type = SpeechAudioFormatType.SAFT24kHz16BitMono;
            //RecoContext.EventInterests = SPSEMANTICFORMAT. SRERecognition + SRESoundEnd + SREStreamEnd + SREStreamStart + SRESoundEnd;
            recoContext.Recognition += new SpeechLib._ISpeechRecoContextEvents_RecognitionEventHandler(InterpretCommand);
            //RecoContext.Recognition += new _ISpeechRecoContextEvents_

            recoContext.Recognizer.SetPropertyNumber("AdaptationOn", 0);
        }
Example #23
        //private void initSpeech() {
        //    //Debug.WriteLine("Initializing SAPI");
        //    try {
        //        //create Main context Obj
        //        objRecoContext = new SpSharedRecoContext();
        //        objRecoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(objRecoContext_Recognition);
        //        grammar = objRecoContext.CreateGrammar(0);
        //        string path = "Grammar.xml";
        //        grammar.CmdLoadFromFile(path, SpeechLoadOption.SLODynamic);
        //        //activate Top Level Rule
        //        grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);
        //        //speechInitialized = true;
        //    } catch (Exception e) {
        //        logOut("Exception: " + e.ToString());
        //    }
        //}
        private void InitializeSpeech()
        {
            Debug.WriteLine("Initializing SAPI objects...");

            try {
                // First of all, let's create the main reco context object.
                // In this sample, we are using shared reco context. Inproc reco
                // context is also available. Please see the document to decide
                // which is best for your application.
                objRecoContext = new SpeechLib.SpSharedRecoContext();

                // Then, let's set up the event handler. We only care about
                // Hypothesis and Recognition events in this sample.
                objRecoContext.Hypothesis += new
                    _ISpeechRecoContextEvents_HypothesisEventHandler(
                    RecoContext_Hypothesis);

                objRecoContext.Recognition += new
                    _ISpeechRecoContextEvents_RecognitionEventHandler(
                    RecoContext_Recognition);

                // Now let's build the grammar.
                // The top level rule consists of two parts: "select <items>".
                // So we first add a word transition for the "select" part, then
                // a rule transition for the "<items>" part, which is dynamically
                // built as items are added or removed from the listbox.
                //grammar = objRecoContext.CreateGrammar(grammarId);
                //ruleTopLevel = grammar.Rules.Add("TopLevelRule",
                //    SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
                //ruleListItems = grammar.Rules.Add("ListItemsRule",
                //    SpeechRuleAttributes.SRADynamic, 2);

                //SpeechLib.ISpeechGrammarRuleState stateAfterSelect;
                //stateAfterSelect = ruleTopLevel.AddState();

                //object PropValue = "";
                //ruleTopLevel.InitialState.AddWordTransition(stateAfterSelect,
                //    PreCommandString, " ", SpeechGrammarWordType.SGLexical,
                //    "", 0, ref PropValue, 1.0F);

                //PropValue = "";
                //stateAfterSelect.AddRuleTransition(null, ruleListItems, "",
                //    1, ref PropValue, 0F);

                // Now add existing list items to the ruleListItems

                grammar = objRecoContext.CreateGrammar(10);
                ruleTopLevel = grammar.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
                ruleListItemsDefault = grammar.Rules.Add("ListItemsRule", SpeechRuleAttributes.SRADynamic, 2);

                SpeechLib.ISpeechGrammarRuleState stateAfterSelect;
                stateAfterSelect = ruleTopLevel.AddState();

                object PropValue = "";
                ruleTopLevel.InitialState.AddWordTransition(stateAfterSelect, "", " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue, 1.0F);

                PropValue = "";
                stateAfterSelect.AddRuleTransition(null, ruleListItemsDefault, "", 1, ref PropValue, 0F);

                voiceInfoAutomat.RebuildGrammar(this.grammar, this.speechEnabled, this.objRecoContext, ruleListItemsDefault);

                // Now we can activate the top level rule. In this sample, only
                // the top level rule needs to activated. The ListItemsRule is
                // referenced by the top level rule.

                grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
                speechInitialized = true;
            } catch (Exception e) {
                System.Windows.Forms.MessageBox.Show(
                    "Exception caught when initializing SAPI."
                    + " This application may not run correctly.\r\n\r\n"
                    + e.ToString(),
                    "Error");
            }
        }
Example #24
 /// <summary>
 /// Stops listening
 /// </summary>
 public void Stop()
 {
     recoContext = null;
     recoGrammar = null;
     recoGrammarRule = null;
 }
Example #25
 public SpRecognition()
 {
     this.ssrContex              = new SpSharedRecoContextClass();
     this.isrg                   = this.ssrContex.CreateGrammar(1);
     this.ssrContex.Recognition += SsrContex_Recognition;
 }
Example #26
        /// <summary>
        /// Tries to load a SRGS grammar for SAPI 5.1 or earlier
        /// </summary>
        /// <returns>true if grammar was loaded successfully, false otherwise</returns>
        protected override bool LoadGrammar(FileInfo grammarFile)
        {
            if ((grammarFile == null) || !grammarFile.Exists) return false;
            hasGrammar = false;
            try
            {
                // set up the grammar
                grammar = recoContext.CreateGrammar(0);
                // set up the dictation grammar
                grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
                grammar.DictationSetState(SpeechRuleState.SGDSInactive);
                // load the command and control grammar
                grammar.CmdLoadFromFile(grammarFile.FullName, SpeechLoadOption.SLOStatic);
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSInactive);
                // activate one of the grammars if we don't want both at the same time
                //if (commandAndControl)
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);
                //else
                //	grammar.DictationSetState(SpeechRuleState.SGDSActive);
                //if (GrammarLoaded != null) GrammarLoaded(this);
                hasGrammar = true;
            }
            catch
            {
                hasGrammar = false;
            }

            return hasGrammar;
        }
Example #27
        private void button3_Click(object sender, EventArgs e)
        {
            if (this.recRule != null)
            {
                return;
            }
            //this.button3.Enabled = false;
            this.recRule = new SpInProcRecoContext();

            bool hit = false;

            foreach (SpObjectToken recPerson in this.recRule.Recognizer.GetRecognizers())
            {
                string lang = recPerson.GetAttribute("Language");
                if (lang == "411")
                {
                    this.recRule.Recognizer.Recognizer = recPerson;
                    hit = true;
                    break;
                }
            }

            if (!hit)
            {
                MessageBox.Show("日本語認識が利用できません。"); // "Japanese recognition is unavailable."
            }
            else
            {
                Console.WriteLine("マイク取得開始\n");

                Console.WriteLine(this.recRule.Recognizer.Status.ClsidEngine);
                this.recRule.Recognizer.AudioInput = this.CreateMicrofon();

                Console.WriteLine("マイク取得完了\n");
                Console.WriteLine("デリゲート登録\n");
                // Recognition in progress (hypothesis)
                this.recRule.Hypothesis +=
                    delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult result) {
                    string strText = result.PhraseInfo.GetText(0, -1, true);
                    this._SourceViewLabel.Text = strText;
                };
                //認識完了
                this.recRule.Recognition +=
                    delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr) {
                    string strText = isrr.PhraseInfo.GetText(0, -1, true);
                    this._ResultLabel.Text = strText;
                };
                // Some data arrived on the stream (?)
                this.recRule.StartStream +=
                    delegate(int streamNumber, object streamPosition) {
                    this._SourceViewLabel.Text = "認識?";
                    this._ResultLabel.Text     = "認識?";
                };
                // Recognition failed
                this.recRule.FalseRecognition +=
                    delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr) {
                    this._ResultLabel.Text = "--ERROR!--";
                };
                Console.WriteLine("デリゲート登録完了\n");
                Console.WriteLine("モデル作成\n");
                // Create the language model (grammar)
                this.recGrammerRule = this.recRule.CreateGrammar(0);
                Console.WriteLine("モデル作成完了\n");
                this.recGrammerRule.Reset(0);
                // Create the top-level rule of the language model.
                this.recGRGrammerRule = this.recGrammerRule.Rules.Add("TopLevelRule",
                                                                      SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);

                // All target phrases must be added here before the rules are committed
                this.recGRGrammerRule.InitialState.AddWordTransition(null, "私は");

                // Commit the rules.
                this.recGrammerRule.Rules.Commit();

                // Start recognition (set SpeechRuleState.SGDSActive on the top-level rule by name).
                this.recGrammerRule.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);

                Console.WriteLine("音声認識開始");
            }
        }
Example #28
        private void startSpeech()
        {
            if (RecoContext == null)
            {
                RecoContext = new SpSharedRecoContext();
                grammar = RecoContext.CreateGrammar(1);
                grammar.DictationLoad();
            }

            grammar.DictationSetState(SpeechRuleState.SGDSActive);    // Opens up the dictation possibility.
        }