Example #1
0
        /// <summary>
        /// Deactivates the grammar rule of every known document, then
        /// activates only the rules whose names appear in <paramref name="keys"/>.
        /// </summary>
        /// <param name="keys">Rule names (document names) to activate.</param>
        public void select_documents(ICollection keys)
        {
            // Start from a clean slate: switch every document rule off.
            foreach (var doc in documents.Values)
            {
                grammar.CmdSetRuleState(doc.name, SpeechRuleState.SGDSInactive);
            }

            // Re-enable just the requested rules.
            foreach (string ruleName in keys)
            {
                grammar.CmdSetRuleState(ruleName, SpeechRuleState.SGDSActive);
            }
        }
Example #2
0
 /// <summary>
 /// Commits pending grammar rule changes, switches the grammar to exclusive
 /// mode, enables the recognition context, and activates every named rule.
 /// </summary>
 private void CommitAndActivate()
 {
     m_grammar.Rules.Commit();
     m_grammar.State = SpeechGrammarState.SGSExclusive;
     m_context.State = SpeechRecoContextState.SRCS_Enabled;

     // Turn on each top-level rule we manage.
     foreach (string ruleName in m_names)
     {
         m_grammar.CmdSetRuleState(ruleName, SpeechRuleState.SGDSActive);
     }
 }
Example #3
0
        /// <summary>
        /// Starts listening: creates a shared recognition context, loads the
        /// command grammar from a file, activates its first rule plus free
        /// dictation, and hooks the Recognition event.
        /// </summary>
        /// <param name="grammarFile">
        /// Path of the grammar XML file to load. Defaults to "" to preserve the
        /// original call sites' behavior; NOTE(review): an empty path will
        /// almost certainly fail in CmdLoadFromFile — callers should pass a
        /// real grammar file path (the old code had "D:\\SpeechGammar.xml"
        /// commented out).
        /// </param>
        public void BeginRec(string grammarFile = "")
        {
            ssrContex = new SpSharedRecoContext();
            ssrContex.EventInterests = SpeechRecoEvents.SREAllEvents; // subscribe to all speech events
            isrgammar = ssrContex.CreateGrammar(0);
            isrgammar.CmdLoadFromFile(grammarFile, SpeechLoadOption.SLODynamic); // load rules (dynamic: editable at runtime)
            isrgammar.CmdSetRuleState(isrgammar.Rules.Item(0).Name, SpeechRuleState.SGDSActive); // activate the first rule
            ssrContex.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(ContexRecognition);

            ssrContex.State = SpeechRecoContextState.SRCS_Enabled;
            isrgammar.DictationSetState(SpeechRuleState.SGDSActive); // also enable free dictation
            IsStart = true;
        }
Example #4
0
        /// <summary>
        /// Starts the listening: creates a shared recognition context, builds a
        /// dynamic top-level rule with one word transition per command, commits
        /// the grammar, and activates the rule.
        /// </summary>
        public void Start()
        {
            recoContext              = new SpSharedRecoContextClass();
            recoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(recoContext_Recognition);

            recoGrammar     = recoContext.CreateGrammar(0);
            recoGrammarRule = recoGrammar.Rules.Add("VoiceCommands", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);

            object propValue = "";

            for (int i = 0; i < commands.Count; i++)
            {
                // Was mixing `commands[i]` and `Commands[i]` in the same call;
                // use one source for both the spoken word and its property name.
                string command = commands[i].ToString();
                recoGrammarRule.InitialState.AddWordTransition(null, command, " ", SpeechGrammarWordType.SGLexical, command, i, ref propValue, 1.0F);
            }

            recoGrammar.Rules.Commit();
            recoGrammar.CmdSetRuleState("VoiceCommands", SpeechRuleState.SGDSActive);
        }
        //private void initSpeech() {
        //    //Debug.WriteLine("Initializing SAPI");
        //    try {
        //        //create Main context Obj
        //        objRecoContext = new SpSharedRecoContext();
        //        objRecoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(objRecoContext_Recognition);
        //        grammar = objRecoContext.CreateGrammar(0);
        //        string path = "Grammar.xml";
        //        grammar.CmdLoadFromFile(path, SpeechLoadOption.SLODynamic);
        //        //activate Top Level Rule
        //        grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);
        //        //speechInitialized = true;
        //    } catch (Exception e) {
        //        logOut("Exception: " + e.ToString());
        //    }
        //}
        /// <summary>
        /// Initializes SAPI: creates a shared recognition context, wires up the
        /// Hypothesis and Recognition event handlers, builds a dynamic
        /// "select &lt;items&gt;" grammar (a top-level rule referencing a dynamic
        /// list-items rule), hands the list-items rule to
        /// voiceInfoAutomat.RebuildGrammar for population, and activates the
        /// top-level rule. On any exception a message box is shown and
        /// speechInitialized remains unset.
        /// </summary>
        private void InitializeSpeech()
        {
            Debug.WriteLine("Initializing SAPI objects...");

            try {
                // First of all, let's create the main reco context object.
                // In this sample, we are using shared reco context. Inproc reco
                // context is also available. Please see the document to decide
                // which is best for your application.
                objRecoContext = new SpeechLib.SpSharedRecoContext();

                // Then, let's set up the event handler. We only care about
                // Hypothesis and Recognition events in this sample.
                objRecoContext.Hypothesis += new
                    _ISpeechRecoContextEvents_HypothesisEventHandler(
                    RecoContext_Hypothesis);

                objRecoContext.Recognition += new
                    _ISpeechRecoContextEvents_RecognitionEventHandler(
                    RecoContext_Recognition);

                // Now let's build the grammar.
                // The top level rule consists of two parts: "select <items>".
                // So we first add a word transition for the "select" part, then
                // a rule transition for the "<items>" part, which is dynamically
                // built as items are added or removed from the listbox.
                //grammar = objRecoContext.CreateGrammar(grammarId);
                //ruleTopLevel = grammar.Rules.Add("TopLevelRule",
                //    SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
                //ruleListItems = grammar.Rules.Add("ListItemsRule",
                //    SpeechRuleAttributes.SRADynamic, 2);

                //SpeechLib.ISpeechGrammarRuleState stateAfterSelect;
                //stateAfterSelect = ruleTopLevel.AddState();

                //object PropValue = "";
                //ruleTopLevel.InitialState.AddWordTransition(stateAfterSelect,
                //    PreCommandString, " ", SpeechGrammarWordType.SGLexical,
                //    "", 0, ref PropValue, 1.0F);

                //PropValue = "";
                //stateAfterSelect.AddRuleTransition(null, ruleListItems, "",
                //    1, ref PropValue, 0F);

                // Now add existing list items to the ruleListItems

                // NOTE(review): grammar id is hard-coded to 10 here, whereas
                // the commented-out original used a grammarId variable.
                grammar = objRecoContext.CreateGrammar(10);
                ruleTopLevel = grammar.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
                ruleListItemsDefault = grammar.Rules.Add("ListItemsRule", SpeechRuleAttributes.SRADynamic, 2);

                SpeechLib.ISpeechGrammarRuleState stateAfterSelect;
                stateAfterSelect = ruleTopLevel.AddState();

                // NOTE(review): the word for this transition is "" while the
                // commented-out original used PreCommandString (the "select"
                // part) — confirm an empty transition word is intended.
                object PropValue = "";
                ruleTopLevel.InitialState.AddWordTransition(stateAfterSelect, "", " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue, 1.0F);

                PropValue = "";
                stateAfterSelect.AddRuleTransition(null, ruleListItemsDefault, "", 1, ref PropValue, 0F);

                // Populate the ListItemsRule with the actual items (done elsewhere).
                voiceInfoAutomat.RebuildGrammar(this.grammar, this.speechEnabled, this.objRecoContext, ruleListItemsDefault);

                // Now we can activate the top level rule. In this sample, only
                // the top level rule needs to activated. The ListItemsRule is
                // referenced by the top level rule.

                grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
                speechInitialized = true;
            } catch (Exception e) {
                System.Windows.Forms.MessageBox.Show(
                    "Exception caught when initializing SAPI."
                    + " This application may not run correctly.\r\n\r\n"
                    + e.ToString(),
                    "Error");
            }
        }
Example #6
0
        /// <summary>
        /// Starts the listening: creates a shared recognition context, builds a
        /// dynamic top-level rule with one word transition per command, commits
        /// the grammar, and activates the rule.
        /// </summary>
        public void Start()
        {
            recoContext = new SpSharedRecoContextClass();
            recoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(recoContext_Recognition);

            recoGrammar = recoContext.CreateGrammar(0);
            recoGrammarRule = recoGrammar.Rules.Add("VoiceCommands", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);

            object propValue = "";

            for (int i = 0; i < commands.Count; i++)
            {
                // Was mixing `commands[i]` and `Commands[i]` in the same call;
                // use one source for both the spoken word and its property name.
                string command = commands[i].ToString();
                recoGrammarRule.InitialState.AddWordTransition(null, command, " ", SpeechGrammarWordType.SGLexical, command, i, ref propValue, 1.0F);
            }

            recoGrammar.Rules.Commit();
            recoGrammar.CmdSetRuleState("VoiceCommands", SpeechRuleState.SGDSActive);
        }
Example #7
0
        /// <summary>
        /// Handles 'SpInProcRecoContext.EndStream' event. Fires after
        /// Recognition(s) completes itself, closes the filestream, and either
        /// (a) calls Generate() for a 2nd pass or (b) calculates word/phoneme
        /// ratios and fires SrStreamEnded -> FxeGenerator.OnSrStreamEnded()
        /// to print results and generate FXE data/datablocks.
        /// </summary>
        /// <param name="StreamNumber">id of the audio stream that ended (logged only)</param>
        /// <param name="StreamPosition">position in the stream when it ended (logged only)</param>
        /// <param name="StreamReleased">unused (only referenced by a commented-out debug log)</param>
        void rc_EndStream(int StreamNumber, object StreamPosition, bool StreamReleased)
        {
#if DEBUG
            logfile.Log();
            logfile.Log("rc_EndStream() #" + StreamNumber + " StreamPosition= " + StreamPosition + " _generato= " + _generato);
            //logfile.Log(". StreamReleased= " + StreamReleased);
#endif
            // Deactivate whichever recognition source the pass that just ended
            // was using: dictation for the 1st pass, the typed-text Rule for
            // the 2nd pass (only if the Rule actually exists).
            switch (_generato)
            {
            case Generator.Dictati:
#if DEBUG
                logfile.Log(". set Dictation INACTIVE");
#endif
                _recoGrammar.DictationSetState(SpeechRuleState.SGDSInactive);
                break;

            case Generator.Dialogi:
                if (_recoGrammar.Rules.FindRule(RULE) != null)
                {
#if DEBUG
                    logfile.Log(". set Rule INACTIVE");
#endif
                    _recoGrammar.CmdSetRuleState(RULE, SpeechRuleState.SGDSInactive);
                }
                break;
            }
//			_recoGrammar.DictationUnload();

#if DEBUG
            logfile.Log(". close _fs");
#endif
            // Release the wave filestream that fed this pass.
            _fs.Close();
            _fs = null;


            Orthography();

            // Decide what happens next. After the dictation pass: if the user
            // typed text, switch to the dialog generator and run a 2nd pass;
            // otherwise finalize the average dictation confidence and fall
            // through to finish. After the dialog pass: compute ratios and
            // notify listeners.
            switch (_generato)
            {
            case Generator.Dictati:
                if (_text != String.Empty)
                {
                    _generato = Generator.Dialogi;
                    Generate();
                }
                else
                {
                    // No typed-text: average the accumulated confidence, then
                    // finish up via the Dialogi case.
                    Confidence_def /= (float)Confidence_def_count;
                    goto case Generator.Dialogi;
                }
                break;

            case Generator.Dialogi:
                CalculateRatios_word();
                CalculateRatios_phon();

//					if (SrStreamEnded != null)
                SrStreamEnded(_ars_def, _ars_enh);
                break;
            }
        }
Example #8
0
        /// <summary>
        /// Generate() will be called only once if there is no typed-text; it
        /// should use dictation. Generate() will be called a second time if
        /// there is typed-text; the second pass should use both dictation and
        /// context-free-grammar (ie, Command and Control: a Rule that's based
        /// on the typed-text).
        /// </summary>
        void Generate()
        {
#if DEBUG
            logfile.Log();
            logfile.Log("Generate() _generato= " + _generato);
#endif
            // Reset per-pass state.
            _offset = 0;
            Confidence_def_count = 0;

            // was "2" but MS doc says not needed on its end.
            // and I don't see grammar id #2 defined on this end either.
            _recoGrammar = _recoContext.CreateGrammar();
//			_recoGrammar.DictationLoad(); // ("Pronunciation") <- causes orthemes to print as phonemes instead of words

            // Activate the source for this pass: dictation for the 1st pass,
            // a single top-level Rule built from the typed-text for the 2nd.
            switch (_generato)
            {
            case Generator.Dictati:
                if (_recoGrammar.Rules.FindRule(RULE) != null)
                {
#if DEBUG
                    logfile.Log(". set Rule INACTIVE");
#endif
                    _recoGrammar.CmdSetRuleState(RULE, SpeechRuleState.SGDSInactive);
                }
#if DEBUG
                logfile.Log(". set Dictation ACTIVE");
#endif
                _recoGrammar.DictationSetState(SpeechRuleState.SGDSActive);
                break;

            case Generator.Dialogi:
#if DEBUG
                logfile.Log(". set Dictation INACTIVE");
#endif
                _recoGrammar.DictationSetState(SpeechRuleState.SGDSInactive);

                if (_recoGrammar.Rules.FindRule(RULE) == null)
                {
#if DEBUG
                    logfile.Log(". . add \"" + RULE + "\" Rule");
#endif
                    // One top-level rule whose single word-transition is the
                    // entire typed-text string.
                    ISpeechGrammarRule rule = _recoGrammar.Rules.Add(RULE,
                                                                     SpeechRuleAttributes.SRATopLevel,
                                                                     1);
                    rule.InitialState.AddWordTransition(null,
                                                        _text,
                                                        " ",
                                                        SpeechGrammarWordType.SGLexical,
                                                        RULE,
                                                        1);
                    _recoGrammar.Rules.Commit();
                }
#if DEBUG
                logfile.Log(". set Rule ACTIVE");
#endif
                _recoGrammar.CmdSetRuleState(RULE, SpeechRuleState.SGDSActive);


//					logfile.Log(". max alternates(pre)= " + _recoContext.CmdMaxAlternates);
//					_recoContext.CmdMaxAlternates = 3;
//					logfile.Log(". max alternates(pos)= " + _recoContext.CmdMaxAlternates);
                break;
            }

#if DEBUG
            logfile.Log(". create (SpFileStream)_fs");
#endif
            _fs = new SpFileStream();
#if DEBUG
            logfile.Log(". (SpFileStream)_fs CREATED");
#endif
//			_fs.Format.Type = SpeechAudioFormatType.SAFT44kHz16BitMono;

#if DEBUG
            logfile.Log(". Open Wavefile _fs");
#endif
            _fs.Open(Wavefile);
#if DEBUG
            logfile.Log(". _fs.Format.Type= " + _fs.Format.Type);             // SpeechAudioFormatType.SAFT44kHz16BitMono
            SpWaveFormatEx data = _fs.Format.GetWaveFormatEx();
            logfile.Log(". . SamplesPerSec= " + data.SamplesPerSec);
            logfile.Log(". . BitsPerSample= " + data.BitsPerSample);
            logfile.Log(". . AvgBytesPerSec= " + data.AvgBytesPerSec);
            logfile.Log(". . Channels= " + data.Channels);
            logfile.Log(". . BlockAlign= " + data.BlockAlign);
            logfile.Log(". . FormatTag= " + data.FormatTag);
            logfile.Log(". . ExtraData= " + data.ExtraData);

            // filestream byte-data ->
//			int bytes, pos = 0;
//			object o = new byte[2];
//			while ((bytes = _fs.Read(out o, 2)) > 0)
//			{
//				var buffer = (byte[])o;
//				logfile.Log(pos + " : " + buffer[1] + " " + buffer[0]); // treat as little-endian shorts
//				pos += bytes;
//			}
//			_fs.Seek(0);


            logfile.Log(". assign _fs to _recognizer.AudioInputStream");
#endif
            // Assigning the input stream is what starts Recognition.
            _recognizer.AudioInputStream = _fs;             // <- start Recognition <--
#if DEBUG
            logfile.Log("Generate() DONE");
            logfile.Log();
#endif
        }