Example #1
        private void comboBoxEx1_SelectedIndexChanged(object sender, EventArgs e)
        {
            _WAVFile = comboBoxEx1.SelectedItem as string;   // SelectedText only returns highlighted edit text
            if (string.IsNullOrEmpty(_WAVFile))
            {
                return;
            }
            wavRecoContext = new SpeechLib.SpInProcRecoContext();

            ((SpInProcRecoContext)wavRecoContext).Recognition +=
                new _ISpeechRecoContextEvents_RecognitionEventHandler(wavRecoContext_Recognition);

            ((SpInProcRecoContext)wavRecoContext).EndStream +=
                new _ISpeechRecoContextEvents_EndStreamEventHandler(wavRecoContext_EndStream);

            Grammar = wavRecoContext.CreateGrammar(0);

            Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);

            InputWAV = new SpFileStream();

            InputWAV.Open(_WAVFile, SpeechStreamFileMode.SSFMOpenForRead, false);

            wavRecoContext.Recognizer.AudioInputStream = InputWAV;

            Grammar.DictationSetState(SpeechRuleState.SGDSActive);
        }
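
All three examples rely on form-level members (_WAVFile, wavRecoContext, Grammar, InputWAV) that are declared elsewhere in the form. A plausible set of declarations, with the types inferred from how the members are used (the exact types in the original projects are an assumption):

        // Assumed field declarations; types inferred from usage, not taken from the original listings
        private string _WAVFile;
        private ISpeechRecoContext wavRecoContext;   // the casts to SpInProcRecoContext suggest an interface-typed field
        private ISpeechRecoGrammar Grammar;          // returned by ISpeechRecoContext.CreateGrammar
        private SpFileStream InputWAV;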
Example #2
        private void labelX1_Click(object sender, EventArgs e)
        {
            try
            {
                OpenFileDialog dialog = new OpenFileDialog();

                dialog.Title = "Select a Speech file";

                // ShowDialog returns Cancel if the user backs out; FileName is empty in that case
                if (dialog.ShowDialog() != DialogResult.OK || string.IsNullOrEmpty(dialog.FileName))
                {
                    return;
                }

                _WAVFile = dialog.FileName;

                comboBoxEx1.Items.Add(dialog.FileName);
                comboBoxEx1.Text = dialog.FileName;   // SelectedText would only replace highlighted edit text

                wavRecoContext = new SpeechLib.SpInProcRecoContext();

                ((SpInProcRecoContext)wavRecoContext).Recognition +=
                    new _ISpeechRecoContextEvents_RecognitionEventHandler(wavRecoContext_Recognition);

                ((SpInProcRecoContext)wavRecoContext).EndStream +=
                    new _ISpeechRecoContextEvents_EndStreamEventHandler(wavRecoContext_EndStream);

                Grammar = wavRecoContext.CreateGrammar(0);

                Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);

                InputWAV = new SpFileStream();

                InputWAV.Open(_WAVFile, SpeechStreamFileMode.SSFMOpenForRead, false);

                wavRecoContext.Recognizer.AudioInputStream = InputWAV;

                Grammar.DictationSetState(SpeechRuleState.SGDSActive);
            }
            catch (Exception er)
            {
                //MessageBox.Show("An Error Occured!", "SpeechApp", MessageBoxButtons.OK, MessageBoxIcon.Error);
                System.Console.WriteLine(er.ToString());
            }
        }
Example #3
File: Form1.cs Project: lixu1/Csharp
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog dialog = new OpenFileDialog();

            dialog.Title = "Select a Speech file";

            // ShowDialog returns Cancel if the user backs out; FileName is empty in that case
            if (dialog.ShowDialog() != DialogResult.OK || string.IsNullOrEmpty(dialog.FileName))
            {
                return;
            }

            _WAVFile = dialog.FileName;

            //***********************************************
            // Now we have the WAV file, we can set up the
            // in-process speech engine to process it
            //***********************************************

            // create the recognition context
            wavRecoContext = new SpeechLib.SpInProcRecoContext();

            //******************************************************************
            // Register our handler as a listener on the Recognition event.
            // That way, any time the speech engine thinks it "hears" something
            // it recognizes, we're called to check it out for ourselves.
            //******************************************************************
            ((SpInProcRecoContext)wavRecoContext).Recognition +=
                new _ISpeechRecoContextEvents_RecognitionEventHandler(wavRecoContext_Recognition);

            //******************************************************************
            // Register a method on the EndStream event so we can do basic
            // clean-up when the audio file is finished.
            //******************************************************************
            ((SpInProcRecoContext)wavRecoContext).EndStream +=
                new _ISpeechRecoContextEvents_EndStreamEventHandler(wavRecoContext_EndStream);

            //*************************************************************************
            // The parameter passed to CreateGrammar is an arbitrary int. It only
            // matters when you have more than one grammar active, so you can
            // specify which one is to be used.
            //*************************************************************************
            Grammar = wavRecoContext.CreateGrammar(1);

            // I simply use the default grammar for dictation
            Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);

            //*************************************************************************
            // The speech engine is now ready to go, so point it at the audio file.
            // To do this, we open the requested file using SpeechStreamFileMode
            // and pass the stream to the speech engine as its input source.
            //*************************************************************************
            InputWAV = new SpFileStream();
            InputWAV.Open(_WAVFile, SpeechStreamFileMode.SSFMOpenForRead, false);
            wavRecoContext.Recognizer.AudioInputStream = InputWAV;

            //*************************************************************************
            // The way you "turn on" the speech engine is by setting the dictation
            // state of its grammar to Active.
            //*************************************************************************
            Grammar.DictationSetState(SpeechRuleState.SGDSActive);

            //*************************************************************************
            // The result will be handled by wavRecoContext_Recognition().
            //*************************************************************************
        }
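
All three listings register wavRecoContext_Recognition and wavRecoContext_EndStream, but neither handler is shown. A minimal sketch of what they might look like, using the SpeechLib interop event signatures (the handler bodies are assumptions, not code from the original projects):

        private void wavRecoContext_Recognition(int StreamNumber, object StreamPosition,
            SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            // PhraseInfo holds the recognized phrase; GetText returns it as plain text
            System.Console.WriteLine(Result.PhraseInfo.GetText(0, -1, true));
        }

        private void wavRecoContext_EndStream(int StreamNumber, object StreamPosition, bool StreamReleased)
        {
            // Basic clean-up once the WAV file has been fully processed
            // (assumes the Grammar and InputWAV fields sketched above)
            Grammar.DictationSetState(SpeechRuleState.SGDSInactive);
            InputWAV.Close();
        }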