/// <summary>
/// Prompts the user for an output path, redirects the TTS voice's audio
/// output to that file, runs synthesis via <c>Tts()</c>, waits for it to
/// finish, and then reveals the written file in Windows Explorer.
/// </summary>
private void TtsSave()
{
    using (SaveFileDialog saveFileDialog = new SaveFileDialog())
    {
        saveFileDialog.Title = "保存声音";
        saveFileDialog.Filter = "*.wav|*.wav|*.mp3|*.mp3";
        saveFileDialog.InitialDirectory = Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments);

        if (saveFileDialog.ShowDialog() != DialogResult.OK)
        {
            return;
        }

        SpeechLib.SpFileStream sfs = null;
        try
        {
            sfs = new SpeechLib.SpFileStream();
            sfs.Open(saveFileDialog.FileName, SpeechLib.SpeechStreamFileMode.SSFMCreateForWrite, false);
            voice.AudioOutputStream = sfs;
            Tts();
            // Block until synthesis has been fully written to the stream.
            voice.WaitUntilDone(System.Threading.Timeout.Infinite);

            // Open the containing folder and select the new file.
            // BUG FIX: the path is now quoted so file names containing
            // spaces are passed to Explorer as a single argument.
            System.Diagnostics.Process.Start("Explorer.exe",
                string.Format("/select,\"{0}\"", saveFileDialog.FileName));
        }
        catch (Exception er)
        {
            MessageBox.Show(er.ToString(), "提示", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
        finally
        {
            // BUG FIX: the original only closed the stream on the success
            // path, leaking the file handle whenever Tts()/WaitUntilDone
            // threw. Close unconditionally.
            if (sfs != null)
            {
                sfs.Close();
            }
        }
    }
}
/// <summary>
/// Starts speech recognition against the WAV file chosen from the combo
/// box: builds an in-process recognition context, loads the default
/// dictation grammar, binds the file as the audio input, and activates
/// dictation. Results arrive in <c>wavRecoContext_Recognition</c>.
/// </summary>
private void comboBoxEx1_SelectedIndexChanged(object sender, EventArgs e)
{
    // BUG FIX: SelectedText only returns the highlighted portion of the
    // edit box (normally empty at SelectedIndexChanged time); the chosen
    // entry is SelectedItem. The original therefore tried to open "".
    _WAVFile = comboBoxEx1.SelectedItem as string;
    // BUG FIX: guard against empty as well as null — SelectedText/"" made
    // the original null check dead code.
    if (string.IsNullOrEmpty(_WAVFile))
    {
        return;
    }

    wavRecoContext = new SpeechLib.SpInProcRecoContext();
    ((SpInProcRecoContext)wavRecoContext).Recognition +=
        new _ISpeechRecoContextEvents_RecognitionEventHandler(wavRecoContext_Recognition);
    ((SpInProcRecoContext)wavRecoContext).EndStream +=
        new _ISpeechRecoContextEvents_EndStreamEventHandler(wavRecoContext_EndStream);

    // Grammar id 0 is arbitrary — it only matters with multiple grammars.
    Grammar = wavRecoContext.CreateGrammar(0);
    Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);

    // Use the WAV file (not the microphone) as the recognizer's input.
    InputWAV = new SpFileStream();
    InputWAV.Open(_WAVFile, SpeechStreamFileMode.SSFMOpenForRead, false);
    wavRecoContext.Recognizer.AudioInputStream = InputWAV;

    // Activating the dictation grammar "turns on" the engine.
    Grammar.DictationSetState(SpeechRuleState.SGDSActive);
}
/// <summary>
/// Lets the user pick a WAV file, records it in the combo box, and kicks
/// off speech recognition on it (same pipeline as the combo-box handler).
/// </summary>
private void labelX1_Click(object sender, EventArgs e)
{
    try
    {
        using (OpenFileDialog dialog = new OpenFileDialog())
        {
            dialog.Title = "Select a Speech file";
            // BUG FIX: the original ignored the dialog result; on cancel,
            // FileName is "" (never null), so its null guard was dead code
            // and recognition ran against an empty path.
            if (dialog.ShowDialog() != DialogResult.OK || string.IsNullOrEmpty(dialog.FileName))
            {
                return;
            }

            _WAVFile = dialog.FileName;
            comboBoxEx1.Items.Add(dialog.FileName);
            // NOTE(review): assigning SelectedText replaces the highlighted
            // edit-box text; SelectedItem was probably intended, but that
            // would fire SelectedIndexChanged and re-run recognition —
            // preserved as-is pending confirmation.
            comboBoxEx1.SelectedText = dialog.FileName;

            wavRecoContext = new SpeechLib.SpInProcRecoContext();
            ((SpInProcRecoContext)wavRecoContext).Recognition +=
                new _ISpeechRecoContextEvents_RecognitionEventHandler(wavRecoContext_Recognition);
            ((SpInProcRecoContext)wavRecoContext).EndStream +=
                new _ISpeechRecoContextEvents_EndStreamEventHandler(wavRecoContext_EndStream);

            // Default dictation grammar; the id only matters when more
            // than one grammar is active.
            Grammar = wavRecoContext.CreateGrammar(0);
            Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);

            // Feed the chosen WAV file to the recognizer as its input.
            InputWAV = new SpFileStream();
            InputWAV.Open(_WAVFile, SpeechStreamFileMode.SSFMOpenForRead, false);
            wavRecoContext.Recognizer.AudioInputStream = InputWAV;

            // Activating dictation starts recognition; results are handled
            // by wavRecoContext_Recognition.
            Grammar.DictationSetState(SpeechRuleState.SGDSActive);
        }
    }
    catch (Exception er)
    {
        //MessageBox.Show("An Error Occured!", "SpeechApp", MessageBoxButtons.OK, MessageBoxIcon.Error);
        System.Console.WriteLine(er.ToString());
    }
}
/// <summary>
/// Lets the user pick a WAV file and runs the in-process speech engine
/// over it: wires up recognition/end-of-stream events, loads the default
/// dictation grammar, binds the file stream as audio input, and activates
/// dictation. The result is delivered to <c>wavRecoContext_Recognition</c>.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    using (OpenFileDialog dialog = new OpenFileDialog())
    {
        dialog.Title = "Select a Speech file";
        // BUG FIX: the original ignored the dialog result; on cancel,
        // FileName is "" (never null), so the null guard below it was
        // dead code and the engine was pointed at an empty path.
        if (dialog.ShowDialog() != DialogResult.OK || string.IsNullOrEmpty(dialog.FileName))
        {
            return;
        }
        _WAVFile = dialog.FileName;

        //***********************************************
        // Now we have the WAV file, we can set up the
        // in-process speech engine to process it.
        //***********************************************

        // Create the recognition context.
        wavRecoContext = new SpeechLib.SpInProcRecoContext();

        // Register our handler on the Recognition event: any time the
        // engine thinks it "hears" something it recognizes, we're called.
        ((SpInProcRecoContext)wavRecoContext).Recognition +=
            new _ISpeechRecoContextEvents_RecognitionEventHandler(wavRecoContext_Recognition);

        // Register a handler on the EndStream event for basic clean-up
        // once the audio file is finished.
        ((SpInProcRecoContext)wavRecoContext).EndStream +=
            new _ISpeechRecoContextEvents_EndStreamEventHandler(wavRecoContext_EndStream);

        // The CreateGrammar parameter is an arbitrary id — it only matters
        // when more than one grammar is active. Use the default grammar
        // for dictation.
        Grammar = wavRecoContext.CreateGrammar(1);
        Grammar.DictationLoad("", SpeechLoadOption.SLOStatic);

        // The engine is ready; open the requested file with the speech
        // stream file mode and hand it to the engine as its input source.
        InputWAV = new SpFileStream();
        InputWAV.Open(_WAVFile, SpeechStreamFileMode.SSFMOpenForRead, false);
        wavRecoContext.Recognizer.AudioInputStream = InputWAV;

        // "Turning on" the engine means setting the dictation state of its
        // grammar to Active; the result will be handled by
        // wavRecoContext_Recognition().
        Grammar.DictationSetState(SpeechRuleState.SGDSActive);
    }
}