void context_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    if (context.AudioInputInterferenceStatus == SpeechInterference.SINoise)
    {
        return;
    }

    // Feedback.print(string.Format("Result = {0} {1}\r\n", StreamNumber, context.AudioInputInterferenceStatus), Feedback.Status.debug);
    Feedback.print(string.Format("Result = {0}, {1}\r\n", Result.PhraseInfo.GetText(0, -1, true), Result.PhraseInfo.Elements.Item(0).EngineConfidence), Feedback.Status.debug);

    Profiler.initialize();
    Familiar_Result result = new Familiar_Result(context, Result);
    // Familiar_Result result = get_result("> ", Result);
    // result.display("> ");
    Profiler.trace("result created");

    Feedback.print(string.Format("Element Count = {0}\r\n", Result.PhraseInfo.Elements.Count), Feedback.Status.debug);
    foreach (Token word in result.chosen_phrase.words)
    {
        Feedback.print(string.Format("{0} ({1}, {2})\r\n", word.text, word.confidence, word.source.document.name), Feedback.Status.debug);
    }

    //if (Result.PhraseInfo.Elements.Count == 1 && result.chosen_phrase.words[0].confidence < 0.6)
    //    return;

    result.run();

    if (recognized != null)
    {
        recognized.Invoke(this, result);
    }
}
/* Event triggered on a recognition event. */
public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult e)
{
    // Get the recognized phrase.
    string phrase = e.PhraseInfo.GetText(0, -1, true);
    Debug.WriteLine(phrase);
}
/// <summary>
/// RecoContext_Recognition is the event handler function for the
/// SpSharedRecoContext object's Recognition event.
/// </summary>
/// <param name="StreamNumber"></param>
/// <param name="StreamPosition"></param>
/// <param name="RecognitionType"></param>
/// <param name="Result"></param>
/// <remarks>
/// See EnableSpeech() for how to hook up this function with the
/// event.
/// </remarks>
public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    Debug.WriteLine("Recognition: " + Result.PhraseInfo.GetText(0, -1, true) + ", " + StreamNumber + ", " + StreamPosition);

    int index;
    ISpeechPhraseProperty oItem;

    // oItem will be the property of the second part of the recognized
    // phrase. For example, if the top-level rule matches
    // "select Seattle", then the ListItemsRule matches the "Seattle" part.
    // The following code gets the property of the "Seattle" phrase,
    // which is set when the word "Seattle" is added to ruleListItems
    // in RebuildGrammar.
    oItem = Result.PhraseInfo.Properties.Item(0).Children.Item(0);
    index = oItem.Id;

    if ((System.Decimal)Result.PhraseInfo.GrammarId == grammarId)
    {
        // Check whether the item at the same position in the list
        // still has the same text.
        // This prevents the rare case where the user keeps talking
        // while the list is being added to or removed from. By the
        // time this event is fired and handled, the list box may have
        // already changed.
        if (oItem.Name.CompareTo(this.Items[index].ToString()) == 0)
        {
            this.SelectedIndex = index;
        }
    }
}
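The remarks above reference an EnableSpeech() helper that is not shown. A minimal sketch of such a hookup, assuming the SpeechLib COM interop; the field names and the rule name "TopLevelRule" are placeholders, and the real sample builds its rules in RebuildGrammar():

// Sketch only: wires the Recognition event of a shared recognition context
// to the handler above. Names here are illustrative, not the original source.
private SpSharedRecoContext recoContext;
private ISpeechRecoGrammar grammar;
private int grammarId = 10;

private void EnableSpeech()
{
    recoContext = new SpSharedRecoContext();
    // Hook the Recognition event up to the handler above.
    recoContext.Recognition +=
        new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);
    grammar = recoContext.CreateGrammar(grammarId);
    // ... build rules here (cf. RebuildGrammar), then activate the top-level rule:
    grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
}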
private void OnRecognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    string word = Result.PhraseInfo.GetText(0, -1, true);

    // Notify the plugin to output the word the next time Evaluate runs.
    this.FInvalidate = true;
    this.FData = word;
}
private void OnReco(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    // Translate whatever Windows Speech Recognition recognized into text.
    string recoResult = Result.PhraseInfo.GetText();
    // Lowercase it, the same as lowercasing the inquiry text in Minerva.
    recoResult = recoResult.ToLower();
    submit(recoResult);
}
private void SsrContex_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    bool flag = this.SetMessage != null;
    if (flag)
    {
        this.SetMessage(Result.PhraseInfo.GetText(0, -1, true));
    }
}
private void ContexRecognition(int iIndex, object obj, SpeechRecognitionType type, ISpeechRecoResult result)
{
    bool flag = this.SetMessage != null;
    if (flag)
    {
        this.SetMessage(result.PhraseInfo.GetText(0, -1, true));
    }
}
private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    string lWord = Result.PhraseInfo.GetText(0, -1, true);
    if (VoiceCommandReceivedEvent != null)
    {
        VoiceCommandReceivedEvent(this, new VoiceCommandEventArgs(lWord));
    }
}
// Event handler for successful (higher-confidence) voice recognition
public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    //VA.WriteToLog("Recognition successful");  // Output info to event log
    //VA.SetText("~~FalseRecognitionFlag", ""); // Send blank recognition flag ("") back to VoiceAttack as a text variable
    //RecognitionFlag = "";                     // Set the RecognitionFlag to blank

    RecognitionProcessing(Result);              // Process the voice recognition result

    //if (UseDictation == false)                // Check if the pronunciation dictation grammar should NOT be used with speech recognition
    GetPhonemes(Result);                        // Retrieve SAPI phonemes from the recognition result
}
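The handler above delegates to RecognitionProcessing and GetPhonemes, which are not shown. A sketch of what a GetPhonemes helper could look like, assuming SpeechLib's SpPhoneConverter (the same API the rc_Recognition snippet further down uses); the body and logging are illustrative, not the original implementation:

// Illustrative sketch only: retrieves a SAPI phoneme string per recognized word.
// The LanguageId value (1033 = en-US) and the Debug output are assumptions.
private void GetPhonemes(ISpeechRecoResult Result)
{
    SpPhoneConverter phoneConverter = new SpPhoneConverter();
    phoneConverter.LanguageId = 1033; // en-US

    foreach (ISpeechPhraseElement word in Result.PhraseInfo.Elements)
    {
        // Pronunciation is a ushort id or ushort[] of ids; IdToPhone maps
        // them to a space-separated SAPI phoneme string.
        string phonemes = phoneConverter.IdToPhone(word.Pronunciation);
        Debug.WriteLine(word.DisplayText + " -> " + phonemes);
    }
}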
private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    String text = Result.PhraseInfo.GetText(0, -1, true);
    synth.Speak("Recognition: " + text); // DEBUG

    // TODO: For "Do you mean?" functionality, check yes/no NOT in the following
    // function, because they are not actions.
    DoActionFromVoiceCommand(text);
}
public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    //MessageBox.Show(charReq.Status.ToString());
    if (charReq.Status == 0)
    {
        grammar.DictationSetState(SpeechRuleState.SGDSInactive);
        inputBox.Text = Result.PhraseInfo.GetText(0, -1, true);
        doChatting();
        grammar.DictationSetState(SpeechLib.SpeechRuleState.SGDSActive);
    }
}
/// <summary>
/// RecoContext_Recognition is the event handler function for the
/// SpInProcRecoContext object's Recognition event.
/// </summary>
/// <param name="StreamNumber"></param>
/// <param name="StreamPosition"></param>
/// <param name="RecognitionType"></param>
/// <param name="Result"></param>
/// <remarks>
/// See EnableSpeech() for how to hook up this function with the
/// event.
/// </remarks>
private void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    Debug.WriteLine("Recognition: " + Result.PhraseInfo.GetText(0, -1, true) + ", " + StreamNumber + ", " + StreamPosition);

    int index;
    int index1;
    int index2;
    ISpeechPhraseProperty oCard;
    ISpeechPhraseProperty oNumber;
    ISpeechPhraseProperty oCommand;

    // oCommand/oNumber/oCard are the properties of the parts of the recognized
    // phrase. For example, if the top-level rule matches "select Seattle",
    // then the ListItemsRule matches the "Seattle" part. The following code
    // gets the property of each part, which is set when the word is added to
    // ruleListItems in RebuildGrammar.
    oCommand = Result.PhraseInfo.Properties.Item(0).Children.Item(0);
    index = oCommand.Id;
    oNumber = Result.PhraseInfo.Properties.Item(1).Children.Item(0);
    index1 = oNumber.Id;
    oCard = Result.PhraseInfo.Properties.Item(2).Children.Item(0);
    index2 = oCard.Id;

    if ((System.Decimal)Result.PhraseInfo.GrammarId == grammarId)
    {
        // Check whether the item at the same position in the list still has
        // the same text. This prevents the rare case where the user keeps
        // talking while the list is being added to or removed from; by the
        // time this event is fired and handled, the list may have already
        // changed.
        if (oCard.Name.CompareTo(libcards[index2].ToString()) == 0 ||
            oCard.Name.CompareTo(cryptcards[index2].ToString()) == 0)
        {
            listView1.Items[index2].Selected = true;
            listView1.Items[index2].Focused = true;
            listView1.TopItem = listView1.Items[index2];
            txtNumber.Text = oNumber.Name;
            comboBox2.SelectedIndex = index;
        }
    }
}
private void ContexRecognition(int iIndex, object obj, SpeechRecognitionType type, ISpeechRecoResult result)
{
    SetMessage?.Invoke(result.PhraseInfo.GetText(0, -1, true));
}
/// <summary>
/// Main objRecoContext event.
/// Launched when the engine has recognized a phrase.
/// </summary>
/// <param name="e">Contains information on the phrase that was recognized.</param>
public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult e)
{
    // Calculate accuracy.
    float accuracy = (float)e.PhraseInfo.Elements.Item(0).EngineConfidence;

    // Adjust accuracyMax dynamically.
    if (accuracyMax < accuracy)
        accuracyMax = accuracy;
    if (accuracy < 0)
        accuracy = 0;
    accuracy = (int)((float)accuracy / accuracyMax * 100);
    label2.Text = "Accuracy " + accuracy.ToString() + "%";

    // Get the phrase.
    string phrase = e.PhraseInfo.GetText(0, -1, true);
    // Make sure it's in lower case (for safer use only).
    phrase = phrase.ToLower();

    // If anything was recognized ...
    if (phrase != "" && accuracy >= accuracyLimit)
    {
        // Only if the agent is enabled.
        if (menuItem14.Checked == true)
        {
            agent1.StopAll("");
            agent1.Speak(phrase, "");
        }

        switch (e.PhraseInfo.Rule.Name) // rule name (not the phrase!)
        {
            case "Activate":
            {
                // Only if the agent is enabled.
                if (menuItem14.Checked == true)
                {
                    // Show the character.
                    agent1.Show(false);
                }
                // Load grammar.
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                // Only if the agent is enabled.
                if (menuItem14.Checked == true)
                {
                    // Animate the character.
                    agent1.Play("StartListening");
                    agent1.Speak("I'm listening", "");
                }
                break;
            }
            case "Deactivate":
            {
                // Load grammar.
                SAPIGrammarFromFile("XMLActivate.xml");
                // Notify the user.
                label1.Text = "Deactivate";
                // Only if the agent is enabled.
                if (menuItem14.Checked == true)
                {
                    // Animate the character.
                    agent1.Play("Wave");
                    agent1.Hide(false);
                }
                break;
            }
            case "Start":
            {
                keybd_event((byte)Keys.LWin, 0, 0, 0); // key down
                keybd_event((byte)Keys.LWin, 0, 2, 0); // key up
                // Load grammar.
                SAPIGrammarFromFile("XMLStart.xml");
                // Notify the user.
                label1.Text = "Start";
                break;
            }
            case "Right":
            {
                keybd_event((byte)Keys.Right, 0, 0, 0); // key down
                keybd_event((byte)Keys.Right, 0, 2, 0); // key up
                break;
            }
            case "Left":
            {
                keybd_event((byte)Keys.Left, 0, 0, 0); // key down
                keybd_event((byte)Keys.Left, 0, 2, 0); // key up
                break;
            }
            case "Up":
            {
                keybd_event((byte)Keys.Up, 0, 0, 0); // key down
                keybd_event((byte)Keys.Up, 0, 2, 0); // key up
                break;
            }
            case "Down":
            {
                keybd_event((byte)Keys.Down, 0, 0, 0); // key down
                keybd_event((byte)Keys.Down, 0, 2, 0); // key up
                break;
            }
            case "Enter":
            {
                keybd_event((byte)Keys.Enter, 0, 0, 0); // key down
                keybd_event((byte)Keys.Enter, 0, 2, 0); // key up
                break;
            }
            case "Escape":
            {
                keybd_event((byte)Keys.Escape, 0, 0, 0); // key down
                keybd_event((byte)Keys.Escape, 0, 2, 0); // key up
                keybd_event((byte)Keys.LMenu, 0, 2, 0);  // key up
                // Load grammar (used to reset the grammar in case it contains menu items ...).
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                break;
            }
            case "PureEscape":
            {
                keybd_event((byte)Keys.Escape, 0, 0, 0); // key down
                keybd_event((byte)Keys.Escape, 0, 2, 0); // key up
                break;
            }
            case "Alt":
            {
                keybd_event((byte)Keys.LMenu, 0, 0, 0); // key down
                keybd_event((byte)Keys.LMenu, 0, 2, 0); // key up
                // Check if there is any menu and hook it.
                IntPtr hWnd = GetForegroundWindow();
                IntPtr hMnu = GetMenu(hWnd);
                int mnuCnt = GetMenuItemCount(hMnu);
                if (mnuCnt != 0)
                {
                    // Only if the agent is enabled.
                    if (menuItem14.Checked == true)
                    {
                        // Animate the character.
                        agent1.Play("DoMagic1");
                        agent1.Think("Hooking menu ...");
                    }
                    // Add the menu to the grammar.
                    hookMenu(hMnu);
                    // Only if the agent is enabled.
                    if (menuItem14.Checked == true)
                    {
                        // Animate the character.
                        agent1.Play("Idle1_1");
                    }
                }
                else
                {
                    // Load grammar.
                    SAPIGrammarFromFile("XMLDeactivate.xml");
                    // Notify the user.
                    label1.Text = "Activate";
                }
                break;
            }
            case "Tab":
            {
                keybd_event((byte)Keys.Tab, 0, 0, 0); // key down
                keybd_event((byte)Keys.Tab, 0, 2, 0); // key up
                break;
            }
            case "ShiftTab":
            {
                keybd_event((byte)Keys.LShiftKey, 0, 0, 0); // key down
                keybd_event((byte)Keys.Tab, 0, 0, 0);       // key down
                keybd_event((byte)Keys.Tab, 0, 2, 0);       // key up
                keybd_event((byte)Keys.LShiftKey, 0, 2, 0); // key up
                break;
            }
            case "CloseProgram":
            {
                Close();
                break;
            }
            case "ShowAbout":
            {
                if (frmAbout1 == null)
                {
                    // Show frmAbout.
                    frmAbout1 = new frmAbout();
                    frmAbout1.Closed += new EventHandler(frmAbout1_Closed);
                    // Send the user profile.
                    frmAbout1.Tag = (string)objRecoContext.Recognizer.Profile.GetDescription(0);
                    frmAbout1.Show();
                }
                // Load grammar.
                SAPIGrammarFromFile("XMLAbout.xml");
                // Notify the user.
                label1.Text = "About Speech Recognition";
                break;
            }
            case "CloseAbout":
            {
                // Close frmAbout.
                if (frmAbout1 != null)
                {
                    frmAbout1.Close();
                    frmAbout1 = null;
                }
                break;
            }
            case "ShowCommands":
            {
                if (frmCommands1 == null)
                {
                    // Show frmCommands.
                    frmCommands1 = new frmCommands();
                    frmCommands1.Closed += new EventHandler(frmCommands1_Closed);
                    // Send the grammar.
                    frmCommands1.Tag = label1.Text;
                    frmCommands1.Show();
                }
                // Load grammar.
                SAPIGrammarFromFile("XMLCommands.xml");
                break;
            }
            case "CloseCommands":
            {
                // Close frmCommands.
                if (frmCommands1 != null)
                {
                    frmCommands1.Close();
                    frmCommands1 = null;
                }
                break;
            }
            case "ShowFavorites":
            {
                if (frmFavorites1 == null)
                {
                    // Show frmFavorites.
                    frmFavorites1 = new frmFavorites();
                    frmFavorites1.Closed += new EventHandler(frmFavorites1_Closed);
                    // Send the file name.
                    frmFavorites1.Tag = appPath + "XMLFavorites.xml";
                    frmFavorites1.Show();
                }
                // Load grammar.
                SAPIGrammarFromFile("XMLFavorites.xml");
                // Notify the user.
                label1.Text = "Favorites";
                break;
            }
            case "CloseFavorites":
            {
                // Close frmFavorites.
                if (frmFavorites1 != null)
                {
                    frmFavorites1.Close();
                    frmFavorites1 = null;
                }
                break;
            }
            case "CloseForm":
            {
                IntPtr hWnd = GetForegroundWindow();
                // Make sure we are not closing our own program ...
                if (hWnd != this.Handle)
                {
                    keybd_event((byte)Keys.LMenu, 0, 0, 0); // key down
                    keybd_event((byte)Keys.F4, 0, 0, 0);    // key down
                    keybd_event((byte)Keys.LMenu, 0, 2, 0); // key up
                    keybd_event((byte)Keys.F4, 0, 2, 0);    // key up
                }
                break;
            }
            case "Programs":
            case "Documents":
            case "Settings":
            case "Search":
            case "Help":
            case "Run":
            {
                keybd_event((byte)(e.PhraseInfo.Rule.Name[0]), 0, 0, 0); // key down
                keybd_event((byte)(e.PhraseInfo.Rule.Name[0]), 0, 2, 0); // key up
                // Load grammar.
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                break;
            }
            case "RunProgram":
            {
                // Close frmFavorites.
                if (frmFavorites1 != null)
                {
                    frmFavorites1.Close();
                    frmFavorites1 = null;
                }
                try
                {
                    System.Diagnostics.Process.Start(phrase);
                }
                catch
                {
                    // Only if the agent is enabled.
                    if (menuItem14.Checked == true)
                    {
                        agent1.Speak("Could not run : " + phrase, "");
                    }
                }
                // Load grammar.
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                break;
            }
            case "SwitchProgram":
            {
                keybd_event((byte)Keys.LMenu, 0, 0, 0); // key down
                keybd_event((byte)Keys.Tab, 0, 0, 0);   // key down
                keybd_event((byte)Keys.Tab, 0, 2, 0);   // key up
                // Load grammar.
                SAPIGrammarFromFile("XMLSwitchProgram.xml");
                // Notify the user.
                label1.Text = "Switch Program";
                break;
            }
            case "SwitchEnter":
            {
                keybd_event((byte)Keys.LMenu, 0, 2, 0); // key up
                // Load grammar.
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                break;
            }
            case "HoldKey":
            {
                // Load grammar.
                SAPIGrammarFromFile("XMLStickyKeys.xml");
                // Notify the user.
                label1.Text = "Press key";
                break;
            }
            case "ReleaseKey":
            {
                timer2.Enabled = false;
                // Load grammar.
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                break;
            }
            case "HoldRight":
            {
                keyHolding = (byte)Keys.Right;
                timer2.Enabled = true;
                break;
            }
            case "HoldLeft":
            {
                keyHolding = (byte)Keys.Left;
                timer2.Enabled = true;
                break;
            }
            case "HoldUp":
            {
                keyHolding = (byte)Keys.Up;
                timer2.Enabled = true;
                break;
            }
            case "HoldDown":
            {
                keyHolding = (byte)Keys.Down;
                timer2.Enabled = true;
                break;
            }
            case "PageUp":
            {
                keybd_event((byte)Keys.PageUp, 0, 0, 0); // key down
                keybd_event((byte)Keys.PageUp, 0, 2, 0); // key up
                break;
            }
            case "Yes":
            {
                keybd_event((byte)Keys.Y, 0, 0, 0); // key down
                keybd_event((byte)Keys.Y, 0, 2, 0); // key up
                break;
            }
            case "No":
            {
                keybd_event((byte)Keys.N, 0, 0, 0); // key down
                keybd_event((byte)Keys.N, 0, 2, 0); // key up
                break;
            }
            case "BackSpace":
            {
                keybd_event((byte)Keys.Back, 0, 0, 0); // key down
                keybd_event((byte)Keys.Back, 0, 2, 0); // key up
                break;
            }
            case "ShutDown":
            {
                Shell32.ShellClass a = new Shell32.ShellClass();
                a.ShutdownWindows();
                // Load grammar.
                SAPIGrammarFromFile("XMLShutDown.xml");
                // Notify the user.
                label1.Text = "Shut Down";
                break;
            }
            case "ActivateWithoutAnimation":
            {
                // Load grammar.
                SAPIGrammarFromFile("XMLDeactivate.xml");
                // Notify the user.
                label1.Text = "Activate";
                break;
            }
            case "EnterNumericState":
            {
                // Load grammar.
                SAPIGrammarFromFile("XMLNumericState.xml");
                // Notify the user.
                label1.Text = "Numeric State...";
                break;
            }
            case "Zero":
            case "One":
            case "Two":
            case "Three":
            case "Four":
            case "Five":
            case "Six":
            case "Seven":
            case "Eight":
            case "Nine":
            {
                // Maps the digit character to its NumPad virtual key:
                // '0' (48) + '0' (48) = 96 = VK_NUMPAD0, and so on.
                byte k = (byte)e.PhraseInfo.GetText(0, -1, false)[0];
                keybd_event((byte)(k + '0'), 0, 0, 0); // key down
                keybd_event((byte)(k + '0'), 0, 2, 0); // key up
                break;
            }
            case "Plus":
            {
                keybd_event((byte)Keys.Add, 0, 0, 0); // key down
                keybd_event((byte)Keys.Add, 0, 2, 0); // key up
                break;
            }
            case "Minus":
            {
                keybd_event((byte)Keys.Subtract, 0, 0, 0); // key down
                keybd_event((byte)Keys.Subtract, 0, 2, 0); // key up
                break;
            }
            case "Div":
            {
                keybd_event((byte)Keys.Divide, 0, 0, 0); // key down
                keybd_event((byte)Keys.Divide, 0, 2, 0); // key up
                break;
            }
            case "Mul":
            {
                keybd_event((byte)Keys.Multiply, 0, 0, 0); // key down
                keybd_event((byte)Keys.Multiply, 0, 2, 0); // key up
                break;
            }
            case "Equal":
            {
                keybd_event(187, 0, 0, 0); // key down (VK_OEM_PLUS)
                keybd_event(187, 0, 2, 0); // key up
                break;
            }
            case "EnterAlphabeticState":
            {
                // Load grammar.
                SAPIGrammarFromFile("XMLAlphabeticState.xml");
                // Notify the user.
                label1.Text = "Alphabetic State...";
                break;
            }
            case "abcA": case "abcB": case "abcC": case "abcD": case "abcE": case "abcF": case "abcG":
            case "abcH": case "abcI": case "abcJ": case "abcK": case "abcL": case "abcM": case "abcN":
            case "abcO": case "abcP": case "abcQ": case "abcR": case "abcS": case "abcT": case "abcU":
            case "abcV": case "abcW": case "abcX": case "abcY": case "abcZ":
            {
                firstRecognition = phrase;
                string str1 = phrase;
                str1 = str1.ToUpper();
                keybd_event((byte)(str1[0]), 0, 0, 0); // key down
                keybd_event((byte)(str1[0]), 0, 2, 0); // key up
                break;
            }
            case "At":
            {
                keybd_event((byte)Keys.LShiftKey, 0, 0, 0); // key down
                keybd_event((byte)Keys.D2, 0, 0, 0);        // key down
                keybd_event((byte)Keys.D2, 0, 2, 0);        // key up
                keybd_event((byte)Keys.LShiftKey, 0, 2, 0); // key up
                break;
            }
            case "UnderLine":
            {
                keybd_event((byte)Keys.LShiftKey, 0, 0, 0); // key down
                keybd_event((byte)Keys.OemMinus, 0, 0, 0);  // key down
                keybd_event((byte)Keys.OemMinus, 0, 2, 0);  // key up
                keybd_event((byte)Keys.LShiftKey, 0, 2, 0); // key up
                break;
            }
            case "Dash":
            {
                keybd_event((byte)Keys.Subtract, 0, 0, 0); // key down
                keybd_event((byte)Keys.Subtract, 0, 2, 0); // key up
                break;
            }
            case "Dot":
            {
                keybd_event(190, 0, 0, 0); // key down (VK_OEM_PERIOD)
                keybd_event(190, 0, 2, 0); // key up
                break;
            }
            case "BackSlash":
            {
                keybd_event((byte)Keys.Divide, 0, 0, 0); // key down
                keybd_event((byte)Keys.Divide, 0, 2, 0); // key up
                break;
            }
            case "AlphabeticStateNo":
            {
                // Delete the first letter.
                keybd_event((byte)Keys.Back, 0, 0, 0); // key down
                keybd_event((byte)Keys.Back, 0, 2, 0); // key up
                // Write the replacement letter.
                string str1 = firstRecognition;
                // Fix misrecognition.
                switch (firstRecognition)
                {
                    case "a": str1 = "h"; break;
                    case "b": str1 = "d"; break;
                    case "c": str1 = "t"; break;
                    case "d": str1 = "p"; break;
                    case "f": str1 = "x"; break;
                    case "h": str1 = "f"; break;
                    case "m": str1 = "n"; break;
                    case "n": str1 = "l"; break;
                    case "l": str1 = "m"; break;
                    case "p": str1 = "v"; break;
                    case "u": str1 = "q"; break;
                    case "v": str1 = "t"; break;
                    case "e": str1 = "b"; break;
                    case "j": str1 = "k"; break;
                }
                firstRecognition = str1;
                str1 = str1.ToUpper();
                keybd_event((byte)(str1[0]), 0, 0, 0); // key down
                keybd_event((byte)(str1[0]), 0, 2, 0); // key up
                break;
            }
            // Otherwise press the key (probably a menu ...).
            default:
            {
                string str1 = e.PhraseInfo.Rule.Name;
                str1 = str1.ToUpper();
                keybd_event((byte)(str1[0]), 0, 0, 0); // key down
                keybd_event((byte)(str1[0]), 0, 2, 0); // key up
                // Could be a submenu (hook it).
                hookSubmenu(e.PhraseInfo.Rule.Name[0].ToString());
                break;
            }
        }
    }
    // If not recognized ...
    else
    {
        // Only if the agent is enabled.
        if (menuItem14.Checked == true)
        {
            // Animate the character.
            agent1.Play("Decline");
        }
    }
}
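The snippet above calls keybd_event, GetForegroundWindow, GetMenu, and GetMenuItemCount without showing their declarations. A minimal sketch of the standard user32 P/Invoke signatures it relies on; the containing class name is illustrative (the original presumably declares them inside the form class):

// Standard user32 P/Invoke declarations. dwFlags = 0 presses the key;
// dwFlags = 2 (KEYEVENTF_KEYUP) releases it, matching the 0/2 literals above.
using System;
using System.Runtime.InteropServices;

internal static class NativeMethods
{
    [DllImport("user32.dll")]
    internal static extern void keybd_event(byte bVk, byte bScan, uint dwFlags, UIntPtr dwExtraInfo);

    [DllImport("user32.dll")]
    internal static extern IntPtr GetForegroundWindow();

    [DllImport("user32.dll")]
    internal static extern IntPtr GetMenu(IntPtr hWnd);

    [DllImport("user32.dll")]
    internal static extern int GetMenuItemCount(IntPtr hMenu);
}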
private void RecognitionHandler(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    int id = Result.PhraseInfo.Rule.Id;
    foreach (CommandRecognizerMode mode in Modes)
    {
        if (id >= mode.FirstRuleId && id <= mode.LastRuleId)
        {
            mode.OnCommandRecognized(Result);
        }
    }
}
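This handler dispatches on contiguous rule-id ranges. A sketch of the CommandRecognizerMode shape it implies; everything here is inferred from the three members the handler uses and is an assumption, not the original type:

// Assumed shape of CommandRecognizerMode. Each mode owns an inclusive range of
// grammar rule ids, so a single range check routes a result to the right mode.
public class CommandRecognizerMode
{
    public int FirstRuleId { get; set; }
    public int LastRuleId { get; set; }

    // Illustrative callback; the original may raise an event or act directly.
    public Action<ISpeechRecoResult> CommandRecognized;

    public void OnCommandRecognized(ISpeechRecoResult result)
    {
        var handler = CommandRecognized;
        if (handler != null)
            handler(result);
    }
}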
private void writeCommand(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    commandBox.Text = Result.PhraseInfo.GetText(0, -1, true);
}
/// <summary>
/// Fires when a voice command has been received.
/// </summary>
/// <param name="StreamNumber"></param>
/// <param name="StreamPosition"></param>
/// <param name="RecognitionType"></param>
/// <param name="Result"></param>
private void recoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    // Fire the CommandReceived event with the received command text.
    CommandReceived(Result.PhraseInfo.GetText(0, -1, false));
}
private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    txtReco.Text = Result.PhraseInfo.GetText(0, -1, true);
}
/**
 * Places a recognized command in the buffer.
 *
 * @param StreamNumber
 * @param StreamPosition
 * @param RecognitionType
 * @param Result
 */
private void RecoEvent(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    _buffer.Enqueue(Result.PhraseInfo.GetText(0, -1, true));
}
public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    Debug.WriteLine("Recognition: " + Result.PhraseInfo.GetText(0, -1, true) + ", " + StreamNumber + ", " + StreamPosition);
    this.understandet(Result.PhraseInfo.GetText(0, -1, true));
    //ISpeechPhraseProperty oItem;
}
// Create a simple handler for the SpeechRecognized event.
//void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
//{
//    MessageBox.Show("Speech recognized: " + e.Result.Text);
//    textBoxX1.Text += e.Result.Text;
//}

//private void listBox1_SelectedIndexChanged(object sender, EventArgs e)
//{
//    /*FileStream stream = File.Open(lstSongs.Text, FileMode.Open);
//    SpeechAudioFormatInfo speechaudioformatinfo = new SpeechAudioFormatInfo(100,
//        System.Speech.AudioFormat.AudioBitsPerSample.Eight,
//        System.Speech.AudioFormat.AudioChannel.Mono); // mono channel
//    // System.Speech.AudioFormat.AudioChannel.Stereo for stereo
//    SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine();
//    Choices colors = new Choices();
//    colors.Add(new string[] { "海", "天", "星星", "点灯", "孩子" });
//    // Create a GrammarBuilder object and append the Choices object.
//    GrammarBuilder gb = new GrammarBuilder();
//    gb.Append(colors);
//    // Create the Grammar instance and load it into the speech recognition engine.
//    Grammar g = new Grammar(gb);
//    speechRecognitionEngine.LoadGrammar(g);
//    speechRecognitionEngine.SetInputToAudioStream(stream, speechaudioformatinfo);
//    speechRecognitionEngine.SpeechRecognized +=
//        new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
//    stream.Close();
//    */
//}

private void wavRecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    strData = Result.PhraseInfo.GetText(0, -1, true);
    allRecognized += textBoxX1.Text;
    textBoxX1.Text = strData;

    string path = _WAVFile.Substring(0, _WAVFile.Length - 4);
    path += ".lrc";
    FileStream fs = new FileStream(path, FileMode.Append);
    StreamWriter writer = new StreamWriter(fs); // pass an Encoding here if a specific output encoding is required
    writer.WriteLine();
    // Convert the original text to a UTF-8 encoded byte array:
    //Byte[] change = System.Text.Encoding.UTF8.GetBytes(allRecognized.ToCharArray());
    // Or convert directly with Encoding.Convert:
    //Byte[] changde = Encoding.Convert(System.Text.Encoding.UTF8, System.Text.Encoding.ASCII, change);
    //allRecognized = changde.ToString();
    //allRecognized = Encoding.ASCII.GetString(Encoding.ASCII.GetBytes(allRecognized.ToCharArray())); // where allRecognized holds the UTF-8 data
    writer.Write(allRecognized); // note: StreamWriter.Write has no (string, Encoding) overload
    writer.Flush();
    fs.Close();
    allRecognized = "";

    // Lyrics-file encoding conversion:
    //Encoding ecp1252 = Encoding.GetEncoding(1252);
    //StreamReader sr = new StreamReader(path, Encoding.Unicode, false);
    //String newPath = path.Substring(0, _WAVFile.Length - 4) + ".lrc";
    //StreamWriter sw = new StreamWriter(newPath, false, ecp1252);
    //sw.Write(sr.ReadToEnd());
    //sw.Close();
    //sr.Close();
}
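The commented-out conversion attempts above suggest the goal was to control the output encoding of the .lrc file. The supported way is to pass the Encoding to the StreamWriter constructor rather than to Write(); a minimal sketch, reusing the snippet's path and allRecognized variables:

// Minimal sketch: the encoding belongs on the StreamWriter constructor.
// Appends as UTF-8; swap in Encoding.ASCII or Encoding.GetEncoding(1252)
// if the lyrics player requires it.
using (var writer = new StreamWriter(path, true /* append */, Encoding.UTF8))
{
    writer.WriteLine();
    writer.Write(allRecognized);
}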
// ulong GetAudioStreamPositionSeconds(string pos)
// {
//     ulong sec = UInt64.Parse(pos);
//     sec /= 2uL;   // bytes per sample (16-bit)
//     sec /= 44100; // samples per second
//     return sec;
// }

/// <summary>
/// Handles the 'SpInProcRecoContext.Recognition' event. Fires as the final
/// hypothesis for a phrase. Each word will be added to a list of
/// 'OrthographicResult's for the phrase.
/// WARNING: This can fire 2+ times on the same file-stream, causing the engine
/// to drop/reset important variables like 'PhraseInfo.StartTime',
/// 'word.AudioStreamOffset', and 'word.AudioTimeOffset'.
/// TODO: a fact that is exceedingly annoying to try to compensate for.
/// </summary>
/// <param name="StreamNumber"></param>
/// <param name="StreamPosition"></param>
/// <param name="RecognitionType"></param>
/// <param name="Result"></param>
void rc_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
#if DEBUG
    logfile.Log();
    logfile.Log("rc_Recognition() #" + StreamNumber + " StreamPosition= " + StreamPosition + " _generato= " + _generato);
    logfile.Log(". RecognitionType= " + RecognitionType); // <- standard.
    logfile.Log(". _phoneConverter.LanguageId= " + _phoneConverter.LanguageId);
    logfile.Log(". " + Result.PhraseInfo.GetText()); // (0, -1, true)
    logfile.Log(". _offset= " + _offset);
    logfile.Log(". PhraseInfo.AudioStreamPosition= " + Result.PhraseInfo.AudioStreamPosition);
//  logfile.Log(". . sec= " + GetAudioStreamPositionSeconds(Result.PhraseInfo.AudioStreamPosition.ToString()));
    logfile.Log(". PhraseInfo.AudioSizeBytes= " + Result.PhraseInfo.AudioSizeBytes);
    logfile.Log(". PhraseInfo.StartTime= " + Result.PhraseInfo.StartTime);
    logfile.Log(". PhraseInfo.AudioSizeTime= " + Result.PhraseInfo.AudioSizeTime);
    logfile.Log(". Result.PhraseInfo.Rule.Name= " + Result.PhraseInfo.Rule.Name); // <- blank.
    logfile.Log(". Result.PhraseInfo.Rule.Id= " + Result.PhraseInfo.Rule.Id);
    logfile.Log(". Result.PhraseInfo.Rule.EngineConfidence= " + Result.PhraseInfo.Rule.EngineConfidence);
    logfile.Log(". Result.PhraseInfo.Rule.Confidence= " + Result.PhraseInfo.Rule.Confidence);
    logfile.Log(". wordcount= " + Result.PhraseInfo.Elements.Count);
#endif

    List<OrthographicResult> ars = null;
    switch (_generato)
    {
        case Generator.Dictati: ars = _ars_def; break;
        case Generator.Dialogi: ars = _ars_enh; break;
    }

    foreach (ISpeechPhraseElement word in Result.PhraseInfo.Elements)
    {
#if DEBUG
        logfile.Log(". . word= " + word.DisplayText);
        logfile.Log(". . LexicalForm= " + word.LexicalForm);
        logfile.Log(". . DisplayAttributes= " + word.DisplayAttributes);
        logfile.Log(". . EngineConfidence= " + word.EngineConfidence);
        logfile.Log(". . ActualConfidence= " + word.ActualConfidence);
        var ids = (ushort[])word.Pronunciation;
        foreach (var id in ids)
        {
            logfile.Log(". . . PhoneId= " + id + " - " + _phoneConverter.IdToPhone(id));
        }
        logfile.Log(". . word.AudioStreamOffset= " + word.AudioStreamOffset);
        logfile.Log(". . word.AudioSizeBytes= " + word.AudioSizeBytes);
        logfile.Log(". . word.AudioTimeOffset= " + word.AudioTimeOffset);
        logfile.Log(". . word.AudioSizeTime= " + word.AudioSizeTime);
#endif

        var ar = new OrthographicResult();
        ar.Orthography = word.DisplayText;

        string phons = _phoneConverter.IdToPhone(word.Pronunciation); // NOTE: object is a ushort or ushort[]
        ar.Phons = new List<string>(phons.Split(' '));
        ar.Confi = word.EngineConfidence;
        ar.Level = word.ActualConfidence.ToString().Replace("SEC", String.Empty).Replace("Confidence", String.Empty);
        ar.Start = _offset + Utility.GarpstoSecs(word.AudioTimeOffset);
        ar.Stop  = _offset + Utility.GarpstoSecs(word.AudioTimeOffset + word.AudioSizeTime);

        ars.Add(ar);
    }

    // NOTE: Recognition could be fired before the entire audiofile has
    // completed, which means it's going to fire again, but the AudioTimeOffsets
    // will be completely borked, obviously. So add this time-offset to any
    // second or subsequent Recognition event that happens on this stream.
    _offset += Utility.GarpstoSecs(Result.PhraseInfo.AudioSizeTime); // TODO: is not accurate.

    if (_text == String.Empty)
    {
        ++Confidence_def_count;
        Confidence_def += Result.PhraseInfo.Rule.EngineConfidence;
    }

#if DEBUG
    logfile.Log();
#endif
}
private void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    ////result_textBox.AppendText(Result.PhraseInfo.GetText(0, -1, true) + "\n");
    //string result;
    //ISpeechPhraseInfo info;
    //ISpeechPhraseAlternate alternate;
    //ISpeechPhraseAlternates alternates = Result.Alternates(20, 0, -1);
    //ISpeechPhraseReplacements replacements;
    //if (alternates != null)
    //    alternate = alternates.Item(0);
    //info = Result.PhraseInfo;
    //replacements = info.Replacements;
    //string rep;
    //if (replacements != null)
    //    rep = replacements.Item(0).Text;
    //result = Result.PhraseInfo.GetText(0, -1, true);
    //if (result.Length < 1) result = "???";
    //OnSpeechRecognized(null);

    string result;
    float confidence;
    RecognizedSpeechAlternate[] alternates;
    RecognizedSpeech recognizedSpeech;

    result = Result.PhraseInfo.GetText(0, -1, true);
    confidence = Result.PhraseInfo.Rule.EngineConfidence;
    //confidence = Result.PhraseInfo.Rule.Confidence;

    if (result.Length < 1)
        return;

    alternates = new RecognizedSpeechAlternate[1];
    alternates[0] = new RecognizedSpeechAlternate(result, confidence);
    recognizedSpeech = new RecognizedSpeech(alternates);
    OnSpeechRecognized(recognizedSpeech);
}
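A sketch of the wrapper types this handler builds; their shape is inferred solely from the constructor calls above and is an assumption, not the original definitions (which may carry more members):

// Assumed shapes of the wrapper types used by the handler above.
public class RecognizedSpeechAlternate
{
    public string Text { get; private set; }
    public float Confidence { get; private set; }

    public RecognizedSpeechAlternate(string text, float confidence)
    {
        Text = text;
        Confidence = confidence;
    }
}

public class RecognizedSpeech
{
    public RecognizedSpeechAlternate[] Alternates { get; private set; }

    public RecognizedSpeech(RecognizedSpeechAlternate[] alternates)
    {
        Alternates = alternates;
    }
}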
private void context_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    m_parent.onSpeech(Result.PhraseInfo.Rule.Name);
}
//***************************************************************
//' Event fired when the speech recognition engine recognizes audio
//***************************************************************
private void wavRecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
{
    strData = Result.PhraseInfo.GetText(0, -1, true);
    // parseSpeechResult(strData); -- call a function to parse it if wanted
    _lastRecognized = textBox1.Text;
    textBox1.Text = strData;
}