/// <summary>
/// Initializes the speech recognition system and wires up all SAPI
/// recognition callbacks. Recognized phrases are forwarded to the
/// supplied receiver.
/// </summary>
/// <param name="callerReciever">Callback target that receives recognized text.</param>
public void Init(IVoiceReciever callerReciever)
{
    reciever = callerReciever;
    busy = false;

    // Locate a Japanese recognition engine among the installed recognizers.
    bool recognizeHit = false;
    this.RecognizerRule = new SpeechLib.SpInProcRecoContext();
    foreach (SpObjectToken recoperson in this.RecognizerRule.Recognizer.GetRecognizers()) // Go through the SR enumeration
    {
        string language = recoperson.GetAttribute("Language");
        if (language == "411") // "411" = Japanese language id
        {
            this.RecognizerRule.Recognizer.Recognizer = recoperson;
            recognizeHit = true;
            break;
        }
    }
    if (!recognizeHit)
    {
        // NOTE(review): we only warn and then continue with whatever default
        // recognizer is active; callers may prefer to abort here instead.
        System.Windows.Forms.MessageBox.Show("日本語認識が利用できません", "マイク初期化エラー");
    }

    this.RecognizerRule.Recognizer.AudioInput = GetAudioInput();

    // Intermediate hypotheses are intentionally ignored.
    this.RecognizerRule.Hypothesis += delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult result)
    {
    };

    this.RecognizerRule.Recognition += delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
    {
        // Re-entrancy guard: drop recognition events that arrive while a
        // previous one is still being handled.
        if (busy)
        {
            return;
        }
        busy = true;
        try
        {
            string text = isrr.PhraseInfo.GetText(0, -1, true);
            reciever.ReuqestByVoice(text);
        }
        finally
        {
            // BUGFIX: previously "busy" was reset only on the normal path, so
            // an exception thrown by the receiver left it stuck at true and
            // silently disabled all further recognition.
            busy = false;
        }
    };

    // Stream start / misrecognition events are intentionally no-ops.
    this.RecognizerRule.StartStream += delegate(int streamNumber, object streamPosition)
    {
    };
    this.RecognizerRule.FalseRecognition += delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
    {
    };
}
/// <summary>
/// This function will create the main SpInProcRecoContext object
/// and other required objects like Grammar and rules.
/// In this sample, we are building grammar dynamically since
/// listbox content can change from time to time.
/// If your grammar is static, you can write your grammar file
/// and ask SAPI to load it during run time. This can reduce the
/// complexity of your code.
/// </summary>
private void InitializeSpeech()
{
    Debug.WriteLine("Initializing SAPI objects...");
    try
    {
        // First of all, let's create the main reco context object.
        // In this sample, we are using inproc reco context. Shared reco
        // context is also available. Please see the document to decide
        // which is best for your application.
        objRecoContext = new SpeechLib.SpInProcRecoContext();

        // Resolve the default audio-input device token (the "AudioIn"
        // category default, typically the microphone) and attach it.
        SpeechLib.SpObjectTokenCategory objAudioTokenCategory = new SpeechLib.SpObjectTokenCategory();
        objAudioTokenCategory.SetId(SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
        SpeechLib.SpObjectToken objAudioToken = new SpeechLib.SpObjectToken();
        objAudioToken.SetId(objAudioTokenCategory.Default, SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
        objRecoContext.Recognizer.AudioInput = objAudioToken;

        // Then, let's set up the event handler. We only care about
        // Hypothesis and Recognition events in this sample.
        //objRecoContext.Hypothesis += new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);
        objRecoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);

        // Now let's build the grammar.
        // The top level rule consists of two parts: "select <items>".
        // So we first add a word transition for the "select" part, then
        // a rule transition for the "<items>" part, which is dynamically
        // built as items are added or removed from the listbox.
        grammar = objRecoContext.CreateGrammar(grammarId);
        ruleTopLevel = grammar.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
        ruleCommand = grammar.Rules.Add("CommandRule", SpeechRuleAttributes.SRADynamic, 2);
        ruleNumbers = grammar.Rules.Add("NumberRule", SpeechRuleAttributes.SRADynamic, 3);
        ruleListItems = grammar.Rules.Add("ListItemsRule", SpeechRuleAttributes.SRADynamic, 4);

        // Prepare intermediate states for the top-level rule: the phrase
        // is parsed as <pre-command> <command> <number> <list item>.
        SpeechLib.ISpeechGrammarRuleState stateAfterPre;
        SpeechLib.ISpeechGrammarRuleState stateAfterCommand;
        SpeechLib.ISpeechGrammarRuleState stateAfterNumber;
        stateAfterPre = ruleTopLevel.AddState();
        stateAfterCommand = ruleTopLevel.AddState();
        stateAfterNumber = ruleTopLevel.AddState();

        // Leading keyword transition (PreCommandString) into the command part.
        object PropValue = "";
        ruleTopLevel.InitialState.AddWordTransition(stateAfterPre, PreCommandString, null, SpeechGrammarWordType.SGLexicalNoSpecialChars, "", 0, ref PropValue, 1.0F);

        // Command keywords: Add / Set / Remove. A null destination state
        // means the transition ends the rule.
        String word;
        PropValue = "";
        word = "Add";
        ruleCommand.InitialState.AddWordTransition(null, word, "", SpeechGrammarWordType.SGLexicalNoSpecialChars, word, 0, ref PropValue, 1f);
        word = "Set";
        ruleCommand.InitialState.AddWordTransition(null, word, "", SpeechGrammarWordType.SGLexicalNoSpecialChars, word, 1, ref PropValue, 1f);
        word = "Remove";
        ruleCommand.InitialState.AddWordTransition(null, word, "", SpeechGrammarWordType.SGLexicalNoSpecialChars, word, 2, ref PropValue, 1f);
        stateAfterPre.AddRuleTransition(stateAfterCommand, ruleCommand, "", 1, ref PropValue, 1F);

        // Number rule accepts the literal words "0" through "100".
        PropValue = "";
        for (int x = 0; x <= 100; x++)
        {
            word = Convert.ToString(x);
            // Note: if the same word is added more than once to the same
            // rule state, SAPI will return error. In this sample, we
            // don't allow identical items in the list box so no need for
            // the checking, otherwise special checking for identical words
            // would have to be done here.
            ruleNumbers.InitialState.AddWordTransition(null, word, "", SpeechGrammarWordType.SGLexicalNoSpecialChars, word, x, ref PropValue, 1F);
        }
        stateAfterCommand.AddRuleTransition(stateAfterNumber, ruleNumbers, "", 2, ref PropValue, 1.0F);
        PropValue = "";
        stateAfterNumber.AddRuleTransition(null, ruleListItems, "", 3, ref PropValue, 1.0F);

        // Now add existing list items to the ruleListItems
        RebuildGrammar();

        // Now we can activate the top level rule. In this sample, only
        // the top level rule needs to activated. The ListItemsRule is
        // referenced by the top level rule.
        grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
        speechInitialized = true;
    }
    catch (Exception e)
    {
        // Surface the failure to the user, then rethrow so the caller
        // knows initialization did not complete.
        System.Windows.Forms.MessageBox.Show(
            "Exception caught when initializing SAPI." +
            " This application may not run correctly.\r\n\r\n" +
            e.ToString(),
            "Error");
        throw;
    }
}
/// <summary>
/// Creates the listening assistant and allocates the in-process SAPI
/// recognition context used for rule-based recognition.
/// </summary>
public HmfListeningAssist()
{
    // In-proc context: recognition runs inside this process rather than
    // through the shared system recognizer.
    SpeechLib.SpInProcRecoContext context = new SpeechLib.SpInProcRecoContext();
    this.recognize_rule = context;
}
/// <summary>
/// Initializes the SAPI in-process recognizer: selects the Japanese
/// recognition engine, attaches the microphone, wires recognition
/// callbacks that drive iTunes via voice commands, and builds the
/// command grammar.
/// </summary>
private void initRecognizer()
{
    mRecognizerRule = new SpeechLib.SpInProcRecoContext();

    // Find a Japanese recognition engine ("411" = Japanese language id).
    bool hit = false;
    foreach (SpObjectToken recoPerson in mRecognizerRule.Recognizer.GetRecognizers())
    {
        if (recoPerson.GetAttribute("Language") == "411")
        {
            mRecognizerRule.Recognizer.Recognizer = recoPerson;
            hit = true;
            break;
        }
    }
    if (!hit)
    {
        // No Japanese engine installed: tell the user and quit.
        MessageBox.Show("日本語認識が利用できません");
        Application.Exit();
    }

    // Attach the microphone as the audio source; quit if unavailable.
    mRecognizerRule.Recognizer.AudioInput = createMicrofon();
    if (mRecognizerRule.Recognizer.AudioInput == null)
    {
        MessageBox.Show("マイク初期化エラー");
        Application.Exit();
    }

    // Show intermediate hypotheses in textBox1 while the user is speaking.
    mRecognizerRule.Hypothesis += delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult result)
    {
        string strText = result.PhraseInfo.GetText();
        textBox1.Text = strText;
    };

    // Final recognition: report confidence, then dispatch iTunes commands.
    mRecognizerRule.Recognition += delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
    {
        SpeechEngineConfidence confidence = isrr.PhraseInfo.Rule.Confidence;
        switch (confidence)
        {
            case SpeechEngineConfidence.SECHighConfidence:
                label3.Text = "Confidence is High";
                break;
            case SpeechEngineConfidence.SECNormalConfidence:
                label3.Text = "Confidence is Normal";
                break;
            case SpeechEngineConfidence.SECLowConfidence:
                // Low-confidence results are discarded outright.
                label3.Text = "Confidence is Low";
                textBox2.Text = "信頼性が低すぎます";
                return;
        }
        string strText = isrr.PhraseInfo.GetText();
        //isrr.PhraseInfo.
        label4.Text = isrr.RecoContext.Voice.Volume.ToString();

        // The phrase "えんいー" exits the application.
        if (strText == "えんいー")
        {
            Application.Exit();
        }
        if (itunes != null)
        {
            // Map each recognized phrase to the matching iTunes COM call.
            switch (strText)
            {
                case "あいちゅーんず.つぎのきょく":
                case "あいちゅーんず.つぎ":
                    itunes.NextTrack();
                    break;
                case "あいちゅーんず.まえのきょく":
                case "あいちゅーんず.まえ":
                    itunes.PreviousTrack();
                    break;
                case "あいちゅーんず.いちじていし":
                    itunes.Pause();
                    break;
                case "あいちゅーんず.ていし":
                    itunes.Stop();
                    break;
                case "あいちゅーんず.さいせい":
                    itunes.Play();
                    break;
                case "あいちゅーんず.しね":
                    // Quit iTunes and release our COM hooks.
                    itunes.Quit();
                    unhockiTunes();
                    break;
                case "あいちゅーんず.しずかに":
                    itunes.SoundVolume = 50;
                    break;
                case "あいちゅーんず.おおきく":
                    itunes.SoundVolume = 100;
                    break;
                case "あいちゅーんず.らんだむ":
                    itunes.CurrentPlaylist.Shuffle = !itunes.CurrentPlaylist.Shuffle;
                    break;
            }
        }
        else
        {
            // iTunes not hooked yet: only the wake-up phrase is honored.
            if (strText == "あいちゅーんず.おきろ")
            {
                initiTunes();
            }
        }
        textBox2.Text = strText;
    };

    // Clear the display when a new audio stream starts.
    mRecognizerRule.StartStream += delegate(int streamNumber, object streamPosition)
    {
        textBox1.Text = textBox2.Text = "";
    };

    // Signal misrecognitions visibly in the UI.
    mRecognizerRule.FalseRecognition += delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
    {
        textBox1.Text = textBox2.Text = label3.Text = "--Error!--";
    };

    // Build the command grammar: one top-level dynamic rule whose initial
    // state accepts each command phrase as a word transition.
    mRecognizerGrammarRule = mRecognizerRule.CreateGrammar();
    mRecognizerGrammarRule.Reset();
    mRecognizerGrammarRuleGrammarRule = mRecognizerGrammarRule.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.おきろ");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.つぎのきょく");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.まえのきょく");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.つぎ");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.まえ");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.いちじていし");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.ていし");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.さいせい");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.しね");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.しずかに");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.おおきく");
    mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "あいちゅーんず.らんだむ");
    // NOTE(review): the exit phrase "えんいー" is checked in the Recognition
    // handler but its grammar entry is commented out — presumably it can
    // never be recognized as written; confirm whether this is intentional.
    // mRecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "えんいー");

    // Commit the rules and activate the top-level rule.
    mRecognizerGrammarRule.Rules.Commit();
    mRecognizerGrammarRule.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
}
/// <summary>
/// This function will create the main SpInProcRecoContext object
/// and other required objects like Grammar and rules.
/// In this sample, we are building grammar dynamically since
/// listbox content can change from time to time.
/// If your grammar is static, you can write your grammar file
/// and ask SAPI to load it during run time. This can reduce the
/// complexity of your code.
/// </summary>
private void InitializeSpeech()
{
    Debug.WriteLine("Initializing SAPI objects...");
    try
    {
        // First of all, let's create the main reco context object.
        // In this sample, we are using inproc reco context. Shared reco
        // context is also available. Please see the document to decide
        // which is best for your application.
        objRecoContext = new SpeechLib.SpInProcRecoContext();

        // Resolve the default audio-input device token (the "AudioIn"
        // category default, typically the microphone) and attach it.
        SpeechLib.SpObjectTokenCategory objAudioTokenCategory = new SpeechLib.SpObjectTokenCategory();
        objAudioTokenCategory.SetId(SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
        SpeechLib.SpObjectToken objAudioToken = new SpeechLib.SpObjectToken();
        objAudioToken.SetId(objAudioTokenCategory.Default, SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
        objRecoContext.Recognizer.AudioInput = objAudioToken;

        // Then, let's set up the event handler. We only care about
        // Hypothesis and Recognition events in this sample.
        objRecoContext.Hypothesis += new _ISpeechRecoContextEvents_HypothesisEventHandler(
            RecoContext_Hypothesis);
        objRecoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(
            RecoContext_Recognition);

        // Now let's build the grammar.
        // The top level rule consists of two parts: "select <items>".
        // So we first add a word transition for the "select" part, then
        // a rule transition for the "<items>" part, which is dynamically
        // built as items are added or removed from the listbox.
        grammar = objRecoContext.CreateGrammar(grammarId);
        ruleTopLevel = grammar.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
        ruleListItems = grammar.Rules.Add("ListItemsRule", SpeechRuleAttributes.SRADynamic, 2);

        // State reached after the leading keyword has been spoken.
        SpeechLib.ISpeechGrammarRuleState stateAfterSelect;
        stateAfterSelect = ruleTopLevel.AddState();

        // Word transition for the leading keyword (PreCommandString).
        object PropValue = "";
        ruleTopLevel.InitialState.AddWordTransition(stateAfterSelect, PreCommandString, " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue, 1.0F);

        // Rule transition into the dynamically rebuilt list-items rule;
        // a null destination state ends the top-level rule.
        PropValue = "";
        stateAfterSelect.AddRuleTransition(null, ruleListItems, "", 1, ref PropValue, 1.0F);

        // Now add existing list items to the ruleListItems
        RebuildGrammar();

        // Now we can activate the top level rule. In this sample, only
        // the top level rule needs to activated. The ListItemsRule is
        // referenced by the top level rule.
        grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
        speechInitialized = true;
    }
    catch(Exception e)
    {
        // Surface the failure to the user, then rethrow so the caller
        // knows initialization did not complete.
        System.Windows.Forms.MessageBox.Show(
            "Exception caught when initializing SAPI." +
            " This application may not run correctly.\r\n\r\n" +
            e.ToString(),
            "Error");
        throw;
    }
}
/// <summary>
/// This function will create the main SpInProcRecoContext object
/// and other required objects like Grammar and rules.
/// In this sample, we are building grammar dynamically since
/// listbox content can change from time to time.
/// If your grammar is static, you can write your grammar file
/// and ask SAPI to load it during run time. This can reduce the
/// complexity of your code.
/// </summary>
private void InitializeSpeech()
{
    Debug.WriteLine("Initializing SAPI objects...");
    try
    {
        // First of all, let's create the main reco context object.
        // In this sample, we are using inproc reco context. Shared reco
        // context is also available. Please see the document to decide
        // which is best for your application.
        objRecoContext = new SpeechLib.SpInProcRecoContext();

        // Resolve the default audio-input device token (the "AudioIn"
        // category default, typically the microphone) and attach it.
        SpeechLib.SpObjectTokenCategory objAudioTokenCategory = new SpeechLib.SpObjectTokenCategory();
        objAudioTokenCategory.SetId(SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
        SpeechLib.SpObjectToken objAudioToken = new SpeechLib.SpObjectToken();
        objAudioToken.SetId(objAudioTokenCategory.Default, SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
        objRecoContext.Recognizer.AudioInput = objAudioToken;

        // Then, let's set up the event handler. We only care about
        // Hypothesis and Recognition events in this sample.
        objRecoContext.Hypothesis += new _ISpeechRecoContextEvents_HypothesisEventHandler(
            RecoContext_Hypothesis);
        objRecoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(
            RecoContext_Recognition);

        // Now let's build the grammar.
        // The top level rule consists of two parts: "select <items>".
        // So we first add a word transition for the "select" part, then
        // a rule transition for the "<items>" part, which is dynamically
        // built as items are added or removed from the listbox.
        grammar = objRecoContext.CreateGrammar(grammarId);
        ruleTopLevel = grammar.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic, 1);
        ruleListItems = grammar.Rules.Add("ListItemsRule", SpeechRuleAttributes.SRADynamic, 2);

        // State reached after the leading keyword has been spoken.
        SpeechLib.ISpeechGrammarRuleState stateAfterSelect;
        stateAfterSelect = ruleTopLevel.AddState();

        // Word transition for the leading keyword (PreCommandString).
        object PropValue = "";
        ruleTopLevel.InitialState.AddWordTransition(stateAfterSelect, PreCommandString, " ", SpeechGrammarWordType.SGLexical, "", 0, ref PropValue, 1.0F);

        // Rule transition into the dynamically rebuilt list-items rule;
        // a null destination state ends the top-level rule.
        PropValue = "";
        stateAfterSelect.AddRuleTransition(null, ruleListItems, "", 1, ref PropValue, 1.0F);

        // Now add existing list items to the ruleListItems
        RebuildGrammar();

        // Now we can activate the top level rule. In this sample, only
        // the top level rule needs to activated. The ListItemsRule is
        // referenced by the top level rule.
        grammar.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
        speechInitialized = true;
    }
    catch (Exception e)
    {
        // Surface the failure to the user, then rethrow so the caller
        // knows initialization did not complete.
        System.Windows.Forms.MessageBox.Show(
            "Exception caught when initializing SAPI." +
            " This application may not run correctly.\r\n\r\n" +
            e.ToString(),
            "Error");
        throw;
    }
}
/// <summary>
/// Constructor. Sets up two in-process SAPI recognition contexts — one for
/// rule-based command recognition and one for free dictation — and wires
/// their events together: a rule match is only accepted if the dictation
/// result contains the required substring (MustMatchString).
/// </summary>
public HmfHearingAssist()
{
    // Create the rule-recognition and dictation-recognition contexts.
    this.RecognizerRule = new SpeechLib.SpInProcRecoContext();
    this.RecognizerDictation = new SpeechLib.SpInProcRecoContext();

    // Both contexts take audio from the microphone.
    this.RecognizerRule.Recognizer.AudioInput = CreateMicrofon();
    this.RecognizerDictation.Recognizer.AudioInput = CreateMicrofon();

    // Event wiring (each delegate relays to an overridable default handler).

    // Default handling while a phrase is still being recognized.
    this.RecognizerRule.Hypothesis += delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult result)
    {
        this.Hypothesis(streamNumber, streamPosition, result);
    };

    // Default handling when recognition completes.
    this.RecognizerRule.Recognition += delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
    {
        // Cross-check against the dictation result: if a mandatory
        // substring is configured but absent from what dictation heard,
        // swallow the match and report it as a false recognition.
        if (this.MustMatchString.Length >= 1 && this.DictationString.IndexOf(this.MustMatchString) <= -1)
        {
            // Swallow the result.
            this.FalseRecognition(streamNumber, streamPosition, isrr);
            return;
        }
        this.Recognition(streamNumber, streamPosition, srt, isrr);
    };

    // Default handling when a new audio stream starts.
    this.RecognizerRule.StartStream += delegate(int streamNumber, object streamPosition)
    {
        this.DictationString = ""; // Clear the previous dictation match at stream start.
        this.StartStream(streamNumber, streamPosition);
    };

    // Default handling when recognition fails.
    this.RecognizerRule.FalseRecognition += delegate(int streamNumber, object streamPosition, SpeechLib.ISpeechRecoResult isrr)
    {
        this.FalseRecognition(streamNumber, streamPosition, isrr);
    };

    // Default handling when the audio stream ends.
    this.RecognizerRule.EndStream += delegate(int streamNumber, object streamPosition, bool streamReleased)
    {
        this.EndStream(streamNumber, streamPosition, streamReleased);
    };

    // Record what dictation matched. Dictation reportedly fires before
    // the rule recognizer, so DictationString is available to the rule
    // Recognition handler above — TODO confirm this ordering assumption.
    this.RecognizerDictation.Recognition += delegate(int streamNumber, object streamPosition, SpeechLib.SpeechRecognitionType srt, SpeechLib.ISpeechRecoResult isrr)
    {
        // Remember the matched text.
        this.DictationString = isrr.PhraseInfo.GetText(0, -1, true);
        // Also invoke the dictation callback delegate for subscribers.
        this.DictationRecognition(streamNumber, streamPosition, srt, isrr);
    };

    // Create the language models (grammars) for both contexts.
    this.RecognizerGrammarRule = this.RecognizerRule.CreateGrammar(0);
    this.RecognizerGrammarDictation = this.RecognizerDictation.CreateGrammar(0);

    // Create the top-level rule of the command grammar.
    this.RecognizerGrammarRuleGrammarRule = this.RecognizerGrammarRule.Rules.Add("TopLevelRule", SpeechRuleAttributes.SRATopLevel | SpeechRuleAttributes.SRADynamic);

    // Register the recognizable command words.
    this.RecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "検索");
    this.RecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "予定");
    this.RecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "認識");
    this.RecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "メモ");
    this.RecognizerGrammarRuleGrammarRule.InitialState.AddWordTransition(null, "認証");

    // Commit the rules and activate the top-level command rule; the
    // dictation grammar starts out inactive.
    this.RecognizerGrammarRule.Rules.Commit();
    this.RecognizerGrammarRule.CmdSetRuleState("TopLevelRule", SpeechRuleState.SGDSActive);
    this.RecognizerGrammarDictation.DictationSetState(SpeechRuleState.SGDSInactive);
}