/// <summary>
/// Builds an en-US recognizer whose grammar is the list of known color names,
/// smoke-tests it with an emulated utterance, then starts continuous recognition.
/// </summary>
private void SpeechStartup()
{
    // Renamed from "CultureInfo": a local must not shadow the type name.
    var culture = new System.Globalization.CultureInfo("en-US");
    speech = new SpeechRecognitionEngine(culture);

    // One grammar phrase per entry in colorsArray.
    Choices colors = new Choices();
    foreach (KnownColor item in colorsArray)
    {
        // Choices.Add(params string[]) — no need to wrap a single phrase in an array.
        colors.Add(item.ToString());
    }

    speech.SetInputToDefaultAudioDevice();

    GrammarBuilder gb = new GrammarBuilder();
    gb.Append(colors);
    Grammar g = new Grammar(gb);
    speech.LoadGrammar(g);

    // Method-group subscription; explicit "new EventHandler<...>" is redundant.
    speech.SpeechRecognized += speech_SpeechRecognized;
    speech.SpeechRecognitionRejected += Speech_SpeechRecognitionRejected;

    // Exercise the grammar with a synthetic utterance before going live.
    speech.EmulateRecognize("Blue");

    // Keep listening until explicitly stopped.
    speech.RecognizeAsync(RecognizeMode.Multiple);
}
/// <summary>
/// Creates a speech recognition engine for the preferred culture, falling back
/// to the first installed recognizer when that culture is unavailable.
/// </summary>
/// <param name="preferredCulture">Culture name to match, e.g. "en-US".</param>
/// <param name="result">"Success", or a message describing the fallback culture used.</param>
/// <returns>A <see cref="System.Speech.Recognition.SpeechRecognitionEngine"/> for the chosen recognizer.</returns>
public System.Speech.Recognition.SpeechRecognitionEngine CreateSpeechEngine(string preferredCulture, out string result)
{
    try
    {
        // Query the installed recognizers once instead of three separate times.
        var installed = System.Speech.Recognition.SpeechRecognitionEngine.InstalledRecognizers();

        var speechRecognitionEngine =
            (from config in installed
             where config.Culture.ToString() == preferredCulture
             select new System.Speech.Recognition.SpeechRecognitionEngine(config)).FirstOrDefault();
        result = "Success";

        // if the desired culture is not found, then load default
        if (speechRecognitionEngine == null)
        {
            speechRecognitionEngine = new System.Speech.Recognition.SpeechRecognitionEngine(installed[0]);
            result = "The desired culture is not installed on this machine, the speech-engine will continue using "
                     + installed[0].Culture
                     + " as the default culture. "
                     + "Culture " + preferredCulture + " not found!";
        }

        return speechRecognitionEngine;
    }
    catch (Exception ex)
    {
        Log.ErrorLog(ex);
        throw; // rethrow without resetting the stack trace
    }
}
/// <summary>
/// Creates a recognition engine that listens on the default audio device
/// with a free-form dictation grammar loaded.
/// </summary>
public RecognitionEngine()
{
    actions = new List<Action>();

    engine = new sp.SpeechRecognitionEngine();
    engine.SpeechRecognized += engine_SpeechRecognized;
    engine.RecognizerUpdateReached += engine_RecognizerUpdateReached;
    engine.SetInputToDefaultAudioDevice();
    engine.LoadGrammar(new sp.DictationGrammar());
}
/// <summary>
/// Creates a recognition engine on the default audio device and delegates
/// grammar setup to <c>LoadProfile</c> for the supplied profile.
/// </summary>
/// <param name="profile">Profile to load into the engine.</param>
public RecognitionEngine(Profile profile)
{
    actions = new List<Action>();

    engine = new sp.SpeechRecognitionEngine();
    engine.SpeechRecognized += engine_SpeechRecognized;
    engine.RecognizerUpdateReached += engine_RecognizerUpdateReached;
    engine.SetInputToDefaultAudioDevice();

    LoadProfile(profile);
}
// Public Methods (3)

/// <summary>
/// Builds the recognizer identified by <c>_engineId</c>, wires the recognition
/// events, loads a dictation grammar, and points the input at the wave file
/// referenced by <c>_filePath</c>.
/// </summary>
public void InitEngine()
{
    _sre = new System.Speech.Recognition.SpeechRecognitionEngine(_engineId);

    // Subscribe before any audio is processed so no callbacks are missed.
    _sre.SpeechRecognized += speechRecognized;
    _sre.RecognizeCompleted += recognizeCompleted;
    _sre.SpeechHypothesized += speechHypothesized;

    _sre.LoadGrammar(new DictationGrammar());
    _sre.SetInputToWaveFile(_filePath);
}
/// <summary>
/// Sets up the speech recognizer server for the given module name: TTS output
/// port, dictation/spelling grammars, a continuously running background
/// recognizer with its own output port, the grammar manager, and an RPC thread.
/// </summary>
/// <param name="moduleName">Base name used to build every port name opened here.</param>
public SpeechRecognizerServer(string moduleName)
{
    // NOTE(review): this local is never used afterwards — dead query? verify.
    System.Collections.ObjectModel.ReadOnlyCollection<RecognizerInfo> installedRecognizers = SpeechRecognitionEngine.InstalledRecognizers();

    //Synchronous Recognition
    m_reco = new System.Speech.Recognition.SpeechRecognitionEngine(myLanguage);
    Network.init();
    m_moduleName = moduleName;

    //TTS: synthesized text is routed out through the iSpeak port.
    m_tts = new System.Speech.Synthesis.SpeechSynthesizer();
    m_portISpeak = new Port();
    m_portISpeak.open("/" + moduleName + "/tts/iSpeak:o");
    Network.connect("/" + moduleName + "/tts/iSpeak:o", "/iSpeak");

    //Grammars: free-form dictation plus letter-by-letter spelling dictation.
    GrammarBuilder dictation = new GrammarBuilder();
    dictation.Culture = myLanguage;
    dictation.AppendDictation();
    m_grammar_dictation = new Grammar(dictation);

    GrammarBuilder spelling = new GrammarBuilder();
    spelling.Culture = myLanguage;
    spelling.AppendDictation("spelling");
    m_grammar_dictation_spelling = new Grammar(spelling);

    // Placeholder: the continuous engine requires a non-empty grammar, but this
    // phrase is never expected to be matched.
    m_grammar_continuous = new GrammarBuilder("For sure this non empty grammar will never be recognized.");

    m_reco.SetInputToDefaultAudioDevice();
    m_reco.LoadGrammar(m_grammar_dictation);

    //Continuous Recognition: background engine reporting through its own port.
    m_reco_continuous = new SpeechRecognitionEngine();
    m_reco_continuous.SetInputToDefaultAudioDevice();
    m_portContinuousRecognition = new Port();
    m_portContinuousRecognition.open("/" + moduleName + "/recog/continuous:o");
    m_reco_continuous.LoadGrammar(new Grammar(m_grammar_continuous));
    m_reco_continuous.RecognizeCompleted += onContinuousRecognitionResult;
    m_reco_continuous.RecognizeAsync();

    m_grammarManager = new RobotGrammarManager();
    m_grammarManager.InitialiseVocabulories();
    // Default language; SetLanguage rebuilds both engines for the new culture.
    SetLanguage("EN-us");
    //SetLanguage("fr-fr");

    Console.WriteLine("#########################");
    Console.WriteLine("# Speech Recognizer #");
    Console.WriteLine("#########################");

    // NOTE(review): Network.init() is invoked a second time here — presumably
    // idempotent, but verify against the YARP bindings.
    Network.init();
    m_rpcPort = new Port();
    m_rpcPort.open("/" + m_moduleName + "/rpc");
    // Serve RPC requests on a dedicated thread.
    m_rpcThread = new System.Threading.Thread(HandleRPC);
    m_rpcThread.Start();
}
/// <summary>
/// Sets up an always-on recognizer ("wreck a nice beach" ~ "recognize speech")
/// for a small vocabulary of editing commands and starts continuous recognition.
/// </summary>
private static void wreck_a_nice_beach()
{
    var recognizer = new SSR.SpeechRecognitionEngine();
    recognizer.SetInputToDefaultAudioDevice();
    recognizer.UnloadAllGrammars();

    // Command vocabulary: one phrase per editing action.
    var commands = new SSR.Choices("cut", "copy", "paste", "delete", "quit");
    var commandBuilder = new SSR.GrammarBuilder();
    commandBuilder.Append(commands);
    recognizer.LoadGrammar(new SSR.Grammar(commandBuilder));

    // Trace every stage of the recognition pipeline.
    recognizer.SpeechDetected += SreOnSpeechDetected;
    recognizer.SpeechHypothesized += SreOnSpeechHypothesized;
    recognizer.SpeechRecognized += SreOnSpeechRecognized;
    recognizer.SpeechRecognitionRejected += SreOnSpeechRecognitionRejected;
    recognizer.AudioSignalProblemOccurred += SreOnAudioSignalProblemOccurred;

    recognizer.RecognizeAsync(SSR.RecognizeMode.Multiple);
}
/// <summary>
/// Routes the Kinect microphone stream into the pre/post speech recognizers,
/// then acquires a (second) sensor and wires a dictation engine to it.
/// </summary>
private void startAudio()
{
    var audioSource = this.sensor.AudioSource;
    audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

    // This should be off by default, but just to be explicit, this MUST be set to false.
    audioSource.AutomaticGainControlEnabled = false;
    var kinectStream = audioSource.Start();

    // 16 kHz, 16-bit, mono PCM — the format the Kinect audio source emits.
    this.preSpeechRecognizer.SetInputToAudioStream(kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    // Keep recognizing speech until window closes
    this.preSpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);

    // NOTE(review): both recognizers read the same kinectStream instance —
    // confirm the stream supports two concurrent consumers.
    this.postSpeechRecognizer.SetInputToAudioStream(kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    // Keep recognizing speech until window closes
    this.postSpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);

    // NOTE(review): everything below re-acquires and restarts the sensor and
    // audio source already started above, overwriting this.sensor — this reads
    // like two samples merged into one method; verify the duplication is intended.
    sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
    sensor.Start();

    source = sensor.AudioSource;
    source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
    source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

    // NOTE(review): FirstOrDefault may return null; ri.Id on the next line would
    // then throw NullReferenceException — consider guarding.
    ri = System.Speech.Recognition.SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault();
    recoEngine = new System.Speech.Recognition.SpeechRecognitionEngine(ri.Id);

    customDictationGrammar = new System.Speech.Recognition.DictationGrammar();
    customDictationGrammar.Name = "Dictation";
    customDictationGrammar.Enabled = true;
    recoEngine.LoadGrammar(customDictationGrammar);
    recoEngine.SpeechRecognized += new EventHandler<System.Speech.Recognition.SpeechRecognizedEventArgs>(recoEngine_SpeechRecognized);

    s = source.Start();
    recoEngine.SetInputToAudioStream(s, new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    recoEngine.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
}
/// <summary>
/// Switches the TTS voice and both recognition engines to the given culture.
/// The continuous engine is torn down and rebuilt so its grammar is reloaded
/// under the new culture.
/// </summary>
/// <param name="cultureName">IETF language tag, e.g. "en-US".</param>
/// <returns>true when the culture was applied; false when it could not be resolved.</returns>
public bool SetLanguage(string cultureName)
{
    //System.Globalization.CultureInfo[] cultures = System.Globalization.CultureInfo.GetCultures(System.Globalization.CultureTypes.AllCultures);
    System.Globalization.CultureInfo culture;
    try
    {
        culture = System.Globalization.CultureInfo.GetCultureInfoByIetfLanguageTag(cultureName);
    }
    catch
    {
        // Unknown tag: report and leave all engines untouched.
        Console.WriteLine("Culture info is not found.");
        return false;
    }
    myLanguage = culture;

    // Select the first installed TTS voice matching the culture, if any.
    System.Collections.ObjectModel.ReadOnlyCollection<InstalledVoice> voices = m_tts.GetInstalledVoices(culture);
    if (voices.Count > 0)
        m_tts.SelectVoice(voices.First().VoiceInfo.Name);

    // Rebuild the synchronous recognizer with a dictation grammar in the new culture.
    m_reco = new System.Speech.Recognition.SpeechRecognitionEngine(culture);
    m_reco.SetInputToDefaultAudioDevice();
    GrammarBuilder dictation = new GrammarBuilder();
    dictation.Culture = myLanguage;
    dictation.AppendDictation();
    m_grammar_dictation = new Grammar(dictation);
    m_reco.LoadGrammar(m_grammar_dictation);

    // Tear down the old continuous engine: unsubscribe FIRST so the cancel does
    // not fire onContinuousRecognitionResult, then cancel in-flight recognition.
    m_reco_continuous.RecognizeCompleted -= onContinuousRecognitionResult;
    m_reco_continuous.RecognizeAsyncCancel();
    //m_reco_continuous.RecognizeAsyncStop();

    // Recreate the continuous engine under the new culture and restart it.
    m_reco_continuous = new SpeechRecognitionEngine(culture);
    m_reco_continuous.SetInputToDefaultAudioDevice();
    m_grammar_continuous.Culture = culture;
    m_reco_continuous.LoadGrammar(new Grammar(m_grammar_continuous));
    m_reco_continuous.RecognizeCompleted += onContinuousRecognitionResult;
    m_reco_continuous.RecognizeAsync();

    m_grammarManager.SetLanguage(cultureName);
    Console.WriteLine("The culture has been set to " + cultureName);
    return true;
}
/// <summary>
/// Wires up all Jarvis modules, collects their grammars, and starts both a
/// plain dictation recognizer and a Kinect-backed command recognizer.
/// Blocks until ENTER is pressed, then stops the Kinect recognizer.
/// </summary>
public Jarvis()
{
    modules = new LinkedList<IJModule>();

    /*************** IJModule Instatiation Stuff ****************/
    modules.AddLast(new MusicControl(preferences.mediaplayerprocess, preferences.initialvolume, preferences.volumeincrements));
    if (preferences.usegooglevoice)
        modules.AddLast(new GoogleVoice(preferences.googleemail, preferences.googlepassword, preferences.googleaddressbook));
    if (preferences.facebookrssfeed != null)
        modules.AddLast(new Facebook(preferences.facebookrssfeed));
    if (preferences.usegooglecalendar)
        modules.AddLast(new GoogleCalendar(preferences.googleemail, preferences.googlepassword, preferences.googlecalendaralerttime));

    // Background thread surfacing module alerts via alertFunction.
    alertThread = new Thread(new ThreadStart(alertFunction));
    alertThread.Name = "Alert Thread";
    alertThread.Start();

    /****************Get Grammar From Modules*********************/
    // Each module may contribute a grammar file, loaded via a file:// rule reference.
    var grammars = new LinkedList<Microsoft.Speech.Recognition.Grammar>();
    foreach (IJModule module in modules)
    {
        if (module.getGrammarFile() != null)
        {
            var gb = new Microsoft.Speech.Recognition.GrammarBuilder();
            gb.AppendRuleReference("file://" + System.Environment.CurrentDirectory + "\\" + module.getGrammarFile());
            Console.WriteLine("file://" + System.Environment.CurrentDirectory + "\\" + module.getGrammarFile());
            grammars.AddLast(new Microsoft.Speech.Recognition.Grammar(gb));
        }
    }

    /************ Speech Recognition Stuff **********************/
    // Desktop (System.Speech) engine: free-form dictation on the default microphone.
    dictation = new System.Speech.Recognition.SpeechRecognitionEngine();
    dictation.SetInputToDefaultAudioDevice();
    dictation.LoadGrammar(new DictationGrammar());
    dictation.SpeechRecognized += SreSpeechRecognized;

    // First connected Kinect sensor, if any.
    sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
    if (sensor == null)
    {
        Console.WriteLine(
            "No Kinect sensors are attached to this computer or none of the ones that are\n" +
            "attached are \"Connected\".\n" +
            "Press any key to continue.\n");
        Console.ReadKey(true);
        return;
    }
    sensor.Start();

    KinectAudioSource source = sensor.AudioSource;
    source.EchoCancellationMode = EchoCancellationMode.CancellationOnly;
    // AGC must be off for speech recognition.
    source.AutomaticGainControlEnabled = false;

    Microsoft.Speech.Recognition.RecognizerInfo ri = GetKinectRecognizer();
    // NOTE(review): ri.Id is dereferenced on the next line BEFORE the null check
    // below, so a missing Kinect recognizer throws before the friendly message.
    Debug.WriteLine(ri.Id);
    if (ri == null)
    {
        Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
        return;
    }

    // Give the Kinect audio pipeline a few seconds to settle before recognizing.
    int wait = 4;
    while (wait > 0)
    {
        Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--);
        Thread.Sleep(1000);
    }
    //sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);

    // Server (Microsoft.Speech) engine fed by the Kinect microphone, loaded with
    // every grammar the modules contributed.
    sre = new Microsoft.Speech.Recognition.SpeechRecognitionEngine(ri.Id);
    foreach (Microsoft.Speech.Recognition.Grammar g in grammars)
    {
        sre.LoadGrammar(g);
    }
    sre.SpeechRecognized += SreSpeechRecognized;

    using (Stream s = source.Start())
    {
        // 16 kHz, 16-bit, mono PCM audio from the Kinect source.
        sre.SetInputToAudioStream(s, new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(Microsoft.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        Console.WriteLine("Recognizing speech. Say: 'red', 'green' or 'blue'. Press ENTER to stop");
        sre.RecognizeAsync(Microsoft.Speech.Recognition.RecognizeMode.Multiple);
        // Block the constructor until the user presses ENTER.
        Console.ReadLine();
        Console.WriteLine("Stopping recognizer ...");
        sre.RecognizeAsyncStop();
    }
    source.Stop();
    alertThread.Abort();
}
/// <summary>
/// Speech recognizer state constructor: starts with an empty dictionary-style
/// grammar and a fresh recognition engine.
/// </summary>
public SpeechRecognizerState()
{
    _grammarType = GrammarType.DictionaryStyle;
    _dictionaryGrammar = new Dictionary<string, string>();
    _recognizer = new sr.SpeechRecognitionEngine();
}
/// <summary>
/// Initializes the en-GB emulator recognizer with dictation, spelling, and
/// web-site-name grammars, then records the requested command mode.
/// </summary>
/// <param name="commandMode">Mode stored in <c>CommandMode</c> after setup succeeds.</param>
public void InitializeEmulator(Mode commandMode)
{
    SpeechProcessingException = null;
    try
    {
        // Removed dead code: the original built a GrammarBuilder with
        // AppendDictation() that was never loaded into the recognizer.
        _recognizer = new System.Speech.Recognition.SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"));
        _recognizer.RequestRecognizerUpdate();
        _recognizer.LoadGrammar(new DictationGrammar());
        _recognizer.LoadGrammar(GetSpellingGrammar());
        _recognizer.LoadGrammar(GetWebSiteNamesGrammar());
        _recognizer.SpeechRecognized += recognizer_SpeechRecognized;
        CommandMode = commandMode;
    }
    catch (Exception ex)
    {
        Log.ErrorLog(ex);
        throw; // rethrow without resetting the stack trace
    }
}