예제 #1
0
        /// <summary>
        /// Initializes the window, starts three periodic timers (0.5 s, 2 s, 60 s)
        /// and begins continuous free-dictation speech recognition on the default
        /// audio device.
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();

            // The three timers differed only in interval and handler, so the
            // repeated configuration is factored into one helper.
            aTimer  = CreateRepeatingTimer(500, OnTimedEvent);
            aTimer2 = CreateRepeatingTimer(2000, OnTimedEvent2);
            aTimer3 = CreateRepeatingTimer(60000, OnTimedEvent3);

            // Free dictation from the default microphone, recognizing
            // continuously until explicitly cancelled.
            voz.SetInputToDefaultAudioDevice();
            voz.LoadGrammar(new System.Speech.Recognition.DictationGrammar());
            voz.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
        }

        /// <summary>
        /// Creates, configures and starts a timer that raises <paramref name="handler"/>
        /// every <paramref name="intervalMs"/> milliseconds until stopped.
        /// </summary>
        private static System.Timers.Timer CreateRepeatingTimer(
            double intervalMs, System.Timers.ElapsedEventHandler handler)
        {
            var timer = new System.Timers.Timer
            {
                Interval  = intervalMs,
                AutoReset = true, // fire repeatedly, not just once
            };
            timer.Elapsed += handler;
            timer.Enabled  = true; // starts the timer immediately
            return timer;
        }
예제 #2
0
        /// <summary>
        /// Creates an en-US speech recognizer whose grammar is the set of
        /// <see cref="KnownColor"/> names, wires the recognized/rejected
        /// handlers, and starts continuous asynchronous recognition.
        /// </summary>
        private void SpeechStartup()
        {
            // Renamed from "CultureInfo": a local must not shadow the type name.
            var culture = new System.Globalization.CultureInfo("en-US");

            speech = new SpeechRecognitionEngine(culture);

            // One phrase alternative per known color name.
            Choices colors = new Choices();
            foreach (KnownColor item in colorsArray)
            {
                // Add(string) — no need to wrap each name in a single-element array.
                colors.Add(item.ToString());
            }

            speech.SetInputToDefaultAudioDevice();

            GrammarBuilder gb = new GrammarBuilder();
            gb.Append(colors);
            speech.LoadGrammar(new Grammar(gb));

            speech.SpeechRecognized += speech_SpeechRecognized;
            speech.SpeechRecognitionRejected += Speech_SpeechRecognitionRejected;

            // Push one synthetic utterance through the pipeline as a smoke test.
            speech.EmulateRecognize("Blue");

            // Recognize continuously until RecognizeAsyncCancel/Stop is called.
            speech.RecognizeAsync(RecognizeMode.Multiple);
        }
예제 #3
0
        /// <summary>
        /// Announces startup, builds the four command grammars, configures the
        /// shared recognizer and starts continuous asynchronous recognition.
        /// </summary>
        private void Form1_Load(object sender, EventArgs e)
        {
            synth.Speak("Bienvenido al diseño de interfaces avanzadas. Inicializando la Aplicación");

            // The four grammars handled by _recognizer_SpeechRecognized.
            Grammar[] grammars =
            {
                CreateGrammarBuilderRGBSemantics2(null),
                CreateGrammarBuilderTimeSemantics2(null),
                CreateGrammarBuilderRemoveSemantics2(null),
                CreateGrammarBuilderTextSemantics2(null),
            };

            _recognizer.SetInputToDefaultAudioDevice();
            _recognizer.UnloadAllGrammars();
            // Confidence rejection threshold (0-100): results scoring below 50
            // are reported as rejected rather than recognized.
            _recognizer.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", 50);

            foreach (Grammar grammar in grammars)
            {
                grammar.Enabled = true;
                _recognizer.LoadGrammar(grammar);
            }

            _recognizer.SpeechRecognized += _recognizer_SpeechRecognized;

            // Asynchronous, continuous recognition until cancelled.
            _recognizer.RecognizeAsync(RecognizeMode.Multiple);
            synth.Speak("Aplicación preparada para reconocer su voz");
        }
예제 #4
0
        /// <summary>
        /// Builds the speech-recognition server: a dictation recognizer, a TTS
        /// engine bridged to the iSpeak module over YARP ports, a continuous
        /// background recognizer, a grammar manager, and an RPC handler thread.
        /// </summary>
        /// <param name="moduleName">Prefix used for every YARP port this server opens.</param>
        public SpeechRecognizerServer(string moduleName)
        {
            // Synchronous recognition engine, in the current language.
            m_reco = new System.Speech.Recognition.SpeechRecognitionEngine(myLanguage);

            Network.init();
            m_moduleName = moduleName;

            // TTS: synthesizer plus an output port connected to /iSpeak.
            m_tts = new System.Speech.Synthesis.SpeechSynthesizer();
            m_portISpeak = new Port();
            m_portISpeak.open("/" + moduleName + "/tts/iSpeak:o");
            Network.connect("/" + moduleName + "/tts/iSpeak:o", "/iSpeak");

            // Grammars: free dictation, spelling-mode dictation, and a
            // deliberately unmatchable phrase used to keep the continuous
            // recognizer alive without spurious matches.
            GrammarBuilder dictation = new GrammarBuilder();
            dictation.Culture = myLanguage;
            dictation.AppendDictation();
            m_grammar_dictation = new Grammar(dictation);
            GrammarBuilder spelling = new GrammarBuilder();
            spelling.Culture = myLanguage;
            spelling.AppendDictation("spelling");
            m_grammar_dictation_spelling = new Grammar(spelling);
            m_grammar_continuous = new GrammarBuilder("For sure this non empty grammar will never be recognized.");

            m_reco.SetInputToDefaultAudioDevice();
            m_reco.LoadGrammar(m_grammar_dictation);

            // Continuous recognition: results are streamed out on a dedicated port.
            m_reco_continuous = new SpeechRecognitionEngine();
            m_reco_continuous.SetInputToDefaultAudioDevice();
            m_portContinuousRecognition = new Port();
            m_portContinuousRecognition.open("/" + moduleName + "/recog/continuous:o");
            m_reco_continuous.LoadGrammar(new Grammar(m_grammar_continuous));
            m_reco_continuous.RecognizeCompleted += onContinuousRecognitionResult;
            m_reco_continuous.RecognizeAsync();

            m_grammarManager = new RobotGrammarManager();
            m_grammarManager.InitialiseVocabulories();
            SetLanguage("EN-us");
            //SetLanguage("fr-fr");

            Console.WriteLine("#########################");
            Console.WriteLine("#    Speech Recognizer  #");
            Console.WriteLine("#########################");

            // NOTE(review): Network.init() was already called above; this second
            // call is kept in case init is not idempotent — verify and remove.
            Network.init();
            m_rpcPort = new Port();
            m_rpcPort.open("/" + m_moduleName + "/rpc");
            m_rpcThread = new System.Threading.Thread(HandleRPC);
            m_rpcThread.Start();
        }
예제 #5
0
        /// <summary>
        /// Starts continuous free-dictation recognition from the default
        /// microphone when the button is clicked.
        /// </summary>
        /// <remarks>
        /// NOTE(review): clicking the button again loads a second
        /// DictationGrammar and subscribes the handler a second time —
        /// consider disabling the button or guarding against re-entry.
        /// </remarks>
        private void button1_Click(object sender, EventArgs e)
        {
            engine.SetInputToDefaultAudioDevice();
            engine.LoadGrammar(new DictationGrammar());
            // Despite its name, Engine_RecognizeCompleted is wired to the
            // per-utterance SpeechRecognized event, not RecognizeCompleted.
            engine.SpeechRecognized += Engine_RecognizeCompleted;
            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
예제 #6
0
 /// <summary>
 /// Resets the running statistics, applies the requested silence timeouts,
 /// then starts recognition — continuous and asynchronous when the
 /// RecognizeAsync flag is set, otherwise a single blocking pass.
 /// </summary>
 public void StartRecognition(SilenceTimeout silenceTimeouts)
 {
     IsRunning = true;
     _confidenceSum = 0;
     _number = 1;
     setSilenceTimeouts(silenceTimeouts);

     if (!RecognizeAsync)
     {
         // One synchronous recognition pass.
         _sre.Recognize();
         return;
     }

     // Keep recognizing until explicitly cancelled.
     _sre.RecognizeAsync(RecognizeMode.Multiple);
 }
예제 #7
0
파일: Program.cs 프로젝트: saveenr/saveenr
        /// <summary>
        /// Sets up a recognizer for five spoken edit commands, hooks every
        /// diagnostic event, and recognizes continuously.
        /// </summary>
        private static void wreck_a_nice_beach()
        {
            // Fresh engine on the default microphone, no inherited grammars.
            var engine = new SSR.SpeechRecognitionEngine();
            engine.SetInputToDefaultAudioDevice();
            engine.UnloadAllGrammars();

            // Vocabulary restricted to the five edit commands.
            var commands = new SSR.Choices("cut", "copy", "paste", "delete", "quit");
            var builder = new SSR.GrammarBuilder();
            builder.Append(commands);
            engine.LoadGrammar(new SSR.Grammar(builder));

            // Observe every stage of the recognition pipeline.
            engine.SpeechRecognized += SreOnSpeechRecognized;
            engine.SpeechDetected += SreOnSpeechDetected;
            engine.SpeechHypothesized += SreOnSpeechHypothesized;
            engine.SpeechRecognitionRejected += SreOnSpeechRecognitionRejected;
            engine.AudioSignalProblemOccurred += SreOnAudioSignalProblemOccurred;

            // Recognize continuously until cancelled.
            engine.RecognizeAsync(SSR.RecognizeMode.Multiple);
        }
예제 #8
0
        /// <summary>
        /// Loads the saved grammar-file setting, populates the voice-selection
        /// menu from the installed TTS voices, wires recognizer/synthesizer
        /// events, and starts continuous asynchronous recognition.
        /// </summary>
        private void Form1_Load(object sender, EventArgs e)
        {
            // typeof(string) avoids allocating a throwaway "" just to get its Type.
            strGrammarFile = asr.GetValue("strGrammarFile", typeof(string)).ToString();

            // One menu entry per installed voice; Tag carries the voice name
            // so tsi_Click can select it later.
            foreach (System.Speech.Synthesis.InstalledVoice iv in ss.GetInstalledVoices())
            {
                SSS.VoiceInfo vi      = iv.VoiceInfo;
                string        strName = $"{vi.Name}/{vi.Age}/{vi.Gender}/{vi.Culture.DisplayName}";
                ToolStripItem tsi     = setVoiceToolStripMenuItem.DropDownItems.Add(strName);
                tsi.Tag    = vi.Name;
                tsi.Click += tsi_Click;
            }

            sre.SpeechDetected   += sre_SpeechDetected;
            sre.SpeechRecognized += sre_SpeechRecognized;

            ss.SpeakStarted   += ss_SpeakStarted;
            ss.SpeakCompleted += ss_SpeakCompleted;

            loadGrammar();
            sre.SetInputToDefaultAudioDevice();
            sre.RecognizeAsync(SSR.RecognizeMode.Multiple);
        }
예제 #9
0
        /// <summary>
        /// Configures a recognizer for the five edit commands, subscribes all
        /// diagnostic events, and starts continuous recognition.
        /// </summary>
        private static void wreck_a_nice_beach()
        {
            var recognizer = new SSR.SpeechRecognitionEngine();

            // Default microphone input; start from an empty grammar set.
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.UnloadAllGrammars();

            // Grammar: exactly one of five single-word commands.
            var grammarBuilder = new SSR.GrammarBuilder();
            grammarBuilder.Append(new SSR.Choices("cut", "copy", "paste", "delete", "quit"));
            recognizer.LoadGrammar(new SSR.Grammar(grammarBuilder));

            // Hook every pipeline event for diagnostics.
            recognizer.SpeechRecognized += SreOnSpeechRecognized;
            recognizer.SpeechDetected += SreOnSpeechDetected;
            recognizer.SpeechHypothesized += SreOnSpeechHypothesized;
            recognizer.SpeechRecognitionRejected += SreOnSpeechRecognitionRejected;
            recognizer.AudioSignalProblemOccurred += SreOnAudioSignalProblemOccurred;

            // Continuous asynchronous recognition.
            recognizer.RecognizeAsync(SSR.RecognizeMode.Multiple);
        }
예제 #10
0
        // Starts Kinect audio capture and wires THREE recognizers: the pre/post
        // speech recognizers share the first audio stream, then a second sensor
        // lookup feeds a separate dictation engine.
        // NOTE(review): this looks like two merged samples — `sensor` is used at
        // the top and then reassigned below; verify both halves are intended.
        private void startAudio()
        {
            var audioSource = this.sensor.AudioSource;
            audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

            // This should be off by default, but just to be explicit, this MUST be set to false.
            audioSource.AutomaticGainControlEnabled = false;
            var kinectStream = audioSource.Start();

            // Both recognizers read the SAME stream: 16 kHz, 16-bit, mono PCM.
            this.preSpeechRecognizer.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            // Keep recognizing speech until window closes
            this.preSpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
            this.postSpeechRecognizer.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            // Keep recognizing speech until window closes
            this.postSpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);

            // NOTE(review): re-resolves and REPLACES the `sensor` field used
            // above; FirstOrDefault can return null and would NRE on Start().
            sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
            sensor.Start();

            source = sensor.AudioSource;
            source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
            source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

            // First installed recognizer — NOTE(review): also null if none installed.
            ri = System.Speech.Recognition.SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault();

            recoEngine = new System.Speech.Recognition.SpeechRecognitionEngine(ri.Id);

            // Free-dictation grammar for the second engine.
            customDictationGrammar = new System.Speech.Recognition.DictationGrammar();
            customDictationGrammar.Name = "Dictation";
            customDictationGrammar.Enabled = true;

            recoEngine.LoadGrammar(customDictationGrammar);

            recoEngine.SpeechRecognized += new EventHandler<System.Speech.Recognition.SpeechRecognizedEventArgs>(recoEngine_SpeechRecognized);

            // Same PCM format as above, but from the freshly started source.
            s = source.Start();
            recoEngine.SetInputToAudioStream(s, new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            recoEngine.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
        }
예제 #11
0
        /// <summary>
        /// Switches the whole server to a new culture: selects a matching TTS
        /// voice, rebuilds the dictation recognizer and grammar, and tears down
        /// and restarts the continuous recognizer in the new culture.
        /// </summary>
        /// <param name="cultureName">IETF language tag, e.g. "en-US".</param>
        /// <returns>false if the culture is unknown; true otherwise.</returns>
        public bool SetLanguage(string cultureName)
        {
            //System.Globalization.CultureInfo[] cultures = System.Globalization.CultureInfo.GetCultures(System.Globalization.CultureTypes.AllCultures);
            System.Globalization.CultureInfo culture;
            try
            {
                culture = System.Globalization.CultureInfo.GetCultureInfoByIetfLanguageTag(cultureName);
            }
            catch
            {
                Console.WriteLine("Culture info is not found.");
                return false;
            }
            myLanguage = culture;

            // Pick the first installed voice for this culture, if any;
            // otherwise the previously selected voice stays active.
            System.Collections.ObjectModel.ReadOnlyCollection<InstalledVoice> voices = m_tts.GetInstalledVoices(culture);
            if (voices.Count > 0)
                m_tts.SelectVoice(voices.First().VoiceInfo.Name);

            // Rebuild the synchronous recognizer and its dictation grammar in
            // the new culture. NOTE(review): the previous engine is replaced
            // without being disposed — confirm this leak is acceptable.
            m_reco = new System.Speech.Recognition.SpeechRecognitionEngine(culture);

            m_reco.SetInputToDefaultAudioDevice();
            GrammarBuilder dictation = new GrammarBuilder();
            dictation.Culture = myLanguage;
            dictation.AppendDictation();
            m_grammar_dictation = new Grammar(dictation);

            m_reco.LoadGrammar(m_grammar_dictation);

            // Tear down the continuous recognizer in a fixed order: detach the
            // handler first so cancellation does not fire a stale callback.
            m_reco_continuous.RecognizeCompleted -= onContinuousRecognitionResult;
            m_reco_continuous.RecognizeAsyncCancel();
            //m_reco_continuous.RecognizeAsyncStop();
            m_reco_continuous = new SpeechRecognitionEngine(culture);
            m_reco_continuous.SetInputToDefaultAudioDevice();
            m_grammar_continuous.Culture = culture;
            m_reco_continuous.LoadGrammar(new Grammar(m_grammar_continuous));
            m_reco_continuous.RecognizeCompleted += onContinuousRecognitionResult;
            m_reco_continuous.RecognizeAsync();

            // Propagate the language change to the vocabulary manager.
            m_grammarManager.SetLanguage(cultureName);

            Console.WriteLine("The culture has been set to " + cultureName);
            return true;
        }