Code example #1
        private void Form1_Load(object sender, EventArgs e)
        {
            synth.Speak("Bienvenido al diseño de interfaces avanzadas. Inicializando la Aplicación");

            Grammar grammar  = CreateGrammarBuilderRGBSemantics2(null);
            Grammar grammar2 = CreateGrammarBuilderTimeSemantics2(null);
            Grammar grammar3 = CreateGrammarBuilderRemoveSemantics2(null);
            Grammar grammar4 = CreateGrammarBuilderTextSemantics2(null);

            _recognizer.SetInputToDefaultAudioDevice();
            _recognizer.UnloadAllGrammars();
            // Confidence rejection threshold for the recognizer (set to 50 here)
            _recognizer.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", 50);
            grammar.Enabled  = true;
            grammar2.Enabled = true;
            grammar3.Enabled = true;
            grammar4.Enabled = true;
            _recognizer.LoadGrammar(grammar);
            _recognizer.LoadGrammar(grammar2);
            _recognizer.LoadGrammar(grammar3);
            _recognizer.LoadGrammar(grammar4);
            _recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_recognizer_SpeechRecognized);
            // Asynchronous recognition, repeated for multiple utterances
            _recognizer.RecognizeAsync(RecognizeMode.Multiple);
            synth.Speak("Aplicación preparada para reconocer su voz");
        }
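
The _recognizer_SpeechRecognized handler wired up above is not shown on this page. A minimal sketch of what such a handler could look like (the body below is an assumption for illustration, not the project's code):

        private void _recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Illustrative only: print the recognized phrase, its confidence and any semantic values.
            Console.WriteLine("Heard: " + e.Result.Text + " (confidence " + e.Result.Confidence + ")");
            foreach (var pair in e.Result.Semantics)
            {
                Console.WriteLine("  " + pair.Key + " = " + pair.Value.Value);
            }
        }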
Code example #2
        private void SpeechStartup()
        {
            var cultureInfo = new System.Globalization.CultureInfo("en-US");

            speech = new SpeechRecognitionEngine(cultureInfo);

            Choices colors = new Choices();

            foreach (KnownColor item in colorsArray)
            {
                colors.Add(new string[] { item.ToString() });
            }


            speech.SetInputToDefaultAudioDevice();

            GrammarBuilder gb = new GrammarBuilder();

            gb.Append(colors);

            Grammar g = new Grammar(gb);

            speech.LoadGrammar(g);

            speech.SpeechRecognized +=
                new EventHandler<SpeechRecognizedEventArgs>(speech_SpeechRecognized);

            speech.SpeechRecognitionRejected += Speech_SpeechRecognitionRejected;

            // Emulated recognition: feeds "Blue" through the loaded grammar without audio input.
            speech.EmulateRecognize("Blue");

            speech.RecognizeAsync(RecognizeMode.Multiple);
        }
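
The colorsArray field iterated in the foreach above is not shown. It is presumably an array of KnownColor values; an assumed initialization would be:

        // Assumption: every System.Drawing.KnownColor value becomes a recognizable phrase.
        private readonly KnownColor[] colorsArray =
            (KnownColor[])Enum.GetValues(typeof(KnownColor));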
Code example #3
        public MainWindow()
        {
            InitializeComponent();

            aTimer = new System.Timers.Timer();
            aTimer.Interval = 500;
            aTimer.Elapsed += OnTimedEvent;
            aTimer.AutoReset = true;
            aTimer.Enabled = true;

            aTimer2 = new System.Timers.Timer();
            aTimer2.Interval = 2000;
            aTimer2.Elapsed += OnTimedEvent2;
            aTimer2.AutoReset = true;
            aTimer2.Enabled = true;

            aTimer3 = new System.Timers.Timer();
            aTimer3.Interval = 60000;
            aTimer3.Elapsed += OnTimedEvent3;
            aTimer3.AutoReset = true;
            aTimer3.Enabled = true;

            voz.SetInputToDefaultAudioDevice();
            voz.LoadGrammar(new System.Speech.Recognition.DictationGrammar());
            voz.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
        }
Code example #4
File: Dummy.cs Project: warrior888/toci_porsche
        public void ReflectionIndependentInjection()
        {
            rentaloption ro = new rentaloption();

            var properties = ro.GetType().GetProperties();

            string code = "";

            foreach (var property in properties)
            {
                code += "entity." + property.Name + " = " + ";" + Environment.NewLine;
            }


            IVideo Holidays = Dependencyresolver.Resolve<IVideo>(); //new Video(/*resolve*/ (IExif)(Dependencyresolver.Resolve<IExif>()));


            sre.LoadGrammar(new DictationGrammar());

            /*sre.SetInputToDefaultAudioDevice();
             *
             *
             *
             * sre.SpeechRecognized += Engine_RecognizeCompleted;
             *
             * sre.RecognizeAsync();*/

            //SharpLearning.Neural.NeuralNetLearner l = new NeuralNetLearner(new NeuralNet(), );

            //Type typ = Assembly.GetExecutingAssembly().GetTypes().Where(t => t.Name == "IExif").First().DeclaringType;
            //Activator.CreateInstance<IExif>();
        }
Code example #5
        public RecognitionEngine()
        {
            this.actions = new List<Action>();
            engine = new sp.SpeechRecognitionEngine();
            engine.SetInputToDefaultAudioDevice();
            engine.SpeechRecognized += engine_SpeechRecognized;
            engine.RecognizerUpdateReached += engine_RecognizerUpdateReached;

            engine.LoadGrammar(new sp.DictationGrammar());
        }
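
This constructor subscribes to RecognizerUpdateReached but the handler is not shown. A common use of that event is to add or remove grammars safely while RecognizeAsync is running; the sketch below is illustrative only (the pendingGrammar field and QueueGrammar method are assumptions, not part of the project):

        private sp.Grammar pendingGrammar;

        // Illustrative: queue a grammar change, then apply it at the next safe update point.
        public void QueueGrammar(sp.Grammar grammar)
        {
            pendingGrammar = grammar;
            engine.RequestRecognizerUpdate();
        }

        private void engine_RecognizerUpdateReached(object sender, sp.RecognizerUpdateReachedEventArgs e)
        {
            if (pendingGrammar != null)
            {
                engine.LoadGrammar(pendingGrammar);
                pendingGrammar = null;
            }
        }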
Code example #6
File: Sre.cs Project: rasoulian/SubtitleTools
        // Public Methods (3) 

        public void InitEngine()
        {
            _sre = new System.Speech.Recognition.SpeechRecognitionEngine(_engineId);
            _sre.SpeechRecognized   += speechRecognized;
            _sre.RecognizeCompleted += recognizeCompleted;
            _sre.SpeechHypothesized += speechHypothesized;
            var grammar = new DictationGrammar();

            _sre.LoadGrammar(grammar);
            _sre.SetInputToWaveFile(_filePath);
        }
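
InitEngine only prepares the engine; transcription of the wave file still has to be started, and RecognizeCompleted signals when the file has been fully consumed. A possible continuation (the method and handler bodies below are assumptions, not the project's code):

        // Illustrative: start asynchronous recognition of the wave file set above.
        public void StartTranscription()
        {
            _sre.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
        }

        private void recognizeCompleted(object sender, System.Speech.Recognition.RecognizeCompletedEventArgs e)
        {
            // InputStreamEnded is true once the whole wave file has been processed.
            if (e.InputStreamEnded)
            {
                Console.WriteLine("Finished transcribing the input file.");
            }
        }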
Code example #7
        public SpeechRecognizerServer(string moduleName)
        {
            System.Collections.ObjectModel.ReadOnlyCollection<RecognizerInfo> installedRecognizers = SpeechRecognitionEngine.InstalledRecognizers();

            //Synchronous Recognition
            m_reco = new System.Speech.Recognition.SpeechRecognitionEngine(myLanguage);

            Network.init();
            m_moduleName = moduleName;

            //TTS
            m_tts = new System.Speech.Synthesis.SpeechSynthesizer();
            m_portISpeak = new Port();
            m_portISpeak.open("/" + moduleName + "/tts/iSpeak:o");
            Network.connect("/" + moduleName + "/tts/iSpeak:o", "/iSpeak");

            //Grammars
            GrammarBuilder dictation = new GrammarBuilder();
            dictation.Culture = myLanguage;
            dictation.AppendDictation();
            m_grammar_dictation = new Grammar(dictation);
            GrammarBuilder spelling = new GrammarBuilder();
            spelling.Culture = myLanguage;
            spelling.AppendDictation("spelling");
            m_grammar_dictation_spelling = new Grammar(spelling);
            m_grammar_continuous = new GrammarBuilder("For sure this non empty grammar will never be recognized.");

            m_reco.SetInputToDefaultAudioDevice();
            m_reco.LoadGrammar(m_grammar_dictation);

            //Continuous Recognition
            m_reco_continuous = new SpeechRecognitionEngine();
            m_reco_continuous.SetInputToDefaultAudioDevice();
            m_portContinuousRecognition = new Port();
            m_portContinuousRecognition.open("/" + moduleName + "/recog/continuous:o");
            m_reco_continuous.LoadGrammar(new Grammar(m_grammar_continuous));
            m_reco_continuous.RecognizeCompleted += onContinuousRecognitionResult;
            m_reco_continuous.RecognizeAsync();

            m_grammarManager = new RobotGrammarManager();
            m_grammarManager.InitialiseVocabulories();
            SetLanguage("EN-us");
            //SetLanguage("fr-fr");

            Console.WriteLine("#########################");
            Console.WriteLine("#    Speech Recognizer  #");
            Console.WriteLine("#########################");

            Network.init();
            m_rpcPort = new Port();
            m_rpcPort.open("/" + m_moduleName + "/rpc");
            m_rpcThread = new System.Threading.Thread(HandleRPC);
            m_rpcThread.Start();
        }
Code example #8
File: Form1.cs Project: warrior888/toci_porsche
        private void button1_Click(object sender, EventArgs e)
        {
            //System.Speech.Recognition.SpeechUI sp = new SpeechUI();
            //BASSInput bip = Bass.BASS_RecordGetInput(-1);

            //Assembly.LoadFile("C:\\Windows\\SysWOW64\\winmm.dll");


            engine.LoadGrammar(new DictationGrammar());
            engine.SetInputToDefaultAudioDevice();
            engine.SpeechRecognized += Engine_RecognizeCompleted;
            // engine.RecognizeCompleted += Engine_RecognizeCompleted;
            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
Code example #9
File: Program.cs Project: saveenr/saveenr
        private static void wreck_a_nice_beach()
        {
            var sre = new SSR.SpeechRecognitionEngine();
            sre.SetInputToDefaultAudioDevice();
            sre.UnloadAllGrammars();

            var gb1 = new SSR.GrammarBuilder();
            gb1.Append(new SSR.Choices("cut", "copy", "paste", "delete", "quit"));


            var g1 = new SSR.Grammar(gb1);
            sre.LoadGrammar(g1);

            sre.SpeechRecognized += SreOnSpeechRecognized;
            sre.SpeechDetected += SreOnSpeechDetected;
            sre.SpeechHypothesized += SreOnSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreOnSpeechRecognitionRejected;
            sre.AudioSignalProblemOccurred += SreOnAudioSignalProblemOccurred;

            sre.RecognizeAsync(SSR.RecognizeMode.Multiple);
        }
Code example #10
        private void loadGrammar()
        {
            sre.UnloadAllGrammars();
            htWords.Clear();
            StreamReader sr   = File.OpenText(strGrammarFile);
            int          icnt = 0;

            while (!sr.EndOfStream && icnt < 10000)
            {
                string strLine = sr.ReadLine();
                if (strLine != "")
                {
                    SSR.GrammarBuilder gb = new System.Speech.Recognition.GrammarBuilder();
                    gb.Append(strLine);
                    SSR.Grammar gram = new System.Speech.Recognition.Grammar(gb);
                    sre.LoadGrammar(gram);

                    htWords.Add(htWords.Count, strLine.ToLower());
                }
                icnt++;
            }
            sr.Close();
        }
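
Loading one single-phrase grammar per line works, but the same vocabulary can also be expressed as a single Choices-based grammar, which keeps the recognizer's grammar count low. A sketch of that alternative (reusing the sre, strGrammarFile and htWords fields from above and assuming System.IO and System.Linq are imported; this rewrite is an assumption, not the project's code):

        private void loadGrammarAsChoices()
        {
            sre.UnloadAllGrammars();
            htWords.Clear();

            // Illustrative: read at most 10000 lines, keep the non-empty ones, build one grammar.
            string[] phrases = File.ReadLines(strGrammarFile)
                                   .Take(10000)
                                   .Where(line => line != "")
                                   .ToArray();

            var gb = new System.Speech.Recognition.GrammarBuilder(
                new System.Speech.Recognition.Choices(phrases));
            sre.LoadGrammar(new System.Speech.Recognition.Grammar(gb));

            foreach (string phrase in phrases)
            {
                htWords.Add(htWords.Count, phrase.ToLower());
            }
        }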
Code example #11
        private static void wreck_a_nice_beach()
        {
            var sre = new SSR.SpeechRecognitionEngine();

            sre.SetInputToDefaultAudioDevice();
            sre.UnloadAllGrammars();

            var gb1 = new SSR.GrammarBuilder();

            gb1.Append(new SSR.Choices("cut", "copy", "paste", "delete", "quit"));


            var g1 = new SSR.Grammar(gb1);

            sre.LoadGrammar(g1);

            sre.SpeechRecognized           += SreOnSpeechRecognized;
            sre.SpeechDetected             += SreOnSpeechDetected;
            sre.SpeechHypothesized         += SreOnSpeechHypothesized;
            sre.SpeechRecognitionRejected  += SreOnSpeechRecognitionRejected;
            sre.AudioSignalProblemOccurred += SreOnAudioSignalProblemOccurred;

            sre.RecognizeAsync(SSR.RecognizeMode.Multiple);
        }
Code example #12
        private void startAudio()
        {
            var audioSource = this.sensor.AudioSource;
            audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

            // This should be off by default, but just to be explicit, this MUST be set to false.
            audioSource.AutomaticGainControlEnabled = false;
            var kinectStream = audioSource.Start();

            this.preSpeechRecognizer.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            // Keep recognizing speech until window closes
            this.preSpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
            this.postSpeechRecognizer.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            // Keep recognizing speech until window closes
            this.postSpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);

            sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
            sensor.Start();

            source = sensor.AudioSource;
            source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
            source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

            ri = System.Speech.Recognition.SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault();

            recoEngine = new System.Speech.Recognition.SpeechRecognitionEngine(ri.Id);

            customDictationGrammar = new System.Speech.Recognition.DictationGrammar();
            customDictationGrammar.Name = "Dictation";
            customDictationGrammar.Enabled = true;

            recoEngine.LoadGrammar(customDictationGrammar);

            recoEngine.SpeechRecognized += new EventHandler<System.Speech.Recognition.SpeechRecognizedEventArgs>(recoEngine_SpeechRecognized);

            s = source.Start();
            recoEngine.SetInputToAudioStream(s, new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            recoEngine.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
        }
Code example #13
        public bool SetLanguage(string cultureName)
        {
            //System.Globalization.CultureInfo[] cultures = System.Globalization.CultureInfo.GetCultures(System.Globalization.CultureTypes.AllCultures);
            System.Globalization.CultureInfo culture;
            try
            {
                culture = System.Globalization.CultureInfo.GetCultureInfoByIetfLanguageTag(cultureName);
            }
            catch
            {
                Console.WriteLine("Culture info is not found.");
                return false;
            }
            myLanguage = culture;

            System.Collections.ObjectModel.ReadOnlyCollection<InstalledVoice> voices = m_tts.GetInstalledVoices(culture);
            if (voices.Count > 0)
                m_tts.SelectVoice(voices.First().VoiceInfo.Name);

            m_reco = new System.Speech.Recognition.SpeechRecognitionEngine(culture);

            m_reco.SetInputToDefaultAudioDevice();
            GrammarBuilder dictation = new GrammarBuilder();
            dictation.Culture = myLanguage;
            dictation.AppendDictation();
            m_grammar_dictation = new Grammar(dictation);

            m_reco.LoadGrammar(m_grammar_dictation);

            m_reco_continuous.RecognizeCompleted -= onContinuousRecognitionResult;
            m_reco_continuous.RecognizeAsyncCancel();
            //m_reco_continuous.RecognizeAsyncStop();
            m_reco_continuous = new SpeechRecognitionEngine(culture);
            m_reco_continuous.SetInputToDefaultAudioDevice();
            m_grammar_continuous.Culture = culture;
            m_reco_continuous.LoadGrammar(new Grammar(m_grammar_continuous));
            m_reco_continuous.RecognizeCompleted += onContinuousRecognitionResult;
            m_reco_continuous.RecognizeAsync();

            m_grammarManager.SetLanguage(cultureName);

            Console.WriteLine("The culture has been set to " + cultureName);
            return true;
        }
Code example #14
        public Jarvis()
        {
            modules = new LinkedList<IJModule>();
            /*************** IJModule Instantiation Stuff ****************/
            modules.AddLast(new MusicControl(preferences.mediaplayerprocess, preferences.initialvolume, preferences.volumeincrements));
            if (preferences.usegooglevoice)
                modules.AddLast(new GoogleVoice(preferences.googleemail, preferences.googlepassword, preferences.googleaddressbook));
            if (preferences.facebookrssfeed != null)
                modules.AddLast(new Facebook(preferences.facebookrssfeed));
            if (preferences.usegooglecalendar)
                modules.AddLast(new GoogleCalendar(preferences.googleemail, preferences.googlepassword, preferences.googlecalendaralerttime));
            alertThread = new Thread(new ThreadStart(alertFunction));
            alertThread.Name = "Alert Thread";
            alertThread.Start();

            /****************Get Grammar From Modules*********************/
            var grammars = new LinkedList<Microsoft.Speech.Recognition.Grammar>();
            foreach (IJModule module in modules)
            {
                if(module.getGrammarFile() != null)
                {
                    var gb = new Microsoft.Speech.Recognition.GrammarBuilder();
                    gb.AppendRuleReference("file://" + System.Environment.CurrentDirectory + "\\" + module.getGrammarFile());
                    Console.WriteLine("file://"+System.Environment.CurrentDirectory+"\\" + module.getGrammarFile());
                    grammars.AddLast(new Microsoft.Speech.Recognition.Grammar(gb));
                }
            }
            
            /************ Speech Recognition Stuff **********************/
            
            dictation = new System.Speech.Recognition.SpeechRecognitionEngine();
            dictation.SetInputToDefaultAudioDevice();
            dictation.LoadGrammar(new DictationGrammar());
            dictation.SpeechRecognized += SreSpeechRecognized;
            
            sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();

            if (sensor == null)
            {
                Console.WriteLine(
                        "No Kinect sensors are attached to this computer or none of the ones that are\n" +
                        "attached are \"Connected\".\n" +
                        "Press any key to continue.\n");

                Console.ReadKey(true);
                return;
            }

            sensor.Start();

            KinectAudioSource source = sensor.AudioSource;

            source.EchoCancellationMode = EchoCancellationMode.CancellationOnly; 
            source.AutomaticGainControlEnabled = false; 

            Microsoft.Speech.Recognition.RecognizerInfo ri = GetKinectRecognizer();
            if (ri == null)
            {
                Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                return;
            }
            Debug.WriteLine(ri.Id);

            int wait = 4;
            while (wait > 0)
            {
                Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--);
                Thread.Sleep(1000);
            }
            //sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
            sre = new Microsoft.Speech.Recognition.SpeechRecognitionEngine(ri.Id);

            foreach (Microsoft.Speech.Recognition.Grammar g in grammars)
            {
                sre.LoadGrammar(g);
            }
            sre.SpeechRecognized += SreSpeechRecognized;

            using (Stream s = source.Start())
            {
                sre.SetInputToAudioStream(
                    s, new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(Microsoft.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                Console.WriteLine("Recognizing speech. Say: 'red', 'green' or 'blue'. Press ENTER to stop");
                sre.RecognizeAsync(Microsoft.Speech.Recognition.RecognizeMode.Multiple);

                Console.ReadLine();
                Console.WriteLine("Stopping recognizer ...");
                sre.RecognizeAsyncStop();
            }

            source.Stop();
            alertThread.Abort();
        }
Code example #15
        public void InitializeEmulator(Mode commandMode)
        {
            SpeechProcessingException = null;
            try
            {
                var builder = new GrammarBuilder();
                builder.AppendDictation();

                _recognizer = new System.Speech.Recognition.SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"));
                _recognizer.RequestRecognizerUpdate();
                _recognizer.LoadGrammar(new DictationGrammar());
                _recognizer.LoadGrammar(GetSpellingGrammar());
                _recognizer.LoadGrammar(GetWebSiteNamesGrammar());
                _recognizer.SpeechRecognized += recognizer_SpeechRecognized;

                CommandMode = commandMode;
            }
            catch (Exception ex)
            {
                Log.ErrorLog(ex);
                throw;
            }
        }