Example No. 1
        public void initRS()
        {
            try
            {
                SpeechRecognitionEngine sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));

                var words = new Choices();
                words.Add("Hello");
                words.Add("Jump");
                words.Add("Left");
                words.Add("Right");

                var gb = new GrammarBuilder();
                gb.Culture = new System.Globalization.CultureInfo("en-US");
                gb.Append(words);
                Grammar g = new Grammar(gb);

                sre.LoadGrammar(g);
                
                sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
                sre.SetInputToDefaultAudioDevice();
                sre.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception e)
            {
                label1.Text = "init RS Error : " + e.ToString();
            }
        }
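The sre_SpeechRecognized handler wired above is not part of the snippet. A minimal sketch, assuming the same WinForms label1 used in the catch block:

        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Recognition events arrive on a background thread, so marshal to the UI thread.
            BeginInvoke((Action)(() => label1.Text = e.Result.Text));
        }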
Example No. 2
    /*
     * SpeechRecognizer
     *
     * @param GName - grammar file name
     * @param minConfidence - confidence rejection threshold passed to the engine
     */
    public SpeechRecognizer(string GName, int minConfidence)
    {
        //creates the speech recognizer engine
        sr = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-PT"));
        sr.SetInputToDefaultAudioDevice();
        Console.WriteLine("confiança : " + minConfidence);
        sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", minConfidence);

        Grammar gr = null;

        //verifies the file exists and loads the grammar file; otherwise falls back to a default dictation grammar
        if (System.IO.File.Exists(GName))
        {
            gr = new Grammar(GName);
            gr.Enabled = true;
        }
        else
        {
            Console.WriteLine("Can't read grammar file, loading default dictation grammar");
            gr = new DictationGrammar(); // avoids passing null to LoadGrammar below
        }

        //load Grammar to speech engine
        sr.LoadGrammar(gr);

        //assigns a method, to execute when speech is recognized
        sr.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);

        //assigns a method, to execute when speech is NOT recognized
        sr.SpeechRecognitionRejected +=
          new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejected);

        // Start asynchronous, continuous speech recognition.
        sr.RecognizeAsync(RecognizeMode.Multiple);
    }
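Neither handler body appears in this snippet. A minimal sketch of both, assuming console logging like the constructor's own output:

    void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
    {
        Console.WriteLine("recognized: {0} (confidence {1:F2})", e.Result.Text, e.Result.Confidence);
    }

    void SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
    {
        // e.Result can still carry the engine's best low-confidence guess.
        Console.WriteLine("rejected: {0}", e.Result != null ? e.Result.Text : "(no result)");
    }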
Example No. 3
        public MainWindow()
        {
            InitializeComponent();

            var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
            //config.Populate();
            config.Save();
            var engine = new SpeechRecognitionEngine();

            var builder = new GrammarBuilder();
            builder.Append( "tag" );
            builder.Append( new Choices( "baron", "dragon" ) );

            engine.RequestRecognizerUpdate();
            engine.LoadGrammar( new Grammar( builder ) );

            engine.SpeechRecognized += engine_SpeechRecognized;

            engine.SetInputToDefaultAudioDevice();
            engine.RecognizeAsync( RecognizeMode.Multiple );

            CompositionTarget.Rendering += CompositionTarget_Rendering;

            this.DataContext = this;
        }
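engine_SpeechRecognized is not shown. Because the grammar is the fixed word "tag" followed by one choice, a hypothetical handler can read the objective from the second recognized word:

        void engine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            string objective = e.Result.Words[1].Text; // "baron" or "dragon"
            Console.WriteLine("Tagged: " + objective);
        }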
Example No. 4
        private string Transcribe(MemoryStream audioFile)
        {
            using (var recognizer = new SpeechRecognitionEngine())
            {
                // Create and load a grammar.
                var dictation = new DictationGrammar
                {
                    Name = "Dictation Grammar"
                };

                recognizer.LoadGrammar(dictation);

                // Configure the input to the recognizer.
                recognizer.SetInputToWaveStream(audioFile);

                // Attach event handlers for the results of recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
                recognizer.RecognizeCompleted +=
                  new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

                // Perform recognition on the entire file.
                Console.WriteLine("Starting asynchronous recognition...");
                completed = false;
                recognizer.RecognizeAsync(RecognizeMode.Single);

                // Busy-wait until RecognizeCompleted fires and sets the flag.
                while (!completed)
                {
                    System.Threading.Thread.Sleep(50); // avoid pegging a CPU core
                }
            }
            return TranscribedText;
        }
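The snippet depends on a completed flag and a TranscribedText field maintained by the handlers. A minimal sketch of those members (names taken from the code above, bodies assumed):

        private bool completed;
        private string TranscribedText = string.Empty;

        void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            TranscribedText += e.Result.Text + " ";
        }

        void recognizer_RecognizeCompleted(object sender, RecognizeCompletedEventArgs e)
        {
            completed = true; // releases the wait loop in Transcribe
        }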
Example No. 5
        public void StartListening()
        {
            if (null != _ri)
            {
                _speechEngine = new SpeechRecognitionEngine(_ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    _speechEngine.LoadGrammar(g);
                }

                _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
                _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

                // let the convertStream know speech is going active
                _convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                _speechEngine.SetInputToAudioStream(
                    _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);

                //_isInTrainingMode = true;
            }
            //else
            //    throw new InvalidOperationException("RecognizerInfo cannot be null");
        }
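_ri is a RecognizerInfo obtained elsewhere. The official Kinect SDK samples typically find it as below; a sketch under that assumption:

        private static RecognizerInfo TryGetKinectRecognizer()
        {
            foreach (RecognizerInfo recognizer in SpeechRecognitionEngine.InstalledRecognizers())
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);
                // Pick the recognizer the Kinect language pack registers for en-US.
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase)
                    && "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }
            return null;
        }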
Example No. 6
        public void InitializeSpeechRecognitionEngine(String filePath)
        {
            MySpeechRecognitionEngine = new SpeechRecognitionEngine();
            //MySpeechRecognitionEngine.SetInputToDefaultAudioDevice();

            MySpeechRecognitionEngine.UnloadAllGrammars();

            try
            {

                MySpeechRecognitionEngine.SetInputToWaveFile(filePath);

                Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));

                MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());

                // Attach handlers before starting recognition so no early events are missed.
                MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;

                MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEnginee_SpeechRecognized;

                MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEnginee_AudioStateChanged;

                MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;

                MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);

            }
        }
Example No. 7
        public void StartListening()
        {
            if (null != _ri)
            {
                _speechEngine = new SpeechRecognitionEngine(_ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    _speechEngine.LoadGrammar(g);
                }

                _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
                _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

                // let the convertStream know speech is going active
                _convertStream.SpeechActive = true;

     
                _speechEngine.SetInputToAudioStream(
                    _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);

     
            }
        }
Example No. 8
        /// <summary>
        /// Initializes a new instance of the <see cref="MainWindow"/> class.
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();

            try
            {
                // create the engine
                //speechRecognitionEngine = createSpeechEngine("de-DE");
                //speechRecognitionEngine = createSpeechEngine(CultureInfo.CurrentCulture.Name);
                speechRecognitionEngine = createSpeechEngine("es-ES");

                // hook to events
                speechRecognitionEngine.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(engine_AudioLevelUpdated);

                // Create and load a dictation grammar.
                speechRecognitionEngine.LoadGrammar(new DictationGrammar());

                speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);

                // use the system's default microphone
                speechRecognitionEngine.SetInputToDefaultAudioDevice();

                // start listening
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message, "Voice recognition failed");
            }
        }
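createSpeechEngine is a helper from the surrounding project. A plausible sketch, hypothetical rather than the project's actual code: pick an installed recognizer matching the requested culture and fall back to the system default:

        private SpeechRecognitionEngine createSpeechEngine(string preferredCulture)
        {
            foreach (RecognizerInfo info in SpeechRecognitionEngine.InstalledRecognizers())
            {
                if (info.Culture.Name == preferredCulture)
                    return new SpeechRecognitionEngine(info);
            }
            // No recognizer for the requested culture; use the default one.
            return new SpeechRecognitionEngine();
        }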
Example No. 9
        public VoiceRecognizer()
        {
            try
            {
                // Create a new SpeechRecognitionEngine instance.
                voiceEngine = new SpeechRecognitionEngine(new CultureInfo("en-US"));

                // Setup the audio device
                voiceEngine.SetInputToDefaultAudioDevice();

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(CommandPool.BuildSrgsGrammar());
                voiceEngine.LoadGrammar(g);

                //voiceEngine.EndSilenceTimeout = new TimeSpan(0, 0, 1);

                // Register a handler for the SpeechRecognized event
                voiceEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);

                // Start listening in multiple mode (that is, don't quit after a single recognition)
                voiceEngine.RecognizeAsync(RecognizeMode.Multiple);
                IsSetup = true;
            }
            catch(Exception e)
            {
                IsSetup = false;
            }
        }
Example No. 10
        static void Main(string[] args)
        {
            try
            {
                ss.SetOutputToDefaultAudioDevice();
                Console.WriteLine("\n(Speaking: I am awake)");
                ss.Speak("I am awake");

                CultureInfo ci = new CultureInfo("en-us");
                sre = new SpeechRecognitionEngine(ci);
                sre.SetInputToDefaultAudioDevice();
                sre.SpeechRecognized += sre_SpeechRecognized;

                Choices ch_StartStopCommands = new Choices();
                ch_StartStopCommands.Add("Alexa record");
                ch_StartStopCommands.Add("speech off");
                ch_StartStopCommands.Add("klatu barada nikto");
                GrammarBuilder gb_StartStop = new GrammarBuilder();
                gb_StartStop.Append(ch_StartStopCommands);
                Grammar g_StartStop = new Grammar(gb_StartStop);

                sre.LoadGrammarAsync(g_StartStop);
                sre.RecognizeAsync(RecognizeMode.Multiple); // keep recognizing until explicitly stopped

                while (done == false) { System.Threading.Thread.Sleep(50); } // wait until a handler sets done

                Console.WriteLine("\nHit <enter> to close shell\n");
                Console.ReadLine();
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                Console.ReadLine();
            }
        }
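The done flag is presumably flipped inside sre_SpeechRecognized. A hypothetical handler consistent with the commands above:

        static void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            Console.WriteLine("Heard: " + e.Result.Text);
            if (e.Result.Text == "speech off")
            {
                done = true;                // releases the wait loop in Main
                sre.RecognizeAsyncCancel(); // stop recognition immediately
            }
        }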
Example No. 11
        private void btn_connect_Click(object sender, EventArgs e)
        {
            ushort port;
            if (!ushort.TryParse(txt_port.Text, out port))
            {
                MessageBox.Show("Invalid port number");
                return;
            }
            try
            {
                current_player = new AssPlayer(players[cmb_players.SelectedItem.ToString()], txt_host.Text, port);
            }
            catch(Exception ex)
            {
                MessageBox.Show("Could not connect: " + ex.Message);
                return;
            }
            voice_threshold = (float)num_voice_threshold.Value;

            recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
            Grammar player_grammar = prepare_grammar(current_player.commands);
            recognizer.LoadGrammar(player_grammar);
            recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.RecognizeAsync(RecognizeMode.Multiple);

            taskbar_icon.Visible = true;
            Hide();
        }
Example No. 12
File: VI.cs Project: jjonj/AVPI
        public void load_listen(VI_Profile profile, VI_Settings settings, ListView statusContainer)
        {
            this.profile = profile;
            this.settings = settings;
            this.statusContainer = statusContainer;

            vi_syn = profile.synth;
            vi_syn.SelectVoice(settings.voice_info);
            vi_sre = new SpeechRecognitionEngine(settings.recognizer_info);

            GrammarBuilder phrases_grammar = new GrammarBuilder();
            List<string> glossary = new List<string>();

            foreach (VI_Phrase trigger in profile.Profile_Triggers)
            {
                glossary.Add(trigger.value);
            }
            if (glossary.Count == 0)
            {
                MessageBox.Show("You need to add at least one Trigger");
                return;
            }
            phrases_grammar.Append(new Choices(glossary.ToArray()));

            vi_sre.LoadGrammar(new Grammar(phrases_grammar));
            //set event function
            vi_sre.SpeechRecognized += phraseRecognized;
            vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
            vi_sre.SetInputToDefaultAudioDevice();
            vi_sre.RecognizeAsync(RecognizeMode.Multiple);
        }
Example No. 13
        /// <summary>
        /// Starts up the VoiceControl class.
        /// </summary>
        public VoiceControl()
        {
            kinectSensor = KinectSensor.KinectSensors[0];

            TransformSmoothParameters smoothingParam = new TransformSmoothParameters
            {
                Smoothing = 0.5f,
                Correction = 0.5f,
                Prediction = 0.5f,
                JitterRadius = 0.05f,
                MaxDeviationRadius = 0.04f
            };

            kinectSensor.SkeletonStream.Enable(smoothingParam);

            kinectSensor.SkeletonFrameReady += getSkeleton;

            sre = CreateSpeechRecognizer();

            kinectSensor.Start();

            sre.SetInputToAudioStream(kinectSensor.AudioSource.Start(),
                 new SpeechAudioFormatInfo(
                 EncodingFormat.Pcm, 16000, 16, 1,
                 32000, 2, null));

            sre.RecognizeAsync(RecognizeMode.Multiple);

            reset();
        }
Example No. 14
        public void Start()
        {
            try
            {
                // create the engine
                speechRecognitionEngine = createSpeechEngine("en-US");

                // hook to event
                speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);

                // load dictionary
                loadGrammarAndCommands();

                // use the system's default microphone
                speechRecognitionEngine.SetInputToDefaultAudioDevice();

                // start listening
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

                //Create the speech synthesizer
                speechSynthesizer = new SpeechSynthesizer();
                speechSynthesizer.Rate = -5;

            }
            catch (Exception ex)
            {
                Console.WriteLine("Voice recognition failed " + ex.Message);
            }

            //Keeps the command prompt open until the user says "quit"
            while(lastCommand.ToLower() != "quit")
            {
                System.Threading.Thread.Sleep(100); // avoid pegging a CPU core while waiting
            }
        }
Example No. 15
        private void worker_DoWork(object sender, DoWorkEventArgs e)
        {
            Thread.CurrentThread.Name = "Kinect audio thread";
            if(_device.Type == DeviceType.KINECT_1)
            {
                SpeechRecognizer = new SpeechRecognitionEngine(recognizerInfo.Id);
                SpeechRecognizer.LoadGrammar(GetCurrentGrammar());
                SpeechRecognizer.SpeechRecognized += SreSpeechRecognized;
                SpeechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
                SpeechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                //set sensor audio source to variable
                audioSource = _device.sensor.AudioSource;
                //Set the beam angle mode - the direction the audio beam is pointing
                //we want it to be set to adaptive
                audioSource.BeamAngleMode = BeamAngleMode.Adaptive;
                //start the audiosource
                var kinectStream = audioSource.Start();
                //configure incoming audio stream
                SpeechRecognizer.SetInputToAudioStream(
                    kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                //make sure the recognizer does not stop after completing
                SpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
                //reduce background and ambient noise for better accuracy
                _device.sensor.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
                _device.sensor.AudioSource.AutomaticGainControlEnabled = false;
                audioStarted = true;
            }
            Console.WriteLine("END OF WORKER AUDIO");
        }
Example No. 16
        public VoiceInput()
        {
            recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US"));

            recognizer.SetInputToDefaultAudioDevice();
            Choices choices = new Choices();
            foreach (String command in commands)
            {
                choices.Add(command);
            }
            choices.Add(startListening);
            choices.Add(stopListening);
            choices.Add(stop);
            /*choices.Add("Close");
            choices.Add("Left");
            choices.Add("Right");
            choices.Add("Tilt Left");
            choices.Add("Tilt Right");
            choices.Add("Move");
            choices.Add("Back");
            choices.Add("Move Up");
            choices.Add("Down");
            choices.Add("Exit");
            choices.Add("Stop");
            choices.Add("Start Listening");
            choices.Add("Stop Listening");*/
            Grammar grammar = new Grammar(new GrammarBuilder(choices));
            recognizer.LoadGrammar(grammar);

            recognizer.SpeechRecognized +=
                new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
            recognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
Example No. 17
        public SpeechInput(Settings settings, MusicList musicCollection, string playerPath) {
            ModeTimer = new CommandModeTimer();
            RNG = new Random();
            AppSettings = settings;
            SRecognize = new SpeechRecognitionEngine();
            Player = new Aimp3Player(playerPath);
            if(musicCollection != null) {
                MusicCollection = musicCollection;
            } else {
                throw new ArgumentNullException(nameof(musicCollection));
            }
            InitCommands();

            try {
                LoadGrammar();

                // Attach the handler before starting recognition so no results are missed.
                SRecognize.SpeechRecognized += SRecognize_SpeechRecognized;

                SRecognize.SetInputToDefaultAudioDevice();
                SRecognize.RecognizeAsync(RecognizeMode.Multiple);
            } catch(Exception e) {
                System.Windows.Forms.MessageBox.Show("Error while starting SpeechInput\n" + e.ToString());
            }

            MusicCollection.SongListUpdated += (s, a) => LoadGrammar();
        }
Example No. 18
        public static SpeechRecognitionEngine InitializeSRE()
        {
            //Create the speech recognition engine
            SpeechRecognitionEngine sre = new SpeechRecognitionEngine();
            //Set the audio device to the OS default
            sre.SetInputToDefaultAudioDevice();

            // Reset the Grammar
            sre.UnloadAllGrammars();

            // Load the plugins
            LoadPlugins();

            //Load all of the grammars
            foreach (IJarvisPlugin plugin in _plugins)
                sre.LoadGrammar(plugin.getGrammar());

            //Add an event Handler before starting recognition
            sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);

            //Set the recognition mode and start listening.
            //Note: no using block here; disposing the engine before returning it
            //would hand the caller a disposed object.
            sre.RecognizeAsync(RecognizeMode.Multiple);

            while (!Jarvis.JarvisMain.stop)
            {
                System.Threading.Thread.Sleep(50); // wait for the stop flag
            }
            return sre;
        }
Example No. 19
        public void writetofile(){
            SpeechRecognitionEngine rec = new SpeechRecognitionEngine();
            rec.SetInputToDefaultAudioDevice();
            rec.LoadGrammar(new DictationGrammar());
            rec.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(rec_SpeechRecognized);
            rec.RecognizeAsync(RecognizeMode.Single);

        }
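rec_SpeechRecognized is not shown; given the method name, a plausible sketch appends each result to a text file (the file name here is an assumption). Note that rec is a local variable, so a real application would usually keep the engine in a field to keep it alive:

        static void rec_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            System.IO.File.AppendAllText("transcript.txt", e.Result.Text + Environment.NewLine);
        }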
Example No. 20
        static void Main()
        {
            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false;
                source.SystemMode = SystemMode.OptibeamArrayOnly;

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {
                    //declare commands to be used
                    var commands = new Choices();
                    commands.Add("activate");
                    commands.Add("off");
                    commands.Add("open");
                    commands.Add("manual");
                    commands.Add("hold");
                    commands.Add("land");
                    commands.Add("stabilize");

                    var gb = new GrammarBuilder {Culture = ri.Culture};
                    //Specify the culture to match the recognizer in case we are running in a different culture.                                 
                    gb.Append(commands);

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechRecognitionRejected += SreSpeechRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

                        Console.WriteLine("Recognizing... Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();
                    }
                }
            }
        }
Example No. 21
 public Recognizer()
 {
     gameManager = new GameManager();
     Engine = new SpeechRecognitionEngine();
     Engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
     Engine.SetInputToDefaultAudioDevice();
     AddGrammars(Engine);
     Engine.RecognizeAsync(RecognizeMode.Multiple);
 }
Example No. 22
        static void Main(string[] args)
        {                    
            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {                
                    var colors = new Choices();
                    colors.Add("red");
                    colors.Add("green");
                    colors.Add("blue");

                    var gb = new GrammarBuilder();
                    //Specify the culture to match the recognizer in case we are running in a different culture.                                 
                    gb.Culture = ri.Culture;
                    gb.Append(colors);
                    

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);                    

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechHypothesized += SreSpeechHypothesized;
                    sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

						Console.WriteLine("Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();                       
                    }
                }
            }
        }
Example No. 23
        public void StartSpeechRecognition()
        {
            speechRecognizer = new SpeechRecognitionEngine();
            speechRecognizer.SetInputToDefaultAudioDevice();
            speechRecognizer.LoadGrammar(GetGrammar());


            speechRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechRecognizer_SpeechRecognized);
            speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
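GetGrammar is defined elsewhere in the project. A minimal stand-in that would make the snippet runnable (the phrases are placeholders, not the project's real commands):

        private Grammar GetGrammar()
        {
            var builder = new GrammarBuilder(new Choices("start", "stop", "pause"));
            return new Grammar(builder);
        }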
Example No. 24
 public Listening(SpeechRecognized recDelegate)
 {
     ear = new SpeechRecognitionEngine(System.Globalization.CultureInfo.CurrentCulture);
     createGrammars();
     State = ListenerState.Default;
     ear.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recDelegate);
     ear.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(ear_SpeechDetected);
     ear.SetInputToDefaultAudioDevice();
     ear.RecognizeAsync(RecognizeMode.Multiple);
 }
Example No. 25
 public SpeechRecognizer(List<string> phrases, string wavFilePath)
 {
     _phrases = phrases;
     _speechRecognitionEngine = CreateSpeechRecognizer();
     if (string.IsNullOrEmpty(wavFilePath))
         _speechRecognitionEngine.SetInputToDefaultAudioDevice();
     else
         _speechRecognitionEngine.SetInputToWaveFile(wavFilePath);
     _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
 }
Example No. 26
 public void StartSpeechRecognition(Grammar grammar, System.EventHandler<System.Speech.Recognition.SpeechRecognizedEventArgs> speechRecognised, EventHandler<SpeechHypothesizedEventArgs> speechHypothesised, EventHandler<SpeechRecognitionRejectedEventArgs> speechRejected)
 {
     _speechRecogniser = CreateSpeechRecognizer(speechRecognised, speechHypothesised, speechRejected, grammar);
     _controller.Sensor.Start();
     _controller.AudioSource.BeamAngleMode = BeamAngleMode.Adaptive;
     var audioStream = _controller.AudioSource.Start();
     _speechRecogniser.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
     _speechRecogniser.RecognizeAsync(RecognizeMode.Multiple);
     _controller.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
     _controller.AudioSource.AutomaticGainControlEnabled = false;
 }
Example No. 27
 public void listen()
 {
     main_sre = new SpeechRecognitionEngine();
     GrammarBuilder grammarBuilder = new GrammarBuilder();
     grammarBuilder.Append(new Choices(glossary));
     main_sre.LoadGrammar(new Grammar(grammarBuilder));
     //main_sre.LoadGrammar(new DictationGrammar()); // could be used instead to detect arbitrary words (accuracy is so-so)
     main_sre.SpeechRecognized += speechRecognitionWithDictationGrammar_SpeechRecognized;
     main_sre.SetInputToDefaultAudioDevice();
     main_sre.RecognizeAsync(RecognizeMode.Multiple);
 }
Example No. 28
 // start listening with the Enable button
 private void btnEnable_Click(object sender, EventArgs e)
 {
     recEngine.RecognizeAsync(RecognizeMode.Multiple);
     btnDisable.Enabled = true;
     btnEnable.Enabled  = false;
 }
Example No. 29
        public Form1()
        {
            Thread.CurrentThread.CurrentCulture   = new CultureInfo("en-GB");
            Thread.CurrentThread.CurrentUICulture = new CultureInfo("en-GB");
            CultureInfo culture = new CultureInfo("en-GB");

            recognitionEngine = new SpeechRecognitionEngine(culture);
            recognitionEngine.SetInputToDefaultAudioDevice();

            recognitionEngine.SpeechRecognized += (s, args) =>
            {
                string line     = string.Empty;
                float  security = 1.0f;
                foreach (RecognizedWordUnit word in args.Result.Words)
                {
                    if (word.Confidence > 0.01f)
                    {
                        line    += word.Text + " ";
                        security = security * word.Confidence;
                    }
                }

                if (line.Contains("Alice"))
                {
                    var command = Regex.Replace(line, "Alice", string.Empty).Trim();

                    var p = new Process();

                    switch (command)
                    {
                    case "mute":
                        if (reader != null)
                        {
                            reader.Dispose();
                        }
                        break;

                    case "run notepad":
                        reader.SpeakAsync("Starting notepad");
                        p = Process.Start("notepad.exe");
                        processesRunning.Add(
                            new ProcessObject {
                            ProcessName = "Notepad", ProcessId = p.Id, Started = DateTime.Now
                        });
                        break;

                    case "run calculator":
                        reader.SpeakAsync("Starting calculator");
                        p = Process.Start("calc.exe");
                        processesRunning.Add(
                            new ProcessObject {
                            ProcessName = "Calculator", ProcessId = p.Id, Started = DateTime.Now
                        });
                        break;

                    case "run paint":
                        reader.SpeakAsync("Starting paint");
                        p = Process.Start("mspaint.exe");
                        processesRunning.Add(
                            new ProcessObject {
                            ProcessName = "Paint", ProcessId = p.Id, Started = DateTime.Now
                        });
                        break;

                    case "run firefox":
                        reader.SpeakAsync("Starting firefox");
                        p = Process.Start(@"C:\Program Files (x86)\Mozilla Firefox\firefox.exe");
                        processesRunning.Add(
                            new ProcessObject {
                            ProcessName = "Firefox", ProcessId = p.Id, Started = DateTime.Now
                        });
                        break;

                    case "run browser":
                        reader.SpeakAsync("Starting default browser");
                        string defaultBrowserPath = GetDefaultBrowserPath();
                        p = Process.Start(defaultBrowserPath);
                        processesRunning.Add(
                            new ProcessObject {
                            ProcessName = "Browser", ProcessId = p.Id, Started = DateTime.Now
                        });
                        break;

                    case "kill notepad":
                        reader.SpeakAsync("Killing notepad");
                        try
                        {
                            ProcessObject matches =
                                processesRunning.Where(item => item.ProcessName == "Notepad").OrderByDescending(
                                    process => process.Started).First();
                            foreach (Process proc in Process.GetProcesses())
                            {
                                if (proc.Id == matches.ProcessId)
                                {
                                    proc.Kill();
                                    processesRunning.RemoveAll(x => x.ProcessId == matches.ProcessId);
                                    reader.SpeakAsync("Done");
                                }
                            }
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill notepad");
                        }
                        break;

                    case "kill all notepad":
                        reader.SpeakAsync("Killing all notepads");
                        try
                        {
                            List <ProcessObject> matches =
                                this.processesRunning.Where(proc => proc.ProcessName == "Notepad").ToList();

                            foreach (Process proc in Process.GetProcesses())
                            {
                                foreach (ProcessObject obj in matches)
                                {
                                    if (proc.Id == obj.ProcessId)
                                    {
                                        proc.Kill();
                                        processesRunning.RemoveAll(x => x.ProcessId == obj.ProcessId);
                                    }
                                }
                            }
                            reader.SpeakAsync("Done");
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill all notepads");
                        }
                        break;

                    case "kill calculator":
                        reader.SpeakAsync("Killing calculator");
                        try
                        {
                            ProcessObject matches =
                                processesRunning.Where(item => item.ProcessName == "Calculator").
                                OrderByDescending(process => process.Started).First();
                            foreach (Process proc in Process.GetProcesses())
                            {
                                if (proc.Id == matches.ProcessId)
                                {
                                    proc.Kill();
                                    processesRunning.RemoveAll(x => x.ProcessId == matches.ProcessId);
                                    reader.SpeakAsync("Done");
                                }
                            }
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill calculator");
                        }
                        break;

                    case "kill all calculator":
                        reader.SpeakAsync("Killing all calculators");
                        try
                        {
                            List <ProcessObject> matches =
                                this.processesRunning.Where(proc => proc.ProcessName == "Calculator").ToList();
                            foreach (Process proc in Process.GetProcesses())
                            {
                                foreach (ProcessObject obj in matches)
                                {
                                    if (proc.Id == obj.ProcessId)
                                    {
                                        proc.Kill();
                                        processesRunning.RemoveAll(x => x.ProcessId == obj.ProcessId);
                                    }
                                }
                            }
                            reader.SpeakAsync("Done");
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill all calculators");
                        }
                        break;

                    case "kill paint":
                        reader.SpeakAsync("Killing paint");
                        try
                        {
                            ProcessObject matches =
                                processesRunning.Where(item => item.ProcessName == "Paint").OrderByDescending(
                                    process => process.Started).First();
                            foreach (Process proc in Process.GetProcesses())
                            {
                                if (proc.Id == matches.ProcessId)
                                {
                                    proc.Kill();
                                    processesRunning.RemoveAll(x => x.ProcessId == matches.ProcessId);
                                    reader.SpeakAsync("Done");
                                }
                            }
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill paint");
                        }
                        break;

                    case "kill all paint":
                        reader.SpeakAsync("Killing all paints");
                        try
                        {
                            List <ProcessObject> matches =
                                this.processesRunning.Where(proc => proc.ProcessName == "Paint").ToList();

                            foreach (Process proc in Process.GetProcesses())
                            {
                                foreach (ProcessObject obj in matches)
                                {
                                    if (proc.Id == obj.ProcessId)
                                    {
                                        proc.Kill();
                                        processesRunning.RemoveAll(x => x.ProcessId == obj.ProcessId);
                                    }
                                }
                            }
                            reader.SpeakAsync("Done");
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill all paints");
                        }
                        break;

                    // Firefox only uses one processid
                    case "kill firefox":
                    case "kill all firefox":
                        reader.SpeakAsync("Killing firefox");
                        try
                        {
                            ProcessObject matches =
                                processesRunning.Where(item => item.ProcessName == "Firefox").First();
                            foreach (Process proc in Process.GetProcesses())
                            {
                                if (proc.Id == matches.ProcessId)
                                {
                                    proc.Kill();
                                    processesRunning.RemoveAll(x => x.ProcessName == matches.ProcessName);
                                    reader.SpeakAsync("Done");
                                }
                            }
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill firefox");
                        }
                        break;

                    case "kill browser":
                        reader.SpeakAsync("Killing default browser");
                        try
                        {
                            ProcessObject matches =
                                processesRunning.Where(item => item.ProcessName == "Browser").First();
                            foreach (Process proc in Process.GetProcesses())
                            {
                                if (proc.Id == matches.ProcessId)
                                {
                                    proc.Kill();
                                    processesRunning.RemoveAll(x => x.ProcessName == matches.ProcessName);
                                    reader.SpeakAsync("Done");
                                }
                            }
                        }
                        catch
                        {
                            reader.SpeakAsync("Could not kill default browser");
                        }
                        break;
                    }

                    richTextBox3.Text += line + "certainty: " + security * 100 + "%";
                    richTextBox3.Text += Environment.NewLine;
                    var listOfProcesses = "";
                    foreach (ProcessObject pr in processesRunning)
                    {
                        listOfProcesses += pr.ProcessName + " " + pr.ProcessId + Environment.NewLine;
                    }
                    richTextBox2.Text = "Programs in list: " + Environment.NewLine + listOfProcesses;
                }
            };
            recognitionEngine.LoadGrammar(CreateGrammarObject());

            InitializeComponent();

            // Get all installed applications
            GetInstalledApplications();

            recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
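GetDefaultBrowserPath is a helper from the same form. A common way to implement it reads the default HTTP handler's launch command from the registry; a sketch under that assumption:

        private static string GetDefaultBrowserPath()
        {
            using (var key = Microsoft.Win32.Registry.ClassesRoot.OpenSubKey(@"http\shell\open\command"))
            {
                // Default value looks like: "C:\...\browser.exe" -osint -url "%1"
                string command = (string)key.GetValue(null);
                return command.Split('"')[1]; // assumes the executable path is quoted
            }
        }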
Example No. 30
        //        [STAThread]
        static void Main(string[] args)
        {
            //Process.Start(@"C:/Program Files (x86)/Google/Chrome/Application/chrome.exe -kiosk https://search.naver.com/search.naver?ie=utf8&sm=stp_hty&where=se&query=" + givenString);
            music                   = 0;
            document                = 0;
            map                     = 0;
            movie                   = 0;
            movie_count             = 0;
            music_playing           = 0;
            Time                    = 0;
            search                  = 0;
            imgsearch               = 0;
            document_classification = 0;

            using (SpeechSynthesizer synth = new SpeechSynthesizer())
            {
                foreach (InstalledVoice voice in synth.GetInstalledVoices())
                {
                    VoiceInfo info         = voice.VoiceInfo;
                    string    AudioFormats = "";
                    foreach (SpeechAudioFormatInfo fmt in info.SupportedAudioFormats)
                    {
                        AudioFormats += String.Format("{0}\n",
                                                      fmt.EncodingFormat.ToString());
                    }

                    Console.WriteLine(" Name:          " + info.Name);
                    Console.WriteLine(" Culture:       " + info.Culture);
                    Console.WriteLine(" Age:           " + info.Age);
                    Console.WriteLine(" Gender:        " + info.Gender);
                    Console.WriteLine(" Description:   " + info.Description);
                    Console.WriteLine(" ID:            " + info.Id);
                    Console.WriteLine(" Enabled:       " + voice.Enabled);
                    if (info.SupportedAudioFormats.Count != 0)
                    {
                        Console.WriteLine(" Audio formats: " + AudioFormats);
                    }
                    else
                    {
                        Console.WriteLine(" No supported audio formats found");
                    }

                    string AdditionalInfo = "";
                    foreach (string key in info.AdditionalInfo.Keys)
                    {
                        AdditionalInfo += String.Format("  {0}: {1}\n", key, info.AdditionalInfo[key]);
                    }

                    Console.WriteLine(" Additional Info - " + AdditionalInfo);
                    Console.WriteLine();
                }
            }

            foreach (RecognizerInfo ri in SpeechRecognitionEngine.InstalledRecognizers())
            {
                Console.WriteLine(ri.Id + ": " + ri.Culture);
            }

            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine("SR_MS_ko-KR_TELE_11.0"))
            {
                Grammar grammar = new Grammar("computer.xml");

                recognizer.LoadGrammar(grammar);

                recognizer.SetInputToDefaultAudioDevice();
                recognizer.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);

                recognizer.RecognizeAsync(RecognizeMode.Multiple);
                window_form = new Form1();


                new Thread(new ThreadStart(showForm)).Start();
                new Thread(new ThreadStart(showWeb)).Start();
                while (true)
                {
                    Console.ReadLine();
                }
            }
        }
Example No. 31
        /// <summary>
        /// Constructor for voice recognition
        /// </summary>
        /// <param name="kinectSensor"></param>
        /// <param name="mainWindow"></param>
        public VoiceRecognition(KinectSensor kinectSensor, MainWindow mainWindow)
        {
            this.kinectSensor = kinectSensor;
            this.mainWindow   = mainWindow;

            // Grab the audio stream
            IReadOnlyList <AudioBeam> audioBeamList = kinectSensor.AudioSource.AudioBeams;
            Stream audioStream = audioBeamList[0].OpenInputStream();

            // Create the convert stream
            convertStream = new KinectAudioStream(audioStream);

            RecognizerInfo recognizerInfo = TryGetKinectRecognizer();

            if (recognizerInfo != null)
            {
                speechEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

                Choices indices = new Choices();
                // Build the phrase grammar with the recognizer's culture so they match.
                GrammarBuilder imageNavigationSpeech = new GrammarBuilder {
                    Culture = recognizerInfo.Culture
                };

                // Map the written-out numbers one..seventy-eight to integer semantic values.
                for (int i = 0; i < 78; i++)
                {
                    SemanticResultValue index = new SemanticResultValue(IntToWord.IntegerToWritten(i + 1), i + 1);
                    indices.Add(index);
                }

                imageNavigationSpeech.Append("go to image");
                imageNavigationSpeech.Append(new SemanticResultKey("imageNumber", indices));

                Grammar imageNavigationGrammar = new Grammar(imageNavigationSpeech);
                imageNavigationGrammar.Name = "ImageNavigation";

                speechEngine.LoadGrammar(imageNavigationGrammar);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    Grammar XMLGrammar = new Grammar(memoryStream);
                    XMLGrammar.Name = "XMLGrammar";
                    speechEngine.LoadGrammar(XMLGrammar);
                }

                speechEngine.SpeechRecognized          += SpeechRecognized;
                speechEngine.SpeechRecognitionRejected += SpeechRejected;

                // let the convertStream know speech is going active
                convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                speechEngine.SetInputToAudioStream(this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
        }
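The SemanticResultKey pays off in the handler: the recognized index arrives as a typed semantic value rather than text to parse. A hypothetical fragment of the SpeechRecognized handler wired above:

        private void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Grammar.Name == "ImageNavigation")
            {
                int imageNumber = (int)e.Result.Semantics["imageNumber"].Value;
                // e.g. mainWindow.NavigateToImage(imageNumber); -- assumed application call
            }
        }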
Example No. 32
        private void LoadSpeech()
        {
            try
            {
                engine = new SpeechRecognitionEngine(); // engine instance
                engine.SetInputToDefaultAudioDevice();  // microphone



                Choices cNumeros = new Choices();

                for (int i = 0; i <= 100; i++)
                {
                    cNumeros.Add(i.ToString());
                }



                Choices c_commandsOfSystem = new Choices();
                c_commandsOfSystem.Add(GrammRuleS.WhatTimesIS.ToArray());       // what time is it
                c_commandsOfSystem.Add(GrammRuleS.whatDateIS.ToArray());        // what date is it
                c_commandsOfSystem.Add(GrammRuleS.PamStartListening.ToArray()); // Pam start listening
                c_commandsOfSystem.Add(GrammRuleS.PamStopListening.ToArray());  // Pam stop listening
                c_commandsOfSystem.Add(GrammRuleS.MinimizeWindow.ToArray());    // minimize window
                c_commandsOfSystem.Add(GrammRuleS.normalWindow.ToArray());      // restore window to normal size
                c_commandsOfSystem.Add(GrammRuleS.ChangeVoice.ToArray());       // change the voice
                c_commandsOfSystem.Add(GrammRuleS.OpenProgram.ToArray());       // open browser



                //"pare de ouvir" -> "Pam"


                GrammarBuilder gb_commandsOfSystem = new GrammarBuilder();
                gb_commandsOfSystem.Append(c_commandsOfSystem);

                Grammar g_commandsOfSystem = new Grammar(gb_commandsOfSystem);
                g_commandsOfSystem.Name = "sys";

                GrammarBuilder gbNumeros = new GrammarBuilder();
                gbNumeros.Append(cNumeros); // e.g. "5 times 3"
                gbNumeros.Append(new Choices("vezes", "mais ", "menos", "por", "para")); // times, plus, minus, by, to
                gbNumeros.Append(cNumeros);

                Grammar gNumeros = new Grammar(gbNumeros);
                gNumeros.Name = "calc";

                engine.LoadGrammar(g_commandsOfSystem); // load the grammars
                engine.LoadGrammar(gNumeros);

                //engine.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(words))));

                engine.SpeechRecognized          += new EventHandler <SpeechRecognizedEventArgs>(Rec);
                engine.AudioLevelUpdated         += new EventHandler <AudioLevelUpdatedEventArgs>(Audiolevel);
                engine.SpeechRecognitionRejected += new EventHandler <SpeechRecognitionRejectedEventArgs>(rej);


                engine.RecognizeAsync(RecognizeMode.Multiple); // start recognition


                Speaker.Speak(" Olá Sid, No Que Posso Ajudar.");
            }
            catch (Exception ex)
            {
                MessageBox.Show("Ocorreu no LoadSpeech(): " + ex.Message);
            }
        }
Example No. 33
        private void backgroundWorker_DoWork(object sender, System.ComponentModel.DoWorkEventArgs e)
        {
            try
            {
                pTTS.SetOutputToDefaultAudioDevice();
                pTTS.Speak("Witam w pizzerii");
                CultureInfo ci = new CultureInfo("pl-PL");
                pSRE = new SpeechRecognitionEngine(ci);
                pSRE.SetInputToDefaultAudioDevice();
                // Attach the handler invoked when an utterance matching a grammar is recognized:
                pSRE.SpeechRecognized += PSRE_SpeechRecognized;
                // -------------------------------------------------------------------------
                // Grammar no. 1 - SYSTEM COMMANDS
                // Grammar no. 1 - define the commands:
                Choices stopChoice = new Choices();
                stopChoice.Add("Stop");
                stopChoice.Add("Pomoc");    // "help"
                stopChoice.Add("Dziękuję"); // "thank you"
                // Grammar no. 1 - define the grammar syntax:
                GrammarBuilder buildGrammarSystem = new GrammarBuilder();
                buildGrammarSystem.Append(stopChoice);
                // Grammar no. 1 - create the grammar:
                Grammar grammarSystem = new Grammar(buildGrammarSystem);


                Choices chSizes = new Choices();
                chSizes.Add(sizes);
                Choices chThickness = new Choices();
                chThickness.Add(thickness);
                Choices chDouble = new Choices();
                chDouble.Add(doubbler);
                Choices chAddons = new Choices();
                chAddons.Add(addons);


                GrammarBuilder grammarPizza = new GrammarBuilder();
                //grammarPizza.Append("Poproszę");
                grammarPizza.Append("Poproszę", 0, 1);
                grammarPizza.Append(new SemanticResultKey("size", chSizes), 0, 1);
                grammarPizza.Append("pizzę", 0, 1);

                grammarPizza.Append("na", 0, 1);
                grammarPizza.Append(new SemanticResultKey("thickness", chThickness), 0, 1);
                grammarPizza.Append("cieście", 0, 1);

                grammarPizza.Append("z", 0, 1);

                grammarPizza.Append(new SemanticResultKey("dA1", chDouble), 0, 1);
                grammarPizza.Append(new SemanticResultKey("add1", chAddons), 0, 1);
                grammarPizza.Append("i", 0, 1);
                grammarPizza.Append(new SemanticResultKey("dA2", chDouble), 0, 1);
                grammarPizza.Append(new SemanticResultKey("add2", chAddons), 0, 1);
                grammarPizza.Append("i", 0, 1);
                grammarPizza.Append(new SemanticResultKey("dA3", chDouble), 0, 1);
                grammarPizza.Append(new SemanticResultKey("add3", chAddons), 0, 1);
                grammarPizza.Append("i", 0, 1);
                grammarPizza.Append(new SemanticResultKey("dA4", chDouble), 0, 1);
                grammarPizza.Append(new SemanticResultKey("add4", chAddons), 0, 1);
                grammarPizza.Append("i", 0, 1);
                grammarPizza.Append(new SemanticResultKey("dA5", chDouble), 0, 1);
                grammarPizza.Append(new SemanticResultKey("add5", chAddons), 0, 1);


                Grammar g_Pizza = new Grammar(grammarPizza);

                pSRE.LoadGrammarAsync(g_Pizza);

                pSRE.LoadGrammarAsync(grammarSystem);
                // Enable recognition using multiple grammars:
                pSRE.RecognizeAsync(RecognizeMode.Multiple);
                while (speechOn == true)
                {
                    ;
                }                             //busy-wait loop to keep the program from closing
                Console.WriteLine("\tWCIŚNIJ <ENTER> aby wyjść z programu\n");
                Console.ReadLine();
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                Console.ReadLine();
            }
        }
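The pizza grammar above tags every optional fragment with a SemanticResultKey, so the recognition handler can pull the spoken values out of e.Result.Semantics. A minimal sketch of PSRE_SpeechRecognized under that assumption (the example does not show its body):

        private void PSRE_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Optional fragments that were not spoken are simply absent from Semantics.
            if (e.Result.Semantics.ContainsKey("size"))
                Console.WriteLine("size: " + e.Result.Semantics["size"].Value);
            if (e.Result.Semantics.ContainsKey("thickness"))
                Console.WriteLine("crust: " + e.Result.Semantics["thickness"].Value);
            for (int i = 1; i <= 5; i++)
                if (e.Result.Semantics.ContainsKey("add" + i))
                    Console.WriteLine("topping: " + e.Result.Semantics["add" + i].Value);
        }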
Esempio n. 34
0
        void conectaActiva()
        {
            //Make sure at least one Kinect sensor is connected
            if (KinectSensor.KinectSensors.Count > 0)
            {
                //Check that the sensor variable is still null
                if (this.sensor == null)
                {
                    //Assign the first Kinect sensor to our variable
                    this.sensor = KinectSensor.KinectSensors[0];
                    if (this.sensor != null)
                    {
                        try
                        {
                            //Start the Kinect device
                            this.sensor.Start();
                            //Optional: helps tilt the Kinect to a given elevation angle, from -27 to 27
                            //   sensor.ElevationAngle = 3;
                            //Report that the Kinect device was connected and initialized correctly
                            //  Error err = new VME.Error(RecursosLocalizables.StringResources.KinectDetect, 3);
                            // err.Show();
                        }
                        catch (Exception ex)
                        {
                            //any failure while starting the sensor is silently ignored here
                        }

                        //The ri variable will try to find a valid language pack using the obtenerLP method
                        RecognizerInfo ri = obtenerLP();

                        //If the required language pack was found, assign it to our speechengine variable
                        if (ri != null)
                        {
                            this.speechengine = new SpeechRecognitionEngine(ri.Id);
                            //Create the opciones variable, which stores the words or phrases the device can recognize
                            Choices opciones = new Choices();
                            //Each option pairs the value we want to recognize with a key that identifies that value
                            //For example, on this line the localized "cerrar" string is the option value and "UNO" is the key

                            opciones.Add(RecursosLocalizables.StringResources.cerrar, "UNO");
                            //On this line the localized Reflexes string is the option value and "DOS" is the key
                            opciones.Add(RecursosLocalizables.StringResources.Reflexes, "DOS");
                            opciones.Add(RecursosLocalizables.StringResources.configuracion, "TRES");
                            opciones.Add(RecursosLocalizables.StringResources.opciones, "TRES");
                            opciones.Add(RecursosLocalizables.StringResources.usuario, "TRES");
                            opciones.Add(RecursosLocalizables.StringResources.acercaDe1, "TRES");
                            opciones.Add(RecursosLocalizables.StringResources.tabInformacion, "TRES");
                            //On this line "windows" is the option value, "TRES" is the key, and so on
                            opciones.Add(new SemanticResultValue("windows", "TRES"));
                            opciones.Add(new SemanticResultValue("new windows", "TRES"));

                            //This variable will build the whole set of phrases and words for the language chosen in the ri variable
                            var grammarb = new GrammarBuilder {
                                Culture = ri.Culture
                            };
                            //Add the word and phrase options to grammarb
                            grammarb.Append(opciones);
                            //Create a Grammar variable using grammarb as its parameter
                            var grammar = new Grammar(grammarb);
                            //Tell our speechengine variable to load grammar
                            this.speechengine.LoadGrammar(grammar);
                            //Hook up the SpeechRecognized event, which runs every time a word is recognized
                            speechengine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(speechengine_SpeechRecognized);
                            //speechengine starts reading audio input
                            speechengine.SetInputToAudioStream(sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                            speechengine.RecognizeAsync(RecognizeMode.Multiple);
                        }
                    }
                }
            }
        }
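The obtenerLP() method referenced above is not shown; a plausible sketch, following the usual Kinect SDK pattern of scanning the installed recognizers for one that advertises Kinect support (the es-MX culture check is an assumption):

        private static RecognizerInfo obtenerLP()
        {
            foreach (RecognizerInfo recognizer in SpeechRecognitionEngine.InstalledRecognizers())
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);
                // "True" under the "Kinect" key marks a Kinect-capable acoustic model
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase) &&
                    "es-MX".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }
            return null;
        }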
Esempio n. 35
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Create a new SpeechRecognitionEngine instance.
            sre = new SpeechRecognitionEngine();

            // Create a simple grammar that recognizes "red", "green", or "blue".
            Choices grammar = new Choices();

            try // read the sentences file
            {
                OpenFileDialog file = new OpenFileDialog();
                file.Filter = "sentences file|*.txt";
                if (file.ShowDialog() != DialogResult.OK)
                {
                    Application.Exit();
                }
                string sfn    = file.FileName;
                status status = status.none;
                this.textBox1.AppendText("sentences file using " + sfn + "\r\nLoading grammar file:\r\n");
                // Create an instance of StreamReader to read from a file.
                // The using statement also closes the StreamReader.
                using (StreamReader sr = new StreamReader(sfn))
                {
                    string line;
                    // Read and display lines from the file until the end of
                    // the file is reached.
                    while ((line = sr.ReadLine()) != null)
                    {
                        if (line == "" || line[0] == '#')
                        {
                            continue;
                        }
                        if ('[' == line[0])
                        {
                            switch (line)                     // section header
                            {
                            case "[sentences]":               // sentence definitions, see the configure file comments for details
                                status = status.sentences;
                                break;

                            case "[configure]":               // environment variable definitions, see the configure file comments for details
                                status = status.configs;
                                break;

                            default:                          // unknown section type, report the error and halt
                                textBox1.AppendText("[Illegal expression]: " + line + "\r\n[halt]");
                                go = true;
                                break;
                            }
                            continue;
                        }
                        string[] eq = line.Split('!');
                        switch (status)
                        {
                        case status.sentences:                      // inside the [sentences] section
                            textBox1.AppendText(eq[0] + "\r\n");    // display the recognition sentence
                            grammar.Add(eq[0]);                     // add it to the recognizer grammar
                            string[] op   = eq[1].Split(',');       // see the configure file comments for details
                            string[] push = new string[3];
                            push[0] = eq[0];                        // recognition sentence
                            push[1] = op[0];                        // sent sentence
                            push[2] = op[1];                        // operation
                            operations.Add(push);
                            break;

                        case status.configs:
                            switch (eq[0])
                            {
                            case "holdtime":                        // see configure file commont for detail
                                holdtime = int.Parse(eq[1]) * second;
                                break;
                            }
                            break;

                        default:                                    // undefined variables, announce error and halt
                            textBox1.AppendText("[Illeagal experesion]: " + line + "\r\n[halt]");
                            go = true;
                            break;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                // Let the user know what went wrong.
                textBox1.AppendText("The file could not be read:");
                textBox1.AppendText(ex.Message);
                go = true;
                return;
            }
            textBox1.AppendText("------------------------------------------------------------\r\n");    // file operation finished.

            GrammarBuilder gb = new GrammarBuilder();

            gb.Append(grammar);

            // Create the actual Grammar instance, and then load it into the speech recognizer.
            Grammar g = new Grammar(gb);

            sre.LoadGrammar(g);

            // Register a handler for the SpeechRecognized event.
            sre.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(sre_SpeechRecognized);
            sre.SetInputToDefaultAudioDevice();
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
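The parser above implies a simple format for the sentences file: '#' starts a comment, '[sentences]' and '[configure]' open sections, sentence entries are "recognition!sent,operation", and configure entries are "name!value". A hypothetical sentences file, inferred from the parsing code (the actual file is not shown):

        # hypothetical sentences file
        [configure]
        holdtime!3
        [sentences]
        turn on the light!LIGHT_ON,send
        turn off the light!LIGHT_OFF,send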
Esempio n. 36
0
 private void Button_Click(object sender, RoutedEventArgs e)
 {
     recognizer.RecognizeAsync(RecognizeMode.Multiple);
     DisableButton();
 }
Esempio n. 37
0
        //For the space key

        //private void HookManager_KeyPress(object sender, KeyPressEventArgs e)
        //{
        //    if(e.KeyChar == (char)Keys.Space)
        //    {
        //        if (a == 0)
        //        {
        //            recEngine.RecognizeAsync(RecognizeMode.Multiple);
        //            a = 1;
        //        }
        //    }

        //    else
        //    {
        //        a = 0;
        //        recEngine.RecognizeAsyncStop();
        //    }
        //}

        private void enableButton_Click(object sender, EventArgs e)
        {
            recEngine.RecognizeAsync(RecognizeMode.Multiple);
            DisableButton.Enabled = true;
        }
Esempio n. 38
0
        public void Start()
        {
#if KINECT
            this.sensor = KinectSensor.GetDefault();

            if (null != this.sensor)
            {
                try
                {
                    this.sensor.Open();
                }
                catch (IOException ex)
                {
                    this.sensor = null;
                    //Logger.Error(ex.Message);
                }
            }

            if (null == this.sensor)
            {
                return;
            }
            var ri = GetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                var audioBeamList = this.sensor.AudioSource.AudioBeams;
                var audioStream   = audioBeamList[0].OpenInputStream();
                convertStream = new KinectAudioStream(audioStream);

                // Create a grammar definition ...

                this._PluginsList = Plugin.loadPlugins(this.speechEngine);

                speechEngine.SpeechRecognized += SpeechRecognized;

                convertStream.SpeechActive = true;
                speechEngine.SetInputToAudioStream(convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                Console.WriteLine("ok");
#endif
#if MICRO
            this.speechEngine              = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("fr-FR"));
            this._PluginsList              = Plugin.loadPlugins(this.speechEngine);
            speechEngine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(SpeechRecognized);
            speechEngine.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", (int)(this.ConfidenceThreshold * 100));

            speechEngine.MaxAlternates              = 10;
            speechEngine.InitialSilenceTimeout      = TimeSpan.FromSeconds(0);
            speechEngine.BabbleTimeout              = TimeSpan.FromSeconds(0);
            speechEngine.EndSilenceTimeout          = TimeSpan.FromSeconds(0.150);
            speechEngine.EndSilenceTimeoutAmbiguous = TimeSpan.FromSeconds(0.500);
            speechEngine.SetInputToDefaultAudioDevice();
#endif


            speechEngine.RecognizeAsync(RecognizeMode.Multiple);

            speaker.Speak(ConfigurationManager.AppSettings["OperationalSystem"]);
#if KINECT
        }

        else
        {
            speaker.Speak(ConfigurationManager.AppSettings["ErroredSystem"]);
        }
#endif
        }
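Which input path gets compiled depends on the KINECT or MICRO symbol; neither is defined by the snippet itself. One way to select the microphone branch, purely as an illustration, is a preprocessor define at the very top of the source file (or the equivalent DefineConstants entry in the project's build settings):

        #define MICRO   // must appear before any other code in the file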
Esempio n. 39
0
        public ChangeAdditives()
        {
            InitializeComponent();

            int count = 0;

            AdditiveList = new List <string>()
            {
                "wołowina",
                "kiełbasa pepperoni",
                "zielona papryka",
                "pieczarki",
                "cebula",
                "szynka",
                "ananas",
                "kurczak",
                "pomidor"
            };

            Sre = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pl-PL"));

            for (int i = 0; i < GridAdditives.RowDefinitions.Count; i++)
            {
                for (int j = 0; j < GridAdditives.ColumnDefinitions.Count; j++)
                {
                    ToggleButton additiveButton = new ToggleButton
                    {
                        Content = AdditiveList.ElementAt(count),
                        Margin  = new Thickness(10, 10, 10, 10),
                    };

                    additiveButton.Checked   += Btn_Check;
                    additiveButton.Unchecked += Btn_Uncheck;

                    foreach (var additive in HomePage.OrderList.ElementAt(HomePage.OnTheList).AdditivesList)
                    {
                        if (additive == additiveButton.Content.ToString())
                        {
                            additiveButton.IsChecked = true;
                        }
                    }

                    AdditiveButtonsList.Add(additiveButton);

                    Grid.SetColumn(additiveButton, j);
                    Grid.SetRow(additiveButton, i);
                    GridAdditives.Children.Add(additiveButton);


                    count++;
                }
            }

            var additiveWordsList = new string[AdditiveList.Count + 1];

            for (var index = 0; index < AdditiveList.Count; index++)
            {
                additiveWordsList[index] = AdditiveList.ElementAt(index);
            }

            additiveWordsList[AdditiveList.Count] = "Dalej";

            // MICROSOFT SPEECH PLATFORM
            try
            {
                Sre.SetInputToDefaultAudioDevice();
                Sre.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(Sre_SpeechRecognized);

                Choices        words     = new Choices(additiveWordsList);
                GrammarBuilder gramBuild = new GrammarBuilder();
                gramBuild.Append(words);
                Grammar gramSre = new Grammar(gramBuild);
                Sre.LoadGrammar(gramSre);

                Sre.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                Console.ReadLine();
            }
            // MICROSOFT SPEECH PLATFORM
        }
Esempio n. 40
 void Start()
 {
     // Start asynchronous speech recognition; Single stops after the first result.
     recognizer.RecognizeAsync(RecognizeMode.Single);
 }
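Note that, unlike the other examples on this page, Start() passes RecognizeMode.Single, so the engine stops listening after the first recognized phrase. For continuous listening the call would use Multiple and be paired with an explicit stop, for example:

        // Keep recognizing until explicitly stopped.
        recognizer.RecognizeAsync(RecognizeMode.Multiple);
        // ... later: finish the current phrase, then stop.
        recognizer.RecognizeAsyncStop();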
Esempio n. 41
0
        //
        //  public bool load_listen()
        //
        //  load_listen() establishes the speech recognition engine based on the command glossary stored within the
        //  currently loaded Profile.  load_listen() may fail, returning Boolean FALSE, if a Profile's glossary does
        //  not meet the engine's grammar requirements; load_listen() will also fail, returning Boolean FALSE, should
        //  an exception occur that cannot be resolved within the method.  load_listen() will return Boolean TRUE upon
        //  success.
        //

        public bool load_listen()
        {
            vi_syn = GAVPI.vi_profile.synth;
            vi_syn.SelectVoice(GAVPI.vi_settings.voice_info);
            vi_sre = new SpeechRecognitionEngine(GAVPI.vi_settings.recognizer_info);

            GrammarBuilder phrases_grammar = new GrammarBuilder();

            // Grammar must match the speech recognition language localization
            phrases_grammar.Culture = GAVPI.vi_settings.recognizer_info;

            List <string> glossory = new List <string>();

            foreach (VI_Phrase trigger in GAVPI.vi_profile.Profile_Triggers)
            {
                glossory.Add(trigger.value);
            }
            if (glossory.Count == 0)
            {
                MessageBox.Show("You need to add at least one Trigger");
                return(false);
            }
            phrases_grammar.Append(new Choices(glossory.ToArray()));

            vi_sre.LoadGrammar(new Grammar(phrases_grammar));
            //set event function
            vi_sre.SpeechRecognized          += phraseRecognized;
            vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;

            try {
                vi_sre.SetInputToDefaultAudioDevice();
            } catch (InvalidOperationException exception) {
                //  For the time being, we're only catching failures to address an input device (typically a
                //  microphone).

                MessageBox.Show("Have you connected a microphone to this computer?\n\n" +
                                "Please ensure that you have successfull connected and configured\n" +
                                "your microphone before trying again.",
                                "I cannot hear you!",
                                MessageBoxButtons.OK,
                                MessageBoxIcon.Exclamation,
                                MessageBoxDefaultButton.Button1);

                return(false);
            }

            vi_sre.RecognizeAsync(RecognizeMode.Multiple);

            //  TODO:
            //  Push-to-Talk keyboard hook.  Unimplemented.
            try {
                KeyboardHook.KeyDown += pushtotalk_keyDownHook;
                KeyboardHook.KeyUp   += pushtotalk_keyUpHook;
                KeyboardHook.InstallHook();
            } catch (OverflowException exception) {
                //  TODO:
                //  InputManager library, which we rely upon, has issues with .Net 4.5 and throws an Overflow exception.
                //  We'll catch it here and pretty much let it go for now (since Push-to-Talk isn't implemented yet)
                //  with the intent of resolving it later.
            }

            if (GAVPI.vi_settings.pushtotalk_mode != "Hold" && GAVPI.vi_settings.pushtotalk_mode != "PressOnce")
            {
                pushtotalk_active = true;
            }

            //  We have successfully established an instance of a SAPI engine with a well-formed grammar.

            IsListening = true;

            return(true);
        }
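The rejection handler wired above is not shown. A minimal sketch of what _recognizer_SpeechRecognitionRejected might log (the alternates loop and the confidence report are our assumptions):

        private void _recognizer_SpeechRecognitionRejected(object sender,
            SpeechRecognitionRejectedEventArgs e)
        {
            // Even rejected audio carries candidate phrases with confidence scores.
            foreach (RecognizedPhrase phrase in e.Result.Alternates)
                Console.WriteLine("Rejected: {0} ({1:F2})", phrase.Text, phrase.Confidence);
        }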
Esempio n. 42
0
 public void MessageReceived(string message, string userID)
 {
     if (message == "face1")
     {
         this.currentState = new Face1(this);
         label1.Visible    = false;
     }
     else if (message == "face2")
     {
         this.currentState = new Face2(this);
         label1.Visible    = false;
     }
     else if (message == "face3")
     {
         this.currentState = new Face3(this);
         label1.Visible    = false;
     }
     else if (message == "face4")
     {
         this.currentState = new Face4(this);
         label1.Visible    = false;
         SendSerial("f");
     }
     else if (message == "face5")
     {
         this.currentState = new Face5(this);
         label1.Visible    = false;
     }
     else if (message == "start_song")
     {
         this.currentState = new SongState(this);
         label1.Visible    = false;
     }
     else if (message == "stop_song")
     {
         this.currentState = new Face1(this);
         label1.Visible    = false;
     }
     else if (message == "start_song2")
     {
         this.currentState = new DanceState(this);
         label1.Visible    = false;
     }
     else if (message == "stop_song2")
     {
         this.currentState = new Face1(this);
         label1.Visible    = false;
     }
     else if (message.StartsWith("q:"))
     {
         this.currentState = new Face1(this);
         label1.Text       = message.Substring(2);
         label1.Location   = new Point((this.Width - label1.Width) / 2, 30);
         label1.Visible    = true;
     }
     else if (message == "Good")
     {
         this.currentState = new GoodAnswer(this);
         label1.Text       = "Good!";
         label1.Location   = new Point((this.Width - label1.Width) / 2, 30);
         label1.Visible    = true;
         SendSerial("e");
     }
     else if (message == "Incorrect")
     {
         this.currentState = new BadAnswer(this);
         label1.Text       = "Incorrect...";
         label1.Location   = new Point((this.Width - label1.Width) / 2, 30);
         label1.Visible    = true;
         SendSerial("f");
     }
     else if (message == "stop_voice")
     {
         recognizer.RecognizeAsyncStop();
     }
     else if (message == "start_voice")
     {
         recognizer.RecognizeAsync(RecognizeMode.Multiple);
     }
 }
Esempio n. 43
0
 public void Feed()
 {
     _recognizer.SetInputToDefaultAudioDevice();
     _recognizer.RecognizeAsync(RecognizeMode.Multiple);
 }
Esempio n. 44
0
        private void DoWork(object sender, DoWorkEventArgs e)
        {
            BackgroundWorker worker = sender as BackgroundWorker;

            while (hp > 0 && monsterHP > 0)
            {
                if ((worker.CancellationPending == true))
                {
                    e.Cancel = true;
                    break;
                }
                completed = false;
                Random rnd   = new Random();
                int    index = rnd.Next(0, 13);


                // Pick a random sentence and show it as the reading target.
                Console.WriteLine(Sentence[index]);

                answer = Sentence[index];


                this.richTextBox2.Text = answer;

                this.richTextBox2.Text += "\n 3...";
                Thread.Sleep(1000);
                this.richTextBox2.Text += "2...";
                Thread.Sleep(1000);
                this.richTextBox2.Text += "1...";
                Thread.Sleep(1000);
                this.richTextBox2.Text += "開始念!!!";


                using (SpeechRecognitionEngine recognizer =
                           new SpeechRecognitionEngine(new CultureInfo("zh-TW")))
                {
                    // Sentence to be said
                    Choices words = new Choices();
                    words.Add(new string[] { Sentence[index] });
                    GrammarBuilder gb = new GrammarBuilder();
                    gb.Append(words);
                    Grammar g = new Grammar(gb);
                    recognizer.LoadGrammar(g);

                    //Create and load the dictation grammar.
                    Grammar dictation = new DictationGrammar();
                    dictation.Name = "Dictation Grammar";
                    recognizer.LoadGrammar(dictation);

                    // Attach event handlers to the recognizer.
                    recognizer.SpeechRecognized +=
                        new EventHandler <SpeechRecognizedEventArgs>(
                            SpeechRecognizedHandler);
                    recognizer.RecognizeCompleted +=
                        new EventHandler <RecognizeCompletedEventArgs>(
                            RecognizeCompletedHandler);

                    // Assign input to the recognizer.
                    recognizer.SetInputToDefaultAudioDevice();

                    // Begin asynchronous recognition.
                    Console.WriteLine("Starting recognition...");
                    completed = false;
                    recognizer.RecognizeAsync(RecognizeMode.Multiple);

                    // Wait for recognition to finish.
                    while (!completed)
                    {
                        Thread.Sleep(333);
                    }
                    Console.WriteLine("Done.");
                    worker.ReportProgress(1);
                }
            }
        }
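The DoWork loop above spins on the "completed" flag, which only the event handlers can clear. A minimal sketch of the two handlers it registers (their bodies are not shown in the example; the stop-on-correct-answer logic is an assumption):

        void SpeechRecognizedHandler(object sender, SpeechRecognizedEventArgs e)
        {
            // Stop the session once the player reads the target sentence correctly.
            if (e.Result.Text == answer)
                ((SpeechRecognitionEngine)sender).RecognizeAsyncStop();
        }

        void RecognizeCompletedHandler(object sender, RecognizeCompletedEventArgs e)
        {
            completed = true; // lets the polling loop in DoWork continue
        }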
Esempio n. 45
0
        private VoiceCommandEngine()
        {
            try {
                _srEngine.SetInputToDefaultAudioDevice();
                _srEngine.SpeechRecognized += SpeechEngine_SpeechRecognized;

                Zulrah.Instance.OnPhaseChanged          += BossPhaseChanged;
                Zulrah.Instance.OnPhaseDecisionRequired += BossPhaseInputRequired;

                _speechSynthesizer = new SpeechSynthesizer {
                    Rate = 4
                };

                string[] generalCommands =
                {
                    Settings.Default.ResetVoiceCommand,
                    Settings.Default.ResumeVoiceCommand,
                    Settings.Default.PauseVoiceCommand
                };


                string[] styleCommands =
                {
                    Settings.Default.MeleeVoiceCommand,
                    Settings.Default.MageVoiceCommand,
                    Settings.Default.RangeVoiceCommand
                };

                string[] positionCommands =
                {
                    Settings.Default.NorthPositionVoiceCommand,
                    Settings.Default.SouthPositionVoiceCommand,
                    Settings.Default.WestPositionVoiceCommand,
                    Settings.Default.EastPositionVoiceCommand
                };

                string nextCommand = Settings.Default.NextVoiceCommand;

                _generalChoices = new Grammar(new Choices(generalCommands).ToGrammarBuilder())
                {
                    Name = "General"
                };
                _styleChoices = new Grammar(new Choices(styleCommands).ToGrammarBuilder())
                {
                    Name = "StyleChoice"
                };
                _positionChoices = new Grammar(new Choices(positionCommands).ToGrammarBuilder())
                {
                    Name = "LocationChoice", Enabled = false
                };
                _nextChoice = new Grammar(new Choices(nextCommand).ToGrammarBuilder())
                {
                    Name = "NextChoice", Enabled = false
                };

                _srEngine.LoadGrammarAsync(_generalChoices);
                _srEngine.LoadGrammarAsync(_styleChoices);
                _srEngine.LoadGrammarAsync(_positionChoices);
                _srEngine.LoadGrammarAsync(_nextChoice);

                _srEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch {
                MessageBox.Show("Program Will Not Function, No Microphone Found");
            }
        }
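Two of the grammars above are loaded with Enabled = false; Grammar.Enabled can be flipped while the engine keeps running, so only phase-relevant commands are live at any moment. A hedged sketch of how the phase-decision handler might use that (the event signature and the exact toggles are assumptions):

        private void BossPhaseInputRequired(object sender, EventArgs e)
        {
            _generalChoices.Enabled  = false;
            _styleChoices.Enabled    = true;  // expect "melee"/"mage"/"range" next
            _positionChoices.Enabled = true;  // ... or a compass position
        }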
Esempio n. 46
0
        private void LoadSpeechRecognition()                                                  // set up everything needed for speech recognition
        {
            sre = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-BR")); // instantiate the recognizer, passing the engine's culture
            sre.SetInputToDefaultAudioDevice();                                               // set the microphone as the audio input

            commandsForQA = new List <string>();
            commandsForQA.Add("o que é");
            commandsForQA.Add("qual é");
            commandsForQA.Add("defina");
            commandsForQA.Add("definição de");

            // process the AIML here
            Choices cAIML = new Choices(AIML.GetWordsOrSentences()); // get the phrases and words

            // read the commands file
            string[] cmds = File.ReadAllText("comandos.txt", Encoding.UTF8).Split('$'); // read it and split it into entries

            Choices cControls = new Choices();

            cControls.Add("detecção de movimento");

            // Alarm
            Choices cAlarm = new Choices();

            for (int i = 1; i <= 12; i++)
            {
                cAlarm.Add(i.ToString());
            }

            // Creating the Grammars and Choices
            Choices cChats = new Choices(); // chat words and phrases

            cChats.Add("bom dia");
            cChats.Add("boa tarde");
            cChats.Add("boa noite");
            cChats.Add("jarvis você está ai?");
            cChats.Add("ainda acordado jarvis?");
            cChats.Add("alguma ideia jarvis?");
            cChats.Add("obrigado jarvis");


            Choices cDummes = new Choices(); // more free-form conversation

            cDummes.Add(DummeIn.InStartingConversation.ToArray());
            cDummes.Add(DummeIn.InQuestionForDumme.ToArray());
            cDummes.Add(DummeIn.InDoWork.ToArray());
            cDummes.Add(DummeIn.InDummeStatus.ToArray());
            cDummes.Add(DummeIn.InJarvis.ToArray());

            Choices cCommands = new Choices(); // words and phrases that are commands

            // time and date information
            cCommands.Add("que horas são");
            cCommands.Add("que dia é hoje");
            cCommands.Add("data de hoje");
            cCommands.Add("em que mês estamos");
            cCommands.Add("em que ano estamos");
            cCommands.Add("minimizar a janela principal");
            cCommands.Add("mostrar janela principal");


            // Program commands
            cCommands.Add("exibir lista de comandos");

            // user status
            cCommands.Add("estou com sono");
            cCommands.Add("estou indo dormir");

            // quit JARVIS
            cCommands.Add("até mais jarvis");

            // configure the synthesizer
            cCommands.Add("pare de falar");

            // news
            cCommands.Add("quais são as notícias");
            cCommands.Add("próxima notícia");

            // media player
            cCommands.Add("media player");
            cCommands.Add("selecionar arquivo para o media player");
            cCommands.Add("pausar");
            cCommands.Add("continuar");
            cCommands.Add("parar");
            cCommands.Add("fechar media player");
            cCommands.Add("abrir diretório para reproduzir");
            cCommands.Add("próximo");
            cCommands.Add("anterior");
            cCommands.Add("aumentar volume do media player");
            cCommands.Add("diminuir volume do media player");
            cCommands.Add("media player sem som");
            cCommands.Add("media player com som");
            cCommands.Add("media player em tela cheia");
            cCommands.Add("que arquivo está tocando");

            // system information
            cCommands.Add("em quanto estar o uso do processador?");
            cCommands.Add("quanta memória ram estar sendo usada?");
            cCommands.Add("quanta mamória ram ainda há livre?");
            cCommands.Add("quanta memória ram há no total?");

            // commands: add a new one
            cCommands.Add("adicionar novo comando");
            // processes
            cCommands.Add("detalhes dos processos");
            // processList
            cCommands.Add("lista de processos");
            cCommands.Add("fechar o processo selecionado");
            // jarvis
            cCommands.Add("introdução ao assistente jarvis");

            cCommands.Add("desligar computador");
            cCommands.Add("reiniciar computador");
            cCommands.Add("cancelar desligamento");
            cCommands.Add("cancelar reinicialização");

            // Youtube
            cCommands.Add("tocar algo do youtube");
            cCommands.Add("adicionar link para o youube");
            cCommands.Add("previsão do tempo");

            // window control
            cCommands.Add("alterar de janela");
            cCommands.Add("fechar janela");

            // keystroke commands
            cCommands.Add("copiar texto selecionado");
            cCommands.Add("colar texto selecionado");
            cCommands.Add("salvar este arquivo");
            cCommands.Add("selecionar tudo");
            cCommands.Add("nova linha");

            // Commands for the Código Logo channel
            cCommands.Add("reproduza um vídeo do canal");


            //Choices cNumbers = new Choices(File.ReadAllLines("n.txt")); // numbers

            Choices cProcess = new Choices(); // list of programs that can be opened or closed

            cProcess.Add("bloco de notas");
            cProcess.Add("windows media player");
            cProcess.Add("prompt de comando");
            cProcess.Add("gerenciador de tarefas");
            cProcess.Add("minhas pastas");
            cProcess.Add("calculadora");
            cProcess.Add("mapa de caracteres");
            cProcess.Add("limpeza de disco");
            cProcess.Add("gerenciamento de cores");
            cProcess.Add("serviços de componente");
            cProcess.Add("gerenciamento de computador");
            cProcess.Add("definir programas padrão");
            cProcess.Add("painel de controle");
            cProcess.Add("otimizador de texto");
            cProcess.Add("calibragem de cores");
            cProcess.Add("desfragmentador de disco");
            cProcess.Add("adicionar um novo dispositivo");
            cProcess.Add("gerenciador de dispositivos");
            cProcess.Add("discagem telefônica");
            cProcess.Add("gerenciamento de disco");

            Choices cCustomSites = new Choices(); // the user's own site commands

            Choices      webSearch = new Choices();
            StreamReader srWords   = new StreamReader("words.txt", Encoding.UTF8);

            while (srWords.Peek() >= 0)
            {
                try
                {
                    webSearch.Add(srWords.ReadLine());
                }
                catch { }
            }
            srWords.Close();

            // now process comandos.txt
            for (int i = 0; i < cmds.Length; i++)
            {
                try
                {
                    if (cmds[i].StartsWith("site#"))
                    {
                        cmds[i] = cmds[i].Replace("site#", "");
                        string[] temp = cmds[i].Split('#');
                        cCustomSites.Add(temp[0]); // add the word to the grammar
                        dictCmdSites.Add(temp[0], temp[1]);
                    }
                }
                catch { }
            }

            // Alarm grammar
            GrammarBuilder gbAlarm = new GrammarBuilder();

            gbAlarm.Append(new Choices("defina alarme", "alarme às", "despertador às"));
            gbAlarm.Append(cAlarm);
            gbAlarm.Append(new Choices("horas da manhã", "horas da tarde", "horas da noite"));

            // GrammarBuilders
            GrammarBuilder gbChats = new GrammarBuilder(); // create a GrammarBuilder for the chats

            gbChats.Append(cChats);                        // done

            GrammarBuilder gbDumme = new GrammarBuilder(); // loose conversation

            gbDumme.Append(cDummes);

            GrammarBuilder gbCommands = new GrammarBuilder(); // for the command list

            gbCommands.Append(cCommands);                     // done

            GrammarBuilder gbControls = new GrammarBuilder();

            gbControls.Append(cControls);

            /*
             * GrammarBuilder gbCalculations = new GrammarBuilder(); // grammar that does calculations
             * gbCalculations.Append("quanto é"); // first part
             * gbCalculations.Append(cNumbers); // numbers
             * gbCalculations.Append(new Choices("mais", "menos", "dividido por", "vezes", "porcento de"));
             * gbCalculations.Append(cNumbers); // numbers again
             * // this grammar can handle, for example, "quanto é 65 vezes 42" or "quanto é 14 porcento de 500"
             */

            GrammarBuilder gbProcess = new GrammarBuilder();

            gbProcess.Append(new Choices("abrir", "fechar"));                               // comando
            gbProcess.Append(cProcess);                                                     // adicionar lista de processos

            GrammarBuilder gbCustomSites = new GrammarBuilder();                            // sites e páginas

            gbCustomSites.Append(new Choices("abrir", "iniciar", "carregar", "ir para o")); // parametros
            gbCustomSites.Append(cCustomSites);

            GrammarBuilder gbWebSearch = new GrammarBuilder();

            gbWebSearch.Append(new Choices("pesquisar", "buscar", "procurar", "buscar por", "pesquisar vídeo de", "imagem de",
                                           "imagens de", "pesquisar imagem de", "pesquisar por"));
            gbWebSearch.Append(webSearch);
            gbWebSearch.Append(new Choices("no youtube", "no google"));


            GrammarBuilder gbQA = new GrammarBuilder(); // grammar for answering questions

            gbQA.Append(new Choices(commandsForQA.ToArray()));
            gbQA.Append(cAIML);

            // Grammars

            Grammar gQA = new Grammar(gbQA);

            gQA.Name = "QA";

            Grammar gChats = new Grammar(gbChats); // chat grammar

            gChats.Name = "Chats";                 // name the grammar; we will use this later on

            Grammar gDumme = new Grammar(gbDumme);

            gDumme.Name = "Dumme";                             // name

            Grammar gCommands = new Grammar(gbCommands);       // command grammar

            gCommands.Name = "Commands";                       // name

            Grammar gCustomSites = new Grammar(gbCustomSites); // site grammar

            gCustomSites.Name = "Sites";


            Grammar gAIML = new Grammar(new GrammarBuilder(cAIML));

            gAIML.Name = "AIML";

            /*
             * Grammar gCalculations = new Grammar(gbCalculations);
             * gCalculations.Name = "Calculations"; */

            Grammar gProcess = new Grammar(gbProcess);

            gProcess.Name = "Process";
            // Now load the grammars

            Grammar gWebSearch = new Grammar(gbWebSearch);

            gWebSearch.Name = "Search";


            Grammar gControls = new Grammar(gbControls);

            gControls.Name = "Control";

            // this can be done in several ways; for now, do the following
            // list of grammars
            List <Grammar> grammars = new List <Grammar>();

            grammars.Add(gQA);
            grammars.Add(gChats);
            grammars.Add(gDumme);
            grammars.Add(gCommands); // commands
            grammars.Add(gAIML);
            grammars.Add(gWebSearch);
            grammars.Add(gControls);
            //grammars.Add(gCalculations);
            grammars.Add(gProcess);
            grammars.Add(gCustomSites);

            ParallelOptions op = new ParallelOptions()
            {
                MaxDegreeOfParallelism = 4
            };

            Parallel.For(0, grammars.Count, op, i => // parallel loop
            {
                sre.LoadGrammar(grammars[i]);        // load each grammar
            });

            speechRecognitionActived       = true;                                                             // speech recognition active!
            sre.SpeechRecognized          += new EventHandler <SpeechRecognizedEventArgs>(reconhecido);        // recognition succeeded
            sre.AudioLevelUpdated         += new EventHandler <AudioLevelUpdatedEventArgs>(audioElevou);       // audio level changed
            sre.SpeechRecognitionRejected += new EventHandler <SpeechRecognitionRejectedEventArgs>(rejeitado); // speech recognition failed
            sre.SpeechDetected            += new EventHandler <SpeechDetectedEventArgs>(vozDetectada);         // some speech was detected
            sre.LoadGrammarCompleted      += new EventHandler <LoadGrammarCompletedEventArgs>(loaded);         // a grammar finished loading
            sre.RecognizeAsync(RecognizeMode.Multiple);                                                        // start asynchronous, continuous recognition
        }
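Because every grammar above was given a Name, the recognition handler can dispatch on which grammar produced the match. A minimal sketch of reconhecido under that assumption (its body is not shown in the example):

        private void reconhecido(object sender, SpeechRecognizedEventArgs e)
        {
            switch (e.Result.Grammar.Name)
            {
                case "Commands": /* execute the matching command  */ break;
                case "Chats":    /* answer with small talk        */ break;
                case "Search":   /* launch the requested search   */ break;
                default:         Console.WriteLine(e.Result.Text);  break;
            }
        }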
Esempio n. 47
0
 private void buttonX1_Click(object sender, EventArgs e)
 {
     recengine.RecognizeAsync(RecognizeMode.Multiple);
     buttonX2.Enabled = true;
 }
Esempio n. 48
0
        public static void Main(string[] args)
        {
            // Obtain a KinectSensor if any are available
            KinectSensor sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();

            if (sensor == null)
            {
                Console.WriteLine(
                    "No Kinect sensors are attached to this computer or none of the ones that are\n" +
                    "attached are \"Connected\".\n" +
                    "Attach the KinectSensor and restart this application.\n" +
                    "If that doesn't work run SkeletonViewer-WPF to better understand the Status of\n" +
                    "the Kinect sensors.\n\n" +
                    "Press any key to continue.\n");

                // Give a chance for user to see console output before it is dismissed
                Console.ReadKey(true);
                return;
            }

            sensor.Start();

            // Obtain the KinectAudioSource to do audio capture
            KinectAudioSource source = sensor.AudioSource;

            source.EchoCancellationMode        = EchoCancellationMode.None; // No AEC for this sample
            source.AutomaticGainControlEnabled = false;                     // Important to turn this off for speech recognition

            RecognizerInfo ri = GetKinectRecognizer();

            if (ri == null)
            {
                Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                return;
            }

            Console.WriteLine("Using: {0}", ri.Name);

            // NOTE: Need to wait 4 seconds for device to be ready right after initialization
            int wait = 4;

            while (wait > 0)
            {
                Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--);
                Thread.Sleep(1000);
            }

            using (var sre = new SpeechRecognitionEngine(ri.Id))
            {
                var colors = new Choices();
                colors.Add("red");
                colors.Add("green");
                colors.Add("blue");

                var gb = new GrammarBuilder {
                    Culture = ri.Culture
                };

                // Specify the culture to match the recognizer in case we are running in a different culture.
                gb.Append(colors);

                // Create the actual Grammar instance, and then load it into the speech recognizer.
                var g = new Grammar(gb);

                sre.LoadGrammar(g);
                sre.SpeechRecognized          += SreSpeechRecognized;
                sre.SpeechHypothesized        += SreSpeechHypothesized;
                sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                using (Stream s = source.Start())
                {
                    sre.SetInputToAudioStream(
                        s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                    Console.WriteLine("Recognizing speech. Say: 'red', 'green' or 'blue'. Press ENTER to stop");

                    sre.RecognizeAsync(RecognizeMode.Multiple);
                    Console.ReadLine();
                    Console.WriteLine("Stopping recognizer ...");
                    sre.RecognizeAsyncStop();
                }
            }

            sensor.Stop();
        }
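For reference, the SpeechAudioFormatInfo arguments that recur in these Kinect examples decode as follows (an annotated copy of the call above, not new behavior):

        sre.SetInputToAudioStream(s, new SpeechAudioFormatInfo(
            EncodingFormat.Pcm, // linear PCM samples
            16000,              // samples per second
            16,                 // bits per sample
            1,                  // one channel (mono)
            32000,              // average bytes/sec = 16000 * (16/8) * 1
            2,                  // block align = channels * bytes per sample
            null));             // no extra format-specific data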
Esempio n. 49
0
 public void startVoice()
 {
     recEngin.RecognizeAsync(RecognizeMode.Multiple);
 }
Esempio n. 50
0
 public void Enable()
 {
     SpeechRecEngine.RecognizeAsync(RecognizeMode.Multiple);
 }
Esempio n. 51
0
        public MainWindow()
        {
            InitializeComponent();

            //components = new System.ComponentModel.Container();
            contextMenu1 = new System.Windows.Forms.ContextMenu();
            menuItem1    = new System.Windows.Forms.MenuItem();

            contextMenu1.MenuItems.AddRange(new MenuItem[] { menuItem1 });

            menuItem1.Index  = 0;
            menuItem1.Text   = "Exit";
            menuItem1.Click += new EventHandler(exit_Click);

            notifyIcon             = new System.Windows.Forms.NotifyIcon();
            notifyIcon.Text        = "Voice Coding";
            notifyIcon.Icon        = new System.Drawing.Icon("icon_tray_light.ico");
            notifyIcon.Click      += new EventHandler(notifyIcon_Click);
            notifyIcon.ContextMenu = contextMenu1;

            if (!notifyIcon.Visible)
            {
                notifyIcon.Visible = true;
            }


            //grammar generation
            includeBuilder.Append("include");
            includeBuilder.Append(headers);

            functionBuilder.Append("function");
            functionBuilder.Append(dataType);
            functionBuilder.AppendDictation();

            printBuilder.Append(printCmdType);
            printBuilder.Append(printType);
            printBuilder.AppendDictation();


            //Assign grammars
            includeChooser  = new Grammar(includeBuilder);
            functionChooser = new Grammar(functionBuilder);
            printChooser    = new Grammar(printBuilder);

            rec.SetInputToDefaultAudioDevice();

            //Load all the different kinds of grammar
            rec.LoadGrammarAsync(new Grammar(new GrammarBuilder(new Choices(File.ReadAllLines(@"commands.txt")))));
            rec.LoadGrammarAsync(includeChooser);
            rec.LoadGrammarAsync(functionChooser);
            rec.LoadGrammarAsync(printChooser);

            //All event handlers
            rec.SpeechRecognized +=
                new EventHandler <SpeechRecognizedEventArgs>(rec_Recognized);
            rec.SpeechDetected +=
                new EventHandler <SpeechDetectedEventArgs>(rec_Detected);
            rec.RecognizeCompleted +=
                new EventHandler <RecognizeCompletedEventArgs>(rec_Completed);

            //Start recognizer
            rec.RecognizeAsync(RecognizeMode.Multiple);
            recognizing = true;

            /*   AUTO INPUT
             * Thread.Sleep(3000);
             * rec.EmulateRecognizeAsync("include iostream");
             * Thread.Sleep(500);
             * rec.EmulateRecognizeAsync("function void main");
             * Thread.Sleep(500);
             * rec.EmulateRecognizeAsync("printline string This string is going to be printed");
             * Thread.Sleep(500);
             * rec.EmulateRecognizeAsync("printf variable date");
             * Thread.Sleep(500);
             * rec.EmulateRecognizeAsync("function int recognized");
             * Thread.Sleep(500);
             */
            //rec.EmulateRecognizeAsync("exit");
        }
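The rec_Recognized handler is not shown, but the commented "AUTO INPUT" phrases ("include iostream", "function void main", ...) suggest it turns recognized phrases into C++ boilerplate keystrokes. A speculative sketch for the include case only (the SendKeys output is entirely our assumption):

        private void rec_Recognized(object sender, SpeechRecognizedEventArgs e)
        {
            string[] words = e.Result.Text.Split(' ');
            if (words.Length == 2 && words[0] == "include")
            {
                // e.g. "include iostream" -> "#include <iostream>"
                System.Windows.Forms.SendKeys.SendWait("#include <" + words[1] + ">{ENTER}");
            }
        }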
Esempio n. 52
0
        /// <summary>
        /// constructor
        /// </summary>
        /// <param name="mainWindow"></param>
        public TouchWallApp(MainWindow mainWindow)
        {
            // sorry for this
            ParentMainWindow = mainWindow;

            // Get the _kinectSensor object
            KinectSensor = KinectSensor.GetDefault();

            // Create the screen for calibration
            _screen = new Screen(ParentMainWindow);

            // Create a manager for processing sensor data from the kinect
            FrameDataManager = new FrameDataManager();

            // Open the sensor
            KinectSensor.Open();

            // Set the statuses: Calibration disabled, No current gesture, Cursor with movement, MultiTouch disabled
            CalibrateStatus    = 0;
            CurrentGestureType = 0;
            CursorStatus       = 2;
            MultiTouchMode     = 0;

            #region Speech

            // Grab the audio stream
            IReadOnlyList <AudioBeam> audioBeamList = KinectSensor.AudioSource.AudioBeams;
            Stream audioStream = audioBeamList[0].OpenInputStream();

            // Create the convert stream
            _convertStream = new KinectAudioStream(audioStream);

            RecognizerInfo recognizerInfo = TryGetKinectRecognizer();

            if (recognizerInfo != null)
            {
                _speechEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

                var directions = new Choices();
                directions.Add(new SemanticResultValue("Kinect Calibrate Enable", "CALIBRATE_FULL"));
                directions.Add(new SemanticResultValue("Kinect Calibrate Disable", "CALIBRATE_CANCEL"));
                directions.Add(new SemanticResultValue("Kinect Cursor Enable", "CURSOR_ENABLE"));
                directions.Add(new SemanticResultValue("Kinect Cursor Disable", "CURSOR_DISABLE"));
                directions.Add(new SemanticResultValue("Kinect Depth Enable", "DEPTH_START"));
                directions.Add(new SemanticResultValue("Kinect Depth Disable", "DEPTH_END"));
                directions.Add(new SemanticResultValue("Kinect Multi Enable", "MULTI_START"));
                directions.Add(new SemanticResultValue("Kinect Multi Disable", "MULTI_END"));
                directions.Add(new SemanticResultValue("Kinect Open TouchDevelop", "TOUCHDEVELOP"));
                directions.Add(new SemanticResultValue("Kinect Open TouchDevelop Local", "TOUCHDEVELOPLOCAL"));
                directions.Add(new SemanticResultValue("Kinect Open Keyboard", "KEYBOARD"));


                var gb = new GrammarBuilder {
                    Culture = recognizerInfo.Culture
                };
                gb.Append(directions);
                var g = new Grammar(gb);
                _speechEngine.LoadGrammar(g);

                _speechEngine.SpeechRecognized          += SpeechRecognized;
                _speechEngine.SpeechRecognitionRejected += SpeechRejected;

                // Let the convertStream know speech is going active
                _convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                //speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                _speechEngine.SetInputToAudioStream(_convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }

            #endregion
        }
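The SpeechRecognized handler wired above is not shown; the usual Kinect sample pattern is to gate on confidence and then switch on the semantic tag. A hedged sketch (the 0.3 threshold is an assumption, not taken from the example):

        private void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            const double ConfidenceThreshold = 0.3;
            if (e.Result.Confidence < ConfidenceThreshold) return;

            switch (e.Result.Semantics.Value.ToString())
            {
                case "CALIBRATE_FULL": /* start full calibration */ break;
                case "CURSOR_ENABLE":  /* resume cursor movement */ break;
                // ... remaining tags from the Choices list above
            }
        }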
Esempio n. 53
0
 private void button1_Click(object sender, EventArgs e)
 {
     spRec.RecognizeAsync(RecognizeMode.Multiple);
     btnDisable.Enabled = true;
     // myPort.Open();
 }
Esempio n. 54
0
 private void button1_Click(object sender, EventArgs e)
 {
     Engine.RecognizeAsync(RecognizeMode.Multiple);
 }
        /// <summary>
        /// Outlines the valid voice commands
        /// </summary>
        public void voiceRec()
        {
            if (null != this.ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                //Use this code to create grammar programmatically rather than from
                //a grammar file.

                var commands = new Choices();
                commands.Add(new SemanticResultValue("play", "PLAY"));
                commands.Add(new SemanticResultValue("pause", "PAUSE"));
                commands.Add(new SemanticResultValue("next", "NEXT"));
                commands.Add(new SemanticResultValue("previous", "PREVIOUS"));
                commands.Add(new SemanticResultValue("mute", "MUTE"));
                commands.Add(new SemanticResultValue("volume up", "VOLUME UP"));
                commands.Add(new SemanticResultValue("volume down", "VOLUME DOWN"));
                commands.Add(new SemanticResultValue("search", "SEARCH"));
                commands.Add(new SemanticResultValue("song", "SONG"));
                commands.Add(new SemanticResultValue("artist", "ARTIST"));
                commands.Add(new SemanticResultValue("space", "SPACE"));
                commands.Add(new SemanticResultValue("select", "SELECT"));
                commands.Add(new SemanticResultValue("logout", "LOGOUT"));
                commands.Add(new SemanticResultValue("profile", "PROFILE"));
                commands.Add(new SemanticResultValue("left", "LEFT"));
                commands.Add(new SemanticResultValue("up", "UP"));
                commands.Add(new SemanticResultValue("down", "DOWN"));
                commands.Add(new SemanticResultValue("right", "RIGHT"));
                commands.Add(new SemanticResultValue("exit", "EXIT"));
                commands.Add(new SemanticResultValue("backspace", "BACKSPACE"));
                commands.Add(new SemanticResultValue("clear", "CLEAR"));
                commands.Add(new SemanticResultValue("tab", "TAB"));
                commands.Add(new SemanticResultValue("a", "A"));
                commands.Add(new SemanticResultValue("b", "B"));
                commands.Add(new SemanticResultValue("c", "C"));
                commands.Add(new SemanticResultValue("d", "D"));
                commands.Add(new SemanticResultValue("E", "E"));
                commands.Add(new SemanticResultValue("f", "F"));
                commands.Add(new SemanticResultValue("g", "G"));
                commands.Add(new SemanticResultValue("h", "H"));
                commands.Add(new SemanticResultValue("i", "I"));
                commands.Add(new SemanticResultValue("j", "J"));
                commands.Add(new SemanticResultValue("k", "K"));
                commands.Add(new SemanticResultValue("l", "L"));
                commands.Add(new SemanticResultValue("m", "M"));
                commands.Add(new SemanticResultValue("n", "N"));
                commands.Add(new SemanticResultValue("o", "O"));
                commands.Add(new SemanticResultValue("p", "P"));
                commands.Add(new SemanticResultValue("q", "Q"));
                commands.Add(new SemanticResultValue("r", "R"));
                commands.Add(new SemanticResultValue("s", "S"));
                commands.Add(new SemanticResultValue("t", "T"));
                commands.Add(new SemanticResultValue("u", "U"));
                commands.Add(new SemanticResultValue("v", "V"));
                commands.Add(new SemanticResultValue("w", "W"));
                commands.Add(new SemanticResultValue("x", "X"));
                commands.Add(new SemanticResultValue("y", "Y"));
                commands.Add(new SemanticResultValue("z", "Z"));

                var gb = new GrammarBuilder {
                    Culture = ri.Culture
                };
                gb.Append(commands);

                var g = new Grammar(gb);

                // Loads the grammar specified above
                speechEngine.LoadGrammar(g);

                speechEngine.SpeechRecognized += SpeechRecognized;

                speechEngine.SetInputToAudioStream(
                    sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                // Fail with a meaningful message instead of a bare Exception.
                throw new InvalidOperationException("No speech recognizer was found for this sensor.");
            }
        }
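The snippet above wires a SpeechRecognized handler it never shows. A minimal sketch of such a handler, reading the semantic tag attached by SemanticResultValue, follows; the handler body and the confidence threshold are illustrative assumptions, not part of the original.

        // Sketch of the handler wired above; the 0.3 threshold is an
        // illustrative assumption.
        private void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            const double ConfidenceThreshold = 0.3;

            if (e.Result.Confidence < ConfidenceThreshold)
            {
                return;
            }

            // The semantic value is the second argument passed to
            // SemanticResultValue when the grammar was built.
            switch (e.Result.Semantics.Value.ToString())
            {
                case "PLAY":
                    // e.g. toggle playback here
                    break;
                case "VOLUME UP":
                    // e.g. raise the player volume here
                    break;
                // ... the remaining commands follow the same pattern
            }
        }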
Example n. 56
0
 public void RecognizeText()
 {
     _recognizer.RecognizeAsync(RecognizeMode.Multiple);
 }
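A companion method to stop the continuous recognition started above is sketched below; it assumes the same _recognizer field and is not part of the original.

 // Sketch (not in the original): stops the continuous recognition
 // started by RecognizeText, letting any in-progress utterance finish.
 public void StopRecognizing()
 {
     _recognizer.RecognizeAsyncStop();
 }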
Example n. 57
0
 private void On_Click(object sender, RoutedEventArgs e)
 {
     recEngine.RecognizeAsync(RecognizeMode.Multiple);
     Off.IsEnabled = true;
     On.IsEnabled  = false;
 }
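The matching Off button handler is not shown; a hedged sketch, assuming the same recEngine field and the WPF buttons named above, would mirror the state flip.

 // Hypothetical counterpart (not in the original): stop recognition
 // and re-enable the On button.
 private void Off_Click(object sender, RoutedEventArgs e)
 {
     recEngine.RecognizeAsyncStop();
     Off.IsEnabled = false;
     On.IsEnabled  = true;
 }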
Example n. 58
        public Form1()
        {
            // Initialize the form first so its controls exist even if
            // speech setup fails below (the original only called this at
            // the end, after a possible early return in the catch block).
            InitializeComponent();

            SpeechRecognitionEngine rec = new SpeechRecognitionEngine();
            Choices list = new Choices();

            list.Add(new String[] { "hello", "how are you", "i'm fine" });

            Grammar gr = new Grammar(new GrammarBuilder(list));

            try
            {
                rec.RequestRecognizerUpdate();
                rec.LoadGrammar(gr);
                rec.SpeechRecognized += rec_SpeechRecognized;
                rec.SetInputToDefaultAudioDevice();
                rec.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch { return; }

            // s is a SpeechSynthesizer field declared elsewhere in the form.
            s.SelectVoiceByHints(VoiceGender.Female);
            s.Speak("Hello, my name is VoiceBot");
        }
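The rec_SpeechRecognized handler assumed above is not shown; a minimal sketch follows, where the echo behaviour is an assumption.

        // Sketch of the handler assumed above (not in the original):
        // replies through the same SpeechSynthesizer field.
        private void rec_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Text == "hello")
            {
                s.SpeakAsync("hello, how are you");
            }
        }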
Example n. 59
-1
        public void button1_Click(object sender, EventArgs e)
        {
            button1.Enabled = false;
            button1.Text = "God Called";
            label2.Text = "The god is listening...";
            label2.ForeColor = Color.Red;

            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine();

            Choices GodList = new Choices();
            GodList.Add(new string[] { "Make toast", "Make me toast", "Make me some toast", "Make me immortal", "Make rain", "call rain", "call the rain", "make it rain", "wink out of existence", "begone", "go now", "wink yourself out of existence" });

            GrammarBuilder gb = new GrammarBuilder();
            gb.Append(GodList);

            Grammar GodGrammar = new Grammar(gb);

            GodListener.MaxAlternates = 2;

            try
            {
                GodListener.RequestRecognizerUpdate();
                GodListener.LoadGrammar(GodGrammar);
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.SpeechRecognized += GodListener_SpeechRecognized;
                GodListener.AudioStateChanged += GodListener_AudioStateChanged;
                GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                return;
            }
        }
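This example wires three handlers it does not define. A sketch of the recognition handler, dispatching on the recognized phrase, is below; the case bodies are assumptions. Note also that GodListener is a local variable, so a production version would keep the engine in a field to prevent it from being garbage-collected while still listening.

        // Sketch of the handler wired above (not in the original).
        private void GodListener_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            switch (e.Result.Text)
            {
                case "make it rain":
                    label2.Text = "Let there be rain.";
                    break;
                case "begone":
                    Application.Exit();
                    break;
                // ... the remaining phrases follow the same pattern
            }
        }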
Example n. 60
-1
        public void button1_Click(object sender, EventArgs e)
        {
            button1.Enabled = false;
            button1.Text = "God Called";
            label2.Text = "The god is listening...";
            label2.ForeColor = Color.Red;

            // "en-GB" is the correct culture name for UK English
            // ("en-UK" is not a standard culture name).
            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"));
            DictationGrammar GodGrammar = new DictationGrammar();

            GodListener.MaxAlternates = 2;

            try
            {
                GodListener.RequestRecognizerUpdate();
                GodListener.LoadGrammar(GodGrammar);
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.SpeechRecognized += GodListener_SpeechRecognized;
                GodListener.AudioStateChanged += GodListener_AudioStateChanged;
                GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                return;
            }
        }
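Because this variant uses free-form dictation with MaxAlternates = 2, the handler can inspect alternate hypotheses; a hedged sketch, not part of the original:

        // Sketch (not in the original): with MaxAlternates = 2 the result
        // carries up to two hypotheses, ordered by confidence.
        private void GodListener_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            foreach (RecognizedPhrase phrase in e.Result.Alternates)
            {
                Console.WriteLine("{0} (confidence {1:F2})", phrase.Text, phrase.Confidence);
            }
        }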