Example #1
        private void url_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            string speech = e.Result.Text;

            if (speech == null)
            {
                url.RecognizeAsyncCancel();
                _recognizer.RecognizeAsync(RecognizeMode.Multiple);
                return;
            }
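            // Launch the requested site in Chrome, then hand recognition back to the main recognizer.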
            switch (speech)
            {
            case "facebook":
                Process.Start("chrome", "https://www.facebook.com/");
                break;

            case "youtube":
                Process.Start("chrome", "https://www.youtube.com");
                break;

            case "no":
                Process.Start("chrome");
                break;
            }
            url.RecognizeAsyncCancel();
            _recognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #2
        public void initializeSpeach()
        {
            Choices sList = new Choices();

            //Add the words

            try
            {
                gbuilder.Append(new Choices(System.IO.File.ReadAllLines(@"C:\users\" + Environment.UserName + @"\documents\commands.txt")));
            }
            catch
            {
                MessageBox.Show("The 'Commands' file must not contain empty lines.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                pr.StartInfo.FileName = @"C:\users\" + Environment.UserName + @"\documents\commands.txt";
                pr.Start();
                Application.Exit();
                return;
            }

            Grammar gr = new Grammar(gbuilder);

            try
            {
                sRecognize.UnloadAllGrammars();
                sRecognize.RecognizeAsyncCancel();
                sRecognize.RequestRecognizerUpdate();
                sRecognize.LoadGrammar(gr);
                sRecognize.SpeechRecognized += sRecognize_SpeechRecognized;

                sRecognize.SetInputToDefaultAudioDevice();
                sRecognize.RecognizeAsync(RecognizeMode.Multiple);
            }

            catch
            {
                MessageBox.Show("Grammar Builder Error");
                return;
            }
        }
Example #3
 private void buttonQuitter_Click(object sender, EventArgs e)
 {
     boutonStop_Click(sender, e);
     moteurReconnaissance.RecognizeAsyncCancel();
     timer2.Stop();
     this.Close();
 }
Example #4
        private static void ResetGrammar()
        {
            // stop recognizing
            engine.RecognizeAsyncCancel();

            engine.UnloadAllGrammars();

            // reset the engine grammar
            var choices = new Choices();

            foreach (string key in recognitions.Keys)
            {
                choices.Add(key);
            }

            var gb = new GrammarBuilder {
                Culture = GetKinectRecognizer().Culture
            };

            gb.Append(choices);

            Grammar grammar = new Grammar(gb);

            engine.LoadGrammar(grammar);

            // start recognizing again
            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #5
File: MailBox.cs Project: FayshalU/OHannah
        private void button2_Click(object sender, EventArgs e)
        {
            this.Hide();
            engine.RecognizeAsyncCancel();
            Compose c = new Compose(this);

            c.Show();
        }
Example #6
 public void CancelListening()
 {
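     // Only cancel when an engine exists and is actively listening; mark it as pausing and raise the stopped-listening event.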
     if (Engine != null && (State == VoiceRecognizerState.Listening || State == VoiceRecognizerState.LinsteningOnce))
     {
         Engine.RecognizeAsyncCancel();
         State = VoiceRecognizerState.Pausing;
         TriggerStoppedListening();
     }
 }
Example #7
 public void search()
 {
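     // Pause the main recognizer and start a secondary recognizer that listens only for the search phrases.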
     getSearchFile.Show();
     myVoice.RecognizeAsyncCancel();
     whatToSearch.SetInputToDefaultAudioDevice();
     whatToSearch.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(getPhrases()))));
     whatToSearch.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(whatToSearch_SpeechRecognized);
     whatToSearch.RecognizeAsync(RecognizeMode.Multiple);
 }
Example #8
File: STTForm.cs Project: davtrig/STT_Bot
        private void Default_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            int    ranNum;

            if (e.Result == null)
            {
                return;
            }

            string speech = e.Result.Text;

            if (speech == "Hello")
            {
                Bot.SpeakAsync("Hello, I am here");
            }
            if (speech == "How are you")
            {
                Bot.SpeakAsync("I am working normally");
            }
            if (speech == "What time is it")
            {
                Bot.SpeakAsync(DateTime.Now.ToString("h mm tt"));
            }
            if (speech == "Ask questions")
            {
                Bot.SpeakAsync("For which of your pets do you want to schedule an appointment?");
                _recognizer.RecognizeAsyncCancel();
                _answersRecognizer.RecognizeAsync(RecognizeMode.Multiple);
            }
            if (speech == "Stop talking")
            {
                Bot.SpeakAsyncCancelAll();
                ranNum = rnd.Next(1, 3); // Next's upper bound is exclusive, so this yields 1 or 2
                if (ranNum == 1)
                {
                    Bot.SpeakAsync("Yes sir");
                }
                else if (ranNum == 2)
                {
                    Bot.SpeakAsync("I will be quiet");
                }
            }
            if (speech == "Stop listening")
            {
                Bot.SpeakAsync("if you need me just ask");
                _recognizer.RecognizeAsyncCancel();
                startlistening.RecognizeAsync(RecognizeMode.Multiple);
            }
            if (speech == "Show commands")
            {
                showCommands();
            }
            if (speech == "Hide commands")
            {
                LstCommands.Visible = false;
            }
        }
Example #9
        private void startlistening_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            string speech = e.Result.Text;

            if (speech == "Wake up" || speech == "Hi " + name)
            {
                synth.SpeakAsync("Yes,I'm here");
                _recognizer.RecognizeAsync(RecognizeMode.Multiple);
            }
        }
Example #10
        void movetoapplication()
        {
            _sps.SpeakAsync("Moving to Application Sector");

            button1.Text   = "ADD";
            update.Visible = true;
            delete.Visible = true;
            button2.Text   = "Open Application";
            _spe.RecognizeAsyncCancel();
            _speexe.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #11
 public void Close()
 {
     if (engine != null)
     {
         Stop();
         System.Diagnostics.Debug.WriteLine("Closing");
         engine.RecognizeAsyncCancel();
         engine.SpeechRecognized -= Engine_SpeechRecognized;
         engine.Dispose();
         engine = null;
     }
 }
Example #12
        private void Default_speechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            int    ranNum;
            string speech = e.Result.Text;

            if (speech == "Hello")
            {
                Sarah.SpeakAsync("Hello, i am here");
            }
            if (speech == "How are you")
            {
                Sarah.SpeakAsync("i am working normally");
            }
            if (speech == "What time is it")
            {
                Sarah.SpeakAsync(DateTime.Now.ToString("h mm tt"));
            }
            if (speech == "Stop talking")
            {
                Sarah.SpeakAsyncCancelAll();
                ranNum = rnd.Next(1);
                if (ranNum == 1)
                {
                    Sarah.SpeakAsync("yes sir");
                }
                if (ranNum == 2)
                {
                    Sarah.SpeakAsync("i am sorry i will be quiet");
                }
            }
            if (speech == "Stop listening")
            {
                Sarah.SpeakAsync("if you need me just ask");
                _recognizer.RecognizeAsyncCancel();
                startlistening.RecognizeAsync(RecognizeMode.Multiple);
            }
            if (speech == "Show List")
            {
                string[] commands = (File.ReadAllLines(@"DefaultCommands.txt"));
                listCommands.Items.Clear();
                listCommands.SelectionMode = SelectionMode.None;
                listCommands.Visible       = true;
                foreach (string command in commands)
                {
                    listCommands.Items.Add(command);
                }
            }
            if (speech == "Close Commands")
            {
                listCommands.Visible = false;
            }
        }
Example #13
        // NOTIFIER  -----------------------------------------------------

        void notifyIcon_Click(object sender, EventArgs e)
        {
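            // Clicking the notification icon toggles continuous recognition on and off.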
            if (recognizing)
            {
                rec.RecognizeAsyncCancel();
                recognizing = false;
            }
            else
            {
                rec.RecognizeAsync(RecognizeMode.Multiple);
                recognizing = true;
            }
        }
Example #14
        private void Form1_Load(object sender, EventArgs e)
        {
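            // Pick the first installed voice, build a grammar from commands.txt, and start continuous recognition.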
            foreach (InstalledVoice voice in sSynth.GetInstalledVoices())
            {
                VoiceInfo info = voice.VoiceInfo;
                name = info.Name;
                break;
            }
            GrammarBuilder gb = new GrammarBuilder();

            try
            {
                StreamReader sr = new StreamReader("commands.txt");

                while (sr.Peek() > -1)
                {
                    cmndlist.Items.Add(sr.ReadLine());
                }
                sr.Close();

                gb.Append(new Choices(File.ReadAllLines("commands.txt")));
            }
            catch
            {
                MessageBox.Show("The \"commands\" must not contain any empty lines!", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                pr.StartInfo.FileName = ("commands.txt");
                pr.Start();
                Application.Exit();
                return;
            }
            gb.Culture = new CultureInfo("en-IN");
            Grammar gr = new Grammar(gb);

            try
            {
                sRecognize.UnloadAllGrammars();
                sRecognize.RecognizeAsyncCancel();
                sRecognize.RequestRecognizerUpdate();
                sRecognize.LoadGrammar(gr);
                //sRecognize.LoadGrammar(new Grammar(new GrammarBuilder("exit")));
                //sRecognize.LoadGrammar(new DictationGrammar());
                sRecognize.SpeechRecognized += sRecognize_SpeechRecognized;
                sRecognize.SetInputToDefaultAudioDevice();
                sRecognize.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                MessageBox.Show("Grammar Builder Error");
                return;
            }
        }
Example #15
        private void Default_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            int    ranNum;
            string speech = e.Result.Text;

            if (speech == "Hello")
            {
                Sarah.SpeakAsync("Hi!");
            }
            if (speech == "How are you")
            {
                Sarah.SpeakAsync("good. and you?");
            }
            if (speech == "What time is it")
            {
                Sarah.SpeakAsync(DateTime.Now.ToString("h mm tt"));
            }
            if (speech == "Stop talking")
            {
                Sarah.SpeakAsyncCancelAll();
                ranNum = rnd.Next(1, 2);
                if (ranNum == 1)
                {
                    Sarah.SpeakAsync("i wil stop talking");
                }
                if (ranNum == 2)
                {
                    Sarah.SpeakAsync("Sorry i will be quiet");
                }
            }
            if (speech == "Stop listening")
            {
                Sarah.SpeakAsync("if you need me just ask");
                _recognizer.RecognizeAsyncCancel();
                startlistening.RecognizeAsync(RecognizeMode.Multiple);
            }
            if (speech == "Show commands")
            {
                string[] commands = (File.ReadAllLines(@"DefaultCommands.txt"));
                LstCommands.Items.Clear();
                LstCommands.SelectionMode = SelectionMode.None;
                foreach (string command in commands)
                {
                    LstCommands.Items.Add(command);
                }
            }
            if (speech == "Hide commands")
            {
                LstCommands.Items.Clear();
            }
        }
Example #16
        private void Recognizer(object sender, SpeechRecognizedEventArgs e)
        {
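            // On "Go Back", stop this form's recognizer before switching back to the main menu.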
            string speech = e.Result.Text;


            if (speech == "Go Back")
            {
                Main_Menu g = new Main_Menu();
                spe.RecognizeAsyncCancel();
                this.Hide();

                g.Show();
            }
        }
Example #17
        private void reminderTask_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            string speech = e.Result.Text;

            if (speech == null)
            {
                _recognizer.RecognizeAsync(RecognizeMode.Multiple);
                reminderTask.RecognizeAsyncCancel();
                return;
            }
            currentReminder.What = "You have a " + speech;
            reminders.Add(currentReminder);
            reminderTask.RecognizeAsyncCancel();
            _recognizer.RecognizeAsync(RecognizeMode.Multiple);
            Jarvis.SpeakAsync("Reminder set for" + currentReminder.When);
        }
Example #18
        private void UninitializeKinectServices()
        {
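            // Log that the Kinect is disconnected ("Bağlı değil" means "not connected"), then stop the sensor and recognizer.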
            appendLogEntry("Kinect [Bağlı değil]");

            kinectSensor.SkeletonFrameReady -= kinectSensor_SkeletonFrameReady;

            kinectSensor.Stop();

            speechRecognizer.RecognizeAsyncCancel();
            speechRecognizer.RecognizeAsyncStop();

            colorViewer.Kinect    = null;
            skeletonViewer.Kinect = null;

            if (kinectSensor.SkeletonStream != null)
            {
                kinectSensor.SkeletonStream.Disable();
            }

            if (readyTimer == null)
            {
                return;
            }

            readyTimer.Stop();
            readyTimer = null;
        }
Example #19
 public MainWindow()
 {
     InitializeComponent();
     this.DataContext = this;
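      // Tear down the skeleton stream and dispose of the speech engine when the window is unloaded.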
     this.Unloaded   += delegate
     {
         _kinectSensor.SkeletonStream.Disable();
         _sre.RecognizeAsyncCancel();
         _sre.RecognizeAsyncStop();
         _sre.Dispose();
     };
     this.Loaded += delegate
     {
         _kinectSensor = KinectSensor.KinectSensors[0];
         _kinectSensor.SkeletonStream.Enable(new
                                             TransformSmoothParameters()
         {
             Correction         = 0.5f,
             JitterRadius       = 0.05f,
             MaxDeviationRadius = 0.04f,
             Smoothing          = 0.5f
         });
         _kinectSensor.SkeletonFrameReady += nui_SkeletonFrameReady;
         _kinectSensor.Start();
         StartSpeechRecognition();
     };
 }
Example #20
 public void ListenIO()
 {
     System.Console.WriteLine("Listening for input. Press Enter to stop listening.");
     Console.ReadLine();
     Engine.RecognizeAsyncCancel();
     new MainMenu();
 }
Example #21
        static void Main(string[] args)
        {
            var numberChoices = new Choices();

            for (int i = 0; i <= 100; i++)
            {
                numberChoices.Add(i.ToString());
            }
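            // Build the phrase "Select player <number>", capturing the spoken number under the semantic key "number".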
            var gb = new GrammarBuilder();

            gb.Append("Select player");
            gb.Append(new SemanticResultKey("number", numberChoices));

            var sr = new SpeechRecognitionEngine();

            sr.SetInputToDefaultAudioDevice();
            sr.LoadGrammar(new Grammar(gb));
            sr.SpeechRecognized          += SpeechRecognized;
            sr.SpeechDetected            += SpeechDetected;
            sr.SpeechRecognitionRejected += SpeechRejected;
            // loop recognition
            sr.RecognizeCompleted += (s, e) => sr.RecognizeAsync();

            Console.WriteLine("Say phrase \"Select player {0-100}\"");

            sr.RecognizeAsync();
            Console.Read();
            sr.RecognizeAsyncCancel();
        }
Example #22
 private void pictureBox1_Click(object sender, EventArgs e)
 {
     if (changelisten == true)
     {
         _recognizer.RecognizeAsync(RecognizeMode.Multiple);
         startlistening.RecognizeAsyncCancel();
         changelisten = false;
     }
 }
Example #23
        private void bdumbchk_CheckedChanged(object sender, EventArgs e)
        {
            if (bdumbchk.Checked == true)
            {
                _RecEng.RecognizeAsyncCancel();

                SP.BackgroundImage = global::SAM_Media_Player.Properties.Resources.Microphone_iconC;
            }

            else
            {
                _RecEng.RecognizeAsync(RecognizeMode.Multiple);

                SP.BackgroundImage = global::SAM_Media_Player.Properties.Resources.Microphone_iconActC;
            }
        }
Example #24
        //I need cancellationTokenSource, because I might want to cancel listening by command.
        public void Run(CancellationTokenSource cancellationTokenSource)
        {
            if (cancellationTokenSource == null)
            {
                throw new ArgumentNullException(nameof(cancellationTokenSource));
            }

            using (var recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US")))
            {
                var grammars = this._speechGrammarProvider.ProvideGrammars();
                foreach (var grammar in grammars)
                {
                    recognizer.LoadGrammarAsync(grammar);
                }

                recognizer.SpeechRecognized +=
                    new EventHandler <SpeechRecognizedEventArgs>(
                        SpeechRecognizedHandler);

                recognizer.SetInputToDefaultAudioDevice();
                recognizer.RecognizeAsync(RecognizeMode.Multiple);

                cancellationTokenSource.Token.WaitHandle.WaitOne();
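                // Once cancellation is requested, stop the asynchronous recognition before the engine is disposed.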
                recognizer.RecognizeAsyncCancel();
            }
        }
Example #25
        private void reminder_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
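            // Store the recognized time in a new reminder, then switch to the recognizer that listens for the task description.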
            if (e.Result.Text == null)
            {
                _recognizer.RecognizeAsync(RecognizeMode.Multiple);
                reminder.RecognizeAsyncCancel();
                return;
            }
            string speech = e.Result.Text;

            currentReminder      = new Reminder();
            currentReminder.When = speech;
            reminder.RecognizeAsyncCancel();
            reminderTask.RecognizeAsync(RecognizeMode.Multiple);
            Jarvis.SpeakAsync("Reminder for");
        }
Example #26
 private void OnToggle(object src, RoutedEventArgs e)
 {
     if (recognising)
     {
         rec.RecognizeAsyncCancel();
         recognising = false;
         //statusBar.window.status.Content = "Recognition Stop";
     }
     else
     {
         rec.RecognizeAsync(RecognizeMode.Multiple);
         recognising = true;
         //statusBar.window.status.Content = "I'm listening...";
     }
     //statusBar.ToggleColor(recognising);
 }
Example #27
 private void CloseBtn_Click(object sender, EventArgs e)
 {
     Recog_speech.RecognizeAsyncCancel();
     Recog_speech.Dispose();
     this.Close();
     this.Dispose();
 }
Example #28
        public override string ReadLine()
        {
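            // Races keyboard input against speech input: whichever finishes first signals _autoResetEvent
            // (the speech-recognized handler is assumed to set _input and _inputMethod elsewhere).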
            _inputMethod = InputMethod.Unknown;

            var task = Task.Factory.StartNew(() =>
            {
                var s        = System.Console.ReadLine();
                _inputMethod = InputMethod.Keyboard;
                if (!_reading)
                {
                    return;
                }
                _input = s;
                _autoResetEvent.Set();
            });

            _mainSpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            _reading = true;
            _autoResetEvent.WaitOne();
            _reading = false;

            _mainSpeechRecognitionEngine.RecognizeAsyncCancel();

            if (_inputMethod == InputMethod.Voice)
            {
                var hWnd = System.Diagnostics.Process.GetCurrentProcess().MainWindowHandle;
                PostMessage(hWnd, WM_KEYDOWN, VK_RETURN, 0);
            }

            task.Wait();
            task.Dispose();

            return(_input);
        }
Example #29
 /*
  * Enable/disable SR
  * Disable SR when you absolutely don't need to recognize a speech.
  * You can keep the SR running when activate/deactivate grammars
  */
 public void enableSR(bool b)
 {
     if (b)
     {
         if (!recOn)
         {
             while (!loadGrammarCompleted && loadGrammarCount != 0)
             {
                 Thread.Sleep(3);
             }
             Debug.WriteLine("is load grammar complete before enable SR? " +
                             loadGrammarCompleted + "\t" + EBookUtil.GetUnixTimeMillis());
             ebookStream.enable(true);
             recEngine.RecognizeAsync(RecognizeMode.Multiple);
             recOn = true;
             Debug.WriteLine("Rec on");
         }
     }
     else
     {
         if (recOn)
         {
             ebookStream.enable(false);
             recEngine.RecognizeAsyncCancel();//.RecognizeAsyncStop();
             recOn = false;
             Debug.WriteLine("Rec off");
         }
     }
 }
Example #30
        public MainWindow()
        {
            InitializeComponent();
            UnInitializePtr();

            this.DataContext = this;
            this.Unloaded   += delegate
            {
                kinectDevice.SkeletonStream.Disable();
                _sre.RecognizeAsyncCancel();
                _sre.RecognizeAsyncStop();
                _sre.Dispose();
            };
            this.Loaded += delegate
            {
                kinectDevice = KinectSensor.KinectSensors[0];
                kinectDevice.SkeletonStream.Enable(new
                                                   TransformSmoothParameters()
                {
                    Correction         = 0.5f,
                    JitterRadius       = 0.05f,
                    MaxDeviationRadius = 0.04f,
                    Smoothing          = 0.5f
                });
                kinectDevice.SkeletonFrameReady += KinectDevice_SkeletonFrameReady;
                kinectDevice.Start();
                StartSpeechRecognition();
            };

            // Register for Kinect status-changed events
            KinectSensor.KinectSensors.StatusChanged += KinectSensors_StatusChanged;
            // Get the first available (connected) Kinect
            this.KinectDevice = KinectSensor.KinectSensors.FirstOrDefault(x => x.Status == KinectStatus.Connected);
        }
Example #31
File: Program.cs Project: bruceburge/Nigel
        static void Main(string[] args)
        {
            // Create an in-process speech recognizer.
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US")))
            {
                // Create a grammar for choosing commandChoices for a flight.
                Choices commandChoices = new Choices(new string[] { "Lights On", "Lights Off", "All Off", "Say Time"});

                GrammarBuilder gb = new GrammarBuilder();
                gb.Append(Properties.Settings.Default.AssistantName);
                gb.Append(commandChoices);

                // Construct a Grammar object and load it to the recognizer.
                Grammar commandChooser = new Grammar(gb);
                commandChooser.Name = ("Command Chooser");
                recognizer.LoadGrammarAsync(commandChooser);

                // Attach event handlers.
                recognizer.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(SpeechDetectedHandler);
                recognizer.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(SpeechHypothesizedHandler);
                recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejectedHandler);
                recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognizedHandler);
                recognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(RecognizeCompletedHandler);

                // Assign input to the recognizer and start asynchronous
                recognizer.SetInputToDefaultAudioDevice();

                _completed = false;
                Console.WriteLine("Starting asynchronous recognition...");
                //recognizer.RecognizeAsync(RecognizeMode.Multiple);

                recognizer.EmulateRecognizeAsync("Nigel Lights On");

                // Wait 30 seconds, and then cancel asynchronous recognition.
                Thread.Sleep(TimeSpan.FromSeconds(30));
                recognizer.RecognizeAsyncCancel();

                // Wait for the operation to complete.
                while (!_completed)
                {
                    Thread.Sleep(333);
                }
                Console.WriteLine("Done.");
            }

            Console.WriteLine();
            Console.WriteLine("Press any key to exit...");
            Console.ReadKey();
        }
Example #32
File: Form1.cs Project: bonfiredog/knole
        //-------------------------------------
        //Load form.
        private void Form1_Load(object sender, EventArgs e)
        {
            //Display the default audio level & the first line of the text box.
            textBox5.Text = GodAudioLevel.ToString();
            textBox1.Text = "Begin speaking.";
            textBox6.Text = "Error Log" + Environment.NewLine + "-----------------" + Environment.NewLine;

            //Wipe the results text file, for a clean start.
            WipeLatestResults();

            //Audio Box defaults.
            radioButton1.Text = "Receiving Audio";
            radioButton1.Checked = true;
            radioButton1.ForeColor = Color.Red;

            //Create and initialise the speech recognition, with a UK culture marker.
            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine(new CultureInfo("en-GB")); // "en-UK" is not a valid culture name; the UK culture is "en-GB"

            //The specific phrases that the god will be able to recognise, split into different grammars.

            Choices TestPhrases = new Choices();
            TestPhrases.Add(TestPhrasesArray);
            Grammar TestGrammar = new Grammar(TestPhrases);
            TestGrammar.Name = "TestGrammar";

            //-------------

            Choices RandomNoisePhrases = new Choices();
            RandomNoisePhrases.Add(new string[] {"tap"});
            Grammar RandomNoiseGrammar = new Grammar(RandomNoisePhrases);
            RandomNoiseGrammar.Name = "RandomNoiseGrammar";

            //-------------

            // etc....

            //A DictationGrammar to handle minor differences.

            DictationGrammar DictationGrammar = new DictationGrammar("grammar:dictation#pronunciation");
            DictationGrammar.Name = "DictationGrammar";

            //Start recognising.
            try
            {
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.LoadGrammar(TestGrammar);
                GodListener.LoadGrammar(DictationGrammar);
                GodListener.MaxAlternates = 2;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            //Show up any errors in beginning recognition.
            catch(Exception error)
            {
                radioButton1.Text = "Error...";
                GodListener.RecognizeAsyncCancel();
                textBox5.Text = "";
                textBox3.Text = "";
                textBox6.Text = textBox6.Text + Environment.NewLine + error.Message;
            }

            //Handling events from audio recognition.
            GodListener.AudioStateChanged += GodListener_AudioStateChanged;
            GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
            GodListener.SpeechRecognized += GodListener_SpeechRecognized;
            GodListener.SpeechRecognitionRejected += GodListener_SpeechRecognitionRejected;
            GodListener.SpeechDetected += GodListener_SpeechDetected;
        }
Example #33
        private void EngineWorker()
        {
            KinectAudioSource audioSource = new KinectAudioSource();
            audioSource.FeatureMode = true;
            audioSource.AutomaticGainControl = false;
            audioSource.SystemMode = SystemMode.OptibeamArrayOnly;

            Stream audioStream = audioSource.Start();

            SpeechRecognitionEngine engine = new SpeechRecognitionEngine(ri.Id);
            engine.LoadGrammar(grammar);
            engine.SpeechRecognized += OnSpeechRecognized;
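            // Feed the Kinect audio stream to the recognizer as 16 kHz, 16-bit, mono PCM.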
            engine.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            engine.RecognizeAsync(RecognizeMode.Multiple);
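            // Keep recognizing until the worker is signaled to stop, then cancel recognition and release the audio source.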

            engineLock.WaitOne();

            engine.RecognizeAsyncCancel();
            engine.SpeechRecognized -= OnSpeechRecognized;
            audioSource.Stop();
        }