Example #1
        private void SpeechRecEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            try
            {
                Thread.Sleep(2000); // the original's un-awaited Task.Delay(2000) returned immediately and paused nothing
                switch (e.Result.Text)
                {
                case "Hi":

                    speechRecEngine.RecognizeAsyncStop();
                    transaction.DoTransaction(accountNo);
                    break;

                case "Cancel":
                    speechRecEngine.RecognizeAsyncStop();
                    speechSynthesizer.SpeakAsync("Transaction canceled.");
                    Thread.Sleep(3000); // likewise, Task.Delay(3000) without await did not wait
                    Environment.Exit(0);
                    break;
                }
                GC.Collect(); // forcing a collection here is almost never necessary
            }
            catch (Exception ex)
            {
                Console.WriteLine("Error: {0}", ex.Message); // the original lacked a {0} placeholder, so the message was never printed
                GC.Collect();
            }
        }
Example #2
 public void PauseRecognition()
 {
     if (recognizer != null)
     {
         recognizer.RecognizeAsyncStop();
     }
 }
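A matching resume helper under the same null-guard assumption (a sketch, not from the original source; it reuses the recognizer field from the example above):

 public void ResumeRecognition()
 {
     if (recognizer != null)
     {
         recognizer.RecognizeAsync(RecognizeMode.Multiple);
     }
 }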
Example #3
 private void _speechRecognition_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
 {
     _speechRecognition.RecognizeAsyncStop();
     Thread.Sleep(30);
     _speech.Speak("请再说一遍"); // "Please say that again"
     _speechRecognition.RecognizeAsync(RecognizeMode.Multiple);
 }
Example #4
        private void button5_Click(object sender, EventArgs e)
        {
            if (button5.Text == "               Откалибровать") // "Calibrate"
            {
                button5.Text = "                     Отмена"; // "Cancel"

                Form1.button1.Enabled = false;
                Form1.button2.Enabled = false;
                Form1.button3.Enabled = false;
                Form1.button4.Enabled = false;
                Form1.button5.Enabled = false;

                determinant_.SetInputToDefaultAudioDevice();
                // "calculator, task manager, registry editor" ("диспетчер" fixes the original's "дисретчер" typo)
                determinant_.LoadGrammar(new Grammar(new GrammarBuilder("калькулятор, диспетчер задач, редактор реестра")));

                determinant_.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Determinant_SpeechRecognized_);

                determinant_.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                button5.Text = "               Откалибровать"; // "Calibrate"

                determinant_.RecognizeAsyncStop();

                Form1.button1.Enabled = true;
                Form1.button2.Enabled = true;
                Form1.button3.Enabled = true;
                Form1.button4.Enabled = true;
                Form1.button5.Enabled = true;
            }
        }
Example #5
        private void chkXinfiniti_CheckedChanged(object sender, EventArgs e)
        {
            sRecognize.RecognizeAsyncStop();
            chkXinfiniti.Enabled = false;
            checkBox1.Enabled    = true;

            if (chkXinfiniti.Checked)
            {
                sSynth.Speak("Welcome to X infinity");
                System.Threading.Thread.Sleep(500);

                Choices sList = new Choices();
                sList.Add(new string[] { "Open", "Play", "Resume", "Pause", "Stop", "Listen", "Hide", "Show", "Close", "Out", "Music" });
                Grammar gr = new Grammar(new GrammarBuilder(sList));
                try
                {
                    sRecognize.RequestRecognizerUpdate();
                    sRecognize.LoadGrammar(gr);
                    sRecognize.SpeechRecognized += sRecognize_SpeechRecognized;
                    sRecognize.SetInputToDefaultAudioDevice();
                    sRecognize.RecognizeAsync(RecognizeMode.Multiple);
                }
                catch
                {
                    // initialization failures are silently swallowed here; consider logging the exception
                    return;
                }
            }
        }
Example #6
 void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
 {
     MessageBox.Show(e.Result.Text);
     recEngine.RecognizeAsyncStop();
     textBox1.Text       = e.Result.Text;
     pictureBox1.Visible = false;
 }
Example #7
 private void withdrawMoney(int requestAmount)
 {
     try
     {
         speechRecEngine3.RecognizeAsyncStop();
         speechSynthesizer.SpeakAsync("Withdrawing.");
         Thread.Sleep(1000); // the original's un-awaited Task.Delay(1000) did not pause
         if ((accountBal - requestAmount) <= 0)
         {
             speechSynthesizer.SpeakAsync("Sorry. You don't have enough money in your account to withdraw");
             return;
         }
         else
         {
             accountBal = (accountBal - requestAmount);
             speechSynthesizer.SpeakAsync("You have withdrawn ");
             speechSynthesizer.SpeakAsync(requestAmount.ToString()); // the original read this.requestAmount, a field shadowed by the parameter
             speechSynthesizer.SpeakAsync("rupees successfully.");
             conn.Open();
             SqlCommand cmd = conn.CreateCommand();
             cmd.CommandType = CommandType.Text;
             // WARNING: concatenating values into SQL invites injection; see the parameterized sketch below
             cmd.CommandText = "update AccountDetails set AccountBalance = '" + accountBal + "' where AccountNo = '" + accountNo + "'";
             cmd.ExecuteNonQuery();
             conn.Close();
             Console.WriteLine("Updated successfully");
             Thread.Sleep(2000);
             return;
         }
     }
     catch (Exception ex)
     {
         Console.WriteLine("Error : " + ex.Message);
     }
 }
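The concatenated UPDATE above is open to SQL injection. A minimal parameterized rewrite of just the database step, assuming the same conn, accountBal, and accountNo members, could look like this:

     // Hypothetical rewrite using SqlParameter; not part of the original project.
     using (SqlCommand cmd = conn.CreateCommand())
     {
         cmd.CommandType = CommandType.Text;
         cmd.CommandText = "update AccountDetails set AccountBalance = @balance where AccountNo = @accountNo";
         cmd.Parameters.AddWithValue("@balance", accountBal);
         cmd.Parameters.AddWithValue("@accountNo", accountNo);
         cmd.ExecuteNonQuery();
     }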
Example #8
 private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
 {
     // stop asynchronous recognition before the window closes
     speechRecognitionEngine.RecognizeAsyncStop();
     // release the engine's resources
     speechRecognitionEngine.Dispose();
 }
Example #9
File: Alarm.cs Project: FayshalU/OHannah
        void engine4_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            string speech = e.Result.Text;

            textBox1.Text += speech + "\r\n";
            if (e.Result.Text == "yes")
            {
                engine2.RecognizeAsyncStop();
                SelectHour();
            }
            else if (e.Result.Text == "no")
            {
                engine2.RecognizeAsyncStop();
                DateTime d2 = DateTime.Now;
                string   tt = d2.ToString("tt", CultureInfo.InvariantCulture);


                if (tt == "PM")
                {
                    hour = hour + 12;
                }

                DateTime d = new DateTime(d2.Year, d2.Month, d2.Day, hour, min, 0);
                //MessageBox.Show(d.ToLongTimeString());
                time = d;
                engine.RecognizeAsync(RecognizeMode.Multiple);
                SetAlarm();
            }
            else if ((e.Result.Text == "go back"))
            {
                button2.PerformClick();
            }
        }
Example #10
File: Weather.cs Project: FayshalU/OHannah
        void CheckWeather()
        {
            engine.RecognizeAsyncStop();
            label7.Text  = "";
            label8.Text  = "";
            label9.Text  = "";
            label10.Text = "";
            label11.Text = "";
            label12.Text = "";
            GetWeather weather = new GetWeather(textBox1.Text);

            label7.Text += weather.Temp;
            ohannah.SpeakAsync(label1.Text + label7.Text);
            label8.Text += weather.TempMax;
            ohannah.SpeakAsync(label2.Text + label8.Text);
            label9.Text += weather.TempMin;
            ohannah.SpeakAsync(label3.Text + label9.Text);
            label10.Text += weather.Humidity;
            ohannah.SpeakAsync(label4.Text + label10.Text);
            label11.Text += weather.Wind;
            ohannah.SpeakAsync(label5.Text + label11.Text);
            label12.Text += weather.Clouds;
            ohannah.SpeakAsync(label6.Text + label12.Text);
            //ohannah.Speak(label1.Text + label2.Text + label3.Text + label4.Text + label5.Text + label6.Text );

            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #11
        void engine2_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            string speech = e.Result.Text;

            textBox1.Text += speech + "\r\n";

            if (e.Result.Text == "set reminder")
            {
                engine2.RecognizeAsyncStop();
                //ohannah.Resume();
                engine.RecognizeAsync(RecognizeMode.Multiple);
                //ohannah.SpeakAsync("Message added");
                button1.PerformClick();
            }
            else if (e.Result.Text == "add date")
            {
                engine2.RecognizeAsyncStop();

                ohannah.Speak("Message added");
                AddMonth();
            }
            else if (e.Result.Text == "back" || e.Result.Text == "go back")
            {
                engine2.RecognizeAsyncStop();
                //ohannah.Resume();
                engine.RecognizeAsync(RecognizeMode.Multiple);
                //ohannah.SpeakAsync("Message added");
                button2.PerformClick();
            }
            else
            {
                richTextBox1.Text += (speech + " ");
            }
        }
Example #12
 /// <summary>
 /// Adds a name to the database, unless it collides with a recognizer keyword.
 /// </summary>
 public void AddNameToDatabase(string addName, SqlConnection connection)
 {
     if (addName != string.Empty)
     {
         if (commandList.IndexOf(addName.ToLower()) >= 0)
         {
             MessageBox.Show(addName + " is a keyword and cannot be added to the database as a name", "Warning", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
             txtName.Text = string.Empty;
         }
         else
         {
             // Stops the recognizer temporarily before adding the name to the database
             nameRecognizer.RecognizeAsyncStop();
             connection.Open();
             SqlCommand insertNameCommand = connection.CreateCommand();
             // Parameterized to avoid SQL injection; the original concatenated addName directly into the statement
             insertNameCommand.CommandText = "Insert into Patient (Name) values(@name)";
             insertNameCommand.Parameters.AddWithValue("@name", addName);
             insertNameCommand.ExecuteNonQuery();
             connection.Close();
             MessageBox.Show("Added " + addName + " !", "Database Updated", MessageBoxButtons.OK, MessageBoxIcon.Information);
             txtName.Text = string.Empty;
             nameRecognizer.RecognizeAsync(RecognizeMode.Multiple);
         }
     }
     else
     {
         if (txtName.Text == string.Empty)
         {
             // Displays a label prompting the user to enter valid input
             lblInvalidInput.Show();
         }
     }
 }
Example #13
        private void rec_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Confidence > -1f) // Confidence lies in [0, 1], so this threshold accepts every result; raise it to filter weak matches
            {
                answerLabel.Text += e.Result.Text.ToString();

                if (answerLabel.Text.Length == 3 && answerLabel.Text == words[pos - 1])
                {
                    playSound("WON");
                    //MessageBox.Show("CORRECT! " + answerLabel.Text);
                    points++;
                    wordsLeft++;
                    numOfWordsLabel.Text = "Words: " + wordsLeft.ToString() + "/" + words.Length.ToString();
                    answerLabel.Text     = null;
                    sre.RecognizeAsyncStop();
                    if (pos == words.Length)
                    {
                        pos       = 1;
                        wordsLeft = 1;
                        seconds   = 0;
                        stopGame();
                    }
                    else
                    {
                        pos = pos + 1;
                    }
                    if (seconds > 0)
                    {
                        game(pos - 1);
                    }
                    //MessageBox.Show("result: " + e.Result.Text.ToString());
                }
                else if (answerLabel.Text.Length == 3 && answerLabel.Text != words[pos - 1])
                {
                    playSound("LOSE");
                    //MessageBox.Show("WRONG! " + answerLabel.Text);
                    wordsLeft++;
                    numOfWordsLabel.Text = "Words: " + wordsLeft.ToString() + "/" + words.Length.ToString();
                    answerLabel.Text     = null;
                    sre.RecognizeAsyncStop();
                    if (pos == words.Length)
                    {
                        pos       = 1;
                        wordsLeft = 1;
                        seconds   = 0;
                        stopGame();
                    }
                    else
                    {
                        pos = pos + 1;
                    }

                    if (seconds > 0)
                    {
                        game(pos - 1);
                    }
                }
            }
        }
Example #14
    public void StopListening()
    {
        engine.RecognizeAsyncStop();
        OnAudioLevelUpdated(new VoiceCommandEvents.AudioLevelUpdatedEventArgs(0));

        sfx.SoundLocation = @"C:\Windows\Media\Speech Sleep.wav";
        sfx.Play();
    }
Example #15
        private void button1_Click(object sender, EventArgs e)
        {
            rec.RecognizeAsyncStop();
            ventalumno nueva_venta = new ventalumno();

            nueva_venta.Show();
            this.Hide();
        }
Example #16
 public void Start()
 {
     if (speechEngine.AudioState == AudioState.Stopped)
     {
         speechEngine.RecognizeAsyncStop();
         speechEngine.RecognizeAsync(RecognizeMode.Multiple);
     }
 }
Example #17
        private void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Text == preposition[num])
            {
                label1.ForeColor = System.Drawing.Color.Yellow;
                label1.Text      = "CORECT";
                recEngine.RecognizeAsyncStop();
                //btaDisable.Enabled = false;
                timer1.Stop();
                progressBar1.Value = 0;
                num = random.Next(0, preposition.Length); // Next's upper bound is exclusive; the original's Length - 1 could never select the last preposition
            }



            /*     switch (e.Result.Text)
             *   {
             *
             *       case "means":
             *           label1.Text = "Animal1";
             *           break;
             *
             *
             *   }*/


            /*
             * if (textBox1.Text.ToString() == preposition[0])
             * {
             *
             *  s.Speak("correct");
             *
             *  round++;
             *  textBox1.Clear();
             *
             *  for (int i = 0; i < preposition[num].Length; i++)
             *  {
             *      points++;
             *  }
             *  totalPoints += (points - ctr);
             *  label1.Text = totalPoints.ToString();
             *  points = 0;
             *  ctr = 0;
             *  num = random.Next(0, preposition.Length - 1);
             *  if (round == 2)
             *  {
             *
             *      this.Close();
             *      th = new Thread(openForm2);
             *
             *      th.SetApartmentState(ApartmentState.STA);
             *      th.Start();
             *  }
             *  ///////////////////
             *  /*
             *
             */
        }
Example #18
        private void button1_Click(object sender, EventArgs e)
        {
            rec1.RecognizeAsyncStop();
            rec.RecognizeAsyncStop();
            Form1 principal = new Form1();

            principal.Show();
            this.Close();
        }
Example #19
        private void registerToolStripMenuItem_Click_1(object sender, EventArgs e)
        {
            Find f2 = new Find();

            //this.Hide();
            //f2.FormClosed += (s, args) => this.Close();
            f2.Show();
            f2.Focus();
            sRecognize.RecognizeAsyncStop();
        }
Example #20
        //========================================
        //normal SpeechRecognitionEngine Functions
        //========================================

        private void normal_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            normal.RecognizeAsyncStop();
            rec = true;
            ss.Speak("Hi thulana, what can i do for you");
            sre.SetInputToDefaultAudioDevice();
            sre.RecognizeAsync(RecognizeMode.Multiple);
            label1.Text             = "I'm listening . . .";
            crclbar.Visible         = true;
            Enblbtn.BackgroundImage = Image.FromFile(@"C:\Users\Thulana\Desktop\Voice Recognizing project\Resources\mic.png");
        }
Example #21
 public void speakText(string textSpeak)
 {
     // RecognizeAsyncCancel already terminates asynchronous recognition, so the original's
     // back-to-back Cancel/Stop pairs (before and after speaking) were redundant.
     sRecognize.RecognizeAsyncCancel();
     pBuilder.ClearContent();
     pBuilder.AppendText(textSpeak);
     sSynth.SelectVoice(name);
     sSynth.SpeakAsync(pBuilder);
     sRecognize.RecognizeAsync(RecognizeMode.Multiple);
 }
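On the two stop methods these examples mix: RecognizeAsyncStop() lets the recognition operation already in progress complete (its events still fire), while RecognizeAsyncCancel() terminates it immediately without a final result. A minimal sketch, assuming any SpeechRecognitionEngine named recognizer:

 recognizer.RecognizeAsyncStop();   // graceful: the current utterance still completes and raises SpeechRecognized
 recognizer.RecognizeAsyncCancel(); // immediate: in-flight recognition is discarded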
Example #22
 private void rbOFF_CheckedChanged(object sender, EventArgs e)
 {
     if (rbOFF.Checked)
     {
         gbCommandEditor.Enabled = true;
         _recog.RecognizeAsyncStop();
         ls = ListnerStates.LS_INACTIVE;
         steps.Clear();
         step            = 0;
         lbAlert.Visible = true;
     }
 }
Example #23
        private void InitializeSynthesis()
        {
            speechEngine  = new SpeechSynthesizer();
            speechEnabled = true;

            speechEngine.Volume = 100;
            speechEngine.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Adult);
            speechEngine.Rate = -4;
            speechRecognizer.RecognizeAsyncStop();
            speechEngine.Speak("Speech Enabled.");
            speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
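Example #23 stops recognition while the synthesizer speaks so the engine does not hear its own prompt, then restarts it afterwards. With SpeakAsync instead of the blocking Speak, the restart can be deferred to the SpeakCompleted event (a sketch reusing the speechEngine and speechRecognizer fields above):

            // Hypothetical non-blocking variant: resume recognition only once the prompt has finished.
            speechRecognizer.RecognizeAsyncStop();
            speechEngine.SpeakCompleted += (s, args) =>
                speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
            speechEngine.SpeakAsync("Speech Enabled.");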
Example #24
 private void ListenAlways_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
 {
     switch (e.Result.Text.ToUpper())
     {
     case "HELLO":
         txtInput.Text  = "Listening...";
         listening      = true;
         btnListen.Text = "Stop Listening..";
         listenAlways.RecognizeAsyncStop();
         engine.RecognizeAsync(RecognizeMode.Multiple);
         break;
     }
 }
Example #25
 public override void actualizar_estado_microfono(bool estado)
 {
     if (estado)
     {
         recEngine.RecognizeAsync(RecognizeMode.Multiple);
         disableBtn.Enabled = true;
     }
     else
     {
         recEngine.RecognizeAsyncStop();
         disableBtn.Enabled = false;
     }
 }
Example #26
        void sre2_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            switch (e.Result.Text)
            {
            case "Yes":
                sre.Recognize();
                break;

            case "No":
                axWindowsMediaPlayer1.Ctlcontrols.play();
                sre.RecognizeAsyncStop();
                break;
            }
        }
Example #27
 void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
 {
     try
     {
         // Cancel terminates the asynchronous recognition outright; the original's
         // follow-up RecognizeAsyncStop() was redundant after that.
         sre.RecognizeAsyncCancel();
         getResponse(e.Result.Text.ToLower());
         btnSpeak.Text = "Speak!";
         return;
     }
     catch (Exception er)
     {
         Console.WriteLine(er.Message);
     }
 }
Example #28
 private void btnVoice_Click(object sender, EventArgs e)
 {
     if (!trigger)
     {
         LunaBrain.RecognizeAsync(RecognizeMode.Multiple);
         trigger = true;
         btnVoice.BackgroundImage = Properties.Resources.micActive;
     }
     else
     {
         LunaBrain.RecognizeAsyncStop();
         btnVoice.BackgroundImage = Properties.Resources.micOff;
         trigger = false;
     }
 }
Example #29
 private void Regcon_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
 {
     // Both branches stopped recognition and re-enabled the button; only the message text differed.
     MessageBox.Show(e.Result.Text == textBox1.Text ? "Good!" : "No!");
     Regcon.RecognizeAsyncStop();
     metroButton2.Enabled = true;
 }
Example #30
 void SpeechRej(object sender, SpeechRecognitionRejectedEventArgs e)
 {
     try
     {
         StatusTxt.Text = "Status : Cannot understand, Speak again!";
         Speak("Cannot understand, Speak again!");
         sre.RecognizeAsyncStop();
         StartBtn.Enabled = true;
         StopBtn.Enabled  = false;
     }
     catch (Exception ex)
     {
         MessageBox.Show("Error : " + ex);
     }
 }
Example #31
        static void Main()
        {
            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false;
                source.SystemMode = SystemMode.OptibeamArrayOnly;

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {
                    //declare commands to be used
                    var commands = new Choices();
                    commands.Add("activate");
                    commands.Add("off");
                    commands.Add("open");
                    commands.Add("manual");
                    commands.Add("hold");
                    commands.Add("land");
                    commands.Add("stabilize");

                    var gb = new GrammarBuilder {Culture = ri.Culture};
                    //Specify the culture to match the recognizer in case we are running in a different culture.                                 
                    gb.Append(commands);

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechRecognitionRejected += SreSpeechRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

                        Console.WriteLine("Recognizing... Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();
                    }
                }
            }
        }
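The SpeechAudioFormatInfo arguments that recur in these Kinect samples are easy to misread; with the same values as above, the documented constructor order unpacks as follows:

                        // samplesPerSecond: 16000, bitsPerSample: 16, channelCount: 1,
                        // averageBytesPerSecond: 32000 (16000 samples * 2 bytes), blockAlign: 2, formatSpecificData: null
                        var kinectFormat = new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);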
Example #32
        public void main_menu()
        {
            recognizer = new SpeechRecognitionEngine();
            obj.SpeakAsync("Hello My name is ICERS, What can I do for you: 1 for checking old enquiries, 2 for entering new enquiry, and 3 for latest car analytics...");
            Thread.Sleep(10000);
            recognizer.RecognizeAsyncStop(); // a no-op on a freshly constructed engine, but harmless
            GrammarBuilder gb = new GrammarBuilder();
            Choices menu = new Choices();
            menu.Add(new string[] { "one", "two", "three" });

            gb.Append(menu);

            // Create the Grammar instance and load it into the speech recognition engine.
            Grammar g = new Grammar(gb);
            recognizer.LoadGrammar(g);
            recognizer.SetInputToDefaultAudioDevice();

            // Subscribe before starting single-shot recognition so a fast first result
            // cannot be missed (the original subscribed after RecognizeAsync).
            recognizer.SpeechRecognized +=
                new EventHandler<SpeechRecognizedEventArgs>(mainRecog);
            recognizer.RecognizeAsync(RecognizeMode.Single);
        }
Example #33
        private void match_enq()
        {
            try
            {
                con = new SqlCeConnection(@"Data Source=C:\Users\RnBz\Documents\Visual Studio 2012\Projects\Speech Recognition2\SpeechRecognition\bin\Debug\icers.sdf");
                con.Open();
                SqlCeDataAdapter da = new SqlCeDataAdapter("select distinct(sel_id) from matches where enq_id=" + enq_check, con);
                DataTable dt = new DataTable();
                da.Fill(dt);
                if (dt.Rows.Count == 0)
                {
                    obj.SpeakAsync("I can not find any matching results...");
                    Thread.Sleep(1500);
                }
                else
                {
                    foreach (DataRow row in dt.Rows)
                    {
                        int sel_id = int.Parse(row["sel_id"].ToString());
                        Console.WriteLine(row["sel_id"].ToString());
                        SqlCeDataAdapter da1 = new SqlCeDataAdapter("select * from sellers where id=" + sel_id, con);
                        DataTable dt_sel = new DataTable();
                        da1.Fill(dt_sel);
                        recognizer = new SpeechRecognitionEngine();
                        recognizer.RecognizeAsyncStop(); // a no-op on a freshly constructed engine
                        GrammarBuilder gb = new GrammarBuilder();
                        Choices id = new Choices();
                        foreach (DataRow row2 in dt_sel.Rows)
                        {
                            obj.SpeakAsync("Unique ID " + row2["id"].ToString() + ": " + row2["title"].ToString());
                            obj.SpeakAsync(",Contact Number... : " + row2["contact"].ToString());
                            obj.SpeakAsync("I repeat: " + row2["contact"].ToString());
                            id.Add(row2["id"].ToString());
                        }
                        gb.Append(id);

                        // Create the Grammar instance and load it into the speech recognition engine.
                        Grammar g = new Grammar(gb);
                        recognizer.LoadGrammar(g);
                        recognizer.SetInputToDefaultAudioDevice();

                        while (recognizer.AudioState.Equals(AudioState.Speech))
                        {
                            Thread.Sleep(100);
                        }
                    }
                }

                Thread.Sleep(500);
                obj.SpeakAsync("Thank you for using my help, Please come back again.....");
                Thread.Sleep(4000);
                mainmenu hey = new mainmenu();
                hey.Show();
                this.Close();
            }
            catch (Exception ea)
            {
                Console.WriteLine(ea.Message);
                obj.SpeakAsync("I can not find any such results, Please try another enquiry...");
                Thread.Sleep(5000);
                check_enq();
            }
            finally { con.Close(); }
        }
Example #34
        void enqRecog(object sender, SpeechRecognizedEventArgs e)
        {
            obj.SpeakAsync("Chosen:"+e.Result.Text);
            enq_check = int.Parse( e.Result.Text);
            recognizer = new SpeechRecognitionEngine();
            recognizer.SetInputToDefaultAudioDevice();
            obj.SpeakAsync("Say yes or no");
            recognizer.RecognizeAsyncStop();
            Choices choices = new Choices("yes", "no");
            GrammarBuilder grammarBuilder = new GrammarBuilder(choices);

            Grammar grammar = new Grammar(grammarBuilder);
            recognizer.UnloadAllGrammars();
            recognizer.LoadGrammar(grammar);
            Thread.Sleep(3000);
            recognizer.RecognizeAsync(RecognizeMode.Single);
            recognizer.SpeechRecognized +=
          new EventHandler<SpeechRecognizedEventArgs>(enqCheck);

        }
Example #35
        void srs_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            recognizer = new SpeechRecognitionEngine();
            recognizer.SetInputToDefaultAudioDevice();
            if (e.Result.Text == "Toyota")
            {

                obj.SpeakAsync("You have chosen Toyota");
                title = "Toyota";
                toyo_rb.IsChecked = true;

            }
            else if (e.Result.Text == "Honda")
            {

                obj.SpeakAsync("You have chosen Honda");
                title = "Honda";
                hon_rb.IsChecked = true;

            }
            else if (e.Result.Text == "Suzuki")
            {

                obj.SpeakAsync("You have chosen Pakistani SUZUKI");
                title = "Suzuki";
                suzu_rb.IsChecked = true;
            }

            else if (e.Result.Text == "Kia")
            {

                obj.SpeakAsync("You have chosen KIA");
               title = "Kia";
                kia_rb.IsChecked = true;
            }
            else if (e.Result.Text == "BMW")
            {
                title = "BMW";
                obj.SpeakAsync("You have chosen B M W");
                bmw_rb.IsChecked = true;
            }
            else
            {
                obj.SpeakAsync("Please choose from the list.");
            }
            obj.SpeakAsync("Say yes or no");
            recognizer.RecognizeAsyncStop();
            Choices choices = new Choices("yes", "no");
            GrammarBuilder grammarBuilder = new GrammarBuilder(choices);
            recognizer.RecognizeAsyncStop();
            Grammar grammar = new Grammar(grammarBuilder);
            recognizer.UnloadAllGrammars();
            recognizer.LoadGrammar(grammar);
            Thread.Sleep(4000);
            recognizer.RecognizeAsync(RecognizeMode.Single);
            recognizer.SpeechRecognized +=
          new EventHandler<SpeechRecognizedEventArgs>(carname);

        }
Example #36
        public void car_model()
        {
            main.Visibility = System.Windows.Visibility.Hidden;
            brand.Visibility = System.Windows.Visibility.Visible;
            recognizer = new SpeechRecognitionEngine();
            obj.SpeakAsync("Please Choose the Car Brand....");
            recognizer.RecognizeAsyncStop();
            GrammarBuilder gb = new GrammarBuilder();
            Choices models = new Choices();
            models.Add(new string[] { "Toyota", "Suzuki", "Honda", "Kia", "BMW" });

            gb.Append(models);

            // Create the Grammar instance and load it into the speech recognition engine.
            Grammar g = new Grammar(gb);
            recognizer.LoadGrammar(g);
            recognizer.SetInputToDefaultAudioDevice();

            Thread.Sleep(3000);
            recognizer.RecognizeAsync(RecognizeMode.Single);
           
            recognizer.SpeechRecognized +=
             new EventHandler<SpeechRecognizedEventArgs>(srs_SpeechRecognized);
           
        }
Example #37
File: Form1.cs Project: Vinlaell/VinsFTP
        private void Initialize()
        {
            recognitionEngine = new SpeechRecognitionEngine();
            recognitionEngine.SetInputToDefaultAudioDevice();
            recognitionEngine.SpeechRecognized += (s, args) =>
            {
                foreach (RecognizedWordUnit word in args.Result.Words)
                {
                    // You can change the minimum confidence level here
                    if (word.Confidence > 0.8f)
                    {
                        switch (word.Text)
                        {
                            case "Download": button6.PerformClick();break;
                            case "speech": checkBox4.Checked = true; break;
                            case "Refresh": button1.PerformClick(); break;
                            case "recognition": checkBox5.Checked = false; recognitionEngine.RecognizeAsyncStop(); break;
                            case "Connect": button1.PerformClick(); break;
                            case "select remote": listView2.Select(); break;
                            case "close": closeprog(); break;

                        }
                    }
                }
            };
        }
Example #38
        private void Window_Loaded(object sender, EventArgs e)
        {
            nui = new Runtime();

            try
            {
                nui.Initialize(RuntimeOptions.UseDepthAndPlayerIndex | RuntimeOptions.UseSkeletalTracking | RuntimeOptions.UseColor);
            }
            catch (InvalidOperationException)
            {
                System.Windows.MessageBox.Show("Runtime initialization failed. Please make sure Kinect device is plugged in.");
                return;
            }

            consoleFrame.Text = "Window_Loaded";

            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();

                if (ri == null)
                {
                    Console.WriteLine("Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId);
                    return;
                }

                consoleFrame.Text = "Using:" + ri.Name;

                sre = new SpeechRecognitionEngine(ri.Id);
                var orders = new Choices();
                orders.Add("up");
                orders.Add("down");
                orders.Add("center");

                var gb = new GrammarBuilder();
                gb.Culture = ri.Culture;
                gb.Append(orders);

                var g = new Grammar(gb);
                sre.LoadGrammar(g);
                sre.SpeechRecognized += SreSpeechRecognized;
                sre.SpeechHypothesized += SreSpeechHypothesized;
                sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                var s = source.Start();

                sre.SetInputToAudioStream(s,
                    new SpeechAudioFormatInfo(
                        EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                sre.RecognizeAsync(RecognizeMode.Multiple);
                sre.RecognizeAsyncStop(); // note: recognition is stopped immediately after it starts, so nothing is ever recognized here

            }
            consoleFrame.Text += "\n Recognizing started. Say up, down or center";

            try
            {
                nui.VideoStream.Open(ImageStreamType.Video, 2, ImageResolution.Resolution640x480, ImageType.Color);
                nui.DepthStream.Open(ImageStreamType.Depth, 2, ImageResolution.Resolution320x240, ImageType.DepthAndPlayerIndex);
            }
            catch (InvalidOperationException)
            {
                System.Windows.MessageBox.Show("Failed to open stream. Please make sure to specify a supported image type and resolution.");
                return;
            }

            lastTime = DateTime.Now;

            nui.DepthFrameReady += new EventHandler<ImageFrameReadyEventArgs>(nui_DepthFrameReady);
            nui.SkeletonFrameReady += new EventHandler<SkeletonFrameReadyEventArgs>(nui_SkeletonFrameReady);
            nui.VideoFrameReady += new EventHandler<ImageFrameReadyEventArgs>(nui_ColorFrameReady);
        }
Example #39
        /**
         * Application main method
         *
         */
        public static void Main(string[] args)
        {
            // We get a source obj and initialize context specific variables
            using (KinectAudioSource source = new KinectAudioSource()) {
                source.FeatureMode = true;
                source.AutomaticGainControl = false;
                source.SystemMode = SystemMode.OptibeamArrayOnly;
                RecognizerInfo recognizer_info = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == SpeechRecognitionID).FirstOrDefault();

                // Make sure we got the hook
                if (recognizer_info == null)
                    ExitProgram("There's no speech recognizer on your system.  Please install one from the README.");

                // Get a hook into iTunes using the COM library
                iTunesApp itunes_application = new iTunesApp();
                if (itunes_application == null)
                    ExitProgram("There was a problem getting access to iTunes.");

                using (SpeechRecognitionEngine speech_recognizer = new SpeechRecognitionEngine(recognizer_info.Id)) {
                    // First, we create a grammar with basic iTunes instructions
                    Choices basic_itunes_options = new Choices();
                    basic_itunes_options.Add("itunes play");
                    basic_itunes_options.Add("itunes pause");
                    basic_itunes_options.Add("itunes stop");
                    basic_itunes_options.Add("itunes next");
                    basic_itunes_options.Add("itunes previous");
                    basic_itunes_options.Add("itunes mute");
                    basic_itunes_options.Add("itunes volume up");
                    basic_itunes_options.Add("itunes volume down");

                    GrammarBuilder basic_itunes_grammar = new GrammarBuilder();
                    basic_itunes_grammar.Append(basic_itunes_options);

                    // Next, we make an iTunes library-specific set of grammars for granular control
                    //  The following is inspired by but not directly lifted from KinecTunes.  Credit
                    //  is due for inspiration though
                    Choices dynamic_itunes_options = new Choices();
                    IITLibraryPlaylist itunes_library = itunes_application.LibraryPlaylist;

                    // The library is one-based so we go through each track and pull out relevant data into the grammar
                    //   We maintain lists to avoid duplicate grammars, which can cause errors with the Kinect
                    List<string> artists = new List<string>();
                    List<string> songs = new List<string>();
                    List<string> albums = new List<string>();
                    for (int i = 1; i <= itunes_library.Tracks.Count; i++) {
                        IITTrack track = itunes_library.Tracks[i];
                        if (track != null && track.KindAsString.Contains("audio")) {
                            if (track.Name != null && !songs.Contains(track.Name)) { // the original checked the artists list here, defeating the song duplicate check
                                dynamic_itunes_options.Add(string.Format("itunes play {0}", track.Name));
                                dynamic_itunes_options.Add(string.Format("itunes play song {0}", track.Name));
                                songs.Add(track.Name);
                            }
                            if (track.Artist != null && !artists.Contains(track.Artist)) {
                                dynamic_itunes_options.Add(string.Format("itunes play {0}", track.Artist));
                                dynamic_itunes_options.Add(string.Format("itunes play artist {0}", track.Artist));
                                artists.Add(track.Artist);
                            }
                            if (track.Album != null && !albums.Contains(track.Album)) {
                                dynamic_itunes_options.Add(string.Format("itunes play {0}", track.Album));
                                dynamic_itunes_options.Add(string.Format("itunes play album {0}", track.Album));
                                albums.Add(track.Album);
                            }
                        }
                    }

                    // Treat the playlists specially
                    List<string> playlists = new List<string>();
                    for (int i = 1; i <= itunes_application.LibrarySource.Playlists.Count; i++) {
                        var playlist = itunes_application.LibrarySource.Playlists[i];
                        if (playlist.Name != null && !playlists.Contains(playlist.Name)) {
                            playlists.Add(playlist.Name);
                            dynamic_itunes_options.Add(string.Format("itunes play {0}", playlist.Name));
                            dynamic_itunes_options.Add(string.Format("itunes play playlist {0}", playlist.Name));
                        }
                    }

                    GrammarBuilder dynamic_itunes_grammar = new GrammarBuilder();
                    dynamic_itunes_grammar.Append(dynamic_itunes_options);

                    // Load all the grammars into a grammar object, then our speech recognition engine
                    Grammar itunes_grammar_one = new Grammar(basic_itunes_grammar);
                    Grammar itunes_grammar_two = new Grammar(dynamic_itunes_grammar);

                    // Notice that we don't care when the speech is hypothesized or rejected, only accepted
                    speech_recognizer.LoadGrammar(itunes_grammar_one);
                    speech_recognizer.LoadGrammar(itunes_grammar_two);
                    speech_recognizer.SpeechRecognized += SpeechWasRecognized;

                    using (Stream s = source.Start()) {
                        speech_recognizer.SetInputToAudioStream(s, new SpeechAudioFormatInfo(EncodingFormat.Pcm,
                                                                                             16000, 16, 1, 32000,
                                                                                             2, null));
                        Console.Write("Kinect has loaded iTunes Library.  Initializing speech recognition...");
                        // Why is signal handling so difficult in C#?  Whatever, let's just use any keystrokes for interrupt
                        speech_recognizer.RecognizeAsync(RecognizeMode.Multiple);
                        Console.WriteLine("OK.\nPress any key to exit...");
                        Console.ReadLine();
                        speech_recognizer.RecognizeAsyncStop();
                    }
                }
            }
        }
Example #40
File: Kinect.cs Project: Osceus/Kinect
 private void speechRecognitionGenerator()
 {
     RecognizerInfo ri = GetKinectRecognizer();
     using (var sre = new SpeechRecognitionEngine(ri.Id))
     {
         var options = new Choices();
         options.Add("password");
         options.Add("oscar");
         options.Add("zeus");
         var gb = new GrammarBuilder();
         //Specify the culture to match the recognizer in case we are running in a different culture.
         gb.Culture = ri.Culture;
         gb.Append(options);
         var g = new Grammar(gb);
         sre.LoadGrammar(g);
         sre.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(sre_SpeechHypothesized);
         sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sre_SpeechRecognitionRejected);
         sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
         Stream audioStream = this.KinectDevice.AudioSource.Start();
         Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo info  = new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(Microsoft.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);
         sre.SetInputToAudioStream(audioStream, info);
         sre.RecognizeAsync(RecognizeMode.Multiple);
         while (ShouldRun)
         {
             Thread.Sleep(1000);
         }
         sre.RecognizeAsyncStop();
     }
 }
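The ShouldRun loop above keeps the recognizer alive by polling once a second. A wait handle expresses the same intent without polling (a sketch, assuming the shutdown path calls Set() on a shared event instead of clearing ShouldRun):

      // Hypothetical alternative to the polling loop; stopRequested.Set() is called at shutdown.
      using (var stopRequested = new ManualResetEventSlim(false))
      {
          sre.RecognizeAsync(RecognizeMode.Multiple);
          stopRequested.Wait();
          sre.RecognizeAsyncStop();
      }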
Example #41
        /// <summary>
        /// Starts the audio.
        /// </summary>
        private static void startAudio()
        {
            if (sensor == null)
            {
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine("No Kinect sensors are attached to this computer");
                return;
            }

            // Get the Kinect Audio Source
            KinectAudioSource audioSource = sensor.AudioSource;

            audioSource.AutomaticGainControlEnabled = false;
            audioSource.NoiseSuppression = true;

            RecognizerInfo ri = GetKinectRecognizer();

            if (ri == null)
            {
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                return;
            }
            Console.ForegroundColor = ConsoleColor.Green;
            Console.WriteLine("Using: {0}", ri.Name);

            // NOTE: Need to wait 4 seconds for device to be ready right after initialization
            int wait = 4;
            while (wait > 0)
            {
                Console.ForegroundColor = ConsoleColor.Yellow;
                Console.WriteLine("Device will be ready for speech recognition in {0} second(s).\r", wait--);
                Thread.Sleep(1000);
            }

            using (var sre = new SpeechRecognitionEngine(ri.Id))
            {
                var options = new Choices();
                options.Add("Red");
                options.Add("Green");
                options.Add("Blue");
                options.Add("Yellow");


                var gb = new GrammarBuilder { Culture = ri.Culture };

                // Specify the culture to match the recognizer in case we are running in a different culture.                                 
                gb.Append(options);

                // Create the actual Grammar instance, and then load it into the speech recognizer.
                var g = new Grammar(gb);

                sre.LoadGrammar(g);
                sre.SpeechRecognized += SreSpeechRecognized;
                sre.SpeechHypothesized += SreSpeechHypothesized;
                sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;


                using (Stream s = audioSource.Start())
                {
                    sre.SetInputToAudioStream(
                        s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                    Console.ForegroundColor = ConsoleColor.Blue;
                    Console.WriteLine("Recognizing speech. Read: 'Red', 'Green', 'Blue', 'Yellow'");
                    sre.RecognizeAsync(RecognizeMode.Multiple);
                    Console.ReadLine();
                    Console.WriteLine("Stopping recognizer ...");
                    sre.RecognizeAsyncStop();
                }

            }
           
        }
Example #42
        void icers_carnamerecognized(object sender, SpeechRecognizedEventArgs e)
        {
            recognizer = new SpeechRecognitionEngine();
            recognizer.SetInputToDefaultAudioDevice();
            obj.SpeakAsync("You have chosen: " + e.Result.Text);
            obj.SpeakAsync("Say yes or no");
            recognizer.RecognizeAsyncStop();
            carmodel = e.Result.Text;

            if (model1.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
            {
                model1.IsChecked = true;
            }
            if (model2.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
            {
                model2.IsChecked = true;
            }
            if (model3.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
            {
                model3.IsChecked = true;
            }
            if (model4.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
            {
                model4.IsChecked = true;
            }
            if (model5.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
            {
                model5.IsChecked = true;
            }
            Choices choices = new Choices("yes", "no");
            GrammarBuilder grammarBuilder = new GrammarBuilder(choices);

            Grammar grammar = new Grammar(grammarBuilder);
            recognizer.UnloadAllGrammars();
            recognizer.LoadGrammar(grammar);
            Thread.Sleep(4000);
            recognizer.RecognizeAsync(RecognizeMode.Single);
            recognizer.SpeechRecognized +=
          new EventHandler<SpeechRecognizedEventArgs>(car_check);
        }
Example #43
        private void check_enq()
        {
            recognizer = new SpeechRecognitionEngine();
            obj.SpeakAsync("Speak Enquiry ID:");
            Thread.Sleep(1500);
            recognizer.RecognizeAsyncStop();
            GrammarBuilder gb = new GrammarBuilder();
            Choices menu = new Choices();
            try
            {
                if (con.State == ConnectionState.Closed)
                    con.Open();
                SqlCeCommand cmd = new SqlCeCommand("select id from enquiry", con);
                SqlCeDataReader dr = cmd.ExecuteReader();
                while (dr.Read())
                {
                    menu.Add(dr["id"].ToString());
                }
            }
            catch (Exception ers)
            {
                Console.Write(ers.Message);
            }
            gb.Append(menu);
            
            // Create the Grammar instance and load it into the speech recognition engine.
            Grammar g = new Grammar(gb);
            recognizer.LoadGrammar(g);
            recognizer.SetInputToDefaultAudioDevice();
            Thread.Sleep(1500);

            recognizer.RecognizeAsync(RecognizeMode.Single);
            recognizer.SpeechRecognized +=
           new EventHandler<SpeechRecognizedEventArgs>(enqRecog);
        }
Example #44
        private SpeechRecognizer(KinectSensor kinect)
        {
            RecognizerInfo ri = GetKinectRecognizer();
            this.speechRecognizer = new SpeechRecognitionEngine(ri);
            // Obtain the KinectAudioSource to do audio capture
            KinectAudioSource source = kinect.AudioSource;
            source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
            source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

            //this.LoadGrammar(kinect);
               // }

            /*public static void Main(string[] args)
            {
            // Obtain a KinectSensor if any are available
            KinectSensor sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
            if (sensor == null)
            {
                Console.WriteLine(
                        "No Kinect sensors are attached to this computer or none of the ones that are\n" +
                        "attached are \"Connected\".\n" +
                        "Attach the KinectSensor and restart this application.\n" +
                        "If that doesn't work run SkeletonViewer-WPF to better understand the Status of\n" +
                        "the Kinect sensors.\n\n" +
                        "Press any key to continue.\n");

                // Give a chance for user to see console output before it is dismissed
                Console.ReadKey(true);
                return;
            }

            sensor.Start();

            // Obtain the KinectAudioSource to do audio capture
            KinectAudioSource source = sensor.AudioSource;
            source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
            source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition
               // */
            // private void LoadGrammar(KinectSensor kinect)
             //{
               // RecognizerInfo ri = GetKinectRecognizer();

            if (ri == null)
            {
                Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                return;
            }

            Console.WriteLine("Using: {0}", ri.Name);

            // NOTE: Need to wait 4 seconds for device to be ready right after initialization
            int wait = 4;
            while (wait > 0)
            {
                Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--);
                Thread.Sleep(1000);
            }

            //using (var sre = new SpeechRecognitionEngine(ri.Id))
            //{
               // speechRecognizer
                var colors = new Choices();
                colors.Add("red");
                colors.Add("green");
                colors.Add("blue");

                var gb = new GrammarBuilder { Culture = ri.Culture };

                // Specify the culture to match the recognizer in case we are running in a different culture.
                gb.Append(colors);

                // Create the actual Grammar instance, and then load it into the speech recognizer.
                var g = new Grammar(gb);

                speechRecognizer.LoadGrammar(g);
                speechRecognizer.SpeechRecognized += SreSpeechRecognized;
                speechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
                speechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
                speechRecognizer.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(SpeechDetectedHandler);
                Console.WriteLine("IN Speech Reconizer load function");
               // speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
             ///*
                using (Stream s = kinect.AudioSource.Start())
                {
                    speechRecognizer.SetInputToAudioStream(
                        s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                    Console.WriteLine("Recognizing speech. Say: 'red', 'green' or 'blue'. Press ENTER to stop");

                    speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
                    Console.ReadLine();
                    Console.WriteLine("Stopping recognizer ...");
                    speechRecognizer.RecognizeAsyncStop();
                }
              //* */
               // }
        }
Example #45
File: Program.cs Project: rdodgen/Ezri
        static void Main(string[] args)
        {
            AppDomain.CurrentDomain.UnhandledException += new UnhandledExceptionEventHandler(CurrentDomain_UnhandledException);

            voice = new Voice();

            commandProcessor = ConfigureCommands().CreateCommandProcessor();
            commandProcessor.CommandRecognized += sound.NotifyRecognizedCommandAsync;
            commandProcessor.CommandRejected += sound.NotifyUnrecognizedCommandAsync;

            Console.WriteLine("Attached PIR-1 devices:");
            foreach (var pir in PIRDriver.Instance.QueryAttachedDevices())
                Console.WriteLine("\t{0}", pir);

            ConfigureLightShow();
            Console.WriteLine("Configured LightShow");

            var recognizer = GetKinectRecognizer();
            using (var sensor = GetKinectSensor())
            {
                /* Skeleton-based beam control is disabled due to an OOM issue when long running.
                var beamController = new SkeletonBasedBeamControl();
                beamController.AttentionGestureDetected += delegate(SkeletonBasedBeamControl controller)
                {
                    sound.NotifyAttentionGestureRecognized();
                };
                beamController.Start(sensor);
                */

                sensor.Start();
                var source = sensor.AudioSource;

                source.AutomaticGainControlEnabled = false;
                source.EchoCancellationMode = EchoCancellationMode.None;
                source.NoiseSuppression = true;

                Console.WriteLine("Using: {0}", recognizer.Name);

                using (Stream s = source.Start())
                {
                    SpeechRecognitionEngine sre = null;
                    var sreLock = new object();

                    EventHandler<SpeechDetectedEventArgs> SreSpeechDetected = delegate(object sender, SpeechDetectedEventArgs dea) { SpeechDetected(source, dea); };

                    Action startRecognizer = delegate()
                    {
                        SpeechRecognitionEngine oldSre = null;

                        lock (sreLock)
                        {
                            if (sre != null)
                            {
                                oldSre = sre;
                            }
                            sre = new SpeechRecognitionEngine(recognizer.Id);
                            sre.UpdateRecognizerSetting("AdaptationOn", 1);
                            sre.UpdateRecognizerSetting("PersistedBackgroundAdaptation", 1);
                            sre.LoadGrammar(commandProcessor.CreateGrammar());

                            sre.SpeechDetected += SreSpeechDetected;
                            sre.SpeechHypothesized += SreSpeechHypothesized;
                            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
                            sre.AudioSignalProblemOccurred += SreAudioSignalProblemOccurred;

                            sre.EndSilenceTimeoutAmbiguous = TimeSpan.FromMilliseconds(AmbiguousSilenceTimeout);
                            sre.EndSilenceTimeout = TimeSpan.FromMilliseconds(UnambiguousSilenceTimeout);

                            sre.SpeechRecognized += delegate(object sender, SpeechRecognizedEventArgs r)
                            {
                                Console.WriteLine("Handling text {0} in command processor", r.Result.Text);
                                try
                                {
                                    commandProcessor.ProcessSpeech(r.Result);
                                }
                                catch (Exception ex)
                                {
                                    Console.WriteLine("Command handler failed: " + ex.ToString());
                                    voice.SpeakAsync("Failed to execute command. Sorry!");
                                }
                            };

                            sre.SetInputToAudioStream(s,
                                                      new SpeechAudioFormatInfo(
                                                          EncodingFormat.Pcm, 16000, 16, 1,
                                                          32000, 2, null));
                            sre.RecognizeAsync(RecognizeMode.Multiple);
                            Trace.TraceInformation("New recognizer started");

                            if (oldSre != null)
                            {
                                oldSre.RecognizeAsyncStop();

                                oldSre.SpeechDetected -= SreSpeechDetected;
                                oldSre.SpeechHypothesized -= SreSpeechHypothesized;
                                oldSre.SpeechRecognitionRejected -= SreSpeechRecognitionRejected;
                                oldSre.AudioSignalProblemOccurred -= SreAudioSignalProblemOccurred;

                                oldSre.Dispose();
                                Trace.TraceInformation("Old recognizer disposed");
                            }
                        }
                    };

                    var recognizerRecycleTimer = new System.Timers.Timer()
                    {
                        AutoReset = false,
                        Enabled = false,
                        Interval = RecognizerRecyleTime.TotalMilliseconds,
                    };
                    recognizerRecycleTimer.Elapsed += (sender, elapsedEventArgs) =>
                    {
                        Trace.TraceInformation("Recycling recognizer...");
                        startRecognizer();
                        recognizerRecycleTimer.Start();
                        Trace.TraceInformation("Done recycling recognizer.");
                    };

                    startRecognizer();
                    Console.WriteLine("Ready.");
                    voice.SpeakAsync("Ez-ree is now online.");

                    recognizerRecycleTimer.Start();

                    Console.ReadLine();
                    Console.WriteLine("Stopping recognizer ...");
                    // TODO: poison flag so the recycle timer doesn't get in the way (a sketch follows this example)
                    lock (sreLock)
                    {
                        sre.RecognizeAsyncStop();
                        sre.Dispose();
                    }
                    // beamController.Stop();
                }
            }
        }
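The TODO above asks for a poison flag so the recycle timer cannot restart a recognizer that is being shut down. A small, self-contained sketch of that pattern; the recognizer work is abstracted as an Action, and the names here are illustrative, not from the Ezri project:

using System;
using System.Timers;

class RecyclableWorker : IDisposable
{
    private readonly object _gate = new object();
    private readonly Timer _recycleTimer;
    private bool _poisoned;

    public RecyclableWorker(Action recycle, double intervalMs)
    {
        _recycleTimer = new Timer(intervalMs) { AutoReset = false };
        _recycleTimer.Elapsed += (s, e) =>
        {
            lock (_gate)
            {
                if (_poisoned) return;   // shutdown already began; do nothing
                recycle();
                _recycleTimer.Start();   // re-arm only while healthy
            }
        };
        _recycleTimer.Start();
    }

    public void Dispose()
    {
        lock (_gate)
        {
            _poisoned = true;            // poison first ...
            _recycleTimer.Stop();        // ... then stop the timer
        }
        _recycleTimer.Dispose();
    }
}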
Example #46
0
        public static void Main(string[] args)
        {
            //string baud_string;
            string name;

            #region Kinect Finding
            // Obtain a KinectSensor if any are available
            KinectSensor sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
            #endregion

            #region Kinect Checking
            if (sensor == null)
            {
                Console.WriteLine(
                        "No Kinect sensors are attached to this computer or none of the ones that are\n" +
                        "attached are \"Connected\".\n" +
                    //"Attach the KinectSensor and restart this application.\n" +
                    //"If that doesn't work run SkeletonViewer-WPF to better understand the Status of\n" +
                    //"the Kinect sensors.\n\n" +
                        "Press any key to continue.\n");

                // Give a chance for user to see console output before it is dismissed
                Console.ReadKey(true);
                return;
            }
            #endregion

            #region Port Checking + Counting
            System.IO.Ports.SerialPort.GetPortNames().Count(); // counts available ports; the result is discarded here (the names are printed below)
            #endregion

            #region Activates Kinect Sensor
            sensor.Start();
            #endregion

            #region Obtains KinectAudioSource
            KinectAudioSource source = sensor.AudioSource;
            source.EchoCancellationMode = EchoCancellationMode.None; // No AEC :(
            source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition
            #endregion

            #region Check for Audio SDK
            if (GetKinectRecognizer() == null)
            {
                Console.WriteLine("Could not find Kinect speech recognizer! You should probably install the Audio SDK for Kinect (released by Microsoft). Download here: http://www.microsoft.com/en-us/download/details.aspx?id=27226"); //Put a download link here to get the audio sdk --DONE
                return;
            }
            #endregion

            #region Writes Options
            Console.WriteLine("Enter your parameters to begin");
            Console.WriteLine(" ");
            Console.WriteLine("If no ports are displayed below, please check your connection to the serial device");
            Console.WriteLine("Available ports:");
            #endregion

            #region Available Port Printing
            if (System.IO.Ports.SerialPort.GetPortNames().Any()) // at least one serial port is available
            {
                foreach (string p in System.IO.Ports.SerialPort.GetPortNames())
                {
                    Console.WriteLine(p);
                }
            }
            else
            {
                Console.WriteLine("No Ports are available. Press any key to quit!");
                Console.ReadLine();
                return; //Quit
            }
            #endregion

            #region Gets Port Name + Baud
            Console.WriteLine("Port Name:");
            name = Console.ReadLine();
            Console.WriteLine(" \n");
            Console.WriteLine("Baud rate:\n" +
                               "1. 300\n" +
                               "2. 1200\n" +
                               "3. 2400\n" +
                               "4. 4800\n" +
                               "5. 9600\n" +
                               "6. 14400\n" +
                               "7. 19200\n" +
                               "8. 28800\n" +
                               "9. 38400\n" +
                               "10. 57600\n" +
                               "11. 115200\n");
            /*baud_string = Console.ReadLine();
            int baud = int.Parse(baud_string); //Somewhat rigged*/
            //Console.WriteLine("You selected {0} as your baud rate\n", baud);
            int baud = GetBaudRate();

            Console.WriteLine(" ");
            Console.WriteLine("Beginning Serial...");
            BeginSerial(baud, name);
            port.DataReceived += new System.IO.Ports.SerialDataReceivedEventHandler(port_DataReceived);
            port.Open();

            #endregion

            int wait = 5;
            while (wait > -1) // counts down from 5 to 0
            {
                Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--); // '\r' overwrites the previous line
                Thread.Sleep(1000);
            }

            using (var sre = new SpeechRecognitionEngine(GetKinectRecognizer().Id))
            {
                var commands = new Choices();
                //POSSIBLY: move this command list (and the if-statement handling for it) into a simple XML file read with System.Xml.Linq (a sketch follows this example): **http://stackoverflow.com/questions/10762715/c-sharp-parsing-specific-xml**
                //TODO: Reorganize the commands by subject: browser (Reddit, Facebook, Gmail), desktop apps (Steam, task manager), etc.
                #region commands
                commands.Add("pull up the weather");
                commands.Add("Open task manager");
                commands.Add("Ha gay");
                commands.Add("Play good feeling radio");
                commands.Add("Open Reddit");
                commands.Add("Close chrome");
                commands.Add("Close task manager");
                commands.Add("Play pandora");
                commands.Add("Play good feeling radio");
                commands.Add("Play dead mouse radio");
                commands.Add("Boom");
                commands.Add("Sleep");
                commands.Add("Shut down");//Program this
                commands.Add("Open my email");
                commands.Add("Open steam");//Also add launching games individually, Program this as well
                commands.Add("Open facebook");//Program this, also you may have to split the word
                commands.Add("battle stations");//Program this
                commands.Add("cable p**n");
                commands.Add("ask science");
                commands.Add("github");//Program this (Make it open the CL and web interface)
                commands.Add("Raise Volume");//Setup and increment setting (Even in XML you can set the rate!)
                #endregion

                var gb = new GrammarBuilder { Culture = GetKinectRecognizer().Culture };
                gb.Append(commands);

                var g = new Grammar(gb);

                sre.LoadGrammar(g);
                sre.SpeechRecognized += SreSpeechRecognized;
                sre.SpeechHypothesized += SreSpeechHypothesized;
                sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                using (Stream s = source.Start())
                {
                    sre.SetInputToAudioStream(
                        s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                    Console.WriteLine(" ");
                    Console.WriteLine("What would you like me to do?\n" +
                                       /* "  \n" +
                                        "1) Dim Plus Far Window Shades\n" +
                                        "  \n" +
                                        "2) Dim Minus Far Window Shades\n" +
                                        "  \n" +
                                        "3) Dim Plus Computer Window Shades\n" +
                                        "  \n" +
                                        "4) Dim Minus Computer Window Shades\n" +
                                        "  \n" +
                                        "5) Open Far Window Shades\n" +
                                        "  \n" +
                                        "6) Close Far Window Shades\n" +
                                        "  \n" +
                                        "7) Open Computer Window Shades\n" +
                                        "  \n" +
                                        "8) Close Computer Window Shades\n"*/);

                    sre.RecognizeAsync(RecognizeMode.Multiple);
                    Console.ReadLine();
                    Console.WriteLine("Stopping everything...\n");
                    sre.RecognizeAsyncStop();
                }
            }

            sensor.Stop();
        }
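The POSSIBLY comment above suggests reading the command list from an XML file with System.Xml.Linq. A hedged sketch of what that could look like; the file name and element names here are made up, and the Choices type is the same one the example already uses:

using System.Linq;
using System.Speech.Recognition;  // or Microsoft.Speech.Recognition, depending on the SDK in use
using System.Xml.Linq;

static class CommandFile
{
    // commands.xml (hypothetical layout):
    //   <commands>
    //     <command>pull up the weather</command>
    //     <command>Open task manager</command>
    //   </commands>
    public static Choices Load(string path)
    {
        string[] phrases = XDocument.Load(path)
                                    .Descendants("command")
                                    .Select(e => (string)e)
                                    .ToArray();
        return new Choices(phrases);
    }
}

// Usage: gb.Append(CommandFile.Load("commands.xml"));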
Example #47
0
        static void Main( string[] args )
        {
            try {
                using ( var source = new KinectAudioSource() ) {
                    source.FeatureMode = true;
                    source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                    source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                    RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where( r => "ja-JP".Equals( r.Culture.Name, StringComparison.InvariantCultureIgnoreCase ) ).FirstOrDefault();

                    if ( ri == null ) {
                        Console.WriteLine( "Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId );
                        return;
                    }

                    Console.WriteLine( "Using: {0}", ri.Name );

                    using ( var sre = new SpeechRecognitionEngine( ri.Id ) ) {
                        GrammarBuilder dictaphoneGB = new GrammarBuilder();

                        GrammarBuilder dictation = new GrammarBuilder();
                        dictation.AppendDictation();

                        dictaphoneGB.Append( new SemanticResultKey( "StartDictation", new SemanticResultValue( "Start Dictation", true ) ) );
                        dictaphoneGB.Append( new SemanticResultKey( "dictationInput", dictation ) );
                        dictaphoneGB.Append( new SemanticResultKey( "EndDictation", new SemanticResultValue( "Stop Dictation", false ) ) );

                        GrammarBuilder spellingGB = new GrammarBuilder();

                        GrammarBuilder spelling = new GrammarBuilder();
                        spelling.AppendDictation( "spelling" );

                        spellingGB.Append( new SemanticResultKey( "StartSpelling", new SemanticResultValue( "Start Spelling", true ) ) );
                        spellingGB.Append( new SemanticResultKey( "spellingInput", spelling ) );
                        spellingGB.Append( new SemanticResultKey( "StopSpelling", new SemanticResultValue( "Stop Spelling", true ) ) );

                        GrammarBuilder both = GrammarBuilder.Add( (GrammarBuilder)new SemanticResultKey( "Dictation", dictaphoneGB ),
                                                                (GrammarBuilder)new SemanticResultKey( "Spelling", spellingGB ) );

                        // Note: "both" above is built but never used; only the dictation grammar is loaded.
                        Grammar grammar = new Grammar( new SemanticResultKey( "Dictation", dictaphoneGB ) );
                        grammar.Enabled = true;
                        grammar.Name = "Dictaphone and Spelling";

                        sre.LoadGrammar( grammar ); // Exception thrown here (see the note after this example)

                        sre.SpeechRecognized += SreSpeechRecognized;
                        sre.SpeechHypothesized += SreSpeechHypothesized;
                        sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                        using ( Stream s = source.Start() ) {
                            sre.SetInputToAudioStream( s, new SpeechAudioFormatInfo(
                                                        EncodingFormat.Pcm, 16000, 16, 1,
                                                        32000, 2, null ) );

                            Console.WriteLine( "Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop" );

                            sre.RecognizeAsync( RecognizeMode.Multiple );
                            Console.ReadLine();
                            Console.WriteLine( "Stopping recognizer ..." );
                            sre.RecognizeAsyncStop();
                        }
                    }
                }
            }
            catch ( Exception ex ) {
                Console.WriteLine( ex.Message );
            }
        }
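A note on the exception flagged above: GrammarBuilder.Add concatenates its arguments into a sequence, and the combined builder "both" is never actually loaded. Separately, some recognizers (notably the Microsoft Speech Platform engines commonly used with Kinect) are reported not to support free dictation grammars at all, which would make LoadGrammar throw here regardless. If the intent was "dictation or spelling" as alternatives, a Choices over the two builders expresses that; a sketch reusing the names from the example:

static Grammar BuildCombined(GrammarBuilder dictaphoneGB, GrammarBuilder spellingGB)
{
    // Choices = alternatives; GrammarBuilder.Add = concatenation (a sequence).
    var alternatives = new Choices(new GrammarBuilder[] { dictaphoneGB, spellingGB });
    var grammar = new Grammar(new GrammarBuilder(alternatives));
    grammar.Name = "Dictaphone or Spelling";
    return grammar;
}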
Example #48
0
        static void Main(string[] args)
        {                    
            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false; //Important to turn this off for speech recognition
				source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {                
                    var colors = new Choices();
                    colors.Add("red");
                    colors.Add("green");
                    colors.Add("blue");

                    var gb = new GrammarBuilder();
                    //Specify the culture to match the recognizer in case we are running in a different culture.                                 
                    gb.Culture = ri.Culture;
                    gb.Append(colors);
                    

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);                    

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechHypothesized += SreSpeechHypothesized;
                    sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

						Console.WriteLine("Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();                       
                    }
                }
            }
        }
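The SreSpeechRecognized / SreSpeechHypothesized / SreSpeechRecognitionRejected handlers wired above are not shown on this page. A typical shape for them, filtering on Result.Confidence (the 0.7 threshold is an assumption, not from the original project):

using System;
using System.Speech.Recognition;

static class Handlers
{
    public static void SreSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
    {
        if (e.Result.Confidence < 0.7f) // threshold is an assumption; tune per microphone and environment
        {
            Console.WriteLine("Ignored (low confidence): {0}", e.Result.Text);
            return;
        }
        Console.WriteLine("Recognized: {0}", e.Result.Text);
    }

    public static void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
    {
        Console.WriteLine("Hypothesized: {0}", e.Result.Text);
    }

    public static void SreSpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
    {
        Console.WriteLine("Rejected: {0}", e.Result.Text);
    }
}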
Example #49
0
    public SpeechRecognizer()
    {
        using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {
                    var commands = new Choices();
                    commands.Add("Xbox Route");
                    commands.Add("Xbox Next Direction");
                    commands.Add("Xbox Previous Direction");
                    commands.Add("Xbox Spell");
                    commands.Add("Stanford");
                    commands.Add("San Jose");
                    commands.Add("Home");
                    commands.Add("650 Escondido Road");
                    commands.Add("California");
                    commands.Add("San Jose International Airport");
                    var letters = new Choices();
                    letters.Add("A");
                    letters.Add("B");
                    letters.Add("C");
                    letters.Add("D");
                    letters.Add("E");
                    letters.Add("F");
                    letters.Add("G");
                    letters.Add("H");
                    letters.Add("I");
                    letters.Add("J");
                    letters.Add("K");
                    letters.Add("L");
                    letters.Add("M");
                    letters.Add("N");
                    letters.Add("O");
                    letters.Add("P");
                    letters.Add("Q");
                    letters.Add("R");
                    letters.Add("S");
                    letters.Add("T");
                    letters.Add("U");
                    letters.Add("V");
                    letters.Add("X");
                    letters.Add("W");
                    letters.Add("Y");
                    letters.Add("Z");

                    var gb = new GrammarBuilder();
                    //Specify the culture to match the recognizer in case we are running in a different culture.
                    gb.Culture = ri.Culture;
                    gb.Append(commands);
                    var gbletter = new GrammarBuilder();
                    gbletter.Culture = ri.Culture;
                    gbletter.Append(letters);

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);
                    var gbl = new Grammar(gbletter); // NOTE: built but never loaded below (see the sketch after this example)

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechHypothesized += SreSpeechHypothesized;
                    sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

                        Console.WriteLine("Recognizing. Say: 'Xbox Route', 'Xbox Next Direction', 'Xbox Previous Direction' or 'Xbox Spell (to spell your point)'. Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();
                    }
                }
            }
    }
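In the constructor above, the letters grammar gbl is built but never passed to LoadGrammar, so spelled-out letters can never be recognized. A sketch of loading both grammars and switching between command mode and spelling mode with Grammar.Enabled (a real property of System.Speech.Recognition.Grammar; the names match the example):

// Assuming both grammars were loaded first:
//   sre.LoadGrammar(g);    // commands
//   sre.LoadGrammar(gbl);  // letters
static void SetSpellingMode(Grammar commands, Grammar letters, bool spelling)
{
    commands.Enabled = !spelling;   // e.g. disable commands after hearing "Xbox Spell"
    letters.Enabled = spelling;     // while spelling, only single letters are active
}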
Example #50
0
        private void BuildGrammarforRecognizer(object recognizerInfo)
        {
            EnableKinectAudioSource();

            var grammarBuilder = new GrammarBuilder { Culture = (recognizerInfo as RecognizerInfo).Culture };

            // Creating another Grammar and load
            var newGrammarBuilder = new GrammarBuilder();
            newGrammarBuilder.Append(new Choices("Schließe die Anwendung", "Ich hasse euch alle", "nächsten Folie"));
            var grammarClose = new Grammar(newGrammarBuilder);

            int SamplesPerSecond = 16000;
            int bitsPerSample = 16;
            int channels = 1;
            int averageBytesPerSecond = 32000;
            int blockAlign = 2;

            using (var speechRecognizer = new SpeechRecognitionEngine((recognizerInfo as RecognizerInfo).Id))
            {
                speechRecognizer.LoadGrammar(grammarClose);

                speechRecognizer.SpeechRecognized += SreSpeechRecognized;
                speechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
                speechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                using (Stream s = source.Start())
                {
                    speechRecognizer.SetInputToAudioStream(
                        s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, SamplesPerSecond, bitsPerSample, channels, averageBytesPerSecond, blockAlign, null));


                    while (keepRunning)
                    {
                        // Recognize() blocks for up to 5 seconds and returns null on timeout.
                        RecognitionResult result = speechRecognizer.Recognize(new TimeSpan(0, 0, 5));
                        if (result != null)
                        {
                            Console.WriteLine("Recognized: {0}", result.Text);
                        }
                    }

                    // Nothing was started with RecognizeAsync, so this stop call is
                    // harmless but redundant after the synchronous loop above.
                    speechRecognizer.RecognizeAsyncStop();
                }
            }
        }
Example #51
0
        public void Carname_Load()
        {


            recognizer = new SpeechRecognitionEngine();
            recognizer.SetInputToDefaultAudioDevice();

            modelPic.Visibility = System.Windows.Visibility.Visible;
            recognizer.RecognizeAsyncStop(); // defensive: nothing is recognizing yet, so this is effectively a no-op
            model.Visibility = System.Windows.Visibility.Visible;
            if (title == "BMW")
            {


                obj.SpeakAsync("Choose from BMW cars");
                obj.SpeakAsync("3 Series");
                obj.SpeakAsync("5 Series");
                obj.SpeakAsync("7 Series");
                obj.SpeakAsync("C class");
                mod.Content = "BMW";
                model1.Content = "3 Series";
                model2.Content = "5 Series";
                model3.Content = "7 Series";
                model4.Content = "C Class";
                model1.Visibility = System.Windows.Visibility.Visible;
                model2.Visibility = System.Windows.Visibility.Visible;
                model3.Visibility = System.Windows.Visibility.Visible;
                model4.Visibility = System.Windows.Visibility.Visible;
                Choices models = new Choices();
                models.Add(new string[] { "3 series", "5 series", "7 series", "c class" });

                // Create a GrammarBuilder object and append the Choices object.
                GrammarBuilder gb = new GrammarBuilder();

                gb.Append(models);

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(gb);
                recognizer.LoadGrammar(g);
                // Register the handler before starting recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
                Thread.Sleep(10000); // crude pause so the spoken prompts can finish
                recognizer.RecognizeAsync(RecognizeMode.Single);

            }
            else if (title == "Toyota")
            {
                // Image image = Image.FromFile("toyota.jpg");

                //pictureBox6.Image = image;
                obj.SpeakAsync("Choose from TOYOTA cars");
                obj.SpeakAsync("Corolla");
                obj.SpeakAsync("Camry");
                obj.SpeakAsync("Prado");
                obj.SpeakAsync("prius");
                obj.SpeakAsync("hilux");
                mod.Content = "Toyota";
                model1.Content = "Corolla";
                model2.Content = "Camry";
                model3.Content = "Prado";
                model4.Content = "Prius";
                model5.Content = "Hilux";
                model1.Visibility = System.Windows.Visibility.Visible;
                model2.Visibility = System.Windows.Visibility.Visible;
                model3.Visibility = System.Windows.Visibility.Visible;
                model4.Visibility = System.Windows.Visibility.Visible;
                model5.Visibility = System.Windows.Visibility.Visible;
                Choices toyomodels = new Choices();
                toyomodels.Add(new string[] { "Corolla", "Camry", "Prado", "Prius", "Hilux" });

                // Create a GrammarBuilder object and append the Choices object.
                GrammarBuilder gb = new GrammarBuilder();

                gb.Append(toyomodels);

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(gb);
                recognizer.LoadGrammar(g);
                // Register the handler before starting recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
                Thread.Sleep(10000); // crude pause so the spoken prompts can finish
                recognizer.RecognizeAsync(RecognizeMode.Single);
            }
            else if (title == "Suzuki")
            {
                obj.SpeakAsync("Choose from suzuki cars");
                obj.SpeakAsync("mehran");
                obj.SpeakAsync("cultus");
                obj.SpeakAsync("bolan");
                obj.SpeakAsync("swift");
                obj.SpeakAsync("liana");
                mod.Content = "Suzuki";
                model1.Content = "Mehran";
                model2.Content = "Cultus";
                model3.Content = "Bolan";
                model4.Content = "Swift";
                model5.Content = "Liana";
                model1.Visibility = System.Windows.Visibility.Visible;
                model2.Visibility = System.Windows.Visibility.Visible;
                model3.Visibility = System.Windows.Visibility.Visible;
                model4.Visibility = System.Windows.Visibility.Visible;
                model5.Visibility = System.Windows.Visibility.Visible;
                Choices suzmodels = new Choices();
                suzmodels.Add(new string[] { "Mehran", "Cultus", "Bolan", "Swift", "Liana" });

                // Create a GrammarBuilder object and append the Choices object.
                GrammarBuilder gb = new GrammarBuilder();

                gb.Append(suzmodels);

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(gb);
                recognizer.LoadGrammar(g);
                // recognizer.Enabled = true;

                // Register the handler before starting recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
                Thread.Sleep(10000); // crude pause so the spoken prompts can finish
                recognizer.RecognizeAsync(RecognizeMode.Single);

            }
            else if (title == "Kia")
            {
                obj.SpeakAsync("Choose from kia cars");
                obj.SpeakAsync("spectra");
                obj.SpeakAsync("sportage");
                mod.Content = "Kia";
                model1.Content = "Spectra";
                model2.Content = "Sportage";

                model1.Visibility = System.Windows.Visibility.Visible;
                model2.Visibility = System.Windows.Visibility.Visible;
                Choices kiamodels = new Choices();
                kiamodels.Add(new string[] { "Spectra", "Sportage" });

                // Create a GrammarBuilder object and append the Choices object.
                GrammarBuilder gb = new GrammarBuilder();

                gb.Append(kiamodels);

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(gb);
                recognizer.LoadGrammar(g);
                // Register the handler before starting recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
                Thread.Sleep(6000); // crude pause so the spoken prompts can finish
                recognizer.RecognizeAsync(RecognizeMode.Single);

            }
            else if (title == "Honda")
            {
                obj.SpeakAsync("Choose from honda cars");
                obj.SpeakAsync("City");
                obj.SpeakAsync("civic");
                obj.SpeakAsync("accord");
                mod.Content = "Honda";
                model1.Content = "City";
                model2.Content = "Civic";
                model3.Content = "Accord";

                model1.Visibility = System.Windows.Visibility.Visible;
                model2.Visibility = System.Windows.Visibility.Visible;
                model3.Visibility = System.Windows.Visibility.Visible;


                Choices hondamodels = new Choices();
                hondamodels.Add(new string[] { "City", "Civic", "Accord" });

                // Create a GrammarBuilder object and append the Choices object.
                GrammarBuilder gb = new GrammarBuilder();

                gb.Append(hondamodels);

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(gb);
                recognizer.LoadGrammar(g);
                //  recognizer.Enabled = true;
                // Register the handler before starting recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
                Thread.Sleep(8000); // crude pause so the spoken prompts can finish
                recognizer.RecognizeAsync(RecognizeMode.Single);

            }


        }
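Each brand branch above repeats the same speak-prompt / build-grammar / recognize sequence. A hedged consolidation sketch; obj, recognizer, and icers_carnamerecognized are the fields and handler used in the example, while OfferModels is a hypothetical helper name:

private void OfferModels(string brand, string[] models)
{
    obj.SpeakAsync("Choose from " + brand + " cars");
    foreach (string m in models)
        obj.SpeakAsync(m);

    // Build and load a grammar for just this brand's models.
    GrammarBuilder gb = new GrammarBuilder();
    gb.Append(new Choices(models));
    recognizer.LoadGrammar(new Grammar(gb));

    // Attach the handler before starting recognition.
    recognizer.SpeechRecognized +=
        new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
    recognizer.RecognizeAsync(RecognizeMode.Single);
}

// Usage, replacing one of the branches above:
//   else if (title == "Kia")
//       OfferModels("Kia", new string[] { "Spectra", "Sportage" });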
Example #52
0
        /// \brief This loads plugin grammars into the recognizer,
        /// sets up callback methods,
        /// and starts speech recognition on the first loop through.
        private void startRecognition()
        {
            speechRecogEngine = new SpeechRecognitionEngine(recogInfo.Id);

            // load grammars
            foreach (Plugin plugin in pluginsDict.Values)
            {
                foreach (Grammar grammar in plugin.Grammars)
                {
                    speechRecogEngine.LoadGrammar(grammar);
                }
            }

            // hook handlers
            speechRecogEngine.SpeechRecognized += speechRecognizedHandler;
            speechRecogEngine.SpeechHypothesized += speechHypothesizedHandler;
            speechRecogEngine.SpeechRecognitionRejected += speechRejectedHandler;
            speechRecogEngine.SpeechDetected += speechDetectedHandler;
            speechRecogEngine.RecognizeCompleted += speechRecognizeCompleteHandler;

            using (Stream voiceStream = source.Start())
            {
                speechRecogEngine.SetInputToAudioStream(voiceStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                Console.WriteLine("Recognizing speech. Press ENTER to stop");

                speechRecogEngine.RecognizeAsync(RecognizeMode.Single);

                Console.ReadLine();
                Console.WriteLine("Stopping recognizer ...");

                speechRecogEngine.RecognizeAsyncStop();
            }

            sensor.Stop();
        }
        private static void RecognitionStart(KinectAudioSource source, SpeechRecognitionEngine sre)
        {
            using (Stream s = source.Start())
            {
                sre.SetInputToAudioStream(
                    s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                Console.WriteLine("Recognizing speech. Press ENTER to stop");

                sre.RecognizeAsync(RecognizeMode.Multiple);
                Console.ReadLine();
                Console.WriteLine("������� ...");
                sre.RecognizeAsyncStop();
            }
        }
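The literal SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null) appears in nearly every Kinect example above. A small helper that names the parameters (16 kHz, 16-bit, mono PCM; 32000 bytes per second is 16000 samples times 2 bytes; block align 2 is one mono 16-bit frame):

using System.Speech.AudioFormat;

static class KinectAudioFormat
{
    // 16 kHz, 16-bit, mono PCM: the format the Kinect audio stream delivers.
    public static readonly SpeechAudioFormatInfo Format =
        new SpeechAudioFormatInfo(EncodingFormat.Pcm,
                                  16000,  // samples per second
                                  16,     // bits per sample
                                  1,      // channels (mono)
                                  32000,  // average bytes per second = 16000 * 2
                                  2,      // block align: one 16-bit mono frame
                                  null);  // no extra format-specific data
}

// Usage: sre.SetInputToAudioStream(s, KinectAudioFormat.Format);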