public void StartDesign()
        {
            // In-process speech recognizer for the voice-driven logic-gate designer.
            SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();

            recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
            recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
            recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
            recognizer.SetInputToDefaultAudioDevice();

            // "Clear" — wipe the design.
            GrammarBuilder clear = new GrammarBuilder("Clear");

            // "Insert <gate> <column> <row> towards <orientation>".
            // NOTE(review): "too" looks like a phonetic spelling of "two" — confirm intent.
            GrammarBuilder insert = new GrammarBuilder("Insert");
            Choices gates = new Choices(new string[] { "and", "or", "not", "ex or", "nor", "nand" });
            Choices columns = new Choices(new string[] { "one", "too", "three", "four", "five", "six", "seven", "eight" });
            Choices rows = new Choices(new string[] { "one", "too", "three", "four", "five" });
            Choices orientation = new Choices(new string[] { "left", "right", "up", "down" });
            insert.Append(gates);
            insert.Append(columns);
            insert.Append(rows);
            insert.Append("towards");
            insert.Append(orientation);

            // "Connect output <col> <row> to input <col> <row>".
            GrammarBuilder connect = new GrammarBuilder("Connect");
            connect.Append("output");
            connect.Append(columns);
            connect.Append(rows);
            connect.Append("to");
            connect.Append("input");
            connect.Append(columns);
            connect.Append(rows);

            Grammar _clear_grammar = new Grammar(clear);
            Grammar _insert_grammar = new Grammar(insert);
            Grammar _connect_grammar = new Grammar(connect);

            recognizer.LoadGrammarAsync(_clear_grammar);
            recognizer.LoadGrammarAsync(_insert_grammar);
            recognizer.LoadGrammarAsync(_connect_grammar);

            // BUG FIX: previously a synchronous `while (true) { recognizer.Recognize(); }`
            // loop sat AFTER Application.Run. Run blocks this thread, so no recognition
            // happened while the form was open, and once the form closed the loop spun
            // forever with no exit. Start continuous asynchronous recognition before
            // running the form instead, and clean up when the form closes.
            recognizer.RecognizeAsync(RecognizeMode.Multiple);

            Application.EnableVisualStyles();
            Application.SetCompatibleTextRenderingDefault(false);
            Application.Run(new Form1());

            recognizer.RecognizeAsyncCancel();
            recognizer.Dispose();
        }
Exemple #2
0
        static void Main(string[] args)
        {
            try
            {
                // Announce that the assistant is up and listening.
                ss.SetOutputToDefaultAudioDevice();
                Console.WriteLine("\n(Speaking: I am awake)");
                ss.Speak("I am awake");

                // English (US) recognizer fed by the default microphone.
                CultureInfo ci = new CultureInfo("en-us");
                sre = new SpeechRecognitionEngine(ci);
                sre.SetInputToDefaultAudioDevice();
                sre.SpeechRecognized += sre_SpeechRecognized;

                // Start/stop control phrases.
                Choices ch_StartStopCommands = new Choices();
                ch_StartStopCommands.Add("Alexa record");
                ch_StartStopCommands.Add("speech off");
                ch_StartStopCommands.Add("klatu barada nikto");
                GrammarBuilder gb_StartStop = new GrammarBuilder();
                gb_StartStop.Append(ch_StartStopCommands);
                Grammar g_StartStop = new Grammar(gb_StartStop);

                sre.LoadGrammarAsync(g_StartStop);
                sre.RecognizeAsync(RecognizeMode.Multiple); // multiple grammars

                // BUG FIX: the previous empty busy-wait (`while (done == false) { ; }`)
                // pinned a CPU core at 100%. Poll the `done` flag with a short sleep;
                // the flag is presumably set by sre_SpeechRecognized — confirm.
                while (done == false)
                {
                    System.Threading.Thread.Sleep(50);
                }

                Console.WriteLine("\nHit <enter> to close shell\n");
                Console.ReadLine();
            }
            catch (Exception ex)
            {
                // Keep the console open so the error is readable.
                Console.WriteLine(ex.Message);
                Console.ReadLine();
            }
        }
 public static SpeechRecognitionEngine getEngine(String lang)
 {
     // Build (or rebuild) the shared recognition engine for the requested language.
     // BUG FIX: the log line below previously sat OUTSIDE the brace-less `if`, so
     // "Kastat current engine" ("threw away current engine") was printed even on
     // the very first call when nothing was disposed.
     if (init)
     {
         recEngine.Dispose();
         Console.WriteLine("Kastat current engine");
     }
     culture = new System.Globalization.CultureInfo(lang);
     choices = new Choices();
     grammarBuilder = new GrammarBuilder();
     // VoiceCommands supplies the phrase list for `lang`.
     VoiceCommands.Init(lang);
     choices.Add(VoiceCommands.GetAllCommands());
     grammarBuilder.Culture = culture;
     grammarBuilder.Append(choices);
     grammar = new Grammar(grammarBuilder);
     Console.WriteLine("Initialiserat svenskt grammar");
     try
     {
         recEngine = new SpeechRecognitionEngine(culture);
         recEngine.LoadGrammarAsync(grammar);
         Console.WriteLine("Laddat enginen med " + lang);
     }
     catch (UnauthorizedAccessException e)
     {
         Console.WriteLine("Error: UnauthorizedAccessException");
         Console.WriteLine(e.ToString());
         // BUG FIX: previously execution fell through with a null or disposed
         // engine and crashed on SetInputToDefaultAudioDevice below. Rethrow so
         // the caller sees the real failure instead of a NullReferenceException.
         throw;
     }
     init = true;
     recEngine.SetInputToDefaultAudioDevice();
     return recEngine;
 }
Exemple #4
0
        void SetupSpeech()
        {
            // Wire up a free-dictation recognizer plus a synthesizer for spoken feedback.
            speechEngine = new SpeechRecognitionEngine();
            speechTalk = new SpeechSynthesizer();

            speechEngine.SetInputToDefaultAudioDevice();
            speechEngine.SpeechRecognized += speechEngine_SpeechRecognized;
            speechEngine.LoadGrammarAsync(new DictationGrammar());

            // Keep listening continuously instead of stopping after one phrase.
            speechEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #5
0
        // ********* RECONHECIMENTO DE VOZ *********
        public void Gramatica()
        {
            try
            {
                //reconhecedor = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-us"));
                reconhecedor = new SpeechRecognitionEngine(ci);
            }
            catch (Exception ex)
            {
                MessageBox.Show("ERRO ao integrar lingua escolhida:" + ex.Message);
            }

            // criacao da gramatica simples que o programa vai entender
            // usando um objeto Choices
            var gramatica = new Choices();

            gramatica.Add(listaPalavras); // inclui a gramatica criada

            // cria o construtor gramatical
            // e passa o objeto criado com as palavras
            var gb = new GrammarBuilder();

            gb.Append(gramatica);

            // cria a instancia e carrega a engine de reconhecimento
            // passando a gramatica construida anteriomente
            try
            {
                var g = new Grammar(gb);

                try
                {
                    // carrega o arquivo de gramatica
                    reconhecedor.RequestRecognizerUpdate();
                    reconhecedor.LoadGrammarAsync(g);

                    // registra a voz como mecanismo de entrada para o evento de reconhecimento
                    reconhecedor.SpeechRecognized += Sre_Reconhecimento;

                    reconhecedor.SetInputToDefaultAudioDevice();         // microfone padrao
                    resposta.SetOutputToDefaultAudioDevice();            // auto falante padrao
                    reconhecedor.RecognizeAsync(RecognizeMode.Multiple); // multiplo reconhecimento
                }
                catch (Exception ex)
                {
                    MessageBox.Show("ERRO ao criar reconhecedor: " + ex.Message);
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show("ERRO ao criar a gramática: " + ex.Message);
            }
        }
Exemple #6
0
        public void newGame()
        {
            // Announce that a new game is being prepared.
            pBuild.ClearContent();
            pBuild.AppendText("making new game");
            sSynth.Speak(pBuild);

            // Vocabulary for a new game: the start command, number words, and "help".
            string[] phrases =
            {
                "start game",
                "five", "six", "seven", "eight", "nine", "ten",
                "eleven", "twelve", "thirteen", "fourteen", "fifteen",
                "sixteen", "seventeen", "eighteen", "nineteen", "twenty", "thirty", "forty", "fifty", "help"
            };
            Choices commands = new Choices();
            commands.Add(phrases);

            gBuilder = new GrammarBuilder();
            gBuilder.Append(commands);

            // Reset the coordinate counters before the round starts.
            m = 0;
            n = 0;

            recEngine.LoadGrammarAsync(new Grammar(gBuilder));
            mFlag = true;
        }
        private void frmUtama_Load(object sender, EventArgs e)
        {
            btn_disable_voice.Enabled = false;

            frmlist.Show();

            try
            {
                // Grammar: the first 11 personal phrases and first 15 todo phrases.
                Choices commands = new Choices();
                commands.Add(new string[] {
                    data_personal[0]
                    , data_personal[1]
                    , data_personal[2]
                    , data_personal[3]
                    , data_personal[4]
                    , data_personal[5]
                    , data_personal[6]
                    , data_personal[7]
                    , data_personal[8]
                    , data_personal[9]
                    , data_personal[10]
                    , data_todo[0]
                    , data_todo[1]
                    , data_todo[2]
                    , data_todo[3]
                    , data_todo[4]
                    , data_todo[5]
                    , data_todo[6]
                    , data_todo[7]
                    , data_todo[8]
                    , data_todo[9]
                    , data_todo[10]
                    , data_todo[11]
                    , data_todo[12]
                    , data_todo[13]
                    , data_todo[14]
                });

                GrammarBuilder gBuilder = new GrammarBuilder();
                gBuilder.Append(commands);
                Grammar grammar = new Grammar(gBuilder);

                recEngine.LoadGrammarAsync(grammar);
                recEngine.SetInputToDefaultAudioDevice();
                recEngine.SpeechRecognized += recEngine_SpeechRecognized;
            }
            catch (Exception ex)
            {
                // BUG FIX: the empty catch silently swallowed every failure
                // (missing data entries, no microphone, ...), making problems
                // undiagnosable. Voice setup stays best-effort, but log the cause.
                Console.WriteLine("Voice setup failed: " + ex);
            }
        }
Exemple #8
0
        void processing()
        {
            // Register the LED / exit phrases the recognizer should accept.
            commands.Add(new string[] { "Blue On", "Red On", "Green On", "Blue Off", "Red Off", "Green Off", "Exit", "All On", "All Off", "Arduino Say Good Bye to makers" });

            // Wrap the choices in a grammar the engine can load.
            GrammarBuilder builder = new GrammarBuilder(commands);
            Grammar gr = new Grammar(builder);

            // Pause the engine while loading, then listen on the default microphone.
            re.RequestRecognizerUpdate();
            re.LoadGrammarAsync(gr);
            re.SetInputToDefaultAudioDevice();
            re.SpeechRecognized += re_SpeechRecognized; // method group instead of explicit delegate
        }
Exemple #9
0
    public VoiceCommandEngine(string callName)
    {
        // Wake word that must prefix every command.
        this.callName = callName;

        engine = new SpeechRecognitionEngine();
        engine.SetInputToDefaultAudioDevice();

        // Every phrase the engine understands after the wake word.
        string[] phrases =
        {
            "open", "open file",
            "mute", "unmute",
            "increase volume", "raise volume", "volume up",
            "decrease volume", "lower volume", "volume down",
            "hide", "show", "help", "stop listening", "close",
            "play", "pause", "rewind", "stop",
            "next chapter", "skip chapter", "previous chapter",
            "next", "next file", "previous", "previous file",
            "fullscreen", "view fullscreen", "go fullscreen",
            "exit fullscreen", "leave fullscreen",
            "whats playing"
        };
        Choices choice = new Choices(phrases);

        // Grammar shape: "<callName> <phrase>".
        GrammarBuilder grammarBuilder = new GrammarBuilder(callName);
        grammarBuilder.Append(choice.ToGrammarBuilder());
        engine.LoadGrammarAsync(new Grammar(grammarBuilder));

        // React to recognized speech and to microphone level changes.
        engine.SpeechRecognized += recognizer_SpeechRecognized;
        engine.AudioLevelUpdated += engine_AudioLevelUpdated;
    }
Exemple #10
0
        private void gaver_Load(object sender, EventArgs e)
        {
            // Build the command vocabulary as soon as the form loads.
            string[] phrases = { "kill wifi", "computer", "stop" };
            commands.Add(phrases);

            gBuilder = new GrammarBuilder();
            gBuilder.Append(commands);
            grammar = new Grammar(gBuilder);
            recEngine.LoadGrammarAsync(grammar);

            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += RecEngine_SpeechRecognized;

            // Continuous recognition: fire an event for every phrase heard.
            recEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
    public VoiceCommandEngine(string callName)
    {
        // The wake word spoken before each command.
        this.callName = callName;

        engine = new SpeechRecognitionEngine();

        // Phrases accepted after the wake word.
        var choice = new Choices(new[]
        {
            "open", "open file", "mute", "unmute",
            "increase volume", "raise volume", "volume up",
            "decrease volume", "lower volume", "volume down",
            "hide", "show", "help", "stop listening", "close",
            "play", "pause", "rewind", "stop",
            "next chapter", "skip chapter", "previous chapter",
            "next", "next file", "previous", "previous file",
            "fullscreen", "view fullscreen", "go fullscreen",
            "exit fullscreen", "leave fullscreen",
            "whats playing"
        });

        // The grammar is "<callName>" followed by one of the phrases above.
        var builder = new GrammarBuilder(callName);
        builder.Append(choice.ToGrammarBuilder());

        engine.SetInputToDefaultAudioDevice();
        engine.LoadGrammarAsync(new Grammar(builder));

        // Handlers for recognized speech and microphone level updates.
        engine.SpeechRecognized += recognizer_SpeechRecognized;
        engine.AudioLevelUpdated += engine_AudioLevelUpdated;
    }
        // Initialize an in-process speech recognition engine and run it
        // against a wave file whose name is read from Input.txt.
        static void Main(string[] args)
        {
            using (SpeechRecognitionEngine recognizer =
             new SpeechRecognitionEngine())
            {
                // The 42 target words the recognizer listens for.
                string[] myWords = new string[] { "Me", "Kiss", "Fluff", "Yell", "Kind", "Crack", "Hope", "Check", "Lake", "Steep", "Shell", "Bark", "Tooth", "Mouse", "Force", "Fringe", "Flight", "Haunt", "Asked", "Going", "Table", "Giant", "Bully", "Treated", "Spying", "Wiggle", "Shredded", "Picnic", "Decoy", "Slaying", "Scheming", "Happier", "Joyous", "Riotous", "Chow", "Cookie", "Feud", "Eighty", "Host", "Weather", "Crawl", "Stew" };
                Choices commands = new Choices();
                commands.Add(myWords);
                GrammarBuilder gBuilder = new GrammarBuilder();
                gBuilder.Append(commands);
                Grammar grammar = new Grammar(gBuilder);

                recognizer.LoadGrammarAsync(grammar);

                // BUG FIX: the StreamReader was never disposed (handle leak), and
                // ReadToEnd() kept any trailing newline, which corrupted the wave
                // file path built below. Dispose via `using` and Trim() the name.
                string line;
                using (StreamReader sr = new StreamReader(@"c:\Users\Taylor\Desktop\AllVoiceSamples\Input.txt"))
                {
                    line = sr.ReadToEnd().Trim();
                }

                // Input comes from the wave file. (The earlier redundant
                // SetInputToDefaultAudioDevice call, immediately overridden here,
                // was removed.)
                recognizer.SetInputToWaveFile(@"c:\Users\Taylor\Desktop\AllVoiceSamples\" + line);

                // Attach event handlers for the results of recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
                recognizer.RecognizeCompleted +=
                  new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

                // Perform recognition on the entire file.
                Console.WriteLine("Starting asynchronous recognition...");
                completed = false;
                recognizer.RecognizeAsync();

                // Keep the console window open until RecognizeCompleted sets `completed`.
                while (!completed)
                {
                    Console.ReadLine();
                }
                Console.WriteLine("Done.");
            }

            Console.WriteLine();
            Console.WriteLine("Press any key to exit...");
            Console.ReadKey();
        }
Exemple #13
0
        private void Speech(object sender, EventArgs e)
        {
            // Single-phrase grammar; the phrase text is kept exactly as-is.
            Choices commands = new Choices();
            commands.Add(new string[] { "Say Hello " });

            GrammarBuilder builder = new GrammarBuilder();
            builder.Append(commands);

            engine.LoadGrammarAsync(new Grammar(builder));
            engine.SetInputToDefaultAudioDevice();
        }
        private void Form1_Load(object sender, EventArgs e)
        {
            // Kill every running Excel process so the workbook opens cleanly.
            KillSpecificExcelFileProcess("");

            // Open the workbook holding the grammar phrases (column 1 of sheet "Contact").
            Microsoft.Office.Interop.Excel.Application xlApp = new Microsoft.Office.Interop.Excel.Application();
            Workbook  MonClasseur = xlApp.Workbooks.Open(@"C:\\BddRecoVocal.xlsx");
            Worksheet MaFeuil     = (Worksheet)MonClasseur.Worksheets["Contact"];

            Choices commands = new Choices();

            // Cell (1,1) stores the phrase count, but the computed bound is
            // currently overridden by the hard-coded 5 — TODO confirm which is intended.
            string NbVal = MaFeuil.Cells[1, 1].value;
            // int NbPhrase = Convert.ToInt32(NbVal) + 2;
            int NbPhrase = 5;
            int compteur;

            // Rows 2..NbPhrase-1 each contribute one phrase to the grammar.
            for (compteur = 2; compteur < NbPhrase; compteur++)
            {
                string Phrase = MaFeuil.Cells[compteur, 1].value;
                commands.Add(new string[] { Phrase });
            }

            // BUG FIX: the workbook and the Excel application were never closed,
            // leaking an EXCEL.EXE process on every form load.
            MonClasseur.Close(false);
            xlApp.Quit();

            GrammarBuilder gBuilder = new GrammarBuilder();

            gBuilder.Append(commands);
            Grammar grammar = new Grammar(gBuilder);

            recEngine.LoadGrammarAsync(grammar);
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Exemple #15
0
        private void SpeechToText_Load(object sender, EventArgs e)
        {
            MessageBox.Show("What is your name? Enter in the name field.");

            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(recEngine_SpeechRecognized);
            Grammar gr = new Grammar(CreateGrammar());

            recEngine.LoadGrammarAsync(gr);

            // Slightly slower speech; pick an adult female installed voice.
            // BUG FIX: a GetInstalledVoices() call whose result was discarded has
            // been removed — it is a pure query and is not required by
            // SelectVoiceByHints.
            sythesizer.Rate = -1;
            sythesizer.SelectVoiceByHints(VoiceGender.Female, VoiceAge.Adult);
        }
Exemple #16
0
        private void FrmPatientRecord_Load(object sender, EventArgs e)
        {
            // Voice commands available on the patient-record screen.
            string[] lista = { "List", "Add", "Clear" };

            Choices opcionet = new Choices(lista);
            Grammar g = new Grammar(new GrammarBuilder(opcionet));

            recEngine.LoadGrammarAsync(g);
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Exemple #17
0
        /// <summary>
        /// Commands of jarvis
        /// </summary>
        private void jVoice()
        {
            command.Add(new String[] { " open system", "open Main Screen", "jarvis", " open cmd", "close cmd", "lock pc", "do log off",
                                       " do hibernate", "goto sleep", "open chrome", "open notepad",
                                       "find", "change Voice", "close notepad", "close chrome", "what is network speed",
                                       "go offline", "open google", "open fb", "open facebook", "open wiki", "open wikipedia",
                                       " open youtube", "open mail", "open gmail", "open charge condition", "show commands", "hide commands" });

            //command.Add(File.ReadAllLines(@"C:\Users\Aksh\Desktop\commands.txt"));
            //// command.Add(File.ReadAllLines(@"A:\WORKSPACE CODES\Innotion WebSites\Personal Assistant\Personal Assistant\selfGrammer.txt"));

            GrammarBuilder gbuilder = new GrammarBuilder();

            gbuilder.Append(command);
            Grammar grammar = new Grammar(gbuilder);

            recEngine.LoadGrammarAsync(grammar);
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;

            recEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #18
0
        private void initializeEditCommands()
        {
            // Navigation commands available while in edit mode.
            string[] phrases = { "next", "previous", "prev", "choose mode" };

            Choices eCommands = new Choices();
            eCommands.Add(phrases);

            GrammarBuilder eBuilder = new GrammarBuilder();
            eBuilder.Append(eCommands);

            recEngine2.SetInputToDefaultAudioDevice();
            recEngine2.LoadGrammarAsync(new Grammar(eBuilder));
        }
Exemple #19
0
        public void CreateNewSynthesizer(string[] commandList, SpeechRecognitionEngine recognizer, SpeechSynthesizer sythesizer, SpeechRecognitionEngine listener, EventHandler <SpeechRecognizedEventArgs> DefaultSpeechRecognized, EventHandler <SpeechDetectedEventArgs> RecognizerSpeechRecognized, EventHandler <SpeechRecognizedEventArgs> ListenerSpeechRecognize)
        {
            // Both engines share the same vocabulary; each gets its own Grammar
            // instance (a loaded Grammar presumably cannot be shared between
            // engines — confirm against the System.Speech docs).
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.LoadGrammarAsync(new Grammar(new GrammarBuilder(new Choices(commandList))));
            recognizer.SpeechRecognized += DefaultSpeechRecognized; // method group, same delegate
            recognizer.SpeechDetected += RecognizerSpeechRecognized;
            recognizer.RecognizeAsync(RecognizeMode.Multiple);

            listener.SetInputToDefaultAudioDevice();
            listener.LoadGrammarAsync(new Grammar(new GrammarBuilder(new Choices(commandList))));
            listener.SpeechRecognized += ListenerSpeechRecognize;
            listener.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #20
0
        private void initializeReviewCommands()
        {
            // Commands for navigating a review session.
            Choices rCommands = new Choices();
            rCommands.Add(new string[] { "next", "previous", "prev", "root", "choose mode" });

            GrammarBuilder rBuilder = new GrammarBuilder();
            rBuilder.Append(rCommands);

            recEngine3.SetInputToDefaultAudioDevice();
            recEngine3.LoadGrammarAsync(new Grammar(rBuilder));
        }
Exemple #21
0
        public LaunchForm()
        {
            // The form itself stays hidden; interaction is voice-only.
            this.Hide();

            List <NasdaqStock> nasdaqStocks = CompanyListBuilder.readInStocks();
            Grammar newGrammar = CommandsBuilder.buildGrammar(nasdaqStocks);

            // NOTE(review): the engine is a local variable — nothing visible here
            // keeps it alive after the constructor returns; confirm it is not
            // garbage-collected while recognition is running.
            SpeechRecognitionEngine recEngine = new SpeechRecognitionEngine();
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.LoadGrammarAsync(newGrammar);
            recEngine.SpeechRecognized += (sender, e) => RecEngine_SpeechDetected(sender, e, nasdaqStocks);
            recEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #22
0
        private void LoadGrammer()
        {
            // Build a grammar from every key currently in _db and make it the
            // recognizer's only grammar.
            GrammarBuilder grammarBuilder = new GrammarBuilder();

            string[] phrases = new string[_db.Keys.Count];
            _db.Keys.CopyTo(phrases, 0);
            grammarBuilder.Append(new Choices(phrases));
            Grammar customGrammar = new Grammar(grammarBuilder);

            // BUG FIX: the completion handler was subscribed on every call, so
            // repeated reloads fired it multiple times per load. Unsubscribing
            // first keeps the subscription single (removing a non-subscribed
            // handler is a harmless no-op).
            recognizer.LoadGrammarCompleted -= grammer_completed_handler;
            recognizer.LoadGrammarCompleted += grammer_completed_handler;
            recognizer.UnloadAllGrammars();
            recognizer.LoadGrammarAsync(customGrammar);
        }
Exemple #23
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Set up Russian (ru-RU) speech recognition for the shutdown-timer commands.
            System.Globalization.CultureInfo ci        = new System.Globalization.CultureInfo("ru-RU");
            SpeechRecognitionEngine          recEngine = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("ru-RU"));

            Choices commands = new Choices();

            // Russian voice commands: "computer add one hour", "computer go",
            // "computer stop", "computer shut down", "how long until shutdown".
            commands.Add(new string[] { "Компьютер прибавь один час", "Компьютер погнали", "Компьютер остановись", "Компьютер выключайся", "Сколько осталось до выключения" });
            GrammarBuilder gBuilder = new GrammarBuilder();

            gBuilder.Culture = ci;
            gBuilder.Append(commands);
            Grammar grammar = new Grammar(gBuilder);

            // NOTE(review): recEngine is a local — confirm something keeps it alive
            // after this handler returns, or continuous recognition may stop when
            // the engine is garbage-collected.
            recEngine.LoadGrammarAsync(grammar);
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.RecognizeAsync(RecognizeMode.Multiple);
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;

            notifyIcon1.Visible = false;
            // Start minimized when the auto-run setting is enabled.
            if (Properties.Settings.Default.AutoRunMnz == true)
            {
                this.WindowState = FormWindowState.Minimized;
            }

            i++; // launch counter (form loads)
            //Console.WriteLine(i);
            //Console.WriteLine(DateTime.Now.TimeOfDay);
            duration       = 0;
            label2.Visible = true;
            label2.Text    = "Ожидание...";

            btnChoice.Text = "Выключение ПК";

            // "Shutdown PC" mode with "shutdown now" checked enables the confirm controls.
            if (btnChoice.Text == "Выключение ПК" && ShutdownNow.Checked == true)
            {
                Acceptbtn.Enabled = true; groupBox2.Enabled = true;
            }
            if (isShuttingDown == true)
            {
                Cancelbtn.Enabled = true;
            }

            // On the very first load (i == 1 after the increment above) with the
            // timer auto-run setting on, start the timer as if Accept were clicked.
            if (i == 1 && Properties.Settings.Default.AutoRunTimer)
            {
                Acceptbtn_Click(sender, e);
            }
            i++; // NOTE(review): i is incremented TWICE per load — confirm intentional.
        }
Exemple #24
0
        private void MainForm_Load(object sender, EventArgs e)
        {
            // Culture is configurable via the "culture" app setting (IETF tag).
            var cultureTag = ConfigurationManager.AppSettings["culture"];

            culture     = CultureInfo.GetCultureInfoByIetfLanguageTag(cultureTag);
            synthesizer = new SpeechSynthesizer();
            synthesizer.SetOutputToDefaultAudioDevice();
            synthesizer.SpeakStarted   += Synthesizer_SpeakStarted;
            synthesizer.SpeakProgress  += Synthesizer_SpeakProgress;
            synthesizer.SpeakCompleted += Synthesizer_SpeakCompleted;
            recognizer = new SpeechRecognitionEngine(culture);
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.LoadGrammarCompleted      += recognizer_LoadGrammarCompleted;
            recognizer.SpeechRecognized          += recognizer_SpeechRecognized;
            recognizer.SpeechRecognitionRejected += recognizer_SpeechRecognitionRejected;
            // Read every *.cg.xml SRGS grammar file below the working directory.
            var currentDir   = Environment.CurrentDirectory;
            var grammarFiles = Directory.GetFiles(currentDir, "*.cg.xml", SearchOption.AllDirectories);
            var srgsDocs     = grammarFiles.Select(f => new SrgsDocument(f)).ToArray();
            // Build one Grammar per PUBLIC rule and load them all.
            // (A previous unused extraction of private-scope rules was removed —
            // it was computed and never read.)
            var grammars = srgsDocs.SelectMany(d => d.Rules.Where(r => r.Scope == SrgsRuleScope.Public)
                                               .Select(pr => new Grammar(d, pr.Id))).ToArray();

            Array.ForEach(grammars, g => recognizer.LoadGrammarAsync(g));
            // Generate on-the-fly grammars (start menu, windows search, timers).
            smg = new StartMenuGrammar();
            wsg = new WinSearchGrammar();
            tig = new TimerGrammar();
            var onTheFly = smg.GenerateGrammar().Concat(wsg.GenerateGrammar())
                           .Concat(tig.GenerateGrammar()).ToArray();

            Array.ForEach(onTheFly, d => recognizer.LoadGrammarAsync(new Grammar(d)));
            // Greet the user.
            Speak(SpeakResource.Welcome);
        }
Exemple #25
0
        private void Grammer()
        {
            // Load one command grammar per row of MyTable (SQL database),
            // plus an SRGS XML grammar file and a free-dictation fallback.
            string constring = ConfigurationManager.ConnectionStrings["MyDataBase"].ConnectionString;

            // BUG FIX: the connection, command and reader were never disposed —
            // on any exception the connection leaked. `using` guarantees cleanup.
            using (SqlConnection con = new SqlConnection(constring))
            using (SqlCommand sc = new SqlCommand())
            {
                con.Open();
                sc.Connection  = con;
                sc.CommandText = "select * FROM MyTable";
                using (SqlDataReader sdr = sc.ExecuteReader())
                {
                    while (sdr.Read())
                    {
                        var     Loadcmd        = sdr["Commands"].ToString();
                        Grammar commandgrammar = new Grammar(new GrammarBuilder(new Choices(Loadcmd)));
                        speechRec.LoadGrammarAsync(commandgrammar);
                        commandgrammar.Priority = 3;
                        commandgrammar.Weight   = 1f;
                    }
                }
            }

            // SRGS grammar read from an XML file.
            Grammar xmlG = new Grammar(@"SpeakEck\XMLFile.xml");

            xmlG.Name = "SRGS File Command Grammar";
            speechRec.LoadGrammarAsync(xmlG);
            xmlG.Priority = 2;
            xmlG.Weight   = 0.6f;

            // Free dictation as a low-weight fallback.
            DictationGrammar dictation = new DictationGrammar();

            speechRec.LoadGrammar(dictation);
            dictation.Weight = 0.4f;
        }
Exemple #26
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Single-letter voice commands handled by recengine_speech_recognized.
            Choices commands = new Choices();
            commands.Add(new string[] { "H", "P", "C" });

            GrammarBuilder gbuilder = new GrammarBuilder();
            gbuilder.Append(commands);

            recengine.LoadGrammarAsync(new Grammar(gbuilder));
            recengine.SetInputToDefaultAudioDevice();
            recengine.SpeechRecognized += recengine_speech_recognized;
        }
Exemple #27
0
        // Defined Function processing where main instruction will be executed !
        void processing()
        {
            //First of all storing commands
            commands.Add(new string[] { "Blue On", "Red On", "Green On", "Blue Off", "Red Off", "Green Off", "Exit", "All On", "All Off", "Arduino Say Good Bye to makers" });

            //Now we will create object of Grammer in which we will pass commands as parameter
            Grammar gr = new Grammar(new GrammarBuilder(commands));

            // For more information about below funtions refer to site https://docs.microsoft.com/en-us/dotnet/api/system.speech.recognition?view=netframework-4.7.2
            re.RequestRecognizerUpdate();      // Pause Speech Recognition Engine before loading commands
            re.LoadGrammarAsync(gr);
            re.SetInputToDefaultAudioDevice(); // As Name suggest input device builtin microphone or you can also connect earphone etc...
            re.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(re_SpeechRecognized);
        }
Exemple #28
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Two demo commands, handled by recEngine_SpeechRecognized.
            Choices commands = new Choices();
            commands.Add(new string [] { "say hello", "print my name" });

            GrammarBuilder builder = new GrammarBuilder();
            builder.Append(commands);

            recEngine.LoadGrammarAsync(new Grammar(builder));
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Exemple #29
0
        private void InitializeRecognizer()
        {
            // Main recognizer: command grammar plus detection/rejection diagnostics.
            recognizer.LoadGrammarAsync(BuildGrammar());
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.SpeechRecognized          += Recognizer_SpeechRecognized;
            recognizer.SpeechDetected            += Recognizer_SpeechDetected;
            recognizer.SpeechRecognitionRejected += Recognizer_SpeechRecognitionRejected;

            // The second engine gets its own Grammar instance from a fresh
            // BuildGrammar() call (a Grammar object cannot be shared between
            // two engines), and only handles the "start listening" trigger.
            startListening.LoadGrammarAsync(BuildGrammar());
            startListening.SetInputToDefaultAudioDevice();
            startListening.SpeechRecognized += StartListening_SpeechRecognized;
        }
Exemple #30
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Conversational commands the assistant responds to.
            Choices inputs = new Choices();
            inputs.Add(new string[] { "Hello", "What are you?", "How are you?", "Good", "Bad", "Shutdown Interface", "Yes", "No", "Calculate" });

            GrammarBuilder gb = new GrammarBuilder();
            gb.Append(inputs);
            // NOTE(review): a spoken-numbers Choices ("zero".."Thousand") used to be
            // built here but was never appended to the grammar (the gb.Append(Nums)
            // call was commented out), so the dead allocations were removed. Rebuild
            // a number-word Choices and gb.Append(...) it if number input through
            // the command grammar is needed again.

            // Fixed-command grammar plus free dictation; the reduced weight keeps
            // the dictation grammar from out-competing the command phrases.
            Grammar G    = new Grammar(gb);
            Grammar Dict = new DictationGrammar();

            Dict.Weight = 0.4f;
            recEngine.LoadGrammarAsync(G);
            recEngine.LoadGrammarAsync(Dict);
            recEngine.SetInputToDefaultAudioDevice();

            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
        private void Form1_Load(object sender, EventArgs e)
        {
            // Game controls: attack / left / right.
            var commandWords = new Choices(new[] { "attack", "left", "right" });
            var grammarBuilder = new GrammarBuilder();
            grammarBuilder.Append(commandWords);

            recEngine.LoadGrammarAsync(new Grammar(grammarBuilder));
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Exemple #32
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Shower voice controls.
            var phrases = new Choices(new[] { "Hello Shower", "Warmer", "Colder", "Goodbye" });
            var builder = new GrammarBuilder();
            builder.Append(phrases);

            Engine.LoadGrammarAsync(new Grammar(builder));
            Engine.SetInputToDefaultAudioDevice();
            Engine.SpeechRecognized += Engine_SpeechRecognized;
        }
Exemple #33
0
        private void initializeStates()
        {
            // Application-mode switching phrases.
            var modePhrases = new Choices(new[] { "review mode", "edit mode" });
            var builder = new GrammarBuilder();
            builder.Append(modePhrases);

            recEngine.LoadGrammarAsync(new Grammar(builder));
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += RecEngine_StateRecognized;
        }
Exemple #34
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Predefined (Chinese) voice commands.
            var phrases = new Choices(new[] { "天气", "你几岁啦", "许陈飞是猪吗", "叶涵是仙女吗" });
            var builder = new GrammarBuilder();
            builder.Append(phrases);

            recEngine.LoadGrammarAsync(new Grammar(builder));
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Exemple #35
0
        private void consulterNotes_Load(object sender, EventArgs e)
        {
            // Navigation commands for this screen.
            var navWords = new Choices(new[] { "show", "dashboard" });
            var gb = new GrammarBuilder();
            gb.Append(navWords);

            recEngine.LoadGrammarAsync(new Grammar(gb));
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Exemple #36
0
        public SpeechListerner(Speech speech)
        {
            this.speech = speech;

            // Register every known pronunciation.
            foreach (var pronounce in listOfProunces)
            {
                pronounces.Add(pronounce);
            }

            // Fix: the SpeechRecognized handler is now attached BEFORE
            // RecognizeAsync is started; previously recognition began with no
            // handler wired up, so an early recognition could be silently lost.
            reconizer.LoadGrammarAsync(new DictationGrammar());
            reconizer.SetInputToDefaultAudioDevice();
            reconizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(reconizer_reconized);
            reconizer.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #37
0
        // Console demo: loads a "<assistant name> <command>" grammar, feeds the
        // recognizer a synthetic utterance, then waits for asynchronous
        // recognition to wind down before exiting.
        static void Main(string[] args)
        {
            // Create an in-process speech recognizer.
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US")))
            {
                // Home-automation command vocabulary.
                Choices commandChoices = new Choices(new string[] { "Lights On", "Lights Off", "All Off", "Say Time"});

                // Every phrase must start with the configured assistant name.
                GrammarBuilder gb = new GrammarBuilder();
                gb.Append(Properties.Settings.Default.AssistantName);
                gb.Append(commandChoices);

                // Construct a Grammar object and load it to the recognizer.
                Grammar commandChooser = new Grammar(gb);
                commandChooser.Name = ("Command Chooser");
                recognizer.LoadGrammarAsync(commandChooser);

                // Attach handlers for the full recognition lifecycle.
                recognizer.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(SpeechDetectedHandler);
                recognizer.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(SpeechHypothesizedHandler);
                recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejectedHandler);
                recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognizedHandler);
                recognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(RecognizeCompletedHandler);

                // Assign input to the recognizer; live recognition itself stays
                // disabled (see the commented RecognizeAsync below).
                recognizer.SetInputToDefaultAudioDevice();

                _completed = false;
                Console.WriteLine("Starting asynchronous recognition...");
                //recognizer.RecognizeAsync(RecognizeMode.Multiple);

                // Emulate a spoken command instead of using live audio.
                // NOTE(review): "Nigel" is presumably the configured assistant name —
                // confirm it matches Properties.Settings.Default.AssistantName, else
                // the emulated phrase will not match the grammar.
                recognizer.EmulateRecognizeAsync("Nigel Lights On");

                // Wait 30 seconds, and then cancel asynchronous recognition.
                Thread.Sleep(TimeSpan.FromSeconds(30));
                recognizer.RecognizeAsyncCancel();

                // Spin until the completion flag is raised (presumably set by
                // RecognizeCompletedHandler — confirm).
                while (!_completed)
                {
                    Thread.Sleep(333);
                }
                Console.WriteLine("Done.");
            }

            Console.WriteLine();
            Console.WriteLine("Press any key to exit...");
            Console.ReadKey();
        }
        private void SpeechRecognition_Initialize()
        {
            // Build the hard-coded conversational grammar and start continuous
            // recognition on the default microphone.
            recognitionEngine = new SpeechRecognitionEngine();
            Choices commands = new Choices();
            string[] choices = { "hi zira", "how are you today?", "i feel sick", "good bye zira", "yes" };
            commands.Add(choices);
            GrammarBuilder grammarBuilder = new GrammarBuilder();
            grammarBuilder.Append(commands);
            Grammar grammar = new Grammar(grammarBuilder);
            recognitionEngine.LoadGrammarAsync(grammar);
            recognitionEngine.SetInputToDefaultAudioDevice();

            // Fix: subscribe BEFORE starting recognition; previously
            // RecognizeAsync ran first, so an utterance recognized before the
            // handler was attached would have been dropped.
            recognitionEngine.SpeechRecognized += recognitionEngine_SpeechRecognized;
            recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
        // Builds a grammar from the cached command phrases and prepares the
        // recognition engine; marks speech input as enabled on success.
        private void InitializeSpeechRecognition()
        {
            try
            {
                // One flat grammar over all known command keys.
                var c = new Choices(_cache.Commands.Keys.ToArray());
                var gb = new GrammarBuilder(c);
                var g = new Grammar(gb);

                _rec = new SpeechRecognitionEngine();
                _rec.InitialSilenceTimeout = TimeSpan.FromSeconds(3);
                _rec.SpeechHypothesized += OnSpeechHypothesized;
                _rec.SpeechRecognitionRejected += OnSpeechRecognitionRejected;
                // NOTE(review): OnSpeechRecognized is attached to RecognizeCompleted
                // (which fires once when a RecognizeAsync call finishes), not to the
                // SpeechRecognized event — confirm this is intentional and not a
                // wrong-event subscription.
                _rec.RecognizeCompleted += OnSpeechRecognized;

                _rec.LoadGrammarAsync(g);
                _rec.SetInputToDefaultAudioDevice();

                // Recognition is not started here; callers presumably start it later.
                _isEnabled = true;
            }
            catch { /* Speech Recognition hasn't been enabled on Windows */ }
        }
Exemple #40
0
        // Reacts to AV-flow state transitions: starts speech recognition over
        // the flow's audio when it becomes active, detaches the connector when
        // it goes inactive.
        void Flow_StateChanged(object sender, MediaFlowStateChangedEventArgs e)
        {
            Log("ControlAVCall Flow_StateChanged PreviousState=" + e.PreviousState + " State=" + e.State);

            var flow = (AudioVideoFlow)sender;

            if (flow.State != MediaFlowState.Active)
            {
                // Flow went inactive: release the recognition connector, if any.
                if (flow.SpeechRecognitionConnector != null)
                {
                    flow.SpeechRecognitionConnector.DetachFlow();
                }
                return;
            }

            // Flow became active: pipe its audio into a fresh recognition engine.
            var connector = new SpeechRecognitionConnector();
            connector.AttachFlow(flow);
            SpeechRecognitionStream audio = connector.Start();

            _speechRecognitionEngine = new SpeechRecognitionEngine();
            _speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_speechRecognitionEngine_SpeechRecognized);
            _speechRecognitionEngine.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(_speechRecognitionEngine_LoadGrammarCompleted);

            // Navigation vocabulary: "previous" / "next".
            var navigation = new Choices(new string[] { "previous", "next" });
            _speechRecognitionEngine.LoadGrammarAsync(new Grammar(new GrammarBuilder(navigation)));

            // 8 kHz, 16-bit mono — matches the telephony audio of the flow.
            var format = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);
            _speechRecognitionEngine.SetInputToAudioStream(audio, format);
            _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
        private void initializeSpeech()
        {
            inSpeech = true;
            System.Console.Write("Initialize speech");

            // Fix: FirstOrDefault() returns null when no recognizer is installed,
            // which previously surfaced as a NullReferenceException on ri.Id.
            SS.Recognition.RecognizerInfo ri = SS.Recognition.SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault();
            if (ri == null)
            {
                throw new InvalidOperationException("No speech recognizer is installed on this machine.");
            }
            sre = new SpeechRecognitionEngine(ri.Id);

            // Grammar: the word "Guess" followed by a single letter A-Z.
            Choices letters = new Choices(new string[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" });

            GrammarBuilder gb = new GrammarBuilder("Guess");
            gb.Append(letters);

            Grammar grammar = new Grammar(gb);
            grammar.Name = "DisK of Demise";

            sre.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
            sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
            sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);

            // NOTE(review): input selection stays disabled, as in the original —
            // the engine has no audio source until SetInputToDefaultAudioDevice()
            // (or equivalent) is called elsewhere. Confirm that is intended.
            //sre.SetInputToDefaultAudioDevice();
            sre.LoadGrammarAsync(grammar);
        }
        // Creates, configures and starts a continuous recognizer for the given
        // culture and vocabulary, forwarding recognitions to the supplied callback.
        private SpeechRecognitionEngine CreateSRE(string culture, Choices choices, Action<object, SpeechRecognizedEventArgs> speechRecognizedEvent)
        {
            var engine = new SpeechRecognitionEngine(new CultureInfo(culture));

            // Wrap the vocabulary in a grammar and hand it to the engine.
            var builder = new GrammarBuilder();
            builder.Append(choices);
            engine.LoadGrammarAsync(new Grammar(builder));

            // End-of-utterance detection tuning.
            engine.EndSilenceTimeout = TimeSpan.FromSeconds(1);
            engine.EndSilenceTimeoutAmbiguous = TimeSpan.FromSeconds(.5);

            // Forward recognitions to the caller-supplied handler and start
            // continuous recognition on the default microphone.
            engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechRecognizedEvent);
            engine.SetInputToDefaultAudioDevice();
            engine.RecognizeAsync(RecognizeMode.Multiple);

            return engine;
        }
Exemple #43
0
        private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
        {
            // Nothing to do unless voice control has been switched on.
            if (controlvar == 0)
            {
                return;
            }

            using (SpeechRecognitionEngine recognizer =
                   new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-IN")))
            {
                // Command shape: "Sam" followed by one of the question words.
                GrammarBuilder findServices = new GrammarBuilder("Sam");
                findServices.Append(new Choices(new string[] { "What", "Where", "Describe" }));

                // Load the grammar and wire up recognition.
                recognizer.LoadGrammarAsync(new Grammar(findServices));
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);

                // Default microphone, continuous asynchronous recognition.
                recognizer.SetInputToDefaultAudioDevice();
                recognizer.RecognizeAsync(RecognizeMode.Multiple);

                // Keep the worker (and hence the recognizer) alive indefinitely.
                for (; ; )
                {
                    Console.ReadLine();
                }
            }
        }
Exemple #44
0
        // Builds the recognition vocabulary from the phrase dictionaries and
        // loads it into the supplied engine. Each Choices mirrors one phrase
        // dictionary; only the phrase text (the key) goes into the grammar.
        private void LoadGrammar(SpeechRecognitionEngine speechRecognitionEngine)
        {
            startListeningChoices = new Choices();
            foreach (var phrase in this.startListeningPhrases)
            {
                startListeningChoices.Add(phrase.Key);
            }

            stopListeningChoices = new Choices();
            foreach (var phrase in this.stopListeningPhrases)
            {
                stopListeningChoices.Add(phrase.Key);
            }

            booleanChoices = new Choices();
            foreach (var phrase in this.booleanPhrases)
            {
                booleanChoices.Add(phrase.Key);
            }

            kinectMotorChoices = new Choices();
            foreach (var phrase in this.kinectMotorPhrases)
            {
                kinectMotorChoices.Add(phrase.Key);
            }

            startScreenChoices = new Choices();
            foreach (var phrase in this.startScreenPhrases)
            {
                startScreenChoices.Add(phrase.Key);
            }

            instrumentChoices = new Choices();
            foreach (var phrase in this.instrumentPhrases)
            {
                instrumentChoices.Add(phrase.Key);
            }

            wallChoices = new Choices();
            foreach (var phrase in this.wallPhrases)
            {
                wallChoices.Add(phrase.Key);
            }

            /*
             * ADD NEW GRAMMARS HERE
             * Copy code from above, and place it just above this comment
             * Amend "allChoices" to add the new dictionary
             * Add to "allDicts" further down
             */

            // NOTE(review): only startScreenChoices and kinectMotorChoices are
            // added here; stopListening/boolean/instrument/wall choices are
            // built above but never enter any grammar in this method — confirm
            // whether they are consumed elsewhere or should be added too.
            var allChoices = new Choices();
            allChoices.Add(startScreenChoices);
            allChoices.Add(kinectMotorChoices);

            // This is needed to ensure that it will work on machines with any culture, not just en-us.
            var gb = new GrammarBuilder(startListeningChoices) { Culture = speechRecognitionEngine.RecognizerInfo.Culture };
            gb.Append(allChoices);

            // g  : "<start phrase> <command>" grammar.
            // g2 : start phrases alone (wake-word-only recognition).
            var g = new Grammar(gb);
            var g2 = new Grammar(startListeningChoices);
            speechRecognitionEngine.LoadGrammarAsync(g);
            speechRecognitionEngine.LoadGrammarAsync(g2);
            speechRecognitionEngine.SpeechRecognized += this.SreSpeechRecognized;
            speechRecognitionEngine.SpeechHypothesized += this.SreSpeechHypothesized;
            speechRecognitionEngine.SpeechRecognitionRejected += this.SreSpeechRecognitionRejected;
        }
        // Only method publicly available: builds every grammar, wires its
        // recognized/response handlers, and loads them all into the supplied
        // speech recognition engine.
        //
        // Fix: the grammar builders were previously invoked through
        // Task.Factory.StartNew(...).Result (and one inconsistent
        // Task.Run(...).Result), which still blocks the calling thread while
        // adding a pointless thread-pool hop and wrapping any builder
        // exception in an AggregateException. The builders are now called
        // directly and uniformly.
        public static void GrammarLoader(ref SpeechRecognitionEngine sre)
        {
            speechEngine = sre;

            #region Grammar for Basic Commands
            Grammar basicGrammar = BasicGrammar();
            basicGrammar.Name = "basicGrammar";
            basicGrammar.Priority = 10;
            basicGrammar.SpeechRecognized += BasicGrammar_SpeechRecognized;
            BasicResponse += ResponseGenerator.BasicGrammar_ResponseHandler;
            #endregion

            #region Grammar for Primary Commands
            Grammar primaryGrammar = PrimaryGrammar();
            primaryGrammar.Name = "primaryGrammar";
            primaryGrammar.Priority = 12;
            primaryGrammar.SpeechRecognized += PrimaryGrammar_SpeechRecognized;
            PrimaryResponse += ResponseGenerator.PrimaryGrammar_ResponseHandler;
            #endregion

            #region Grammar for Open Type commands
            Grammar open_typeGrammar = OpenCommandGrammar();
            open_typeGrammar.Name = "open_typeGrammar";
            open_typeGrammar.Priority = 8;
            open_typeGrammar.SpeechRecognized += Open_typeGrammar_SpeechRecognized;
            Open_SearchTypeResponse += ResponseGenerator.Open_SearchType_ResponseHandler;
            #endregion

            #region Grammar for Response Box
            Grammar responseBoxGrammar = ResponseBoxSelection();
            responseBoxGrammar.Name = "responseBoxGrammar";
            responseBoxGrammar.Priority = 5;
            responseBoxGrammar.SpeechRecognized += ResponseBoxGrammar_SpeechRecognized;
            ResponseBoxResponse += ResponseGenerator.ResponseBox_ResponseHandler;
            #endregion

            #region Grammar for NonOperative Commands
            Grammar nonOperative = NonOperative();
            nonOperative.Name = "NonOperativeCommands";
            nonOperative.Priority = 4;
            nonOperative.SpeechRecognized += NonOperative_SpeechRecognized;
            NonOperativeResponse += ResponseGenerator.NonOperational_ResponseHandler;
            #endregion

            #region Grammar for UI commands
            Grammar uiGrammar = UIGrammar();
            uiGrammar.Name = "UIGrammar";
            uiGrammar.Priority = 3;
            uiGrammar.SpeechRecognized += UiGrammar_SpeechRecognized;
            UIResponse += ResponseGenerator.UI_ResponseHandler;
            #endregion

            #region Grammar for Close command
            Grammar closeProgramGrammar = CloseProgramGrammar();
            closeProgramGrammar.Name = "closeProgramGrammar";
            closeProgramGrammar.Priority = 6;
            closeProgramGrammar.SpeechRecognized += CloseProgramGrammar_SpeechRecognized;
            CloseProgramResponse += ResponseGenerator.CloseProgram_ResponseHandler;
            #endregion

            #region Loading all Grammars in the SRE
            //loading all the grammars into the S.R.E
            speechEngine.LoadGrammarAsync(primaryGrammar);
            speechEngine.LoadGrammarAsync(basicGrammar);
            speechEngine.LoadGrammarAsync(open_typeGrammar);
            speechEngine.LoadGrammarAsync(responseBoxGrammar);
            speechEngine.LoadGrammarAsync(nonOperative);
            speechEngine.LoadGrammarAsync(uiGrammar);
            speechEngine.LoadGrammarAsync(closeProgramGrammar);
            #endregion
        }
        // In-process recognizer for the circuit-design voice commands:
        // "Clear", "Command", "Insert <gate> R <row> C <column>" and
        // "Connect output <col> <row> to input <col> <row>".
        public void RecognizeSpeech()
        {
            var recognizer = new SpeechRecognitionEngine();

            recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
            recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
            recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
            recognizer.SetInputToDefaultAudioDevice();

            // Shared vocabularies.
            var gates = new Choices(new string[] { "and", "or", "not", "exor", "nor", "nand" });
            var columns = new Choices(new string[] { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" });
            var rows = new Choices(new string[] { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" });

            // "Insert <gate> R <row> C <column>"
            var insert = new GrammarBuilder("Insert");
            insert.Append(gates);
            insert.Append("R");
            insert.Append(rows);
            insert.Append("C");
            insert.Append(columns);

            // "Connect output <col> <row> to input <col> <row>"
            var connect = new GrammarBuilder("Connect");
            connect.Append("output");
            connect.Append(columns);
            connect.Append(rows);
            connect.Append("to");
            connect.Append("input");
            connect.Append(columns);
            connect.Append(rows);

            recognizer.LoadGrammarAsync(new Grammar(new GrammarBuilder("Clear")));
            recognizer.LoadGrammarAsync(new Grammar(insert));
            recognizer.LoadGrammarAsync(new Grammar(connect));
            recognizer.LoadGrammarAsync(new Grammar(new GrammarBuilder("Command")));

            // Synchronous recognition loop: each Recognize() call blocks until
            // one utterance (or a timeout) completes, then we listen again.
            while (true)
            {
                recognizer.Recognize();
            }
        }
 /// <summary>
 /// Registers a new microphone instance and wires it to a dedicated
 /// speech recognition engine fed by the client's audio stream.
 /// </summary>
 /// <param name="instance">The instance id of the microphone</param>
 /// <param name="client">The UDP client supplying the audio stream</param>
 /// <param name="status">The status of the microphone</param>
 /// <param name="shouldBeOn">Whether the speech recognition engine should be turned on</param>
 public void AddInputMic(string instance, UDPClient client, string status, bool shouldBeOn)
 {
     try
     {
         // 16 kHz, 16-bit mono audio from the UDP client.
         var engine = new SpeechRecognitionEngine(new CultureInfo("en-US"));
         engine.SetInputToAudioStream(client.AudioStream, new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
         engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(RecognitionHandler);
         engine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(RecognitionRejectedHandler);

         // Free-form dictation grammar, always enabled, loaded synchronously.
         var dictation = new DictationGrammar("grammar:dictation");
         dictation.Name = "dictation";
         dictation.Enabled = true;
         engine.LoadGrammar(dictation);

         mics.Add(instance, new Microphone(engine, client, status, shouldBeOn, port));

         // Also load every registered custom grammar.
         foreach (var g in grammars)
         {
             engine.LoadGrammarAsync(new CombinedGrammar(g.Key, g.Value).compiled);
         }

         if (shouldBeOn)
         {
             engine.RecognizeAsync(RecognizeMode.Multiple);
         }
     }
     catch (IOException)
     {
         // Negotiating the connection with the mic failed; the mic is simply
         // not registered (intentional best-effort behavior).
     }
 }
Exemple #48
0
 private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
 {
     // Run only when the current or previous control flag is set.
     if (controlvar == 0 && lastcontrolvar == 0)
     {
         return;
     }

     using (var recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-IN")))
     {
         // Command shape: "Navi" followed by one of the question words.
         var findServices = new GrammarBuilder("Navi");
         findServices.Append(new Choices(new string[] { "Where", "Read", "Repeat" }));
         recognizer.LoadGrammarAsync(new Grammar(findServices));

         recognizer.SpeechRecognized +=
           new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);

         // Default microphone, continuous asynchronous recognition.
         recognizer.SetInputToDefaultAudioDevice();
         recognizer.RecognizeAsync(RecognizeMode.Multiple);

         // Block forever so continuous recognition keeps running.
         while (true)
         {
             Console.ReadLine();
         }
     }
 }