Ejemplo n.º 1
0
        private void Voice_Display_Load(object sender, EventArgs e)
        {
            // Password recognition: a small fixed-phrase grammar.
            var passwordChoices = new Choices();
            passwordChoices.Add(new string[] { "Hello", "Yellow Orangotang", "Green", "Lemur Foxtrot", "Blue" });

            var passwordBuilder = new GrammarBuilder();
            passwordBuilder.Append(passwordChoices);

            speechRecognitionEngine.LoadGrammarAsync(new Grammar(passwordBuilder));
            speechRecognitionEngine.SetInputToDefaultAudioDevice();
            speechRecognitionEngine.SpeechRecognized          += speechRecognizer_SpeedRecognized;
            speechRecognitionEngine.SpeechRecognitionRejected += speechRecognizer_SpeedNotRecgonized;

            // Free-form dictation engine for the actual speech content.
            // TODO: Make this work better to only pick up when password is given
            actualSpeechRecognitionEngine.SetInputToDefaultAudioDevice();
            actualSpeechRecognitionEngine.LoadGrammar(new DictationGrammar());
            actualSpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            actualSpeechRecognitionEngine.SpeechRecognized += ActualSpeech_Recognized;
        }
Ejemplo n.º 2
0
        /// <summary>
        /// Starts continuous dictation on a fresh engine, routing results to
        /// <c>SpeechRecognized</c>. Clears the text box first.
        /// </summary>
        public void getVoice()
        {
            try
            {
                textBox.Text = "";
                dictation    = new DictationGrammar();
                sr           = new SpeechRecognitionEngine();
                sr.LoadGrammar(dictation);
                sr.SetInputToDefaultAudioDevice();

                // Subscribe BEFORE starting recognition so no early result can be
                // missed. (The original also unsubscribed these handlers first,
                // but the engine is freshly constructed, so that was dead code.)
                sr.SpeechRecognized          += new EventHandler <SpeechRecognizedEventArgs>(SpeechRecognized);
                sr.EmulateRecognizeCompleted += new EventHandler <EmulateRecognizeCompletedEventArgs>(EmulateRecognizeCompletedHandler);

                sr.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                // Best-effort: swallow audio-device failures (e.g. no microphone).
                // NOTE(review): consider logging the exception instead of discarding it.
            }
        }
Ejemplo n.º 3
0
        /// <summary>
        /// Voice chat loop: listens on the default microphone, forwards each
        /// utterance to Cleverbot, and speaks the reply.
        /// </summary>
        static void Main(string[] args)
        {
            Cleverbot cleverbot = new Cleverbot("API_KEY");

            Console.WriteLine("Speak Now");
            SpeechSynthesizer synthesizer = new SpeechSynthesizer();

            synthesizer.SelectVoiceByHints(VoiceGender.Female, VoiceAge.Teen); // to change VoiceGender and VoiceAge check out those links below
            synthesizer.Volume = 100;                                          // (0 - 100)
            synthesizer.Rate   = 0;                                            // (-10 - 10)

            // Create the recognizer ONCE: the original allocated a new engine
            // (and grammar) on every loop iteration without disposing it,
            // leaking audio resources.
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
            {
                recognizer.LoadGrammar(new DictationGrammar());
                recognizer.SetInputToDefaultAudioDevice();
                while (true)
                {
                    // Recognize() returns null when nothing intelligible was heard.
                    RecognitionResult result = recognizer.Recognize();
                    string res = result == null ? "did you get that?" : result.Text;
                    Console.WriteLine(res);
                    string question = cleverbot.Ask(res);
                    Console.WriteLine("Cleverbot: " + question);
                    synthesizer.SpeakAsync(question);
                }
            }
        }
Ejemplo n.º 4
0
        /// <summary>
        /// Reason : To get speech to text data for given no of files.
        /// Files are expected at audioFilePath + i + ".wav" for numbered indices.
        /// </summary>
        /// <param name="audioFilePath">Directory/prefix of the numbered .wav files</param>
        /// <param name="noOfAudioFiles">Number of audio files</param>
        /// <param name="audioMessage">Receives the accumulated transcription</param>
        private void SpeechToText(string audioFilePath, int noOfAudioFiles, ref string audioMessage)
        {
            _recognizer = new SpeechRecognitionEngine();
            Grammar dictationGrammar = new DictationGrammar();

            _recognizer.LoadGrammar(dictationGrammar);
            audioContentMessage = "";
            try
            {
                // NOTE(review): the loop runs 1 .. noOfAudioFiles-1, so the last
                // file is never processed — confirm whether `i <= noOfAudioFiles`
                // was intended.
                for (int i = 1; i < noOfAudioFiles; i++)
                {
                    // Copy the loop variable before capturing it: C# shares a single
                    // `for` variable across iterations, so a task that outlives
                    // Wait(timeSpan) (on timeout) would otherwise observe a mutated i.
                    int fileIndex = i;
                    try
                    {
                        Task task = Task.Factory.StartNew(() => codeBlock(audioFilePath + fileIndex + ".wav", noOfAudioFiles, _recognizer));
                        task.Wait(timeSpan);
                    }
                    catch
                    {
                        // Best-effort per file: a failed or timed-out file is skipped.
                    }
                }
                audioMessage = audioContentMessage;
            }
            catch (InvalidOperationException)
            {
                audioMessage = "Could not recognize input audio.\r\n";
            }
            finally
            {
                _recognizer.UnloadAllGrammars();
            }
        }
Ejemplo n.º 5
0
        //Notiz Methoden

        /*
         * Declares and initializes a new (German) speech recognition engine.
         * Responsible for feeding everything it hears into the note (via
         * recognizer_SpeechRecognized_Notiz) for as long as notizInput == true.
         */
        public void createNotizText()
        {
            DictationGrammar tempGrammar = new DictationGrammar();

            using (
                SpeechRecognitionEngine recognizer =
                    new SpeechRecognitionEngine(
                        new System.Globalization.CultureInfo("de-DE")))
            {
                recognizer.LoadGrammar(tempGrammar);

                recognizer.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(recognizer_SpeechRecognized_Notiz);

                recognizer.SetInputToDefaultAudioDevice();

                recognizer.RecognizeAsync(RecognizeMode.Multiple);

                // Keep the recognizer alive until note input ends. The original
                // busy-waited in an empty loop, pinning a CPU core; a short sleep
                // keeps the wait cheap.
                while (notizInput)
                {
                    System.Threading.Thread.Sleep(50);
                }
            }
        }
Ejemplo n.º 6
0
        /// <summary>Adds a free-form dictation grammar to the shared recognizer
        /// and routes its results to <c>Dictation_SpeechRecognized</c>.</summary>
        public void loadDictationGrammar()
        {
            var dictationGrammar = new DictationGrammar();
            R_recognizer.LoadGrammar(dictationGrammar);
            R_recognizer.SpeechRecognized += Dictation_SpeechRecognized;
        }
Ejemplo n.º 7
0
        /// <summary>Creates the shared recognizer preloaded with a dictation grammar.</summary>
        public TranslationResource()
        {
            _recognizer = new SpeechRecognitionEngine();
            _recognizer.LoadGrammar(new DictationGrammar());
        }
 /// <summary>
 /// Transcribes the given .wav file and writes the recognized text to
 /// <paramref name="file"/>.
 /// </summary>
 /// <param name="input">Wave file to transcribe.</param>
 /// <param name="file">Destination path for the transcription.</param>
 public void GenerateText(FileInfo input, string file)
 {
     using (var engine = new SpeechRecognitionEngine())
     {
         engine.LoadGrammar(new DictationGrammar());
         engine.SetInputToWaveFile(input.FullName);
         // NOTE(review): these TimeSpan ctors take *ticks* (100 ns units), so
         // int.MaxValue is only ~3.5 minutes — confirm that was intended.
         engine.BabbleTimeout              = new TimeSpan(int.MaxValue);
         engine.InitialSilenceTimeout      = new TimeSpan(int.MaxValue);
         engine.EndSilenceTimeout          = new TimeSpan(100000000);
         engine.EndSilenceTimeoutAmbiguous = new TimeSpan(100000000);

         var transcript = new StringBuilder();
         while (true)
         {
             RecognitionResult chunk;
             try
             {
                 // Recognize() returns null once the file is exhausted.
                 chunk = engine.Recognize();
             }
             catch (Exception)
             {
                 break;
             }
             if (chunk == null)
             {
                 break;
             }
             transcript.Append(chunk.Text);
         }
         File.WriteAllText(file, transcript.ToString());
     }
 }
Ejemplo n.º 9
0
        // Initial wiring performed once the window has loaded.
        private void MainWindow_Loaded(object sender, RoutedEventArgs e)
        {
            // Command engine: fixed set of voice commands.
            var commandPhrases = new Choices();
            commandPhrases.Add(new string[] { "create new mail", "open sent mails", "open received mails", "open deleted mails", "open starred mails" });

            var commandBuilder = new GrammarBuilder();
            commandBuilder.Append(commandPhrases);

            speechRecMain.LoadGrammarAsync(new Grammar(commandBuilder));
            speechRecMain.SetInputToDefaultAudioDevice();
            speechRecMain.SpeechRecognized += SpeechRecMain_SpeechRecognized;

            // Dictation engine used while composing a new mail.
            speechRecNewMail.LoadGrammarAsync(new DictationGrammar());
            speechRecNewMail.SetInputToDefaultAudioDevice();
            speechRecNewMail.SpeechRecognized += SpeechRecNewMail_SpeechRecognized;

            // Text-to-speech output for new mail.
            synthNewMail.SetOutputToDefaultAudioDevice();
            StopNewMail_Button.IsEnabled = false;
        }
Ejemplo n.º 10
0
        /// <summary>
        /// Wires up the synthesizer/recognizer pair, creating defaults for any
        /// argument left null (en-US engine with default + spelling dictation).
        /// </summary>
        public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
        {
            SessionStorage = new SessionStorage();

            if (speechSynthesizer == null)
            {
                speechSynthesizer = new SpeechSynthesizer();
                speechSynthesizer.SetOutputToDefaultAudioDevice();
            }
            _speechSynthesizer = speechSynthesizer;

            if (speechRecognition == null)
            {
                speechRecognition = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));

                // Default free-form dictation.
                var defaultDictation = new DictationGrammar { Name = "default dictation", Enabled = true };
                speechRecognition.LoadGrammar(defaultDictation);

                // Letter-by-letter spelling dictation.
                var spellingDictation = new DictationGrammar("grammar:dictation#spelling") { Name = "spelling dictation", Enabled = true };
                speechRecognition.LoadGrammar(spellingDictation);

                // Configure input to the speech recognizer.
                speechRecognition.SetInputToDefaultAudioDevice();
            }
            _speechRecognition = speechRecognition;
        }
Ejemplo n.º 11
0
        /// <summary>
        /// Builds a dictation recognizer on the default audio device and forwards
        /// recognized text to <c>OnRecognize</c> subscribers.
        /// NOTE(review): nothing here starts recognition; RecognizeAsync only runs
        /// after the audio state drops to Stopped — confirm a caller kicks it off.
        /// </summary>
        public SpeechListener()
        {
            _recognizer = new SpeechRecognitionEngine();
            _recognizer.LoadGrammar(new DictationGrammar());
            _recognizer.SetInputToDefaultAudioDevice();

            _recognizer.SpeechRecognized += (object s, SpeechRecognizedEventArgs a) =>
            {
                try
                {
                    // Snapshot the delegate before invoking.
                    var handler = OnRecognize;
                    if (handler != null)
                    {
                        handler(a.Result.Text, new EventArgs());
                    }
                }
                catch
                {
                    // A throwing subscriber must not kill the recognition loop.
                }
            };

            _recognizer.AudioStateChanged += (object s, AudioStateChangedEventArgs a) =>
            {
                // Restart listening whenever the audio stream stops.
                if (a.AudioState == AudioState.Stopped)
                {
                    _recognizer.RecognizeAsync();
                }
            };
        }
Ejemplo n.º 12
0
        /// <summary>
        /// Runs dictation recognition over the supplied in-memory wave stream and
        /// returns the transcription accumulated by the event handlers.
        /// </summary>
        /// <param name="audioFile">Wave-format audio to transcribe.</param>
        /// <returns>The text captured in <c>TranscribedText</c>.</returns>
        private string Transcribe(MemoryStream audioFile)
        {
            using (var recognizer = new SpeechRecognitionEngine())
            {
                // Create and load a grammar.
                var dictation = new DictationGrammar
                {
                    Name = "Dictation Grammar"
                };

                recognizer.LoadGrammar(dictation);

                // Configure the input to the recognizer.
                recognizer.SetInputToWaveStream(audioFile);

                // Attach event handlers for the results of recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
                recognizer.RecognizeCompleted +=
                  new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

                // Perform recognition on the entire stream.
                Console.WriteLine("Starting asynchronous recognition...");
                completed = false;
                recognizer.RecognizeAsync(RecognizeMode.Single);

                // Wait for RecognizeCompleted. The original spun in an empty loop,
                // pinning a CPU core; a short sleep keeps the wait cheap.
                while (!completed)
                {
                    System.Threading.Thread.Sleep(10);
                }
            }
            return TranscribedText;
        }
Ejemplo n.º 13
0
        public MainForm()
        {
            InitializeComponent();

            // Dictation grammar used whenever the form switches to dictation mode.
            dictationGrammar = new DictationGrammar();
        }
Ejemplo n.º 14
0
        /// <summary>
        /// Recognizes speech from the given wave file with an in-process engine;
        /// results are delivered through recognizer_SpeechRecognized and completion
        /// through recognizer_RecognizeCompleted.
        /// </summary>
        /// <param name="path">Path of the .wav file to transcribe.</param>
        public static void ToText(string path)
        {
            using (SpeechRecognitionEngine recognizer =
                       new SpeechRecognitionEngine())
            {
                // Create and load a grammar.
                Grammar dictation = new DictationGrammar();
                dictation.Name = "Dictation Grammar";

                recognizer.LoadGrammar(dictation);

                // Configure the input to the recognizer.
                recognizer.SetInputToWaveFile(path);

                // Attach event handlers for the results of recognition.
                recognizer.SpeechRecognized +=
                    new EventHandler <SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
                recognizer.RecognizeCompleted +=
                    new EventHandler <RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

                // Perform recognition on the entire file.
                Console.WriteLine("Starting asynchronous recognition...");
                completed = false;
                recognizer.RecognizeAsync();

                // Wait for completion without pegging a CPU core (the original
                // spun in a tight `continue` loop).
                while (!completed)
                {
                    System.Threading.Thread.Sleep(10);
                }
            }
        }
Ejemplo n.º 15
0
        /// <summary>
        /// One-shot dictation: listens on the default microphone and puts the
        /// recognized text into textBox1.
        /// </summary>
        private void button1_Click(object sender, EventArgs e)
        {
            // Dispose the engine when done (the original leaked it).
            using (SpeechRecognitionEngine s = new SpeechRecognitionEngine())
            {
                s.LoadGrammar(new DictationGrammar());

                try
                {
                    s.SetInputToDefaultAudioDevice();
                    RecognitionResult result = s.Recognize();
                    // Recognize() returns null when nothing was recognized; the
                    // original dereferenced result.Text unconditionally (NRE).
                    if (result != null)
                    {
                        textBox1.Text = result.Text;
                    }
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.Message);
                }
                finally
                {
                    s.UnloadAllGrammars();
                }
            }
        }
Ejemplo n.º 16
0
        /// <summary>
        /// Hides the console, registers the app for auto-start, arms the periodic
        /// timer, and starts continuous dictation recognition.
        /// </summary>
        static void Main(string[] args)
        {
            const int SW_HIDE = 0;
            // const int SW_SHOW = 5;   // value to show the window again
            var handle = GetConsoleWindow();

            // Hide the console window.
            ShowWindow(handle, SW_HIDE);
            // Show:
            // ShowWindow(handle, SW_SHOW);

            // Auto-start when the PC boots. OpenSubKey returns null when the key
            // is missing/inaccessible; the original dereferenced it without a
            // check and never disposed the handle.
            using (RegistryKey rk = Registry.CurrentUser.OpenSubKey
                       ("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run", true))
            {
                if (rk != null)
                {
                    rk.SetValue("speechRecognizeJarvis", Application.ExecutablePath);
                }
            }

            // Run every 30 minutes.
            TimerSınıf tnesne = new TimerSınıf();

            tnesne.zaman(30, 1);

            activate = false;
            t        = 0;
            t2       = 0;
            engine   = new SpeechRecognitionEngine();
            engine.SetInputToDefaultAudioDevice();
            engine.LoadGrammar(new DictationGrammar());

            // Subscribe BEFORE starting so the first utterance is not missed.
            engine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(engine_SpeechRecognized);
            engine.RecognizeAsync(RecognizeMode.Multiple);
            Console.ReadLine();
        }
Ejemplo n.º 17
0
        /// <summary>
        /// Round-trips <paramref name="time"/> through German TTS into temp.wav,
        /// then runs German dictation recognition over that file; results are
        /// collected by the Understander_* event handlers into ToRet.
        /// </summary>
        private static void Test(string time)
        {
            if (File.Exists("temp.wav"))
            {
                File.Delete("temp.wav");
            }

            using (var synthesizer = new SpeechSynthesizer())
            {
                var culture = new PromptBuilder(new CultureInfo("de-DE"));
                culture.AppendText(time);
                synthesizer.SetOutputToWaveFile("temp.wav");
                synthesizer.Speak(culture);
            }

            using (var understander = new SpeechRecognitionEngine(new CultureInfo("de-DE")))
            {
                understander.RecognizeCompleted += Understander_RecognizeCompleted;
                understander.SpeechRecognized   += Understander_SpeechRecognized;
                var grammar = new DictationGrammar {
                    Name = "Dictation Grammar"
                };
                understander.LoadGrammar(grammar);
                Completed = false;
                ToRet     = string.Empty;
                understander.SetInputToWaveFile("temp.wav");

                understander.RecognizeAsync();

                // Wait for RecognizeCompleted without pegging a CPU core (the
                // original spun in an empty busy-loop).
                while (!Completed)
                {
                    System.Threading.Thread.Sleep(10);
                }
            }
        }
Ejemplo n.º 18
0
        /// <summary>
        /// One-shot recognition from the microphone (checkbox checked) or from a
        /// pre-recorded wave file, appending a timestamped line to the dialog box.
        /// </summary>
        private void button3_Click(object sender, EventArgs e)
        {
            // Dispose the engine when done (the original leaked it).
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
            {
                recognizer.LoadGrammar(new DictationGrammar());
                if (checkBox1.Checked == true)
                {
                    recognizer.SetInputToDefaultAudioDevice();
                }
                else
                {
                    recognizer.SetInputToWaveFile(@"C:\Users\hardy\source\repos\Catherine\Catherine\bin\Debug\Sound_File.wav");
                }

                RecognitionResult result = recognizer.Recognize();

                recognizer.UnloadAllGrammars();

                // Recognize() returns null when nothing was recognized; the original
                // dereferenced result.Text unconditionally (NRE) and also passed a
                // stray, unused Dialog_box.Text argument to String.Format.
                if (result != null)
                {
                    Dialog_box.Text += $"({DateTime.Now}) User said: {result.Text}\n";
                }
                ///
                /// Future: persist the dialog contents to a file
                ///
                //FileStream fs = new FileStream(@"C:\Users\hardy\source\repos\Catherine\Catherine\bin\Debug\Test.txt", FileMode.OpenOrCreate, FileAccess.ReadWrite);
                //byte[] arr = System.Text.Encoding.Default.GetBytes(Dialog_box.Text);
                //fs.Write(arr, 0, arr.Length);
            }
        }
Ejemplo n.º 19
0
        private void Form1_Load(object sender, EventArgs e)
        {
            // Hook free-form dictation into the shared engine when the form opens.
            engine.LoadGrammar(new DictationGrammar());
            engine.SpeechRecognized += Engine_SpeechDetected;
        }
Ejemplo n.º 20
0
        /// <summary>
        /// Recognizes speech from the given wave file; recognized phrases are
        /// handled by recognizer_SpeechRecognized.
        /// NOTE(review): Recognize() is the blocking API, so the RecognizeCompleted
        /// handler and `completed` flag look vestigial here — confirm before removing.
        /// </summary>
        private void ReadFromAudioFile(string saveFileName)
        {
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
            {
                // Free-form dictation grammar.
                var dictation = new DictationGrammar();
                dictation.Name = "Dictation Grammar";
                recognizer.LoadGrammar(dictation);

                // Read input from the recorded file.
                recognizer.SetInputToWaveFile(saveFileName);

                // Result / completion callbacks.
                recognizer.SpeechRecognized +=
                    new EventHandler <SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
                recognizer.RecognizeCompleted +=
                    new EventHandler <RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

                txt      += DateTime.Now + ": Starting asynchronous recognition...\n";
                completed = false;
                recognizer.Recognize();
            }

            Console.WriteLine();
            Console.WriteLine("Press any key to exit...");
            //Console.ReadKey();
        }
Ejemplo n.º 21
0
        /// <summary>
        /// Console speech-to-text: prompts the user, and on "r" performs one
        /// recognition pass from the default microphone (en-GB).
        /// </summary>
        static void Main(string[] args)
        {
            //title
            Console.WriteLine("win Speech to Text converter");
            Console.Write(new string('*', 29)); // same 29-star banner as the original 0..28 loop

            Console.WriteLine("\n");
            Console.WriteLine("type record");
            string inputType = Console.ReadLine();

            if (inputType == "r")
            {
                Console.WriteLine("recognizing...");
                SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"));
                Grammar grammer = new DictationGrammar();
                speechRecognitionEngine.LoadGrammar(grammer);

                try
                {
                    speechRecognitionEngine.SetInputToDefaultAudioDevice();
                    RecognitionResult recognitionResult = speechRecognitionEngine.Recognize();
                    // Recognize() returns null when nothing was heard; the original
                    // threw a NullReferenceException on .Text in that case.
                    if (recognitionResult != null)
                    {
                        Console.WriteLine(recognitionResult.Text);
                    }
                }
                catch (Exception ex)
                {
                    Console.WriteLine("Something went wrong" + ex.StackTrace);
                }
            }

            Console.ReadLine();
        }
Ejemplo n.º 22
0
 /// <summary>
 /// Enable dictation mode in the given speech engine.
 /// </summary>
 /// <param name="speech">Speech recognition engine used.</param>
 public Dictate(SpeechRecognitionEngine speech)
 {
     Speech = speech;
     // Low weight so explicit command grammars win over free dictation.
     Grammar = new DictationGrammar
     {
         Name   = "dictation",
         Weight = 0.1f
     };
 }
Ejemplo n.º 23
0
 /// <summary>Loads every command grammar from gbLib, then a dictation fallback.</summary>
 private void loadGrammarAndCommands()
 {
     #region loadgrammars
     // Same grammars, same load order as before — just gathered into one list.
     Grammar[] commandGrammars =
     {
         gbLib.RandomNum(),       gbLib.RandomGame(),    gbLib.Search(),
         gbLib.WhoIs(),           gbLib.Opener(),        gbLib.Converter(),
         gbLib.Greeting(),        gbLib.Farewell(),      gbLib.ShowTime(),
         gbLib.KeyboardCommand(), gbLib.CheckInternet(), gbLib.Type(),
         gbLib.Complement(),      gbLib.Screenshot(),    gbLib.Reader(),
         gbLib.PlaySong(),        gbLib.PlayList(),      gbLib.MusicCommand(),
         gbLib.Weather(),
     };
     foreach (Grammar commandGrammar in commandGrammars)
     {
         speechRecognitionEngine.LoadGrammarAsync(commandGrammar);
     }
     #endregion
     speechRecognitionEngine.LoadGrammar(new DictationGrammar());
 }
Ejemplo n.º 24
0
        private readonly DictationGrammar m_grammar;           // free-form dictation grammar

        /// <summary>
        /// Picks the en-US engine with Id "MS-1033-80-DESK" if installed, otherwise
        /// falls back to the first available engine; then initializes recognition
        /// with the configured keywords.
        /// </summary>
        public Recognizer()
        {
            var englishUs = new CultureInfo("en-US");
            var installed = SpeechRecognitionEngine.InstalledRecognizers();

            if (installed.Count > 0)
            {
                // Scan all installed engines for the preferred US-English one.
                foreach (var config in installed)
                {
                    if (config.Culture.Equals(englishUs) && config.Id == "MS-1033-80-DESK")
                    {
                        m_recognizer = new SpeechRecognitionEngine(config);
                        break;
                    }
                }
                // No matching engine: use the first one installed.
                if (m_recognizer == null)
                {
                    m_recognizer = new SpeechRecognitionEngine(installed[0]);
                }
            }
            if (m_recognizer != null)
            {
                var keywordCollection = Settings.Default.Keywords;
                var keywords          = new string[keywordCollection.Count];
                keywordCollection.CopyTo(keywords, 0);
                InitializeSpeechRecognitionEngine(keywords); // initialize the recognition engine
                m_grammar = new DictationGrammar();
            }
            else
            {
                Console.WriteLine("创建语音识别失败");
            }
        }
Ejemplo n.º 25
0
        /// <summary>
        /// Initializes the clock labels, builds a command grammar from
        /// commands.txt, and starts continuous recognition plus a greeting.
        /// </summary>
        private void Form1_Load(object sender, EventArgs e)
        {
            // GUI time and date
            lbl_time.Text = DateTime.Now.ToShortTimeString();
            lbl_date.Text = DateTime.Now.ToLongDateString();

            tab_control.SelectTab(tab_text);

            // Commands: one phrase per line of commands.txt next to the executable.
            Choices commands = new Choices();
            string  path     = Directory.GetCurrentDirectory() + "\\commands.txt";

            commands.Add(File.ReadAllLines(path));

            // Grammar built from the command list.
            GrammarBuilder gbuilder = new GrammarBuilder();

            gbuilder.Append(commands);

            Grammar grammar = new Grammar(gbuilder);
            // (Removed: an unused DictationGrammar local that was never loaded.)

            // Start the recognition engine.
            h.LoadGrammarAsync(grammar);

            h.SetInputToDefaultAudioDevice();
            h.SpeechRecognized += recEngine_SpeechRecognized;

            h.RecognizeAsync(RecognizeMode.Multiple);
            s.SelectVoiceByHints(VoiceGender.Female, VoiceAge.Adult);

            s.SpeakAsync("Wie kann ich dir helfen");
        }
Ejemplo n.º 26
0
        /// <summary>
        /// Initializes the conversation, creating a default synthesizer and/or an
        /// en-US recognizer (dictation + spelling grammars) when none are given.
        /// </summary>
        public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
        {
            SessionStorage = new SessionStorage();

            if (speechSynthesizer == null)
            {
                speechSynthesizer = new SpeechSynthesizer();
                speechSynthesizer.SetOutputToDefaultAudioDevice();
            }
            _speechSynthesizer = speechSynthesizer;

            _speechRecognition = speechRecognition ?? BuildDefaultRecognizer();
        }

        /// <summary>Builds the default en-US recognizer with dictation and spelling grammars.</summary>
        private static SpeechRecognitionEngine BuildDefaultRecognizer()
        {
            var recognizer = new SpeechRecognitionEngine(
                new System.Globalization.CultureInfo("en-US")
                );

            // Create a default dictation grammar.
            var defaultDictation = new DictationGrammar
            {
                Name    = "default dictation",
                Enabled = true
            };
            recognizer.LoadGrammar(defaultDictation);

            // Create the spelling dictation grammar.
            var spellingDictation = new DictationGrammar("grammar:dictation#spelling")
            {
                Name    = "spelling dictation",
                Enabled = true
            };
            recognizer.LoadGrammar(spellingDictation);

            // Configure input to the speech recognizer.
            recognizer.SetInputToDefaultAudioDevice();
            return recognizer;
        }
Ejemplo n.º 27
0
        private void Interface_Load(object sender, EventArgs e)
        {
            btn.BackColor = Color.WhiteSmoke;
            Host          = new Networker();

            // Primary command phrases.
            var knownInputs = new Choices();
            knownInputs.Add(new string[] { "Hello", "What are you?", "How are you?", "Good", "Bad", "Shutdown interface", "Yes", "No", "Calculate", "Goto a youtube channel", "Open google" });
            var knownBuilder = new GrammarBuilder();
            knownBuilder.Append(knownInputs);
            Grammar G = new Grammar(knownBuilder);

            // Secondary phrase set, loaded with a reduced weight.
            var extraPhrases = new Choices();
            extraPhrases.Add(new string[] { "The yogscast", "Hat films" });
            var extraBuilder = new GrammarBuilder();
            extraBuilder.Append(extraPhrases);
            Grammar F = new Grammar(extraBuilder);
            F.Weight = 0.45f;

            // Low-weight dictation fallback for anything else.
            Grammar Dict = new DictationGrammar();
            Dict.Weight = 0.3f;

            recEngine.LoadGrammarAsync(G);
            recEngine.LoadGrammarAsync(Dict);
            recEngine.LoadGrammarAsync(F);
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;
        }
Ejemplo n.º 28
0
        /// <summary>
        /// One-shot en-US dictation from the microphone, appended to the rich
        /// text box.
        /// </summary>
        private void button1_Click_3(object sender, EventArgs e)
        {
            // Dispose the engine when done (the original leaked it).
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US")))
            {
                recognizer.LoadGrammar(new DictationGrammar());
                try
                {
                    button1.Text = "Speak Now";
                    recognizer.SetInputToDefaultAudioDevice();
                    RecognitionResult result = recognizer.Recognize();
                    // Recognize() returns null on no input; the original would NRE here.
                    if (result != null)
                    {
                        // NOTE(review): the original nested check (text == "can you do"
                        // AND text == "for me") could never both hold, so its inner
                        // MessageBox was unreachable; that dead branch was removed.
                        richTextBox1.Text += "\n" + result.Text;
                    }
                }
                catch (InvalidOperationException exception)
                {
                    richTextBox1.Text = String.Format("Could not recognize input from default aduio device. Is a microphone or sound card available?\r\n{0} - {1}.", exception.Source, exception.Message);
                }
                finally
                {
                    recognizer.UnloadAllGrammars();
                }
            }
        }
Ejemplo n.º 29
0
 //---------------------------------------------------------------------
 /// <summary>
 /// Author: Darkstrumn:\created::160105.21
 /// Function: SpeechRecognizer provides voice prompts, where the user can be
 /// prompted aurally to respond verbally and the response is converted to a
 /// string and returned for further processing.
 /// </summary>
 /// <param name="str_voice_prompt">Prompt spoken to the user before listening.</param>
 /// <returns>The recognized text, or an empty string when nothing was heard.</returns>
 public static string SpeechRecognizer(string str_voice_prompt)
 {
     string recognizedText = "";
     SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
     recognizer.LoadGrammar(new DictationGrammar());

     // Speak the prompt, then listen for the spoken reply.
     BFS.Speech.TextToSpeech(str_voice_prompt);
     try
     {
         recognizer.SetInputToDefaultAudioDevice();
         RecognitionResult result = recognizer.Recognize();
         // Recognize() returns null when nothing was heard.
         if (result != null)
         {
             recognizedText = result.Text;
         }
         //<diagnostics>BFS.Speech.TextToSpeech(result.Text);
     }
     catch (InvalidOperationException exception)
     {
         BFS.Dialog.ShowMessageError("Error detected during sound acquisition: {SOURCE} - {MESSAGF}.".Replace("{SOURCE}", exception.Source).Replace("{MESSAGF}", exception.Message));
     }
     finally
     {
         recognizer.UnloadAllGrammars();
     }
     return recognizedText;
 }
        /// <summary>
        /// Rebuilds the recognizer's grammars from the given syllabus tracker:
        /// one command grammar plus a pronunciation-dictation catch-all.
        /// </summary>
        internal void LoadCurrentSyllabus(SyllabusTracker syllabusTracker)
        {
            // Not currently running recognition.
            if (_speechRecognitionEngine == null)
            {
                return;
            }

            _speechRecognitionEngine.RequestRecognizerUpdate();
            _speechRecognitionEngine.UnloadAllGrammars();

            // New choices consolidation for commands - one command per syllabus file line.
            var commandChoices = new Choices();
            foreach (var syllabus in syllabusTracker.Syllabi)
            {
                foreach (var command in syllabus.Commands)
                {
                    commandChoices.Add(command);
                }
            }

            // Add commands - should be per input language, but now English.
            VoiceCommands.AddCommands(commandChoices);

            var builder = new GrammarBuilder();
            builder.Append(commandChoices);
            _speechRecognitionEngine.LoadGrammar(new Grammar(builder) { Name = "Syllabus" });

            // Pronunciation-based dictation as a catch-all for unscripted speech.
            _speechRecognitionEngine.LoadGrammar(new DictationGrammar("grammar:dictation#pronunciation") { Name = "Random" });
        }
Ejemplo n.º 31
0
        /// <summary>
        /// Creates a recognition engine preloaded with three dictation
        /// grammars: default free text, letter-by-letter spelling, and a
        /// general grammar biased toward "How do you ..." questions.
        /// </summary>
        /// <returns>The configured <see cref="SpeechRecognitionEngine"/>.</returns>
        private SpeechRecognitionEngine LoadDictationGrammars()
        {
            // Default free-text dictation.
            var defaultDictation = new DictationGrammar
            {
                Name = "default dictation",
                Enabled = true
            };

            // Letter-by-letter spelling dictation.
            var spellingDictation = new DictationGrammar("grammar:dictation#spelling")
            {
                Name = "spelling dictation",
                Enabled = true
            };

            // General dictation used for question-style input.
            var questionDictation = new DictationGrammar("grammar:dictation")
            {
                Name = "question dictation",
                Enabled = true
            };

            // Build the engine and attach all three grammars.
            var recoEngine = new SpeechRecognitionEngine();
            recoEngine.LoadGrammar(defaultDictation);
            recoEngine.LoadGrammar(spellingDictation);
            recoEngine.LoadGrammar(questionDictation);

            // Bias the question grammar toward utterances starting "How do you".
            questionDictation.SetDictationContext("How do you", null);

            return recoEngine;
        }
Ejemplo n.º 32
0
        /// <summary>
        /// Transcribes a WAV file to text by repeatedly running synchronous
        /// dictation recognition until the stream is exhausted.
        /// </summary>
        /// <param name="wavPath">Path of the input wave file. Defaults to the
        /// original hard-coded location for backward compatibility.</param>
        /// <returns>The concatenated recognized text (possibly empty).</returns>
        public static string AudioToWord(string wavPath = @"c:\temp\apls.wav")
        {
            var sb = new StringBuilder();
            // Dispose the engine so the wave-file handle is released.
            using (var sre = new SpeechRecognitionEngine())
            {
                sre.LoadGrammar(new DictationGrammar());
                sre.SetInputToWaveFile(wavPath);
                // Effectively disable the timeouts so long silences inside the
                // recording do not abort recognition early.
                sre.BabbleTimeout              = new TimeSpan(Int32.MaxValue);
                sre.InitialSilenceTimeout      = new TimeSpan(Int32.MaxValue);
                sre.EndSilenceTimeout          = new TimeSpan(100000000);
                sre.EndSilenceTimeoutAmbiguous = new TimeSpan(100000000);

                while (true)
                {
                    try
                    {
                        var recText = sre.Recognize();
                        if (recText == null)
                        {
                            break; // end of stream
                        }
                        sb.Append(recText.Text);
                    }
                    catch (Exception)
                    {
                        // Recognize throws when the input is exhausted/invalid;
                        // treat that as end of input rather than failing the call.
                        // (Fix: the original bound the exception to an unused local.)
                        break;
                    }
                }
            }
            return sb.ToString();
        }
Ejemplo n.º 33
0
        /// <summary>
        /// Replaces every grammar on the shared engine with a single
        /// free-text dictation grammar named "Dictation Grammar".
        /// </summary>
        public void dictationGrammar()
        {
            var freeText = new DictationGrammar { Name = "Dictation Grammar" };
            sre.UnloadAllGrammars();
            sre.LoadGrammar(freeText);
        }
Ejemplo n.º 34
0
 /// <summary>
 /// Performs one synchronous dictation recognition from the default
 /// microphone and returns the recognized text.
 /// </summary>
 /// <returns>The recognized text, or an empty string when nothing was
 /// recognized (Recognize returns null on timeout / no input).</returns>
 public static string Listen()
 {
     Grammar dictationGrammar = new DictationGrammar();
     recognition.LoadGrammar(dictationGrammar);
     recognition.SetInputToDefaultAudioDevice();
     RecognitionResult result = recognition.Recognize();
     // Fix: Recognize() may return null; the original dereferenced it
     // unconditionally and could throw NullReferenceException.
     return result != null ? result.Text : string.Empty;
 }
Ejemplo n.º 35
0
        /// <summary>
        /// Runs one synchronous free-text (dictation) recognition and returns
        /// the text captured by the rec_SpeechRecognized handler.
        /// </summary>
        /// <returns>The value of <c>temp</c> set by the handler.</returns>
        public string recognizeDictation()
        {
            // Use the dictationgrammer, which allows free text
            Grammar g = new DictationGrammar();
            sr.UnloadAllGrammars();
            sr.LoadGrammar(g);

            // Fix: unsubscribe before subscribing so repeated calls do not
            // stack duplicate handlers (the original added a new handler on
            // every invocation, firing it multiple times per recognition).
            sr.SpeechRecognized -= rec_SpeechRecognized;
            sr.SpeechRecognized += rec_SpeechRecognized;
            sr.Recognize();

            return temp;
        }
Ejemplo n.º 36
0
        /// <summary>
        /// Decodes the wave file referenced by <c>Input</c> into text stored in
        /// <c>mOutput</c>; shows a message box on failure.
        /// </summary>
        protected void decode()
        {
            try
            {
                // Dispose the engine so the wave-file handle is released
                // (the original leaked it; it also shadowed the type name).
                using (SpeechRecognitionEngine engine = new SpeechRecognitionEngine())
                {
                    engine.LoadGrammar(new DictationGrammar());
                    engine.SetInputToWaveFile(Input);
                    RecognitionResult result = engine.Recognize();
                    // Fix: Recognize() returns null when nothing is recognized;
                    // the original threw NullReferenceException in that case.
                    mOutput = result != null ? result.Text : string.Empty;
                }
            }
            catch (Exception E)
            {
                MessageBox.Show(E.Message);
            }
        }
Ejemplo n.º 37
0
 /// <summary>
 /// Initializes the Windows speech recognizer with a dictation grammar and
 /// an empty custom-grammar table. On any failure the recognizer is left
 /// null and speech support is disabled (logged as a warning).
 /// </summary>
 internal WinRecog( )
 {
     try
     {
         dGrammar = new DictationGrammar();
         rec = new SpeechRecognizer();
         rec.LoadGrammar(dGrammar);
         cGrammars = new Dictionary<string, Grammar>();
     }
     catch (Exception e)
     {
         // No recognizer available on this system - run without speech.
         rec = null;
         Logger.Log("Speech recognition disabled, " + e.Message,
             Helpers.LogLevel.Warning);
         return;
     }
 }
Ejemplo n.º 38
0
        public System.Windows.Forms.Control cDisplay; // Display control for recognition output

        public SRecognition(string[] fg) // Build the keyword list for recognition
        {
            CultureInfo myCIintl = new CultureInfo("zh-CN",false);
            foreach (RecognizerInfo config in SpeechRecognitionEngine. InstalledRecognizers())// enumerate every installed recognition engine
            {
                if (config.Culture.Equals(myCIintl) && config.Id == "MS-2052-80-DESK")
                {
                    recognizer = new SpeechRecognitionEngine(config);
                    break;
                }// pick the Simplified-Chinese (zh-CN, MS-2052-80-DESK) engine.
                 // NOTE(review): the original comment claimed "US English" but
                 // the culture filter above is zh-CN - confirm intent.
            }
            if (recognizer != null)
            {
                InitializeSpeechRecognitionEngine(fg);// initialize the recognition engine with the keywords
                dictationGrammar = new DictationGrammar();
            }
            else
            {
                // Runtime string intentionally left in Chinese ("failed to
                // create speech recognition") - it is user-facing output.
                MessageBox.Show("创建语音识别失败");
            }
        }
Ejemplo n.º 39
0
Archivo: Form1.cs Proyecto: tyhu/Git
 /// <summary>
 /// Runs one synchronous dictation recognition from the default microphone
 /// and shows the result (or the error) in button1's caption.
 /// </summary>
 private void button1_Click(object sender, EventArgs e)
 {
     // Dispose the engine so the microphone is released after each click.
     using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
     {
         Grammar dictationGrammar = new DictationGrammar();
         recognizer.LoadGrammar(dictationGrammar);
         try
         {
             button1.Text = "Speak Now";
             recognizer.SetInputToDefaultAudioDevice();
             RecognitionResult result = recognizer.Recognize();
             // Fix: Recognize() may return null; guard before dereferencing.
             if (result != null)
             {
                 button1.Text = result.Text;
             }
         }
         catch (InvalidOperationException exception)
         {
             // Fix: typo "aduio" -> "audio" in the user-facing message.
             button1.Text = String.Format("Could not recognize input from default audio device. Is a microphone or sound card available?\r\n{0} - {1}.", exception.Source, exception.Message);
         }
         finally
         {
             recognizer.UnloadAllGrammars();
         }
     }
 }
Ejemplo n.º 40
0
        /// <summary>
        /// Loads the default and spelling dictation grammars plus a command
        /// grammar built from "example.txt" (one command per line, format
        /// "text|attachedText|isShellCommand"; lines starting with "--" and
        /// empty lines are skipped). Exceptions propagate to the caller.
        /// </summary>
        private void loadGrammarAndCommands()
        {
            // Fix: the original wrapped this in try { } catch (Exception ex)
            // { throw ex; }, which only destroyed the stack trace. Letting the
            // exception propagate unwrapped preserves it.
            DictationGrammar defaultDictationGrammar = new DictationGrammar();
            defaultDictationGrammar.Name = "default dictation";
            defaultDictationGrammar.Enabled = true;

            // Create the spelling dictation grammar.
            DictationGrammar spellingDictationGrammar =
              new DictationGrammar("grammar:dictation#spelling");
            spellingDictationGrammar.Name = "spelling dictation";
            spellingDictationGrammar.Enabled = true;

            Choices texts = new Choices();
            string[] lines = File.ReadAllLines(Environment.CurrentDirectory + "\\example.txt");
            foreach (string line in lines)
            {
                // skip commentblocks and empty lines..
                if (line.StartsWith("--") || line == String.Empty) continue;

                // split the line
                var parts = line.Split(new char[] { '|' });

                // add commandItem to the list for later lookup or execution
                words.Add(new Word() { Text = parts[0], AttachedText = parts[1], IsShellCommand = (parts[2] == "true") });

                // add the text to the known choices of speechengine
                texts.Add(parts[0]);
            }
            Grammar wordsList = new Grammar(new GrammarBuilder(texts));
            speechRecognitionEngine.LoadGrammar(defaultDictationGrammar);
            speechRecognitionEngine.LoadGrammar(spellingDictationGrammar);
            speechRecognitionEngine.LoadGrammar(wordsList);
        }
Ejemplo n.º 41
0
 /// <summary>
 /// Performs one synchronous dictation recognition and, on success, copies
 /// the text into label1/textBox1 and triggers button1's action.
 /// Audio/recognition failures are intentionally ignored.
 /// </summary>
 private void button2_Click(object sender, EventArgs e)
 {
     // Fix: dispose the engine so the microphone is released per click.
     using (SpeechRecognitionEngine engineSpeech = new SpeechRecognitionEngine())
     {
         Grammar gram = new DictationGrammar();
         engineSpeech.LoadGrammar(gram);
         try
         {
             engineSpeech.SetInputToDefaultAudioDevice();
             RecognitionResult result = engineSpeech.Recognize();
             // Fix: Recognize() may return null; previously that produced a
             // NullReferenceException that the catch silently swallowed.
             if (result != null)
             {
                 label1.Text = result.Text;
                 textBox1.Text = result.Text;
                 button1_Click(new Object(), new EventArgs());
             }
         }
         catch (Exception)
         {
             // Best-effort: ignore recognition failures (original behavior).
         }
         finally
         {
             engineSpeech.UnloadAllGrammars();
         }
     }
 }
Ejemplo n.º 42
0
        /// <summary>
        /// Initializes the first attached Kinect sensor: manual audio beam,
        /// color and skeleton streams, and free-dictation speech recognition
        /// fed from the Kinect microphone array.
        /// </summary>
        /// <returns>true when a Kinect was found and started; false otherwise.</returns>
        public bool setupKinect()
        {
            if(KinectSensor.KinectSensors.Count > 0)
            {
                skeletons = new Skeleton[6];
                this.kinect = KinectSensor.KinectSensors[0];

                // Audio beam: manual mode, fixed straight ahead (angle 0).
                this.kinect.AudioSource.BeamAngleChanged += AudioSource_BeamAngleChanged;
                this.kinect.AudioSource.SoundSourceAngleChanged += AudioSource_SoundSourceAngleChanged;
                this.kinect.AudioSource.BeamAngleMode = BeamAngleMode.Manual;
                this.kinect.AudioSource.ManualBeamAngle = 0;
                this.kinect.ColorFrameReady += kinect_ColorFrameReady;
                this.kinect.ColorStream.Enable();
                this.kinect.SkeletonFrameReady += kinect_SkeletonFrameReady;
                this.kinect.SkeletonStream.Enable();
                this.kinect.Start();

                // Buffers sized from the color stream's reported dimensions.
                this.colorPixels = new byte[this.kinect.ColorStream.FramePixelDataLength];
                this.imageRectangle = new Rectangle(0, 0, this.kinect.ColorStream.FrameWidth, this.kinect.ColorStream.FrameHeight);
                this.colorBitmap = new Bitmap(this.kinect.ColorStream.FrameWidth, this.kinect.ColorStream.FrameHeight, PixelFormat.Format32bppRgb);

                // Free-text dictation grammar (name string kept as-is,
                // including its original "dication" spelling - it is a
                // runtime value other code may look up).
                this.dictationGrammar = new DictationGrammar();
                this.dictationGrammar.Name = "default dication";
                this.dictationGrammar.Enabled = true;

                // Feed the recognizer from the Kinect audio source:
                // 16 kHz, 16-bit, mono PCM per the format descriptor below.
                this.speechEngine = new SpeechRecognitionEngine();
                this.speechEngine.LoadGrammar(this.dictationGrammar);
                this.speechEngine.SpeechHypothesized += speechEngine_SpeechHypothesized;
                this.speechEngine.SpeechRecognized += speechEngine_SpeechRecognized;
                this.speechEngine.SetInputToAudioStream(
                                    this.kinect.AudioSource.Start(), 
                                    new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);

                return true;
            }
            return false;
        }
Ejemplo n.º 43
0
        /// <summary>
        /// Runs a voice chat session: builds an en-US recognizer with default
        /// and spelling dictation grammars, wraps it in a SpeechConversation,
        /// and lets the ChatBot converse until a key is pressed.
        /// </summary>
        /// <param name="rules">Rules driving the chat bot's replies.</param>
        public static void SpeechChat(List<BotRule> rules)
        {
            using(SpeechRecognitionEngine speechRecognition = new SpeechRecognitionEngine(
                new System.Globalization.CultureInfo("en-US")
            ))
            {
                // Default free-text dictation grammar.
                var defaultDictation = new DictationGrammar
                {
                    Name = "default dictation",
                    Enabled = true
                };
                speechRecognition.LoadGrammar(defaultDictation);

                // Letter-by-letter spelling dictation grammar.
                var spellingDictation = new DictationGrammar("grammar:dictation#spelling")
                {
                    Name = "spelling dictation",
                    Enabled = true
                };
                speechRecognition.LoadGrammar(spellingDictation);

                // Add Grammar for the demo, to make it more reliable:
                //  https://msdn.microsoft.com/en-us/library/hh362944(v=office.14).aspx
                //  http://dailydotnettips.com/2014/01/18/using-wildcard-with-grammar-builder-kinect-speech-recognition/

                // Listen on the default microphone.
                speechRecognition.SetInputToDefaultAudioDevice();

                using (_SpeechConversation = new SpeechConversation(speechRecognition: speechRecognition))
                {
                    var chatBot = new ChatBot(rules);
                    chatBot.talkWith(_SpeechConversation);
                    Console.ReadKey();
                }
            }
        }
Ejemplo n.º 44
0
        /// <summary>
        /// Starts continuous dictation recognition (preferring the es-ES
        /// recognizer, falling back to the system default) and begins
        /// recording the microphone to a unique temporary wave file.
        /// </summary>
        private void BeginRecording()
        {
            try
            {
                speechRecognitionEngine = new SpeechRecognitionEngine(new CultureInfo("es-ES", true));
            }
            catch (Exception)
            {
                // es-ES recognizer not installed - use the default engine.
                speechRecognitionEngine = new SpeechRecognitionEngine();
            }

            speechRecognitionEngine.SetInputToDefaultAudioDevice();
            speechRecognitionEngine.LoadGrammar(new DictationGrammar());
            speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            speechRecognitionEngine.SpeechRecognized += SpeechRecognitionEngine_SpeechRecognized;

            // Record raw audio alongside recognition into a temp .wav file.
            this.waveFileName = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString() + ".wav");
            recorder.BeginRecording(waveFileName);
            RaisePropertyChanged("MicrophoneLevel");
            RaisePropertyChanged("ShowWaveForm");
        }
 /// <summary>
 /// Adds a new microphone instance and spins up a dedicated en-US
 /// recognition engine fed by its UDP audio stream.
 /// </summary>
 /// <param name="instance">The instance id of the microphone</param>
 /// <param name="client">The UDP client whose audio stream feeds the recognizer
 /// (doc fix: the original documented a nonexistent "stream" parameter)</param>
 /// <param name="status">The status of the microphone</param>
 /// <param name="shouldBeOn">Whether the speech recognition engine should be turned on</param>
 public void AddInputMic(string instance, UDPClient client, string status, bool shouldBeOn)
 {
     try 
     {
         // 16 kHz, 16-bit, mono PCM from the UDP audio stream.
         var sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));
         sre.SetInputToAudioStream(client.AudioStream, new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
         sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(RecognitionHandler);
         sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(RecognitionRejectedHandler);
         DictationGrammar customDictationGrammar  = new DictationGrammar("grammar:dictation");
         customDictationGrammar.Name = "dictation";
         customDictationGrammar.Enabled = true;
         sre.LoadGrammar(customDictationGrammar);
         mics.Add(instance, new Microphone(sre,client, status, shouldBeOn,port));
         // Replay all previously registered grammars onto the new engine.
         foreach (var g in grammars)
         {
             var gram = new CombinedGrammar(g.Key, g.Value);
             sre.LoadGrammarAsync(gram.compiled);
         }
         if (shouldBeOn)
         {
             sre.RecognizeAsync(RecognizeMode.Multiple);
         }
     }
     catch (IOException) 
     {
         //negotiating connection with mic failed - mic is simply not added.
     }
 }
 /// <summary>
 /// Reason : To get speech to text data for given no of files.
 /// Transcribes numbered WAV files ("1.wav", "2.wav", ...) under
 /// <paramref name="audioFilePath"/> and returns the combined text through
 /// <paramref name="audioMessage"/>.
 /// </summary>
 /// <param name="audioFilePath">Path prefix of the numbered wave files.</param>
 /// <param name="noOfAudioFiles">Upper bound for the file counter.
 /// NOTE(review): the loop runs i = 1 .. noOfAudioFiles-1, so the file named
 /// "{noOfAudioFiles}.wav" is never processed - confirm whether this
 /// off-by-one is intentional.</param>
 /// <param name="audioMessage">Receives the recognized text, or an error note.</param>
 private void SpeechToText(string audioFilePath,int noOfAudioFiles, ref string audioMessage)
 {
     _recognizer = new SpeechRecognitionEngine();
     Grammar dictationGrammar = new DictationGrammar();
     _recognizer.LoadGrammar(dictationGrammar);
     audioContentMessage = "";
     try
     {
         for (int i = 1; i < noOfAudioFiles; i++)
         {
             try
             {
                 // Recognize each file on a worker task, bounded by timeSpan
                 // so one bad file cannot hang the whole batch.
                 Task task = Task.Factory.StartNew(() => codeBlock(audioFilePath + i + ".wav", noOfAudioFiles, _recognizer));
                 task.Wait(timeSpan);
             }
             catch
             {
                 // Best-effort: a failed or timed-out file is skipped silently.
             }
         }
         audioMessage = audioContentMessage;
     }
     catch (InvalidOperationException)
     {
         audioMessage = "Could not recognize input audio.\r\n";
     }
     finally
     {
         _recognizer.UnloadAllGrammars();
     }
 }
Ejemplo n.º 47
0
        /// <summary>
        /// Builds a recognizer that listens for the command grammar (weight 1)
        /// plus a lower-weighted dictation grammar, then starts continuous
        /// asynchronous recognition. If no capture device exists, informs the
        /// user and exits the program via the supplied form.
        /// </summary>
        /// <param name="form">Main form, used to exit when no microphone exists.</param>
        /// <returns>The configured (and possibly already listening) engine.</returns>
        private SpeechRecognitionEngine createSpeechRecogntionEngine(MainSystemTray form)
        {
            var engine = new SpeechRecognitionEngine(CultureInfo.CurrentCulture);

            // Command grammar dominates; dictation is a low-weight fallback.
            Grammar commands = createCommandGrammar();
            commands.Weight = 1f;

            //"grammar:dictation#pronunciation"
            var dictation = new DictationGrammar("grammar:dictation");
            dictation.Name = DictationState.GRAMMARNAME;
            dictation.Weight = .3f;

            // Route recognized speech to the registered handlers and listen.
            engine.RequestRecognizerUpdate();
            engine.LoadGrammar(commands);
            engine.LoadGrammar(dictation);
            try
            {
                engine.SetInputToDefaultAudioDevice();
                engine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (System.InvalidOperationException)
            {
                // No capture device: tell the user, then shut the program down.
                if (MessageBox.Show(
                    "You do not have an audio capture device installed \nPlease install a microphone and restart the program",
                    "No Capture Device", MessageBoxButtons.OK) == DialogResult.OK)
                {
                    form.ExitProgram();
                }

            }

            return engine;
        }
Ejemplo n.º 48
0
 /// <summary>
 /// Points the recognizer at the default microphone, loads a dictation
 /// grammar, and subscribes to that grammar's SpeechRecognized event.
 /// </summary>
 private void InitializeSpeechEngine()
 {
     recognizer.SetInputToDefaultAudioDevice();
     var dictation = new DictationGrammar();
     recognizer.LoadGrammar(dictation);
     dictation.SpeechRecognized += G_SpeechRecognized;
 }
Ejemplo n.º 49
0
        //-------------------------------------
        // Load form: initialise UI defaults, build the recognition grammars,
        // and start continuous asynchronous recognition.
        private void Form1_Load(object sender, EventArgs e)
        {
            //Display the default audio level & the first line of the text box.
            textBox5.Text = GodAudioLevel.ToString();
            textBox1.Text = "Begin speaking.";
            textBox6.Text = "Error Log" + Environment.NewLine + "-----------------" + Environment.NewLine;

            //Wipe the results text file, for a clean start.
            WipeLatestResults();

            //Audio Box defaults.
            radioButton1.Text = "Receiving Audio";
            radioButton1.Checked = true;
            radioButton1.ForeColor = Color.Red;

            //Create and initialise the speech recognition with a UK culture marker.
            //Fix: UK English is "en-GB" - "en-UK" is not a valid culture name
            //and makes the CultureInfo constructor throw CultureNotFoundException.
            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine(new CultureInfo("en-GB"));

            //The specific phrases that the god will be able to recognise, split into different grammars.

            Choices TestPhrases = new Choices();
            TestPhrases.Add(TestPhrasesArray);
            Grammar TestGrammar = new Grammar(TestPhrases);
            TestGrammar.Name = "TestGrammar";

            //-------------

            //NOTE(review): this grammar is built but never loaded below -
            //confirm whether that is intentional.
            Choices RandomNoisePhrases = new Choices();
            RandomNoisePhrases.Add(new string[] {"tap"});
            Grammar RandomNoiseGrammar = new Grammar(RandomNoisePhrases);
            RandomNoiseGrammar.Name = "RandomNoiseGrammar";

            //-------------

            // etc....

            //A DictationGrammar to handle minor differences.

            DictationGrammar DictationGrammar = new DictationGrammar("grammar:dictation#pronunciation");
            DictationGrammar.Name = "DictationGrammar";

            //Fix: subscribe to recognition events BEFORE starting recognition
            //so early events cannot be missed (original subscribed afterwards).
            GodListener.AudioStateChanged += GodListener_AudioStateChanged;
            GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
            GodListener.SpeechRecognized += GodListener_SpeechRecognized;
            GodListener.SpeechRecognitionRejected += GodListener_SpeechRecognitionRejected;
            GodListener.SpeechDetected += GodListener_SpeechDetected;

            //Start recognising.
            try
            {
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.LoadGrammar(TestGrammar);
                GodListener.LoadGrammar(DictationGrammar);
                GodListener.MaxAlternates = 2;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            //Show up any errors in beginning recognition.
            catch(Exception error)
            {
                radioButton1.Text = "Error...";
                GodListener.RecognizeAsyncCancel();
                textBox5.Text = "";
                textBox3.Text = "";
                textBox6.Text = textBox6.Text + Environment.NewLine + error.Message;
            }
        }
        /// <summary>
        /// Performs one synchronous recognition against a fixed keyword
        /// grammar plus free dictation, and copies the result into the UI.
        /// </summary>
        private void button1_Click(object sender, EventArgs e)
        {
            // Fix: dispose the engine so the microphone is released per click.
            using (SpeechRecognitionEngine SRE = new SpeechRecognitionEngine())
            {
                GrammarBuilder GB = new GrammarBuilder();
                // Keyword choices intentionally left in Chinese - they are the
                // actual phrases the user must speak.
                GB.Append(new Choices(new string[] { "銅葉綠素", "華夏技術學院","服貿","馬來西亞航空","王鴻遠","北捷殺人","阿帕契","課綱"}));
                Grammar G = new Grammar(GB);
                G.Name = "main command grammar";
                SRE.LoadGrammar(G);
                DictationGrammar DG = new DictationGrammar(); // natural-pronunciation dictation
                DG.Name = "dictation";
                SRE.LoadGrammar(DG);
                try
                {
                    label4.Text = "Speak Now";
                    SRE.SetInputToDefaultAudioDevice();

                    RecognitionResult result = SRE.Recognize();
                    if (result == null) return; // nothing recognized
                    label8.Text = result.Text;
                    txtKeyword1.Text = result.Text;
                }
                catch (InvalidOperationException)
                {
                    // Fix: drop the unused exception variable; no usable audio
                    // input device - leave the UI untouched (original behavior).
                }
                finally
                {
                    SRE.UnloadAllGrammars();
                }
            }
        }
 /// <summary>
 /// Creates the recognizer used for translation and loads a free-text
 /// dictation grammar into it.
 /// </summary>
 public TranslationResource()
 {
     _recognizer = new SpeechRecognitionEngine();
     _recognizer.LoadGrammar(new DictationGrammar());
 }
        // We create the dictation grammars only once
        private void CreateDictationGrammars()
        {
            // Default dictation: enabled from the start.
            var defaultDictation = new DictationGrammar { Name = "Default Dictation", Enabled = true };
            _dictationGrammars.Items.Add(CreateGrammarItem(defaultDictation));

            // Spelling dictation: registered but initially disabled.
            var spellingDictation = new DictationGrammar("grammar:dictation#spelling") { Name = "Spelling Dictation", Enabled = false };
            _dictationGrammars.Items.Add(CreateGrammarItem(spellingDictation));

            _dictationGrammars.IsExpanded = true;
        }
Ejemplo n.º 53
0
        /// <summary>
        /// Form load for the NetCheat PS3 main window: checks for updates,
        /// restores settings from ncps3.ini (keybinds, colors, recent ranges,
        /// API choice), selects the PS3 API backend, seeds the code/range
        /// lists, applies the color theme, and wires up speech recognition
        /// on a best-effort basis.
        /// </summary>
        private void Main_Load(object sender, EventArgs e)
        {
            RunUpdateChecker(false);
            //if (File.Exists(Application.ExecutablePath + ".bak"))
            //    File.Delete(Application.ExecutablePath + ".bak");
            //if (File.Exists(Application.StartupPath + "\\updateNC.bat"))
            //    File.Delete(Application.StartupPath + "\\updateNC.bat");

            this.Text = "NetCheat PS3 " + versionNum + " by Dnawrkshp";

            // x walks sequentially through the lines of the settings file.
            int x = 0;
            //Set the settings file and load the settings
            settFile = Application.StartupPath + "\\ncps3.ini";
            if (System.IO.File.Exists(settFile))
            {
                string[] settLines = System.IO.File.ReadAllLines(settFile);
                try
                {
                    //Read the keybinds from the array
                    for (x = 0; x < keyBinds.Length; x++)
                        keyBinds[x] = (Keys)int.Parse(settLines[x]);

                    //Read the colors (stored as hex ARGB) and update the form
                    ncBackColor = Color.FromArgb(int.Parse(settLines[x], System.Globalization.NumberStyles.HexNumber)); BackColor = ncBackColor; x++;
                    ncForeColor = Color.FromArgb(int.Parse(settLines[x], System.Globalization.NumberStyles.HexNumber)); ForeColor = ncForeColor; x++;

                    //Read the recently opened ranges (semicolon-separated)
                    string[] strRangeOrder = settLines[x].Split(';');
                    Array.Resize(ref rangeOrder, strRangeOrder.Length - 1);
                    for (int valRO = 0; valRO < rangeOrder.Length; valRO++)
                        if (strRangeOrder[valRO] != "")
                            rangeOrder[valRO] = int.Parse(strRangeOrder[valRO]);

                    x++;
                    rangeImports = settLines[x].Split(';');
                    //Get rid of extra "" at the end
                    Array.Resize(ref rangeImports, rangeImports.Length - 1);
                    UpdateRecRangeBox();
                    x++;

                    // 0 = TargetManager API, otherwise ControlConsole (CCAPI).
                    apiDLL = int.Parse(settLines[x]);
                    x++;
                }
                catch
                {
                    // Malformed settings file - keep whatever defaults/values
                    // were applied before the failure.
                }
            }

            PS3.ChangeAPI((apiDLL == 0) ? SelectAPI.TargetManager : SelectAPI.ControlConsole);
            if (apiDLL == 0)
                PS3.PS3TMAPI_NET();
            else
            {
                SchPWS.Visible = false; //Can't stop/continue process with CCAPI
                pauseGameButt.Visible = false;
                startGameButt.Visible = false;
            }

            refPlugin_Click(null, null);

            attachProcessButton.Enabled = false;

            //Add the first Code
            cbList.Items.Add("NEW CODE");
            //Set backcolor
            cbList.Items[0].ForeColor = ncForeColor;
            cbList.Items[0].BackColor = ncBackColor;

            Codes[CodesCount].name = "NEW CODE";
            Codes[CodesCount].state = false;
            cbSchAlign.SelectedIndex = 2;
            compBox.SelectedIndex = 0;
            dFileName = Application.StartupPath + "\\dump.txt";

            // Toggle selection once so selection-change handlers initialise.
            cbList.Items[0].Selected = true;
            cbList.Items[0].Selected = false;

            //Add first range (full address space, aligned end)
            string[] a = { "00000000", "FFFFFFFC" };
            ListViewItem b = new ListViewItem(a);
            rangeView.Items.Add(b);

            //Update range array
            UpdateMemArray();

            //Update all the controls on the form
            int ctrl = 0;
            for (ctrl = 0; ctrl < Controls.Count; ctrl++)
            {
                Controls[ctrl].BackColor = ncBackColor;
                Controls[ctrl].ForeColor = ncForeColor;
            }

            //Update all the controls on the tabs
            for (ctrl = 0; ctrl < TabCon.TabPages.Count; ctrl++)
            {
                TabCon.TabPages[ctrl].BackColor = ncBackColor;
                TabCon.TabPages[ctrl].ForeColor = ncForeColor;
                //Color each control in the tab too
                for (int tabCtrl = 0; tabCtrl < TabCon.TabPages[ctrl].Controls.Count; tabCtrl++)
                {
                    TabCon.TabPages[ctrl].Controls[tabCtrl].BackColor = ncBackColor;
                    TabCon.TabPages[ctrl].Controls[tabCtrl].ForeColor = ncForeColor;
                }
            }

            toolStripDropDownButton1.BackColor = Color.Maroon;

            // Best-effort speech recognition setup; if no microphone/engine is
            // available, skip it silently rather than failing form load.
            try
            {
                sRecognize.RequestRecognizerUpdate();
                DictationGrammar _dictationGrammar = new DictationGrammar();
                sRecognize.LoadGrammar(_dictationGrammar);
                sRecognize.SpeechRecognized += sr_SpeechRecognized;
                sRecognize.SetInputToDefaultAudioDevice();
            }
            catch
            {
                return;
            }
        }
Ejemplo n.º 54
0
        /// <summary>
        /// Reloads all grammars: clears the engine, loads every *.xml grammar
        /// file from the configured directory, then registers a (disabled)
        /// dictation grammar.
        /// </summary>
        protected void LoadGrammar()
        {
            this.loading = 0;

            // Drop everything currently loaded.
            Console.WriteLine("[Grammar] Unload");
            SpeechRecognitionEngine engine = GetEngine();
            engine.UnloadAllGrammars();

            // Load each XML grammar file from the grammar directory.
            DirectoryInfo dir = new DirectoryInfo(directory);
            abspath = dir.FullName;
            Console.WriteLine("[Grammar] Load directory: " + abspath);
            foreach (FileInfo grammarFile in dir.GetFiles("*.xml"))
            {
                this.loading++;
                LoadGrammar(grammarFile.FullName, grammarFile.Name);
            }

            // Dictation grammar is registered but starts disabled.
            dication = new DictationGrammar("grammar:dictation");
            dication.Name = "dictation";
            dication.Enabled = false;
            GetEngine().LoadGrammarAsync(dication);
        }
Ejemplo n.º 55
0
 /// <summary>
 /// Loads a single free-text dictation grammar ("Dictation Grammar")
 /// into the recognition engine.
 /// </summary>
 private void LoadGrammars()
 {
     var dictationGrammar = new DictationGrammar();
     dictationGrammar.Name = "Dictation Grammar";
     recognitionEngine.LoadGrammar(dictationGrammar);
 }
Ejemplo n.º 56
0
        /// <summary>
        /// Loads the default and spelling dictation grammars plus a small
        /// "start &lt;app&gt;" command grammar into the recognition engine.
        /// </summary>
        private void LoadDictationCommands()
        {
            // The default dictation grammar from Windows Desktop speech.
            var defaultDictation = new DictationGrammar();
            defaultDictation.Name = "default";
            defaultDictation.Enabled = true;

            // Spelling dictation grammar.
            var spellingDictation = new DictationGrammar("grammar:dictation#spelling");
            spellingDictation.Name = "spelling dictation";
            spellingDictation.Enabled = true;

            // TODO: choices should compare against a dictionary so that the spoken word and related exe don't need to be the same
            var launchTargets = new Choices("OneNote", "Notepad");
            var startBuilder = new GrammarBuilder("start");
            startBuilder.Append(launchTargets);
            var startCommandGrammar = new Grammar(startBuilder);

            SpeechRecognitionEngine.LoadGrammar(defaultDictation);
            SpeechRecognitionEngine.LoadGrammar(spellingDictation);
            SpeechRecognitionEngine.LoadGrammar(startCommandGrammar);
        }
Ejemplo n.º 57
-1
        /// <summary>
        /// "Calls the god": updates the UI, then starts continuous
        /// asynchronous dictation recognition with the audio and recognition
        /// event handlers wired. Any setup failure is silently abandoned.
        /// </summary>
        public void button1_Click(object sender, EventArgs e)
        {
            button1.Enabled = false;
            button1.Text = "God Called";
            label2.Text = "The god is listening...";
            label2.ForeColor = Color.Red;

            // Fix: "en-UK" is not a valid culture name (the CultureInfo
            // constructor throws CultureNotFoundException); UK English is "en-GB".
            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"));
            DictationGrammar GodGrammar = new DictationGrammar();

            GodListener.MaxAlternates = 2;

            try
            {
                GodListener.RequestRecognizerUpdate();
                GodListener.LoadGrammar(GodGrammar);
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.SpeechRecognized += GodListener_SpeechRecognized;
                GodListener.AudioStateChanged += GodListener_AudioStateChanged;
                GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                // No microphone/recognizer available - leave the UI as-is.
                return;
            }
        }