public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
        {
            SessionStorage = new SessionStorage();
            if(speechSynthesizer==null)
            {
                speechSynthesizer = new SpeechSynthesizer();
                speechSynthesizer.SetOutputToDefaultAudioDevice();
            }
            _speechSynthesizer = speechSynthesizer;
            if(speechRecognition==null)
            {
                speechRecognition = new SpeechRecognitionEngine(
                    new System.Globalization.CultureInfo("en-US")
                );
                // Create a default dictation grammar.
                DictationGrammar defaultDictationGrammar = new DictationGrammar();
                defaultDictationGrammar.Name = "default dictation";
                defaultDictationGrammar.Enabled = true;
                speechRecognition.LoadGrammar(defaultDictationGrammar);
                // Create the spelling dictation grammar.
                DictationGrammar spellingDictationGrammar = new DictationGrammar("grammar:dictation#spelling");
                spellingDictationGrammar.Name = "spelling dictation";
                spellingDictationGrammar.Enabled = true;
                speechRecognition.LoadGrammar(spellingDictationGrammar);

                // Configure input to the speech recognizer.
                speechRecognition.SetInputToDefaultAudioDevice();
            }
            _speechRecognition = speechRecognition;
        }
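A minimal usage sketch (hedged: SessionStorage, _speechSynthesizer and _speechRecognition are assumed to be members of the surrounding SpeechConversation class, as the constructor implies):

        // Default construction builds a synthesizer on the default audio output
        // and a dictation-capable recognizer; both can also be injected.
        var conversation = new SpeechConversation();

        var synth = new SpeechSynthesizer();
        synth.SetOutputToDefaultAudioDevice();
        var customized = new SpeechConversation(speechSynthesizer: synth);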
Example #2
 public void Start(ListenContext listenCtx, SpeechRecognitionEngine sre)
 {
     _listenCtx = listenCtx;
     using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Resources.RecogPlay)))
         sre.LoadGrammar(new Grammar(memoryStream));
     var gb = new GrammarBuilder { Culture = new CultureInfo("en-US") };
     gb.Append(_choices);
     sre.LoadGrammar(new Grammar(gb));
     _lastKnownGood = null;
 }
 void AddGrammars(SpeechRecognitionEngine recognizer)
 {
     Grammar dictationGrammar = BuildGrammar();
     Grammar mulliganGrammar = MakeRepeatedGrammar(new string[] { "mulligan" }, new string[] { "1", "2", "3", "4", "confirm" }, 99);
     Grammar moveGrammar = MakeMoveGrammar();
     Grammar removeGrammar = RemoveCardGrammar();
     recognizer.LoadGrammar(dictationGrammar);
     recognizer.LoadGrammar(mulliganGrammar);
     recognizer.LoadGrammar(moveGrammar);
     recognizer.LoadGrammar(removeGrammar);
 }
Example #4
 private void button1_Click(object sender, EventArgs e)
 {
     _completed = new ManualResetEvent(false);
     SpeechRecognitionEngine _recognizer = new SpeechRecognitionEngine();
     _recognizer.RequestRecognizerUpdate(); // request a recognizer update
     _recognizer.LoadGrammar(new Grammar(new GrammarBuilder("test"))); // load a "test" grammar
     _recognizer.RequestRecognizerUpdate(); // request a recognizer update
     _recognizer.LoadGrammar(new Grammar(new GrammarBuilder("exit"))); // load an "exit" grammar
     _recognizer.SpeechRecognized += _recognizer_SpeechRecognized;
     _recognizer.SetInputToDefaultAudioDevice(); // set the recognizer's input to the default audio device
     _recognizer.RecognizeAsync(RecognizeMode.Multiple); // recognize speech asynchronously
     _completed.WaitOne(); // block until the recognized-speech handler signals completion
     _recognizer.Dispose(); // dispose the speech recognition engine
 }
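Since button1_Click blocks on _completed, its SpeechRecognized handler presumably signals the event once "exit" is heard. A sketch of such a handler (the actual body is not shown in the source):

 void _recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
 {
     if (e.Result.Text == "exit")
     {
         _completed.Set(); // unblocks the WaitOne() call in button1_Click
     }
     else
     {
         Console.WriteLine(e.Result.Text); // e.g. the "test" phrase
     }
 }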
        public void Transcribe(MediaSegment segment)
        {
            using (var engine = new SpeechRecognitionEngine())
            {
                engine.LoadGrammar(new DictationGrammar());
                engine.SetInputToWaveFile(segment.File.FullName);

                var result = engine.Recognize();
                if (result == null)
                {
                    return; // nothing was recognized in this segment
                }

                var metaDatum = new Metadata();
                metaDatum.Start = result.Audio.AudioPosition.TotalMilliseconds + segment.OffsetMs;
                metaDatum.End = metaDatum.Start + segment.DurationMs;
                metaDatum.EngineMetadata = new SpeechResults
                {
                    Text = result.Text,
                    Confidence = result.Confidence
                };

                _concurrentDictionary.AddOrUpdate(segment.FileId, new List<Metadata> {metaDatum}, (x, y) =>
                {
                    y.Add(metaDatum);
                    return y;
                });
            }
        }
Example #6
        //here is the fun part: create the speech recognizer
        private SpeechRecognitionEngine CreateSpeechRecognizer()
        {
            //set recognizer info
            RecognizerInfo ri = GetKinectRecognizer();
            //create instance of SRE
            SpeechRecognitionEngine sre;
            sre = new SpeechRecognitionEngine(ri.Id);

            //Now we need to add the words we want our program to recognise
            var grammar = new Choices();
            grammar.Add("Record");
            grammar.Add("Store");
            grammar.Add("Replay");
            grammar.Add("Stop");
            grammar.Add("Learn");
            grammar.Add("Finish");

            //set culture - language, country/region
            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(grammar);

            //set up the grammar builder
            var g = new Grammar(gb);
            sre.LoadGrammar(g);

            //Set events for recognizing, hypothesising and rejecting speech
            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechHypothesized += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
            return sre;
        }
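Several Kinect examples on this page call a GetKinectRecognizer() helper without showing it. A plausible implementation (requires System.Linq), mirroring the recognizer-matching logic used in Example #20 below:

        private static RecognizerInfo GetKinectRecognizer()
        {
            // Pick the installed recognizer flagged as Kinect-capable for en-US.
            return SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault(r =>
            {
                string value;
                r.AdditionalInfo.TryGetValue("Kinect", out value);
                return "True".Equals(value, StringComparison.InvariantCultureIgnoreCase)
                    && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
            });
        }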
        public void InicializeSpeechRecognize()
        {
            RecognizerInfo ri = GetKinectRecognizer();
            if (ri == null)
            {
                throw new RecognizerNotFoundException();
            }

            try
            {
                _sre = new SpeechRecognitionEngine(ri.Id);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
                throw; // rethrow without resetting the stack trace
            }

            var choices = new Choices();
            foreach (CommandSpeechRecognition cmd in _commands.Values)
            {
                choices.Add(cmd.Choise);
            }

            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(choices);
            var g = new Grammar(gb);

            _sre.LoadGrammar(g);
            _sre.SpeechRecognized += SreSpeechRecognized;
            _sre.SpeechHypothesized += SreSpeechHypothesized;
            _sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
        }
    /*
     * SpeechRecognizer
     *
     * @param GName - grammar file name
     * @param minConfidence - confidence rejection threshold (typically 0-100)
     */
    public SpeechRecognizer(string GName, int minConfidence)
    {
        //creates the speech recognizer engine
        sr = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-PT"));
        sr.SetInputToDefaultAudioDevice();
        Console.WriteLine("confiança : " + minConfidence);
        sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", minConfidence);

        //verify that the grammar file exists before loading it; starting
        //recognition with a null grammar would throw
        if (!System.IO.File.Exists(GName))
        {
            Console.WriteLine("Can't read grammar file");
            return;
        }

        Grammar gr = new Grammar(GName);
        gr.Enabled = true;

        //load Grammar to speech engine
        sr.LoadGrammar(gr);

        //assigns a method, to execute when speech is recognized
        sr.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);

        //assigns a method, to execute when speech is NOT recognized
        sr.SpeechRecognitionRejected +=
          new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejected);

        // Start asynchronous, continuous speech recognition.
        sr.RecognizeAsync(RecognizeMode.Multiple);
    }
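A hedged usage sketch; the grammar path is hypothetical, and CFGConfidenceRejectionThreshold is assumed to take a value between 0 and 100:

        var recognizer = new SpeechRecognizer("commands.grxml", 30);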
Example #9
        private void btn_connect_Click(object sender, EventArgs e)
        {
            ushort port;
            ushort.TryParse(txt_port.Text, out port);
            try
            {
                current_player = new AssPlayer(players[cmb_players.SelectedItem.ToString()], txt_host.Text, port);
            }
            catch(Exception ex)
            {
                MessageBox.Show("Could not connect: " + ex.Message);
                return;
            }
            voice_threshold = (float)num_voice_threshold.Value;

            recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
            Grammar player_grammar = prepare_grammar(current_player.commands);
            recognizer.LoadGrammar(player_grammar);
            recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.RecognizeAsync(RecognizeMode.Multiple);

            taskbar_icon.Visible = true;
            Hide();
        }
Example #10
        public VoiceInput()
        {
            recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US"));

            recognizer.SetInputToDefaultAudioDevice();
            Choices choices = new Choices();
            foreach (String command in commands)
            {
                choices.Add(command);
            }
            choices.Add(startListening);
            choices.Add(stopListening);
            choices.Add(stop);
            /*choices.Add("Close");
            choices.Add("Left");
            choices.Add("Right");
            choices.Add("Tilt Left");
            choices.Add("Tilt Right");
            choices.Add("Move");
            choices.Add("Back");
            choices.Add("Move Up");
            choices.Add("Down");
            choices.Add("Exit");
            choices.Add("Stop");
            choices.Add("Start Listening");
            choices.Add("Stop Listening");*/
            Grammar grammar = new Grammar(new GrammarBuilder(choices));
            recognizer.LoadGrammar(grammar);

            recognizer.SpeechRecognized +=
                new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
            recognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
        public void initRS()
        {
            try
            {
                SpeechRecognitionEngine sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));

                var words = new Choices();
                words.Add("Hello");
                words.Add("Jump");
                words.Add("Left");
                words.Add("Right");

                var gb = new GrammarBuilder();
                gb.Culture = new System.Globalization.CultureInfo("en-US");
                gb.Append(words);
                Grammar g = new Grammar(gb);

                sre.LoadGrammar(g);
                
                sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
                sre.SetInputToDefaultAudioDevice();
                sre.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception e)
            {
                label1.Text = "init RS Error : " + e.ToString();
            }
        }
Example #12
        public void StartListening()
        {
            if (null != _ri)
            {
                _speechEngine = new SpeechRecognitionEngine(_ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    _speechEngine.LoadGrammar(g);
                }

                _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
                _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

                // let the convertStream know speech is going active
                _convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                _speechEngine.SetInputToAudioStream(
                    _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);

                //_isInTrainingMode = true;
            }
            //else
            //    throw new InvalidOperationException("RecognizerInfo cannot be null");
        }
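The positional arguments to SpeechAudioFormatInfo above are easy to misread; here is the same format with its parameters named (matching the constructor's signature):

                // 16 kHz, 16-bit, mono PCM -- the format the Kinect audio stream is delivered in.
                var format = new SpeechAudioFormatInfo(
                    EncodingFormat.Pcm,
                    samplesPerSecond: 16000,
                    bitsPerSample: 16,
                    channelCount: 1,
                    averageBytesPerSecond: 32000, // 16000 samples/s * 2 bytes/sample
                    blockAlign: 2,
                    formatSpecificData: null);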
Example #13
File: VI.cs Project: jjonj/AVPI
        public void load_listen(VI_Profile profile, VI_Settings settings, ListView statusContainer)
        {
            this.profile = profile;
            this.settings = settings;
            this.statusContainer = statusContainer;

            vi_syn = profile.synth;
            vi_syn.SelectVoice(settings.voice_info);
            vi_sre = new SpeechRecognitionEngine(settings.recognizer_info);

            GrammarBuilder phrases_grammar = new GrammarBuilder();
            List<string> glossary = new List<string>();

            foreach (VI_Phrase trigger in profile.Profile_Triggers)
            {
                glossary.Add(trigger.value);
            }
            if (glossary.Count == 0)
            {
                MessageBox.Show("You need to add at least one Trigger");
                return;
            }
            phrases_grammar.Append(new Choices(glossary.ToArray()));

            vi_sre.LoadGrammar(new Grammar(phrases_grammar));
            //set event function
            vi_sre.SpeechRecognized += phraseRecognized;
            vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
            vi_sre.SetInputToDefaultAudioDevice();
            vi_sre.RecognizeAsync(RecognizeMode.Multiple);
        }
        public VoiceSelect()
        {
            precision = .5;
            newWordReady = false;

            RecognizerInfo ri = GetKinectRecognizer();

            SpeechRecognitionEngine tempSpeechRec;

            tempSpeechRec = new SpeechRecognitionEngine(ri.Id);

            var grammar = new Choices();
            grammar.Add("select one", "SELECT ONE", "Select One");
            grammar.Add("select two", "SELECT TWO", "Select Two");
            grammar.Add("pause", "PAUSE");
            grammar.Add("exit", "EXIT");
            grammar.Add("single player", "SINGLE PLAYER");
            grammar.Add("co op mode", "CO OP MODE");
            grammar.Add("settings", "SETTINGS");
            grammar.Add("instructions", "INSTRUCTIONS");
            grammar.Add("statistics", "STATISTICS");
            grammar.Add("Main Menu", "MAIN MENU");
            grammar.Add("resume", "RESUME");
            grammar.Add("restart level", "RESTART LEVEL");
            grammar.Add("replay", "REPLAY");
            grammar.Add("next", "NEXT");
            grammar.Add("Easy", "EASY");
            grammar.Add("Hard", "HARD");
            /*
            grammar.Add("level one");
            grammar.Add("level two");
            grammar.Add("level three");
            grammar.Add("level four");
            grammar.Add("level five");
            grammar.Add("level six");
            grammar.Add("player one left");
            grammar.Add("player one right");
            grammar.Add("player two left");
            grammar.Add("player two right");
            grammar.Add("room low");
            grammar.Add("room medium");
            grammar.Add("room high");
            grammar.Add("sounds on");
            grammar.Add("sounds off");
            grammar.Add("reset stats");
            */

            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(grammar);

            // Create the actual Grammar instance, and then load it into the speech recognizer.
            var g = new Grammar(gb);

            tempSpeechRec.LoadGrammar(g);
            tempSpeechRec.SpeechRecognized += phraseRecognized;
            tempSpeechRec.SpeechHypothesized += phraseHyphothesized;
            tempSpeechRec.SpeechRecognitionRejected += phraseRejected;

            speechRec = tempSpeechRec;
        }
Example #16
        /// <summary>
        /// Initializes a new instance of the <see cref="MainWindow"/> class.
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();

            try
            {
                // create the engine
                //speechRecognitionEngine = createSpeechEngine("de-DE");
                //speechRecognitionEngine = createSpeechEngine(CultureInfo.CurrentCulture.Name);
                speechRecognitionEngine = createSpeechEngine("es-ES");

                // hook to events
                speechRecognitionEngine.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(engine_AudioLevelUpdated);

                // Create and load a dictation grammar.
                speechRecognitionEngine.LoadGrammar(new DictationGrammar());

                speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);

                // use the system's default microphone
                speechRecognitionEngine.SetInputToDefaultAudioDevice();

                // start listening
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message, "Voice recognition failed");
            }
        }
Example #17
        public void InitializeSpeechRecognitionEngine(String filePath)
        {
            MySpeechRecognitionEngine = new SpeechRecognitionEngine();
            //MySpeechRecognitionEngine.SetInputToDefaultAudioDevice();

            MySpeechRecognitionEngine.UnloadAllGrammars();

            try
            {
                MySpeechRecognitionEngine.SetInputToWaveFile(filePath);

                Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));

                MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());

                // Attach the event handlers before starting recognition so no events are missed.
                MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;
                MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEnginee_SpeechRecognized;
                MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEnginee_AudioStateChanged;
                MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;

                MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }
        }
Example #18
        public MainWindow()
        {
            InitializeComponent();

            var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
            //config.Populate();
            config.Save();
            return; // early return left in the source; the speech setup below never runs
            var engine = new SpeechRecognitionEngine();

            var builder = new GrammarBuilder();
            builder.Append( "tag" );
            builder.Append( new Choices( "baron", "dragon" ) );

            engine.RequestRecognizerUpdate();
            engine.LoadGrammar( new Grammar( builder ) );

            engine.SpeechRecognized += engine_SpeechRecognized;

            engine.SetInputToDefaultAudioDevice();
            engine.RecognizeAsync( RecognizeMode.Multiple );

            CompositionTarget.Rendering += CompositionTarget_Rendering;

            this.DataContext = this;
        }
        public SpeechRecogniser()
        {
            RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
            if (ri == null)
                return;

            sre = new SpeechRecognitionEngine(ri.Id);

            // Build a simple grammar of shapes, colors, and some simple program control
            var instruments = new Choices();
            foreach (var phrase in InstrumentPhrases)
                instruments.Add(phrase.Key);

            var objectChoices = new Choices();
            objectChoices.Add(instruments);

            var actionGrammar = new GrammarBuilder();
            //actionGrammar.AppendWildcard();
            actionGrammar.Append(objectChoices);

            var gb = new GrammarBuilder();
            gb.Append(actionGrammar);

            var g = new Grammar(gb);
            sre.LoadGrammar(g);
            sre.SpeechRecognized += sre_SpeechRecognized;
            sre.SpeechHypothesized += sre_SpeechHypothesized;
            sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sre_SpeechRecognitionRejected);

            var t = new Thread(StartDMO);
            t.Start();

            valid = true;
        }
Example #20
        public SpeechRecognizer(string file, KinectSensor sensor)
        {
            this.grammarFile = file;
            this.kinectSensor = sensor;
            audioSource = kinectSensor.AudioSource;
            audioSource.AutomaticGainControlEnabled = false;
            audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

            Func<RecognizerInfo, bool> matchingFunc = r =>
            {
                string value;
                r.AdditionalInfo.TryGetValue("Kinect", out value);
                return "True".Equals(value, StringComparison.InvariantCultureIgnoreCase) && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
            };
            var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().Where(matchingFunc).FirstOrDefault();
            if (recognizerInfo == null)
                return;

            speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

            var grammar = new Grammar(grammarFile);
            speechRecognitionEngine.LoadGrammar(grammar);

            audioStream = audioSource.Start();
            speechRecognitionEngine.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

            speechRecognitionEngine.AudioStateChanged += onAudioStateChanged;
            speechRecognitionEngine.SpeechRecognized += onSpeechRecognized;
            speechRecognitionEngine.RecognizeCompleted += onSpeechRecognizeCompleted;
            speechRecognitionEngine.EmulateRecognizeCompleted += onEmulateRecognizeCompleted;
        }
        //Speech recognizer
        private SpeechRecognitionEngine CreateSpeechRecognizer()
        {
            RecognizerInfo ri = GetKinectRecognizer();

            SpeechRecognitionEngine sre;
            sre = new SpeechRecognitionEngine(ri.Id);

            //words we need the program to recognise
            var grammar = new Choices();
            grammar.Add(new SemanticResultValue("moustache", "MOUSTACHE"));
            grammar.Add(new SemanticResultValue("top hat", "TOP HAT"));
            grammar.Add(new SemanticResultValue("glasses", "GLASSES"));
            grammar.Add(new SemanticResultValue("sunglasses", "SUNGLASSES"));
            grammar.Add(new SemanticResultValue("tie", "TIE"));
            grammar.Add(new SemanticResultValue("bow", "BOW"));
            grammar.Add(new SemanticResultValue("bear", "BEAR"));
            //etc

            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(grammar);

            var g = new Grammar(gb);
            sre.LoadGrammar(g);

            //Events for recognising and rejecting speech
            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
            return sre;
        }
Example #22
        private static SpeechRecognitionEngine GetSpeechRecognitionEngine()
        {
            var sre = new SpeechRecognitionEngine();

            sre.LoadGrammar(new DictationGrammar());
            sre.SetInputToDefaultAudioDevice();

            sre.SpeechRecognized += (s, e) =>
            {
                if (e.Result != null &&
                    !String.IsNullOrEmpty(e.Result.Text))
                {
                    using (new ConsoleForegroundColor(ConsoleColor.Green))
                    {
                        Console.WriteLine(e.Result.Text);
                    }

                    return;
                }

                using (new ConsoleForegroundColor(ConsoleColor.Red))
                {
                    Console.WriteLine("Recognized text not available.");
                }
            };
            //sr.SpeechRecognized += SpeechRecognizedHandler;

            return sre;
        }
Example #23
        private string Transcribe(MemoryStream audioFile)
        {
            using (var recognizer = new SpeechRecognitionEngine())
            {
                // Create and load a grammar.
                var dictation = new DictationGrammar
                {
                    Name = "Dictation Grammar"
                };

                recognizer.LoadGrammar(dictation);

                // Configure the input to the recognizer.
                recognizer.SetInputToWaveStream(audioFile);

                // Attach event handlers for the results of recognition.
                recognizer.SpeechRecognized +=
                  new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
                recognizer.RecognizeCompleted +=
                  new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

                // Perform recognition on the entire file.
                Console.WriteLine("Starting asynchronous recognition...");
                completed = false;
                recognizer.RecognizeAsync(RecognizeMode.Single);

                // Wait for recognition to finish; sleep briefly to avoid a hot spin loop.
                while (!completed)
                {
                    Thread.Sleep(50);
                }
            }
            return TranscribedText;
        }
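The completed flag and TranscribedText are set in handlers not shown in this example. A minimal sketch consistent with the code above:

        // Hedged sketch of the handlers wired up in Transcribe.
        private void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            TranscribedText += e.Result.Text + " "; // accumulate recognized phrases
        }

        private void recognizer_RecognizeCompleted(object sender, RecognizeCompletedEventArgs e)
        {
            completed = true; // releases the wait loop in Transcribe
        }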
Example #24
        private void worker_DoWork(object sender, DoWorkEventArgs e)
        {
            Thread.CurrentThread.Name = "Kinect audio thread";
            if(_device.Type == DeviceType.KINECT_1)
            {
                SpeechRecognizer = new SpeechRecognitionEngine(recognizerInfo.Id);
                SpeechRecognizer.LoadGrammar(GetCurrentGrammar());
                SpeechRecognizer.SpeechRecognized += SreSpeechRecognized;
                SpeechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
                SpeechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                //set sensor audio source to variable
                audioSource = _device.sensor.AudioSource;
                //Set the beam angle mode - the direction the audio beam is pointing
                //we want it to be set to adaptive
                audioSource.BeamAngleMode = BeamAngleMode.Adaptive;
                //start the audiosource
                var kinectStream = audioSource.Start();
                //configure incoming audio stream
                SpeechRecognizer.SetInputToAudioStream(
                    kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                //make sure the recognizer does not stop after completing
                SpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
                //reduce background and ambient noise for better accuracy
                _device.sensor.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
                _device.sensor.AudioSource.AutomaticGainControlEnabled = false;
                audioStarted = true;
            }
            Console.WriteLine("END OF WORKER AUDIO");
        }
void BuildSpeechEngine(RecognizerInfo rec)
{
    _speechEngine = new SpeechRecognitionEngine(rec.Id);

    var choices = new Choices();
    choices.Add("venus");
    choices.Add("mars");
    choices.Add("earth");
    choices.Add("jupiter");
    choices.Add("sun");

    var gb = new GrammarBuilder { Culture = rec.Culture };
    gb.Append(choices);

    var g = new Grammar(gb);

    _speechEngine.LoadGrammar(g);
    //recognized a word or words that may be a component of multiple complete phrases in a grammar.
    _speechEngine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(SpeechEngineSpeechHypothesized);
    //receives input that matches any of its loaded and enabled Grammar objects.
    _speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_speechEngineSpeechRecognized);
    //receives input that does not match any of its loaded and enabled Grammar objects.
    _speechEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(_speechEngineSpeechRecognitionRejected);


    //C# threads are MTA by default, and calling RecognizeAsync on the same thread will cause a COM exception.
    var t = new Thread(StartAudioStream);
    t.Start();
}
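StartAudioStream is not shown; a plausible sketch, modeled on the Kinect audio setup in Example #24 and assuming a KinectSensor field named sensor:

private void StartAudioStream()
{
    var audioSource = sensor.AudioSource;          // assumed KinectSensor field
    audioSource.BeamAngleMode = BeamAngleMode.Adaptive;
    var kinectStream = audioSource.Start();
    _speechEngine.SetInputToAudioStream(
        kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}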
Example #26
        public VoiceRecognizer()
        {
            try
            {
                // Create a new SpeechRecognitionEngine instance.
                voiceEngine = new SpeechRecognitionEngine(new CultureInfo("en-US"));

                // Setup the audio device
                voiceEngine.SetInputToDefaultAudioDevice();

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(CommandPool.BuildSrgsGrammar());
                voiceEngine.LoadGrammar(g);

                //voiceEngine.EndSilenceTimeout = new TimeSpan(0, 0, 1);

                // Register a handler for the SpeechRecognized event
                voiceEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);

                // Start listening in multiple mode (that is, don't quit after a single recognition)
                voiceEngine.RecognizeAsync(RecognizeMode.Multiple);
                IsSetup = true;
            }
            catch (Exception)
            {
                IsSetup = false;
            }
        }
Example #27
        public static SpeechRecognitionEngine InitializeSRE()
        {
            //Create the speech recognition engine.
            //Note: no using block here -- wrapping sre in using would dispose it
            //before it is returned to the caller.
            SpeechRecognitionEngine sre = new SpeechRecognitionEngine();

            //Set the audio device to the OS default
            sre.SetInputToDefaultAudioDevice();

            // Reset the grammars
            sre.UnloadAllGrammars();

            // Load the plugins
            LoadPlugins();

            //Load all of the grammars
            foreach (IJarvisPlugin plugin in _plugins)
                sre.LoadGrammar(plugin.getGrammar());

            //Add the event handler before starting recognition
            sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);

            //Start continuous, asynchronous recognition
            sre.RecognizeAsync(RecognizeMode.Multiple);

            while (!Jarvis.JarvisMain.stop)
            {
                Thread.Sleep(50); // avoid a hot spin loop while waiting for shutdown
            }
            return sre;
        }
Example #28
        private void KomutlariOlustur()
        {
            string[] komutlar = new string[]
            {
                "Yes", "No", "Open İmage", "Open Video", "Thank You", "Search", "Run Notepad", "Run Paint", "Open Google", "Open Translate", "Open Facebook", "What is your name", "What time is it", "Ney's today", "Date", "I love you"
            };



            Choices        insChoices        = new Choices(komutlar);
            GrammarBuilder insGrammarBuilder = new GrammarBuilder(insChoices);
            Grammar        insGrammar        = new Grammar(insGrammarBuilder);

            sre.LoadGrammar(insGrammar);
        }
Example #29
        public static void Initialize()                                          // Initialize components
        {
            synth = new SpeechSynthesizer();
            synth.SetOutputToDefaultAudioDevice();                                                                    // Configure output to the speech synthesizer.

            recog = new SpeechRecognitionEngine(new CultureInfo("en-US"));                                            // Create an in-process speech recognizer for the en-US locale.
            recog.LoadGrammar(CreateGrammarBuilderGrammarForAssistant());                                             // Create and load a grammar.
            recog.SetInputToNull();                                                                                   // Disable audio input to the recognizer.

            recog.SpeechRecognized          += new EventHandler <SpeechRecognizedEventArgs>(SpeechRecognizedHandler); // Attach event handlers for recognition events.
            recog.SpeechDetected            += new EventHandler <SpeechDetectedEventArgs>(SpeechDetectedHandler);
            recog.SpeechHypothesized        += new EventHandler <SpeechHypothesizedEventArgs>(SpeechHypothesizedHandler);
            recog.SpeechRecognitionRejected += new EventHandler <SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejectedHandler);
            recog.EmulateRecognizeCompleted += new EventHandler <EmulateRecognizeCompletedEventArgs>(EmulateRecognizeCompletedHander);
        }
        private void button1_Click_1(object sender, EventArgs e)
        {
            // Hide the intro UI and show the recognition labels.
            label2.Visible = false;
            ou.Dispose();
            label11.Visible = false;
            sSynth.SelectVoice(sSynth.GetInstalledVoices()[0].VoiceInfo.Name);
            label7.Visible = false;
            label6.Visible = false;
            label8.Visible = false;
            button3.Visible = false;
            button1.Visible = false;
            pictureBox1.Visible = false;
            pictureBox2.Visible = false;
            pictureBox3.Visible = false;
            pictureBox4.Visible = false;
            pictureBox5.Visible = false;
            pictureBox6.Visible = false;
            label5.Visible = true;
            button2.Visible = false;
            label3.Visible = label4.Visible = true;

            // Set up the recognizer; attach the handler before starting recognition.
            reconizer.SetInputToDefaultAudioDevice();
            GrammarBuilder gb = new GrammarBuilder(new Choices(choices));
            reconizer.LoadGrammar(new Grammar(gb));
            reconizer.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(reconizer_reconized);
            reconizer.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #31
        public Recognizer(string name, Choices alternateChoices)
        {
            info   = GetRecognizer(name);
            engine = new SpeechRecognitionEngine(info.Id);

            builder         = new GrammarBuilder();
            builder.Culture = info.Culture;
            builder.Append(alternateChoices);

            grammar = new Grammar(builder);
            engine.LoadGrammar(grammar);
            engine.SpeechRecognized          += Recognizer_SpeechRecognized;
            engine.SpeechHypothesized        += Recognizer_SpeechHypothesized;
            engine.SpeechRecognitionRejected += Recognizer_SpeechRecognitionRejected;
        }
        public void escuchar()
        {
            string selectdoc = "select cedula from alu01";

            rec.SetInputToDefaultAudioDevice();
            Choices listadocente = new Choices();

            listadocente.Add(nuevoescucha.gramaticadocente(selectdoc));
            Grammar gramatica = new Grammar(new GrammarBuilder(listadocente));

            rec.LoadGrammar(gramatica);
            rec.SpeechRecognized += _Recognition_SpeechRecognized;
            rec.RecognizeAsync(RecognizeMode.Multiple);
        }
 public GrammarManager()
 {
     SRE = new SpeechRecognitionEngine(pRecognitionLanguage);
     SRE.SpeechRecognized += SRE_SpeechRecognized;
     SRE.SetInputToDefaultAudioDevice();
     pGrammars              = new Grammar[4];
     YesNoGrammar           = new YesNoGrammar(pRecognitionLanguage);
     WhichProductGrammar    = new WhichProductGrammar(pRecognitionLanguage);
     pGrammars[0]           = WhichProductGrammar.grammar;
     HowManyProductsGrammar = new HowManyProductsGrammar(pRecognitionLanguage);
     pGrammars[1]           = HowManyProductsGrammar.grammar;
     pGrammars[2]           = new PersonNameGrammar(pRecognitionLanguage).grammar;
     pGrammars[3]           = new AddressGrammar(pRecognitionLanguage).grammar;
     SRE.LoadGrammar(pGrammars[0]);
 }
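Only pGrammars[0] is loaded at construction, so the manager presumably swaps grammars as the dialogue progresses. A hedged sketch of such a switch (SwitchGrammar is a hypothetical helper, not part of the original class):

 public void SwitchGrammar(int index)
 {
     SRE.UnloadAllGrammars();           // drop the currently active grammar
     SRE.LoadGrammar(pGrammars[index]); // activate the requested one
 }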
Example #34
        private void speech_Setup()
        {
            engine = new SpeechRecognitionEngine();
            engine.SetInputToDefaultAudioDevice();
            engine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(speech_Recognized);

            var commandBuilder          = new GrammarBuilder();
            var commandBuilderWithInput = new GrammarBuilder();

            var triggers = new Choices();

            triggers.Add(new SemanticResultValue("mator", 1));
            triggers.Add(new SemanticResultValue("alexa", 2));
            commandBuilder.Append(new SemanticResultKey("trigger", triggers));
            commandBuilderWithInput.Append(new SemanticResultKey("trigger", triggers));

            var commands = new Choices(speech_GetCommands().Keys.ToArray());

            commandBuilder.Append(new SemanticResultKey("command", commands));
            commandBuilderWithInput.Append(new SemanticResultKey("command", commands));

            var input = new Choices();

            input.Add(new SemanticResultValue("30 minutes", 30));
            input.Add(new SemanticResultValue("next", "next"));
            commandBuilderWithInput.AppendWildcard();
            commandBuilderWithInput.Append(new SemanticResultKey("input", input));

            Grammar grammar          = new Grammar(commandBuilder);
            Grammar grammarWithInput = new Grammar(commandBuilderWithInput);

            engine.LoadGrammar(grammar);
            engine.LoadGrammar(grammarWithInput);

            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #35
        /*.............................................//voice//.........................................................*/

        private void Loadgrammer()
        {
            try
            {
                Choices  txt  = new Choices();
                string[] line = File.ReadAllLines(Environment.CurrentDirectory + "\\commands.txt");
                txt.Add(line);
                Grammar Wordlist = new Grammar(new GrammarBuilder(txt));
                speech.LoadGrammar(Wordlist);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
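Since File.ReadAllLines feeds one line per entry into Choices, commands.txt is just a plain phrase list, one recognizable phrase per line. Illustrative contents (hypothetical):

    open notepad
    what time is it
    stop listening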
Example #36
 private void loadGrammarAndCommands()
 {
     try
     {
         Choices  texts = new Choices();
         string[] lines = File.ReadAllLines(Environment.CurrentDirectory + "\\jarvisnewscommnds.txt");
         texts.Add(lines);
         Grammar wordsList = new Grammar(new GrammarBuilder(texts));
         speechRecognitionEngine.LoadGrammar(wordsList);
     }
     catch (Exception)
     {
         throw; // rethrow, preserving the original stack trace
     }
 }
Example #37
 private void Form1_Load(object sender, EventArgs e)
 {
     try
     {
         escucha.SetInputToDefaultAudioDevice();
         escucha.LoadGrammar(new DictationGrammar());
         escucha.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(ReconocedorDeVoz);
         escucha.AudioLevelUpdated += NivelAudio;
         escucha.RecognizeAsync(RecognizeMode.Multiple);
     }
     catch (InvalidOperationException)
     {
         MessageBox.Show("No access to the microphone, or it is not connected.");
     }
 }
 private void InitializeKeyWordRecognizer()
 {
     try
     {
         _keyWordRecognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
         _keyWordRecognizer.SetInputToDefaultAudioDevice();
         _keyWordRecognizer.SpeechRecognized += OnWakeUp;
         _keyWordRecognizer.LoadGrammar(new Grammar(new GrammarBuilder(new Choices("hey artemis"))));
         _keyWordRecognizer.RecognizeAsync(RecognizeMode.Multiple);
     }
     catch (Exception exception)
     {
         Console.WriteLine(exception.Message);
     }
 }
Example #39
 private void tt_Load(object sender, EventArgs e)
 {
     sr.LoadGrammar(gr);
     sr.SetInputToDefaultAudioDevice();
     cbdil1.Text = "en";
     cbdil2.Text = "tr";
     if (Test())
     {
         lblmessage.Text = "Internet connection provided.";
     }
     else
     {
         lblmessage.Text = "Internet connection could not be established.";
     }
 }
Example #40
        private void BuildGrammar()
        {
            var builder = new GrammarBuilder();
            var choices = new Choices();

            foreach (var effect in Bot.SoundEffectRepository.Effects)
            {
                choices.Add(effect.Value.Name);
            }

            builder.Append("soundbot");
            builder.Append(choices);

            speech.LoadGrammar(new Grammar(builder));
        }
Example #41
        /// <summary>
        /// Creates a new speech recognition engine.
        /// </summary>
        /// <param name="language">The language for the recognition engine.</param>
        /// <param name="grammars">The grammars to load.</param>
        /// <returns>A new speech recognition engine object.</returns>
        internal static SpeechRecognitionEngine CreateSpeechRecognitionEngine(string language, Microsoft.Psi.Speech.GrammarInfo[] grammars)
        {
            var recognizer = new SpeechRecognitionEngine(new CultureInfo(language));

            foreach (var grammarInfo in grammars)
            {
                Grammar grammar = new Grammar(grammarInfo.FileName)
                {
                    Name = grammarInfo.Name
                };
                recognizer.LoadGrammar(grammar);
            }

            return(recognizer);
        }
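A hedged usage sketch (file and grammar names are hypothetical; it assumes Microsoft.Psi.Speech.GrammarInfo exposes settable Name and FileName properties, as the method's usage implies):

            var grammars = new[]
            {
                new Microsoft.Psi.Speech.GrammarInfo { Name = "commands", FileName = "commands.grxml" },
            };
            var recognizer = CreateSpeechRecognitionEngine("en-US", grammars);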
        /*
         #region Real Sense
         * void LeftHand_Moved(Position obj)
         * {
         *  if (!CurrentState.CtlSetting.isGestureEnable) return;
         *  if (obj.Image.Y < 200)
         *  {
         *
         *      Dispatcher.BeginInvoke(
         *         new ThreadStart(() =>
         *         {
         *             CurrentState.CtlContent.OnScrollDown(null, null);
         *         }
         *         ));
         *
         *  }
         *  else
         *  {
         *
         *      Dispatcher.BeginInvoke(
         *         new ThreadStart(() =>
         *         {
         *             CurrentState.CtlContent.OnScrollUp(null, null);
         *         }
         *         ));
         *
         *  }
         * }
         * void RightHand_Moved(Position obj)
         * {
         *  if (!CurrentState.CtlSetting.isGestureEnable) return;
         *  if (obj.Image.Y < 200)
         *  {
         *
         *      Dispatcher.BeginInvoke(
         *         new ThreadStart(() =>
         *         {
         *             CurrentState.CtlContent.OnScrollDown(null, null);
         *         }
         *         ));
         *
         *  }
         *  else
         *  {
         *
         *      Dispatcher.BeginInvoke(
         *         new ThreadStart(() =>
         *         {
         *             CurrentState.CtlContent.OnScrollUp(null, null);
         *         }
         *         ));
         *
         *  }
         * }
         * void dispatcherTimer_Tick(object sender, EventArgs e)
         * {
         *  if (!cam.Face.IsVisible)
         *  {
         *      FaceOffCount++;
         *  }
         *  else
         *  {
         *      FaceOffCount = 0;
         *  }
         *  if (FaceOffCount > 1 && CurrentState.CtlSetting.isAutoShutdownEnable)
         *  {
         *      Dispatcher.BeginInvoke(
         *           new ThreadStart(() =>
         *           {
         *               speechSynthesizer.Speak("I will shutdown this application");
         *               Application.Current.Shutdown();
         *           }
         *           ));
         *  }
         * }
         *
         * void Gestures_SwipeRight(Hand obj)
         * {
         *  if (CurrentState.CtlSetting.isGestureEnable)
         *  {
         *      Dispatcher.BeginInvoke(
         *          new ThreadStart(() =>
         *          {
         *              GoToNextChapter(null, null);
         *          }
         *          ));
         *  }
         *  //throw new NotImplementedException();
         * }
         *
         * void Gestures_SwipeLeft(Hand obj)
         * {
         *  if (CurrentState.CtlSetting.isGestureEnable)
         *  {
         *      Dispatcher.BeginInvoke(
         *            new ThreadStart(() =>
         *            {
         *                GoToPrevChapter(null, null);
         *            }
         *            ));
         *  }
         *  //throw new NotImplementedException();
         * }
         *
         * void Face_NotVisible()
         * {
         *
         *  //throw new NotImplementedException();
         * }
         *
         * void Face_Visible()
         * {
         *  //FaceOffCount = 0;
         *  //throw new NotImplementedException();
         * }
         #endregion
         */

        #region Speech
        void ListenToBoss()
        {
            CultureInfo ci = new CultureInfo("en-US");

            _recognizer = new SpeechRecognitionEngine(ci);
            // Map each spoken phrase to its BossCommand.
            Perintah = new Dictionary <string, BossCommand>();

            Perintah.Add("read now", BossCommand.Read);
            Perintah.Add("stop read", BossCommand.Stop);
            Perintah.Add("next chapter", BossCommand.NextChapter);
            Perintah.Add("previous chapter", BossCommand.PrevChapter);
            Perintah.Add("next item", BossCommand.NextItem);
            Perintah.Add("previous item", BossCommand.PrevItem);
            Perintah.Add("zoom in", BossCommand.ZoomIn);
            Perintah.Add("zoom out", BossCommand.ZoomOut);
            Perintah.Add("volume up", BossCommand.VolumeUp);
            Perintah.Add("volume down", BossCommand.VolumeDown);
            Perintah.Add("add bookmark", BossCommand.AddBookmark);
            Perintah.Add("open bookmark", BossCommand.OpenBookmark);
            Perintah.Add("please turn off", BossCommand.Exit);

            foreach (KeyValuePair <string, BossCommand> entry in Perintah)
            {
                _recognizer.LoadGrammar(new Grammar(new GrammarBuilder(entry.Key)));
            }
            //special grammar
            _recognizer.LoadGrammar(specialGrammar());

            isRecognizing = false;
            _recognizer.SpeechRecognized          += _recognizer_SpeechRecognized; // if speech is recognized, call the specified method
            _recognizer.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
            _recognizer.SetInputToDefaultAudioDevice();                            // set the input to the default audio device
            _recognizer.RecognizeAsync(RecognizeMode.Multiple);                    // recognize speech asynchronous
        }
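Loading one Grammar per dictionary entry works, but the same phrases also fit in a single Choices grammar, as most other examples on this page do. An equivalent sketch (requires System.Linq for ToArray):

            var commandChoices = new Choices(Perintah.Keys.ToArray());
            _recognizer.LoadGrammar(new Grammar(new GrammarBuilder(commandChoices)));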
Example #43
 // Called when something is recognized
 private void rec1(object s, SpeechRecognizedEventArgs e)
 {
     if (!operationComplete && !alreadyRecognizing)
     {
         this.alreadyRecognizing = true;
         if (this.number1 == -1)
         {
             txtBoxNumber1.Text = e.Result.Text;
             this.number1       = Convert.ToInt32(e.Result.Text);
             engine.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(OperationRepository.Operadores))));
         }
         else
         {
             if (string.IsNullOrEmpty(this.operation))
             {
                 this.operation            = e.Result.Text;
                 this.txtBoxOperation.Text = this.RecognizeSignal(this.operation);
                 engine.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(NumberRepository.Numeros))));
             }
             else
             {
                 if (this.number2 == -1)
                 {
                     txtBoxNumber2.Text = e.Result.Text;
                     this.number2       = Convert.ToInt32(e.Result.Text);
                     double result = Math.Round(this.operationBetweenVariables(number1, operation, number2), 2);
                     this.txtBoxResult.Text = Convert.ToString(result);
                     MessageBox.Show(this.OperationToDo(this.number1, this.operation, this.number2));
                     this.operationComplete = true;
                     this.btnReset.Enabled  = true;
                 }
             }
         }
     }
     this.alreadyRecognizing = false;
 }
 private void LoadGrammarAndCommands()
 {
     try
     {
         Choices  Text  = new Choices();
         string[] Lines = File.ReadAllLines(Environment.CurrentDirectory + "\\Commands.txt");
         Text.Add(Lines);
         Grammar WordList = new Grammar(new GrammarBuilder(Text));
         speechRecognitionEngine.LoadGrammar(WordList);
     }
     catch (Exception ex)
     {
         //MessageBox.Show(ex.Message);
     }
 }
Example #45
 // Generic Windows Recognizer
 static void CreateRecognizer(string _CultureInfo)
 {
     using (SpeechRecognitionEngine _rec = new SpeechRecognitionEngine(new CultureInfo(_CultureInfo)))
     {
         Console.WriteLine("Listening...");
         _rec.LoadGrammar(new DictationGrammar());
         _rec.SetInputToDefaultAudioDevice();
         _rec.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(_rec_SpeechRecognized);
         _rec.RecognizeAsync(RecognizeMode.Multiple);
         while (true)
         {
             Console.ReadLine();
         }
     }
 }
Example #46
        public Form1()
        {
            InitializeComponent();
            se();
            this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.None;

            //Set input, load the grammar and hook up the speech recognition event handler
            myVoice.SetInputToDefaultAudioDevice();

            string[] phrases = getPhrases();
            myVoice.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(phrases))));

            myVoice.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(myVoice_SpeechRecognized);
            myVoice.RecognizeAsync(RecognizeMode.Multiple);
        }
        public MainWindow()
        {
            InitializeComponent();

            speechRecognitionEngine = new SpeechRecognitionEngine();
            speechRecognitionEngine.SetInputToDefaultAudioDevice();

            speechSynthesizer = new SpeechSynthesizer();
            speechSynthesizer.SetOutputToDefaultAudioDevice();

            var choices = new Choices();

            choices.Add("Ligar luz quarto");
            choices.Add("Desligar luz quarto");
            choices.Add("Ligar computador quarto");
            choices.Add("Desligar computador quarto");
            choices.Add("Ligar ar condicionado quarto");
            choices.Add("Desligar ar condicionado quarto");
            choices.Add("Ligar luz banheiro");
            choices.Add("Desligar luz banheiro");
            choices.Add("Ligar chuveiro");
            choices.Add("Desligar chuveiro");
            choices.Add("Ligar luz cozinha");
            choices.Add("Desligar luz cozinha");
            choices.Add("Ligar ventilador");
            choices.Add("Desligar ventilador");
            choices.Add("Ligar micro-ondas");
            choices.Add("Desligar micro-ondas");
            choices.Add("Ligar luz sala");
            choices.Add("Desligar luz sala");
            choices.Add("Ligar TV sala");
            choices.Add("Desligar TV sala");
            choices.Add("Ligar ar condicionado sala");
            choices.Add("Desligar ar condicionado sala");

            var grammarBuilder = new GrammarBuilder {
                Culture = CultureInfo.CurrentCulture
            };

            grammarBuilder.Append(choices);
            var grammar = new Grammar(grammarBuilder);

            speechRecognitionEngine.LoadGrammar(grammar);
            speechRecognitionEngine.SpeechRecognized          += SpeechRecognitionEngineSpeechRecognized;
            speechRecognitionEngine.SpeechRecognitionRejected += SpeechRejected;

            speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
        public void Run()
        {
            // Create AudioVideoFlow
            AudioVideoFlowHelper audioVideoFlowHelper = new AudioVideoFlowHelper();

            _audioVideoFlow = audioVideoFlowHelper.CreateAudioVideoFlow(
                null,
                audioVideoFlow_StateChanged);

            // Create a speech recognition connector and attach it to an AudioVideoFlow
            SpeechRecognitionConnector speechRecognitionConnector = new SpeechRecognitionConnector();

            speechRecognitionConnector.AttachFlow(_audioVideoFlow);

            //Start recognizing
            SpeechRecognitionStream stream = speechRecognitionConnector.Start();

            // Create speech recognition engine and start recognizing by attaching connector to engine
            SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine();

            speechRecognitionEngine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(speechRecognitionEngine_SpeechRecognized);


            string[] recognizedString = { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "exit" };
            Choices  numberChoices    = new Choices(recognizedString);

            speechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(numberChoices)));

            SpeechAudioFormatInfo speechAudioFormatInfo = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);

            speechRecognitionEngine.SetInputToAudioStream(stream, speechAudioFormatInfo);
            Console.WriteLine("\r\nGrammar loaded from zero to ten, say exit to shutdown.");

            speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            _waitForXXXCompleted.WaitOne();
            //Stop the connector
            speechRecognitionConnector.Stop();
            Console.WriteLine("Stopping the speech recognition connector");

            //speech recognition connector must be detached from the flow, otherwise if the connector is rooted, it will keep the flow in memory.
            speechRecognitionConnector.DetachFlow();

            // Shutdown the platform
            ShutdownPlatform();

            _waitForShutdownEventCompleted.WaitOne();
        }
        private void LoadContent()
        {
            kinectDevice = new Runtime();
            kinectDevice.Initialize(RuntimeOptions.UseDepthAndPlayerIndex | RuntimeOptions.UseSkeletalTracking | RuntimeOptions.UseColor);

            kinectDevice.SkeletonEngine.TransformSmooth = true;
            kinectDevice.VideoStream.Open(ImageStreamType.Video, 2, ImageResolution.Resolution640x480, ImageType.Color);
            kinectDevice.DepthStream.Open(ImageStreamType.Depth, 2, ImageResolution.Resolution320x240, ImageType.DepthAndPlayerIndex);

            kinectDevice.SkeletonFrameReady += new EventHandler <SkeletonFrameReadyEventArgs>(kinectDevice_SkeletonFrameReady);
            kinectDevice.VideoFrameReady    += new EventHandler <ImageFrameReadyEventArgs>(kinectDevice_VideoFrameReady);

            kinectAudio = new KinectAudioSource();

            kinectAudio.FeatureMode          = true;
            kinectAudio.AutomaticGainControl = false;
            kinectAudio.SystemMode           = SystemMode.OptibeamArrayOnly;

            ri           = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
            sre          = new SpeechRecognitionEngine(ri.Id);
            audioChoices = new Choices();
            audioChoices.Add("stop");
            audioChoices.Add("start");
            audioChoices.Add("kinect shutdown");
            audioChoices.Add("reset time");
            audioChoices.Add("spree");
            audioChoices.Add("reset hand");
            audioChoices.Add("faster");
            audioChoices.Add("slower");
            grammerBuilder         = new GrammarBuilder();
            grammerBuilder.Culture = ri.Culture;
            grammerBuilder.Append(audioChoices);
            grammer = new Grammar(grammerBuilder);

            sre.LoadGrammar(grammer);

            sre.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(sre_SpeechRecognized);

            sre.SetInputToAudioStream(kinectAudio.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            sre.RecognizeAsync(RecognizeMode.Multiple);

            player = new NinjaPlayer(this);

            backGround = Content.Load <Texture2D>("wood_paneling");
            font       = Content.Load <SpriteFont>("font");

            sound = new SoundPlayer();
        }
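The sre_SpeechRecognized handler registered in LoadContent is not shown; a sketch of a dispatcher over the registered phrases (the actions are placeholders, not the original game logic):

        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            switch (e.Result.Text)
            {
                case "stop":            /* pause gameplay */         break;
                case "start":           /* resume gameplay */        break;
                case "kinect shutdown": kinectDevice.Uninitialize(); break;
                // ... the remaining phrases ("reset time", "faster", "slower", ...) dispatch similarly
            }
        }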
Exemple #50
        /// <summary>
        /// Tries to load an SRGS grammar for SAPI 5.3 and above
        /// </summary>
        /// <returns>true if grammar was loaded successfully, false otherwise</returns>
        protected override bool LoadGrammar(FileInfo grammarFile)
        {
            Grammar grammar;

            if ((grammarFile == null) || !grammarFile.Exists)
            {
                return(false);
            }

            // A Grammar instance can be loaded into only one engine at a time,
            // so build a separate instance for each engine.
            grammar         = LoadSapi53Grammar(grammarFile.FullName);
            grammarForFiles = LoadSapi53Grammar(grammarFile.FullName);
            if (grammar == null)
            {
                grammar         = LoadSapi51Grammar(grammarFile.FullName);
                grammarForFiles = LoadSapi51Grammar(grammarFile.FullName);
            }
            if (grammar == null)
            {
                return(false);
            }

            lock (oLock)
            {
                this.engine.UnloadAllGrammars();
                try
                {
                    this.engine.LoadGrammar(grammar);
                }
                catch
                {
                    this.grammar    = null;
                    this.hasGrammar = false;
                    return(false);
                }
                this.grammar     = grammar;
                this.hasGrammar  = true;
                this.grammarFile = grammarFile.FullName;
            }

            lock (engineForFiles)
            {
                engineForFiles.UnloadAllGrammars();
                engineForFiles.LoadGrammar(grammarForFiles);
            }

            AddFreeDictationGrammar();
            return(true);
        }
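LoadSapi53Grammar and LoadSapi51Grammar are not shown here; a minimal sketch of the SAPI 5.3 path, assuming it simply hands the SRGS file to the Grammar constructor and returns null on failure so the caller can fall back:

        protected Grammar LoadSapi53Grammar(string path)
        {
            try
            {
                // System.Speech parses SRGS XML directly from a file path.
                return new Grammar(path);
            }
            catch (Exception)
            {
                // A null return lets LoadGrammar fall back to the SAPI 5.1 loader.
                return null;
            }
        }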
Exemple #51
        public NotesWindow()
        {
            InitializeComponent();

            //Set the DockPanel's DataContext here. The view also references NotesVM
            //through its namespace, so to avoid duplicating the view model we use
            //the instance declared in the view's Resources.


            viewModel             = this.Resources["vm"] as NotesVM;
            container.DataContext = viewModel;

            viewModel.SelectedNoteChanged += ViewModel_SelectedNoteChanged;


            //Initialize recognizer
            var currentCulture = (from r in SpeechRecognitionEngine.InstalledRecognizers()
                                  where r.Culture.Equals(Thread.CurrentThread.CurrentCulture)
                                  select r).FirstOrDefault();

            recognizer = new SpeechRecognitionEngine(currentCulture);

            GrammarBuilder builder = new GrammarBuilder();

            builder.AppendDictation();
            Grammar grammar = new Grammar(builder);

            recognizer.LoadGrammar(grammar);


            recognizer.SetInputToDefaultAudioDevice();

            //Wire the recognizer to the method that processes the recognized speech.
            //Recognizer_SpeechRecognized is the event handler invoked when the event fires.
            recognizer.SpeechRecognized += Recognizer_SpeechRecognized;

            //Populate the font family and font size combo boxes
            var fontFamilies = Fonts.SystemFontFamilies.OrderBy(f => f.Source);

            fontFamilyComboBox.ItemsSource = fontFamilies;

            List <double> fontSizes = new List <double>()
            {
                8, 9, 10, 11, 12, 14, 16, 28, 48, 72
            };

            fontSizeComboBox.ItemsSource = fontSizes;
        }
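Recognizer_SpeechRecognized is not shown; a plausible dictation handler, assuming the note body is a RichTextBox field named contentRichTextBox (a hypothetical name):

        private void Recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Insert the dictated text at the caret position (contentRichTextBox is assumed).
            contentRichTextBox.CaretPosition.InsertTextInRun(e.Result.Text + " ");
        }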
Exemple #52
        private SpeechRecognitionEngine CreateSpeechRecognizer()
        {
            //set recognizer info
            RecognizerInfo ri = GetKinectRecognizer();
            //create instance of SRE
            SpeechRecognitionEngine sre;

            sre = new SpeechRecognitionEngine(ri.Id);

            //Add the words we want our program to recognise
            var grammar = new Choices();

            grammar.Add(myKinectName);
            grammar.Add(openMyMediaPlayer);
            grammar.Add(closeMyMediaPlayer);
            grammar.Add(play);
            grammar.Add(pause);
            grammar.Add(stop);
            grammar.Add(rewind);
            grammar.Add(fastforward);
            grammar.Add(next);
            grammar.Add(previous);
            grammar.Add(volUp);
            grammar.Add(volDwn);
            grammar.Add(mute);
            grammar.Add(fullscreen);
            grammar.Add(exitFullscreen);
            grammar.Add(browse);
            grammar.Add(hide);

            //set culture - language, country/region
            var gb = new GrammarBuilder {
                Culture = ri.Culture
            };

            gb.Append(grammar);

            //set up the grammar builder
            var g = new Grammar(gb);

            sre.LoadGrammar(g);

            //Set events for recognizing, hypothesising and rejecting speech
            sre.SpeechRecognized          += SreSpeechRecognized;
            sre.SpeechHypothesized        += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
            return(sre);
        }
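GetKinectRecognizer is not shown; the Kinect SDK samples usually pick the recognizer whose AdditionalInfo carries a "Kinect" key, as in this sketch (assumed to match the original):

        private static RecognizerInfo GetKinectRecognizer()
        {
            foreach (RecognizerInfo recognizer in SpeechRecognitionEngine.InstalledRecognizers())
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase) &&
                    "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }
            return null;
        }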
Exemple #53
        static void Main()
        {
            // Some global setup stuff
            ScreenWidth  = Screen.PrimaryScreen.Bounds.Width;
            ScreenHeight = Screen.PrimaryScreen.Bounds.Height;
            ScreenSize   = new Position(ScreenWidth, ScreenHeight);

            // Fetch the embedded positions resource
            var assembly     = Assembly.GetExecutingAssembly();
            var resourceName = "PointToIcon.new-positions.json";

            string result = string.Empty;

            // Read it from a stream
            using (Stream stream = assembly.GetManifestResourceStream(resourceName))
                using (StreamReader reader = new StreamReader(stream))
                {
                    result = reader.ReadToEnd();
                }

            // Create some position objects from the json array.
            List <float[]> jsonObject = JsonConvert.DeserializeObject <List <float[]> >(result);

            Positions = new List <Position>();
            jsonObject.ForEach(i =>
            {
                Positions.Add(new Position(i[0], i[1]));
            });

            Desktop desktop = new Desktop();

            DesktopIcons = desktop.GetIconsPositions();

            // Setup speech recognition
            using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"))) // British English
            {
                // Load a free-form dictation grammar, following the example in the Microsoft Docs.
                recognizer.LoadGrammar(new DictationGrammar());

                recognizer.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(Recognizer_SpeechRecognized);

                recognizer.SetInputToDefaultAudioDevice();
                recognizer.RecognizeAsync(RecognizeMode.Multiple);

                // Will close the program on the next key press in the console
                Console.ReadKey();
            }
        }
Exemple #54
        //SpeechSynthesizer JARVIS = new SpeechSynthesizer();


        public MainWindow()
        {
            InitializeComponent();

            haarCascade = new HaarCascade(@"haarcascade_frontalface_alt_tree.xml");
            capture     = new Capture();
            JarvisData.load();

            clockTimer.Tick    += clockTimer_Tick;
            clockTimer.Interval = 1000;
            clockTimer.Start();

            TwitterCheck.Tick    += TwitterCheck_Tick;
            TwitterCheck.Interval = 2000;
            TwitterCheck.Start();

            TwitterCollectorTimer.Tick    += TwitterCollectorTimer_Tick;
            TwitterCollectorTimer.Interval = 1800000;
            TwitterCollectorTimer.Start();

            facialRecTimer.Tick    += facialRecTimer_Tick;
            facialRecTimer.Interval = 1000;
            facialRecTimer.Start();
            //capture.QueryFrame();
            // autoSave.Tick += autoSave_Tick;
            //  autoSave.Start();
            // autoSave.Interval = 10000;

            if (JarvisData.isMiniMic.Contains("true"))
            {
                this.micLabel.Content = "Mic: Mini Mic";
            }
            else
            {
                this.micLabel.Content = "Mic: Kinect";
            }


            _recognizer.SetInputToDefaultAudioDevice();

            build.AppendDictation();
            finalGrammar = new Grammar(build);
            _recognizer.LoadGrammar(finalGrammar);
            _recognizer.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(Commands);
            _recognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #55
        public void impot()
        {
            synthesizer.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Adult);

            sre.RequestRecognizerUpdate();
            GrammarBuilder gra = new GrammarBuilder(list);

            Grammar gr = new Grammar(gra);

            sre.LoadGrammar(gr);
            sre.SetInputToDefaultAudioDevice();

            sre.SpeechRecognized       += sre_SpeechRecognized;
            synthesizer.SpeakCompleted += synthesizer_SpeakCompleted;
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
Exemple #56
        private void StartSetup()
        {
            if (_dictationGrammar == null)
            {
                _dictationGrammar = new DictationGrammar();
            }

            _speechRecognitionEngine.LoadGrammar(_dictationGrammar);

            // Detach first so repeated calls to StartSetup never double-subscribe.
            _speechRecognitionEngine.SpeechRecognized   -= new EventHandler <SpeechRecognizedEventArgs>(SpeechRecognized);
            _speechRecognitionEngine.SpeechHypothesized -= new EventHandler <SpeechHypothesizedEventArgs>(SpeechHypothesizing);

            _speechRecognitionEngine.SpeechRecognized   += new EventHandler <SpeechRecognizedEventArgs>(SpeechRecognized);
            _speechRecognitionEngine.SpeechHypothesized += new EventHandler <SpeechHypothesizedEventArgs>(SpeechHypothesizing);

            // Start listening only after the handlers are wired up.
            _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
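No matching teardown appears in the snippet; a minimal sketch, assuming the same engine and grammar fields are reused:

        private void StopSetup()
        {
            // Finish any in-progress utterance, then stop listening.
            _speechRecognitionEngine.RecognizeAsyncStop();
            _speechRecognitionEngine.UnloadGrammar(_dictationGrammar);
        }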
Exemple #57
        //====================================================================

        //====================================================================
        // Start the speechEngine
        //====================================================================
        public void IniciarEngine()
        {
            Choices choices = new Choices();

            string[] listaComandos = montarComandos();
            if (listaComandos.Length > 0)
            {
                choices.Add(listaComandos);
                speechEngine.LoadGrammar(new Grammar(new GrammarBuilder(choices)));
                speechEngine.SetInputToDefaultAudioDevice();
                // Subscribe before starting recognition so the first result is not missed.
                speechEngine.SpeechRecognized += SpeechEngine_SpeechRecognized;
                speechEngine.RecognizeAsync(RecognizeMode.Multiple);
                executar = new BM_Executar();
            }
        }
Exemple #58
        public Form1()
        {
            SpeechRecognitionEngine rec = new SpeechRecognitionEngine();
            Choices list = new Choices();

            list.Add(new string[] { "hello", "how are you", "i'm fine" });

            Grammar gr = new Grammar(new GrammarBuilder(list));

            try
            {

                rec.RequestRecognizerUpdate();
                rec.LoadGrammar(gr);
                rec.SpeechRecognized += rec_SpeechRecognized;
                rec.SetInputToDefaultAudioDevice();
                rec.RecognizeAsync(RecognizeMode.Multiple);

            }
            catch { return; }

            s.SelectVoiceByHints(VoiceGender.Female);
            s.Speak("Hello, my name is VoiceBot");

            InitializeComponent();
        }
Exemple #59
        public void button1_Click(object sender, EventArgs e)
        {
            button1.Enabled = false;
            button1.Text = "God Called";
            label2.Text = "The god is listening...";
            label2.ForeColor = Color.Red;

            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB")); // "en-UK" is not a valid culture name
            DictationGrammar GodGrammar = new DictationGrammar();

            GodListener.MaxAlternates = 2;

            try
            {
                GodListener.RequestRecognizerUpdate();
                GodListener.LoadGrammar(GodGrammar);
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.SpeechRecognized += GodListener_SpeechRecognized;
                GodListener.AudioStateChanged += GodListener_AudioStateChanged;
                GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                return;
            }
        }
Exemple #60
        public void button1_Click(object sender, EventArgs e)
        {
            button1.Enabled = false;
            button1.Text = "God Called";
            label2.Text = "The god is listening...";
            label2.ForeColor = Color.Red;

            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine();

            Choices GodList = new Choices();
            GodList.Add(new string[] { "Make toast", "Make me toast", "Make me some toast", "Make me immortal", "Make rain", "call rain", "call the rain", "make it rain", "wink out of existence", "begone", "go now", "wink yourself out of existence" });

            GrammarBuilder gb = new GrammarBuilder();
            gb.Append(GodList);

            Grammar GodGrammar = new Grammar(gb);

            GodListener.MaxAlternates = 2;

            try
            {
                GodListener.RequestRecognizerUpdate();
                GodListener.LoadGrammar(GodGrammar);
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.SpeechRecognized += GodListener_SpeechRecognized;
                GodListener.AudioStateChanged += GodListener_AudioStateChanged;
                GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                return;
            }
        }
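With MaxAlternates set to 2, the GodListener_SpeechRecognized handler (not shown) can inspect alternate interpretations of each utterance; a sketch:

        private void GodListener_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // e.Result.Alternates holds up to MaxAlternates candidate phrases.
            foreach (RecognizedPhrase phrase in e.Result.Alternates)
            {
                Console.WriteLine("{0} (confidence {1:0.00})", phrase.Text, phrase.Confidence);
            }
        }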