internal void CreateEngine(SpeechController speechController, string source)
        {
            _speechController = speechController;

            _cultureInfo = LanguageSelector.GetVoice(source); // listen in source language (speech limitation)
            _speechRecognitionEngine = new SpeechRecognitionEngine(_cultureInfo);
        }
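The engine above is created but never wired up in this snippet. A minimal sketch of the usual follow-up steps, with an illustrative Start helper and an assumed SpeechController.OnRecognized callback (neither is part of the original):

        // Illustrative continuation, not part of the original source.
        internal void Start(Grammar grammar)
        {
            _speechRecognitionEngine.LoadGrammar(grammar);      // grammar should match _cultureInfo
            _speechRecognitionEngine.SpeechRecognized +=
                (s, e) => _speechController.OnRecognized(e.Result.Text);  // OnRecognized is assumed
            _speechRecognitionEngine.SetInputToDefaultAudioDevice();
            _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }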
Example #2
		public MainWindow()
		{
			InitializeComponent();
			NewGame();
			try
			{
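				// probe only: this constructor throws if no en-US recognizer is installed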
				var sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));
			}
			catch (Exception ex)
			{
				MessageBox.Show("Печаль, печаль, печаль.\nНе хочу тебя расстраивать дружище, но кажется на твоем компе не установлен Speech Recognition для английского языка.\nУстанови его, и может быть удача улыбнется тебе.");
				Close();
			}
			allCommand.Text = @"Aa [ ei ] [эй]
Bb [ bi: ] [би]
Cc [ si: ] [си]
Dd [ di: ] [ди]
Ee [ i: ] [и]
Ff [ ef ] [эф]
Gg [ dʒi: ] [джи]
Hh [ eitʃ ] [эйч]
Ii [ ai ] [ай]
Jj [ dʒei ] [джей]";
			Task.Factory.StartNew(Run);
		}
Example #3
        public VoiceRecognizer()
        {
            try
            {
                // Create a new SpeechRecognitionEngine instance.
                voiceEngine = new SpeechRecognitionEngine(new CultureInfo("en-US"));

                // Setup the audio device
                voiceEngine.SetInputToDefaultAudioDevice();

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(CommandPool.BuildSrgsGrammar());
                voiceEngine.LoadGrammar(g);

                //voiceEngine.EndSilenceTimeout = new TimeSpan(0, 0, 1);

                // Register a handler for the SpeechRecognized event
                voiceEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);

                // Start listening in multiple mode (that is, don't quit after a single recognition)
                voiceEngine.RecognizeAsync(RecognizeMode.Multiple);
                IsSetup = true;
            }
            catch(Exception e)
            {
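                // setup failed (no recognizer, microphone, or grammar); callers should check IsSetup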
                IsSetup = false;
            }
        }
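
CommandPool.BuildSrgsGrammar() is not shown. A sketch of what it might return, building an SrgsDocument in code (the class and the command list here are assumptions, not the original implementation):

using System.Speech.Recognition.SrgsGrammar;

// Hypothetical stand-in for the CommandPool referenced above.
internal static class CommandPool
{
    public static SrgsDocument BuildSrgsGrammar()
    {
        // a single rule holding a one-of list of spoken commands (commands are illustrative)
        var rule = new SrgsRule("commands", new SrgsOneOf("play", "pause", "stop"));
        var doc = new SrgsDocument();
        doc.Rules.Add(rule);
        doc.Root = rule;    // the Grammar(SrgsDocument) constructor starts from the root rule
        return doc;
    }
}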
Example #4
        public void initRS()
        {
            try
            {
                SpeechRecognitionEngine sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));

                var words = new Choices();
                words.Add("Hello");
                words.Add("Jump");
                words.Add("Left");
                words.Add("Right");

                var gb = new GrammarBuilder();
                gb.Culture = new System.Globalization.CultureInfo("en-US");
                gb.Append(words);
                Grammar g = new Grammar(gb);

                sre.LoadGrammar(g);
                
                sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
                sre.SetInputToDefaultAudioDevice();
                sre.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception e)
            {
                label1.Text = "init RS Error : " + e.ToString();
            }
        }
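
The sre_SpeechRecognized handler referenced above is not shown. A minimal sketch, assuming the same WinForms label1 used in initRS:

        // Sketch only: recognition events arrive on a worker thread, so marshal to the UI thread.
        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            label1.BeginInvoke((Action)(() =>
                label1.Text = e.Result.Text + " (" + e.Result.Confidence.ToString("F2") + ")"));
        }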
Example #5
        private void CreateSpeechRecognition()
        {
            //Initialize speech recognition
            var recognizerInfo = (from a in SpeechRecognitionEngine.InstalledRecognizers()
                                  where a.Culture.Name == this.language
                                  select a).FirstOrDefault();

            if (recognizerInfo != null)
            {
                this.speechEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
                Choices recognizerString = new Choices();

                recognizerString.Add(this.words);

                GrammarBuilder grammarBuilder = new GrammarBuilder();

                //Specify the culture to match the recognizer in case we are running in a different culture.
                grammarBuilder.Culture = recognizerInfo.Culture;
                grammarBuilder.Append(recognizerString);

                // Create the actual Grammar instance, and then load it into the speech recognizer.
                var grammar = new Grammar(grammarBuilder);

                //Load the recognition strings
                this.speechEngine.LoadGrammarAsync(grammar);
                this.speechEngine.SpeechRecognized += SreSpeechRecognized;

                this.speechEngine.SetInputToDefaultAudioDevice();
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
        }
Example #6
        private void Init(string[] objects)
        {
            try
            {
                mRecog = new SpeechRecognitionEngine();

                GrammarBuilder builder = new GrammarBuilder();
                builder.Append(triggerPhrase);
                Grammar defaultGrammar = new Grammar(builder);
                defaultGrammar.Name = "Trigger Only";
                mRecog.LoadGrammar(defaultGrammar);

                builder = new GrammarBuilder();
                builder.Append(triggerPhrase);
                builder.Append(new Choices(objects));
                Grammar grammar = new Grammar(builder);
                grammar.Name = "Tracker Triggers";
                mRecog.LoadGrammar(grammar);

                // Configure audio input and events
                mRecog.SetInputToDefaultAudioDevice();
                mRecog.SpeechDetected += mRecog_SpeechDetected;
                mRecog.SpeechHypothesized += mRecog_SpeechHypothesized;
                mRecog.SpeechRecognized += mRecog_SpeechRecognized;
                mRecog.SpeechRecognitionRejected += mRecog_SpeechRecognitionRejected;
                mRecog.RecognizeCompleted += mRecog_RecognizeCompleted;
            }
            catch (Exception ex)
            {
            }
        }
Example #7
 // ==========================================
 //  CONSTRUCTOR
 // ==========================================
 public WSRSpeechEngine(String name, String language, double confidence)
 {
     this.Name = name;
     this.Confidence = confidence;
     this.cfg = WSRConfig.GetInstance();
     this.engine = new SpeechRecognitionEngine(new System.Globalization.CultureInfo(language));
 }
Example #8
        private void btn_connect_Click(object sender, EventArgs e)
        {
            ushort port;
            ushort.TryParse(txt_port.Text, out port);
            try
            {
                current_player = new AssPlayer(players[cmb_players.SelectedItem.ToString()], txt_host.Text, port);
            }
            catch(Exception ex)
            {
                MessageBox.Show("Could not connect: " + ex.Message);
                return;
            }
            voice_threshold = (float)num_voice_threshold.Value;

            recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
            Grammar player_grammar = prepare_grammar(current_player.commands);
            recognizer.LoadGrammar(player_grammar);
            recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.RecognizeAsync(RecognizeMode.Multiple);

            taskbar_icon.Visible = true;
            Hide();
        }
Example #9
    /*
     * SpeechRecognizer
     *
     * @param GName - grammar file name
     */
    public SpeechRecognizer(string GName, int minConfidence)
    {
        //creates the speech recognizer engine
        sr = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-PT"));
        sr.SetInputToDefaultAudioDevice();
        Console.WriteLine("confiança : " + minConfidence);
        sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", minConfidence);

        Grammar gr = null;

        //verify the file exists, then load the Grammar file
        if (System.IO.File.Exists(GName))
        {
            gr = new Grammar(GName);
            gr.Enabled = true;
        }
        else
            Console.WriteLine("Can't read grammar file");

        //load the Grammar into the speech engine (skip if the file was missing, since LoadGrammar(null) throws)
        if (gr != null)
            sr.LoadGrammar(gr);

        //assigns a method to execute when speech is recognized
        sr.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);

        //assigns a method to execute when speech is rejected (not recognized)
        sr.SpeechRecognitionRejected +=
          new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejected);

        // Start asynchronous, continuous speech recognition.
        sr.RecognizeAsync(RecognizeMode.Multiple);
    }
Example #10
        private void InitSpeechRecognition()
        {
            speechRecognizer = new SpeechRecognitionEngine();
            speechRecognizer.SetInputToDefaultAudioDevice();

            speechRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechRecognizer_SpeechRecognized);
        }
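
Note that this initializer neither loads a grammar nor starts recognition; both presumably happen elsewhere. A minimal completion, as a sketch, would be:

            // Sketch only: dictation grammar plus an asynchronous start, if no other code does this.
            speechRecognizer.LoadGrammar(new DictationGrammar());
            speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);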
Example #11
        /// <summary>
        /// Constructor
        /// </summary>
        public MainWindow()
        {
            this.InitializeComponent();
            this.SetEventHandler();

            this.kinectSensorChooser = null;

            this.pixelBuffer = null;
            this.skeletonBuffer = null;
            this.mainImageBuffer = null;

            this.speechRecognitionEngine = null;

            this.gameState = GameState.None;
            this.sound = new Sound();

            this.barArray = new Bar[2];

            for (int i = 0; i < this.barArray.Length; i++)
                this.barArray[i] = new Bar(this, i + 1);

            this.ball = new Ball(this);
            this.stage = new Stage();
            this.score = new Score(this.sound);

            this.life = 0;

            this.blockArray = null;
        }
Example #12
        public ComponentControl()
        {
            this.AudioSource = new KinectAudioSource();

            this.AudioSource.FeatureMode = true;
            this.AudioSource.AutomaticGainControl = false;
            this.AudioSource.SystemMode = SystemMode.OptibeamArrayOnly;
            this.AudioSource.BeamChanged += new EventHandler<BeamChangedEventArgs>(AudioSource_BeamChanged);

            this.Recognizer = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();

            if(this.Recognizer == null) {
                throw new Exception("Could not find Kinect speech recognizer");
            }

            this.Engine = new SpeechRecognitionEngine(Recognizer.Id);
            this.Engine.UnloadAllGrammars();

            this.LoadGrammer();

            this.AudioStream = this.AudioSource.Start();
            this.Engine.SetInputToAudioStream(this.AudioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

            this.Engine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(Engine_SpeechHypothesized);

            this.Engine.RecognizeAsync(RecognizeMode.Multiple);
            Console.WriteLine("Speech recognition initialized");
        }
Example #13
        //here is the fun part: create the speech recognizer
        private SpeechRecognitionEngine CreateSpeechRecognizer()
        {
            //set recognizer info
            RecognizerInfo ri = GetKinectRecognizer();
            //create instance of SRE
            SpeechRecognitionEngine sre;
            sre = new SpeechRecognitionEngine(ri.Id);

            //Now we need to add the words we want our program to recognise
            var grammar = new Choices();
            grammar.Add("hello");
            grammar.Add("goodbye");

            //set culture - language, country/region
            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(grammar);

            //set up the grammar builder
            var g = new Grammar(gb);
            sre.LoadGrammar(g);

            //Set events for recognizing, hypothesising and rejecting speech
            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechHypothesized += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
            return sre;
        }
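
GetKinectRecognizer() is referenced here (and in several later examples) but never defined. A sketch that follows the same matching logic Example #25 applies inline:

        // Sketch: find an installed recognizer flagged for Kinect with an en-US culture.
        private static RecognizerInfo GetKinectRecognizer()
        {
            foreach (RecognizerInfo recognizer in SpeechRecognitionEngine.InstalledRecognizers())
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase) &&
                    "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }
            return null;
        }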
Example #14
        public void StartListening()
        {
            if (null != _ri)
            {
                _speechEngine = new SpeechRecognitionEngine(_ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    _speechEngine.LoadGrammar(g);
                }

                _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
                _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

                // let the convertStream know speech is going active
                _convertStream.SpeechActive = true;

     
                _speechEngine.SetInputToAudioStream(
                    _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);

     
            }
        }
Example #15
        /// <summary>
        /// Initializes a new instance of the <see cref="MainWindow"/> class.
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();

            try
            {
                // create the engine
                //speechRecognitionEngine = createSpeechEngine("de-DE");
                //speechRecognitionEngine = createSpeechEngine(CultureInfo.CurrentCulture.Name);
                speechRecognitionEngine = createSpeechEngine("es-ES");

                // hook to events
                speechRecognitionEngine.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(engine_AudioLevelUpdated);

                // Create and load a dictation grammar.
                speechRecognitionEngine.LoadGrammar(new DictationGrammar());

                speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);

                // use the system's default microphone
                speechRecognitionEngine.SetInputToDefaultAudioDevice();

                // start listening
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message, "Voice recognition failed");
            }
        }
Example #16
void BuildSpeechEngine(RecognizerInfo rec)
{
    _speechEngine = new SpeechRecognitionEngine(rec.Id);

    var choices = new Choices();
    choices.Add("venus");
    choices.Add("mars");
    choices.Add("earth");
    choices.Add("jupiter");
    choices.Add("sun");

    var gb = new GrammarBuilder { Culture = rec.Culture };
    gb.Append(choices);

    var g = new Grammar(gb);

    _speechEngine.LoadGrammar(g);
    //recognized a word or words that may be a component of multiple complete phrases in a grammar.
    _speechEngine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(SpeechEngineSpeechHypothesized);
    //receives input that matches any of its loaded and enabled Grammar objects.
    _speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_speechEngineSpeechRecognized);
    //receives input that does not match any of its loaded and enabled Grammar objects.
    _speechEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(_speechEngineSpeechRecognitionRejected);


    //C# threads are MTA by default and calling RecognizeAsync on the same thread will cause a COM exception.
    var t = new Thread(StartAudioStream);
    t.Start();
}
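
StartAudioStream is not shown. A sketch of what it might do for a Kinect v1 sensor, assuming a _sensor field, following the audio-format pattern of Examples #12 and #25:

    // Assumed helper: runs on its own thread to avoid the MTA/COM issue noted above.
    private void StartAudioStream()
    {
        System.IO.Stream audioStream = _sensor.AudioSource.Start();   // _sensor is an assumed KinectSensor field
        _speechEngine.SetInputToAudioStream(audioStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }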
Example #17
        public SpeechRecogniser()
        {
            RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
            if (ri == null)
                return;

            sre = new SpeechRecognitionEngine(ri.Id);

            // Build a simple grammar of instrument phrases
            var instruments = new Choices();
            foreach (var phrase in InstrumentPhrases)
                instruments.Add(phrase.Key);

            var objectChoices = new Choices();
            objectChoices.Add(instruments);

            var actionGrammar = new GrammarBuilder();
            //actionGrammar.AppendWildcard();
            actionGrammar.Append(objectChoices);

            var gb = new GrammarBuilder();
            gb.Append(actionGrammar);

            var g = new Grammar(gb);
            sre.LoadGrammar(g);
            sre.SpeechRecognized += sre_SpeechRecognized;
            sre.SpeechHypothesized += sre_SpeechHypothesized;
            sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sre_SpeechRecognitionRejected);

            var t = new Thread(StartDMO);
            t.Start();

            valid = true;
        }
Example #18
        private static SpeechRecognitionEngine GetSpeechRecognitionEngine()
        {
            var sre = new SpeechRecognitionEngine();

            sre.LoadGrammar(new DictationGrammar());
            sre.SetInputToDefaultAudioDevice();

            sre.SpeechRecognized += (s, e) =>
            {
                if (e.Result != null &&
                    !String.IsNullOrEmpty(e.Result.Text))
                {
                    using (new ConsoleForegroundColor(ConsoleColor.Green))
                    {
                        Console.WriteLine(e.Result.Text);
                    }

                    return;
                }

                using (new ConsoleForegroundColor(ConsoleColor.Red))
                {
                    Console.WriteLine("Recognized text not available.");
                }
            };
            //sr.SpeechRecognized += SpeechRecognizedHandler;

            return sre;
        }
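
ConsoleForegroundColor is a small helper that is not shown. A plausible sketch (an assumption, not the original implementation):

        // Assumed implementation: swap the console color and restore it on Dispose.
        sealed class ConsoleForegroundColor : IDisposable
        {
            private readonly ConsoleColor _previous;

            public ConsoleForegroundColor(ConsoleColor color)
            {
                _previous = Console.ForegroundColor;
                Console.ForegroundColor = color;
            }

            public void Dispose()
            {
                Console.ForegroundColor = _previous;
            }
        }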
Example #19
        public void InicializeSpeechRecognize()
        {
            RecognizerInfo ri = GetKinectRecognizer();
            if (ri == null)
            {
                throw new RecognizerNotFoundException();
            }

            try
            {
                _sre = new SpeechRecognitionEngine(ri.Id);
            }
            catch(Exception e)
            {
                Console.WriteLine(e.Message);
                throw;
            }

            var choices = new Choices();
            foreach (CommandSpeechRecognition cmd in _commands.Values)
            {
                choices.Add(cmd.Choise);
            }

            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(choices);
            var g = new Grammar(gb);

            _sre.LoadGrammar(g);
            _sre.SpeechRecognized += SreSpeechRecognized;
            _sre.SpeechHypothesized += SreSpeechHypothesized;
            _sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
        }
Example #20
        public SpeechInput(Settings settings, MusicList musicCollection, string playerPath) {
            ModeTimer = new CommandModeTimer();
            RNG = new Random();
            AppSettings = settings;
            SRecognize = new SpeechRecognitionEngine();
            Player = new Aimp3Player(playerPath);
            if(musicCollection != null) {
                MusicCollection = musicCollection;
            } else {
                throw new ArgumentNullException(nameof(musicCollection));
            }
            InitCommands();

            SRecognize.SpeechRecognized += SRecognize_SpeechRecognized;

            try {
                LoadGrammar();

                SRecognize.SetInputToDefaultAudioDevice();
                SRecognize.RecognizeAsync(RecognizeMode.Multiple);
            } catch(Exception e) {
                System.Windows.Forms.MessageBox.Show("Error while starting SpeechInput\n" + e.ToString());
            }

            MusicCollection.SongListUpdated += (s, a) => LoadGrammar();
        }
Example #21
        public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
        {
            SessionStorage = new SessionStorage();
            if(speechSynthesizer==null)
            {
                speechSynthesizer = new SpeechSynthesizer();
                speechSynthesizer.SetOutputToDefaultAudioDevice();
            }
            _speechSynthesizer = speechSynthesizer;
            if(speechRecognition==null)
            {
                speechRecognition = new SpeechRecognitionEngine(
                    new System.Globalization.CultureInfo("en-US")
                );
                // Create a default dictation grammar.
                DictationGrammar defaultDictationGrammar = new DictationGrammar();
                defaultDictationGrammar.Name = "default dictation";
                defaultDictationGrammar.Enabled = true;
                speechRecognition.LoadGrammar(defaultDictationGrammar);
                // Create the spelling dictation grammar.
                DictationGrammar spellingDictationGrammar = new DictationGrammar("grammar:dictation#spelling");
                spellingDictationGrammar.Name = "spelling dictation";
                spellingDictationGrammar.Enabled = true;
                speechRecognition.LoadGrammar(spellingDictationGrammar);

                // Configure input to the speech recognizer.
                speechRecognition.SetInputToDefaultAudioDevice();
            }
            _speechRecognition = speechRecognition;
        }
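
A short usage sketch for the engine configured above (ListenOnce is illustrative and not part of the original class):

        // Illustrative: one blocking dictation pass using the configured engine.
        public string ListenOnce()
        {
            RecognitionResult result = _speechRecognition.Recognize();  // returns null on silence/timeout
            return result == null ? null : result.Text;
        }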
Example #22
 /// <summary>
 /// Constructor for a Microphone
 /// </summary>
 /// <param name="sre">The speech recognition engine associate with this microphone</param>
 /// <param name="status">The status of the microphone</param>
 /// <param name="shouldBeOn">Should the speech recognition engine for this microphone be on</param>
 /// <param name="port">The por this microphone is asociated with</param>
 public Microphone(SpeechRecognitionEngine sre, UDPClient client, string status, bool shouldBeOn, int port)
 {
     this.client = client;
     this.sre = sre;
     this.status = status;
     this.port = port;
 }
Example #23
        public static SpeechRecognitionEngine InitializeSRE()
        {
            //Create the speech recognition engine
            SpeechRecognitionEngine sre = new SpeechRecognitionEngine();

            //Set the audio device to the OS default
            sre.SetInputToDefaultAudioDevice();

            // Reset the Grammar
            sre.UnloadAllGrammars();

            // Load the plugins
            LoadPlugins();

            //Load all of the grammars
            foreach (IJarvisPlugin plugin in _plugins)
                sre.LoadGrammar(plugin.getGrammar());

            //Add an event Handler before recognition starts
            sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);

            //Start continuous recognition; do not dispose the engine here, since it is returned to the caller
            sre.RecognizeAsync(RecognizeMode.Multiple);

            //Block until the application signals stop
            while (!Jarvis.JarvisMain.stop)
            {
            }

            return sre;
        }
Example #24
 public static SpeechRecognitionEngine getEngine(String lang)
 {
     if(init)
         recEngine.Dispose();
     Console.WriteLine("Kastat current engine");
     culture = new System.Globalization.CultureInfo(lang);
     choices = new Choices();
     grammarBuilder = new GrammarBuilder();
     VoiceCommands.Init(lang);
     choices.Add(VoiceCommands.GetAllCommands());
     grammarBuilder.Culture = culture;
     grammarBuilder.Append(choices);
     grammar = new Grammar(grammarBuilder);
     Console.WriteLine("Initialiserat svenskt grammar");
     try
     {
         recEngine = new SpeechRecognitionEngine(culture);
         recEngine.LoadGrammarAsync(grammar);
         Console.WriteLine("Laddat enginen med " + lang);
     }
     catch (UnauthorizedAccessException e)
     {
         Console.WriteLine("Error: UnauthorizedAccessException");
         Console.WriteLine(e.ToString());
     } 
     init = true;
     recEngine.SetInputToDefaultAudioDevice();
     return recEngine;
 }
Example #25
        public SpeechRecognizer(string file, KinectSensor sensor)
        {
            this.grammarFile = file;
            this.kinectSensor = sensor;
            audioSource = kinectSensor.AudioSource;
            audioSource.AutomaticGainControlEnabled = false;
            audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

            Func<RecognizerInfo, bool> matchingFunc = r =>
            {
                string value;
                r.AdditionalInfo.TryGetValue("Kinect", out value);
                return "True".Equals(value, StringComparison.InvariantCultureIgnoreCase) && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
            };
            var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().Where(matchingFunc).FirstOrDefault();
            if (recognizerInfo == null)
                return;

            speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

            var grammar = new Grammar(grammarFile);
            speechRecognitionEngine.LoadGrammar(grammar);

            audioStream = audioSource.Start();
            speechRecognitionEngine.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

            speechRecognitionEngine.AudioStateChanged += onAudioStateChanged;
            speechRecognitionEngine.SpeechRecognized += onSpeechRecognized;
            speechRecognitionEngine.RecognizeCompleted += onSpeechRecognizeCompleted;
            speechRecognitionEngine.EmulateRecognizeCompleted += onEmulateRecognizeCompleted;
        }
Example #26
        //Speech recognizer
        private SpeechRecognitionEngine CreateSpeechRecognizer()
        {
            RecognizerInfo ri = GetKinectRecognizer();

            SpeechRecognitionEngine sre;
            sre = new SpeechRecognitionEngine(ri.Id);

            //words we need the program to recognise
            var grammar = new Choices();
            grammar.Add(new SemanticResultValue("moustache", "MOUSTACHE"));
            grammar.Add(new SemanticResultValue("top hat", "TOP HAT"));
            grammar.Add(new SemanticResultValue("glasses", "GLASSES"));
            grammar.Add(new SemanticResultValue("sunglasses", "SUNGLASSES"));
            grammar.Add(new SemanticResultValue("tie", "TIE"));
            grammar.Add(new SemanticResultValue("bow", "BOW"));
            grammar.Add(new SemanticResultValue("bear", "BEAR"));
            //etc

            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(grammar);

            var g = new Grammar(gb);
            sre.LoadGrammar(g);

            //Events for recognising and rejecting speech
            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
            return sre;
        }
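
Because the choices above carry SemanticResultValue tags, the recognized handler can branch on the semantic value rather than the raw text. A sketch (the original SreSpeechRecognized is not shown):

        // Sketch of a handler consuming the semantic tags loaded above.
        private void SreSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Confidence < 0.3) return;      // ignore low-confidence results

            switch (e.Result.Semantics.Value.ToString())
            {
                case "MOUSTACHE": /* draw the moustache */ break;
                case "TOP HAT":   /* draw the top hat   */ break;
                // ... remaining accessories
            }
        }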
Example #27
        public void StartListening()
        {
            if (null != _ri)
            {
                _speechEngine = new SpeechRecognitionEngine(_ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    _speechEngine.LoadGrammar(g);
                }

                _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
                _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

                // let the convertStream know speech is going active
                _convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                _speechEngine.SetInputToAudioStream(
                    _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);

                //_isInTrainingMode = true;
            }
            //else
            //    throw new InvalidOperationException("RecognizerInfo cannot be null");
        }
Example #28
        public MainWindow()
        {
            InitializeComponent();

            var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
            //config.Populate();
            config.Save();
            var engine = new SpeechRecognitionEngine();

            var builder = new GrammarBuilder();
            builder.Append( "tag" );
            builder.Append( new Choices( "baron", "dragon" ) );

            engine.RequestRecognizerUpdate();
            engine.LoadGrammar( new Grammar( builder ) );

            engine.SpeechRecognized += engine_SpeechRecognized;

            engine.SetInputToDefaultAudioDevice();
            engine.RecognizeAsync( RecognizeMode.Multiple );

            CompositionTarget.Rendering += CompositionTarget_Rendering;

            this.DataContext = this;
        }
Example #29
        public void InitializeSpeechRecognitionEngine(String filePath)
        {
            MySpeechRecognitionEngine = new SpeechRecognitionEngine();
            //MySpeechRecognitionEngine.SetInputToDefaultAudioDevice();

            MySpeechRecognitionEngine.UnloadAllGrammars();

            try
            {
                MySpeechRecognitionEngine.SetInputToWaveFile(filePath);

                Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));

                MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());

                //attach handlers before starting recognition so no early events are missed
                MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;
                MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEnginee_SpeechRecognized;
                MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEnginee_AudioStateChanged;
                MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;

                MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
            }

            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }
        }
Example #30
        /// <summary>
        /// Initializes a new instance of the MainWindow class
        /// </summary>
        public MainWindow()
        {
            // initialize the MainWindow
            this.InitializeComponent();

            // only one sensor is currently supported
            this.kinectSensor = KinectSensor.GetDefault();

            // set IsAvailableChanged event notifier
            this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

            // open the sensor
            this.kinectSensor.Open();

            // set the status text
            this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
                                                            : Properties.Resources.NoSensorStatusText;

            // open the reader for the body frames
            this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();

            // set the BodyFramedArrived event notifier
            this.bodyFrameReader.FrameArrived += this.Reader_BodyFrameArrived;

            // initialize the BodyViewer object for displaying tracked bodies in the UI
            this.kinectBodyView = new KinectBodyView(this.kinectSensor);

            // initialize the GestureDetector object
            this.gestureResultView = new GestureResultView(false, false, false, 0.0f, 0.0f, -1.0f);
            this.gestureDetector   = new GestureDetector(this.kinectSensor, this.gestureResultView);

            this.gestureResultGrid.DataContext = this.gestureResultView;

            // set our data context objects for display in UI
            this.DataContext = this;
            this.kinectBodyViewbox.DataContext = this.kinectBodyView;

            using (MasterEntities db = new MasterEntities())
            {
                //Retrieve data from stored procedure
                var data = db.TodayMob();

                foreach (var x in data)
                {
                    this.today_mobility.Text = Convert.ToString(x.Minute.Value) + "  minutes";
                }

                //Retrieve data from stored procedure
                var liters = db.TodayHydratation();

                foreach (var x in liters)
                {
                    this.today_hydratation.Text = Convert.ToString(x.Liters.Value) + "  Liters";
                }

                //Retrieve data from stored procedure
                var alarms = db.GetTotalAlarms();

                foreach (var x in alarms)
                {
                    this.totalAlarms.Text = Convert.ToString(x.Value);
                }
            }

            //SPEECH RECOGNITION
            // grab the audio stream
            IReadOnlyList <AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;

            System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

            // create the convert stream
            this.kinectAudioStream = new KinectAudioStream(audioStream);

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);
                Choices commands = new Choices();
                commands.Add(new SemanticResultValue("help", "HELP"));
                commands.Add(new SemanticResultValue("please help", "HELP"));
                commands.Add(new SemanticResultValue("please", "PLEASE"));
                commands.Add(new SemanticResultValue("ambulance", "AMBULANCE"));
                commands.Add(new SemanticResultValue("police", "POLICE"));

                var gb = new GrammarBuilder {
                    Culture = ri.Culture
                };
                gb.Append(commands);
                var g = new Grammar(gb);
                this.speechEngine.LoadGrammar(g);

                this.speechEngine.SpeechRecognized += this.SpeechRecognized;

                this.kinectAudioStream.SpeechActive = true;
                this.speechEngine.SetInputToAudioStream(
                    this.kinectAudioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000,
                                                                      16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                Application.Current.Shutdown();
            }
        }
Example #31
        private void Form1_Load(object sender, EventArgs e)
        {
            negro.Visible    = false;
            azul.Visible     = false;
            rojo.Visible     = false;
            verde.Visible    = false;
            amarillo.Visible = false;
            blanco.Visible   = false;
            marron.Visible   = false;
            rosa.Visible     = false;
            morado.Visible   = false;

            //Create the list that will hold the phrases to recognize, plus an engine object for
            //our speech recognition, then add the allowed words to the list

            Choices lista = new Choices();
            SpeechRecognitionEngine grabar = new SpeechRecognitionEngine();

            lista.Add(new string[] { "negro", "azul", "rojo", "verde", "amarillo", "blanco",
                                     "marron", "rosa", "morado", "todos", "ninguno", "salir" });

            Grammar gramatica = new Grammar(new GrammarBuilder(lista));

            try
            {
                grabar.SetInputToDefaultAudioDevice();
                grabar.LoadGrammar(gramatica);
                grabar.SpeechRecognized += reconocimiento;
                grabar.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception)
            {
                throw;
            }

            //Variable for the web response
            WebResponse response = null;

            try
            {
                //create a list of bulbs according to the devices we have
                bombilla bombilla1 = new bombilla(true, 200, "none", new List <string>()
                {
                    "0.4043", "0.4368"
                }, true, 254, "colorloop");
                bombilla bombilla2 = new bombilla(true, 200, "none", new List <string>()
                {
                    "0.4043", "0.4368"
                }, true, 254, "colorloop");


                List <bombilla> bombillas = new List <bombilla>
                {
                    bombilla1,
                    bombilla2
                };

                //URL of our Philips HUE controller
                string usuario = "josemanuelDAM";
                string URL     = "http://192.168.1.158/api";
                string uri     = String.Format("{0}/{1}/lights", URL, usuario);

                HttpWebRequest httpWebRequest = (HttpWebRequest)WebRequest.Create(uri);

                httpWebRequest.ContentType = "application/json";

                //HTTP method for the API call
                httpWebRequest.Method = "POST";

                string requestBody = string.Empty;
                int    contador    = 1;

                foreach (var bombilla in bombillas)
                {
                    //add the name of the device we want to modify;
                    //here the bulbs are numbered from 1 upward
                    requestBody += "\"" + contador.ToString() + "\":";
                    //append the bombilla object's data to the body as JSON
                    requestBody += JsonConvert.SerializeObject(bombilla);
                    requestBody += ",";
                    contador     = contador + 1;
                }

                //remove the trailing , from the JSON
                if (!String.IsNullOrEmpty(requestBody))
                {
                    requestBody = requestBody.Remove(requestBody.Length - 1);
                }

                httpWebRequest.AllowAutoRedirect = true;

                //convert the string into a byte array to write into the request sent to the API
                byte[] bytes = Encoding.UTF8.GetBytes(requestBody);

                httpWebRequest.ContentLength = bytes.Length;

                using (Stream outputStream = httpWebRequest.GetRequestStream())
                {
                    outputStream.Write(bytes, 0, bytes.Length);
                }

                //receive the response and treat it as a text string
                string strResult;

                response = httpWebRequest.GetResponse();

                using (StreamReader stream = new StreamReader(response.GetResponseStream()))
                {
                    strResult = stream.ReadToEnd();

                    //deserialize the incoming JSON into a string
                    string reports = JsonConvert.DeserializeObject <string>(strResult);

                    stream.Close();
                }
            }
            catch (Exception ex)
            {
                string mensaje = ex.Message;
            }
            finally
            {
                if (response != null)
                {
                    response.Close();
                    response = null;
                }
            }
        }
Example #32
        /// <summary>
        /// Execute initialization tasks.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            LoadCrayons(sender, e);
            // Only one sensor is supported
            CurrentSensor = KinectSensor.GetDefault();

            if (CurrentSensor != null)
            {
                // open the sensor
                CurrentSensor.Open();

                // grab the audio stream
                IReadOnlyList <AudioBeam> audioBeamList = CurrentSensor.AudioSource.AudioBeams;
                System.IO.Stream          audioStream   = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this._convertStream = new KinectAudioStream(audioStream);

                _bodyReader = CurrentSensor.BodyFrameSource.OpenReader();
                _bodyReader.FrameArrived += BodyReader_FrameArrived;

                _bodies = new Body[CurrentSensor.BodyFrameSource.BodyCount];
            }
            else
            {
                // on failure, set the status text
                this.StatusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this._speechEngine = new SpeechRecognitionEngine(ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this._speechEngine.LoadGrammar(g);
                }

                this._speechEngine.SpeechRecognized          += SpeechRecognized;
                this._speechEngine.SpeechRecognitionRejected += SpeechRejected;

                // let the convertStream know speech is going active
                this._convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                // _speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this._speechEngine.SetInputToAudioStream(
                    this._convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this._speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                this.StatusBarText.FontSize = 45;
                this.StatusBarText.Text     = Properties.Resources.NoSpeechRecognizer;
            }
        }
Example #33
 public YesNoQuestion()
 {
     pSRE = new SpeechRecognitionEngine(MyCultureInfo.PolishCulture);
     pSRE.SetInputToDefaultAudioDevice();
     PrepareGrammar(pSRE);
 }
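
PrepareGrammar is not shown. A sketch of what a Polish yes/no grammar could look like, assuming "tak"/"nie" as the two choices:

 // Assumed helper: loads a two-word yes/no grammar matching the Polish culture above.
 private static void PrepareGrammar(SpeechRecognitionEngine engine)
 {
     var choices = new Choices("tak", "nie");
     var builder = new GrammarBuilder(choices) { Culture = MyCultureInfo.PolishCulture };
     engine.LoadGrammar(new Grammar(builder));
 }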
Example #34
        //
        //  public bool load_listen()
        //
        //  load_listen() establishes the speech recognition engine based on the command glossary stored within the
        //  currently loaded Profile.  load_listen() may fail, returning Boolean FALSE, if a Profile's glossary does
        //  not meet the engine's grammar requirements; load_listen() will also fail, returning Boolean FALSE, should
        //  an exception occur that cannot be resolved within the method.  load_listen() will return Boolean TRUE upon
        //  success.
        //
        // Optimizations : 04.28.15
        //
        public bool load_listen()
        {
            // Don't allocate anything if we have no phrases to hook.
            if (GAVPI.Profile.Profile_Triggers != null &&
                GAVPI.Profile.Profile_Triggers.Count == 0)
            {
                MessageBox.Show("You need to add at least one Trigger");
                return(false);
            }

            vi_syn = GAVPI.Profile.synth;
            vi_syn.SelectVoice(GAVPI.Settings.voice_info);
            vi_sre = new SpeechRecognitionEngine(GAVPI.Settings.recognizer_info);

            GrammarBuilder phrases_grammar = new GrammarBuilder();

            // Grammar must match speech recognition language localization
            phrases_grammar.Culture = GAVPI.Settings.recognizer_info;

            List<string> glossary = new List<string>();

            // Add trigger phrases to the glossary of the voice recognition engine.
            foreach (VI_Phrase trigger in GAVPI.Profile.Profile_Triggers)
            {
                glossary.Add(trigger.value);
            }

            phrases_grammar.Append(new Choices(glossary.ToArray()));
            vi_sre.LoadGrammar(new Grammar(phrases_grammar));

            // event function hook
            vi_sre.SpeechRecognized          += phraseRecognized;
            vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;

            try
            {
                vi_sre.SetInputToDefaultAudioDevice();
            }
            catch (InvalidOperationException exception)
            {
                //  For the time being, we're only catching failures to address an input device (typically a
                //  microphone).
                MessageBox.Show("Have you connected a microphone to this computer?\n\n" +
                                "Please ensure that you have successfull connected and configured\n" +
                                "your microphone before trying again.",
                                "I cannot hear you! (" + exception.Message + ")",
                                MessageBoxButtons.OK,
                                MessageBoxIcon.Exclamation,
                                MessageBoxDefaultButton.Button1);

                return(false);
            }

            vi_sre.RecognizeAsync(RecognizeMode.Multiple);

            try
            {
                // Install Push to talk key hooks.
                KeyboardHook.KeyDown += pushtotalk_keyDownHook;
                KeyboardHook.KeyUp   += pushtotalk_keyUpHook;
                KeyboardHook.InstallHook();
            }
            catch (OverflowException exception)
            {
                //  TODO:
                //  InputManager library, which we rely upon, has issues with .Net 4.5 and throws an Overflow exception.
                //  We'll catch it here and pretty much let it go for now (since Push-to-Talk isn't implemented yet)
                //  with the intent of resolving it later.
                //  Now that push to talk _is_ implemented what the hell do we do.
            }

            if (GAVPI.Settings.pushtotalk_mode != "Hold" && GAVPI.Settings.pushtotalk_mode != "PressOnce")
            {
                pushtotalk_active = true;
            }

            //  We have successfully established an instance of a SAPI engine with a well-formed grammar.

            IsListening = true;

            return(true);
        }
Example #35
        private void Window_Loaded_1(object sender, RoutedEventArgs e)
        {
            if (KinectSensor.KinectSensors.Count > 0)
            {
                //Could be more than one; just use the first
                _sensor = KinectSensor.KinectSensors[0];

                //Check the State of the sensor
                if (_sensor.Status == KinectStatus.Connected)
                {
                    colorBitmap = new WriteableBitmap
                                      (_sensor.ColorStream.FrameWidth,
                                      _sensor.ColorStream.FrameHeight,
                                      96.0, 96.0, PixelFormats.Bgr32, null);
                    //Enable the features
                    _sensor.ColorStream.Enable();
                    _sensor.DepthStream.Enable();
                    _sensor.SkeletonStream.Enable();
                    _sensor.AllFramesReady += _sensor_AllFramesReady; //Double Tab
                    // Start the sensor!
                    try
                    {
                        _sensor.Start();
                    }
                    catch (IOException)
                    {
                        _sensor = null;
                    }
                }
            }
            clearAll();

            _kinectSensor = KinectSensor.KinectSensors[0];
            _kinectSensor.SkeletonStream.Enable();
            _kinectSensor.SkeletonFrameReady     += Sensor_SkeletonFrameReady;
            _gestureRight.GestureRecognizedRight += Gesture_GestureRecognizedRight;
            _gestureLeft.GestureRecognizedLeft   += Gesture_GestureRecognizedLeft;
            _kinectSensor.Start();

            var            recInstalled = SpeechRecognitionEngine.InstalledRecognizers();
            RecognizerInfo rec          = (RecognizerInfo)recInstalled[0];

            _speechEngine = new SpeechRecognitionEngine(rec.Id);

            var choices = new Choices();

            choices.Add("brian");
            choices.Add("jimmy");
            choices.Add("elaine");
            choices.Add("binila");
            choices.Add("clear");
            //choices.Add("MyPictures");
            //choices.Add("paint");


            var gb = new GrammarBuilder {
                Culture = rec.Culture
            };

            gb.Append(choices);
            var g = new Grammar(gb);

            _speechEngine.LoadGrammar(g);
            //recognized a word or words that may be a component of multiple complete phrases in a grammar.
            _speechEngine.SpeechHypothesized += new EventHandler <SpeechHypothesizedEventArgs>(SpeechEngineSpeechHypothesized);
            //receives input that matches any of its loaded and enabled Grammar objects.
            _speechEngine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(_speechEngineSpeechRecognized);

            //C# threads are MTA by default and calling RecognizeAsync in the same thread will cause an COM exception.
            var t = new Thread(StartAudioStream);

            t.Start();
        }
Example #36
        public bool LoadListen()
        {
            // Don't allocate anything if we have no phrases to hook.

            if (App.ActiveProfile == null)
            {
                return(false);
            }

            if (App.ActiveProfile.ProfileTriggers != null &&
                App.ActiveProfile.ProfileTriggers.Count == 0)
            {
                Diagnostics.Log("LoadListen() called without a trigger added.");
                MessageBox.Show("At least one Trigger must be added!");

                return(false);
            }

            _synthesizer = App.ActiveProfile.Synthesizer;
            _synthesizer.SelectVoice(App.Settings.VoiceInfo);
            _speechRecognitionEngine = new SpeechRecognitionEngine(App.Settings.RecognizerInfo);

            GrammarBuilder grammarPhrases = new GrammarBuilder {
                Culture = App.Settings.RecognizerInfo
            };

            // Grammar must match speech recognition language localization

            List <string> glossary = new List <string>();

            // Add trigger phrases to glossary of voice recognition engine.
            if (App.ActiveProfile.ProfileTriggers != null)
            {
                glossary.AddRange(from trigger in App.ActiveProfile.ProfileTriggers
                                  let phrase = (Phrase)trigger
                                               select trigger.Value);
            }

            grammarPhrases.Append(new Choices(glossary.ToArray()));
            _speechRecognitionEngine.LoadGrammar(new Grammar(grammarPhrases));

            // event function hook
            _speechRecognitionEngine.SpeechRecognized          += PhraseRecognized;
            _speechRecognitionEngine.SpeechRecognitionRejected += Recognizer_SpeechRecognitionRejected;

            try
            {
                _speechRecognitionEngine.SetInputToDefaultAudioDevice();
            }

            catch (InvalidOperationException e)
            {
                Diagnostics.Log(e, "No microphone was detected.");
                MessageBox.Show("No microphone was detected!", "Error", MessageBoxButton.OK, MessageBoxImage.Error);
                return(false);
            }

            catch (Exception e)
            {
                Diagnostics.Log(e, "An Unknown error occured when attempting to set default input device.");

                MessageBox.Show("An unknown error has occured, contact support if the problem persists.", "Error",
                                MessageBoxButton.OK, MessageBoxImage.Error);
                return(false);
            }

            _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            // subscribe to Push-to-Talk key hooks.
            KeyboardHook.KeyDown += _pushToTalkKeyDownHook;
            KeyboardHook.KeyUp   += _pushToTalkKeyUpHook;
            KeyboardHook.InstallHook();

            if (App.Settings.PushToTalkMode != "Hold" && App.Settings.PushToTalkMode != "Toggle" &&
                App.Settings.PushToTalkMode != "Single")
            {
                _pushToTalkActive = true;
            }

            //  successfully established an instance of SAPI engine with well-formed grammar.

            IsListening = true;

            return(true);
        }
Example #37
        private void Button_Click_1(object sender, RoutedEventArgs e)
        {
            recognizer = new SpeechRecognitionEngine(new CultureInfo("zh-CN"));

            #region Optional code
            Grammar grammar = new Grammar(new GrammarBuilder("线程"))
            {
                Name = "命令"
            };  //without this grammar, "线程" is usually misrecognized as "县城" (a homophone)
            recognizer.LoadGrammar(grammar);

            GrammarBuilder gb = new GrammarBuilder("设置背景");
            gb.Append(new SemanticResultKey("Color", new Choices(
                                                new SemanticResultValue("红色", Brushes.Red.Color.ToString()),
                                                new SemanticResultValue("绿色", Brushes.Green.Color.ToString()),
                                                new SemanticResultValue("蓝色", Brushes.Blue.Color.ToString())
                                                )));
            grammar = new Grammar(gb)
            {
                Name = "设置背景颜色"
            };
            recognizer.LoadGrammar(grammar);
            grammar.SpeechRecognized += (sender1, e1) => Console.WriteLine(e1.Result.Text);

            gb = new GrammarBuilder();
            Choices choices = new Choices(Enumerable.Range(1, 9).Select(n => n.ToString()).ToArray());
            gb.Append(new SemanticResultKey("num1", choices));
            gb.Append("加");
            gb.Append(new SemanticResultKey("num2", choices));
            gb.Append("等于");
            grammar = new Grammar(gb)
            {
                Name = "加法运算"
            };
            recognizer.LoadGrammar(grammar);

            grammar = new Grammar(new GrammarBuilder("停止识别"))
            {
                Name = "Cancel"
            };
            recognizer.LoadGrammar(grammar);

            recognizer.SpeechRecognized += (sender1, e1) =>
            {
                string s = e1.Result.Text;
                switch (e1.Result.Grammar.Name)
                {
                case "Cancel":
                    recognizer.RecognizeAsyncCancel();
                    break;

                case "命令":
                    s = $"识别到命令,{s}";
                    break;

                case "设置背景颜色":
                    s          = $"识别到命令,{s}";
                    Background = new SolidColorBrush((Color)ColorConverter.ConvertFromString(e1.Result.Semantics["Color"].Value.ToString()));
                    break;

                case "加法运算":
                    var i1 = int.Parse(e1.Result.Semantics["num1"].Value.ToString());
                    var i2 = int.Parse(e1.Result.Semantics["num2"].Value.ToString());
                    s = $"识别到命令,{s}{i1 + i2}";
                    break;
                }
                listBox.Items.Insert(0, s);
                new SpeechSynthesizer().SpeakAsync(s);
            };
            #endregion

            recognizer.LoadGrammar(new DictationGrammar());
            recognizer.SetInputToDefaultAudioDevice();
            recognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #38
        static bool CreateRecognizer()
        {
            // Was the SRE already defined?
            if (recognizer != null)
            {
                // Yes: drop the old instance so it can be recreated.
                recognizer = null;
            }

            try
            {
                recogInfo = null;
                Console.WriteLine("\nSpeech Processor: The following Speech Recognizers are available:");

                foreach (RecognizerInfo ri in SpeechRecognitionEngine.InstalledRecognizers())
                {
                    Console.Write("  "); Console.WriteLine(ri.Description);
                    if (ri.Culture.Name.Equals(strCulture))
                    {
                        recogInfo = ri;
                    }
                }

                if (recogInfo != null)
                {
                    recognizer = new SpeechRecognitionEngine(recogInfo);
                    Console.WriteLine("\nUsing recognizer " + recognizer.RecognizerInfo.Name + " for culture " + strCulture);

                    // Attach event handlers.
                    recognizer.SpeechDetected +=
                        new EventHandler <SpeechDetectedEventArgs>(
                            SpeechDetectedHandler);

                    /*    recognizer.SpeechHypothesized +=
                     *      new EventHandler<SpeechHypothesizedEventArgs>(SpeechHypothesizedHandler);
                     *    recognizer.SpeechRecognitionRejected +=
                     *      new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejectedHandler);
                     */
                    recognizer.SpeechRecognized +=
                        new EventHandler <SpeechRecognizedEventArgs>(
                            SpeechRecognizedHandler);
                    recognizer.RecognizeCompleted +=
                        new EventHandler <RecognizeCompletedEventArgs>(
                            RecognizeCompletedHandler);
                    recognizer.RecognizerUpdateReached +=
                        new EventHandler <RecognizerUpdateReachedEventArgs>(RecognizerUpdateReachedHandler);

                    // Assign input to the recognizer; asynchronous recognition
                    // is started later, once a grammar has been loaded.
                    recognizer.SetInputToDefaultAudioDevice();

                    return(true);
                }
                else
                {
                    Console.WriteLine("\nSpeech Processor: could not find a recognizer for culture ..." + strCulture);
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine("\nSpeech Processor: unable to create Speech Recognition Engine");
            }

            return(false);
        }
示例#39
0
        private void Window_Loaded(object sender, RoutedEventArgs e)
        {
            //this.colorBitmap = new WriteableBitmap(colorFrameDescription.Width, colorFrameDescription.Height, 96.0, 96.0, PixelFormats.Bgr32, null);
            _sensor = KinectSensor.GetDefault();
            this.coordinateMapper = _sensor.CoordinateMapper;
            if (_sensor != null)
            {
                _sensor.Open();
                _backgroundRemoval = new BackgroundRemoval(_sensor.CoordinateMapper, _sensor);
                _reader            = _sensor.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth | FrameSourceTypes.BodyIndex | FrameSourceTypes.Body);
                _reader.MultiSourceFrameArrived += Reader_MultiSourceFrameArrived;

                //grab the audio stream
                IReadOnlyList <AudioBeam> audioBeamList = this._sensor.AudioSource.AudioBeams;
                System.IO.Stream          audioStream   = audioBeamList[0].OpenInputStream();

                //create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);


                // Load the overlay images; the last slot intentionally stays empty.
                string[] files = { "jason.png", "ironman.jpg", "scream.JPG", "ventena.jpg" };
                for (int i = 0; i < files.Length; i++)
                {
                    BitmapImage image = new BitmapImage();
                    image.BeginInit();
                    image.UriSource = new Uri(files[i], UriKind.RelativeOrAbsolute);
                    image.EndInit();
                    images[i] = new ImageBrush(image);
                }

                images[4] = null;

                random = new Random();
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                //this.recognitionSpans = new List<Span> { forwardSpan, backSpan, rightSpan, leftSpan };

                this.speechEngine = new SpeechRecognitionEngine(ri.Id);


                //Use this code to create grammar programmatically rather than from a grammar file.

                var directions = new Choices();
                directions.Add(new SemanticResultValue("mask", "MASK"));
                directions.Add(new SemanticResultValue("masks", "MASK"));



                directions.Add(new SemanticResultValue("shirt", "SHIRT"));
                directions.Add(new SemanticResultValue("shirts", "SHIRT"));
                directions.Add(new SemanticResultValue("upper", "SHIRT"));
                directions.Add(new SemanticResultValue("uppers", "SHIRT"));

                directions.Add(new SemanticResultValue("pant", "PANT"));
                directions.Add(new SemanticResultValue("pants", "PANT"));
                directions.Add(new SemanticResultValue("lower", "PANT"));
                directions.Add(new SemanticResultValue("lowers", "PANT"));

                directions.Add(new SemanticResultValue("change", "CHANGE"));
                directions.Add(new SemanticResultValue("changes", "CHANGE"));
                directions.Add(new SemanticResultValue("swap", "CHANGE"));

                directions.Add(new SemanticResultValue("off", "OFF"));
                directions.Add(new SemanticResultValue("offs", "OFF"));

                directions.Add(new SemanticResultValue("finish", "DONE"));
                directions.Add(new SemanticResultValue("done", "DONE"));



                var gb = new GrammarBuilder {
                    Culture = ri.Culture
                };
                gb.Append(directions);

                var g = new Grammar(gb);


                // Create a grammar from grammar definition XML file.
                //var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar));

                //var g = new Grammar(memoryStream);
                this.speechEngine.LoadGrammar(g);


                this.speechEngine.SpeechRecognized          += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                // No speech recognizer found; voice commands will be unavailable.
            }
        }
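
TryGetKinectRecognizer is not shown in this snippet. A minimal sketch in the spirit of the standard Kinect SDK samples, which check the recognizer's AdditionalInfo dictionary for a Kinect flag; the exact matching rules here are assumptions:

        private static RecognizerInfo TryGetKinectRecognizer()
        {
            foreach (RecognizerInfo recognizer in SpeechRecognitionEngine.InstalledRecognizers())
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);

                //  Prefer a Kinect-specific acoustic model for the en-US culture.
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase) &&
                    "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }

            return null;
        }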
示例#40
0
        /// <summary>
        /// Starts speech recognition, building a grammar from the enabled speech events.
        /// </summary>
        public override void StartSpeechRecognition()
        {
            RecognizerInfo ri = GetKinectRecognizer();

            if (null == ri)
            {
                return;
            }

            // Check if selected recognizer is for Kinect
            string value;

            ri.AdditionalInfo.TryGetValue("Kinect", out value);
            IsKinectRecognizer = "True".Equals(value, StringComparison.OrdinalIgnoreCase);


            iSpeechEngine = new SpeechRecognitionEngine(ri.Id);

            // Create a speech recognition grammar based on our speech events
            var  ear       = Properties.Settings.Default.EarManager;
            var  choices   = new Choices();
            bool noChoices = true;

            foreach (EventSpeech e in ear.Events.Where(e => e.GetType() == typeof(EventSpeech)))
            {
                if (!e.Enabled)
                {
                    continue;
                }

                // For each event, associate its phrases with its semantic value.
                string[] phrases = e.Phrases.Split(new string[] { Environment.NewLine }, StringSplitOptions.None);
                foreach (string phrase in phrases)
                {
                    if (string.IsNullOrWhiteSpace(phrase))
                    {
                        // defensive
                        continue;
                    }
                    choices.Add(new SemanticResultValue(phrase, e.Semantic));
                    noChoices = false;
                }
            }

            if (noChoices)
            {
                // Building the grammar throws an exception if no choices are registered.
                // TODO: review error handling in that function.
                // I guess we should have a Try variant.
                return;
            }

            // Set our culture
            Culture = ri.Culture;
            var gb = new GrammarBuilder {
                Culture = ri.Culture
            };

            gb.Append(choices);

            var g = new Grammar(gb);

            iSpeechEngine.LoadGrammar(g);

            iSpeechEngine.SpeechRecognized          += this.SpeechRecognized;
            iSpeechEngine.SpeechRecognitionRejected += this.SpeechRejected;

            // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
            // This will prevent recognition accuracy from degrading over time.
            iSpeechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

            iSpeechEngine.SetInputToDefaultAudioDevice();
            iSpeechEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
示例#41
0
        private void LoadSpeech()
        {
            try
            {
                engine = new SpeechRecognitionEngine(); // instance
                engine.SetInputToDefaultAudioDevice();  // microphone
                                                        // string[] words = { "olá", "boa noite" };
                                                        // Operations
                Choices c_numero = new Choices();
                for (int i = 0; i <= 100; i++)
                {
                    c_numero.Add(i.ToString());
                }


                // video 03
                Choices c_commandsOfSystem = new Choices();
                c_commandsOfSystem.Add(GrammarRules.WhatTimeIS.ToArray());
                c_commandsOfSystem.Add(GrammarRules.WhatDateIS.ToArray());
                // stop-listening and resume-listening commands ->> shmyt
                c_commandsOfSystem.Add(GrammarRules.ShmytStopListening.ToArray());
                c_commandsOfSystem.Add(GrammarRules.ShmytStartListening.ToArray());
                c_commandsOfSystem.Add(GrammarRules.MinimizeWindow.ToArray());
                c_commandsOfSystem.Add(GrammarRules.MaximizaWindow.ToArray());
                c_commandsOfSystem.Add(GrammarRules.NormalizaWindow.ToArray());
                c_commandsOfSystem.Add(GrammarRules.ChangeVoice.ToArray());
                c_commandsOfSystem.Add(GrammarRules.OpenProgram.ToArray());
                c_commandsOfSystem.Add(GrammarRules.MediaPlayComands.ToArray());

                GrammarBuilder gb_comandOfSystem = new GrammarBuilder();// 4:22
                gb_comandOfSystem.Append(c_commandsOfSystem);

                Grammar g_comandsOfSystem = new Grammar(gb_comandOfSystem);
                g_comandsOfSystem.Name = "sys";

                engine.LoadGrammar(g_comandsOfSystem);//load the grammar into memory

                // GrammarBuilder for numbers
                GrammarBuilder gb_number = new GrammarBuilder();
                gb_number.Append(c_numero);
                gb_number.Append(new Choices("mais", "menos", "vezes", "por"));
                gb_number.Append(c_numero);

                Grammar g_numero = new Grammar(gb_number);
                g_numero.Name = "calc";
                engine.LoadGrammar(g_numero);
                // loading the grammar this way was replaced by the Choices above
                // engine.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(words))));
                // hook the recognition event, as covered in video 03

                #region SpeechRecognition Events
                engine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(rec);
                //progress bar
                engine.AudioLevelUpdated         += new EventHandler <AudioLevelUpdatedEventArgs>(audioLevel);
                engine.SpeechRecognitionRejected += new EventHandler <SpeechRecognitionRejectedEventArgs>(rej);
                #endregion

                #region SpeechSynthesizer Events
                synthesizer.SpeakStarted  += new EventHandler <SpeakStartedEventArgs>(speakStarted);
                synthesizer.SpeakProgress += new EventHandler <SpeakProgressEventArgs>(speakProgress);
                #endregion

                // start recognition
                engine.RecognizeAsync(RecognizeMode.Multiple);

                SPEAKER.Speak("estou carregando as configurações");
            }
            catch (Exception ex)
            {
                MessageBox.Show("Ocorreu erro no LoadSpeech() " + ex.Message);
            }
        }
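
The rec handler itself is not shown. A minimal sketch of how the "calc" grammar results might be evaluated, routing on Grammar.Name as loaded above; the operator parsing and the spoken reply are assumptions:

        private void rec(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Grammar.Name == "calc")
            {
                // Expected shape: "<number> mais|menos|vezes|por <number>"
                string[] w = e.Result.Text.Split(' ');
                int a = int.Parse(w[0]);
                int b = int.Parse(w[2]);
                int r;
                switch (w[1])
                {
                case "mais": r = a + b; break;
                case "menos": r = a - b; break;
                case "vezes": r = a * b; break;
                default: r = b != 0 ? a / b : 0; break; // "por"; guard against division by zero
                }
                SPEAKER.Speak(e.Result.Text + " = " + r);
            }
        }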
示例#42
0
 void initializeSpeechRecognition()
 {
     engine = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
     engine.SetInputToDefaultAudioDevice();
     engine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
 }
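
As written, this engine has no grammar loaded, so starting recognition would throw an InvalidOperationException. A minimal sketch of the missing piece, assuming plain dictation is wanted:

 void startSpeechRecognition()
 {
     // Hypothetical companion to initializeSpeechRecognition():
     // load a free-form dictation grammar and listen continuously.
     engine.LoadGrammar(new DictationGrammar());
     engine.RecognizeAsync(RecognizeMode.Multiple);
 }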
示例#43
0
        static void OnDataReceived(IAsyncResult asyn)
        {
            try
            {
                CSocketPacket theSockId = (CSocketPacket)asyn.AsyncState;
                //end receive...
                int iRx = theSockId.thisSocket.EndReceive(asyn);
                char[] chars          = new char[iRx];
                System.Text.Decoder d = System.Text.Encoding.Default.GetDecoder();
                int           charLen = d.GetChars(theSockId.dataBuffer, 0, iRx, chars, 0);
                System.String message = ((new System.String(chars)).Trim()); //.ToLower();


                if (iRx > 0)
                {
                    //System.String message = enc.GetString(theSockId.dataBuffer);
                    //System.String message = new System.String(theSockId.dataBuffer, 0, iRx);

                    // Console.WriteLine("Received and trimmend:" + message);

                    string[] tokenList = message.Split('#');

                    foreach (string token in tokenList)
                    {
                        // Console.WriteLine("Speech Processor: act token =" + token);

                        if (String.Compare(token, "@close@") == 0)
                        {
                            Console.WriteLine("Speech Processor: received CLOSE");
                            completed = true;
                        }
                        else if (String.Compare(token, "@stop@") == 0)
                        {
                            Console.WriteLine("Speech Processor: received STOP, cleaning up Recognizer !");
                            recognizer.UnloadAllGrammars();
                            recognizer.RequestRecognizerUpdate();
                            recognizer.Dispose();

                            recognizer = null;
                            tts        = null;
                        }
                        else if (token.StartsWith("say:"))
                        {
                            Console.WriteLine("Speech Processor: saying:" + token.Substring(4, token.Length - 4));
                            Speak(token.Substring(4, token.Length - 4));
                        }
                        else if (token.StartsWith("culture:"))
                        {
                            string newCulture = (token.Substring(8, token.Length - 8));
                            Console.WriteLine("Speech Processor: Initialising Engines for new culture:" + newCulture);
                            strCulture = newCulture;
                            CreateSynthesizer();
                            CreateRecognizer();
                        }
                        else if (token.StartsWith("ttsonly:"))
                        {
                            string newCulture = (token.Substring(8, token.Length - 8));
                            Console.WriteLine("Speech Processor: Initialising TTS-Engine for new culture:" + newCulture);
                            strCulture = newCulture;
                            CreateSynthesizer();
                        }
                        else if (token.StartsWith("grammar:"))
                        {
                            string[] words = (token.Substring(8, token.Length - 8)).Split(';');

                            if (recognizer != null)
                            {
                                UpdateGrammar(words);

                                Console.WriteLine("Speech Processor: Starting asynchronous recognition ...");
                                try
                                {
                                    recognizer.RecognizeAsync(RecognizeMode.Multiple);
                                    SocketSend("@SpeechProcessor OK@");
                                }
                                catch (Exception ex)
                                { Console.WriteLine("Speech Processor: could not start asynchronous recognition ..."); }
                            }
                            else
                            {
                                Console.WriteLine("Speech Processor: could not start asynchronous recognition due to missing recognizer.");
                            }
                        }
                        else if (token.StartsWith("confidence:"))
                        {
                            confidenceLevel = double.Parse(token.Substring(11, token.Length - 11), System.Globalization.CultureInfo.InvariantCulture);
                            //Console.WriteLine("confidence update:" + token.Substring(11, token.Length - 11));
                            Console.WriteLine("Speech Processor: confidence update:" + confidenceLevel);
                        }
                        else if (token.StartsWith("speechLoopDelay:"))
                        {
                            speechLoopDelay = int.Parse(token.Substring(16, token.Length - 16), System.Globalization.CultureInfo.InvariantCulture);
                        }
                    }
                }
                else
                {
                    Console.WriteLine("Speech Processor: received empty message, closing down socket !"); completed = true;
                }

                if (!completed)
                {
                    WaitForData();
                }
            }
            catch (ObjectDisposedException)
            {
                Console.WriteLine("Speech Processor: OnDataReceived: Socket has been closed");
            }
            catch (SocketException se)
            {
                Console.WriteLine(se.Message);
            }
        }
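
UpdateGrammar is called above but not shown. A minimal sketch that rebuilds the grammar from the received word list, assuming the recogInfo field from CreateRecognizer; note that in this flow recognition is only started after the grammar is loaded:

        static void UpdateGrammar(string[] words)
        {
            // Sketch: replace any loaded grammars with a fresh Choices-based
            // grammar built from the words received over the socket.
            recognizer.UnloadAllGrammars();

            GrammarBuilder gb = new GrammarBuilder(new Choices(words));
            gb.Culture = recogInfo.Culture;
            recognizer.LoadGrammar(new Grammar(gb));
        }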
示例#44
0
        /// <summary>
        /// Initializes a new instance of the MainWindow class.
        /// </summary>
        public MainWindow()
        {
            // one sensor is currently supported
            this.kinectSensor = KinectSensor.GetDefault();

            if (this.kinectSensor != null)
            {
                // open the sensor
                this.kinectSensor.Open();

                // grab the audio stream
                IReadOnlyList <AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                System.IO.Stream          audioStream   = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            else
            {
                // on failure, set the status text
                //this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                /****************************************************************
                *
                * Use this code to create grammar programmatically rather than from
                * a grammar file.
                *
                * var directions = new Choices();
                * directions.Add(new SemanticResultValue("forward", "FORWARD"));
                * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
                * directions.Add(new SemanticResultValue("straight", "FORWARD"));
                * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
                * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
                * directions.Add(new SemanticResultValue("back", "BACKWARD"));
                * directions.Add(new SemanticResultValue("turn left", "LEFT"));
                * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
                *
                * var gb = new GrammarBuilder { Culture = ri.Culture };
                * gb.Append(directions);
                *
                * var g = new Grammar(gb);
                *
                ****************************************************************/

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this.speechEngine.LoadGrammar(g);
                }

                this.speechEngine.SpeechRecognized          += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                statusText = "No Speech Recogniser";
            }


            // get the coordinate mapper
            this.coordinateMapper = this.kinectSensor.CoordinateMapper;

            // get the depth (display) extents
            FrameDescription frameDescription = this.kinectSensor.DepthFrameSource.FrameDescription;

            // get size of joint space
            this.displayWidth  = frameDescription.Width;
            this.displayHeight = frameDescription.Height;

            // open the reader for the body frames
            this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();

            // populate body colors, one for each BodyIndex
            this.bodyColors = new List <Pen>();

            this.bodyColors.Add(new Pen(Brushes.Red, 6));
            this.bodyColors.Add(new Pen(Brushes.Orange, 6));
            this.bodyColors.Add(new Pen(Brushes.Green, 6));
            this.bodyColors.Add(new Pen(Brushes.Blue, 6));
            this.bodyColors.Add(new Pen(Brushes.Indigo, 6));
            this.bodyColors.Add(new Pen(Brushes.Violet, 6));

            // set IsAvailableChanged event notifier
            this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

            // open the sensor
            this.kinectSensor.Open();

            // set the status text
            this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
                                                            : Properties.Resources.NoSensorStatusText;

            // Create the drawing group we'll use for drawing
            this.drawingGroup = new DrawingGroup();

            // Create an image source that we can use in our image control
            this.imageSource = new DrawingImage(this.drawingGroup);

            // use the window object as the view model in this simple example
            this.DataContext = this;

            // initialize the components (controls) of the window
            this.InitializeComponent();
        }
示例#45
0
        public bool LoadListen()
        {
            // Don't allocate anything if we have no phrases to hook.

            if (App.ActiveProfile == null)
            {
                return(false);
            }

            if (App.ActiveProfile.ProfileTriggers != null &&
                App.ActiveProfile.ProfileTriggers.Count == 0)
            {
                MessageBox.Show("You need to add at least one Trigger");

                return(false);
            }

            synthesizer = App.ActiveProfile.Synthesizer;
            synthesizer.SelectVoice(App.Settings.VoiceInfo);
            speechRecognitionEngine = new SpeechRecognitionEngine(App.Settings.RecognizerInfo);

            GrammarBuilder grammarPhrases = new GrammarBuilder {
                Culture = App.Settings.RecognizerInfo.Culture
            };

            // Grammar must match speech recognition language localization

            List <string> glossary = new List <string>();

            // Add trigger phrases to glossary of voice recognition engine.
            if (App.ActiveProfile.ProfileTriggers != null)
            {
                glossary.AddRange(from trigger in App.ActiveProfile.ProfileTriggers
                                  select trigger.Value);
            }

            grammarPhrases.Append(new Choices(glossary.ToArray()));
            speechRecognitionEngine.LoadGrammar(new Grammar(grammarPhrases));

            // event function hook
            speechRecognitionEngine.SpeechRecognized          += PhraseRecognized;
            speechRecognitionEngine.SpeechRecognitionRejected += Recognizer_SpeechRecognitionRejected;

            try
            {
                speechRecognitionEngine.SetInputToDefaultAudioDevice();
            }

            catch (InvalidOperationException exception)
            {
                //  For the time being, we're only catching failures to address an input
                //  device (typically a microphone).
                //  TODO: Show error message indicating a microphone was not detected.


                return(false);
            }

            catch (Exception)
            {
                // TODO: Show unknown error message here.
                return(false);
            }

            speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            // Install Push to talk key hooks.
            KeyboardHook.KeyDown += pushToTalkKeyDownHook;
            KeyboardHook.KeyUp   += pushToTalkKeyUpHook;
            KeyboardHook.InstallHook();

            if (App.Settings.PushToTalkMode != "Hold" && App.Settings.PushToTalkMode != "Toggle" &&
                App.Settings.PushToTalkMode != "Single")
            {
                pushToTalkActive = true;
            }

            //  We have successfully established an instance of a SAPI engine with a well-formed grammar.

            IsListening = true;

            return(true);
        }
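
The push-to-talk key hooks are referenced but not shown. A minimal sketch of a "Hold" mode pair; the KeyboardHook event signature and the key matching are assumptions:

        private void pushToTalkKeyDownHook(object sender, KeyEventArgs e)
        {
            // Sketch: in "Hold" mode, listen only while the key is held.
            if (App.Settings.PushToTalkMode == "Hold")
            {
                pushToTalkActive = true;
            }
        }

        private void pushToTalkKeyUpHook(object sender, KeyEventArgs e)
        {
            if (App.Settings.PushToTalkMode == "Hold")
            {
                pushToTalkActive = false;
            }
        }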
示例#46
0
        public override void HandleSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            Console.WriteLine(e.Result.Grammar.Name);
            Console.WriteLine(e.Result.Text);

            if (e.Result.Grammar.Name == "OLD_" + this.smartGrammarName || e.Result.Grammar.Name == this.smartGrammarName || e.Result.Grammar.Name == "NEW_" + this.smartGrammarName)
            {
                System.Console.WriteLine(e.Result.Grammar.Name);
                String output = "";
                string thisword;
                //{ "camel", "score", "allcaps", "nocaps" }
                string keyword = e.Result.Words[0].Text;
                for (int i = 1; i < e.Result.Words.Count; i++)
                {
                    thisword = e.Result.Words[i].Text;
                    switch (keyword)
                    {
                    case "camel":
                        output += Char.ToUpper(thisword[0]) + thisword.Substring(1);
                        break;

                    case "snake":
                        output += thisword + "_";
                        break;

                    case "low-camel":
                        if (i != 1)
                        {
                            output += Char.ToUpper(thisword[0]) + thisword.Substring(1);
                        }
                        else
                        {
                            output += thisword;
                        }
                        break;

                    case "camel-snake":
                        output += Char.ToUpper(thisword[0]) + thisword.Substring(1) + "_";
                        break;

                    case "low-camel-snake":
                        if (i != 1)
                        {
                            output += Char.ToUpper(thisword[0]) + thisword.Substring(1) + "_";
                        }
                        else
                        {
                            output += thisword + "_";
                        }
                        break;

                    case "allcaps":
                        output += thisword.ToUpper();
                        break;

                    case "nocaps":
                        output += thisword.ToLower();
                        break;

                    default:
                        break;
                    }
                }
                if (keyword == "snake" || keyword == "camel-snake" || keyword == "low-camel-snake")
                {
                    output = output.TrimEnd('_');
                }

                System.Console.WriteLine(output);
                SendKeys.SendWait(output);
            }

            // Surreptitiously remove the old grammars
            SpeechRecognitionEngine sender_sre = (SpeechRecognitionEngine)sender;

            updateGrammars(ref sender_sre);
            //this.setSmartWords();
            //this.setSmartGrammar();
            //SpeechRecognitionEngine sre = (SpeechRecognitionEngine)sender;
            //RemoveOldGrammars(ref sre);
            //sre.LoadGrammar(this.smartGrammar);
        }
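
For reference, a few spoken inputs and the strings this formatter would emit (worked examples, not taken from the source):

        // "camel speech engine test"       -> "SpeechEngineTest"
        // "snake speech engine test"       -> "speech_engine_test"
        // "low-camel speech engine test"   -> "speechEngineTest"
        // "camel-snake speech engine test" -> "Speech_Engine_Test"
        // "allcaps speech engine test"     -> "SPEECHENGINETEST"
        // "nocaps speech engine test"      -> "speechenginetest"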
示例#47
0
        void conectaActiva()
        {
            //Make sure at least one Kinect sensor is connected
            if (KinectSensor.KinectSensors.Count > 0)
            {
                //Check whether the sensor variable is null
                if (this.sensor == null)
                {
                    //Assign the first Kinect sensor to our variable
                    this.sensor = KinectSensor.KinectSensors[0];
                }
                if (this.sensor != null)
                {
                    try
                    {
                        //Start the Kinect device
                        this.sensor.Start();
                        //Optional, but helps tilt the Kinect device to a given elevation angle, from -27 to 27
                        //   sensor.ElevationAngle = 3;
                        //Report that the Kinect device was connected and initialized correctly
                        //  Error err = new VME.Error(RecursosLocalizables.StringResources.KinectDetect, 3);
                        // err.Show();
                    }
                    catch (Exception ex)
                    {
                        MessageBox.Show("error");
                    }

                    //ri will try to find a valid language pack using the obtenerLP method
                    RecognizerInfo ri = obtenerLP();

                    //If the required language pack was found, assign it to our speechengine variable
                    if (ri != null)
                    {
                        this.speechengine = new SpeechRecognitionEngine(ri.Id);
                        //opciones stores the word and phrase options the device will be able to recognize
                        Choices opciones = new Choices();
                        //Add the options, starting with the option value to recognize and a key that identifies that value
                        //For example, on this line "uno" is the option value and "UNO" is the key

                        opciones.Add(RecursosLocalizables.StringResources.cerrar, "UNO");
                        //En esta linea "dos" es el valor de opcion y "DOS" es la llave
                        opciones.Add(RecursosLocalizables.StringResources.siguiente, "DOS");
                        opciones.Add(RecursosLocalizables.StringResources.atras, "DOS");
                        //En esta linea "windows ocho" es el valor de opcion y "TRES" es la llave y asi sucesivamente
                        opciones.Add(RecursosLocalizables.StringResources.movilidadBienvenida, "UNO");
                        opciones.Add(RecursosLocalizables.StringResources.movilidad2Bienvenida, "UNO");
                        opciones.Add(RecursosLocalizables.StringResources.movilidad3Bienvenida, "UNO");
                        opciones.Add(RecursosLocalizables.StringResources.movilidad4Bienvenida, "UNO");
                        opciones.Add(RecursosLocalizables.StringResources.movilidad5Bienvenida, "UNO");
                        opciones.Add(RecursosLocalizables.StringResources.reguKine);
                        //grammarb will build the whole set of phrases and words based on the language chosen in ri
                        var grammarb = new GrammarBuilder {
                            Culture = ri.Culture
                        };
                        //Add the word and phrase options to grammarb
                        grammarb.Append(opciones);
                        //Create a Grammar variable using grammarb as its parameter
                        var grammar = new Grammar(grammarb);
                        //Tell our speechengine variable to load grammar
                        this.speechengine.LoadGrammar(grammar);
                        //Hook the SpeechRecognized event, which runs every time a word is detected
                        speechengine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(speechengine_SpeechRecognized);
                        //speechengine starts the audio input
                        speechengine.SetInputToAudioStream(sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                        speechengine.RecognizeAsync(RecognizeMode.Multiple);
                    }
                }
            }
        }
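
obtenerLP is not shown. A plausible minimal sketch that returns the first installed recognizer matching the application's UI culture; the matching rule is an assumption:

        RecognizerInfo obtenerLP()
        {
            //Hypothetical lookup: return the first installed recognizer whose
            //culture matches the current UI culture, or null if none is found
            foreach (RecognizerInfo ri in SpeechRecognitionEngine.InstalledRecognizers())
            {
                if (ri.Culture.Equals(System.Globalization.CultureInfo.CurrentUICulture))
                {
                    return ri;
                }
            }
            return null;
        }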
示例#48
0
        public static void VA_Init1(ref Dictionary <string, object> state, ref Dictionary <string, Int16?> shortIntValues, ref Dictionary <string, string> textValues, ref Dictionary <string, int?> intValues, ref Dictionary <string, decimal?> decimalValues, ref Dictionary <string, bool?> booleanValues, ref Dictionary <string, object> extendedValues)
        {
            try
            {
                Debug.Write("---------------------- Ocellus Plugin Initializing ----------------------");
                // Setup Speech engine
                if (EliteGrammar.downloadGrammar())
                {
                    SpeechRecognitionEngine recognitionEngine = new SpeechRecognitionEngine();
                    recognitionEngine.SetInputToDefaultAudioDevice();
                    Grammar grammar = new Grammar(Path.Combine(Config.Path(), "systems_grammar.xml"));
                    Task.Run(() => recognitionEngine.LoadGrammar(grammar));
                    state.Add("VAEDrecognitionEngine", recognitionEngine);
                }

                // Setup plugin storage directory - used for cookies and debug logs
                string appPath    = Config.Path();
                string cookieFile = Config.CookiePath();
                string debugFile  = Config.DebugPath();
                textValues["VAEDdebugPath"] = debugFile;

                // Determine Elite Dangerous directories
                string gamePath        = Elite.getGamePath();
                string gameStartString = PluginRegistry.getStringValue("startPath");
                string gameStartParams = PluginRegistry.getStringValue("startParams");
                state.Add("VAEDgamePath", gamePath);
                textValues["VAEDgameStartString"] = gameStartString;
                textValues["VAEDgameStartParams"] = gameStartParams;

                // Load EDDB Index into memory
                Eddb.loadEddbIndex(ref state);

                // Load Atlas Index into memory
                Atlas.loadAtlasIndex(ref state);

                Dictionary <string, dynamic> tempAtlas = (Dictionary <string, dynamic>)state["VAEDatlasIndex"];

                // Load Tracked Systems into memory
                TrackSystems.Load(ref state);

                CookieContainer cookieJar = new CookieContainer();
                if (File.Exists(cookieFile))
                {
                    // If we have cookies then we are likely already logged in
                    cookieJar = Web.ReadCookiesFromDisk(cookieFile);
                    Tuple <CookieContainer, string> tAuthentication = Companion.loginToAPI(cookieJar);
                    if (tAuthentication.Item2 == "ok")
                    {
                        cookieJar = tAuthentication.Item1;
                        state.Add("VAEDcookieContainer", cookieJar);
                        state["VAEDloggedIn"] = "ok";
                    }
                }
                else
                {
                    state.Add("VAEDloggedIn", "no");
                }

                EliteBinds eliteBinds = new EliteBinds();
                state.Add("VAEDeliteBinds", eliteBinds);
                string   bindsFile = Elite.getBindsFilename();
                DateTime fileTime  = File.GetLastWriteTime(bindsFile);
                state.Add("VAEDbindsFile", bindsFile);
                state.Add("VAEDbindsTimestamp", fileTime);
            }
            catch (Exception ex)
            {
                Debug.Write(ex.ToString());
            }
        }
示例#49
0
        private void SetUpWithFile(string filePath)
        {
            RecognizerInfo info = null;

            foreach (RecognizerInfo ri in SpeechRecognitionEngine.InstalledRecognizers())
            {
                if (ri.Culture.TwoLetterISOLanguageName.Equals("en"))
                {
                    info = ri;
                    break;
                }
            }
            if (info == null)
            {
                return;
            }

            /*Set up audio*/
            audioStream = new WaveFileReader(filePath);
            TimeSpan audioStreamTotalTime = audioStream.TotalTime;

            // Create the selected recognizer.
            recognitionEngine = new SpeechRecognitionEngine(info);
            recognitionEngine.LoadGrammar(new DictationGrammar());
            recognitionEngine.SetInputToWaveFile(filePath);
            StringBuilder sb = new StringBuilder();

            recognitionEngine.SpeechRecognized += (s, args) =>
            {
                TimeSpan currentTime = new TimeSpan(recognitionEngine.RecognizerAudioPosition.Ticks);
                string   time        = String.Format("[{0:D2}:{1:D2}.{2:D2}]", currentTime.Minutes, currentTime.Seconds, currentTime.Milliseconds);

                /*Update progress label*/
                String positionString = String.Format("{0} / {1}", currentTime, audioStreamTotalTime);
                toolStripStatusLabelPosition.Text = positionString;
                double progress = (double)currentTime.Ticks / (double)audioStreamTotalTime.Ticks * 100.0;
                toolStripProgressBar1.Value = (int)progress;

                List <WordInfo> wordInfos = new List <WordInfo>();
                foreach (RecognizedWordUnit word in args.Result.Words)
                {
                    WordInfo wi;
                    string   confidenceStr = String.Format("{0:0}", word.Confidence * 100);
                    if (word.Confidence >= confidence)
                    {
                        wi = WordInfo.Create(word.Text, confidenceStr, word.LexicalForm);
                    }
                    else
                    {
                        string text = String.Format("[SKIPPED]", confidenceStr);
                        wi = WordInfo.Create(text, confidenceStr, word.LexicalForm);
                    }
                    wordInfos.Add(wi);
                }
                txtOutput.AddLine(currentTime, wordInfos);
                txtOutput.RefreshText();
            };
            recognitionEngine.RecognizeCompleted += RecognitionEngine_RecognizeCompleted;
            btnStart.Enabled = true;
            toolStripStatusLabelStatus.Text = "File loaded: " + filePath;
            lblConfValue.Text = String.Format("{0:P2}", confidence);
        }
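
Nothing here starts the engine; recognition over the wave file is presumably kicked off by btnStart. A minimal sketch of such a handler (the handler name is an assumption):

        private void btnStart_Click(object sender, EventArgs e)
        {
            // Sketch: transcribe the whole file. Recognition events fire as
            // the wave input is consumed; RecognizeCompleted ends the run.
            btnStart.Enabled = false;
            recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }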
示例#50
0
        /// <summary>
        /// Default constructor. Sets up the voice recognizer with default settings.
        ///
        /// Namely, default options are: en-US, default input device, listen always, confidence level at .90
        /// </summary>
        public VoiceRecognizer()
        {
            try
            {
                // detect the system locale and use the best recognizer for the job.
                CultureInfo cultureInfo = null;
                foreach (RecognizerInfo ri in SpeechRecognitionEngine.InstalledRecognizers())
                {
                    // TODO: change to support more languages as they get added in
                    if (ri.Culture.Equals(CultureInfo.CurrentCulture) && ri.Culture.TwoLetterISOLanguageName.Equals("en"))
                    {
                        cultureInfo = ri.Culture;
                    }
                }

                // default to en-US
                if (cultureInfo == null)
                {
                    cultureInfo = new CultureInfo("en-US");
                }

                // Setup members
                ConfidenceLock     = new Object();
                EngineShuttingDown = new AutoResetEvent(false);
                State = VoiceRecognizerState.Paused;

                // Create a new SpeechRecognitionEngine instance.
                Engine = new SpeechRecognitionEngine(cultureInfo);

                try
                {
                    // Setup the audio device
                    Engine.SetInputToDefaultAudioDevice();
                }
                catch (InvalidOperationException ex)
                {
                    // No default input device
                    Trace.WriteLine(ex.Message);
                    SetupError = "Check input device.\n\n";
                    State      = VoiceRecognizerState.Error;
                    return;
                }

                // Set the confidence setting
                ConfidenceMargin = 90;

                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(CommandPool.BuildSrgsGrammar(cultureInfo));
                Engine.LoadGrammar(g);

                // Register a handlers for the SpeechRecognized and SpeechRecognitionRejected event
                Engine.SpeechRecognized          += sre_SpeechRecognized;
                Engine.SpeechRecognitionRejected += sre_SpeechRecognitionRejected;
                Engine.RecognizeCompleted        += sre_RecognizeCompleted;

                StartListening();
            }
            catch (Exception ex)
            {
                // Something went wrong setting up the voiceEngine.
                Trace.WriteLine(ex.Message);
                SetupError = ex.ToString();
                State      = VoiceRecognizerState.Error;
            }
        }
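
StartListening is not shown. A minimal sketch consistent with the state handling above; the Listening enum member is an assumption:

        private void StartListening()
        {
            // Sketch: begin continuous recognition and expose the new state.
            Engine.RecognizeAsync(RecognizeMode.Multiple);
            State = VoiceRecognizerState.Listening;    // assumed enum member
        }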
示例#51
0
 public RankCalculator(Player player, RecognitionResult speechInput, SpeechRecognitionEngine recEngine)
 {
     Player      = player;
     SpeechInput = speechInput;
     RecEngine   = recEngine;
 }
示例#52
0
        public Form1()
        {
            InitializeComponent();
            try
            {
                speechreco = CreateSpeechEngine("en-US");
                speechreco.AudioLevelUpdated += new EventHandler <AudioLevelUpdatedEventArgs>(Speechreco_AudioRecognized);
                speechreco.SpeechRecognized  += new EventHandler <SpeechRecognizedEventArgs>(Speechreco_SpeechRecognized);
                loadGrammar();
                speechreco.SetInputToDefaultAudioDevice();
                speechreco.RecognizeAsync(RecognizeMode.Multiple);

                sophie.SpeakCompleted += new EventHandler <SpeakCompletedEventArgs>(sophie_SpeakCompleted);
            }
            catch (Exception ex)
            {
                sophie.SpeakAsync("Voice recognition failed" + ex.Message);
            }

            //Create Directory and Commands
            Directory.CreateDirectory(@"C:\Users\" + userName + "\\Documents\\Sophie_Commands"); //TODO CONTINUE HERE
            Properties.Settings.Default.ShellC  = @"C:\Users\" + userName + "\\Documents\\Sophie_Commands\\Shell_Commands.txt";
            Properties.Settings.Default.ShellR  = @"C:\Users\" + userName + "\\Documents\\Sophie_Commands\\Shell_Response.txt";
            Properties.Settings.Default.ShellL  = @"C:\Users\" + userName + "\\Documents\\Sophie_Commands\\Shell_Location.txt";
            Properties.Settings.Default.SocialC = @"C:\Users\" + userName + "\\Documents\\Sophie_Commands\\Social_Commands.txt";
            Properties.Settings.Default.SocialR = @"C:\Users\" + userName + "\\Documents\\Sophie_Commands\\Social_Response.txt";
            Properties.Settings.Default.UserS   = @"C:\Users\" + userName + "\\Documents\\Sophie_Commands\\User_Settings.txt";

            if (!File.Exists(Properties.Settings.Default.ShellC))
            {
                sw = File.CreateText(Properties.Settings.Default.ShellC);
            }
            if (!File.Exists(Properties.Settings.Default.ShellR))
            {
                sw = File.CreateText(Properties.Settings.Default.ShellR);
            }
            if (!File.Exists(Properties.Settings.Default.ShellL))
            {
                sw = File.CreateText(Properties.Settings.Default.ShellL);
            }
            if (!File.Exists(Properties.Settings.Default.SocialC))
            {
                sw = File.CreateText(Properties.Settings.Default.SocialC);
            }
            if (!File.Exists(Properties.Settings.Default.SocialR))
            {
                sw = File.CreateText(Properties.Settings.Default.SocialR);
            }
            if (!File.Exists(Properties.Settings.Default.UserS))
            {
                sw = File.CreateText(Properties.Settings.Default.UserS);
            }

            ArrayShellCommands   = File.ReadAllLines(Properties.Settings.Default.ShellC);
            ArrayShellResponses  = File.ReadAllLines(Properties.Settings.Default.ShellR);
            ArrayShellLocations  = File.ReadAllLines(Properties.Settings.Default.ShellL);
            ArraySocialCommands  = File.ReadAllLines(Properties.Settings.Default.SocialC);
            ArraySocialResponses = File.ReadAllLines(Properties.Settings.Default.SocialR);
            ArrayUserSettings    = File.ReadAllLines(Properties.Settings.Default.UserS);

            // Current user setup: 1st line = name, 2nd = date (not in use), 3rd = voice setting
            // Built-in voices are: Microsoft Zira Desktop, Microsoft David Desktop, Microsoft Mark Desktop
            // If no user data has been collected yet
            if (ArrayUserSettings.Length == 0)
            {
                ArrayUserSettings = new string[] { "handsome", System.DateTime.Now.ToString(), "Microsoft Zira Desktop" };
            }
            // With this, you could also use different voices: just write the voice name on the third line.
            if (ArrayUserSettings.Length >= 3)
            {
                sophie.SelectVoice(ArrayUserSettings[2]);
            }
            else
            {
                sophie.SelectVoice("Microsoft Zira Desktop");
            }
        }
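
CreateSpeechEngine is referenced but not shown. A plausible sketch that prefers the requested culture and falls back to the system default; the fallback choice is an assumption:

        private SpeechRecognitionEngine CreateSpeechEngine(string preferredCulture)
        {
            // Sketch: pick the installed recognizer matching the requested
            // culture, or fall back to the default recognizer.
            foreach (RecognizerInfo info in SpeechRecognitionEngine.InstalledRecognizers())
            {
                if (info.Culture.Name == preferredCulture)
                {
                    return new SpeechRecognitionEngine(info);
                }
            }
            return new SpeechRecognitionEngine();
        }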
示例#53
0
        public static void VA_Invoke1(String context, ref Dictionary <string, object> state, ref Dictionary <string, Int16?> shortIntValues, ref Dictionary <string, string> textValues, ref Dictionary <string, int?> intValues, ref Dictionary <string, decimal?> decimalValues, ref Dictionary <string, bool?> booleanValues, ref Dictionary <string, object> extendedValues)
        {
            try
            {
                Debug.Write("COMMAND:  " + context);
                switch (context.ToLower())
                {
                case "check for upgrade":
                    if (Upgrade.needUpgrade(ref state))
                    {
                        booleanValues["VAEDupgradeAvailable"] = true;
                        state["VAEDupgradeAvailable"]         = true;
                    }
                    else
                    {
                        booleanValues["VAEDupgradeAvailable"] = false;
                        state["VAEDupgradeAvailable"]         = false;
                    }
                    break;

                case "distance from here":
                    bool worked = Companion.updateProfile(ref state, ref shortIntValues, ref textValues, ref intValues, ref decimalValues, ref booleanValues);
                    decimalValues["VAEDdecimalDistance"] = null;
                    decimalValues["VAEDintDistance"]     = null;
                    if (worked)
                    {
                        if (state.ContainsKey("VAEDcompanionDict") && textValues.ContainsKey("VAEDtargetSystem"))
                        {
                            Dictionary <string, dynamic> companion = (Dictionary <string, dynamic>)state["VAEDcompanionDict"];
                            string currentSystem = "";
                            if (companion.ContainsKey("lastSystem") && companion["lastSystem"].ContainsKey("name"))
                            {
                                currentSystem = companion["lastSystem"]["name"];
                            }
                            else
                            {
                                Debug.Write("Error:  Could not determine current location for command 'distance from here'");
                                booleanValues["VAEDerrorSourceSystem"] = true;
                                break;
                            }
                            Dictionary <string, dynamic> tempAtlas = (Dictionary <string, dynamic>)state["VAEDatlasIndex"];

                            int distance = Atlas.calcDistance(ref tempAtlas, currentSystem, textValues["VAEDtargetSystem"]);
                            if (distance < 0)
                            {
                                //Could not find destination system
                                Debug.Write("Error:  Could not determine distance to target system");
                                booleanValues["VAEDerrorTargetSystem"] = true;
                                break;
                            }
                            // Found the system - return distance
                            intValues["VAEDintDistance"]           = distance;
                            booleanValues["VAEDerrorTargetSystem"] = false;
                            booleanValues["VAEDerrorSourceSystem"] = false;
                            break;
                        }
                    }
                    //Can't find the System
                    booleanValues["VAEDerrorSourceSystem"]      = true;
                    booleanValues["VAEDerrorDestinationSystem"] = false;
                    break;

                case "dictate system":
                    if (state.ContainsKey("VAEDrecognitionEngine"))
                    {
                        SpeechRecognitionEngine recognitionEngine = (SpeechRecognitionEngine)state["VAEDrecognitionEngine"];

                        Tuple <string, string> tSystemNames = EliteGrammar.dictateSystem(recognitionEngine, (List <String>)state["VAEDtrackedSystems"]);
                        textValues["VAEDdictateSystem"]         = tSystemNames.Item1;
                        textValues["VAEDdictateSystemPhonetic"] = tSystemNames.Item2;
                        break;
                    }
                    else
                    {
                        Debug.Write("Error:  Speech Engine not yet Initialized.  (Possibly still loading)");
                    }
                    textValues["VAEDdictateSystem"]         = null;
                    textValues["VAEDdictateSystemPhonetic"] = null;
                    break;

                case "press key bind":
                    // If the Binds file changes then reload the binds.
                    string   bindsFile    = (string)state["VAEDbindsFile"];
                    DateTime oldTimestamp = (DateTime)state["VAEDbindsTimestamp"];

                    DateTime   newTimestamp = File.GetLastWriteTime(bindsFile);
                    EliteBinds eliteBinds;
                    if (oldTimestamp != newTimestamp)
                    {
                        Debug.Write("Binds file change:  reloading");
                        eliteBinds = new EliteBinds();
                        state["VAEDbindsTimestamp"] = newTimestamp;
                        state["VAEDeliteBinds"]     = eliteBinds;
                    }
                    else
                    {
                        eliteBinds = (EliteBinds)state["VAEDeliteBinds"];
                    }
                    string[]    parts       = textValues["VAEDkeyBinding"].Split(new char[] { ':' }, 2);
                    List <uint> scanCodeExs = KeyMouse.MapVkToScanCodeExs(eliteBinds.GetCodes(parts[1]));
                    if (scanCodeExs.Count == 0)
                    {
                        Debug.Write("Warning: No key binding found for: " + textValues["VAEDkeyBinding"]);
                        booleanValues["VAEDkeyBindingError"] = true;
                        break;
                    }
                    booleanValues["VAEDkeyBindingError"] = false;
                    switch (parts[0])
                    {
                    // For now we only "best effort" focus the game before keypressing.  Igorning the setFocus return code.
                    case "KEYPRESS":
                        User32.setFocus(eliteWindowTitle);
                        KeyMouse.KeyPress(scanCodeExs);
                        break;

                    case "KEYUP":
                        User32.setFocus(eliteWindowTitle);
                        KeyMouse.KeyUp(scanCodeExs);
                        break;

                    case "KEYDOWN":
                        User32.setFocus(eliteWindowTitle);
                        KeyMouse.KeyDown(scanCodeExs);
                        break;

                    default:
                        booleanValues["VAEDkeyBindingError"] = true;
                        break;
                    }
                    break;

                case "clear debug":
                    Debug.Clear();
                    break;

                case "get debug":
                    string tempDebug = Debug.Path();
                    textValues["VAEDdebugFile"] = tempDebug;
                    break;

                case "export for ed shipyard":
                    Companion.updateProfile(ref state, ref shortIntValues, ref textValues, ref intValues, ref decimalValues, ref booleanValues);

                    if (state.ContainsKey("VAEDshipObj"))
                    {
                        Ship.Components shipObj = (Ship.Components)state["VAEDshipObj"];
                        StringBuilder   export  = EDShipyard.export(shipObj);
                        if (export != null)
                        {
                            booleanValues["VAEDexportEDShipyardError"] = false;
                            Clipboard.SetText(export.ToString());
                            break;
                        }
                    }
                    Debug.Write("Error:  Unable to form ED Shipyard.com Export");
                    Clipboard.Clear();
                    booleanValues["VAEDexportEDShipyuardError"] = true;
                    break;

                case "export for coriolis":
                    Companion.updateProfile(ref state, ref shortIntValues, ref textValues, ref intValues, ref decimalValues, ref booleanValues);
                    if (state.ContainsKey("VAEDshipObj"))
                    {
                        Ship.Components shipObj = (Ship.Components)state["VAEDshipObj"];
                        string          json    = Coriolis.export(shipObj);
                        if (json != null)
                        {
                            booleanValues["VAEDexportCoriolisError"] = false;
                            Clipboard.SetText(json);
                            break;
                        }
                    }
                    Debug.Write("Error:  Unable to form Coriolis.io JSON");
                    Clipboard.Clear();
                    booleanValues["VAEDexportCoriolisError"] = true;
                    break;

                case "edit web variable sources":
                    bool foundWindow = false;
                    foreach (Form form in Application.OpenForms)
                    {
                        if (form.GetType().Name == "EditWebVars")
                        {
                            Debug.Write("Edit Web Variable Sources window is already open");
                            foundWindow = true;
                        }
                    }
                    if (!foundWindow)
                    {
                        var webVarsForm = new WebVars.EditWebVars();
                        webVarsForm.ShowDialog();
                    }
                    break;

                case "get web variables":
                    GetWebVars.readWebVars(ref state, ref textValues, ref intValues, ref booleanValues);
                    break;

                case "get file variables":
                    FileVar.readFileVars(ref state, ref textValues, ref intValues, ref booleanValues);
                    break;

                case "get clipboard":
                    if (Clipboard.ContainsText(TextDataFormat.Text))
                    {
                        textValues.Add("VAEDclipboard", Clipboard.GetText());
                    }
                    break;

                case "get frontier credentials":
                    var credentialsForm = new Credentials.Login();
                    credentialsForm.ShowDialog();
                    CookieContainer loginCookies = credentialsForm.Cookie;
                    state["VAEDcookieContainer"] = loginCookies;
                    string loginResponse = credentialsForm.LoginResponse;
                    Debug.Write("LoginResponse: " + loginResponse);
                    textValues["VAEDloggedIn"] = loginResponse;
                    break;

                case "get frontier verification":
                    CookieContainer verifyCookies = new CookieContainer();
                    if (state.ContainsKey("VAEDcookieContainer"))
                    {
                        verifyCookies = (CookieContainer)state["VAEDcookieContainer"];
                    }
                    var verificationForm = new VerificationCode.Validate();
                    verificationForm.Cookie = verifyCookies;
                    verificationForm.ShowDialog();
                    verifyCookies = verificationForm.Cookie;
                    string verifyResponse = verificationForm.VerifyResponse;
                    state["VAEDloggedIn"]        = verifyResponse;
                    state["VAEDcookieContainer"] = verifyCookies;
                    textValues["VAEDloggedIn"]   = verifyResponse;
                    if (verifyResponse == "ok")
                    {
                        Web.WriteCookiesToDisk(Config.CookiePath(), verifyCookies);
                    }
                    break;

                case "update profile and eddn":
                    if (state["VAEDloggedIn"].ToString() == "ok" && state.ContainsKey("VAEDcookieContainer"))
                    {
                        Companion.updateProfile(ref state, ref shortIntValues, ref textValues, ref intValues, ref decimalValues, ref booleanValues);
                    }
                    else     // Not logged in
                    {
                        textValues["VAEDprofileStatus"] = "credentials";
                    }
                    break;

                default:
                    Debug.Write("ERROR: unknown command");
                    break;
                }
            }
            catch (Exception ex)
            {
                Debug.Write(ex.ToString());
            }
        }
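A note on the "press key bind" case above: the VAEDkeyBinding text value is expected in the form ACTION:BindName, split on the first ':'. A minimal caller-side illustration, where the bind name is hypothetical (real names come from the Elite binds file):

        // ACTION must be KEYPRESS, KEYUP, or KEYDOWN; "LandingGearToggle" is a
        // hypothetical bind name to be looked up in the user's binds file.
        textValues["VAEDkeyBinding"] = "KEYPRESS:LandingGearToggle";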
Example #54
        public void getSpeechthings()
        {
            // SPEECH STUFF
            // grab the audio stream
            try
            {
                IReadOnlyList <AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                System.IO.Stream          audioStream   = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            catch
            {
                // Swallow audio-stream failures; note convertStream remains null if this throws.
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                /****************************************************************
                *
                * Use this code to create grammar programmatically rather than from
                * a grammar file.
                *
                * var directions = new Choices();
                * directions.Add(new SemanticResultValue("forward", "FORWARD"));
                * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
                * directions.Add(new SemanticResultValue("straight", "FORWARD"));
                * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
                * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
                * directions.Add(new SemanticResultValue("back", "BACKWARD"));
                * directions.Add(new SemanticResultValue("turn left", "LEFT"));
                * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
                *
                * var gb = new GrammarBuilder { Culture = ri.Culture };
                * gb.Append(directions);
                *
                * var g = new Grammar(gb);
                *
                ****************************************************************/

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this.speechEngine.LoadGrammar(g);
                }

                this.speechEngine.SpeechRecognized          += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;


                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
        }
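TryGetKinectRecognizer() is called above but not shown. A minimal sketch, following the pattern used in the standard Kinect speech samples (COMException is from System.Runtime.InteropServices):

        private static RecognizerInfo TryGetKinectRecognizer()
        {
            IEnumerable<RecognizerInfo> recognizers;

            // A COMException here means the speech runtime itself is missing or broken.
            try
            {
                recognizers = SpeechRecognitionEngine.InstalledRecognizers();
            }
            catch (COMException)
            {
                return null;
            }

            foreach (RecognizerInfo recognizer in recognizers)
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase)
                    && "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }

            return null;
        }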
Example #55
 public VoiceListener(SpeechRecognitionEngine speechRecognitionEngine)
 {
     _speechRecognitionEngine = speechRecognitionEngine;
 }
Example #56
        public static void Main(string[] args)
        {
            speechRecognitionEngine = new SpeechRecognitionEngine(SpeechRecognitionEngine.InstalledRecognizers()[0]);
            try
            {
                // create the engine
                // hook to events
                //	speechRecognitionEngine.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(engine_AudioLevelUpdated);
                speechRecognitionEngine.SpeechRecognized += new EventHandler <SpeechRecognizedEventArgs>(engine_SpeechRecognized);

                // load dictionary
                try
                {
                    Choices  texts = new Choices();
                    string[] lines = File.ReadAllLines(Environment.CurrentDirectory + "\\grammar.txt");
                    foreach (string line in lines)
                    {
                        //reco UDP Port
                        if (line.StartsWith("#P"))
                        {
                            var parts = line.Split(new char[] { ' ' });
                            port = Convert.ToInt32(parts[1]);
                            Console.WriteLine("Port : " + parts[1]);
                            continue;
                        }
                        // Reco endWord
                        if (line.StartsWith("#E"))
                        {
                            var parts = line.Split(new char[] { ' ' });
                            endWord = parts[1];
                            Console.WriteLine("End Word : " + parts[1]);
                            continue;
                        }
                        // Reco IP server
                        if (line.StartsWith("#I"))
                        {
                            var parts = line.Split(new char[] { ' ' });
                            ipServer = parts[1];
                            Console.WriteLine("ipServer : " + parts[1]);
                            continue;
                        }
                        // Reco validity
                        if (line.StartsWith("#V"))
                        {
                            var parts = line.Split(new char[] { ' ' });
                            validity = Convert.ToInt32(parts[1]) / 100.0f;
                            Console.WriteLine("Validity : " + parts[1]);
                            continue;
                        }

                        // skip comment lines and empty lines
                        if (line.StartsWith("#") || line == String.Empty)
                        {
                            continue;
                        }

                        texts.Add(line);
                    }
                    Grammar wordsList = new Grammar(new GrammarBuilder(texts));
                    speechRecognitionEngine.LoadGrammar(wordsList);
                }
                catch (Exception)
                {
                    // A bare rethrow preserves the stack trace; the outer catch
                    // below logs the error, stops the recognizer, and exits.
                    throw;
                }

                // use the system's default microphone
                speechRecognitionEngine.SetInputToDefaultAudioDevice();
                // start listening
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception ex)
            {
                Console.WriteLine("Microphone error: " + ex.Message);
                speechRecognitionEngine.RecognizeAsyncStop();
                speechRecognitionEngine.Dispose();
                System.Environment.Exit(0);
            }
            // UDP init
            Socket     server = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
            IPEndPoint iep    = new IPEndPoint(IPAddress.Parse(ipServer), port);

            // Ready
            Console.WriteLine("Ready.....");

            // Poll loop: the SpeechRecognized handler stores recognized text in
            // 'reception'; "#" is the empty/sentinel value.
            while (true)
            {
                if (reception != "#")
                {
                    data = Encoding.ASCII.GetBytes(reception);
                    server.SendTo(data, iep);
                    //Console.WriteLine("Sending : "+reception);
                    reception = "#";
                }
                Thread.Sleep(2);
            }
        }         // main
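For reference, the directives this parser accepts in grammar.txt, reconstructed from the parsing code above (all values illustrative):

        #P 5005
        #E stop
        #I 127.0.0.1
        #V 80
        # #P is the UDP port, #E the end word, #I the server IP, and #V the
        # minimum confidence in percent; any other line starting with '#' is a
        # comment, and every remaining non-empty line becomes a phrase:
        hello computer
        lights on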
Example #57
        public FormMain()
        {
            InitializeComponent();

            sre = new SpeechRecognitionEngine();
            ss  = new SpeechSynthesizer();

            vk = new VkApi();

            ApiAuthParams _aap = new ApiAuthParams();

            _aap.Settings      = Settings.All;
            _aap.ApplicationId = 6707008;
            _aap.Login         = "******";
            _aap.Password      = "******";

            vk.Authorize(_aap);

            sre.SpeechRecognized   += Sre_SpeechRecognized;
            sre.RecognizeCompleted += Sre_RecognizeCompleted;

            sre.SetInputToDefaultAudioDevice();
            ss.SetOutputToDefaultAudioDevice();

            active  = false;
            ss.Rate = 0;

            Choices ch_startCommands = new Choices();

            // Every start command is prefixed with the wake word "Васян" (the assistant's name).
            foreach (string str_actPhrase in File.ReadAllLines(@"dictionary\active_phrases.txt"))
            {
                ch_startCommands.Add("Васян " + str_actPhrase);
            }

            foreach (string str_actPhrase in File.ReadAllLines(@"dictionary\passive_phrases.txt"))
            {
                ch_startCommands.Add("Васян " + str_actPhrase);
            }

            GrammarBuilder gb_startCommands = new GrammarBuilder(ch_startCommands);
            Grammar        g_start          = new Grammar(gb_startCommands);

            Choices ch_vk = new Choices();

            foreach (string str_questPhrase in File.ReadAllLines(@"dictionary\VkIds.txt"))
            {
                ch_vk.Add(str_questPhrase.Split('|')[0]);
            }


            Choices ch_vkSamples = new Choices();

            foreach (string str_vkSample in File.ReadAllLines(@"dictionary\vk_samples.txt"))
            {
                ch_vkSamples.Add(str_vkSample);
            }

            GrammarBuilder gb_vk = new GrammarBuilder();

            gb_vk.Append("Отправь");
            gb_vk.Append(ch_vk);
            gb_vk.Append(ch_vkSamples);
            Grammar g_vk = new Grammar(gb_vk);

            Choices ch_quests = new Choices();

            foreach (string str_questPhrase in File.ReadAllLines(@"dictionary\QuestAnswer.txt"))
            {
                ch_quests.Add(str_questPhrase.Split('|')[0]);
            }

            GrammarBuilder gb_quests = new GrammarBuilder(ch_quests);
            Grammar        g_quests  = new Grammar(gb_quests);


            Choices ch_openCommands = new Choices();

            ch_openCommands.Add(" ");
            foreach (string str_openPhrase in File.ReadAllLines(@"dictionary\Open_phrases.txt"))
            {
                ch_openCommands.Add(str_openPhrase);
            }

            Choices ch_closeCommands = new Choices();

            foreach (string str_closePhrase in File.ReadAllLines(@"dictionary\Close_phrases.txt"))
            {
                ch_closeCommands.Add(str_closePhrase);
            }


            Choices ch_programs = new Choices();

            foreach (string str_program in File.ReadAllLines(@"dictionary\Programs.txt"))
            {
                ch_programs.Add(str_program.Split('|')[0]);
            }


            foreach (string site in File.ReadAllLines(@"dictionary\Sites.txt"))
            {
                ch_programs.Add(site.Split('|')[0]);
            }


            GrammarBuilder gb_openPrograms = new GrammarBuilder();

            gb_openPrograms.Append(ch_openCommands);
            gb_openPrograms.Append(ch_programs);
            Grammar g_open = new Grammar(gb_openPrograms);


            GrammarBuilder gb_closePrograms = new GrammarBuilder();

            gb_closePrograms.Append(ch_closeCommands);
            gb_closePrograms.Append(ch_programs);
            Grammar g_close = new Grammar(gb_closePrograms);


            GrammarBuilder gb_anectod = new GrammarBuilder();

            gb_anectod.Append(new Choices(" ", "Расскажи шутку", "Расскажи анекдот", "Рассмеши меня")); // "Tell a joke", "Tell an anecdote", "Make me laugh"
            Grammar g_anectod = new Grammar(gb_anectod);

            GrammarBuilder gb_analys = new GrammarBuilder();

            gb_analys.Append(new Choices(" ",
                                         "Что там с погодой",
                                         "Сколько время",
                                         "Какой день недели",
                                         "Какое сегодня число"));
            Grammar g_analys = new Grammar(gb_analys);

            sre.LoadGrammarAsync(g_start);
            sre.LoadGrammarAsync(g_close);
            sre.LoadGrammarAsync(g_open);
            sre.LoadGrammarAsync(g_quests);
            sre.LoadGrammarAsync(g_vk);
            sre.LoadGrammarAsync(g_anectod);
            sre.LoadGrammarAsync(g_analys);

            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
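The dictionary files loaded above are split on '|' with only the left part fed to a grammar; the right part is presumably the mapped value (a VK peer for VkIds.txt, a canned reply for QuestAnswer.txt, a launch target for Programs.txt and Sites.txt). Illustrative lines under that assumption:

        VkIds.txt:       Маме|12345678             ("to Mom" | VK user id)
        QuestAnswer.txt: Как дела|Отлично          ("how are you" | "great")
        Programs.txt:    блокнот|notepad.exe       ("notepad" | executable)
        Sites.txt:       ютуб|https://youtube.com  ("youtube" | URL)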
Example #58
File: MCQ.xaml.cs  Project: oujunke/AISA
        private void CompletePaper()
        {
            UploadingPanel.Visibility = Visibility.Visible;

            //Animate the uploading panel to preview the spinner
            var da = new DoubleAnimation(1, TimeSpan.FromMilliseconds(500));

            da.EasingFunction = new QuinticEase();
            UploadingPanel.BeginAnimation(OpacityProperty, da);

            _recognizer.RecognizeAsyncCancel();
            _recognizer.Dispose(); // Dispose the recognizer object

            //Start the uploading functionality
            var threadstart = new ThreadStart(() =>
            {
                var result = Scholar.Class.PostMCQPaper(Context.previousPaper[0], Context.previousPaper[1], _answers, Properties.Settings.Default.scholarUsername, Properties.Settings.Default.scholarPassword);

                Application.Current.Dispatcher.Invoke(() =>
                {
                    //Do this in the user interface thread
                    var da2            = new DoubleAnimation(0, TimeSpan.FromMilliseconds(500));
                    da2.EasingFunction = new QuinticEase();

                    da2.Completed += (a, b) =>
                    {
                        //Show the results
                        ResultSheet.Visibility = Visibility.Visible;
                        ResultSheet.BeginAnimation(OpacityProperty, da);

                        PaperNameResults.Content = Context.currentPaper.name;

                        var correct   = 0;
                        var incorrect = 0;
                        var skipped   = 0;

                        for (int i = 0; i < _answers.Length; i++)
                        {
                            var given_answer   = _answers[i];
                            var correct_answer = Context.currentPaper.questions[i].correct;

                            if (given_answer == 0)
                            {
                                skipped++;
                            }
                            else
                            {
                                if (given_answer == correct_answer)
                                {
                                    //Answer is correct
                                    correct++;
                                }
                                else
                                {
                                    incorrect++;
                                }
                            }
                        }

                        //Set the numbers
                        correct_count.Content    = correct.ToString();
                        incorrect_count.Content  = incorrect.ToString();
                        unanswered_count.Content = skipped.ToString();


                        //Set the event handlers
                        OkayButton.Clicked = () =>
                        {
                            _recognizer.RecognizeAsyncCancel();
                            _recognizer = null;

                            ViewControllerConnector.PaperStarted = false;
                            AISAHandler.Start();
                            ViewControllerConnector.Opaque();

                            Close();
                        };

                        _recognizer = new SpeechRecognitionEngine();
                        _recognizer.SetInputToDefaultAudioDevice();
                        _recognizer.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(
                                                                                   new string[] { "Okay", "OK", "Close", "Exit" }
                                                                                   ))));

                        _recognizer.SpeechRecognized += (x, y) =>
                        {
                            _recognizer.RecognizeAsyncCancel();
                            _recognizer = null;

                            ViewControllerConnector.PaperStarted = false;
                            AISAHandler.Start();
                            ViewControllerConnector.Opaque();

                            Close();
                        };

                        _recognizer.RecognizeAsync();
                    };

                    //Fade the uploading panel out
                    UploadingPanel.BeginAnimation(OpacityProperty, da2);
                });
            });

            var thread = new Thread(threadstart);

            thread.Start();
        }
Example #59
        public MainWindow()
        {
            sensor = KinectSensor.GetDefault();
            InitializeComponent();
            var depthReader = sensor.DepthFrameSource.OpenReader();
            var colorReader = sensor.ColorFrameSource.OpenReader();
            var bodyReader  = sensor.BodyFrameSource.OpenReader();
            var biReader    = sensor.BodyIndexFrameSource.OpenReader();

            biReader.FrameArrived += BiReader_FrameArrived;

            if (caroline)
            {
                colorReader.FrameArrived += ColorReader_FrameArrived;
                bodyReader.FrameArrived  += BodyReader_FrameArrived;
            }
            else
            {
                depthReader.FrameArrived += DepthReader_FrameArrived;
            }
            //depthReader.FrameArrived += DepthReader_FrameArrived;
            //colorReader.FrameArrived += ColorReader_FrameArrived;
            //bodyReader.FrameArrived += BodyReader_FrameArrived;

            try
            {
                //var ws = new WebSocket("ws://192.168.0.9:1337");
                //ws.OnMessage += Ws_OnMessage;
                //ws.OnOpen += (sender, e) => Console.WriteLine("opened!!!!");
                //ws.OnError += (sender, e) => Console.WriteLine(e.Exception);
                //ws.OnClose += (sender, e) => Console.WriteLine("CLOSED");
                //ws.Connect();
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }

            var beams  = sensor.AudioSource.AudioBeams[0];
            var stream = beams.OpenInputStream();

            converted = new KinectAudioStream(stream);

            IEnumerable <RecognizerInfo> recognizers = SpeechRecognitionEngine.InstalledRecognizers();
            RecognizerInfo kRecognizer = null;

            foreach (RecognizerInfo recognizer in recognizers)
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);

                // Require both the Kinect AdditionalInfo flag (fetched above) and
                // an en-US culture, as in the standard Kinect speech samples.
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase) &&
                    "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    kRecognizer = recognizer;
                }
            }

            // kRecognizer is null when no Kinect en-US recognizer is installed.
            if (kRecognizer == null)
            {
                Console.WriteLine("No Kinect en-US speech recognizer installed.");
                return;
            }

            speechEngine = new SpeechRecognitionEngine(kRecognizer.Id);
            var gb = new GrammarBuilder {
                Culture = kRecognizer.Culture
            };

            Choices colors = new Choices();

            colors.Add(new string[] { "red", "green", "blue" });
            gb.Append(colors);

            // Note: the colors Choices/GrammarBuilder above is built but never
            // loaded; the free-form DictationGrammar below is what gets used.
            var g = new DictationGrammar();

            speechEngine.LoadGrammar(g);
            if (caroline)
            {
                speechEngine.SpeechRecognized   += SpeechEngine_SpeechRecognized;
                speechEngine.SpeechHypothesized += SpeechEngine_SpeechHypothesized;
            }
            converted.SpeechActive = true;

            try
            {
                speechEngine.SetInputToAudioStream(converted, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }

            bodies = new Body[sensor.BodyFrameSource.BodyCount];
            cm     = sensor.CoordinateMapper;
            if (caroline)
            {
                text = new TextBox()
                {
                    Visibility = Visibility.Visible,
                    FontSize   = 20.0,
                };
                text.Width  = 500;
                text.Height = 50;
                canvas.Children.Add(text);
            }


            sw = new Stopwatch();
            sw.Restart();

            sensor.Open();
        }
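SpeechEngine_SpeechRecognized and SpeechEngine_SpeechHypothesized are wired above but not shown. A minimal sketch, assuming they simply mirror dictation results into the TextBox created for the caroline mode:

        private void SpeechEngine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            // Recognition events arrive off the UI thread; marshal to it.
            Dispatcher.Invoke(() => text.Text = e.Result.Text); // live partial hypothesis
        }

        private void SpeechEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            Dispatcher.Invoke(() => text.Text = e.Result.Text); // final recognized phrase
        }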
Example #60
File: Form1.cs  Project: bonfiredog/knole
        public void button1_Click(object sender, EventArgs e)
        {
            button1.Enabled = false;
            button1.Text = "God Called";
            label2.Text = "The god is listening...";
            label2.ForeColor = Color.Red;

            SpeechRecognitionEngine GodListener = new SpeechRecognitionEngine();

            Choices GodList = new Choices();
            GodList.Add(new string[] { "Make toast", "Make me toast", "Make me some toast", "Make me immortal", "Make rain", "call rain", "call the rain", "make it rain", "wink out of existence", "begone", "go now", "wink yourself out of existence" });

            GrammarBuilder gb = new GrammarBuilder();
            gb.Append(GodList);

            Grammar GodGrammar = new Grammar(gb);

            GodListener.MaxAlternates = 2;

            try
            {
                GodListener.RequestRecognizerUpdate();
                GodListener.LoadGrammar(GodGrammar);
                GodListener.SetInputToDefaultAudioDevice();
                GodListener.SpeechRecognized += GodListener_SpeechRecognized;
                GodListener.AudioStateChanged += GodListener_AudioStateChanged;
                GodListener.AudioLevelUpdated += GodListener_AudioLevelUpdated;
                GodListener.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {
                // If the recognizer can't start (e.g. no microphone), give up quietly.
                return;
            }
        }
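GodListener_SpeechRecognized and the two audio handlers are wired above but not shown. A minimal sketch of the recognized handler, dispatching on the matched phrase; the threshold and actions are placeholders:

        private void GodListener_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Confidence < 0.6f)
            {
                return; // ignore low-confidence matches
            }

            // Recognition events arrive on a background thread; marshal UI updates.
            this.Invoke((MethodInvoker)delegate
            {
                switch (e.Result.Text)
                {
                case "Make toast":
                case "Make me toast":
                    label2.Text = "Toast is on the way..."; // placeholder action
                    break;

                case "begone":
                case "go now":
                    Application.Exit();
                    break;
                }
            });
        }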