Code Example #1
File: SpeechService.cs  Project: Necat0r/Automation
        protected override void Dispose(bool disposing)
        {
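            // Release managed members only when called via Dispose(), never from a finalizer.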
            if (disposing)
            {
                if (mRecognizer != null)
                {
                    mRecognizer.Dispose();
                }
                mRecognizer = null;

                if (mInput != null)
                {
                    mInput.Dispose();
                }
                mInput = null;
            }

            base.Dispose(disposing);
        }
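
For context, this override is one half of the standard IDisposable pattern. A minimal sketch of the other half, assuming a hypothetical base class (the actual base type in Necat0r/Automation is not shown in these examples):

        // Hypothetical ServiceBase, for illustration only.
        public abstract class ServiceBase : IDisposable
        {
            public void Dispose()
            {
                // Route explicit disposal through Dispose(bool), then tell the GC
                // a finalizer run is no longer needed.
                Dispose(true);
                GC.SuppressFinalize(this);
            }

            protected virtual void Dispose(bool disposing)
            {
                // Derived classes (like the example above) release managed resources
                // when disposing is true, then call base.Dispose(disposing).
            }
        }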
Code Example #2
File: SpeechService.cs  Project: Necat0r/Automation
        public SpeechService(ServiceCreationInfo info)
            : base("speech", info)
        {
            mVoice = new SpVoice();

            // Select voice
            string voiceName = null;
            try
            {
                voiceName = info.Configuration.Voice;
            }
            catch (RuntimeBinderException) {}

            if (!string.IsNullOrEmpty(voiceName))
            {
                SpObjectToken voiceToken = null;

                CultureInfo culture = new CultureInfo("en-US");
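                // Case-insensitive substring match of the configured name against each
                // installed voice token's ID; with no break below, the last match wins.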
                foreach (var voice in mVoice.GetVoices())
                {
                    var token = voice as SpObjectToken;
                    if (token == null)
                        continue;

                    if (culture.CompareInfo.IndexOf(token.Id, voiceName, CompareOptions.IgnoreCase) < 0)
                        continue;

                    voiceToken = token;
                }

                if (voiceToken != null)
                    mVoice.Voice = voiceToken;
            }

            // Select output. Why isn't this default any longer?
            var enumerator = new MMDeviceEnumerator();
            MMDevice endpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
            if (endpoint != null)
            {
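                    // The code assumes the endpoint's device ID appears as a substring
                    // of the matching SAPI audio-output token ID.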
                foreach (var output in mVoice.GetAudioOutputs())
                {
                    var token = output as SpObjectToken;
                    if (token == null)
                        continue;

                    if (token.Id.IndexOf(endpoint.ID) < 0)
                        continue;

                    mVoice.AudioOutput = token;
                    break;
                }
            }

            mVoiceCommands = new Dictionary<string, DeviceBase.VoiceCommand>();

            mInput = new AudioInput();

            mRecognizer = new SpeechRecognitionEngine();
            mRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(OnSpeechRecognized);
            mRecognizer.RecognizerUpdateReached += new EventHandler<RecognizerUpdateReachedEventArgs>(OnUpdateRecognizer);
            mRecognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(OnRecognizeCompleted);

            var grammar = new Grammar(new GrammarBuilder(new Choices(new string[] { "computer" })));
            mRecognizer.LoadGrammar(grammar);

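            // 44.1 kHz, 16-bit mono; this descriptor must match the audio that
            // AudioInput actually writes into mStream.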
            var speechFormat = new SpeechAudioFormatInfo(44100, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
            mRecognizer.SetInputToAudioStream(mInput.mStream, speechFormat);

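            // Multiple mode keeps recognition running after each result rather than
            // stopping at the first recognized phrase.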
            mRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
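
The constructor wires together three audio stacks: SAPI (SpVoice) for synthesis, NAudio (MMDeviceEnumerator) to locate the default render endpoint, and System.Speech (SpeechRecognitionEngine) for recognition. As a sketch, the using directives and fields it relies on are likely along these lines (inferred from usage, not taken from the project):

        using System.Collections.Generic;
        using System.Globalization;
        using System.Speech.AudioFormat;        // SpeechAudioFormatInfo, AudioBitsPerSample, AudioChannel
        using System.Speech.Recognition;        // SpeechRecognitionEngine, Grammar, GrammarBuilder, Choices
        using Microsoft.CSharp.RuntimeBinder;   // RuntimeBinderException (dynamic Configuration access)
        using NAudio.CoreAudioApi;              // MMDeviceEnumerator, MMDevice, DataFlow, Role
        using SpeechLib;                        // SpVoice, SpObjectToken (SAPI COM interop)

        // Fields inferred from the constructor and the Dispose() override above.
        private SpVoice mVoice;
        private AudioInput mInput;
        private SpeechRecognitionEngine mRecognizer;
        private Dictionary<string, DeviceBase.VoiceCommand> mVoiceCommands;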