Example #1
File: Talker.cs Project: Arakade/navhack
 public Talker(string description)
 {
     m_voice = new SpVoice();

     // Enumerate the audio output tokens and select the one whose
     // description matches the requested device.
     ISpeechObjectTokens tokens = m_voice.GetAudioOutputs(null, null);
     foreach (ISpeechObjectToken token in tokens)
     {
         if (token.GetDescription(0) == description)
         {
             m_voice.AudioOutput = (SpObjectToken)token;
             break; // first match is enough
         }
     }
 }
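
The constructor matches on the localized description string that SAPI reports for each output device, so callers need to know those strings exactly. A minimal sketch for listing them, assuming a COM reference to the Microsoft Speech Object Library (SpeechLib):

    using System;
    using SpeechLib;

    class ListOutputs
    {
        static void Main()
        {
            var voice = new SpVoice();
            // Print the description of every audio output token; these are
            // the values Talker(description) is matched against.
            foreach (ISpeechObjectToken token in voice.GetAudioOutputs(null, null))
            {
                Console.WriteLine(token.GetDescription(0));
            }
        }
    }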
Example #2
        public void ScanOutputDevices()
        {
            // Collect one token per unique device description; the same
            // device can be enumerated more than once.
            var outDevPool = new Dictionary<string, SpObjectToken>();

            foreach (SpObjectToken token in sapi.GetAudioOutputs("", ""))
            {
                string dev = token.GetDescription();

                if (!outDevPool.ContainsKey(dev))
                {
                    outDevPool.Add(dev, token);
                }
            }

            // Rebuild the index-to-token map in enumeration order.
            OutputDeviceList.Clear();
            int idx = 0;
            foreach (var item in outDevPool)
            {
                OutputDeviceList.Add(idx, item.Value);
                idx++;
            }
        }
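
OutputDeviceList and sapi are fields of the surrounding class. A hypothetical usage sketch, assuming sapi is an SpVoice and OutputDeviceList is a Dictionary<int, SpObjectToken>:

    // Hypothetical caller: rescan, then route speech to the first device.
    ScanOutputDevices();
    SpObjectToken firstDevice;
    if (OutputDeviceList.TryGetValue(0, out firstDevice))
    {
        sapi.AudioOutput = firstDevice;
        sapi.Speak("Test", SpeechVoiceSpeakFlags.SVSFDefault);
    }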
Example #3
        public SpeechService(ServiceCreationInfo info)
            : base("speech", info)
        {
            mVoice = new SpVoice();

            // Select voice
            string voiceName = null;
            try
            {
                // Configuration is dynamic; a missing Voice entry throws
                // RuntimeBinderException, leaving voiceName null.
                voiceName = info.Configuration.Voice;
            }
            catch (RuntimeBinderException) {}

            if (!string.IsNullOrEmpty(voiceName))
            {
                SpObjectToken voiceToken = null;

                CultureInfo culture = new CultureInfo("en-US");
                foreach (var voice in mVoice.GetVoices())
                {
                    var token = voice as SpObjectToken;
                    if (token == null)
                        continue;

                    // Case-insensitive substring search for voiceName within
                    // the token Id; the last matching voice wins.
                    if (culture.CompareInfo.IndexOf(token.Id, voiceName, CompareOptions.IgnoreCase) < 0)
                        continue;

                    voiceToken = token;
                }

                if (voiceToken != null)
                    mVoice.Voice = voiceToken;
            }

            // Select output. Why isn't this default any longer?
            var enumerator = new MMDeviceEnumerator();
            MMDevice endpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
            if (endpoint != null)
            {
                foreach (var output in mVoice.GetAudioOutputs())
                {
                    var token = output as SpObjectToken;
                    if (token == null)
                        continue;

                    // The SAPI token Id embeds the Core Audio endpoint Id,
                    // so a substring test pairs the token with the endpoint.
                    if (token.Id.IndexOf(endpoint.ID) < 0)
                        continue;

                    mVoice.AudioOutput = token;
                    break;
                }
            }

            mVoiceCommands = new Dictionary<string, DeviceBase.VoiceCommand>();

            mInput = new AudioInput();

            mRecognizer = new SpeechRecognitionEngine();
            mRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(OnSpeechRecognized);
            mRecognizer.RecognizerUpdateReached += new EventHandler<RecognizerUpdateReachedEventArgs>(OnUpdateRecognizer);
            mRecognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(OnRecognizeCompleted);

            var grammar = new Grammar(new GrammarBuilder(new Choices(new string[] { "computer" })));
            mRecognizer.LoadGrammar(grammar);

            var speechFormat = new SpeechAudioFormatInfo(44100, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
            mRecognizer.SetInputToAudioStream(mInput.mStream, speechFormat);

            mRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
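
The output-selection loop assumes the SAPI token Id contains the Core Audio endpoint Id (endpoint.ID), which is why a plain substring test suffices. A sketch of that device-matching step in isolation, assuming NAudio (NAudio.CoreAudioApi) and the SpeechLib interop are referenced:

    using NAudio.CoreAudioApi;
    using SpeechLib;

    static SpObjectToken FindDefaultOutputToken(SpVoice voice)
    {
        var enumerator = new MMDeviceEnumerator();
        MMDevice endpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);

        // Pair the SAPI token with the default endpoint by Id substring.
        foreach (var output in voice.GetAudioOutputs())
        {
            var token = output as SpObjectToken;
            if (token != null && token.Id.IndexOf(endpoint.ID) >= 0)
                return token;
        }
        return null; // fall back to SAPI's own default output
    }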