Example #1
        void Flow_StateChanged(object sender, MediaFlowStateChangedEventArgs e)
        {
            Log("ControlAVCall Flow_StateChanged PreviousState=" + e.PreviousState + " State=" + e.State);

            AudioVideoFlow avFlow = (AudioVideoFlow)sender;

            if (avFlow.State == MediaFlowState.Active)
            {
                SpeechRecognitionConnector speechRecognitionConnector = new SpeechRecognitionConnector();
                speechRecognitionConnector.AttachFlow(avFlow);

                SpeechRecognitionStream stream = speechRecognitionConnector.Start();

                _speechRecognitionEngine = new SpeechRecognitionEngine();
                _speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_speechRecognitionEngine_SpeechRecognized);
                _speechRecognitionEngine.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(_speechRecognitionEngine_LoadGrammarCompleted);

                Choices pathChoice = new Choices(new string[] { "previous", "next" });
                Grammar gr         = new Grammar(new GrammarBuilder(pathChoice));
                _speechRecognitionEngine.LoadGrammarAsync(gr);

                SpeechAudioFormatInfo speechAudioFormatInfo = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);
                _speechRecognitionEngine.SetInputToAudioStream(stream, speechAudioFormatInfo);
                _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                if (avFlow.SpeechRecognitionConnector != null)
                {
                    avFlow.SpeechRecognitionConnector.DetachFlow();
                }
            }
        }
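
The example wires up _speechRecognitionEngine_SpeechRecognized and _speechRecognitionEngine_LoadGrammarCompleted but does not show their bodies. A minimal sketch of what such handlers might look like (the bodies below are assumptions, not part of the original sample):

        void _speechRecognitionEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Hypothetical handler: e.Result.Text holds the recognized phrase
            // ("previous" or "next" with the grammar loaded above).
            Log("SpeechRecognized Text=" + e.Result.Text + " Confidence=" + e.Result.Confidence);
        }

        void _speechRecognitionEngine_LoadGrammarCompleted(object sender, LoadGrammarCompletedEventArgs e)
        {
            // Hypothetical handler: report whether the asynchronous grammar load succeeded.
            if (e.Error != null)
            {
                Log("LoadGrammarAsync failed: " + e.Error.Message);
            }
            else
            {
                Log("Grammar loaded: " + e.Grammar.Name);
            }
        }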
        public void Shutdown()
        {
            if (_isActive)
            {
                StopSpeechRecognition();
            }

            if (_speechRecognitionEngine != null)
            {
                _speechRecognitionEngine.UnloadAllGrammars();
                _grammars.Clear();
                _pendingLoadSpeechGrammarCounter = 0;

                _speechRecognitionEngine.SpeechDetected -= SpeechRecognitionEngine_SpeechDetected;
                _speechRecognitionEngine.RecognizeCompleted -= SpeechRecognitionEngine_RecognizeCompleted;
                _speechRecognitionEngine.LoadGrammarCompleted -= SpeechRecognitionEngine_LoadGrammarCompleted;
            }

            if (_speechRecognitionConnector != null)
            {
                _speechRecognitionConnector.Dispose();
                _speechRecognitionConnector = null;
            }

            _speechTranscript.Clear();
            _transcriptRecorder = null;
        }
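
Shutdown() relies on StopSpeechRecognition(), which is not shown here. A minimal sketch, assuming it only needs to cancel the outstanding asynchronous recognition and release the connector (not the original implementation):

        private void StopSpeechRecognition()
        {
            if (_isRecognizing && _speechRecognitionEngine != null)
            {
                // Hypothetical: cancel the RecognizeAsync(RecognizeMode.Multiple) operation started earlier.
                _speechRecognitionEngine.RecognizeAsyncCancel();
                _isRecognizing = false;
            }

            if (_speechRecognitionConnector != null)
            {
                // Stop the connector and detach it so it does not keep the flow in memory.
                _speechRecognitionConnector.Stop();
                _speechRecognitionConnector.DetachFlow();
            }

            _isActive = false;
        }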
Example #3
        public void Run()
        {
            // A helper class to take care of platform and endpoint setup and cleanup.
            _helper = new UCMASampleHelper();

            // Create a user endpoint using the network credential object.
            _userEndpoint = _helper.CreateEstablishedUserEndpoint("Broadcast User");

            // Register a delegate to be called when an incoming audio-video call arrives.
            _userEndpoint.RegisterForIncomingCall<AudioVideoCall>(AudioVideoCall_Received);

            // Wait for the incoming call to be accepted.
            Console.WriteLine("Waiting for incoming call...");
            _waitForCallToBeAccepted.WaitOne();

            // Create a speech recognition connector and attach an AudioVideoFlow to it.
            SpeechRecognitionConnector speechRecognitionConnector = new SpeechRecognitionConnector();

            speechRecognitionConnector.AttachFlow(_audioVideoFlow);

            // Start the speech recognition connector.
            SpeechRecognitionStream stream = speechRecognitionConnector.Start();

            // Create a speech recognition engine.
            SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine();

            speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognitionEngine_SpeechRecognized);

            //Add a grammar.
            string[] recoString = { "buy", "sell", "Fabrikam", "Contoso", "maximum", "minimum", "one", "ten", "twenty", "send" };
            Choices  choices    = new Choices(recoString);

            speechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(choices)));

            // Attach the audio stream to the SR engine.
            SpeechAudioFormatInfo speechAudioFormatInfo = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);

            speechRecognitionEngine.SetInputToAudioStream(stream, speechAudioFormatInfo);
            Console.WriteLine("\r\nGrammar loaded, say send to send IM.");

            //Prepare the SR engine to perform multiple asynchronous recognitions.
            speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            //Pause the main thread until recognition completes.
            _waitForConnectorToStop.WaitOne();
            speechRecognitionConnector.Stop();
            Console.WriteLine("connector stopped");

            // Detach the flow from the speech recognition connector, to prevent the flow from being kept in memory.
            speechRecognitionConnector.DetachFlow();

            // Terminate the call, the conversation, and then unregister the
            // endpoint from receiving an incoming call.
            _audioVideoCall.BeginTerminate(CallTerminateCB, _audioVideoCall);
            _waitForConversationToBeTerminated.WaitOne();

            // Shut down the platform.
            _helper.ShutdownPlatform();
        }
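
Run() registers SpeechRecognitionEngine_SpeechRecognized, which is not shown above. A minimal sketch, assuming the handler logs the result and unblocks the main thread once "send" is recognized; the original sample presumably also sends the instant message at that point, which is omitted here:

        void SpeechRecognitionEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            Console.WriteLine("Recognized: " + e.Result.Text);

            if (e.Result.Text == "send")
            {
                // Hypothetical: assumes _waitForConnectorToStop is an EventWaitHandle that Run() blocks on.
                _waitForConnectorToStop.Set();
            }
        }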
        public void Run()
        {
            // Create AudioVideoFlow
            AudioVideoFlowHelper audioVideoFlowHelper = new AudioVideoFlowHelper();

            _audioVideoFlow = audioVideoFlowHelper.CreateAudioVideoFlow(
                null,
                audioVideoFlow_StateChanged);

            // Create a speech recognition connector and attach it to an AudioVideoFlow
            SpeechRecognitionConnector speechRecognitionConnector = new SpeechRecognitionConnector();

            speechRecognitionConnector.AttachFlow(_audioVideoFlow);

            // Start the speech recognition connector and obtain the audio stream
            SpeechRecognitionStream stream = speechRecognitionConnector.Start();

            // Create speech recognition engine and start recognizing by attaching connector to engine
            SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine();

            speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechRecognitionEngine_SpeechRecognized);


            string[] recognizedString = { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "exit" };
            Choices  numberChoices    = new Choices(recognizedString);

            speechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(numberChoices)));

            SpeechAudioFormatInfo speechAudioFormatInfo = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);

            speechRecognitionEngine.SetInputToAudioStream(stream, speechAudioFormatInfo);
            Console.WriteLine("\r\nGrammar loaded from zero to ten, say exit to shutdown.");

            speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            _waitForXXXCompleted.WaitOne();
            //Stop the connector
            speechRecognitionConnector.Stop();
            Console.WriteLine("Stopping the speech recognition connector");

            // The speech recognition connector must be detached from the flow; otherwise, if the connector is rooted, it will keep the flow in memory.
            speechRecognitionConnector.DetachFlow();

            // Shut down the platform
            ShutdownPlatform();

            _waitForShutdownEventCompleted.WaitOne();
        }
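
CreateAudioVideoFlow is passed an audioVideoFlow_StateChanged callback that is not shown. A minimal sketch that follows the detach pattern from Example #1 (the body is an assumption):

        void audioVideoFlow_StateChanged(object sender, MediaFlowStateChangedEventArgs e)
        {
            AudioVideoFlow avFlow = (AudioVideoFlow)sender;

            // When the flow is no longer active, detach any attached speech recognition
            // connector so it does not keep the flow in memory.
            if (avFlow.State != MediaFlowState.Active && avFlow.SpeechRecognitionConnector != null)
            {
                avFlow.SpeechRecognitionConnector.DetachFlow();
            }
        }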
Example #5
        public SpeechRecognizer(TranscriptRecorderSession transcriptRecorder)
        {
            _transcriptRecorder = transcriptRecorder;
            _speechTranscript = new List<Microsoft.Speech.Recognition.RecognitionResult>();
            _isActive = false;
            _isRecognizing = false;

            // Create a speech recognition connector
            _speechRecognitionConnector = new SpeechRecognitionConnector();

            _currentSRLocale = ConfigurationManager.AppSettings[SpeechRecogLocaleKey];
            if (String.IsNullOrEmpty(_currentSRLocale))
            {
                NonBlockingConsole.WriteLine("No locale specified, using default locale for speech recognition: " + DefaultLocale);
                _currentSRLocale = DefaultLocale;
            }

            // Create speech recognition engine and start recognizing by attaching connector to engine
            try
            {
                _speechRecognitionEngine = new Microsoft.Speech.Recognition.SpeechRecognitionEngine();
                /*
                System.Globalization.CultureInfo localeCultureInfo = new System.Globalization.CultureInfo(_currentSRLocale);
                foreach (RecognizerInfo r in Microsoft.Speech.Recognition.SpeechRecognitionEngine.InstalledRecognizers())
                {
                    if (r.Culture.Equals(localeCultureInfo))
                    {
                        _speechRecognitionEngine = new Microsoft.Speech.Recognition.SpeechRecognitionEngine(r);
                        break;
                    }
                }
                if (_speechRecognitionEngine == null)
                {
                    _speechRecognitionEngine = new SpeechRecognitionEngine();
                }
                */
                //_speechRecognitionEngine = new Microsoft.Speech.Recognition.SpeechRecognitionEngine(new System.Globalization.CultureInfo(_currentSRLocale));
            }
            catch (Exception e)
            {
                NonBlockingConsole.WriteLine("Error: Unable to load SpeechRecognition locale: " + _currentSRLocale + ". Exception: " + e.ToString());
                // Use default locale
                NonBlockingConsole.WriteLine("Falling back to default locale for SpeechRecognitionEngine: " + DefaultLocale);
                _currentSRLocale = DefaultLocale;
                _speechRecognitionEngine = new SpeechRecognitionEngine();
                //_speechRecognitionEngine = new Microsoft.Speech.Recognition.SpeechRecognitionEngine(new System.Globalization.CultureInfo(_currentSRLocale));
            }

            _speechRecognitionEngine.SpeechDetected += new EventHandler<Microsoft.Speech.Recognition.SpeechDetectedEventArgs>(SpeechRecognitionEngine_SpeechDetected);
            _speechRecognitionEngine.RecognizeCompleted += new EventHandler<Microsoft.Speech.Recognition.RecognizeCompletedEventArgs>(SpeechRecognitionEngine_RecognizeCompleted);
            _speechRecognitionEngine.LoadGrammarCompleted += new EventHandler<Microsoft.Speech.Recognition.LoadGrammarCompletedEventArgs>(SpeechRecognitionEngine_LoadGrammarCompleted);

            _grammars = new List<Microsoft.Speech.Recognition.Grammar>();
            // TODO: Add default installed speech recognizer grammar
            // TODO: Might already be done via compiling with Recognition Settings File?
            // Add default locale language grammar file (if it exists)
            String localLanguageGrammarFilePath = Path.Combine(Environment.CurrentDirectory, @"en-US.cfgpp");
            if (File.Exists(localLanguageGrammarFilePath))
            {
                NonBlockingConsole.WriteLine("SpeechRecognizer(). Adding locale language file at path: " + localLanguageGrammarFilePath);
                GrammarBuilder builder = new GrammarBuilder();
                builder.AppendRuleReference(localLanguageGrammarFilePath);
                Grammar localeLanguageGrammar = new Grammar(builder);
                localeLanguageGrammar.Name = "Local language grammar";
                //localeLanguageGrammar.Priority = 1;
                _grammars.Add(localeLanguageGrammar);
            }
            
            string[] recognizedString = { "hello", "bye", "yes", "no", "help", "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "exit" };
            Choices numberChoices = new Choices(recognizedString);
            Grammar basicGrammar = new Grammar(new GrammarBuilder(numberChoices));
            basicGrammar.Name = "Basic Grammar";
            //basicGrammar.Priority = 2;
            _grammars.Add(basicGrammar);

            LoadSpeechGrammarAsync();
        }
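
The constructor finishes with LoadSpeechGrammarAsync(), which is not shown. A minimal sketch, assuming it queues every grammar in _grammars and tracks the outstanding loads with _pendingLoadSpeechGrammarCounter, which SpeechRecognitionEngine_LoadGrammarCompleted would then decrement (not the original implementation):

        private void LoadSpeechGrammarAsync()
        {
            // Hypothetical: load each prepared grammar asynchronously and count the pending loads.
            _pendingLoadSpeechGrammarCounter = _grammars.Count;
            foreach (Microsoft.Speech.Recognition.Grammar grammar in _grammars)
            {
                _speechRecognitionEngine.LoadGrammarAsync(grammar);
            }
        }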