Exemple #1
0
 private void OnResponseReceived(object sender, SpeechResponseEventArgs e)
 {
     Console.WriteLine(e.PhraseResponse.RecognitionStatus);
     if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.InitialSilenceTimeout ||
         e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
     {
         // The service ended the session on silence; restart recognition off the
         // event thread so dictation keeps running continuously.
         Task.Run(() =>
         {
             lock (speechClientLocker)
             {
                 // Release the old client before replacing it: it holds the
                 // microphone, and its handlers would otherwise keep firing
                 // alongside the new client's (previously leaked on every restart).
                 if (speechClient != null)
                 {
                     speechClient.OnPartialResponseReceived -= OnPartialResponseReceived;
                     speechClient.OnResponseReceived        -= OnResponseReceived;
                     speechClient.Dispose();
                 }
                 speechClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(SpeechRecognitionMode.LongDictation, "en-US", cloudCreds.SpeechAPIKey);
                 speechClient.OnPartialResponseReceived += OnPartialResponseReceived;
                 speechClient.OnResponseReceived        += OnResponseReceived;
                 speechClient.StartMicAndRecognition();
             }
         });
     }
     else
     {
         // Forward the highest-confidence display text, if any, to subscribers.
         var result = e.PhraseResponse.Results?.OrderByDescending(i => i.Confidence).Select(i => i.DisplayText).FirstOrDefault();
         if (!string.IsNullOrEmpty(result))
         {
             ResponseReceived?.Invoke(result);
         }
     }
 }
Exemple #2
0
        /*
         * Experimental test helpers — not part of the production flow; do not modify.
         */

        /// <summary>
        /// Experimental helper: kicks off microphone recognition for the client
        /// selected by <paramref name="mode"/> (0 = short phrase, 1 = long dictation).
        /// Any other mode does nothing beyond the initial log line.
        /// </summary>
        public static void testSpeechReco(int mode)
        {
            Console.WriteLine("testing now");

            if (mode == 0)
            {
                //shortPhraseClient.StartMicAndRecognition();
                Console.WriteLine("shortphrase started");
            }
            else if (mode == 1)
            {
                longDictationClient.StartMicAndRecognition();
                Console.WriteLine("longdictation started");
            }
        }
Exemple #3
0
        /// <summary>
        /// Entry point: starts long-dictation recognition from the microphone and
        /// sorts each final result into one of two running transcripts.
        /// </summary>
        static void Main()
        {
            var speechRecogeMode = SpeechRecognitionMode.LongDictation;
            var lang             = "en-us";
            // SECURITY: subscription key is hard-coded in source; move it to
            // configuration or an environment variable and rotate the key.
            var subkey           = "c060c1c953504758ae24a0e6f17f7d71";
            var cur = string.Empty;
            var r   = string.Empty;

            l = SpeechRecognitionServiceFactory.CreateMicrophoneClient(speechRecogeMode, lang, subkey);
            l.OnResponseReceived += (sender, e) =>
            {
                Console.Clear();

                // Any() expresses the emptiness test directly (was Count() > 0).
                if (e.PhraseResponse.Results.Any())
                {
                    var res = e.PhraseResponse.Results.Last().DisplayText;

                    // Results containing "학생" ("student") go to a separate bucket.
                    // NOTE(review): the recognition language is en-us, so Korean
                    // matches are unlikely — confirm the language/marker pairing.
                    if (res.Contains("학생"))
                    {
                        cur += res + '\n';
                    }
                    else
                    {
                        r += res + '\n';
                    }
                }
                Console.WriteLine(cur);
                Console.WriteLine();

                Console.WriteLine(r);
            };

            l.StartMicAndRecognition();
        }
 // Begins a new short-phrase microphone recognition session and disables the
 // start button so a second session cannot be launched while one is active.
 private void startListening()
 {
     LogRecognitionStart("microphone", _recoLanguage, SpeechRecognitionMode.ShortPhrase);
     Console.WriteLine("Inside Start Listening");
     // NOTE(review): a fresh client is created on every call and the previous one
     // is never stopped or disposed — confirm whether reuse was intended.
     micClient = CreateMicrophoneRecoClient(_recoLanguage, SpeechRecognitionMode.ShortPhrase);
     micClient.StartMicAndRecognition();
     _startButton.IsEnabled = false;
 }
Exemple #5
0
 /// <summary>
 /// Lazily creates the microphone client, then begins streaming audio.
 /// </summary>
 private void StartMicrophone()
 {
     if (null == micClient)
     {
         CreateMicrophoneRecoClient();
     }

     micClient.StartMicAndRecognition();
 }
Exemple #6
0
 /// <summary>
 /// Creates a microphone client for the requested mode and language, hooks up
 /// every recognition callback, then opens the mic and starts streaming.
 /// </summary>
 public static void ConvertSpeechToText(SpeechRecognitionMode mode, string language, string subscriptionKey)
 {
     _microRecogClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(mode, language, subscriptionKey);

     _microRecogClient.OnConversationError       += OnConversationError;
     _microRecogClient.OnResponseReceived        += OnResponseReceivedHandler;
     _microRecogClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;

     _microRecogClient.StartMicAndRecognition();
 }
        // Subscribes to recognition callbacks and starts the microphone session.
        private void StartRecording()
        {
            // Handlers are attached before any audio flows so no early partial
            // results are missed.
            m_MicrophoneRecording.OnResponseReceived        += OnResponseReceived;
            m_MicrophoneRecording.OnPartialResponseReceived += OnPartialResponseReceived;

            // NOTE(review): handlers are added on every call — repeated calls
            // would double-subscribe; confirm StartRecording runs only once.
            m_MicrophoneRecording.AudioStart();
            m_MicrophoneRecording.StartMicAndRecognition();
        }
Exemple #8
0
        /// <summary>
        /// Start-button handler: builds the microphone client on first use, then
        /// kicks off recognition.
        /// </summary>
        private void btnStart_Click(object sender, RoutedEventArgs e)
        {
            if (null == micClient)
            {
                CreateMicrophoneRecoClient();
            }
            micClient.StartMicAndRecognition();
        }
Exemple #9
0
        /// <summary>
        /// Record-button handler: locks the button, clears any text left over from
        /// the previous session, and starts microphone recognition.
        /// </summary>
        private void RecordOnClick(object sender, RoutedEventArgs e)
        {
            // Show the recording state and prevent re-entry.
            RecordButton.Content   = "Recording...";
            RecordButton.IsEnabled = false;

            // Reset both result displays.
            FinalResult.Text    = "";
            PartialResults.Text = "";

            _micClient.StartMicAndRecognition();
        }
        /// <summary>
        /// Starts short-phrase speech-to-text ("ses" = speech, "yazi" = text)
        /// from the microphone using the key from application configuration.
        /// </summary>
        private void SesiYaziyaDonustur()
        {
            var    sesTanimaModu = SpeechRecognitionMode.ShortPhrase;
            string dil           = "en-US";
            // AppSettings[...] already returns a string; the removed .ToString()
            // call threw NullReferenceException when the key was missing.
            string scriptionKEY  = ConfigurationManager.AppSettings["MicrosoftSpeechApiKey"];

            mikrofonTanimaAlicisi = SpeechRecognitionServiceFactory.CreateMicrophoneClient(sesTanimaModu, dil, scriptionKEY);
            // NOTE(review): only partial results are handled here — no
            // OnResponseReceived subscription; confirm finals are consumed elsewhere.
            mikrofonTanimaAlicisi.OnPartialResponseReceived += OnPartialResponseReceived_HANDLER;
            mikrofonTanimaAlicisi.StartMicAndRecognition();
        }
Exemple #11
0
        /// <summary>
        /// Starts long-dictation speech-to-text from the default microphone.
        /// </summary>
        public void ConvertSpeechToText()
        {
            // SECURITY NOTE(review): the subscription key is embedded in source;
            // it should live in configuration and be rotated.
            string ApiKey   = "efa2f45a7d3e4e7a8ef841ed967ee5c0";
            string language = "en-us";
            var    speechRecognitionMode = SpeechRecognitionMode.LongDictation;

            microphoneRecognitionClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(speechRecognitionMode, language, ApiKey);
            microphoneRecognitionClient.OnResponseReceived += ResponseReceived;
            microphoneRecognitionClient.StartMicAndRecognition();
        }
Exemple #12
0
        /// <summary>
        /// Builds a long-dictation microphone client, attaches the app-wide result
        /// handlers, and starts listening.
        /// </summary>
        private void btnTurnOnAutoStuff_Click(object sender, RoutedEventArgs e)
        {
            // SECURITY NOTE(review): both subscription keys are hard-coded; move
            // them into protected configuration and rotate them.
            string apiKey1 = "da75bfe0a6bc4d2bacda60b10b5cef7e";
            string apiKey2 = "c36c061f0b8748bd862aa5bbcceda683";

            var dictationClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(SpeechRecognitionMode.LongDictation, "en-US", apiKey1, apiKey2);

            dictationClient.OnResponseReceived        += App.onResponseReceivedHandler;
            dictationClient.OnPartialResponseReceived += App.onPartialResponseReceivedHandler;
            dictationClient.StartMicAndRecognition();
        }
        /// <summary>
        /// Fired when a person is detected: ensures a microphone client exists
        /// (created without auto-start), begins recognition, and lights the mic icon.
        /// </summary>
        private void PresenceDetector_PresenceDetected(object sender, EventArgs e)
        {
            if (null == micClient)
            {
                InitMicClient(false);
            }
            micClient.StartMicAndRecognition();
            SetMicImage(true);
        }
Exemple #14
0
 /// <summary>
 /// Logs the session, lazily builds the intent-enabled microphone client,
 /// starts recognition, and blocks re-entry via the start button.
 /// </summary>
 private void startListening()
 {
     LogRecognitionStart("microphone", _recoLanguage, SpeechRecognitionMode.ShortPhrase);

     _micClient = _micClient ?? CreateMicrophoneRecoClientWithIntent(_recoLanguage);
     _micClient.StartMicAndRecognition();

     _startButton.IsEnabled = false;
 }
        /// <summary>
        /// Logs that a microphone session is starting, creates the short-phrase
        /// client only on first use, and opens the mic.
        /// </summary>
        public void openMic()
        {
            LogRecognitionStart("microphone", _recoLanguage, SpeechRecognitionMode.ShortPhrase);

            _micClient = _micClient ?? CreateMicrophoneRecoClient(SpeechRecognitionMode.ShortPhrase, _recoLanguage, SubscriptionKey);
            _micClient.StartMicAndRecognition();
        }
Exemple #16
0
        /// <summary>
        /// Ensures a microphone recognition client exists and starts streaming
        /// microphone audio to the speech service.
        /// </summary>
        public static void ConverSpeechToText()
        {
            try
            {
                // Both branches previously duplicated the start call; create the
                // client only when missing, then start recognition once.
                if (micClient == null)
                {
                    CreateMicrophoneRecoClient();
                }
                micClient.StartMicAndRecognition();
            }
            catch (Exception e)
            {
                // Previously printed the useless "Nothing"; report the actual error
                // instead of swallowing it silently.
                Console.WriteLine("Speech recognition failed: " + e.Message);
            }
        }
Exemple #17
0
        /// <summary>
        /// Toggles speech capture: starts a long-dictation microphone session when
        /// the button state is "Record", and ends/analyzes it when it is "Finish".
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Routed event data (unused).</param>
        private void btnAnalyzeSpeech_Click(object sender, RoutedEventArgs e)
        {
            ignoreNextString             = false;
            AudioControlsGrid.Visibility = Visibility.Visible;

            if (ButtonState == "Record")
            {
                // Reset state left over from any previous session.
                // (Removed unused local: BrushConverter bc.)
                this.fullText = null;
                recording     = true;
                mySentiment.Sentiment = 0.5;
                userInput.Text        = "";

                // Switch the UI into its "recording" layout.
                recordGrid.Visibility     = System.Windows.Visibility.Hidden;
                recordingdGrid.Visibility = System.Windows.Visibility.Visible;

                recordingState = RecordingState.Recording;

                string speechAPIKey = confCollection["SpeechRecognitionAPIKey"].Value;

                MicrophoneRecognitionClient intentMicClient =
                    SpeechRecognitionServiceFactory.CreateMicrophoneClient(SpeechRecognitionMode.LongDictation,
                                                                           "en-us",
                                                                           speechAPIKey);

                m_micClient = intentMicClient;

                // Event handlers for speech recognition results.
                m_micClient.OnResponseReceived        += this.OnResponseReceivedHandler;
                m_micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
                //m_micClient.OnConversationError += OnConversationErrorHandler;

                // First send of audio data to service.
                m_micClient.StartMicAndRecognition();

                ButtonState = "Finish";
            }
            // Finish the recording.
            else if (ButtonState == "Finish")
            {
                // NOTE(review): sleeping on the UI thread freezes the window for a
                // second — presumably to flush trailing audio; consider an async delay.
                Thread.Sleep(1000);
                recording = false;
                m_micClient.EndMicAndRecognition();
                recordGrid.Visibility     = System.Windows.Visibility.Visible;
                recordingdGrid.Visibility = System.Windows.Visibility.Hidden;

                ButtonState = "Record";

                DisplayAnalysis();

                // Stop recording.
                Stop();
            }
        }
Exemple #18
0
        /// <summary>
        /// Opens the microphone, then flips the UI into listening mode on the
        /// dispatcher thread.
        /// </summary>
        private void StartRecognition()
        {
            clientUserSpeech.StartMicAndRecognition();

            Dispatcher.Invoke(() =>
            {
                txtText.Text = "Listening..";

                dockText.Width     = MaxTextWidth; // TODO: move this into the view model
                btnSend.Visibility = Visibility.Hidden;
                btnMic.IsEnabled   = false;
            });
        }
        /// <summary>
        /// Marks the session active, logs it, lazily creates the microphone client,
        /// and begins recognition.
        /// </summary>
        public void StartRecordingSession()
        {
            Recording = true;
            LogRecognitionStart();

            if (_micClient == null)
            {
                CreateMicrophoneRecoClient();
            }
            _micClient.StartMicAndRecognition();
        }
Exemple #20
0
 /// <summary>
 /// Toggles between recording and idle based on the current button caption.
 /// </summary>
 private void Record_Click(object sender, RoutedEventArgs e)
 {
     if (RecordButtonText != "Record")
     {
         // Currently recording: stop and revert the caption.
         micRecognitionClient.EndMicAndRecognition();
         RecordButtonText = "Record";
     }
     else
     {
         // Idle: start listening and offer a stop action.
         micRecognitionClient.StartMicAndRecognition();
         RecordButtonText = "Stop";
     }
 }
        /// <summary>
        /// Starts Spanish short-phrase speech-to-text from the microphone using
        /// the subscription key from application configuration.
        /// </summary>
        private void ConvertSpeechToText()
        {
            var    speechRecognitionMode = SpeechRecognitionMode.ShortPhrase;
            string language        = "es-es";
            // AppSettings[...] is already a string; the previous .ToString() call
            // turned a missing "SpeechKey" entry into a NullReferenceException.
            string subscriptionkey = ConfigurationManager.AppSettings["SpeechKey"];


            _microphoneRecognitionClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient
                                               (speechRecognitionMode, language, subscriptionkey);

            _microphoneRecognitionClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
            _microphoneRecognitionClient.OnResponseReceived        += OnMicShortPhraseResponseReceivedHandler;
            _microphoneRecognitionClient.StartMicAndRecognition();
        }
Exemple #22
0
        /// <summary>
        /// Speech-to-text for the message textbox: short-phrase en-us recognition
        /// from the microphone.
        /// </summary>
        private void ConvertSpeechToTextnew()
        {
            var    speechRecognitionMode = SpeechRecognitionMode.ShortPhrase;
            string language        = "en-us";
            // AppSettings[...] already yields a string; the removed .ToString()
            // call crashed with NullReferenceException when the key was absent.
            string subscriptionKey = ConfigurationManager.AppSettings["MicrosoftSpeechApiKey"];

            _microphoneRecognitionClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                speechRecognitionMode,
                language,
                subscriptionKey
                );
            _microphoneRecognitionClient.OnPartialResponseReceived += ResponseReceivednew;
            _microphoneRecognitionClient.OnResponseReceived        += OnMicShortPhraseResponceReceivedHandlernew;
            _microphoneRecognitionClient.StartMicAndRecognition();
        }
Exemple #23
0
        /// <summary>
        /// Starts recognition from either the microphone or a wave file, creating
        /// the appropriate client (with or without intent) on first use.
        /// </summary>
        private void StartButton_Click(object sender, RoutedEventArgs e)
        {
            // Both wave files must be selected before anything can start.
            if (ShortWaveFile == null || LongWaveFile == null)
            {
                MessageBox.Show("Please select an audio file (.wav).");
                return;
            }

            // Freeze the controls while a session is in flight.
            _startButton.IsEnabled = false;
            _radioGroup.IsEnabled  = false;

            //LogRecognitionStart();

            if (UseMicrophone)
            {
                // Lazily build the microphone client, honoring the intent flag.
                if (_micClient == null)
                {
                    if (WantIntent)
                    {
                        CreateMicrophoneRecoClientWithIntent();
                    }
                    else
                    {
                        CreateMicrophoneRecoClient();
                    }
                }

                _micClient?.StartMicAndRecognition();
            }
            else
            {
                // File-based recognition: lazily build the data client.
                if (_dataClient == null)
                {
                    if (WantIntent)
                    {
                        CreateDataRecoClientWithIntent();
                    }
                    else
                    {
                        CreateDataRecoClient();
                    }
                }

                var waveFile = (Mode == SpeechRecognitionMode.ShortPhrase) ? ShortWaveFile : LongWaveFile;
                SendAudioHelper(waveFile);
            }
        }
Exemple #24
0
 /// <summary>
 /// Start/Stop toggle: builds a fresh long-dictation client when idle, and
 /// fully tears the client down when recognizing.
 /// </summary>
 private void StartButton_Click(object sender, RoutedEventArgs e)
 {
     if (!IsRecognizing)
     {
         // Spin up a new client and begin listening.
         StartButton.Content = "Stop";
         micClient           = SpeechRecognitionServiceFactory.CreateMicrophoneClient(SpeechRecognitionMode.LongDictation, "en-US", ApiKey1.Text);
         micClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
         micClient.OnResponseReceived        += OnMicDictationResponseReceivedHandler;
         micClient.StartMicAndRecognition();
     }
     else
     {
         // Dispose completely so the next start gets a clean client.
         StartButton.Content = "Start";
         micClient.EndMicAndRecognition();
         micClient.Dispose();
         micClient = null;
     }
     IsRecognizing = !IsRecognizing;
 }
Exemple #25
0
        /// <summary>
        /// Azure speech-to-text: starts long-dictation recognition from the
        /// microphone and wires both partial and final result handlers.
        /// </summary>
        private void ConvertSpeechToText()
        {
            var    speechRecognitionMode = SpeechRecognitionMode.LongDictation; // ShortPhrase is the alternative mode
            string language        = "en-us";
            // SECURITY: subscription key is committed in source; move it into
            // configuration (e.g. ConfigurationManager.AppSettings) and rotate it.
            string subscriptionKey = "8415e70a55d541a3b0cf40bdf9366c41";

            microphoneRecognitionClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                speechRecognitionMode,
                language,
                subscriptionKey
                );

            microphoneRecognitionClient.OnPartialResponseReceived += ResponseReceived;
            microphoneRecognitionClient.OnResponseReceived += OnMicDictationResponseReceivedHandler;
            microphoneRecognitionClient.StartMicAndRecognition();
        }
        /// <summary>
        /// Replaces any existing microphone client with a freshly configured
        /// long-dictation client; optionally begins listening immediately.
        /// </summary>
        private void InitMicClient(bool startListening)
        {
            DestroyMicClient();

            micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                SpeechRecognitionMode.LongDictation,
                LANGUAGE,
                oxfordKey);

            // Wire up status, result, and error callbacks.
            micClient.OnConversationError       += OnConversationErrorHandler;
            micClient.OnResponseReceived        += OnMicDictationResponseReceivedHandler;
            micClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
            micClient.OnMicrophoneStatus        += OnMicrophoneStatus;

            if (startListening)
            {
                micClient.StartMicAndRecognition();
            }
        }
        // ----------------------------------------------------------------------------------------------------------------------------------------

        // Starts the microphone with intent detection and keeps the event handlers working until the user stops
        /// <summary>
        /// Builds a microphone client that also routes utterances to LUIS for
        /// intent detection, wires all callbacks, and starts listening.
        /// </summary>
        public void CreateMicrophoneRecoClientWithIntent()
        {
            Console.WriteLine("--- Start microphone dictation with Intent detection ----");

            Microphone =
                SpeechRecognitionServiceFactory.CreateMicrophoneClientWithIntentUsingEndpointUrl(
                    RecognitionLanguage,
                    SpeechAPISubscriptionKey,
                    LuisEndpointURL);

            // Intent results from LUIS.
            Microphone.OnIntent += OnIntentHandler;

            // Speech recognition results and errors.
            Microphone.OnConversationError       += OnConversationErrorHandler;
            Microphone.OnResponseReceived        += OnMicShortPhraseResponseReceivedHandler;
            Microphone.OnPartialResponseReceived += OnPartialResponseReceivedHandler;

            Microphone.StartMicAndRecognition();
        }
Exemple #28
0
        /// <summary>
        ///     Runs one microphone recognition session: turns the mic on, streams
        ///     audio (with the built-in silence detector) to the Speech Recognition
        ///     Service, and waits for the final result before releasing the mic.
        /// </summary>
        void DoMicrophoneRecognition(MicrophoneRecognitionClient micClient)
        {
            // Long dictation gets a much larger window than short phrases.
            int waitSeconds;
            if (m_recoMode == SpeechRecognitionMode.LongDictation)
            {
                waitSeconds = 200;
            }
            else
            {
                waitSeconds = 15;
            }

            try
            {
                micClient.StartMicAndRecognition();
                Console.WriteLine("Start talking");

                // Block until OnResponseReceived delivers the final result, or
                // the timeout elapses — whichever comes first.
                if (!micClient.WaitForFinalResponse(waitSeconds * 1000))
                {
                    Console.WriteLine("{0}: Timed out waiting for conversation response after {1} ms",
                                      DateTime.UtcNow, waitSeconds * 1000);
                }
            }
            finally
            {
                // Stop streaming; final results still arrive via OnResponseReceived.
                micClient.EndMicAndRecognition();
            }
        }
Exemple #29
0
 /// <summary>
 /// Turns on the microphone and starts streaming audio to the speech
 /// recognition client, then marks the mic session as active.
 /// </summary>
 public void StartMicToText()
 {
     _micRecClient.StartMicAndRecognition();
     // Presumably checked elsewhere to know a session is live — verify usage.
     _isMicRecording = true;
 }
        /// <summary>
        ///     Performs a single microphone recognition pass: opens the mic,
        ///     streams audio through the built-in silence detector to the Speech
        ///     Recognition Service, and waits (bounded) for the final response.
        /// </summary>
        void DoMicrophoneRecognition(MicrophoneRecognitionClient micClient)
        {
            // Long dictation sessions get far more time than short phrases.
            int timeoutMs = ((m_recoMode == SpeechRecognitionMode.LongDictation) ? 200 : 15) * 1000;

            try
            {
                micClient.StartMicAndRecognition();
                Console.WriteLine("Start talking");

                // Wait for the final OnResponseReceived, up to the timeout.
                bool gotResponse = micClient.WaitForFinalResponse(timeoutMs);
                if (gotResponse == false)
                {
                    Console.WriteLine("{0}: Timed out waiting for conversation response after {1} ms",
                                      DateTime.UtcNow, timeoutMs);
                }
            }
            finally
            {
                // Always release the microphone; final results still arrive via events.
                micClient.EndMicAndRecognition();
            }
        }
        /// <summary>
        /// Starts microphone recognition on the underlying client and hands back
        /// the observable stream of recognition results.
        /// </summary>
        public IObservable <RecognitionResult> StartMicAndRecognition()
        {
            client.StartMicAndRecognition();
            return this.ResponseReceived;
        }
        /// <summary>
        /// Toggles speech capture: starts a long-dictation microphone session when
        /// the button state is "Record", and ends/analyzes it when it is "Finish".
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Routed event data (unused).</param>
        private void btnAnalyzeSpeech_Click(object sender, RoutedEventArgs e)
        {
            ignoreNextString = false;
            AudioControlsGrid.Visibility = Visibility.Visible;

            if (ButtonState == "Record")
            {
                // Reset state left over from any previous session.
                this.fullText = null;
                recording = true;
                // NOTE(review): bc is created but never used — candidate for removal.
                BrushConverter bc = new BrushConverter();
                mySentiment.Sentiment = 0.5;
                userInput.Text = "";

                // Switch the UI into its "recording" layout.
                recordGrid.Visibility = System.Windows.Visibility.Hidden;
                recordingdGrid.Visibility = System.Windows.Visibility.Visible;

                recordingState = RecordingState.Recording;

                //string speechAPIKey = confCollection["SpeechRecognitionAPIKey"].Value;

                string speechAPIKey = Properties.Settings.Default.SpeechRecognitionAPIKey;

                MicrophoneRecognitionClient intentMicClient =
                                SpeechRecognitionServiceFactory.CreateMicrophoneClient(SpeechRecognitionMode.LongDictation,
                                                                                       "en-us",
                                                                                       speechAPIKey);

                m_micClient = intentMicClient;

                // Event handlers for speech recognition results
                m_micClient.OnResponseReceived += this.OnResponseReceivedHandler;
                m_micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
                //m_micClient.OnConversationError += OnConversationErrorHandler;

                // First send of audio data to service
                m_micClient.StartMicAndRecognition();

                ButtonState = "Finish";
            }
            // Finish the recording
            else if (ButtonState == "Finish")
            {
                // NOTE(review): Thread.Sleep blocks the UI thread for a second —
                // presumably to let trailing audio flush; confirm and consider async.
                Thread.Sleep(1000);
                recording = false;
                m_micClient.EndMicAndRecognition();
                recordGrid.Visibility = System.Windows.Visibility.Visible;
                recordingdGrid.Visibility = System.Windows.Visibility.Hidden;

                ButtonState = "Record";

                DisplayAnalysis();

                // Stop recording.
                Stop();
            }
        }