/// <summary>
/// Creates a new microphone reco client without LUIS intent support.
/// </summary>
private void CreateMicrophoneRecoClient()
{
    _micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        Mode,
        DefaultLocale,
        SubscriptionKey);
    _micClient.AuthenticationUri = AuthenticationUri;

    // Event handlers for speech recognition results
    _micClient.OnMicrophoneStatus += OnMicrophoneStatus;
    //micClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    switch (Mode)
    {
        case SpeechRecognitionMode.ShortPhrase:
            _micClient.OnResponseReceived += OnMicShortPhraseResponseReceivedHandler;
            break;
        case SpeechRecognitionMode.LongDictation:
            _micClient.OnResponseReceived += OnMicDictationResponseReceivedHandler;
            break;
    }

    _micClient.OnConversationError += OnConversationErrorHandler;
}
/// <summary>
/// Creates a new microphone reco client without LUIS intent support.
/// </summary>
private void CreateMicrophoneRecoClient()
{
    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);
    this.micClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition results
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;

    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.micClient.OnResponseReceived += this.OnMicShortPhraseResponseReceivedHandler;
    }
    else if (this.Mode == SpeechRecognitionMode.LongDictation)
    {
        this.micClient.OnResponseReceived += this.OnMicDictationResponseReceivedHandler;
    }

    this.micClient.OnConversationError += this.OnConversationErrorHandler;
}
private void Stopbtn_Click(object sender, RoutedEventArgs e)
{
    Dispatcher.Invoke((Action)(() =>
    {
        try
        {
            _FinalResponceEvent.Set();
            _microphoneRecognitionClient.EndMicAndRecognition();
            _microphoneRecognitionClient.Dispose();
            _microphoneRecognitionClient = null;
            Speakbtn.Content = "Start\nRecording";
        }
        catch (Exception e1)
        {
            // Log the caught exception (the original logged the event args by mistake).
            Console.WriteLine(e1);
        }
    }));

    Speakbtn.IsEnabled = true;
    Responsetxt.Background = Brushes.White;
    Responsetxt.Foreground = Brushes.Black;
    Responsetxt.Text = "hello helpline number there is case of murder in my area send urgent help";
    GetSentiments(Responsetxt.Text);
}
/// <summary>
/// Called when a final response is received.
/// </summary>
void OnMicDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    WriteLine("--- OnMicDictationResponseReceivedHandler ---");
    if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
        e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
    {
        Dispatcher.Invoke((Action)(() =>
        {
            _FinalResponseEvent.Set();

            // We got the final result, so we can end the mic reco. No need to do this
            // for dataReco, since we already called endAudio() on it as soon as we were done
            // sending all the data.
            _micClient.EndMicAndRecognition();

            // BUGBUG: Work around for the issue when cached _micClient cannot be re-used for recognition.
            _micClient.Dispose();
            _micClient = null;

            _startButton.IsEnabled = true;
            _radioGroup.IsEnabled = true;
        }));
    }

    WriteResponseResult(e);
}
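// The handler above calls WriteResponseResult, which is not shown in this snippet.
// A minimal sketch of what it might look like, assuming a WriteLine(format, args)
// helper like the one used above (the body is an assumption, not the original):
private void WriteResponseResult(SpeechResponseEventArgs e)
{
    if (e.PhraseResponse.Results.Length == 0)
    {
        WriteLine("No phrase response is available.");
        return;
    }

    WriteLine("--- Phrase result received ---");
    for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
    {
        // Each result carries a confidence level and the recognized display text.
        WriteLine(
            "[{0}] Confidence={1}, Text=\"{2}\"",
            i,
            e.PhraseResponse.Results[i].Confidence,
            e.PhraseResponse.Results[i].DisplayText);
    }
}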
static void Main()
{
    var speechRecogMode = SpeechRecognitionMode.LongDictation;
    var lang = "en-us";
    var subkey = "c060c1c953504758ae24a0e6f17f7d71";
    var cur = string.Empty;
    var r = string.Empty;

    var l = SpeechRecognitionServiceFactory.CreateMicrophoneClient(speechRecogMode, lang, subkey);
    l.OnResponseReceived += (sender, e) =>
    {
        Console.Clear();
        if (e.PhraseResponse.Results.Count() > 0)
        {
            var res = e.PhraseResponse.Results.Last().DisplayText;
            if (res.Contains("학생")) // "student" in Korean
            {
                cur += res + '\n';
            }
            else
            {
                r += res + '\n';
            }
        }

        Console.WriteLine(cur);
        Console.WriteLine();
        Console.WriteLine(r);
    };

    l.StartMicAndRecognition();

    // Keep the process alive so recognition events can arrive.
    Console.ReadLine();
}
public MainWindow()
{
    InitializeComponent();

    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        "en-US",
        "1d802aaea3b54dc5a18fd04014ae490b");
    this.micClient.OnMicrophoneStatus += MicClient_OnMicrophoneStatus;
    this.micClient.OnResponseReceived += MicClient_OnResponseReceived;
}
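// Several of the MainWindow snippets in this collection wire up MicClient_OnResponseReceived
// without showing it. A minimal sketch, assuming a TextBlock named resultText in the XAML
// (both the handler body and the control name are assumptions):
private void MicClient_OnResponseReceived(object sender, SpeechResponseEventArgs e)
{
    // Pick the highest-confidence phrase, if any, and marshal back to the UI thread.
    var best = e.PhraseResponse.Results
        ?.OrderByDescending(r => r.Confidence)
        .FirstOrDefault();
    if (best != null)
    {
        Dispatcher.Invoke(() => resultText.Text = best.DisplayText);
    }
}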
/// <summary>
/// Creates a new microphone reco client without LUIS intent support.
/// </summary>
private void CreateMicrophoneRecoClient()
{
    string url = null;
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        url = "https://d5a89bbf25d54ab2a6cbcff90aece700.api.cris.ai/ws/cris/speech/recognize";
    }
    else if (this.Mode == SpeechRecognitionMode.LongDictation)
    {
        url = "https://a5936cdca4384273a0428efc972cf356.api.cris.ai/ws/cris/speech/recognize/continuous";
    }

    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey,
        this.SubscriptionKey,
        url);
    this.micClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition results
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.micClient.OnResponseReceived += this.OnMicShortPhraseResponseReceivedHandler;
    }
    else if (this.Mode == SpeechRecognitionMode.LongDictation)
    {
        this.micClient.OnResponseReceived += this.OnMicDictationResponseReceivedHandler;
    }

    this.micClient.OnConversationError += this.OnConversationErrorHandler;
}
private void OnResponseReceived(object sender, SpeechResponseEventArgs e)
{
    Console.WriteLine(e.PhraseResponse.RecognitionStatus);
    if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.InitialSilenceTimeout ||
        e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
    {
        // The client stops after a silence timeout, so spin up a fresh one to keep listening.
        Task.Run(() =>
        {
            lock (speechClientLocker)
            {
                speechClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                    SpeechRecognitionMode.LongDictation,
                    "en-US",
                    cloudCreds.SpeechAPIKey);
                speechClient.OnPartialResponseReceived += OnPartialResponseReceived;
                speechClient.OnResponseReceived += OnResponseReceived;
                speechClient.StartMicAndRecognition();
            }
        });
    }
    else
    {
        var result = e.PhraseResponse.Results
            ?.OrderByDescending(i => i.Confidence)
            .Select(i => i.DisplayText)
            .FirstOrDefault();
        if (!string.IsNullOrEmpty(result))
        {
            ResponseReceived?.Invoke(result);
        }
    }
}
/// <summary>
/// SpeechToText constructor. Creates the recording objects and calls the Initialize function.
/// </summary>
/// <param name="bingApiKey">Bing Speech API key</param>
public SpeechToText(string bingApiKey)
{
    _dataRecClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntentUsingEndpointUrl(
        _language, bingApiKey, "LUIS_ENDPOINT");
    _micRecClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        _speechMode, _language, bingApiKey);

    Initialize();
}
private static void CreateMicrophoneRecoClient()
{
    var db = new Models();
    string language = "en-IN";
    if (db != null && db.SettingsDB != null &&
        db.SettingsDB.Any(s => s.Key.Equals("Language Preference", StringComparison.InvariantCultureIgnoreCase)))
    {
        language = db.SettingsDB
            .Where(s => s.Key.Equals("Language Preference", StringComparison.InvariantCultureIgnoreCase))
            .FirstOrDefault().Value;
        language = language.Equals("English - US") ? "en-US" : "en-IN";
    }

    micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        language,
        "f078735144bb444d93025bfcc860894b");
    micClient.AuthenticationUri = string.Empty;

    // Event handlers for speech recognition results
    micClient.OnMicrophoneStatus += OnMicrophoneStatus;
    micClient.OnResponseReceived += OnMicShortPhraseResponseReceivedHandler;
}
protected virtual void Dispose(bool disposing)
{
    if (disposing)
    {
        if (_micRecClient != null)
        {
            _micRecClient.EndMicAndRecognition();
            _micRecClient.OnMicrophoneStatus -= OnMicrophoneStatus;
            _micRecClient.OnPartialResponseReceived -= OnPartialResponseReceived;
            _micRecClient.OnResponseReceived -= OnResponseReceived;
            _micRecClient.OnConversationError -= OnConversationErrorReceived;
            _micRecClient.Dispose();
            _micRecClient = null;
        }

        if (_dataRecClient != null)
        {
            _dataRecClient.OnIntent -= OnIntentReceived;
            _dataRecClient.OnPartialResponseReceived -= OnPartialResponseReceived;
            _dataRecClient.OnConversationError -= OnConversationErrorReceived;
            _dataRecClient.OnResponseReceived -= OnResponseReceived;
            _dataRecClient.Dispose();
            _dataRecClient = null;
        }
    }
}
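// Dispose(bool) above is the protected half of the standard dispose pattern.
// A minimal sketch of the public entry point that usually accompanies it
// (assumed here; it is not part of the original snippet):
public void Dispose()
{
    Dispose(true);
    // Suppress finalization since the clients have already been cleaned up.
    GC.SuppressFinalize(this);
}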
/// <summary>
/// SpeechToText constructor. Creates the recording objects and calls the Initialize function.
/// </summary>
/// <param name="bingApiKey">Bing Speech API key</param>
public SpeechToText(string bingApiKey)
{
    _dataRecClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntent(
        _language, bingApiKey, "LUIS_APP_ID", "LUIS_API_KEY");
    _micRecClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        _speechMode, _language, bingApiKey);

    Initialize();
}
public SpeechRecManager()
{
    cloudCreds = CloudCreds.GetInstance();

    speechClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.LongDictation,
        "en-US",
        cloudCreds.SpeechAPIKey);
    speechClient.OnPartialResponseReceived += OnPartialResponseReceived;
    speechClient.OnResponseReceived += OnResponseReceived;
    speechClient.OnMicrophoneStatus += (s, e) => Console.WriteLine(e.Recording);
}
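// Members that the SpeechRecManager constructor above and the earlier OnResponseReceived
// handler both rely on. A plausible sketch, assuming the class raises a simple string
// event per final phrase (the exact declarations are assumptions):
private readonly object speechClientLocker = new object();
private MicrophoneRecognitionClient speechClient;
private CloudCreds cloudCreds;

// Subscribers receive the best recognized text for each completed utterance.
public event Action<string> ResponseReceived;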
private void startListening()
{
    LogRecognitionStart("microphone", _recoLanguage, SpeechRecognitionMode.ShortPhrase);
    Console.WriteLine("Inside Start Listening");

    micClient = CreateMicrophoneRecoClient(_recoLanguage, SpeechRecognitionMode.ShortPhrase);
    micClient.StartMicAndRecognition();
    _startButton.IsEnabled = false;
}
public MainWindow()
{
    InitializeComponent();

    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        "en-US",
        "KEY_Bing_Speech_API");
    this.micClient.OnMicrophoneStatus += MicClient_OnMicrophoneStatus;
    this.micClient.OnResponseReceived += MicClient_OnResponseReceived;
}
public CrisReactiveClient(string subscriptionKey, string endpointUrl, string authenticationUri, string language = "en-US")
{
    this.client = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.LongDictation,
        language,
        subscriptionKey,
        subscriptionKey,
        endpointUrl);
    client.AuthenticationUri = authenticationUri;
}
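// A usage sketch for the CrisReactiveClient constructor above. All three argument
// values are placeholders (the CRIS deployment URL shape follows the other snippets
// in this collection; the token endpoint is an assumption):
var crisClient = new CrisReactiveClient(
    subscriptionKey: "YOUR_CRIS_SUBSCRIPTION_KEY",
    endpointUrl: "https://YOUR_DEPLOYMENT_ID.api.cris.ai/ws/cris/speech/recognize/continuous",
    authenticationUri: "https://westus.api.cognitive.microsoft.com/sts/v1.0/issueToken");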
public static void ConvertSpeechToText(SpeechRecognitionMode mode, string language, string subscriptionKey)
{
    _microRecogClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(mode, language, subscriptionKey);
    _microRecogClient.OnResponseReceived += OnResponseReceivedHandler;
    _microRecogClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    _microRecogClient.OnConversationError += OnConversationError;
    _microRecogClient.StartMicAndRecognition();
}
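// Minimal console-oriented handlers for the subscriptions above. The bodies are
// sketches, not the original implementations; only the event argument types come
// from the SDK:
private static void OnPartialResponseReceivedHandler(object sender, PartialSpeechResponseEventArgs e)
{
    // Partial hypotheses arrive while the user is still speaking.
    Console.WriteLine("Partial: {0}", e.PartialResult);
}

private static void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    foreach (var phrase in e.PhraseResponse.Results)
    {
        Console.WriteLine("Final ({0}): {1}", phrase.Confidence, phrase.DisplayText);
    }
}

private static void OnConversationError(object sender, SpeechErrorEventArgs e)
{
    Console.WriteLine("Error {0}: {1}", e.SpeechErrorCode, e.SpeechErrorText);
}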
private void DestroyMicClient()
{
    if (micClient != null)
    {
        //micClient.EndMicAndRecognition();
        micClient.Dispose();
        micClient = null;
    }
}
private void btnTurnOnAutoStuff_Click(object sender, RoutedEventArgs e)
{
    string apiKey1 = "da75bfe0a6bc4d2bacda60b10b5cef7e";
    string apiKey2 = "c36c061f0b8748bd862aa5bbcceda683";

    MicrophoneRecognitionClient longDictationClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.LongDictation,
        "en-US",
        apiKey1,
        apiKey2);
    longDictationClient.OnPartialResponseReceived += App.onPartialResponseReceivedHandler;
    longDictationClient.OnResponseReceived += App.onResponseReceivedHandler;
    longDictationClient.StartMicAndRecognition();
}
public MainWindow()
{
    InitializeComponent();

    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        "en-US",
        "c33fdf2cdcdc429bb3e3b424c502f6cc");
    this.micClient.OnMicrophoneStatus += MicClient_OnMicrophoneStatus;
    this.micClient.OnResponseReceived += MicClient_OnResponseReceived;
}
private void SesiYaziyaDonustur()
{
    var sesTanimaModu = SpeechRecognitionMode.ShortPhrase;
    string dil = "en-US";
    string scriptionKEY = ConfigurationManager.AppSettings["MicrosoftSpeechApiKey"].ToString();

    mikrofonTanimaAlicisi = SpeechRecognitionServiceFactory.CreateMicrophoneClient(sesTanimaModu, dil, scriptionKEY);
    mikrofonTanimaAlicisi.OnPartialResponseReceived += OnPartialResponseReceived_HANDLER;
    mikrofonTanimaAlicisi.StartMicAndRecognition();
}
public void ConvertSpeechToText()
{
    var speechRecognitionMode = SpeechRecognitionMode.LongDictation;
    string language = "en-us";
    string ApiKey = "efa2f45a7d3e4e7a8ef841ed967ee5c0";

    microphoneRecognitionClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        speechRecognitionMode,
        language,
        ApiKey);
    microphoneRecognitionClient.OnResponseReceived += ResponseReceived;
    microphoneRecognitionClient.StartMicAndRecognition();
}
private void startListening()
{
    LogRecognitionStart("microphone", _recoLanguage, SpeechRecognitionMode.ShortPhrase);
    if (_micClient == null)
    {
        _micClient = CreateMicrophoneRecoClientWithIntent(_recoLanguage);
    }

    _micClient.StartMicAndRecognition();
    _startButton.IsEnabled = false;
}
public void openMic()
{
    LogRecognitionStart("microphone", _recoLanguage, SpeechRecognitionMode.ShortPhrase);
    if (_micClient == null)
    {
        _micClient = CreateMicrophoneRecoClient(SpeechRecognitionMode.ShortPhrase, _recoLanguage, SubscriptionKey);
    }

    _micClient.StartMicAndRecognition();
}
public MainWindow()
{
    InitializeComponent();

    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        "en-US",
        "COPY-KEY-HERE");
    micClient.AuthenticationUri = "";
    this.micClient.OnMicrophoneStatus += MicClient_OnMicrophoneStatus;
    this.micClient.OnResponseReceived += MicClient_OnResponseReceived;
}
/// <summary>
/// Analyze speech input
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
private void btnAnalyzeSpeech_Click(object sender, RoutedEventArgs e)
{
    ignoreNextString = false;
    AudioControlsGrid.Visibility = Visibility.Visible;
    if (ButtonState == "Record")
    {
        this.fullText = null;
        recording = true;
        BrushConverter bc = new BrushConverter();
        mySentiment.Sentiment = 0.5;
        userInput.Text = "";
        recordGrid.Visibility = System.Windows.Visibility.Hidden;
        recordingdGrid.Visibility = System.Windows.Visibility.Visible;
        recordingState = RecordingState.Recording;

        string speechAPIKey = confCollection["SpeechRecognitionAPIKey"].Value;

        MicrophoneRecognitionClient intentMicClient =
            SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                SpeechRecognitionMode.LongDictation,
                "en-us",
                speechAPIKey);
        m_micClient = intentMicClient;

        // Event handlers for speech recognition results
        m_micClient.OnResponseReceived += this.OnResponseReceivedHandler;
        m_micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
        //m_micClient.OnConversationError += OnConversationErrorHandler;

        // First send of audio data to service
        m_micClient.StartMicAndRecognition();
        ButtonState = "Finish";
    }
    // Finish the recording
    else if (ButtonState == "Finish")
    {
        Thread.Sleep(1000);
        recording = false;
        m_micClient.EndMicAndRecognition();
        recordGrid.Visibility = System.Windows.Visibility.Visible;
        recordingdGrid.Visibility = System.Windows.Visibility.Hidden;
        ButtonState = "Record";
        DisplayAnalysis();

        // Stop recording.
        Stop();
    }
}
private void InitSpeech()
{
    clientUserSpeech = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        ConfigurationManager.AppSettings["Locale"] ?? "en-US",
        ConfigurationManager.AppSettings["CognitiveKey"]);

    // Event handlers for speech recognition results
    clientUserSpeech.OnMicrophoneStatus += this.OnMicrophoneStatus;
    clientUserSpeech.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    clientUserSpeech.OnResponseReceived += OnMicShortPhraseResponseReceivedHandler;
    clientUserSpeech.OnConversationError += OnConversationErrorHandler;
}
/// <summary>
/// Create a microphone client to record voice and send voice to server.
/// </summary>
private void createClient()
{
    // Use the API factory to create a microphone client
    this.client = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        "en-US",
        this.key);

    // Load the event handler
    this.client.OnResponseReceived += this.respondListener;

    // Start recording voice and translating
    this.client.StartMicAndRecognition();
}
/// <summary>
/// Handles the Click event of the RadioButton control.
/// </summary>
/// <param name="sender">The source of the event.</param>
/// <param name="e">The <see cref="RoutedEventArgs"/> instance containing the event data.</param>
private void RadioButton_Click(object sender, RoutedEventArgs e)
{
    // Reset everything
    if (this.micClient != null)
    {
        this.micClient.EndMicAndRecognition();
        this.micClient.Dispose();
        this.micClient = null;
    }

    this._logText.Text = string.Empty;
    this._startButton.IsEnabled = true;
    this._radioGroup.IsEnabled = true;
}
/// <summary>
/// Handles the Click event of the _stopButton control.
/// </summary>
/// <param name="sender">The source of the event.</param>
/// <param name="e">The <see cref="RoutedEventArgs"/> instance containing the event data.</param>
private void StopButton_Click(object sender, RoutedEventArgs e)
{
    // Reset everything
    if (this.micClient != null)
    {
        this.micClient.EndMicAndRecognition();
        this.micClient.Dispose();
        this.micClient = null;
    }

    this._statusText.Text = "Stopped";
    this._startButton.IsEnabled = true;
    this._stopButton.IsEnabled = false;
}
// ReSharper disable once UnusedMember.Local
private void InitWithoutLuis()
{
    const string language = "en-US";
    const string subscriptionKey = "30f97626b3144136a6c9f398172a61ac";

    _micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        language,
        subscriptionKey,
        subscriptionKey);

    _micClient.OnMicrophoneStatus += OnMicrophoneStatus;
    _micClient.OnPartialResponseReceived += OnPartialResponseReceived;
    _micClient.OnResponseReceived += OnResponseReceived;
    _micClient.OnConversationError += OnConversationError;
}
// ReSharper disable once UnusedMember.Local
private void InitWithLuis()
{
    const string language = "en-US";
    const string subscriptionKey = "30f97626b3144136a6c9f398172a61ac";
    const string luisAppId = "0090b094-7126-4ff8-81f3-06cd2a32a1de";
    const string luisSubscriptionId = "0d47bd4a03a149f490247fd01827401c";

    _micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClientWithIntent(
        language,
        subscriptionKey,
        subscriptionKey,
        luisAppId,
        luisSubscriptionId);

    _micClient.OnIntent += OnIntent;
    _micClient.OnMicrophoneStatus += OnMicrophoneStatus;
    _micClient.OnPartialResponseReceived += OnPartialResponseReceived;
    _micClient.OnResponseReceived += OnResponseReceived;
    _micClient.OnConversationError += OnConversationError;
}
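// A sketch of the OnIntent handler subscribed above. The body is an assumption;
// in this SDK the event args expose the raw LUIS result as a JSON payload string:
private void OnIntent(object sender, SpeechIntentEventArgs e)
{
    // e.Payload contains the LUIS response (intents and entities) as JSON.
    Console.WriteLine("--- Intent received ---");
    Console.WriteLine(e.Payload);
}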
/// <summary>
/// Speech recognition from the microphone. The microphone is turned on and data from the microphone
/// is sent to the Speech Recognition Service. A built-in Silence Detector
/// is applied to the microphone data before it is sent to the recognition service.
/// </summary>
void DoMicrophoneRecognition(MicrophoneRecognitionClient micClient)
{
    int waitSeconds = (m_recoMode == SpeechRecognitionMode.LongDictation) ? 200 : 15;
    try
    {
        // Turn on the microphone and stream audio to the Speech Recognition Service
        micClient.StartMicAndRecognition();
        Console.WriteLine("Start talking");

        // Sleep until the final result arrives in the OnResponseReceived event call,
        // or waitSeconds, whichever is smaller.
        bool isReceivedResponse = micClient.WaitForFinalResponse(waitSeconds * 1000);
        if (!isReceivedResponse)
        {
            Console.WriteLine(
                "{0}: Timed out waiting for conversation response after {1} ms",
                DateTime.UtcNow,
                waitSeconds * 1000);
        }
    }
    finally
    {
        // We are done sending audio. Final recognition results will arrive in the OnResponseReceived event call.
        micClient.EndMicAndRecognition();
    }
}
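// A usage sketch for DoMicrophoneRecognition above, assuming the m_recoMode field it
// reads and a response handler wired up as in the other snippets here (the subscription
// key is a placeholder and the handler name is an assumption):
var micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
    m_recoMode,
    "en-US",
    "YOUR_SUBSCRIPTION_KEY");
micClient.OnResponseReceived += OnResponseReceivedHandler;
DoMicrophoneRecognition(micClient);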
/// <summary>
/// Analyze speech input
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
private void btnAnalyzeSpeech_Click(object sender, RoutedEventArgs e)
{
    ignoreNextString = false;
    AudioControlsGrid.Visibility = Visibility.Visible;
    if (ButtonState == "Record")
    {
        this.fullText = null;
        recording = true;
        BrushConverter bc = new BrushConverter();
        mySentiment.Sentiment = 0.5;
        userInput.Text = "";
        recordGrid.Visibility = System.Windows.Visibility.Hidden;
        recordingdGrid.Visibility = System.Windows.Visibility.Visible;
        recordingState = RecordingState.Recording;

        //string speechAPIKey = confCollection["SpeechRecognitionAPIKey"].Value;
        string speechAPIKey = Properties.Settings.Default.SpeechRecognitionAPIKey;

        MicrophoneRecognitionClient intentMicClient =
            SpeechRecognitionServiceFactory.CreateMicrophoneClient(
                SpeechRecognitionMode.LongDictation,
                "en-us",
                speechAPIKey);
        m_micClient = intentMicClient;

        // Event handlers for speech recognition results
        m_micClient.OnResponseReceived += this.OnResponseReceivedHandler;
        m_micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
        //m_micClient.OnConversationError += OnConversationErrorHandler;

        // First send of audio data to service
        m_micClient.StartMicAndRecognition();
        ButtonState = "Finish";
    }
    // Finish the recording
    else if (ButtonState == "Finish")
    {
        Thread.Sleep(1000);
        recording = false;
        m_micClient.EndMicAndRecognition();
        recordGrid.Visibility = System.Windows.Visibility.Visible;
        recordingdGrid.Visibility = System.Windows.Visibility.Hidden;
        ButtonState = "Record";
        DisplayAnalysis();

        // Stop recording.
        Stop();
    }
}