public SpeechRecognitionViewModel()
{
    // Build the long-dictation microphone client up front; the API key is
    // read from application settings rather than being hardcoded.
    _microphoneRecognitionClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.LongDictation,
        "en-US",
        ConfigurationManager.AppSettings.Get("SpeechRecognitionApiKey"));

    // Surface partial (in-progress) and final recognition results through
    // the handlers defined elsewhere in this view model.
    _microphoneRecognitionClient.OnPartialResponseReceived += MicrophoneRecognitionClientOnOnPartialResponseReceived;
    _microphoneRecognitionClient.OnResponseReceived += MicrophoneRecognitionClientOnOnResponseReceived;

    // The two commands are mutually exclusive: start is enabled only while
    // idle, stop only while actively listening.
    StartListeningCommand = new RelayCommand(StartListening, () => !IsListening);
    StopListeningCommand = new RelayCommand(StopListening, () => IsListening);
}
/// <summary>
/// Creates a data client without LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the
/// Speech Recognition Service. The buffers are sent unmodified, so callers
/// may apply their own silence detection if desired.
/// </summary>
private void CreateDataRecoClient()
{
    // Mode, locale and key all come from instance state so the same class
    // can drive short-phrase or long-dictation sessions.
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);

    // Wire up recognition callbacks: partial hypotheses as audio streams in,
    // then the final dictation response.
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
}
/// <summary>
/// Creates a microphone client configured for long dictation.
/// </summary>
private void CreateMicrophoneRecoClient()
{
    // NOTE(review): the key is read from the "luisSubscriptionID" app setting
    // even though this client is created without LUIS intent support — verify
    // this setting actually holds the Speech API subscription key.
    // NOTE(review): locale is hardcoded to "en-US" here while sibling methods
    // use this.DefaultLocale — confirm that is intentional.
    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.LongDictation,
        "en-US", // assume this default locale
        ConfigurationManager.AppSettings["luisSubscriptionID"]);

    //this.micClient.AuthenticationUri = this.AuthenticationUri;

    // Wire up microphone status, final dictation results, and error reporting.
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnResponseReceived += this.OnMicDictationResponseReceivedHandler;
    this.micClient.OnConversationError += this.OnConversationErrorHandler;
}
/// <summary>
/// Creates a data client with LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the
/// Speech Recognition Service. The buffers are sent unmodified, so callers
/// may apply their own silence detection if desired.
/// </summary>
private void CreateDataRecoClientWithIntent()
{
    // LUIS app id and subscription come from instance state; locale and
    // Speech key likewise.
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntent(
        this.DefaultLocale,
        this.SubscriptionKey,
        this.LuisAppId,
        this.LuisSubscriptionID);

    // NOTE(review): only partial-response and intent handlers are registered
    // here — no OnResponseReceived or OnConversationError. Confirm final
    // results and errors are intentionally unhandled for this client.
    this.dataClient.OnIntent += this.OnIntentHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
}
/// <summary>
/// Creates a new microphone reco client with LUIS intent support.
/// </summary>
private void CreateMicrophoneRecoClientWithIntent()
{
    // Intent-enabled microphone client: locale, Speech key, and the LUIS
    // app/subscription pair all come from instance state.
    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClientWithIntent(
        this.DefaultLocale,
        this.SubscriptionKey,
        this.LuisAppId,
        this.LuisSubscriptionID);

    // Wire up microphone status, partial hypotheses, the final short-phrase
    // response, and the recognized LUIS intent.
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.micClient.OnResponseReceived += this.OnMicShortPhraseResponseReceivedHandler;
    this.micClient.OnIntent += this.OnIntentHandler;
}
/// <summary>
/// Creates a new microphone reco client without LUIS intent support.
/// </summary>
private void CreateMicrophoneRecoClient()
{
    // BUG FIX: the locale was "en - US" (with spaces), which is not a valid
    // BCP-47 language tag and would be rejected by the Speech service.
    // NOTE(review): the factory call hardcodes ShortPhrase while the guard
    // below checks this.Mode — if this.Mode is LongDictation, the client is
    // created but no response handler is ever registered. Confirm whether
    // this.Mode should be passed to the factory instead.
    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.ShortPhrase,
        "en-US",
        this.SubscriptionKey);

    // Event handlers for speech recognition results.
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.micClient.OnResponseReceived += this.OnMicShortPhraseResponseReceivedHandler;
    }
}
/// <summary>
/// (Re)starts a long-dictation data recognition session: disposes any
/// previous client, creates a fresh one from the configured settings, and
/// announces the PCM audio format before audio buffers are sent.
/// </summary>
public void Start()
{
    // Dispose any previous session's client so its resources (and event
    // subscriptions) are released before we replace it.
    _client?.Dispose();

    _client = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.LongDictation,
        _speechSettings.SpeechLanguage,
        _speechSettings.AzureSpeechPrimaryKey);

    // FIX: subscribe before sending the audio format so no early response
    // can arrive unobserved; also removed a stray empty statement (";;").
    _client.OnResponseReceived += ClientOnResponseReceived;
    _client.SendAudioFormat(SpeechAudioFormat.create16BitPCMFormat(_speechSettings.SampleRateValue));

    _started = true;
}
/// <summary>
/// Builds a long-dictation microphone client for the given language using a
/// primary/secondary Speech API key pair, and wires up all recognition events.
/// NOTE(review): the type name "SpeechInteration" looks like a typo for
/// "SpeechInteraction" — renaming would break callers, so it is left as-is.
/// </summary>
public SpeechInteration(string language, string speechApiPrimaryKey, string speechApiSecondaryKey)
{
    // Keep the configuration on the instance for later use.
    Language = language;
    SPEECHAPI_KEY1 = speechApiPrimaryKey;
    SPEECHAPI_KEY2 = speechApiSecondaryKey;

    _speechClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        SpeechRecognitionMode.LongDictation,
        language,
        speechApiPrimaryKey,
        speechApiSecondaryKey);

    // Event handlers for speech recognition results.
    _speechClient.OnMicrophoneStatus += OnMicrophoneStatus;
    _speechClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    _speechClient.OnResponseReceived += OnMicDictationResponseReceivedHandler;
    _speechClient.OnConversationError += OnConversationErrorHandler;
}
/// <summary>
/// Creates a data client with LUIS intent support, targeting a specific
/// LUIS endpoint URL, and wires up all recognition, error, and intent events.
/// </summary>
private void CreateDataRecoClientWithIntent()
{
    // SECURITY(review): this endpoint URL embeds a LUIS subscription key and
    // app id directly in source. The key should be revoked/rotated and the
    // URL moved to configuration (e.g. ConfigurationManager.AppSettings) —
    // not replaced here silently, since that would break runtime without a
    // corresponding config entry. Local renamed PointURL -> endpointUrl to
    // follow camelCase convention for locals.
    string endpointUrl = "https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/75a1f980-aa8e-42e5-927c-eef62286b24c?subscription-key=3441bb92f501414e8bcb7013517a20f1&verbose=true&timezoneOffset=0&q=";

    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntentUsingEndpointUrl(
        this.DefaultLocale,
        this.SubscriptionKey,
        endpointUrl);
    this.dataClient.AuthenticationUri = this.AuthenticationUri;

    // Wire up final short-phrase results, partial hypotheses, conversation
    // errors, and the recognized LUIS intent.
    this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
    this.dataClient.OnIntent += this.OnIntentHandler;
}