/// <summary>
/// Uses the provided properties to create a connector from config and register callbacks.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when the subscription key or region is missing, or when config creation fails.
/// </exception>
private void CreateDialogServiceConnector()
{
    Debug.Log("CreateDialogServiceConnector enter");

    if (dialogServiceConnector == null)
    {
        // IsNullOrWhiteSpace also rejects null and whitespace-only values, which the
        // previous `== string.Empty` comparison would have passed through to the SDK.
        if (string.IsNullOrWhiteSpace(subscriptionKey) || string.IsNullOrWhiteSpace(region))
        {
            Debug.Log("One or more input fields weren't provided. Check the fields in the Canvas object or in the script source");
            throw new InvalidOperationException("DialogServiceConfig creation failed");
        }

        // Creates an instance of a DialogServiceConfig with your bot connection ID, subscription key, and service region.
        // Replace in the editor on the Canvas object OR directly in the code, above in the member declarations.
        dialogServiceConfig = BotFrameworkConfig.FromSubscription(subscriptionKey, region);
        if (dialogServiceConfig == null)
        {
            // Distinct message: the inputs were present, but the SDK failed to build the config.
            Debug.Log("BotFrameworkConfig.FromSubscription returned null; DialogServiceConfig creation failed");
            throw new InvalidOperationException("DialogServiceConfig creation failed");
        }

        AudioConfig audioConfig = AudioConfig.FromDefaultMicrophoneInput();
        dialogServiceConnector = new DialogServiceConnector(dialogServiceConfig, audioConfig);

        // Register callbacks for bot activities, cancellation, and final recognition results.
        dialogServiceConnector.ActivityReceived += DialogServiceConnector_ActivityReceived;
        dialogServiceConnector.Canceled += DialogServiceConnector_Canceled;
        dialogServiceConnector.Recognized += DialogServiceConnector_Recognized;
    }

    stateIndicatorString = "DialogServiceConnector created";
    ttsAudio = GetComponent<AudioSource>();
    Debug.Log("CreateDialogServiceConnector exit");
}
/// <summary>
/// Create a DialogServiceConnector from the user-provided input.
/// Any previously created connector is unsubscribed and disposed before being replaced.
/// </summary>
public void InitDialogServiceConnector()
{
    DialogServiceConfig dialogServiceConfig =
        BotFrameworkConfig.FromSubscription(SubscriptionTB.Text, RegionTB.Text);

    if (dialogServiceConnector != null)
    {
        // Unregister all events from the previous connector so its handlers cannot fire again.
        dialogServiceConnector.SessionStarted -= DialogServiceConnector_SessionStarted;
        dialogServiceConnector.SessionStopped -= DialogServiceConnector_SessionStopped;
        dialogServiceConnector.Recognizing -= DialogServiceConnector_Recognizing;
        dialogServiceConnector.Recognized -= DialogServiceConnector_Recognized;
        dialogServiceConnector.ActivityReceived -= DialogServiceConnector_ActivityReceived;
        dialogServiceConnector.Canceled -= DialogServiceConnector_Canceled;

        // Fix: the old connector was previously leaked; DialogServiceConnector holds
        // native resources and must be disposed before being replaced.
        dialogServiceConnector.Dispose();
    }

    var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
    dialogServiceConnector = new DialogServiceConnector(dialogServiceConfig, audioConfig);

    dialogServiceConnector.SessionStarted += DialogServiceConnector_SessionStarted;
    dialogServiceConnector.SessionStopped += DialogServiceConnector_SessionStopped;
    dialogServiceConnector.Recognizing += DialogServiceConnector_Recognizing;
    dialogServiceConnector.Recognized += DialogServiceConnector_Recognized;
    dialogServiceConnector.ActivityReceived += DialogServiceConnector_ActivityReceived;
    dialogServiceConnector.Canceled += DialogServiceConnector_Canceled;

    SendActivityButton.IsEnabled = true;
    StartButton.IsEnabled = true;
}
/// <summary>
/// Creates a DialogServiceConfig from a bot secret and Cognitive Services subscription key,
/// builds the DialogServiceConnector, and wires up all of its event handlers.
/// </summary>
private void InitializeDialogServiceConnector()
{
    // create a DialogServiceConfig by providing a bot secret key and Cognitive Services subscription key
    // the RecoLanguage property is optional (default en-US); note that only en-US is supported in Preview
    const string channelSecret = "YourChannelSecret";                   // Your channel secret
    const string speechSubscriptionKey = "YourSpeechSubscriptionKey";   // Your subscription key
    const string region = "YourServiceRegion";                          // Your subscription service region. Note: only 'westus2' is currently supported

    var botConfig = DialogServiceConfig.FromBotSecret(channelSecret, speechSubscriptionKey, region);
    botConfig.SetProperty(PropertyId.SpeechServiceConnection_RecoLanguage, "en-US");
    connector = new DialogServiceConnector(botConfig);

    // ActivityReceived is the main way your bot will communicate with the client and uses bot framework activities
    connector.ActivityReceived += async (sender, activityReceivedEventArgs) =>
    {
        NotifyUser($"Activity received, hasAudio={activityReceivedEventArgs.HasAudio} activity={activityReceivedEventArgs.Activity}");

        if (activityReceivedEventArgs.HasAudio)
        {
            SynchronouslyPlayActivityAudio(activityReceivedEventArgs.Audio);
        }
    };

    // Canceled will be signaled when a turn is aborted or experiences an error condition
    connector.Canceled += (sender, canceledEventArgs) =>
    {
        NotifyUser($"Canceled, reason={canceledEventArgs.Reason}");
        if (canceledEventArgs.Reason == CancellationReason.Error)
        {
            NotifyUser($"Error: code={canceledEventArgs.ErrorCode}, details={canceledEventArgs.ErrorDetails}");
        }
    };

    // Recognizing (not 'Recognized') will provide the intermediate recognized text while an audio stream is being processed
    // NOTE(review): the source for this literal was split across a physical line break
    // (invalid in a non-verbatim string); it has been rejoined onto a single line.
    connector.Recognizing += (sender, recognitionEventArgs) =>
    {
        NotifyUser($"Recognizing! in-progress text={recognitionEventArgs.Result.Text}");
    };

    // Recognized (not 'Recognizing') will provide the final recognized text once audio capture is completed
    connector.Recognized += (sender, recognitionEventArgs) =>
    {
        NotifyUser($"Final speech-to-text result: '{recognitionEventArgs.Result.Text}'");
    };

    // SessionStarted will notify when audio begins flowing to the service for a turn
    connector.SessionStarted += (sender, sessionEventArgs) =>
    {
        NotifyUser($"Now Listening! Session started, id={sessionEventArgs.SessionId}");
    };

    // SessionStopped will notify when a turn is complete and it's safe to begin listening again
    connector.SessionStopped += (sender, sessionEventArgs) =>
    {
        NotifyUser($"Listening complete. Session ended, id={sessionEventArgs.SessionId}");
    };
}
/// <summary>
/// Sends a text message activity to the bot over Direct Line Speech and asserts
/// that the bot echoes the message back.
/// </summary>
/// <returns>A task that completes when the round trip and assertions are done.</returns>
public async Task SendDirectLineSpeechTextMessage()
{
    GetEnvironmentVars();

    // Append a unique GUID so this run's echo can be distinguished from earlier ones.
    echoGuid = Guid.NewGuid().ToString();
    input += echoGuid;

    // Create a Dialog Service Config for use with the Direct Line Speech Connector
    var config = DialogServiceConfig.FromBotSecret(speechBotSecret, speechSubscription, speechRegion);
    config.SpeechRecognitionLanguage = "en-us";
    config.SetProperty(PropertyId.Conversation_From_Id, FromUser);

    // Create a new Dialog Service Connector for the above configuration and register to receive events
    var connector = new DialogServiceConnector(config, AudioConfig.FromWavFileInput(soundFilePath));
    connector.ActivityReceived += Connector_ActivityReceived;

    // Open a connection to Direct Line Speech channel. Deliberately not awaited because
    // the call blocks until the connection closes; the discard makes the intent explicit.
    _ = connector.ConnectAsync();

    // Create a message activity with the input text.
    var userMessage = new Activity
    {
        From = new ChannelAccount(FromUser),
        Text = input,
        Type = ActivityTypes.Message,
    };

    // Send the message activity to the bot.
    await connector.SendActivityAsync(JsonConvert.SerializeObject(userMessage));

    // Give the bot time to respond. Fix: Task.Delay instead of Thread.Sleep so the
    // async method does not block a thread-pool thread while waiting.
    await Task.Delay(1000);

    // Read the bot's message.
    var botAnswer = messages.LastOrDefault();

    // Cleanup
    await connector.DisconnectAsync();
    connector.Dispose();

    // Assert
    Assert.IsNotNull(botAnswer);
    Assert.AreEqual(string.Format("You said '{0}'", input), botAnswer.Message);
}
/// <summary>
/// Streams a WAV file to the bot over Direct Line Speech via ListenOnceAsync and
/// asserts that the bot echoes the recognized message back.
/// </summary>
/// <returns>A task that completes when the round trip and assertions are done.</returns>
public async Task SendDirectLineSpeechVoiceMessage()
{
    GetEnvironmentVars();

    // Make sure the sound clip exists
    Assert.IsTrue(File.Exists(SoundFilePath));

    // Create a Dialog Service Config for use with the Direct Line Speech Connector
    var config = DialogServiceConfig.FromBotSecret(speechBotSecret, speechSubscription, SpeechRegion);
    config.SpeechRecognitionLanguage = "en-us";
    config.SetProperty(PropertyId.Conversation_From_Id, FromUser);

    // Create a new Dialog Service Connector for the above configuration and register to receive events
    var connector = new DialogServiceConnector(config, AudioConfig.FromWavFileInput(SoundFilePath));
    connector.ActivityReceived += Connector_ActivityReceived;

    // Open a connection to Direct Line Speech channel. Deliberately not awaited because
    // the call blocks until the connection closes; the discard makes the intent explicit.
    _ = connector.ConnectAsync();

    // Send the audio for a single listen/recognize turn to the bot.
    await connector.ListenOnceAsync();

    // Give the bot time to respond. Fix: Task.Delay instead of Thread.Sleep so the
    // async method does not block a thread-pool thread while waiting.
    await Task.Delay(1000);

    // Read the bot's message.
    var botAnswer = messages.LastOrDefault();

    // Cleanup
    await connector.DisconnectAsync();
    connector.Dispose();

    // Assert
    Assert.IsNotNull(botAnswer);
    Assert.AreEqual(string.Format("Echo: {0}.", SoundFileMessage), botAnswer.Message);
}
/// <summary>
/// The method reads user-entered settings and creates a new instance of the DialogServiceConnector object
/// when the "Reconnect" button is pressed (or the microphone button is pressed for the first time).
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when no valid configuration could be built from the current profile settings.
/// </exception>
private void InitSpeechConnector()
{
    DialogServiceConfig config = null;

    var hasSubscription = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.SubscriptionKey);
    var hasRegion = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
    var hasBotId = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.BotId);
    var hasUrlOverride = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.UrlOverride);

    if (hasSubscription && (hasRegion || hasUrlOverride))
    {
        if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.CustomCommandsAppId))
        {
            // NOTE: Custom commands is a preview Azure Service.
            // Set the custom commands configuration object based on three items:
            // - The Custom commands application ID
            // - Cognitive services speech subscription key.
            // - The Azure region of the subscription key (e.g. "westus").
            config = CustomCommandsConfig.FromSubscription(
                this.settings.RuntimeSettings.Profile.CustomCommandsAppId,
                this.settings.RuntimeSettings.Profile.SubscriptionKey,
                this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
        }
        else if (hasBotId)
        {
            config = BotFrameworkConfig.FromSubscription(
                this.settings.RuntimeSettings.Profile.SubscriptionKey,
                this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion,
                this.settings.RuntimeSettings.Profile.BotId);
        }
        else
        {
            // Set the bot framework configuration object based on two items:
            // - Cognitive services speech subscription key. It is needed for billing and is tied to the bot registration.
            // - The Azure region of the subscription key (e.g. "westus").
            config = BotFrameworkConfig.FromSubscription(
                this.settings.RuntimeSettings.Profile.SubscriptionKey,
                this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
        }
    }

    // Fix: previously a missing subscription key/region left config null and every
    // property-setting call below threw NullReferenceException. Fail fast with a
    // clear message instead.
    if (config == null)
    {
        throw new InvalidOperationException(
            "Failed to create a dialog service configuration. A subscription key plus a region or URL override is required.");
    }

    if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ConnectionLanguage))
    {
        // Set the speech recognition language. If not set, the default is "en-us".
        config.Language = this.settings.RuntimeSettings.Profile.ConnectionLanguage;
    }

    if (this.settings.RuntimeSettings.Profile.CustomSpeechEnabled)
    {
        // Set your custom speech end-point id here, as given to you by the speech portal https://speech.microsoft.com/portal.
        // Otherwise the standard speech end-point will be used.
        config.SetServiceProperty("cid", this.settings.RuntimeSettings.Profile.CustomSpeechEndpointId, ServicePropertyChannel.UriQueryParameter);

        // Custom Speech does not support cloud Keyword Verification at the moment. If this is not done, there will be an error
        // from the service and connection will close. Remove line below when supported.
        config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
    }

    if (this.settings.RuntimeSettings.Profile.VoiceDeploymentEnabled)
    {
        // Set one or more IDs associated with the custom TTS voice your bot will use.
        // The format of the string is one or more GUIDs separated by comma (no spaces). You get these GUIDs from
        // your custom TTS on the speech portal https://speech.microsoft.com/portal.
        config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, this.settings.RuntimeSettings.Profile.VoiceDeploymentIds);
    }

    if (!string.IsNullOrEmpty(this.settings.RuntimeSettings.Profile.FromId))
    {
        // Set the from.id in the Bot-Framework Activity sent by this tool.
        // from.id field identifies who generated the activity, and may be required by some bots.
        // See https://github.com/microsoft/botframework-sdk/blob/master/specs/botframework-activity/botframework-activity.md
        // for Bot Framework Activity schema and from.id.
        config.SetProperty(PropertyId.Conversation_From_Id, this.settings.RuntimeSettings.Profile.FromId);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.LogFilePath))
    {
        // Speech SDK has verbose logging to local file, which may be useful when reporting issues.
        // Supply the path to a text file on disk here. By default no logging happens.
        config.SetProperty(PropertyId.Speech_LogFilename, this.settings.RuntimeSettings.Profile.LogFilePath);
    }

    if (hasUrlOverride)
    {
        // For prototyping new Direct Line Speech channel service feature, a custom service URL may be
        // provided by Microsoft and entered in this tool.
        config.SetProperty("SPEECH-Endpoint", this.settings.RuntimeSettings.Profile.UrlOverride);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ProxyHostName) &&
        !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ProxyPortNumber) &&
        int.TryParse(this.settings.RuntimeSettings.Profile.ProxyPortNumber, out var proxyPortNumber))
    {
        // To funnel network traffic via a proxy, set the host name and port number here
        config.SetProxy(this.settings.RuntimeSettings.Profile.ProxyHostName, proxyPortNumber, string.Empty, string.Empty);
    }

    // If the DialogServiceConnector object already exists, destroy it first
    if (this.connector != null)
    {
        // First, unregister all events
        this.connector.ActivityReceived -= this.Connector_ActivityReceived;
        this.connector.Recognizing -= this.Connector_Recognizing;
        this.connector.Recognized -= this.Connector_Recognized;
        this.connector.Canceled -= this.Connector_Canceled;
        this.connector.SessionStarted -= this.Connector_SessionStarted;
        this.connector.SessionStopped -= this.Connector_SessionStopped;

        // Then dispose the object
        this.connector.Dispose();
        this.connector = null;
    }

    // Create a new Dialog Service Connector for the above configuration and register to receive events
    this.connector = new DialogServiceConnector(config, AudioConfig.FromDefaultMicrophoneInput());
    this.connector.ActivityReceived += this.Connector_ActivityReceived;
    this.connector.Recognizing += this.Connector_Recognizing;
    this.connector.Recognized += this.Connector_Recognized;
    this.connector.Canceled += this.Connector_Canceled;
    this.connector.SessionStarted += this.Connector_SessionStarted;
    this.connector.SessionStopped += this.Connector_SessionStopped;

    // Open a connection to Direct Line Speech channel
    this.connector.ConnectAsync();

    if (this.settings.RuntimeSettings.Profile.CustomSpeechEnabled)
    {
        this.customSpeechConfig = new CustomSpeechConfiguration(this.settings.RuntimeSettings.Profile.CustomSpeechEndpointId);
    }

    if (this.settings.RuntimeSettings.Profile.WakeWordEnabled)
    {
        // Configure wake word (also known as "keyword")
        this.activeWakeWordConfig = new WakeWordConfiguration(this.settings.RuntimeSettings.Profile.WakeWordPath);
        this.connector.StartKeywordRecognitionAsync(this.activeWakeWordConfig.WakeWordModel);
    }
}
/// <summary>
/// Sets up the initial state needed for Direct Line Speech, including creation of the
/// underlying DialogServiceConnector and wiring of its events.
/// </summary>
/// <param name="keywordFile"> The keyword file to be loaded as part of initialization.</param>
/// <returns> A task that completes once initialization is complete. </returns>
public Task InitializeAsync(StorageFile keywordFile)
{
    Contract.Requires(keywordFile != null);

    var configRefreshRequired = this.TryRefreshConfigValues();

    var refreshConnector = configRefreshRequired || (this.keywordFilePath != keywordFile.Path);

    // NOTE(review): gating on `LocalSettingsHelper.SetProperty != null` looks odd for a
    // logging flag — confirm this is the intended condition and not a leftover.
    if (LocalSettingsHelper.SetProperty != null)
    {
        this.enableKwsLogging = true;
    }

    if (this.enableKwsLogging)
    {
        refreshConnector = true;
        this.enableKwsLogging = false;
    }

    if (refreshConnector)
    {
        var newConnectorConfiguration = this.CreateConfiguration();
        this.ConfirmationModel = KeywordRecognitionModel.FromFile(keywordFile.Path);
        this.keywordFilePath = keywordFile.Path;
        this.ConnectorConfiguration = newConnectorConfiguration;
        this.connectorInputStream = AudioInputStream.CreatePushStream();

        // Dispose any previous connector before creating a replacement.
        this.connector?.Dispose();
        this.connector = new DialogServiceConnector(
            this.ConnectorConfiguration,
            AudioConfig.FromStreamInput(this.connectorInputStream));

        this.connector.SessionStarted += (s, e) => this.SessionStarted?.Invoke(e.SessionId);
        this.connector.SessionStopped += (s, e) => this.SessionStopped?.Invoke(e.SessionId);

        this.connector.Recognizing += (s, e) =>
        {
            switch (e.Result.Reason)
            {
                case ResultReason.RecognizingKeyword:
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Local model recognized keyword \"{e.Result.Text}\"");
                    this.KeywordRecognizing?.Invoke(e.Result.Text);
                    this.secondStageConfirmed = true;
                    break;
                case ResultReason.RecognizingSpeech:
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Recognized speech in progress: \"{e.Result.Text}\"");
                    this.SpeechRecognizing?.Invoke(e.Result.Text);
                    break;
                default:
                    throw new InvalidOperationException();
            }
        };

        this.connector.Recognized += (s, e) =>
        {
            KwsPerformanceLogger.KwsEventFireTime = TimeSpan.FromTicks(DateTime.Now.Ticks);
            switch (e.Result.Reason)
            {
                case ResultReason.RecognizedKeyword:
                    // Fix: the original assigned KwsStartTime.Ticks here and then
                    // immediately overwrote it with DateTime.Now.Ticks (dead store).
                    var thirdStageStartTime = DateTime.Now.Ticks;
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Cloud model recognized keyword \"{e.Result.Text}\"");
                    this.KeywordRecognized?.Invoke(e.Result.Text);
                    this.kwsPerformanceLogger.LogSignalReceived("SWKWS", "A", "3", KwsPerformanceLogger.KwsEventFireTime.Ticks, thirdStageStartTime, DateTime.Now.Ticks);
                    this.secondStageConfirmed = false;
                    break;
                case ResultReason.RecognizedSpeech:
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Recognized final speech: \"{e.Result.Text}\"");
                    this.SpeechRecognized?.Invoke(e.Result.Text);
                    break;
                case ResultReason.NoMatch:
                    // If a KeywordRecognized handler is available, this is a final stage
                    // keyword verification rejection.
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Cloud model rejected keyword");
                    if (this.secondStageConfirmed)
                    {
                        // Fix: same dead-store pattern removed here as in the accepted path.
                        var thirdStageStartTimeRejected = DateTime.Now.Ticks;
                        this.kwsPerformanceLogger.LogSignalReceived("SWKWS", "R", "3", KwsPerformanceLogger.KwsEventFireTime.Ticks, thirdStageStartTimeRejected, DateTime.Now.Ticks);
                        this.secondStageConfirmed = false;
                    }

                    this.KeywordRecognized?.Invoke(null);
                    break;
                default:
                    throw new InvalidOperationException();
            }
        };

        this.connector.Canceled += (s, e) =>
        {
            var code = (int)e.ErrorCode;
            var message = $"{e.Reason.ToString()}: {e.ErrorDetails}";
            this.ErrorReceived?.Invoke(new DialogErrorInformation(code, message));
        };

        this.connector.ActivityReceived += (s, e) =>
        {
            // Note: the contract of when to end a turn is unique to your dialog system. In this sample,
            // it's assumed that receiving a message activity without audio marks the end of a turn. Your
            // dialog system may have a different contract!
            var wrapper = new ActivityWrapper(e.Activity);

            if (wrapper.Type == ActivityWrapper.ActivityType.Event)
            {
                // Toggle startEventReceived: the first event of a pair is swallowed, the
                // second falls through and is treated as an end-of-turn signal below.
                if (!this.startEventReceived)
                {
                    this.startEventReceived = true;
                    return;
                }
                else
                {
                    this.startEventReceived = false;
                }
            }

            var payload = new DialogResponse(
                messageBody: e.Activity,
                messageMedia: e.HasAudio ? new DirectLineSpeechAudioOutputStream(e.Audio, LocalSettingsHelper.OutputFormat) : null,
                shouldEndTurn: (e.Audio == null && wrapper.Type == ActivityWrapper.ActivityType.Message) || wrapper.Type == ActivityWrapper.ActivityType.Event,
                shouldStartNewTurn: wrapper.InputHint == ActivityWrapper.InputHintType.ExpectingInput);

            this.DialogResponseReceived?.Invoke(payload);
        };
    }

    // Initialization is fully synchronous; return a completed task.
    return Task.CompletedTask;
}
/// <summary>
/// The method reads user-entered settings and creates a new instance of the DialogServiceConnector object
/// when the "Reconnect" button is pressed (or the microphone button is pressed for the first time).
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when the bot secret or subscription key is missing, so no configuration could be built.
/// </exception>
private void InitSpeechConnector()
{
    DialogServiceConfig config = null;

    // Save the Direct Line Speech channel secret key. This is one of two keys you get when you register your bot with Direct Line speech
    // channel. It uniquely defines the bot. Here we call it bot secret for short.
    this.botSecret = this.botSecretLabel.Text;

    if (!string.IsNullOrWhiteSpace(this.settings.Settings.SubscriptionKey) && !string.IsNullOrWhiteSpace(this.botSecret))
    {
        // Set the dialog service configuration object based on three items:
        // - Direct Line Speech channel secret (aka "bot secret")
        // - Cognitive services speech subscription key. It is needed for billing.
        // - The Azure region of the subscription key (e.g. "westus").
        config = DialogServiceConfig.FromBotSecret(this.botSecret, this.settings.Settings.SubscriptionKey, this.settings.Settings.SubscriptionKeyRegion);
    }

    // Fix: previously a missing bot secret or subscription key left config null and the
    // SetProperty calls below threw NullReferenceException. Fail fast instead.
    if (config == null)
    {
        throw new InvalidOperationException(
            "Failed to create a dialog service configuration. Both a bot secret and a subscription key are required.");
    }

    if (!string.IsNullOrWhiteSpace(this.settings.Settings.Language))
    {
        // Set the speech recognition language. If not set, the default is "en-us".
        config.SetProperty("SPEECH-RecoLanguage", this.settings.Settings.Language);
    }

    if (!string.IsNullOrEmpty(this.settings.Settings.FromId))
    {
        // Set the from.id in the Bot-Framework Activity sent by this tool.
        // from.id field identifies who generated the activity, and may be required by some bots.
        // See https://github.com/microsoft/botframework-sdk/blob/master/specs/botframework-activity/botframework-activity.md
        // for Bot Framework Activity schema and from.id.
        config.SetProperty("BOT-FromId", this.settings.Settings.FromId);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.Settings.LogFilePath))
    {
        // Speech SDK has verbose logging to local file, which may be useful when reporting issues.
        // Supply the path to a text file on disk here. By default no logging happens.
        config.SetProperty("SPEECH-LogFilename", this.settings.Settings.LogFilePath);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.Settings.UrlOverride))
    {
        // For prototyping new Direct Line Speech channel service feature, a custom service URL may be
        // provided by Microsoft and entered in this tool.
        config.SetProperty("SPEECH-Endpoint", this.settings.Settings.UrlOverride);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.Settings.ProxyHostName) &&
        !string.IsNullOrWhiteSpace(this.settings.Settings.ProxyPortNumber) &&
        int.TryParse(this.settings.Settings.ProxyPortNumber, out var proxyPortNumber))
    {
        // To funnel network traffic via a proxy, set the host name and port number here
        config.SetProxy(this.settings.Settings.ProxyHostName, proxyPortNumber, string.Empty, string.Empty);
    }

    // If the DialogServiceConnector object already exists, destroy it first
    if (this.connector != null)
    {
        // First, unregister all events
        this.connector.ActivityReceived -= this.Connector_ActivityReceived;
        this.connector.Recognizing -= this.Connector_Recognizing;
        this.connector.Recognized -= this.Connector_Recognized;
        this.connector.Canceled -= this.Connector_Canceled;
        this.connector.SessionStarted -= this.Connector_SessionStarted;
        this.connector.SessionStopped -= this.Connector_SessionStopped;

        // Then dispose the object
        this.connector.Dispose();
        this.connector = null;
    }

    // Create a new Dialog Service Connector for the above configuration and register to receive events
    this.connector = new DialogServiceConnector(config, AudioConfig.FromDefaultMicrophoneInput());
    this.connector.ActivityReceived += this.Connector_ActivityReceived;
    this.connector.Recognizing += this.Connector_Recognizing;
    this.connector.Recognized += this.Connector_Recognized;
    this.connector.Canceled += this.Connector_Canceled;
    this.connector.SessionStarted += this.Connector_SessionStarted;
    this.connector.SessionStopped += this.Connector_SessionStopped;

    // Open a connection to Direct Line Speech channel
    this.connector.ConnectAsync();

    // Save the recent bot secret in the history, so it can easily be retrieved later on
    this.AddBotIdEntryIntoHistory(this.botSecret);

    if (this.settings.Settings.WakeWordEnabled)
    {
        // Configure wake word (also known as "keyword")
        this.activeWakeWordConfig = new WakeWordConfiguration(this.settings.Settings.WakeWordPath);
        this.connector.StartKeywordRecognitionAsync(this.activeWakeWordConfig.WakeWordModel);
    }
}
/// <summary>
/// Sets up the initial state needed for Direct Line Speech, including creation of the
/// underlying DialogServiceConnector and wiring of its events.
/// </summary>
/// <param name="keywordFile"> The keyword file to be loaded as part of initialization.</param>
/// <returns> A task that completes once initialization is complete. </returns>
public Task InitializeAsync(StorageFile keywordFile)
{
    Contract.Requires(keywordFile != null);

    // Default values -- these can be updated
    this.ConnectorConfiguration = this.CreateConfiguration();
    this.ConfirmationModel = KeywordRecognitionModel.FromFile(keywordFile.Path);
    this.connectorInputStream = AudioInputStream.CreatePushStream();

    var audioInput = AudioConfig.FromStreamInput(this.connectorInputStream);
    this.connector = new DialogServiceConnector(this.ConnectorConfiguration, audioInput);

    // Forward session lifecycle notifications to subscribers.
    this.connector.SessionStarted += (sender, sessionArgs) => this.SessionStarted?.Invoke(sessionArgs.SessionId);
    this.connector.SessionStopped += (sender, sessionArgs) => this.SessionStopped?.Invoke(sessionArgs.SessionId);

    // Intermediate hypotheses: local keyword spotting or in-progress speech.
    this.connector.Recognizing += (sender, recoArgs) =>
    {
        var reason = recoArgs.Result.Reason;
        if (reason == ResultReason.RecognizingKeyword)
        {
            this.logger.Log($"Local model recognized keyword \"{recoArgs.Result.Text}\"");
            this.KeywordRecognizing?.Invoke(recoArgs.Result.Text);
        }
        else if (reason == ResultReason.RecognizingSpeech)
        {
            this.logger.Log($"Recognized speech in progress: \"{recoArgs.Result.Text}\"");
            this.SpeechRecognizing?.Invoke(recoArgs.Result.Text);
        }
        else
        {
            throw new InvalidOperationException();
        }
    };

    // Final results: cloud keyword confirmation/rejection or final speech text.
    this.connector.Recognized += (sender, recoArgs) =>
    {
        var reason = recoArgs.Result.Reason;
        if (reason == ResultReason.RecognizedKeyword)
        {
            this.logger.Log($"Cloud model recognized keyword \"{recoArgs.Result.Text}\"");
            this.KeywordRecognized?.Invoke(recoArgs.Result.Text);
        }
        else if (reason == ResultReason.RecognizedSpeech)
        {
            this.logger.Log($"Recognized final speech: \"{recoArgs.Result.Text}\"");
            this.SpeechRecognized?.Invoke(recoArgs.Result.Text);
        }
        else if (reason == ResultReason.NoMatch)
        {
            // If a KeywordRecognized handler is available, this is a final stage
            // keyword verification rejection.
            this.logger.Log($"Cloud model rejected keyword");
            this.KeywordRecognized?.Invoke(null);
        }
        else
        {
            throw new InvalidOperationException();
        }
    };

    // Surface cancellations as error information to subscribers.
    this.connector.Canceled += (sender, cancelArgs) =>
    {
        var errorInfo = new DialogErrorInformation(
            (int)cancelArgs.ErrorCode,
            $"{cancelArgs.Reason.ToString()}: {cancelArgs.ErrorDetails}");
        this.ErrorReceived?.Invoke(errorInfo);
    };

    this.connector.ActivityReceived += (sender, activityArgs) =>
    {
        // Note: the contract of when to end a turn is unique to your dialog system. In this sample,
        // it's assumed that receiving a message activity without audio marks the end of a turn. Your
        // dialog system may have a different contract!
        var wrapper = new ActivityWrapper(activityArgs.Activity);
        var media = activityArgs.HasAudio
            ? new DirectLineSpeechAudioOutputStream(activityArgs.Audio, LocalSettingsHelper.OutputFormat)
            : null;
        var endTurn = activityArgs.Audio == null && wrapper.Type == ActivityWrapper.ActivityType.Message;
        var newTurn = wrapper.InputHint == ActivityWrapper.InputHintType.ExpectingInput;

        var payload = new DialogResponse(
            messageBody: activityArgs.Activity,
            messageMedia: media,
            shouldEndTurn: endTurn,
            shouldStartNewTurn: newTurn);

        this.logger.Log($"Connector activity received");
        this.DialogResponseReceived?.Invoke(payload);
    };

    return Task.FromResult(0);
}