/// <summary>
/// Sets up the initial state needed for Direct Line Speech, including creation of the
/// underlying DialogServiceConnector and wiring of its events.
/// </summary>
/// <param name="keywordFile"> The keyword file to be loaded as part of initialization.</param>
/// <returns> A task that completes once initialization is complete. </returns>
public Task InitializeAsync(StorageFile keywordFile)
{
    Contract.Requires(keywordFile != null);

    // Recreate the connector when either the configuration values or the keyword file changed.
    var configRefreshRequired = this.TryRefreshConfigValues();

    var refreshConnector = configRefreshRequired || (this.keywordFilePath != keywordFile.Path);

    // NOTE(review): this only checks that the SetProperty delegate exists, not any specific
    // setting value — confirm this is the intended trigger for enabling KWS logging.
    if (LocalSettingsHelper.SetProperty != null)
    {
        this.enableKwsLogging = true;
    }

    if (this.enableKwsLogging)
    {
        // Force a connector rebuild once, then clear the flag so it is a one-shot request.
        refreshConnector = true;
        this.enableKwsLogging = false;
    }

    if (refreshConnector)
    {
        var newConnectorConfiguration = this.CreateConfiguration();
        this.ConfirmationModel = KeywordRecognitionModel.FromFile(keywordFile.Path);
        this.keywordFilePath = keywordFile.Path;
        this.ConnectorConfiguration = newConnectorConfiguration;
        this.connectorInputStream = AudioInputStream.CreatePushStream();

        // Release any previous connector before replacing it.
        this.connector?.Dispose();
        this.connector = new DialogServiceConnector(
            this.ConnectorConfiguration,
            AudioConfig.FromStreamInput(this.connectorInputStream));

        this.connector.SessionStarted += (s, e) => this.SessionStarted?.Invoke(e.SessionId);
        this.connector.SessionStopped += (s, e) => this.SessionStopped?.Invoke(e.SessionId);

        this.connector.Recognizing += (s, e) =>
        {
            switch (e.Result.Reason)
            {
                case ResultReason.RecognizingKeyword:
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Local model recognized keyword \"{e.Result.Text}\"");
                    this.KeywordRecognizing?.Invoke(e.Result.Text);
                    this.secondStageConfirmed = true;
                    break;
                case ResultReason.RecognizingSpeech:
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Recognized speech in progress: \"{e.Result.Text}\"");
                    this.SpeechRecognizing?.Invoke(e.Result.Text);
                    break;
                default:
                    throw new InvalidOperationException();
            }
        };

        this.connector.Recognized += (s, e) =>
        {
            KwsPerformanceLogger.KwsEventFireTime = TimeSpan.FromTicks(DateTime.Now.Ticks);
            switch (e.Result.Reason)
            {
                case ResultReason.RecognizedKeyword:
                    // Fix: the original assigned KwsPerformanceLogger.KwsStartTime.Ticks here and
                    // then immediately overwrote it — the dead store has been removed. The stage-3
                    // start time is the moment this event fires.
                    var thirdStageStartTime = DateTime.Now.Ticks;
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Cloud model recognized keyword \"{e.Result.Text}\"");
                    this.KeywordRecognized?.Invoke(e.Result.Text);
                    this.kwsPerformanceLogger.LogSignalReceived("SWKWS", "A", "3", KwsPerformanceLogger.KwsEventFireTime.Ticks, thirdStageStartTime, DateTime.Now.Ticks);
                    this.secondStageConfirmed = false;
                    break;
                case ResultReason.RecognizedSpeech:
                    this.logger.Log(LogMessageLevel.SignalDetection, $"Recognized final speech: \"{e.Result.Text}\"");
                    this.SpeechRecognized?.Invoke(e.Result.Text);
                    break;
                case ResultReason.NoMatch:
                    // If a KeywordRecognized handler is available, this is a final stage
                    // keyword verification rejection.
                    this.logger.Log(LogMessageLevel.SignalDetection, "Cloud model rejected keyword");
                    if (this.secondStageConfirmed)
                    {
                        // Fix: same dead-store pattern removed as in the accepted-keyword case.
                        var thirdStageStartTimeRejected = DateTime.Now.Ticks;
                        this.kwsPerformanceLogger.LogSignalReceived("SWKWS", "R", "3", KwsPerformanceLogger.KwsEventFireTime.Ticks, thirdStageStartTimeRejected, DateTime.Now.Ticks);
                        this.secondStageConfirmed = false;
                    }

                    this.KeywordRecognized?.Invoke(null);
                    break;
                default:
                    throw new InvalidOperationException();
            }
        };

        this.connector.Canceled += (s, e) =>
        {
            var code = (int)e.ErrorCode;
            var message = $"{e.Reason}: {e.ErrorDetails}";
            this.ErrorReceived?.Invoke(new DialogErrorInformation(code, message));
        };

        this.connector.ActivityReceived += (s, e) =>
        {
            // Note: the contract of when to end a turn is unique to your dialog system. In this sample,
            // it's assumed that receiving a message activity without audio marks the end of a turn. Your
            // dialog system may have a different contract!
            var wrapper = new ActivityWrapper(e.Activity);

            // Event activities arrive in pairs: the first marks the start of the exchange and is
            // swallowed; the second clears the flag and flows through as an end-of-turn signal.
            if (wrapper.Type == ActivityWrapper.ActivityType.Event)
            {
                if (!this.startEventReceived)
                {
                    this.startEventReceived = true;
                    return;
                }
                else
                {
                    this.startEventReceived = false;
                }
            }

            var payload = new DialogResponse(
                messageBody: e.Activity,
                messageMedia: e.HasAudio ? new DirectLineSpeechAudioOutputStream(e.Audio, LocalSettingsHelper.OutputFormat) : null,
                shouldEndTurn: (e.Audio == null && wrapper.Type == ActivityWrapper.ActivityType.Message) || wrapper.Type == ActivityWrapper.ActivityType.Event,
                shouldStartNewTurn: wrapper.InputHint == ActivityWrapper.InputHintType.ExpectingInput);

            this.DialogResponseReceived?.Invoke(payload);
        };
    }

    return Task.FromResult(0);
}
/// <summary>
/// Processes a dialog response that has just been produced by the backend: records whether a
/// follow-up turn was requested, raises the DialogResponseReceived event, and enqueues the
/// response for later consumption.
/// </summary>
/// <param name="dialogResponse"> The dialog response to process. </param>
private void OnActivityReceived(DialogResponse dialogResponse)
{
    // Track whether the dialog indicated it expects another turn from the user.
    this.ConversationContinuationRequested = dialogResponse.FollowupTurnIndicated;

    // Snapshot the handler before invoking, equivalent to the ?.Invoke pattern.
    var responseReceivedHandler = this.DialogResponseReceived;
    if (responseReceivedHandler != null)
    {
        responseReceivedHandler.Invoke(this, dialogResponse);
    }

    // The queue may not have been created yet; only enqueue when it exists.
    var pendingResponses = this.dialogResponseQueue;
    if (pendingResponses != null)
    {
        pendingResponses.Enqueue(dialogResponse);
    }
}
/// <summary>
/// Sets up the initial state needed for Direct Line Speech, including creation of the
/// underlying DialogServiceConnector and wiring of its events.
/// </summary>
/// <param name="keywordFile"> The keyword file to be loaded as part of initialization.</param>
/// <returns> A task that completes once initialization is complete. </returns>
public Task InitializeAsync(StorageFile keywordFile)
{
    Contract.Requires(keywordFile != null);

    // Default values -- these can be updated
    this.ConnectorConfiguration = this.CreateConfiguration();
    this.ConfirmationModel = KeywordRecognitionModel.FromFile(keywordFile.Path);
    this.connectorInputStream = AudioInputStream.CreatePushStream();

    // Fix: dispose any previously-created connector before replacing it; the prior instance
    // (with its wired event handlers) would otherwise leak if this method runs more than once.
    this.connector?.Dispose();
    this.connector = new DialogServiceConnector(
        this.ConnectorConfiguration,
        AudioConfig.FromStreamInput(this.connectorInputStream));

    this.connector.SessionStarted += (s, e) => this.SessionStarted?.Invoke(e.SessionId);
    this.connector.SessionStopped += (s, e) => this.SessionStopped?.Invoke(e.SessionId);

    this.connector.Recognizing += (s, e) =>
    {
        switch (e.Result.Reason)
        {
            case ResultReason.RecognizingKeyword:
                this.logger.Log($"Local model recognized keyword \"{e.Result.Text}\"");
                this.KeywordRecognizing?.Invoke(e.Result.Text);
                break;
            case ResultReason.RecognizingSpeech:
                this.logger.Log($"Recognized speech in progress: \"{e.Result.Text}\"");
                this.SpeechRecognizing?.Invoke(e.Result.Text);
                break;
            default:
                throw new InvalidOperationException();
        }
    };

    this.connector.Recognized += (s, e) =>
    {
        switch (e.Result.Reason)
        {
            case ResultReason.RecognizedKeyword:
                this.logger.Log($"Cloud model recognized keyword \"{e.Result.Text}\"");
                this.KeywordRecognized?.Invoke(e.Result.Text);
                break;
            case ResultReason.RecognizedSpeech:
                this.logger.Log($"Recognized final speech: \"{e.Result.Text}\"");
                this.SpeechRecognized?.Invoke(e.Result.Text);
                break;
            case ResultReason.NoMatch:
                // If a KeywordRecognized handler is available, this is a final stage
                // keyword verification rejection.
                this.logger.Log("Cloud model rejected keyword");
                this.KeywordRecognized?.Invoke(null);
                break;
            default:
                throw new InvalidOperationException();
        }
    };

    this.connector.Canceled += (s, e) =>
    {
        var code = (int)e.ErrorCode;
        var message = $"{e.Reason}: {e.ErrorDetails}";
        this.ErrorReceived?.Invoke(new DialogErrorInformation(code, message));
    };

    this.connector.ActivityReceived += (s, e) =>
    {
        // Note: the contract of when to end a turn is unique to your dialog system. In this sample,
        // it's assumed that receiving a message activity without audio marks the end of a turn. Your
        // dialog system may have a different contract!
        var wrapper = new ActivityWrapper(e.Activity);
        var payload = new DialogResponse(
            messageBody: e.Activity,
            messageMedia: e.HasAudio ? new DirectLineSpeechAudioOutputStream(e.Audio, LocalSettingsHelper.OutputFormat) : null,
            shouldEndTurn: e.Audio == null && wrapper.Type == ActivityWrapper.ActivityType.Message,
            shouldStartNewTurn: wrapper.InputHint == ActivityWrapper.InputHintType.ExpectingInput);

        this.logger.Log("Connector activity received");
        this.DialogResponseReceived?.Invoke(payload);
    };

    return Task.FromResult(0);
}