/// <summary>
/// Builds a <c>DialogServiceConfig</c> from the values stored in <c>LocalSettingsHelper</c>:
/// chooses a Custom Commands, bot-specific, or default Bot Framework configuration, disables
/// input-audio throttling, and applies synthesis format, Custom Speech, custom voice, and
/// SDK-logging settings.
/// </summary>
/// <returns>The fully populated configuration, ready to construct a DialogServiceConnector.</returns>
private DialogServiceConfig CreateConfiguration()
{
    var speechKey = LocalSettingsHelper.SpeechSubscriptionKey;
    var speechRegion = LocalSettingsHelper.AzureRegion;
    var customSpeechId = LocalSettingsHelper.CustomSpeechId;
    var customVoiceIds = LocalSettingsHelper.CustomVoiceIds;
    var customCommandsAppId = LocalSettingsHelper.CustomCommandsAppId;
    var botId = LocalSettingsHelper.BotId;

    // Subscription information is supported in multiple formats:
    //  <subscription_key>       use the default bot associated with the subscription
    //  <sub_key>:<app_id>       use a specified Custom Commands application
    //  <sub_key>#<bot_id>       use a specific bot within the subscription
    DialogServiceConfig config;
    if (!string.IsNullOrEmpty(speechKey) && !string.IsNullOrEmpty(speechRegion) && !string.IsNullOrEmpty(customCommandsAppId))
    {
        config = CustomCommandsConfig.FromSubscription(customCommandsAppId, speechKey, speechRegion);
    }
    else if (!string.IsNullOrEmpty(speechKey) && !string.IsNullOrEmpty(speechRegion) && !string.IsNullOrEmpty(botId))
    {
        // FIX: this branch was commented out even though BotId is read above and the
        // "<sub_key>#<bot_id>" format is documented; enabled so the setting is honored.
        config = BotFrameworkConfig.FromSubscription(speechKey, speechRegion, botId);
    }
    else
    {
        config = BotFrameworkConfig.FromSubscription(speechKey, speechRegion);
    }

    // Disable throttling of input audio (send it as fast as we can!)
    config.SetProperty("SPEECH-AudioThrottleAsPercentageOfRealTime", "9999");

    // BUGFIX: the property name was misspelled "SPEECH-TransmitLengthBeforThrottleMs"
    // (missing 'e'), so the value was silently ignored by the SDK.
    config.SetProperty("SPEECH-TransmitLengthBeforeThrottleMs", "10000");

    var outputLabel = LocalSettingsHelper.OutputFormat.Label.ToLower(CultureInfo.CurrentCulture);
    config.SetProperty(PropertyId.SpeechServiceConnection_SynthOutputFormat, outputLabel);

    if (!string.IsNullOrEmpty(customSpeechId))
    {
        config.SetServiceProperty("cid", customSpeechId, ServicePropertyChannel.UriQueryParameter);

        // Custom Speech does not support Keyword Verification - Remove line below when supported.
        config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
    }

    if (!string.IsNullOrEmpty(customVoiceIds))
    {
        config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, customVoiceIds);
    }

    if (LocalSettingsHelper.EnableSdkLogging)
    {
        var logPath = $"{ApplicationData.Current.LocalFolder.Path}\\sdklog.txt";
        config.SetProperty(PropertyId.Speech_LogFilename, logPath);
    }

    return config;
}
/// <summary>
/// Creates a DialogServiceConnector over the default microphone for the Custom Commands
/// application configured in <c>Constants</c>, registers all event handlers, opens the
/// Direct Line Speech connection, and starts "Computer" keyword recognition.
/// </summary>
private async Task InitializeSpeechConnectorAsync()
{
    audioConfig = AudioConfig.FromDefaultMicrophoneInput();

    var config = CustomCommandsConfig.FromSubscription(Constants.CustomCommandsAppId, Constants.SubscriptionKey, Constants.Region);
    config.Language = Constants.Language;

    // Create a new Dialog Service Connector for the above configuration and register to receive events
    connector = new DialogServiceConnector(config, audioConfig);
    connector.ActivityReceived += Connector_ActivityReceived;
    connector.Recognizing += Connector_Recognizing;
    connector.Recognized += Connector_Recognized;
    connector.Canceled += Connector_Canceled;
    connector.SessionStarted += Connector_SessionStarted;
    connector.SessionStopped += Connector_SessionStopped;

    // Open a connection to Direct Line Speech channel
    await connector.ConnectAsync();

    var keywordRecognitionModel = KeywordRecognitionModel.FromFile(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "Computer.table"));

    // BUGFIX: the task was previously discarded (`_ = ...`), so failures to start keyword
    // recognition went unobserved. We are already in an async method, so await it.
    await connector.StartKeywordRecognitionAsync(keywordRecognitionModel);
}
// SECURITY NOTE(review): subscription keys and application IDs should be loaded from
// configuration or environment variables, not committed to source. Values kept as-is
// to preserve the sample's behavior; rotate this key if it was ever real.
private const string speechSubscriptionKey = "cae26e415d0c43ffac8f000d37d86b64"; // Your subscription key

/// <summary>
/// Connects to a Custom Commands application in the westus2 region, wires up the
/// DialogServiceConnector events, and performs a single listen-once turn. Activities
/// that carry audio are spoken back via a speech synthesizer using their SSML payload.
/// </summary>
public static async Task RecognizeCommands()
{
    // FIX: reuse the const instead of duplicating the key as a second literal.
    var speechCommandsConfig = CustomCommandsConfig.FromSubscription("c619435a-6254-452f-a668-3fee02924e69", speechSubscriptionKey, "westus2");
    speechCommandsConfig.SetProperty(PropertyId.SpeechServiceConnection_RecoLanguage, "en-us");
    var connector = new DialogServiceConnector(speechCommandsConfig);

    // No explicit ConnectAsync here: ListenOnceAsync below connects on demand.

    // This code sets up handlers for events relied on by `DialogServiceConnector` to communicate its activities,
    // speech recognition results, and other information.

    // ActivityReceived is the main way your client will receive messages, audio, and events
    connector.ActivityReceived += async (sender, activityReceivedEventArgs) =>
    {
        string output = "Activity received ";

        if (activityReceivedEventArgs.HasAudio)
        {
            var activity = JsonConvert.DeserializeObject<ActivityModel>(activityReceivedEventArgs.Activity);
            output += "it has audio ";

            // BUGFIX: the synthesizer is now disposed (it was previously leaked), and the
            // nested handler's parameter no longer shadows the outer lambda's 'sender'
            // (shadowing a parameter of an enclosing lambda is compile error CS0136).
            using (var synthesizer = new SpeechSynthesizer(SpeechConfig.FromSubscription(speechSubscriptionKey, "westus2")))
            {
                synthesizer.SynthesisCanceled += (synthSender, canceledEventArgs) =>
                {
                    Console.WriteLine("Failed synthesizing");
                };

                var synthesisResult = await synthesizer.SpeakSsmlAsync(activity.Speak);
            }
        }

        output += "\n\n" + activityReceivedEventArgs.Activity;
        Console.WriteLine(output);
    };

    // Canceled will be signaled when a turn is aborted or experiences an error condition
    connector.Canceled += (sender, canceledEventArgs) =>
    {
        Console.WriteLine($"Canceled, reason={canceledEventArgs.Reason}");
        if (canceledEventArgs.Reason == CancellationReason.Error)
        {
            Console.WriteLine($"Error: code={canceledEventArgs.ErrorCode}, details={canceledEventArgs.ErrorDetails}");
        }
    };

    // Recognizing (not 'Recognized') will provide the intermediate recognized text
    // while an audio stream is being processed
    connector.Recognizing += (sender, recognitionEventArgs) =>
    {
        Console.WriteLine($"Recognizing! in-progress text={recognitionEventArgs.Result.Text}");
    };

    // Recognized (not 'Recognizing') will provide the final recognized text
    // once audio capture is completed
    connector.Recognized += (sender, recognitionEventArgs) =>
    {
        Console.WriteLine($"Final speech-to-text result: '{recognitionEventArgs.Result.Text}'");
    };

    // SessionStarted will notify when audio begins flowing to the service for a turn
    connector.SessionStarted += (sender, sessionEventArgs) =>
    {
        Console.WriteLine($"Now Listening! Session started, id={sessionEventArgs.SessionId}");
    };

    // SessionStopped will notify when a turn is complete and
    // it's safe to begin listening again
    connector.SessionStopped += (sender, sessionEventArgs) =>
    {
        Console.WriteLine($"Listening complete. Session ended, id={sessionEventArgs.SessionId}");
    };

    // Start sending audio
    try
    {
        var result = await connector.ListenOnceAsync();
    }
    catch (Exception ex)
    {
        Console.WriteLine($"Exception: {ex}");
    }
}
/// <summary>
/// Reads user-entered settings and creates a new instance of the DialogServiceConnector object
/// when the "Reconnect" button is pressed (or the microphone button is pressed for the first time).
/// Any existing connector is unsubscribed and disposed before the new one is created.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when no subscription key together with a region or URL override has been entered.
/// </exception>
private void InitSpeechConnector()
{
    DialogServiceConfig config = null;

    var hasSubscription = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.SubscriptionKey);
    var hasRegion = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
    var hasBotId = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.BotId);
    var hasUrlOverride = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.UrlOverride);

    if (hasSubscription && (hasRegion || hasUrlOverride))
    {
        if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.CustomCommandsAppId))
        {
            // NOTE: Custom commands is a preview Azure Service.
            // Set the custom commands configuration object based on three items:
            // - The Custom commands application ID
            // - Cognitive services speech subscription key.
            // - The Azure region of the subscription key(e.g. "westus").
            config = CustomCommandsConfig.FromSubscription(this.settings.RuntimeSettings.Profile.CustomCommandsAppId, this.settings.RuntimeSettings.Profile.SubscriptionKey, this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
        }
        else if (hasBotId)
        {
            // Route to a specific bot registered under this subscription.
            config = BotFrameworkConfig.FromSubscription(this.settings.RuntimeSettings.Profile.SubscriptionKey, this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion, this.settings.RuntimeSettings.Profile.BotId);
        }
        else
        {
            // Set the bot framework configuration object based on two items:
            // - Cognitive services speech subscription key. It is needed for billing and is tied to the bot registration.
            // - The Azure region of the subscription key(e.g. "westus").
            config = BotFrameworkConfig.FromSubscription(this.settings.RuntimeSettings.Profile.SubscriptionKey, this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
        }
    }

    if (config == null)
    {
        // BUGFIX: config stayed null when the required subscription information was missing,
        // and every config.* call below then threw NullReferenceException. Fail fast with an
        // actionable message instead.
        throw new InvalidOperationException("A speech subscription key together with a region or URL override is required before connecting.");
    }

    if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ConnectionLanguage))
    {
        // Set the speech recognition language. If not set, the default is "en-us".
        config.Language = this.settings.RuntimeSettings.Profile.ConnectionLanguage;
    }

    if (this.settings.RuntimeSettings.Profile.CustomSpeechEnabled)
    {
        // Set your custom speech end-point id here, as given to you by the speech portal https://speech.microsoft.com/portal.
        // Otherwise the standard speech end-point will be used.
        config.SetServiceProperty("cid", this.settings.RuntimeSettings.Profile.CustomSpeechEndpointId, ServicePropertyChannel.UriQueryParameter);

        // Custom Speech does not support cloud Keyword Verification at the moment. If this is not done, there will be an error
        // from the service and connection will close. Remove line below when supported.
        config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
    }

    if (this.settings.RuntimeSettings.Profile.VoiceDeploymentEnabled)
    {
        // Set one or more IDs associated with the custom TTS voice your bot will use
        // The format of the string is one or more GUIDs separated by comma (no spaces). You get these GUIDs from
        // your custom TTS on the speech portal https://speech.microsoft.com/portal.
        config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, this.settings.RuntimeSettings.Profile.VoiceDeploymentIds);
    }

    if (!string.IsNullOrEmpty(this.settings.RuntimeSettings.Profile.FromId))
    {
        // Set the from.id in the Bot-Framework Activity sent by this tool.
        // from.id field identifies who generated the activity, and may be required by some bots.
        // See https://github.com/microsoft/botframework-sdk/blob/master/specs/botframework-activity/botframework-activity.md
        // for Bot Framework Activity schema and from.id.
        config.SetProperty(PropertyId.Conversation_From_Id, this.settings.RuntimeSettings.Profile.FromId);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.LogFilePath))
    {
        // Speech SDK has verbose logging to local file, which may be useful when reporting issues.
        // Supply the path to a text file on disk here. By default no logging happens.
        config.SetProperty(PropertyId.Speech_LogFilename, this.settings.RuntimeSettings.Profile.LogFilePath);
    }

    if (hasUrlOverride)
    {
        // For prototyping new Direct Line Speech channel service feature, a custom service URL may be
        // provided by Microsoft and entered in this tool.
        config.SetProperty("SPEECH-Endpoint", this.settings.RuntimeSettings.Profile.UrlOverride);
    }

    if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ProxyHostName) &&
        !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ProxyPortNumber) &&
        int.TryParse(this.settings.RuntimeSettings.Profile.ProxyPortNumber, out var proxyPortNumber))
    {
        // To funnel network traffic via a proxy, set the host name and port number here
        config.SetProxy(this.settings.RuntimeSettings.Profile.ProxyHostName, proxyPortNumber, string.Empty, string.Empty);
    }

    // If the DialogServiceConnector object already exists, destroy it first
    if (this.connector != null)
    {
        // First, unregister all events
        this.connector.ActivityReceived -= this.Connector_ActivityReceived;
        this.connector.Recognizing -= this.Connector_Recognizing;
        this.connector.Recognized -= this.Connector_Recognized;
        this.connector.Canceled -= this.Connector_Canceled;
        this.connector.SessionStarted -= this.Connector_SessionStarted;
        this.connector.SessionStopped -= this.Connector_SessionStopped;

        // Then dispose the object
        this.connector.Dispose();
        this.connector = null;
    }

    // Create a new Dialog Service Connector for the above configuration and register to receive events
    this.connector = new DialogServiceConnector(config, AudioConfig.FromDefaultMicrophoneInput());
    this.connector.ActivityReceived += this.Connector_ActivityReceived;
    this.connector.Recognizing += this.Connector_Recognizing;
    this.connector.Recognized += this.Connector_Recognized;
    this.connector.Canceled += this.Connector_Canceled;
    this.connector.SessionStarted += this.Connector_SessionStarted;
    this.connector.SessionStopped += this.Connector_SessionStopped;

    // Open a connection to Direct Line Speech channel. This void method cannot await;
    // the explicit discard documents that the task is intentionally fire-and-forget.
    _ = this.connector.ConnectAsync();

    if (this.settings.RuntimeSettings.Profile.CustomSpeechEnabled)
    {
        this.customSpeechConfig = new CustomSpeechConfiguration(this.settings.RuntimeSettings.Profile.CustomSpeechEndpointId);
    }

    if (this.settings.RuntimeSettings.Profile.WakeWordEnabled)
    {
        // Configure wake word (also known as "keyword")
        this.activeWakeWordConfig = new WakeWordConfiguration(this.settings.RuntimeSettings.Profile.WakeWordPath);
        _ = this.connector.StartKeywordRecognitionAsync(this.activeWakeWordConfig.WakeWordModel);
    }
}
/// <summary>
/// Initializes the connection to the Bot: builds a Custom Commands or Bot Framework
/// configuration from <paramref name="settings"/>, applies logging, language, custom
/// speech/voice, pass-through property, and throttling options, then creates a
/// DialogServiceConnector over a push audio stream and attaches event handlers.
/// </summary>
/// <param name="settings">Application settings object, built from the input JSON file supplied as run-time argument.</param>
public void InitConnector(AppSettings settings)
{
    DialogServiceConfig config;
    this.BotReplyList = new List<BotReply>();
    this.stopWatch = new Stopwatch();
    this.appsettings = settings;

    if (!string.IsNullOrWhiteSpace(this.appsettings.CustomCommandsAppId))
    {
        // NOTE: Custom commands is a preview Azure Service.
        // Set the custom commands configuration object based on three items:
        // - The Custom commands application ID
        // - Cognitive services speech subscription key.
        // - The Azure region of the subscription key(e.g. "westus").
        config = CustomCommandsConfig.FromSubscription(this.appsettings.CustomCommandsAppId, this.appsettings.SpeechSubscriptionKey, this.appsettings.SpeechRegion);
    }
    else
    {
        // Set the bot framework configuration object based on two items:
        // - Cognitive services speech subscription key. It is needed for billing and is tied to the bot registration.
        // - The Azure region of the subscription key(e.g. "westus").
        config = BotFrameworkConfig.FromSubscription(this.appsettings.SpeechSubscriptionKey, this.appsettings.SpeechRegion);
    }

    if (this.appsettings.SpeechSDKLogEnabled)
    {
        // Speech SDK has verbose logging to local file, which may be useful when reporting issues.
        // FIX (CA1305): the timestamp embedded in the file name is machine-readable data, so it
        // is formatted with InvariantCulture instead of CurrentCulture to keep file names
        // independent of the host's regional settings.
        config.SetProperty(PropertyId.Speech_LogFilename, $"{this.appsettings.OutputFolder}SpeechSDKLog-{DateTime.Now.ToString("yyyy-MM-dd-HH-mm-ss", CultureInfo.InvariantCulture)}.log");
    }

    if (!string.IsNullOrWhiteSpace(this.appsettings.SRLanguage))
    {
        // Set the speech recognition language. If not set, the default is "en-us".
        config.Language = this.appsettings.SRLanguage;
    }

    if (!string.IsNullOrWhiteSpace(this.appsettings.CustomSREndpointId))
    {
        // Set your custom speech end-point id here, as given to you by the speech portal https://speech.microsoft.com/portal.
        // Otherwise the standard speech end-point will be used.
        config.SetServiceProperty("cid", this.appsettings.CustomSREndpointId, ServicePropertyChannel.UriQueryParameter);

        // Custom Speech does not support cloud Keyword Verification at the moment. If this is not done, there will be an error
        // from the service and connection will close. Remove line below when supported.
        config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
    }

    if (!string.IsNullOrWhiteSpace(this.appsettings.CustomVoiceDeploymentIds))
    {
        // Set one or more IDs associated with the custom TTS voice your bot will use.
        // The format of the string is one or more GUIDs separated by comma (no spaces). You get these GUIDs from
        // your custom TTS on the speech portal https://speech.microsoft.com/portal.
        config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, this.appsettings.CustomVoiceDeploymentIds);
    }

    this.timeout = this.appsettings.Timeout;

    if (!string.IsNullOrWhiteSpace(this.appsettings.KeywordRecognitionModel))
    {
        this.kwsTable = KeywordRecognitionModel.FromFile(this.appsettings.KeywordRecognitionModel);
    }

    if (this.appsettings.SetPropertyId != null)
    {
        // Pass-through properties addressed by PropertyId-style string names.
        foreach (KeyValuePair<string, JToken> setPropertyIdPair in this.appsettings.SetPropertyId)
        {
            config.SetProperty(setPropertyIdPair.Key, setPropertyIdPair.Value.ToString());
        }
    }

    if (this.appsettings.SetPropertyString != null)
    {
        // FIX: the dictionary key is already a string; the redundant
        // ToString(CultureInfo.CurrentCulture) call was removed
        // (string.ToString(IFormatProvider) returns the same instance).
        foreach (KeyValuePair<string, JToken> setPropertyStringPair in this.appsettings.SetPropertyString)
        {
            config.SetProperty(setPropertyStringPair.Key, setPropertyStringPair.Value.ToString());
        }
    }

    if (this.appsettings.SetServiceProperty != null)
    {
        foreach (KeyValuePair<string, JToken> setServicePropertyPair in this.appsettings.SetServiceProperty)
        {
            config.SetServiceProperty(setServicePropertyPair.Key, setServicePropertyPair.Value.ToString(), ServicePropertyChannel.UriQueryParameter);
        }
    }

    if (this.appsettings.RealTimeAudio)
    {
        // Stream audio at real-time speed (100% of real time, no pre-throttle burst)
        // instead of as fast as possible, emulating a live microphone.
        config.SetProperty("SPEECH-AudioThrottleAsPercentageOfRealTime", "100");
        config.SetProperty("SPEECH-TransmitLengthBeforeThrottleMs", "0");
    }

    if (this.connector != null)
    {
        // Dispose any previous connector before creating a new one.
        this.connector.Dispose();
        this.connector = null;
    }

    this.pushAudioInputStream = AudioInputStream.CreatePushStream();
    this.connector = new DialogServiceConnector(config, AudioConfig.FromStreamInput(this.pushAudioInputStream));

    if (this.appsettings.BotGreeting)
    {
        // Starting the timer to calculate latency for Bot Greeting.
        this.stopWatch.Restart();
    }

    this.AttachHandlers();
}