// Example 1
    /// <summary>
    /// Uses the provided properties to create a connector from config and register callbacks
    /// </summary>
    /// <exception cref="InvalidOperationException">
    /// Thrown when the subscription key/region are missing or the config could not be created.
    /// </exception>
    private void CreateDialogServiceConnector()
    {
        Debug.Log("CreateDialogServiceConnector enter");

        if (dialogServiceConnector == null)
        {
            // BUGFIX: '== string.Empty' does not catch null fields; IsNullOrEmpty covers both.
            if (string.IsNullOrEmpty(subscriptionKey) || string.IsNullOrEmpty(region))
            {
                Debug.Log("One or more input fields weren't provided. Check the fields in the Canvas object or in the script source");
                throw new InvalidOperationException("DialogServiceConfig creation failed");
            }

            // Creates an instance of a DialogServiceConfig with your bot connection ID, subscription key, and service region.
            // Replace in the editor on the Canvas object OR directly in the code, above in the member declarations
            dialogServiceConfig = BotFrameworkConfig.FromSubscription(subscriptionKey, region);
            if (dialogServiceConfig == null)
            {
                // BUGFIX: this branch previously reused the "input fields" message even though
                // the inputs were present — report the actual failure (config creation) instead.
                Debug.Log("BotFrameworkConfig.FromSubscription returned null. Check the subscription key and region values");
                throw new InvalidOperationException("DialogServiceConfig creation failed");
            }

            AudioConfig audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            dialogServiceConnector = new DialogServiceConnector(dialogServiceConfig, audioConfig);

            // Register callbacks for bot activities, cancellations, and final recognitions.
            dialogServiceConnector.ActivityReceived += DialogServiceConnector_ActivityReceived;
            dialogServiceConnector.Canceled         += DialogServiceConnector_Canceled;
            dialogServiceConnector.Recognized       += DialogServiceConnector_Recognized;
        }

        stateIndicatorString = "DialogServiceConnector created";

        // Cache the AudioSource component — presumably used to play bot TTS replies
        // (named ttsAudio); confirm against the playback code elsewhere in this file.
        ttsAudio = GetComponent<AudioSource>();

        Debug.Log("CreateDialogServiceConnector exit");
    }
        /// <summary>
        /// Create a DialogServiceConnector from the user-provided input.
        /// Any previously created connector is unhooked and disposed first so that its
        /// native audio/connection resources are released before a new one is created.
        /// </summary>
        public void InitDialogServiceConnector()
        {
            DialogServiceConfig dialogServiceConfig = BotFrameworkConfig.FromSubscription(SubscriptionTB.Text, RegionTB.Text);

            if (dialogServiceConnector != null)
            {
                // Unregister all events first so no callback fires on the dead object.
                dialogServiceConnector.SessionStarted   -= DialogServiceConnector_SessionStarted;
                dialogServiceConnector.SessionStopped   -= DialogServiceConnector_SessionStopped;
                dialogServiceConnector.Recognizing      -= DialogServiceConnector_Recognizing;
                dialogServiceConnector.Recognized       -= DialogServiceConnector_Recognized;
                dialogServiceConnector.ActivityReceived -= DialogServiceConnector_ActivityReceived;
                dialogServiceConnector.Canceled         -= DialogServiceConnector_Canceled;

                // BUGFIX: the old connector was previously leaked on reconnect;
                // DialogServiceConnector is IDisposable and owns native resources.
                dialogServiceConnector.Dispose();
                dialogServiceConnector = null;
            }

            var audioConfig = AudioConfig.FromDefaultMicrophoneInput();

            dialogServiceConnector = new DialogServiceConnector(dialogServiceConfig, audioConfig);
            dialogServiceConnector.SessionStarted   += DialogServiceConnector_SessionStarted;
            dialogServiceConnector.SessionStopped   += DialogServiceConnector_SessionStopped;
            dialogServiceConnector.Recognizing      += DialogServiceConnector_Recognizing;
            dialogServiceConnector.Recognized       += DialogServiceConnector_Recognized;
            dialogServiceConnector.ActivityReceived += DialogServiceConnector_ActivityReceived;
            dialogServiceConnector.Canceled         += DialogServiceConnector_Canceled;

            // With a live connector, enable the UI actions that depend on it.
            SendActivityButton.IsEnabled = true;
            StartButton.IsEnabled        = true;
        }
// Example 3
        /// <summary>
        /// Builds the DialogServiceConfig from local settings: prefers a Custom Commands
        /// configuration when an application id is present, otherwise falls back to a plain
        /// Bot Framework configuration, then applies optional tuning properties.
        /// </summary>
        /// <returns>The fully configured <see cref="DialogServiceConfig"/>.</returns>
        private DialogServiceConfig CreateConfiguration()
        {
            var speechKey           = LocalSettingsHelper.SpeechSubscriptionKey;
            var speechRegion        = LocalSettingsHelper.AzureRegion;
            var customSpeechId      = LocalSettingsHelper.CustomSpeechId;
            var customVoiceIds      = LocalSettingsHelper.CustomVoiceIds;
            var customCommandsAppId = LocalSettingsHelper.CustomCommandsAppId;

            // Subscription information is supported in multiple formats:
            //  <subscription_key>     use the default bot associated with the subscription
            //  <sub_key>:<app_id>     use a specified Custom Commands application
            //  <sub_key>#<bot_id>     use a specific bot within the subscription
            DialogServiceConfig config;

            if (!string.IsNullOrEmpty(speechKey) && !string.IsNullOrEmpty(speechRegion) && !string.IsNullOrEmpty(customCommandsAppId))
            {
                config = CustomCommandsConfig.FromSubscription(customCommandsAppId, speechKey, speechRegion);
            }
            else
            {
                // NOTE(review): a bot-id-specific overload exists
                // (BotFrameworkConfig.FromSubscription(key, region, botId)) but is deliberately
                // not wired up here; LocalSettingsHelper.BotId is currently ignored.
                config = BotFrameworkConfig.FromSubscription(
                    speechKey,
                    speechRegion);
            }

            // Disable throttling of input audio (send it as fast as we can!)
            config.SetProperty("SPEECH-AudioThrottleAsPercentageOfRealTime", "9999");

            // NOTE(review): "Befor" below looks like a typo — "Before" is spelled out elsewhere
            // for this property. Confirm against the Speech SDK before changing: the key is
            // matched by the native layer as a literal string, so "fixing" it may be a no-op.
            config.SetProperty("SPEECH-TransmitLengthBeforThrottleMs", "10000");

            // Select the TTS output format the service should synthesize.
            var outputLabel = LocalSettingsHelper.OutputFormat.Label.ToLower(CultureInfo.CurrentCulture);
            config.SetProperty(PropertyId.SpeechServiceConnection_SynthOutputFormat, outputLabel);

            if (!string.IsNullOrEmpty(customSpeechId))
            {
                config.SetServiceProperty("cid", customSpeechId, ServicePropertyChannel.UriQueryParameter);

                // Custom Speech does not support Keyword Verification - Remove line below when supported.
                config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
            }

            if (!string.IsNullOrEmpty(customVoiceIds))
            {
                config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, customVoiceIds);
            }

            if (LocalSettingsHelper.EnableSdkLogging)
            {
                // Verbose SDK logging to a local file, useful when reporting issues.
                var logPath = $"{ApplicationData.Current.LocalFolder.Path}\\sdklog.txt";
                config.SetProperty(PropertyId.Speech_LogFilename, logPath);
            }

            return config;
        }
// Example 4
        /// <summary>
        /// Creates the DialogServiceConnector from hard-coded subscription values and wires
        /// every connector event to a user notification.
        /// </summary>
        private void InitializeDialogServiceConnector()
        {
            // A DialogServiceConfig requires a bot secret key and a Cognitive Services subscription key.
            // The RecoLanguage property is optional (default en-US); only en-US is supported in Preview.
            const string speechSubscriptionKey = "YourSpeechSubscriptionKey"; // Your subscription key
            const string region = "YourServiceRegion";                        // Your subscription service region. Note: only 'westus2' is currently supported

            var botConfig = BotFrameworkConfig.FromSubscription(speechSubscriptionKey, region);
            botConfig.SetProperty(PropertyId.SpeechServiceConnection_RecoLanguage, "en-US");

            connector = new DialogServiceConnector(botConfig);

            // ActivityReceived is the main bot-to-client channel (Bot Framework activities);
            // play any audio attached to the activity.
            connector.ActivityReceived += async (sender, e) =>
            {
                NotifyUser($"Activity received, hasAudio={e.HasAudio} activity={e.Activity}");
                if (e.HasAudio)
                {
                    SynchronouslyPlayActivityAudio(e.Audio);
                }
            };

            // Canceled fires when a turn is aborted or hits an error condition.
            connector.Canceled += (sender, e) =>
            {
                NotifyUser($"Canceled, reason={e.Reason}");
                if (e.Reason == CancellationReason.Error)
                {
                    NotifyUser($"Error: code={e.ErrorCode}, details={e.ErrorDetails}");
                }
            };

            // Recognizing delivers intermediate text while audio is streaming;
            // Recognized delivers the final text once capture completes.
            connector.Recognizing += (sender, e) => NotifyUser($"Recognizing! in-progress text={e.Result.Text}");
            connector.Recognized  += (sender, e) => NotifyUser($"Final speech-to-text result: '{e.Result.Text}'");

            // SessionStarted/SessionStopped bracket a listening turn: audio begins flowing
            // on start, and it's safe to listen again after stop.
            connector.SessionStarted += (sender, e) => NotifyUser($"Now Listening! Session started, id={e.SessionId}");
            connector.SessionStopped += (sender, e) => NotifyUser($"Listening complete. Session ended, id={e.SessionId}");
        }
        /// <summary>
        /// The method reads user-entered settings and creates a new instance of the DialogServiceConnector object
        /// when the "Reconnect" button is pressed (or the microphone button is pressed for the first time).
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the profile lacks a subscription key plus a region or URL override.
        /// </exception>
        private void InitSpeechConnector()
        {
            DialogServiceConfig config = null;

            var hasSubscription = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.SubscriptionKey);
            var hasRegion       = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
            var hasBotId        = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.BotId);
            var hasUrlOverride  = !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.UrlOverride);

            if (hasSubscription && (hasRegion || hasUrlOverride))
            {
                if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.CustomCommandsAppId))
                {
                    // NOTE: Custom commands is a preview Azure Service.
                    // Set the custom commands configuration object based on three items:
                    // - The Custom commands application ID
                    // - Cognitive services speech subscription key.
                    // - The Azure region of the subscription key(e.g. "westus").
                    config = CustomCommandsConfig.FromSubscription(this.settings.RuntimeSettings.Profile.CustomCommandsAppId, this.settings.RuntimeSettings.Profile.SubscriptionKey, this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
                }
                else if (hasBotId)
                {
                    // Target a specific bot within the subscription.
                    config = BotFrameworkConfig.FromSubscription(this.settings.RuntimeSettings.Profile.SubscriptionKey, this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion, this.settings.RuntimeSettings.Profile.BotId);
                }
                else
                {
                    // Set the bot framework configuration object based on two items:
                    // - Cognitive services speech subscription key. It is needed for billing and is tied to the bot registration.
                    // - The Azure region of the subscription key(e.g. "westus").
                    config = BotFrameworkConfig.FromSubscription(this.settings.RuntimeSettings.Profile.SubscriptionKey, this.settings.RuntimeSettings.Profile.SubscriptionKeyRegion);
                }
            }

            if (config == null)
            {
                // BUGFIX: without this guard every config.* call below throws a
                // NullReferenceException when the profile has no usable subscription info.
                throw new InvalidOperationException("Cannot create a connection configuration: the profile must contain a subscription key and a region (or URL override)");
            }

            if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ConnectionLanguage))
            {
                // Set the speech recognition language. If not set, the default is "en-us".
                config.Language = this.settings.RuntimeSettings.Profile.ConnectionLanguage;
            }

            if (this.settings.RuntimeSettings.Profile.CustomSpeechEnabled)
            {
                // Set your custom speech end-point id here, as given to you by the speech portal https://speech.microsoft.com/portal.
                // Otherwise the standard speech end-point will be used.
                config.SetServiceProperty("cid", this.settings.RuntimeSettings.Profile.CustomSpeechEndpointId, ServicePropertyChannel.UriQueryParameter);

                // Custom Speech does not support cloud Keyword Verification at the moment. If this is not done, there will be an error
                // from the service and connection will close. Remove line below when supported.
                config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
            }

            if (this.settings.RuntimeSettings.Profile.VoiceDeploymentEnabled)
            {
                // Set one or more IDs associated with the custom TTS voice your bot will use
                // The format of the string is one or more GUIDs separated by comma (no spaces). You get these GUIDs from
                // your custom TTS on the speech portal https://speech.microsoft.com/portal.
                config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, this.settings.RuntimeSettings.Profile.VoiceDeploymentIds);
            }

            if (!string.IsNullOrEmpty(this.settings.RuntimeSettings.Profile.FromId))
            {
                // Set the from.id in the Bot-Framework Activity sent by this tool.
                // from.id field identifies who generated the activity, and may be required by some bots.
                // See https://github.com/microsoft/botframework-sdk/blob/master/specs/botframework-activity/botframework-activity.md
                // for Bot Framework Activity schema and from.id.
                config.SetProperty(PropertyId.Conversation_From_Id, this.settings.RuntimeSettings.Profile.FromId);
            }

            if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.LogFilePath))
            {
                // Speech SDK has verbose logging to local file, which may be useful when reporting issues.
                // Supply the path to a text file on disk here. By default no logging happens.
                config.SetProperty(PropertyId.Speech_LogFilename, this.settings.RuntimeSettings.Profile.LogFilePath);
            }

            if (hasUrlOverride)
            {
                // For prototyping new Direct Line Speech channel service feature, a custom service URL may be
                // provided by Microsoft and entered in this tool.
                config.SetProperty("SPEECH-Endpoint", this.settings.RuntimeSettings.Profile.UrlOverride);
            }

            if (!string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ProxyHostName) &&
                !string.IsNullOrWhiteSpace(this.settings.RuntimeSettings.Profile.ProxyPortNumber) &&
                int.TryParse(this.settings.RuntimeSettings.Profile.ProxyPortNumber, out var proxyPortNumber))
            {
                // To funnel network traffic via a proxy, set the host name and port number here
                config.SetProxy(this.settings.RuntimeSettings.Profile.ProxyHostName, proxyPortNumber, string.Empty, string.Empty);
            }

            // If the DialogServiceConnector object already exists, destroy it first
            if (this.connector != null)
            {
                // First, unregister all events
                this.connector.ActivityReceived -= this.Connector_ActivityReceived;
                this.connector.Recognizing      -= this.Connector_Recognizing;
                this.connector.Recognized       -= this.Connector_Recognized;
                this.connector.Canceled         -= this.Connector_Canceled;
                this.connector.SessionStarted   -= this.Connector_SessionStarted;
                this.connector.SessionStopped   -= this.Connector_SessionStopped;

                // Then dispose the object
                this.connector.Dispose();
                this.connector = null;
            }

            // Create a new Dialog Service Connector for the above configuration and register to receive events
            this.connector = new DialogServiceConnector(config, AudioConfig.FromDefaultMicrophoneInput());
            this.connector.ActivityReceived += this.Connector_ActivityReceived;
            this.connector.Recognizing      += this.Connector_Recognizing;
            this.connector.Recognized       += this.Connector_Recognized;
            this.connector.Canceled         += this.Connector_Canceled;
            this.connector.SessionStarted   += this.Connector_SessionStarted;
            this.connector.SessionStopped   += this.Connector_SessionStopped;

            // Open a connection to Direct Line Speech channel (fire-and-forget; connection
            // state is reported through the registered events).
            this.connector.ConnectAsync();

            if (this.settings.RuntimeSettings.Profile.CustomSpeechEnabled)
            {
                this.customSpeechConfig = new CustomSpeechConfiguration(this.settings.RuntimeSettings.Profile.CustomSpeechEndpointId);
            }

            if (this.settings.RuntimeSettings.Profile.WakeWordEnabled)
            {
                // Configure wake word (also known as "keyword")
                this.activeWakeWordConfig = new WakeWordConfiguration(this.settings.RuntimeSettings.Profile.WakeWordPath);
                this.connector.StartKeywordRecognitionAsync(this.activeWakeWordConfig.WakeWordModel);
            }
        }
        /// <summary>
        /// Initializes the connection to the Bot.
        /// </summary>
        /// <param name="settings">Application settings object, built from the input JSON file supplied as run-time argument.</param>
        public void InitConnector(AppSettings settings)
        {
            DialogServiceConfig config;

            this.BotReplyList = new List<BotReply>();
            this.stopWatch    = new Stopwatch();
            this.appsettings  = settings;

            if (!string.IsNullOrWhiteSpace(this.appsettings.CustomCommandsAppId))
            {
                // NOTE: Custom commands is a preview Azure Service.
                // Set the custom commands configuration object based on three items:
                // - The Custom commands application ID
                // - Cognitive services speech subscription key.
                // - The Azure region of the subscription key(e.g. "westus").
                config = CustomCommandsConfig.FromSubscription(this.appsettings.CustomCommandsAppId, this.appsettings.SpeechSubscriptionKey, this.appsettings.SpeechRegion);
            }
            else
            {
                // Set the bot framework configuration object based on two items:
                // - Cognitive services speech subscription key. It is needed for billing and is tied to the bot registration.
                // - The Azure region of the subscription key(e.g. "westus").
                config = BotFrameworkConfig.FromSubscription(this.appsettings.SpeechSubscriptionKey, this.appsettings.SpeechRegion);
            }

            if (this.appsettings.SpeechSDKLogEnabled)
            {
                // Speech SDK has verbose logging to local file, which may be useful when reporting issues.
                config.SetProperty(PropertyId.Speech_LogFilename, $"{this.appsettings.OutputFolder}SpeechSDKLog-{DateTime.Now.ToString("yyyy-MM-dd-HH-mm-ss", CultureInfo.CurrentCulture)}.log");
            }

            if (!string.IsNullOrWhiteSpace(this.appsettings.SRLanguage))
            {
                // Set the speech recognition language. If not set, the default is "en-us".
                config.Language = this.appsettings.SRLanguage;
            }

            if (!string.IsNullOrWhiteSpace(this.appsettings.CustomSREndpointId))
            {
                // Set your custom speech end-point id here, as given to you by the speech portal https://speech.microsoft.com/portal.
                // Otherwise the standard speech end-point will be used.
                config.SetServiceProperty("cid", this.appsettings.CustomSREndpointId, ServicePropertyChannel.UriQueryParameter);

                // Custom Speech does not support cloud Keyword Verification at the moment. If this is not done, there will be an error
                // from the service and connection will close. Remove line below when supported.
                config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
            }

            if (!string.IsNullOrWhiteSpace(this.appsettings.CustomVoiceDeploymentIds))
            {
                // Set one or more IDs associated with the custom TTS voice your bot will use.
                // The format of the string is one or more GUIDs separated by comma (no spaces). You get these GUIDs from
                // your custom TTS on the speech portal https://speech.microsoft.com/portal.
                config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, this.appsettings.CustomVoiceDeploymentIds);
            }

            this.timeout = this.appsettings.Timeout;

            if (!string.IsNullOrWhiteSpace(this.appsettings.KeywordRecognitionModel))
            {
                this.kwsTable = KeywordRecognitionModel.FromFile(this.appsettings.KeywordRecognitionModel);
            }

            // Apply pass-through properties from the settings file (keys are plain strings).
            if (this.appsettings.SetPropertyId != null)
            {
                foreach (KeyValuePair<string, JToken> setPropertyIdPair in this.appsettings.SetPropertyId)
                {
                    config.SetProperty(setPropertyIdPair.Key, setPropertyIdPair.Value.ToString());
                }
            }

            if (this.appsettings.SetPropertyString != null)
            {
                foreach (KeyValuePair<string, JToken> setPropertyStringPair in this.appsettings.SetPropertyString)
                {
                    // The key is already a string; no culture-sensitive conversion is needed.
                    config.SetProperty(setPropertyStringPair.Key, setPropertyStringPair.Value.ToString());
                }
            }

            if (this.appsettings.SetServiceProperty != null)
            {
                foreach (KeyValuePair<string, JToken> setServicePropertyPair in this.appsettings.SetServiceProperty)
                {
                    config.SetServiceProperty(setServicePropertyPair.Key, setServicePropertyPair.Value.ToString(), ServicePropertyChannel.UriQueryParameter);
                }
            }

            if (this.appsettings.RealTimeAudio)
            {
                // Throttle the audio upload to real-time speed instead of sending as fast as possible.
                config.SetProperty("SPEECH-AudioThrottleAsPercentageOfRealTime", "100");
                config.SetProperty("SPEECH-TransmitLengthBeforeThrottleMs", "0");
            }

            if (this.connector != null)
            {
                // Then dispose the object
                this.connector.Dispose();
                this.connector = null;
            }

            // Audio is fed through a push stream rather than a live microphone.
            this.pushAudioInputStream = AudioInputStream.CreatePushStream();
            this.connector            = new DialogServiceConnector(config, AudioConfig.FromStreamInput(this.pushAudioInputStream));

            if (this.appsettings.BotGreeting)
            {
                // Starting the timer to calculate latency for Bot Greeting.
                this.stopWatch.Restart();
            }

            this.AttachHandlers();
        }
// Example 7
        /// <summary>
        /// Bridges a WebSocket audio feed to a Direct Line Speech bot: incoming binary frames
        /// are pushed into the recognizer's audio stream, and the bot's TTS reply audio is
        /// relayed back over the socket in 640-byte chunks at roughly real-time pace.
        /// </summary>
        static void Main(string[] args)
        {
            var server = new WebSocketServer("ws://0.0.0.0:8181");

            server.RestartAfterListenError = true;
            server.ListenerSocket.NoDelay  = true;

            const string speechSubscriptionKey = ""; // Your subscription key
            const string region = "";                // Your subscription service region.

            var botConfig = BotFrameworkConfig.FromSubscription(speechSubscriptionKey, region);

            botConfig.Language = "fr-FR";

            var audioStream = new VoiceAudioStream();

            // Initialize with the format required by the Speech service
            var audioFormat = AudioStreamFormat.GetWaveFormatPCM(16000, 16, 1);

            // Configure speech SDK to work with the audio stream in right format.
            // Alternatively this can be a direct microphone input.
            var audioConfig = AudioConfig.FromStreamInput(audioStream, audioFormat);
            var connector   = new DialogServiceConnector(botConfig, audioConfig);

            // Get credentials and region from client's message.
            server.Start(socket =>
            {
                Console.WriteLine("started!");
                sock          = socket;
                socket.OnOpen = () =>
                {
                    Console.WriteLine("Open!");
                };

                socket.OnClose = () =>
                {
                    Console.WriteLine("Close!");
                    connector.DisconnectAsync();
                };
                socket.OnMessage = message =>
                {
                    connector.ListenOnceAsync();
                    Console.WriteLine(message);
                };
                socket.OnBinary = binary =>
                {
                    // Drop incoming audio while the bot's reply is being relayed (see 'locked').
                    if (!locked)
                    {
                        audioStream.Write(binary, 0, binary.Length);
                    }
                };
            });

            Console.WriteLine("Open!");
            connector.ActivityReceived += (sender, activityReceivedEventArgs) =>
            {
                locked = true;
                Console.WriteLine(
                    $"Activity received, hasAudio={activityReceivedEventArgs.HasAudio} activity={activityReceivedEventArgs.Activity}");

                if (!activityReceivedEventArgs.HasAudio)
                {
                    return;
                }

                // Drain the bot's TTS audio into 640-byte chunks (20 ms of 16 kHz/16-bit mono PCM).
                // NOTE(review): the final chunk keeps its full 640-byte length even when the last
                // read is short, so trailing zero-padding may be sent — confirm the receiver
                // tolerates it before tightening this.
                byte[] pullBuffer;

                uint lastRead   = 0;
                var  numberByte = 0;
                var  listByte   = new List<byte[]>();
                var  stopWatch  = new Stopwatch();
                do
                {
                    pullBuffer  = new byte[640];
                    lastRead    = activityReceivedEventArgs.Audio.Read(pullBuffer);
                    numberByte += pullBuffer.Length;
                    listByte.Add(pullBuffer);
                }
                while (lastRead == pullBuffer.Length);

                stopWatch.Start();
                foreach (var byteArray in listByte)
                {
                    sock.Send(byteArray);
                    Console.WriteLine($"envois à Nexmo {stopWatch.ElapsedMilliseconds}");
                    Thread.Sleep(1);
                }

                // Wait out the remainder of the audio's real-time duration before unlocking.
                // BUGFIX: duration(ms) = bytes * 1000 / 32000 for 16 kHz/16-bit/mono PCM; the
                // previous nested integer divisions truncated the result to whole seconds.
                var wait = (numberByte * 1000L) / 32000 - stopWatch.ElapsedMilliseconds;
                Console.WriteLine(wait);

                // BUGFIX: Thread.Sleep throws ArgumentOutOfRangeException for negative values,
                // which happened whenever sending took longer than the audio's duration.
                if (wait > 0)
                {
                    Thread.Sleep((int)wait);
                }

                stopWatch.Stop();
                locked = false;
                connector.ListenOnceAsync();
            };
            connector.Canceled += (sender, canceledEventArgs) =>
            {
                Console.WriteLine($"Canceled, reason={canceledEventArgs.Reason}");
                if (canceledEventArgs.Reason == CancellationReason.Error)
                {
                    Console.WriteLine(
                        $"Error: code={canceledEventArgs.ErrorCode}, details={canceledEventArgs.ErrorDetails}");
                }
            };
            connector.Recognizing += (sender, recognitionEventArgs) =>
            {
                Console.WriteLine($"Recognizing! in-progress text={recognitionEventArgs.Result.Text}");
            };
            connector.Recognized += (sender, recognitionEventArgs) =>
            {
                Console.WriteLine($"Final speech-to-text result: '{recognitionEventArgs.Result.Text}'");
            };
            connector.SessionStarted += (sender, sessionEventArgs) =>
            {
                Console.WriteLine($"Now Listening! Session started, id={sessionEventArgs.SessionId}");
            };
            connector.SessionStopped += (sender, sessionEventArgs) =>
            {
                Console.WriteLine($"Listening complete. Session ended, id={sessionEventArgs.SessionId}");
            };
            connector.ConnectAsync();

            // Block the main thread until an external signal requests shutdown.
            _quitEvent.WaitOne();
        }