        async void OnVerifyAsync(object sender, RoutedEventArgs e)
        {
            // Take the user's profile ID back from the UI as we haven't stored
            // it anywhere.
            Guid profileId = Guid.Parse(this.txtProfileId.Text);

            // Prompt the user to speak.
            await ConfirmMessageAsync("Dismiss the dialog then speak your phrase");

            // Wrapper class which uses AudioGraph to record audio to a file over a specified
            // period of time (sketched, along with ConfirmMessageAsync, after this handler).
            StorageFile recordedFile =
                await CognitiveAudioGraphRecorder.RecordToTemporaryFileAsync(
                    TimeSpan.FromSeconds(10));

            // VerificationClient is my wrapper for the verification REST API.
            // It needs my Cognitive speaker recognition API key in order to work
            // (a sketch of its verify call is at the end of this listing).
            VerificationClient verificationClient = new VerificationClient(
                cognitiveApiKey);

            VerificationResult result =
                await verificationClient.VerifyRecordedSpeechForProfileIdAsync(
                    profileId, recordedFile);

            // Get rid of the recorded audio file.
            await recordedFile.DeleteAsync();

            await ConfirmMessageAsync(
                $"Your speech was {result.Result}ed with {result.Confidence} confidence");
        }
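        // Neither ConfirmMessageAsync nor CognitiveAudioGraphRecorder is listed here, so the
        // two sketches below are guesses at what they might look like rather than the actual
        // implementations. ConfirmMessageAsync is presumably little more than a wrapper around
        // MessageDialog (Windows.UI.Popups):
        async Task ConfirmMessageAsync(string message)
        {
            await new MessageDialog(message).ShowAsync();
        }
        // and the recorder presumably builds an AudioGraph with a microphone input node feeding
        // a WAV file output node, records for the requested period and hands back the file. The
        // 16KHz/16-bit/mono PCM format is an assumption based on what the speaker recognition
        // service expects, error checking on the Create* results is omitted and the usual
        // Windows.Storage/Windows.Media.* using directives are assumed.
        static class AudioGraphRecorderSketch
        {
            public static async Task<StorageFile> RecordToTemporaryFileAsync(TimeSpan duration)
            {
                StorageFile file = await ApplicationData.Current.TemporaryFolder.CreateFileAsync(
                    "recording.wav", CreationCollisionOption.GenerateUniqueName);

                // Microphone in, WAV file out.
                CreateAudioGraphResult graphResult = await AudioGraph.CreateAsync(
                    new AudioGraphSettings(AudioRenderCategory.Speech));

                CreateAudioDeviceInputNodeResult inputResult =
                    await graphResult.Graph.CreateDeviceInputNodeAsync(MediaCategory.Speech);

                MediaEncodingProfile wavProfile = MediaEncodingProfile.CreateWav(AudioEncodingQuality.Auto);
                wavProfile.Audio = AudioEncodingProperties.CreatePcm(16000, 1, 16);

                CreateAudioFileOutputNodeResult outputResult =
                    await graphResult.Graph.CreateFileOutputNodeAsync(file, wavProfile);

                inputResult.DeviceInputNode.AddOutgoingConnection(outputResult.FileOutputNode);

                // Record for the requested period, then finalise the output file.
                graphResult.Graph.Start();
                await Task.Delay(duration);
                graphResult.Graph.Stop();
                await outputResult.FileOutputNode.FinalizeAsync();
                graphResult.Graph.Dispose();

                return file;
            }
        }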
        async void OnIdentifyAsync(object sender, RoutedEventArgs e)
        {
            // IdentificationClient is my wrapper for the identification REST API.
            // It needs my Cognitive speaker recognition API key in order to work.
            IdentificationClient idClient = new IdentificationClient(cognitiveApiKey);

            // In this example, we only use the first 10 profile IDs that the service knows
            // about, in order to keep the code shorter.
            IdentificationProfile[] profiles = await idClient.GetIdentificationProfilesAsync();

            Guid[] profileIds = profiles.Take(10).Select(p => p.IdentificationProfileId).ToArray();

            // Ask the user to begin speaking.
            await ConfirmMessageAsync(
                "Dismiss the dialog then speak for 60 seconds");

            // Wrapper class which uses AudioGraph to record audio to a file over a specified
            // period of time.
            StorageFile recordingFile = await CognitiveAudioGraphRecorder.RecordToTemporaryFileAsync(
                TimeSpan.FromSeconds(60));

            // Make a call to the 'Identification' API to ask the service which (if any) of
            // the candidate profiles the speech belongs to. A sketch of this wrapper and of
            // the polling below appears after this handler.
            PendingOperationResult serviceOperationResult =
                await idClient.IdentifyRecordedSpeechForProfileIdsAsync(
                    recordingFile, profileIds);

            // Make polling calls to the 'Get Operation Status' REST API, waiting for the
            // service-side operation to complete.
            IdentificationOperationResult result =
                await serviceOperationResult.PollForProcessingResultAsync(TimeSpan.FromSeconds(5));

            // Get rid of the speech file.
            await recordingFile.DeleteAsync();

            // Assume that things failed.
            string message = "not recognised";

            // But if they worked, pull out the identified profile ID (guarding against a
            // null or failed processing result)...
            Guid identifiedProfileId = result?.ProcessingResult?.IdentifiedProfileId ?? Guid.Empty;

            if (identifiedProfileId != Guid.Empty)
            {
                // Build up a message containing the recognised profile ID and the confidence applied.
                message = $"recognised profile {identifiedProfileId}" +
                          $" with {result.ProcessingResult.Confidence} confidence";
            }
            await ConfirmMessageAsync(message);
        }
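        // IdentificationClient and PendingOperationResult aren't listed here either. The sketch
        // below shows roughly how the identify call and the polling of the 'Get Operation Status'
        // API could be implemented. The base address, header and JSON property names are
        // assumptions based on the v1.0 speaker recognition REST API rather than code from this
        // listing, the typed result classes are replaced by Newtonsoft.Json's JObject to keep
        // things short, and the real code wraps the operation URI in a PendingOperationResult
        // rather than handing it back directly.
        class IdentificationClientSketch
        {
            const string BaseUri = "https://westus.api.cognitive.microsoft.com/spid/v1.0";

            readonly HttpClient httpClient;

            public IdentificationClientSketch(string apiKey)
            {
                this.httpClient = new HttpClient();
                this.httpClient.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", apiKey);
            }
            public async Task<Uri> IdentifyRecordedSpeechForProfileIdsAsync(
                StorageFile recording, Guid[] profileIds)
            {
                string uri =
                    $"{BaseUri}/identify?identificationProfileIds={string.Join(",", profileIds)}";

                // POST the recorded WAV; the service accepts the request and hands back an
                // 'Operation-Location' header which we then poll for the eventual result.
                using (Stream stream = await recording.OpenStreamForReadAsync())
                {
                    HttpResponseMessage response =
                        await this.httpClient.PostAsync(uri, new StreamContent(stream));

                    response.EnsureSuccessStatusCode();

                    return new Uri(response.Headers.GetValues("Operation-Location").First());
                }
            }
            // Poll the operation until it leaves the notstarted/running states, then hand back
            // the parsed JSON which carries the status and (on success) the processingResult.
            public async Task<JObject> PollForProcessingResultAsync(Uri operationLocation, TimeSpan interval)
            {
                while (true)
                {
                    JObject result = JObject.Parse(await this.httpClient.GetStringAsync(operationLocation));

                    string status = (string)result["status"];

                    if ((status != "notstarted") && (status != "running"))
                    {
                        return result;
                    }
                    await Task.Delay(interval);
                }
            }
        }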
        async void OnEnrolAsync(object sender, RoutedEventArgs e)
        {
            // IdentificationClient is my wrapper for the identification REST API.
            // It needs my Cognitive speaker recognition API key in order to work.
            IdentificationClient idClient = new IdentificationClient(cognitiveApiKey);

            // Make a call to the 'Create Profile' REST API and get back a new profile ID
            // (a sketch of what that call might look like follows this handler).
            Guid profileId = await idClient.AddIdentificationProfileAsync();

            // Start by asking for 60 seconds of speech; after each enrollment the service
            // tells us how much more it still needs to hear.
            float remainingTalkTime = 60.0f;

            // Loop until we have fully enrolled - this check is perhaps simplistic as
            // we may get errors etc.
            while (remainingTalkTime > 0)
            {
                // The service wants a minimum of 20 seconds of recorded audio in each clip.
                remainingTalkTime = Math.Max(remainingTalkTime, 20.0f);

                // Ask the user to begin speaking.
                await ConfirmMessageAsync(
                    $"Dismiss the dialog then speak for {remainingTalkTime} seconds");

                // Wrapper class which uses AudioGraph to record audio to a file over a specified
                // period of time.
                StorageFile recordedFile = await CognitiveAudioGraphRecorder.RecordToTemporaryFileAsync(
                    TimeSpan.FromSeconds(remainingTalkTime));

                // Make a call to the 'Create Enrollment' API to process the speech for the
                // profile.
                PendingOperationResult serviceOperationResult =
                    await idClient.EnrollRecordedSpeechForProfileIdAsync(
                        profileId, recordedFile);

                // Make polling calls to the 'Get Operation Status' REST API, waiting for the
                // service-side operation to complete.
                IdentificationOperationResult result =
                    await serviceOperationResult.PollForProcessingResultAsync(TimeSpan.FromSeconds(5));

                // Get rid of the speech file.
                await recordedFile.DeleteAsync();

                // How much more speech does the service need to hear from the user?
                remainingTalkTime = result.ProcessingResult.RemainingEnrollmentSpeechTime;
            }
            await ConfirmMessageAsync("enrolled, thanks");
        }
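        // The 'Create Profile' call above isn't shown either. A minimal sketch, assuming the
        // v1.0 identificationProfiles endpoint, an en-us locale and Newtonsoft.Json for the
        // JSON handling; the httpClient parameter is expected to already carry the
        // Ocp-Apim-Subscription-Key header.
        static async Task<Guid> AddIdentificationProfileSketchAsync(HttpClient httpClient)
        {
            // POST the locale and read the new profile's ID out of the response JSON.
            HttpResponseMessage response = await httpClient.PostAsync(
                "https://westus.api.cognitive.microsoft.com/spid/v1.0/identificationProfiles",
                new StringContent("{ \"locale\" : \"en-us\" }", Encoding.UTF8, "application/json"));

            response.EnsureSuccessStatusCode();

            JObject json = JObject.Parse(await response.Content.ReadAsStringAsync());

            return Guid.Parse((string)json["identificationProfileId"]);
        }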
        async void OnEnrollAsync(object sender, RoutedEventArgs e)
        {
            // VerificationClient is my wrapper for the verification REST API.
            // It needs my Cognitive speaker recognition API key in order to work.
            VerificationClient verificationClient = new VerificationClient(cognitiveApiKey);

            // This calls the 'Create Profile' REST API and returns the GUID of the
            // new profile.
            Guid profileId = await verificationClient.AddVerificationProfileAsync();

            // Display the profile ID in the UI.
            this.txtProfileId.Text = profileId.ToString();

            bool enrolled = false;

            do
            {
                await ConfirmMessageAsync("Dismiss this dialog then say your phrase");

                // Wrapper class which uses AudioGraph to record audio to a file over a specified
                // period of time.
                StorageFile recordedAudioFile =
                    await CognitiveAudioGraphRecorder.RecordToTemporaryFileAsync(TimeSpan.FromSeconds(10));

                // This calls the 'Create Enrollment' API with the speech stream and
                // decodes the returned JSON.
                VerificationEnrollmentResult result =
                    await verificationClient.EnrollRecordedSpeechForProfileIdAsync(
                        profileId, recordedAudioFile);

                // Get rid of the recorded speech.
                await recordedAudioFile.DeleteAsync();

                // Do we need to do more enrollments? Note - this check is probably
                // over-simplistic.
                enrolled = (result.RemainingEnrollments == 0);
            } while (!enrolled);
        }
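        // Finally, VerificationClient isn't listed either. This is a sketch of the verify call
        // used in OnVerifyAsync above, again assuming the v1.0 REST endpoint and Newtonsoft.Json;
        // the VerificationResultSketch type is a stand-in of my own rather than the
        // VerificationResult class used above, and the httpClient is expected to already carry
        // the subscription key header.
        class VerificationResultSketch
        {
            public string Result { get; set; }        // "Accept" or "Reject"
            public string Confidence { get; set; }    // e.g. "Low", "Normal", "High"
        }
        static async Task<VerificationResultSketch> VerifyRecordedSpeechSketchAsync(
            HttpClient httpClient, Guid profileId, StorageFile recordedFile)
        {
            // Unlike identification, verification answers synchronously in the response body.
            string uri =
                $"https://westus.api.cognitive.microsoft.com/spid/v1.0/verify?verificationProfileId={profileId}";

            using (Stream stream = await recordedFile.OpenStreamForReadAsync())
            {
                HttpResponseMessage response = await httpClient.PostAsync(uri, new StreamContent(stream));

                response.EnsureSuccessStatusCode();

                JObject json = JObject.Parse(await response.Content.ReadAsStringAsync());

                return new VerificationResultSketch()
                {
                    Result = (string)json["result"],
                    Confidence = (string)json["confidence"]
                };
            }
        }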