        /// <summary>
        /// Determines whether the first face detected in a SoftwareBitmap from a video feed is smiling.
        /// </summary>
        /// <param name="bitmap">Image from a frame of a video feed.</param>
        /// <returns>True if the smile confidence for the first detected face is greater than 75%.</returns>
        private async Task<bool> IsCustomerSmilingAsync(SoftwareBitmap bitmap)
        {
            // Convert video frame image to a stream
            var stream = await bitmap.AsStream();

            // Create a Cognitive Services Face API client (from the NuGet package)
            var client = new FaceServiceClient(App.FACE_API_SUBSCRIPTION_KEY);

            // Ask Cognitive Services to also analyze the picture for smiles on the face
            var faces = await client.DetectAsync(
                imageStream: stream,
                returnFaceId: true,
                returnFaceLandmarks: false,
                returnFaceAttributes: new FaceAttributeType[] { FaceAttributeType.Smile });

            // If a face was found, check whether the smile confidence is greater than 75%
            if (faces?.Any() == true)
            {
                return faces[0].FaceAttributes.Smile > 0.75;
            }

            return false;
        }
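
        // NOTE: SoftwareBitmap has no built-in AsStream() method, so the AsStream()
        // calls in these methods presumably resolve to an extension helper defined
        // elsewhere in the project. A minimal sketch of such a helper is shown below;
        // the JPEG re-encode and the names are assumptions, not the original
        // implementation. It must live in its own static class at namespace scope,
        // with using directives for System.IO, System.Threading.Tasks,
        // Windows.Graphics.Imaging, and Windows.Storage.Streams.
        public static class SoftwareBitmapExtensions
        {
            public static async Task<Stream> AsStream(this SoftwareBitmap bitmap)
            {
                // BitmapEncoder handles Bgra8 most reliably, so convert the frame first
                var converted = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                // Encode the frame as JPEG into an in-memory stream
                var stream = new InMemoryRandomAccessStream();
                var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                encoder.SetSoftwareBitmap(converted);
                await encoder.FlushAsync();

                // Rewind and hand back a System.IO.Stream for the Face/Emotion clients
                stream.Seek(0);
                return stream.AsStreamForRead();
            }
        }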
        /// <summary>
        /// Detects faces in an instance of a SoftwareBitmap object representing a frame from a video feed.
        /// </summary>
        /// <param name="bitmap">Image from a frame from a video feed.</param>
        /// <param name="features">Array of FaceAttributeType enum objects used to specify which facial features should be analyzed.</param>
        /// <returns>Array of faces detected in the frame, with the requested attributes populated.</returns>
        private async Task<Microsoft.ProjectOxford.Face.Contract.Face[]> FaceDetectionAsync(SoftwareBitmap bitmap, params FaceAttributeType[] features)
        {
            // Convert video frame image to a stream
            var stream = await bitmap.AsStream();

            // Cognitive Services Face API client from the NuGet package
            var client = new FaceServiceClient(App.FACE_API_SUBSCRIPTION_KEY);

            // Ask Cognitive Services to analyze the picture and determine the face attributes specified in the features array
            var faces = await client.DetectAsync(
                imageStream: stream,
                returnFaceId: true,
                returnFaceLandmarks: false,
                returnFaceAttributes: features);

            // Remove previous faces on UI canvas
            this.ClearFacesOnUI();

            // Video feed is probably a different resolution than the actual window size, so scale the sizes of each face
            double widthScale  = bitmap.PixelWidth / facesCanvas.ActualWidth;
            double heightScale = bitmap.PixelHeight / facesCanvas.ActualHeight;

            // Draw a box for each face detected w/ text of face features
            foreach (var face in faces)
            {
                this.DrawFaceOnUI(widthScale, heightScale, face);
            }

            return faces;
        }
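
        // The ClearFacesOnUI and DrawFaceOnUI helpers used above are not shown in
        // this excerpt. A hypothetical sketch of what they might look like, assuming
        // facesCanvas is a XAML Canvas and the usual Windows.UI, Windows.UI.Xaml.Controls,
        // Windows.UI.Xaml.Media, and Windows.UI.Xaml.Shapes using directives:
        private void ClearFacesOnUI()
        {
            // Remove the rectangles drawn for the previous frame
            facesCanvas.Children.Clear();
        }

        private void DrawFaceOnUI(double widthScale, double heightScale, Microsoft.ProjectOxford.Face.Contract.Face face)
        {
            // Scale the face rectangle from video-frame pixels down to canvas coordinates
            var box = new Rectangle
            {
                Width           = face.FaceRectangle.Width / widthScale,
                Height          = face.FaceRectangle.Height / heightScale,
                Stroke          = new SolidColorBrush(Colors.Yellow),
                StrokeThickness = 2
            };

            // Position the rectangle over the detected face and add it to the canvas
            Canvas.SetLeft(box, face.FaceRectangle.Left / widthScale);
            Canvas.SetTop(box, face.FaceRectangle.Top / heightScale);
            facesCanvas.Children.Add(box);
        }
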
        /// <summary>
        /// Detects the emotions of each face in an instance of a SoftwareBitmap object representing a frame from a video feed.
        /// </summary>
        /// <param name="bitmap">Image from a frame of a video feed.</param>
        /// <returns>Array of results, one per detected face, each with confidence scores for the emotions recognized.</returns>
        private async Task<Microsoft.ProjectOxford.Emotion.Contract.Emotion[]> EmotionDetectionAsync(SoftwareBitmap bitmap)
        {
            // Convert video frame image to a stream
            var stream = await bitmap.AsStream();

            // Use the Emotion API NuGet package to access the Cognitive Services Emotion service
            var client = new EmotionServiceClient(App.EMOTION_API_SUBSCRIPTION_KEY);

            // Pass the video frame image as a stream to the Emotion API to find all face/emotions in the video still
            return await client.RecognizeAsync(stream);
        }
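
        // Usage note: each Emotion result pairs a FaceRectangle with a Scores object,
        // and the Scores contract in the Emotion NuGet package exposes ToRankedList(),
        // which orders the per-emotion confidences from strongest to weakest. A small
        // hypothetical helper to label a face with its dominant emotion (requires
        // using System.Linq) might look like this:
        private static string DominantEmotion(Microsoft.ProjectOxford.Emotion.Contract.Emotion emotion)
        {
            // The first entry in the ranked list is the highest-confidence emotion
            return emotion.Scores.ToRankedList().First().Key;
        }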