        /// <summary>
        /// Runs the skill against a binding object, executing the skill logic on the bound input features and populating the bound output features.
        /// This skill proceeds in 2 steps:
        /// 1) Run FaceDetector against the image and populate the face bound feature in the binding object
        /// 2) If a face was detected, run sentiment analysis on that portion of the image using Windows ML, then update the score
        /// of each possible sentiment returned as a result
        /// </summary>
        /// <param name="binding">The FaceSentimentAnalyzerBinding instance holding the input and output features to evaluate</param>
        /// <returns>An IAsyncAction that completes once evaluation has finished</returns>
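        /// <example>
        /// A minimal usage sketch from a consumer's perspective (assuming the skill and binding are
        /// created via the skill descriptor's factory methods and that the binding exposes a
        /// SetInputImageAsync helper, as in the companion sample code):
        /// <code>
        /// var skill = await skillDescriptor.CreateSkillAsync() as FaceSentimentAnalyzer;
        /// var binding = await skill.CreateSkillBindingAsync() as FaceSentimentAnalyzerBinding;
        /// await binding.SetInputImageAsync(videoFrame);
        /// await skill.EvaluateAsync(binding);
        /// var scoresFeature = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACESENTIMENTSCORES];
        /// </code>
        /// </example>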
        public IAsyncAction EvaluateAsync(ISkillBinding binding)
        {
            FaceSentimentAnalyzerBinding bindingObj = binding as FaceSentimentAnalyzerBinding;

            if (bindingObj == null)
            {
                throw new ArgumentException("Invalid ISkillBinding parameter: This skill handles evaluation of FaceSentimentAnalyzerBinding instances only");
            }

            return AsyncInfo.Run(async (token) =>
            {
                // Retrieve input frame from the binding object
                VideoFrame inputFrame = (binding[FaceSentimentAnalyzerConst.SKILL_INPUTNAME_IMAGE].FeatureValue as SkillFeatureImageValue).VideoFrame;
                SoftwareBitmap softwareBitmapInput = inputFrame.SoftwareBitmap;

                // Retrieve a SoftwareBitmap to run face detection
                if (softwareBitmapInput == null)
                {
                    if (inputFrame.Direct3DSurface == null)
                    {
                        throw new ArgumentException("An invalid input frame has been bound: it provides neither a SoftwareBitmap nor a Direct3DSurface");
                    }
                    softwareBitmapInput = await SoftwareBitmap.CreateCopyFromSurfaceAsync(inputFrame.Direct3DSurface);
                }

                // Run face detection and retrieve face detection result
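                // (note: FaceDetector only accepts a subset of pixel formats, e.g. Nv12 or Gray8;
                // the bound input frame is assumed to be in a compatible format)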
                var faceDetectionResult = await m_faceDetector.DetectFacesAsync(softwareBitmapInput);

                // Retrieve face rectangle feature from the binding object
                var faceRectangleFeature = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACERECTANGLE];

                // Retrieve face sentiment scores feature from the binding object
                var faceSentimentScores = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACESENTIMENTSCORES];

                // If a face is found, update face rectangle feature
                if (faceDetectionResult.Count > 0)
                {
                    // Retrieve the face bound and enlarge it by adding half the face width on each side (guarding against
                    // unsigned underflow at the top-left corner) while also clamping to the frame dimensions
                    BitmapBounds faceBound = faceDetectionResult[0].FaceBox;
                    var additionalOffset = faceBound.Width / 2;
                    faceBound.X = (uint)Math.Max(0, (long)faceBound.X - additionalOffset);
                    faceBound.Y = (uint)Math.Max(0, (long)faceBound.Y - additionalOffset);
                    faceBound.Width = (uint)Math.Min(faceBound.Width + 2 * additionalOffset, softwareBitmapInput.PixelWidth - faceBound.X);
                    faceBound.Height = (uint)Math.Min(faceBound.Height + 2 * additionalOffset, softwareBitmapInput.PixelHeight - faceBound.Y);

                    // Set the face rectangle SkillFeatureValue in the skill binding object
                    // note that values are in normalized coordinates between [0, 1] for ease of use
                    await faceRectangleFeature.SetFeatureValueAsync(
                        new List<float>()
                    {
                        (float)faceBound.X / softwareBitmapInput.PixelWidth,                      // left
                        (float)faceBound.Y / softwareBitmapInput.PixelHeight,                     // top
                        (float)(faceBound.X + faceBound.Width) / softwareBitmapInput.PixelWidth,  // right
                        (float)(faceBound.Y + faceBound.Height) / softwareBitmapInput.PixelHeight // bottom
                    });

                    // Bind the WinML input frame with the adequate face bounds specified as metadata
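                    // (providing "BitmapBounds" in the PropertySet makes WinML crop the frame to the enlarged face region before inference)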
                    bindingObj.m_winmlBinding.Bind(
                        FaceSentimentAnalyzerConst.WINML_MODEL_INPUTNAME, // WinML feature name
                        inputFrame,                                       // VideoFrame
                        new PropertySet()                                 // VideoFrame bounds
                    {
                        { "BitmapBounds",
                          PropertyValue.CreateUInt32Array(new uint[] { faceBound.X, faceBound.Y, faceBound.Width, faceBound.Height }) }
                    });

                    // Run WinML evaluation (passing an empty string as the optional correlation id)
                    var winMLEvaluationResult = await m_winmlSession.EvaluateAsync(bindingObj.m_winmlBinding, "");
                    var winMLModelResult = (winMLEvaluationResult.Outputs[FaceSentimentAnalyzerConst.WINML_MODEL_OUTPUTNAME] as TensorFloat).GetAsVectorView();
                    var predictionScores = SoftMax(winMLModelResult);

                    // Set the SkillFeatureValue in the skill binding object related to the face sentiment scores for each possible SentimentType
                    // note that we SoftMax the output of WinML to give a score normalized between [0, 1] for ease of use
                    await faceSentimentScores.SetFeatureValueAsync(predictionScores);
                }
                else // if no face was found, reset the output SkillFeatureValues to 0s
                {
                    await faceRectangleFeature.SetFeatureValueAsync(FaceSentimentAnalyzerConst.ZeroFaceRectangleCoordinates);
                    await faceSentimentScores.SetFeatureValueAsync(FaceSentimentAnalyzerConst.ZeroFaceSentimentScores);
                }
            });
        }
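
        // A minimal sketch of the SoftMax helper referenced in EvaluateAsync above, assuming it
        // normalizes the raw WinML output logits into probabilities in [0, 1] that sum to 1
        // (requires System.Linq; the shipped implementation may differ in detail):
        private List<float> SoftMax(IReadOnlyList<float> inputs)
        {
            // Subtract the max logit before exponentiating to keep Math.Exp from overflowing
            float maxValue = inputs.Max();
            var exponentials = inputs.Select(input => (float)Math.Exp(input - maxValue)).ToList();
            float sum = exponentials.Sum();

            // Normalize so all scores sum to 1
            return exponentials.Select(exponential => exponential / sum).ToList();
        }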