/// <summary>
/// Delegate to define the actions taken upon the photo capture of the camera class.
/// Uses the memory stream of the photo capture to identify the faces in the image,
/// using the local face detection instance.
/// </summary>
/// <param name="memoryStream">The memory stream containing the captured image.</param>
private async Task IdentifyFaces_Delegate(MemoryStream memoryStream)
{
    var faces = await faceDetection.DetectFaces(memoryStream);
    if (faces.Length == 0)
    {
        return;
    }

    // You could call the Face API once for all faces together, but there is no way
    // to map the identified persons back to the detected face rectangles, so the
    // API has to be called once per detected face rectangle.
    foreach (var face in faces)
    {
        try
        {
            var persons = await faceDetection.IdentifyFace(new[] { face });
            if (persons.Count > 0)
            {
                // At least one person identified within the viewport: feed the first
                // match into the interpolation so it can be tracked.
                var person = persons.FirstOrDefault();
                if (person != null)
                {
                    identityInterpolation.IdentifiedFace(face, person);
                }
            }
            else
            {
                // Nobody could be identified for this face rectangle, so drop the
                // currently tracked identity.
                currentIdentity = null;
            }
        }
        catch (FaceAPIException)
        {
            // Deliberately best-effort: if the person group is not yet trained, or
            // face identification fails for any other reason, continue with the
            // remaining faces instead of aborting the whole pass.
        }
    }

    // After adding newly identified faces, switch to interact with the (possibly new)
    // person within the viewport. The largest-face event won't be fired again, so
    // fetch the new largest face object explicitly here.
    var trackedIdentity = identityInterpolation.GetLargestFace();
    if (trackedIdentity == null)
    {
        return;
    }

    // Raise the person-found event only when the tracked person actually changed:
    // either nobody was tracked before, or a different person is now the largest face.
    if (currentIdentity == null || currentIdentity.PersonId != trackedIdentity.PersonId)
    {
        OnPersonFound(trackedIdentity);
    }
}