/// <summary>
/// Delegate to define the actions taken upon the photo capture of the camera class.
/// This one uses the memory stream of the photo capture to identify the faces in the image,
/// using the local face detection instance.
/// </summary>
/// <param name="memoryStream">The memory stream containing the captured image.</param>
private async Task IdentifyFaces_Delegate(MemoryStream memoryStream)
{
    var faces = await faceDetection.DetectFaces(memoryStream);
    if (faces.Length == 0)
    {
        // nothing detected in this frame, nothing to identify
        return;
    }

    // you could call the face API once for all faces together,
    // but there's no way to map the found persons to the detected face rectangles,
    // so we have to call the API per detected face rectangle
    foreach (var face in faces)
    {
        try
        {
            var persons = await faceDetection.IdentifyFace(new[] { face });

            // set identities when there is at least one person within the viewport
            if (persons.Count > 0)
            {
                var person = persons.FirstOrDefault();
                if (person != null)
                {
                    identityInterpolation.IdentifiedFace(face, person);
                }
            }
            else
            {
                // remove the current identity when there is no person identified
                currentIdentity = null;
            }
        }
        catch (FaceAPIException)
        {
            // if the person group is not yet trained or face identification fails
            // for any other reason, continue with the next detected face (best effort)
        }
    }

    // after adding new identified faces, switch to interact with the possibly new person
    // within the viewport, so make sure to get the new largest face object
    // (event won't be fired again!)
    var trackedIdentity = identityInterpolation.GetLargestFace();
    if (trackedIdentity == null)
    {
        return;
    }

    // handle the new person as found when there is no current identity yet,
    // or when the identity within the viewport has actually changed
    if (currentIdentity == null || currentIdentity.PersonId != trackedIdentity.PersonId)
    {
        OnPersonFound(trackedIdentity);
    }
}
/// <summary>
/// Called when a new main person to interact with has been found / appeared within the viewport.
/// The current identity will be set to this identity and the corresponding event will be fired,
/// so the brain can subscribe to / act upon this event.
/// </summary>
/// <param name="trackedIdentity">The new TrackedIdentity to interact with.</param>
private void OnPersonFound(TrackedIdentity trackedIdentity)
{
    currentIdentity = trackedIdentity;

    // raise the event thread-safely: the null-conditional invoke reads the delegate
    // field exactly once, equivalent to the manual local-copy pattern but idiomatic C# 6+
    NewActivePersonEvent?.Invoke(currentIdentity);
}
/// <summary>
/// Draws a face box around the given face.
/// </summary>
/// <param name="canvas">The canvas to draw the face box on.</param>
/// <param name="face">The (detected) face to draw the box around.</param>
/// <param name="widthScale">The horizontal canvas scaling factor.</param>
/// <param name="heightScale">The vertical canvas scaling factor.</param>
private void DrawFaceBox(Canvas canvas, TrackedIdentity face, double widthScale, double heightScale)
{
    // scale the detected face rectangle from image coordinates to canvas coordinates
    var faceBox = face.FaceBox;
    var scaledLeft = (uint)(faceBox.X / widthScale);
    var scaledTop = (uint)(faceBox.Y / heightScale);

    var outline = new Rectangle
    {
        Tag = faceBox,
        Width = (uint)(faceBox.Width / widthScale),
        Height = (uint)(faceBox.Height / heightScale),
        Fill = transparentBrush,
        Stroke = faceBoxBrush,
        StrokeThickness = 3d,
        Margin = new Thickness(scaledLeft, scaledTop, 0, 0)
    };

    canvas.Children.Add(outline);
}
/// <summary>
/// Builds up a string containing all available information of the given face.
/// </summary>
/// <param name="face">The (detected) face to build up the caption text for.</param>
/// <returns>A multi-line caption with name, appearance and top emotion, or an empty string.</returns>
private string BuildFaceCaption(TrackedIdentity face)
{
    const int guidLength = 36;
    const string delimiter = "-";

    var caption = string.Empty;

    // add the name
    if (!string.IsNullOrEmpty(face.Name))
    {
        caption = face.Name;

        // if the face name contains a name and a Guid, split those values over two lines
        // (use the delimiter constant consistently instead of a duplicated "-" literal)
        if (caption.Contains(delimiter) && caption.Length > guidLength)
        {
            var delimiterPosition = caption.IndexOf(delimiter, StringComparison.Ordinal);
            var namePart = caption.Substring(0, delimiterPosition);
            var guidPart = caption.Substring(delimiterPosition + 1);
            caption = $"{namePart}{Environment.NewLine}{guidPart}";
            caption = FirstCharacterToUpperCase(caption);
        }
    }

    // add (parts of) the detected appearance
    if (face.Appearance != null)
    {
        if (!string.IsNullOrEmpty(caption))
        {
            caption += Environment.NewLine;
        }
        caption += $"{face.Appearance.Age} y/o {face.Appearance.Gender}";
    }

    // add the emotions (only the top scoring one, as a percentage)
    if (face.EmotionScores != null)
    {
        if (!string.IsNullOrEmpty(caption))
        {
            caption += Environment.NewLine;
        }
        var topScoringEmotion = face.EmotionScores.ToRankedList().First();
        caption += $"{topScoringEmotion.Key}: {(int)Math.Round(topScoringEmotion.Value * 100)}%";
    }

    return caption;
}
/// <summary>
/// Draws a textual caption under the given face.
/// </summary>
/// <param name="canvas">The canvas to draw the face caption on.</param>
/// <param name="face">The (detected) face to draw the caption under.</param>
/// <param name="widthScale">The horizontal canvas scaling factor.</param>
/// <param name="heightScale">The vertical canvas scaling factor.</param>
private void DrawFaceCaption(Canvas canvas, TrackedIdentity face, double widthScale, double heightScale)
{
    const double fontSize = 20d;

    var caption = BuildFaceCaption(face);
    if (string.IsNullOrEmpty(caption))
    {
        // nothing known about this face, so no caption to render
        return;
    }

    // position the caption directly below the (scaled) face box
    var faceBox = face.FaceBox;
    var captionLeft = (uint)(faceBox.X / widthScale);
    var captionTop = (uint)((faceBox.Y + faceBox.Height) / heightScale);

    var captionBlock = new TextBlock
    {
        Tag = faceBox,
        Text = caption,
        Foreground = onScreenTextBrush,
        FontFamily = defaultFamily,
        FontSize = fontSize / widthScale,
        FontWeight = FontWeights.Bold,
        Margin = new Thickness(captionLeft, captionTop, 0, 0)
    };

    canvas.Children.Add(captionBlock);
}
/// <summary>
/// When a new active person has been identified, this event gets triggered.
/// </summary>
/// <param name="identity">The TrackedIdentity of the new person to interact with.</param>
private async void Eyes_NewActivePersonEvent(TrackedIdentity identity)
{
    // only continue if we have an identity
    if (identity == null)
    {
        return;
    }

    if (identity.PersonId != Guid.Empty)
    {
        // if there is a personID, the person was identified by face recognition
        ReportEvent("brain", $"sensed a new person with ID {identity.PersonId}");

        // sets the current identity, which can be used by brains
        currentIdentityPersonId = identity.PersonId.ToString();
        var client = GetClientForCurrentUser();

        // identify the current user with sitecore; this will return the analytics cookie
        // and all activities will be logged to the right identity in XDB
        var response = await client.Identify();

        // if the gender isn't set within the identified profile, get the gender from the
        // Face API appearance and update the profile.
        // Guard Appearance against null here as well (the anonymous branch below already
        // does) — it previously caused a NullReferenceException when no appearance data
        // was available for an identified face.
        if (string.IsNullOrEmpty(response.Gender) && identity.Appearance != null)
        {
            var p = new Profile { Gender = identity.Appearance.Gender };
            await client.UpdateProfile(p);
        }

        if (string.IsNullOrEmpty(response.Name))
        {
            // if there is no name known yet, ask the name of the person
            // (should be triggered by engagement plan? probably in separate action)
            await AskForName();
        }
        else
        {
            // else if the name is known, greet the person and get the identify intent action
            // (should be renamed)
            var intentAction = await client.GetIntentAction("identify");
            var text = intentAction.Reply.Replace("{name}", response.Name);
            var emotion = intentAction.Emotion;
            await SayTextWithEmotion(text, emotion);
        }
    }
    else
    {
        // if unknown (not identified):
        // set anonymous ID and identify to Sitecore without a name
        currentIdentityPersonId = AnonymousPersonId;
        var client = GetClientForCurrentUser();
        await client.Identify();

        if (identity.Appearance != null)
        {
            var p = new Profile { Gender = identity.Appearance.Gender };
            await client.UpdateProfile(p);
        }

        // ask name (engagement, trigger action to ask name) - no other actions should occur
        await AskForName();
    }
}