/// <summary>
/// Loads the most recent webcam capture, shows it in the left-hand image
/// display, and runs face detection on it, populating <c>LeftResultCollection</c>.
/// </summary>
/// <param name="mainWindowInstance">Main window used for status logging.</param>
private async Task SetWebcamImage(MainWindow mainWindowInstance)
{
    // Resolve the latest webcam capture and prepare it for on-screen rendering.
    var pickedImagePath = System.IO.Path.GetFullPath(ImageNameHelper.GetLatestWebcamImage());
    var renderingImage = FaceRecognitionHelper.LoadImageAppliedOrientation(pickedImagePath);
    var imageInfo = FaceRecognitionHelper.GetImageInfoForRendering(renderingImage);
    LeftImageDisplay.Source = renderingImage;

    mainWindowInstance.Log($"Request: Detecting in {pickedImagePath}");

    // Clear results from any previous detection before adding new ones.
    LeftResultCollection.Clear();
    var detectedFaces = await MicrosoftApiHelper.DetectFaces(pickedImagePath, mainWindowInstance, imageInfo);
    foreach (var face in detectedFaces)
    {
        LeftResultCollection.Add(face);
    }
}
/// <summary>
/// Loads the latest webcam image, calls the Face API detection endpoint with a
/// full set of face attributes, and populates <c>DetectedFaces</c> (attribute
/// details) and <c>ResultCollection</c> (rectangles scaled for rendering).
/// </summary>
/// <remarks>
/// NOTE(review): this is <c>async void</c> and is not an event handler, so any
/// exception other than <see cref="FaceAPIException"/> (e.g. an I/O failure in
/// <c>File.OpenRead</c>) would be unobservable and crash the process. Consider
/// changing the return type to <c>Task</c> once callers can be updated.
/// </remarks>
private async void SetImageAndDetect()
{
    var mainWindowInstance = (MainWindow)Application.Current.MainWindow;

    // Pick the most recent webcam capture and show it immediately.
    var pickedImagePath = ImageNameHelper.GetLatestWebcamImage();
    var renderingImage = FaceRecognitionHelper.LoadImageAppliedOrientation(pickedImagePath);
    var imageInfo = FaceRecognitionHelper.GetImageInfoForRendering(renderingImage);
    SelectedFile = renderingImage;

    // Reset UI state from any previous detection run.
    ResultCollection.Clear();
    DetectedFaces.Clear();
    DetectedResultsInText = "Detecting...";

    mainWindowInstance.Log($"Request: Detecting {pickedImagePath}");

    // Call detection REST API.
    using (var fStream = File.OpenRead(pickedImagePath))
    {
        try
        {
            var faceAttributeTypes = new FaceAttributeType[]
            {
                FaceAttributeType.Gender, FaceAttributeType.Age, FaceAttributeType.Smile,
                FaceAttributeType.Glasses, FaceAttributeType.HeadPose, FaceAttributeType.FacialHair,
                FaceAttributeType.Emotion, FaceAttributeType.Hair, FaceAttributeType.Makeup,
                FaceAttributeType.Occlusion, FaceAttributeType.Accessories, FaceAttributeType.Noise,
                FaceAttributeType.Exposure, FaceAttributeType.Blur
            };
            var faceServiceClient = new FaceServiceClient(Constants.SubscriptionKey, Constants.ApiEndpoint);
            var faces = await faceServiceClient.DetectAsync(fStream, false, true, faceAttributeTypes);

            mainWindowInstance.Log(string.Format("Response: Success. Detected {0} face(s) in {1}", faces.Length, pickedImagePath));
            DetectedResultsInText = string.Format("Info: {0} face(s) has been detected", faces.Length);

            // Project each service result into the UI-bound Face model.
            foreach (var face in faces)
            {
                DetectedFaces.Add(new Face()
                {
                    ImageFile = SelectedFile,
                    Left = face.FaceRectangle.Left,
                    Top = face.FaceRectangle.Top,
                    Width = face.FaceRectangle.Width,
                    Height = face.FaceRectangle.Height,
                    FaceId = face.FaceId.ToString(),
                    Age = string.Format("{0:#} years old", face.FaceAttributes.Age),
                    Gender = face.FaceAttributes.Gender,
                    HeadPose = string.Format("Pitch: {0}, Roll: {1}, Yaw: {2}",
                        Math.Round(face.FaceAttributes.HeadPose.Pitch, 2),
                        Math.Round(face.FaceAttributes.HeadPose.Roll, 2),
                        Math.Round(face.FaceAttributes.HeadPose.Yaw, 2)),
                    FacialHair = string.Format("FacialHair: {0}",
                        face.FaceAttributes.FacialHair.Moustache + face.FaceAttributes.FacialHair.Beard + face.FaceAttributes.FacialHair.Sideburns > 0 ? "Yes" : "No"),
                    Glasses = string.Format("GlassesType: {0}", face.FaceAttributes.Glasses.ToString()),
                    Emotion = $"{GetEmotion(face.FaceAttributes.Emotion)}",
                    Hair = string.Format("Hair: {0}", GetHair(face.FaceAttributes.Hair)),
                    Makeup = string.Format("Makeup: {0}", ((face.FaceAttributes.Makeup.EyeMakeup || face.FaceAttributes.Makeup.LipMakeup) ? "Yes" : "No")),
                    EyeOcclusion = string.Format("EyeOccluded: {0}", ((face.FaceAttributes.Occlusion.EyeOccluded) ? "Yes" : "No")),
                    ForeheadOcclusion = string.Format("ForeheadOccluded: {0}", (face.FaceAttributes.Occlusion.ForeheadOccluded ? "Yes" : "No")),
                    MouthOcclusion = string.Format("MouthOccluded: {0}", (face.FaceAttributes.Occlusion.MouthOccluded ? "Yes" : "No")),
                    Accessories = $"{GetAccessories(face.FaceAttributes.Accessories)}",
                    Blur = string.Format("Blur: {0}", face.FaceAttributes.Blur.BlurLevel.ToString()),
                    Exposure = string.Format("{0}", face.FaceAttributes.Exposure.ExposureLevel.ToString()),
                    Noise = string.Format("Noise: {0}", face.FaceAttributes.Noise.NoiseLevel.ToString()),
                });
            }

            // Convert detection result into UI binding object for rendering.
            foreach (var face in FaceRecognitionHelper.CalculateFaceRectangleForRendering(faces, MaxImageSize, imageInfo))
            {
                ResultCollection.Add(face);
            }
        }
        catch (FaceAPIException ex)
        {
            // Surface the service error to the log; UI state was already cleared above.
            mainWindowInstance.Log(string.Format("Response: {0}. {1}", ex.ErrorCode, ex.ErrorMessage));
            return;
        }
    }
}
/// <summary>
/// Check if authentication was successful: detects a single face in the latest
/// webcam image and in the stored owner image, then calls the Face API verify
/// endpoint to decide whether both belong to the same person.
/// </summary>
/// <param name="mainWindowInstance">Main window used for logging and navigation.</param>
/// <returns>
/// <c>true</c> when the service reports the two faces as identical; <c>false</c>
/// on mismatch, service error, or when either image does not contain exactly one face.
/// </returns>
private async Task<bool> VerifyAuth(MainWindow mainWindowInstance)
{
    // Detect faces in the latest webcam capture (left side).
    var pickedImagePath = System.IO.Path.GetFullPath(ImageNameHelper.GetLatestWebcamImage());
    var renderingImage = FaceRecognitionHelper.LoadImageAppliedOrientation(pickedImagePath);
    var imageInfo = FaceRecognitionHelper.GetImageInfoForRendering(renderingImage);

    mainWindowInstance.Log($"Request: Detecting in {pickedImagePath}");
    LeftResultCollection.Clear();
    var detectedFaces = await MicrosoftApiHelper.DetectFaces(pickedImagePath, mainWindowInstance, imageInfo);
    foreach (var face in detectedFaces)
    {
        LeftResultCollection.Add(face);
    }

    // Detect faces in the stored owner reference image (right side).
    var pickedImagePath2 = System.IO.Path.GetFullPath(Constants.OwnerImagePath);
    var renderingImage2 = FaceRecognitionHelper.LoadImageAppliedOrientation(pickedImagePath2);
    var imageInfo2 = FaceRecognitionHelper.GetImageInfoForRendering(renderingImage2);

    mainWindowInstance.Log($"Request: Detecting in {pickedImagePath2}");
    // Clear last time detection results.
    RightResultCollection.Clear();
    detectedFaces = await MicrosoftApiHelper.DetectFaces(pickedImagePath2, mainWindowInstance, imageInfo2);
    foreach (var face in detectedFaces)
    {
        RightResultCollection.Add(face);
    }

    // Verification requires exactly one face on each side.
    if (LeftResultCollection.Count == 1 && RightResultCollection.Count == 1)
    {
        var faceId1 = LeftResultCollection[0].FaceId;
        var faceId2 = RightResultCollection[0].FaceId;

        mainWindowInstance.Log(string.Format("Request: Verifying face {0} and {1}", faceId1, faceId2));

        // Call verify REST API with the two face ids.
        try
        {
            var faceServiceClient = new FaceServiceClient(Constants.SubscriptionKey, Constants.ApiEndpoint);
            var res = await faceServiceClient.VerifyAsync(Guid.Parse(faceId1), Guid.Parse(faceId2));

            mainWindowInstance.Log(string.Format("Response: Confidence = {0:0.00}, Face {1} and {2} {3} to the same person",
                res.Confidence, faceId1, faceId2, res.IsIdentical ? "belong" : "not belong"));
            return res.IsIdentical;
        }
        catch (FaceAPIException ex)
        {
            mainWindowInstance.Log(string.Format("Response: {0}. {1}", ex.ErrorCode, ex.ErrorMessage));
            return false;
        }
    }
    else
    {
        MessageBox.Show("Verification accepts two faces as input, please pick images with only one detectable face in it.", "Warning", MessageBoxButton.OK);
        mainWindowInstance.NavigateTo(typeof(WebcamPage));
        return false;
    }
}