/// <summary>
/// Converts the landmark tensor output of the Onnx model into a list of facial
/// landmark points mapped into the coordinate space of the original image.
/// </summary>
/// <param name="landmarkTensors">Landmark output of the Onnx model; a flat tensor of
/// interleaved (x, y) pairs — presumably normalized to [0, 1] relative to the face
/// crop, since they are scaled by the region size below.</param>
/// <param name="imageX">X start position of the face region in the image.</param>
/// <param name="imageY">Y start position of the face region in the image.</param>
/// <param name="imageWidth">Width of the face region.</param>
/// <param name="imageHeight">Height of the face region.</param>
/// <returns>The facial landmarks translated into full-image coordinates.</returns>
public static FaceLandmarks Predict(TensorFloat landmarkTensors, int imageX, int imageY, int imageWidth, int imageHeight)
{
    var faceLandmarks = new FaceLandmarks();

    IReadOnlyList<float> vectorLandmarks = landmarkTensors.GetAsVectorView();
    IList<float> landmarkFloatList = vectorLandmarks.ToList();

    // The tensor stores interleaved (x, y) pairs, so the number of landmark
    // points is half the tensor length (rounded up).
    long numAnchors = (long)Math.Ceiling(landmarkTensors.Shape[1] * 0.5);
    for (var i = 0; i < numAnchors; i++)
    {
        // Scale each coordinate by the region size and offset by the region
        // origin to map the point back into full-image coordinates.
        var mark = new FaceLandmark
        {
            X = landmarkFloatList[i * 2] * imageWidth + imageX,
            Y = landmarkFloatList[i * 2 + 1] * imageHeight + imageY
        };
        faceLandmarks.landmarkList.Add(mark);
    }

    return faceLandmarks;
}
/// <summary>
/// Loads the picked image, runs face detection on it, and — when detail view is
/// enabled — runs facial-landmark detection on each detected face and estimates
/// the camera distance from the closest face. The function should execute only
/// after the image is loaded.
/// </summary>
private async void DetectFaces()
{
    // Detect faces using the Onnx detection model.
    rfbInput.input = FaceDetectionHelper.SoftwareBitmapToTensorFloat(imageInputData);
    rfbOutput = await rfbModelGen.EvaluateAsync(rfbInput);
    List<FaceDetectionRectangle> faceRects = (List<FaceDetectionRectangle>)FaceDetectionHelper.Predict(rfbOutput.scores, rfbOutput.boxes);

    // Detect facial landmarks using the Onnx landmark model.
    List<FaceLandmarks> faceLandmarksList = new List<FaceLandmarks>();
    if (ShowDetail)
    {
        // Sentinel value: any real face should measure closer than this.
        closestDistance = 10000.0f;
        System.Diagnostics.Debug.WriteLine("Total: " + faceRects.Count);
        foreach (FaceDetectionRectangle faceRect in faceRects)
        {
            int rectX = (int)faceRect.X1;
            int rectY = (int)faceRect.Y1;
            int rectWidth = (int)(faceRect.X2 - faceRect.X1) + 1;
            int rectHeight = (int)(faceRect.Y2 - faceRect.Y1) + 1;

            // Crop only the image region with the face.
            // BUGFIX: the bitmap is wrapped in a using statement so it is
            // disposed on every exit path; previously the early `continue`
            // after a failed crop skipped Dispose() and leaked the bitmap.
            using (SoftwareBitmap croppedBitmap = new SoftwareBitmap(
                imageInputData.BitmapPixelFormat,
                FaceLandmarkHelper.inputImageDataSize,
                FaceLandmarkHelper.inputImageDataSize,
                BitmapAlphaMode.Ignore))
            {
                System.Diagnostics.Debug.WriteLine("Crop");
                bool cropped = openCVHelper.CropResize(imageInputData, croppedBitmap, rectX, rectY, rectWidth, rectHeight);
                if (!cropped)
                {
                    continue;
                }

                // Run the landmark model on the cropped face region.
                landmarkInput.input = FaceDetectionHelper.SoftwareBitmapToTensorFloat(croppedBitmap);
                landmarkOutput = await landmarkModelGen.EvaluateAsync(landmarkInput);
                FaceLandmarks faceLandmarks = (FaceLandmarks)FaceLandmarkHelper.Predict(landmarkOutput.output, rectX, rectY, rectWidth, rectHeight);

                // Estimate camera distance from the eye distance of each valid
                // face, keeping the closest one.
                if (faceLandmarks.IsValid)
                {
                    System.Diagnostics.Debug.WriteLine("Valid: " + faceLandmarks.landmarkList.Count);
                    float distance = ImageHelper.CalculateCameraDistance(cameraFocalLength, faceLandmarks.EyeDistance);
                    closestDistance = distance < closestDistance ? distance : closestDistance;
                    faceLandmarksList.Add(faceLandmarks);
                }
            }
        }

        // No valid face found: report 0 instead of the sentinel.
        closestDistance = closestDistance == 10000.0f ? 0.0f : closestDistance;
        if (CameraMode)
        {
            detailText.Text = $"Distance: {(int)closestDistance} cm";
        }
        else
        {
            detailText.Text = "";
        }
    }
    else
    {
        detailText.Text = "";
    }

    // Draw rectangles or facial landmarks of detected faces on top of the image.
    ClearPreviousFaceRectangles();
    if (ShowDetail)
    {
        drawingFace.DrawFaceAll(faceRects, faceLandmarksList);
    }
    else
    {
        drawingFace.DrawFaceRetangles(faceRects);
    }
    foreach (Path path in drawingFace.pathes)
    {
        imageGrid.Children.Add(path);
    }
    faceLandmarksList.Clear();
}