/// <summary>
/// Detect the orientation of the head in the current video frame.
/// </summary>
/// <param name="shape">The landmark points to use</param>
/// <param name="cameraMatrix">The camera calibration matrix to use</param>
/// <param name="rotationMatrix">The detected head rotation matrix</param>
/// <param name="translationMatrix">The detected head translation matrix</param>
/// <param name="coefficientMatrix">The detected coefficient matrix</param>
public static void DetectHeadAngle(
    FullObjectDetection shape,
    MatOfDouble cameraMatrix,
    out Mat rotationMatrix,
    out Mat translationMatrix,
    out MatOfDouble coefficientMatrix)
{
    // the 3d reference face model
    var faceModel = Utility.GetFaceModel();

    // collect the six 2d landmark points that correspond to the model points
    var landmarkIndices = new int[] { 30, 8, 36, 45, 48, 54 };
    var points = new OpenCvSharp.Point2d[landmarkIndices.Length];
    for (var n = 0; n < landmarkIndices.Length; n++)
    {
        var part = shape.GetPart((uint)landmarkIndices[n]);
        points[n] = new OpenCvSharp.Point2d(part.X, part.Y);
    }
    var landmarks = new MatOfPoint2d(1, 6, points);

    // zero distortion coefficients
    var distortion = new MatOfDouble(4, 1);
    distortion.SetTo(0);

    // solve for head rotation and translation
    Mat rotationResult = new MatOfDouble();
    Mat translationResult = new MatOfDouble();
    Cv2.SolvePnP(faceModel, landmarks, cameraMatrix, distortion, rotationResult, translationResult);

    // hand the results back to the caller
    rotationMatrix = rotationResult;
    translationMatrix = translationResult;
    coefficientMatrix = distortion;
}
/// <summary>
/// Draw a straight line indicating head pose on the specified image.
/// </summary>
/// <param name="rotation">The rotation matrix of the head.</param>
/// <param name="translation">The translation matrix of the head.</param>
/// <param name="cameraMatrix">The camera calibration matrix.</param>
/// <param name="coefficients">The coefficient matrix.</param>
/// <param name="shape">The list of facial landmark points.</param>
/// <param name="image">The image to draw on.</param>
public static void DrawPoseLine(
    Mat rotation,
    Mat translation,
    MatOfDouble cameraMatrix,
    MatOfDouble coefficients,
    FullObjectDetection shape,
    Bitmap image)
{
    // project a model point 1000 units in front of the nose into image space
    var noseDirectionModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
    var projected = new MatOfPoint2d();
    Cv2.ProjectPoints(noseDirectionModel, rotation, translation, cameraMatrix, coefficients, projected);

    // landmark point 30 is the tip of the nose
    var nosePart = shape.GetPart(30);
    var noseTip = new OpenCvSharp.Point2d(nosePart.X, nosePart.Y);

    // draw the pose line from the nose tip towards the projected point
    var target = projected.At<Point2d>(0);
    var pen = new Pen(Brushes.White, 2);
    using (var graphics = Graphics.FromImage(image))
    {
        graphics.DrawLine(pen, (int)noseTip.X, (int)noseTip.Y, (int)target.X, (int)target.Y);
    }
}
/// <summary>
/// Compute the distance between the centers of the two eyes.
/// </summary>
/// <param name="det">The detected facial landmark points.</param>
/// <returns>The distance between the left and right eye centers.</returns>
private static double InterocularDistance(FullObjectDetection det)
{
    // center of the left eye = average of the six landmarks 36-41 around it
    var left = new DPoint();
    for (var i = 36u; i <= 41; ++i)
    {
        left += det.GetPart(i);
    }
    left /= 6.0;

    // center of the right eye = average of the six landmarks 42-47 around it
    var right = new DPoint();
    for (var i = 42u; i <= 47; ++i)
    {
        right += det.GetPart(i);
    }
    right /= 6.0;

    // distance between the two eye centers
    return Dlib.Length(left - right);
}
public void Create2()
{
    // build a detection from a rectangle plus five explicit parts
    var rect = new Rectangle(10, 20, 40, 50);
    var points = new[]
    {
        new Point(10, 50),
        new Point(20, 40),
        new Point(30, 30),
        new Point(40, 20),
        new Point(50, 10),
    };

    using (var detection = new FullObjectDetection(rect, points))
    {
        // the stored rectangle must match the one passed in
        var stored = detection.Rect;
        Assert.Equal(rect.Left, stored.Left);
        Assert.Equal(rect.Right, stored.Right);
        Assert.Equal(rect.Top, stored.Top);
        Assert.Equal(rect.Bottom, stored.Bottom);

        // every stored part must match the corresponding input point
        Assert.Equal(detection.Parts, (uint)points.Length);
        for (var index = 0; index < points.Length; index++)
        {
            var part = detection.GetPart((uint)index);
            Assert.Equal(points[index].X, part.X);
            Assert.Equal(points[index].Y, part.Y);
        }
    }
}
// Compute the aspect-ratio value for the requested feature
// (left eye, right eye, or mouth) from its landmark points.
public double getEar(FullObjectDetection face, EarEnum earEnum)
{
    // pick the landmark index set for the requested feature
    var indices = earEnum == EarEnum.Left ? leftEarPoint
                : earEnum == EarEnum.Right ? rightEarPoint
                : mouthPoint;

    // collect the landmark points for those indices
    var points = new List<DlibDotNet.Point>();
    foreach (var index in indices)
    {
        points.Add(face.GetPart(index));
    }
    return getEarNum(points);
}
/// <summary>
/// Detect faces on the given frame, publish a 150x150 aligned face chip for
/// each face and the annotated frame to the main window.
/// </summary>
/// <param name="frame">The video frame to scan for faces.</param>
private void buscarrosto(Bitmap frame)
{
    // copy the frame's raw pixel data into a byte array for dlib
    Image<Rgb, Byte> imageCV = new Image<Rgb, byte>(frame);
    Emgu.CV.Mat mat = imageCV.Mat;
    var array = new byte[mat.Width * mat.Height * mat.ElementSize];
    mat.CopyTo(array);

    using (Array2D<RgbPixel> image = Dlib.LoadImageData<RgbPixel>(array, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * mat.ElementSize)))
    {
        using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())
        {
            var faces = fd.Operator(image);
            foreach (DlibDotNet.Rectangle face in faces)
            {
                // FIX: dispose the intermediate dlib objects — they wrap native
                // memory and were leaked on every detected face before.
                using (FullObjectDetection shape = _ShapePredictor.Detect(image, face))
                using (ChipDetails faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25))
                using (Array2D<RgbPixel> faceChip = Dlib.ExtractImageChip<RgbPixel>(image, faceChipDetail))
                {
                    // the bitmap is handed to the UI, so it is NOT disposed here
                    Bitmap bitmap1 = faceChip.ToBitmap<RgbPixel>();
                    MainWindow.main.Statusa1 = bitmap1;
                }

                // mark the detected face on the full frame
                Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
            }
        }

        // publish the annotated frame
        frame = image.ToBitmap<RgbPixel>();
        MainWindow.main.Statusa = frame;
    }
}
/// <summary>
/// Compute the rotation angle of a single face shape.
/// </summary>
/// <param name="shape">The detected facial landmark points.</param>
/// <returns>The rotation angle of the face.</returns>
public static double GetRotationAngle(FullObjectDetection shape)
{
    // delegate to the list-based overload and unwrap the single result
    var shapes = new List<FullObjectDetection>() { shape };
    return GetRotationAngle(shapes)[0];
}
/// <summary>
/// Detect whether the eyes of the face on camera are open.
/// </summary>
/// <param name="shape">The detected facial landmark points.</param>
/// <returns>True when the normalized open-eye score exceeds 30, false otherwise.</returns>
private bool AreEyesOpen(FullObjectDetection shape)
{
    // get all landmark points of the left eye (indices 36-41)
    var leftEye = from i in Enumerable.Range(36, 6)
                  let p = shape.GetPart((uint)i)
                  select new OpenCvSharp.Point(p.X, p.Y);

    // get all landmark points of the right eye (indices 42-47)
    var rightEye = from i in Enumerable.Range(42, 6)
                   let p = shape.GetPart((uint)i)
                   select new OpenCvSharp.Point(p.X, p.Y);

    // create a mask of the eye areas
    // NOTE(review): the mask size is hard-coded to 640x480, so landmarks outside
    // that area are clipped — confirm the camera frame really is 640x480.
    using (var mask = new Mat(new OpenCvSharp.Size(640, 480), MatType.CV_8UC1))
    {
        mask.SetTo(0);
        Cv2.FillConvexPoly(mask, leftEye, Scalar.White);
        Cv2.FillConvexPoly(mask, rightEye, Scalar.White);

        // count the number of pixels in the eye area
        var pixels = Cv2.CountNonZero(mask);

        // the maximum possible area is 40% of the surface area of both eyeballs,
        // using half the eye-corner distance (39-36 and 45-42) as the radius
        int r1 = (shape.GetPart(39).X - shape.GetPart(36).X) / 2;
        int r2 = (shape.GetPart(45).X - shape.GetPart(42).X) / 2;
        double normalizedArea = 0.4 * Math.PI * r1 * r1 + 0.4 * Math.PI * r2 * r2;

        // calculate fractional area and normalize on a 0-100 scale
        var value = (int)(100 * pixels / normalizedArea - 20);
        var eyeState = value >= 0 && value <= 100 ? value : 0;

        // eyes count as open above a score of 30
        return (eyeState > 30);
    }
}
/// <summary>
/// Detect the eye state of the face on camera.
/// </summary>
/// <param name="shape">The detected facial landmark points.</param>
/// <returns>True when the eyes are open, false otherwise.</returns>
/// <remarks>
/// Exercise placeholder: the real eye-state detection logic still has to be
/// written here; until then the method always reports open eyes.
/// </remarks>
private bool AreEyesOpen(FullObjectDetection shape)
{
    // ******************
    // ADD YOUR CODE HERE
    // ******************

    return (true); // replace this when done!
}
/// <summary>
/// Get the angle between an indexed landmark point and a reference point, in radians.
/// </summary>
/// <param name="shape">The facial landmark points.</param>
/// <param name="pointIndex">The index of the landmark to measure from.</param>
/// <param name="point">The reference point.</param>
/// <returns>The angle of the vector (landmark - point), in radians.</returns>
static float GetAngleBetween(ref FullObjectDetection shape, uint pointIndex, Point point)
{
    var landmark = shape.GetPart(pointIndex);
    var vector = landmark - point;

    // FIX: Atan2 is scale-invariant, so the previous pre-normalization of the
    // vector was redundant and produced NaN when the two points coincide
    // (0/0 length); using the raw components returns 0 for that case instead.
    return (float)Math.Atan2(vector.Y, vector.X);
}
/// <summary>
/// Sum the normalized distances from point 34 to the right upper-lip
/// landmarks 55, 54 and 53.
/// </summary>
static double GetRightLip(FullObjectDetection shape)
{
    // the distance 34-52 normalizes all lip distances
    var anchor = GetPoint(shape, 34);
    var right_lip_distance = GetDistance(anchor, GetPoint(shape, 52));

    // accumulate the normalized distances, in the same order as before
    var sum_dis = GetDistance(anchor, GetPoint(shape, 55)) / right_lip_distance
                + GetDistance(anchor, GetPoint(shape, 54)) / right_lip_distance
                + GetDistance(anchor, GetPoint(shape, 53)) / right_lip_distance;
    return sum_dis;
}
/// <summary>
/// Sum the normalized distances from point 34 to the left upper-lip
/// landmarks 49, 50 and 51.
/// </summary>
static double GetLeftLip(FullObjectDetection shape)
{
    // the distance 34-52 normalizes all lip distances
    var anchor = GetPoint(shape, 34);
    var left_lip_distance = GetDistance(anchor, GetPoint(shape, 52));

    // accumulate the normalized distances, in the same order as before
    var sum_dis = GetDistance(anchor, GetPoint(shape, 49)) / left_lip_distance
                + GetDistance(anchor, GetPoint(shape, 50)) / left_lip_distance
                + GetDistance(anchor, GetPoint(shape, 51)) / left_lip_distance;
    return sum_dis;
}
/// <summary>
/// Build a <see cref="FaceLandmarkDetail"/> from the parts of a detection.
/// </summary>
/// <param name="faceLandmark">The detection whose parts are copied.</param>
/// <returns>A new detail object holding every landmark point.</returns>
public static FaceLandmarkDetail From(FullObjectDetection faceLandmark)
{
    // copy every landmark part into a plain point array
    var count = faceLandmark.Parts;
    var points = new DlibDotNet.Point[count];
    for (uint i = 0; i < count; i++)
    {
        points[i] = faceLandmark.GetPart(i);
    }
    return new FaceLandmarkDetail(points);
}
/// <summary>
/// Sum the normalized distances from eyelid point 40 to the left eyebrow
/// landmarks 19, 20 and 21, plus a constant offset of 1.
/// </summary>
static double GetLeftEyebrow(FullObjectDetection shape)
{
    // the distance 40-22 normalizes all eyebrow distances
    var anchor = GetPoint(shape, 40);
    var left_eye_distance = GetDistance(anchor, GetPoint(shape, 22));

    // accumulate the normalized distances, in the same order as before
    var sum_dis = GetDistance(anchor, GetPoint(shape, 19)) / left_eye_distance
                + GetDistance(anchor, GetPoint(shape, 20)) / left_eye_distance
                + GetDistance(anchor, GetPoint(shape, 21)) / left_eye_distance
                + 1;
    return sum_dis;
}
/// <summary>
/// Sum the normalized distances from eyelid point 43 to the right eyebrow
/// landmarks 26, 25 and 24, plus a constant offset of 1.
/// </summary>
static double GetRightEyebrow(FullObjectDetection shape)
{
    // the distance 43-23 normalizes all eyebrow distances
    var anchor = GetPoint(shape, 43);
    var right_eye_distance = GetDistance(anchor, GetPoint(shape, 23));

    // accumulate the normalized distances, in the same order as before
    var sum_dis = GetDistance(anchor, GetPoint(shape, 26)) / right_eye_distance
                + GetDistance(anchor, GetPoint(shape, 25)) / right_eye_distance
                + GetDistance(anchor, GetPoint(shape, 24)) / right_eye_distance
                + 1;
    return sum_dis;
}
/// <summary>
/// Sum the distances from the right lip landmarks 52-54 to point 33,
/// each normalized by the distance between points 33 and 51.
/// </summary>
static float CalculateRightLip(FullObjectDetection shape)
{
    float normalisation = CalculateDistance(shape, 33, 51);

    // walk the right lip landmarks and accumulate the normalized distances
    float result = 0;
    var index = 52u;
    while (index <= 54)
    {
        result += CalculateDistance(shape, index, 33) / normalisation;
        index++;
    }
    return result;
}
/// <summary>
/// Sum the distances from the right eyebrow landmarks 22-25 to point 42,
/// each normalized by the distance between points 22 and 42.
/// </summary>
static float CalculateRightEyebrow(FullObjectDetection shape)
{
    float normalisation = CalculateDistance(shape, 22, 42);

    // walk the right eyebrow landmarks and accumulate the normalized distances
    float result = 0;
    var index = 22u;
    while (index <= 25)
    {
        result += CalculateDistance(shape, index, 42) / normalisation;
        index++;
    }
    return result;
}
/// <summary>
/// Sum the distances from the left eyebrow landmarks 18-21 to point 39,
/// each normalized by the distance between points 21 and 39.
/// </summary>
static float CalculateLeftEyebrow(FullObjectDetection shape)
{
    float normalisation = CalculateDistance(shape, 21, 39);

    // walk the left eyebrow landmarks and accumulate the normalized distances
    float result = 0;
    var index = 18u;
    while (index <= 21)
    {
        result += CalculateDistance(shape, index, 39) / normalisation;
        index++;
    }
    return result;
}
/// <summary>
/// Detect the orientation of the head in the current video frame.
/// </summary>
/// <param name="image">The current video frame.</param>
/// <param name="shape">The landmark points.</param>
private void DetectHeadPose(System.Drawing.Bitmap image, FullObjectDetection shape)
{
    // the six landmark indices matched against the 3d face model
    var indices = new int[] { 30, 8, 36, 45, 48, 54 };

    // the 3d reference face model
    var model = Utility.GetFaceModel();

    // gather the matching 2d landmark points
    var points = new OpenCvSharp.Point2d[indices.Length];
    for (var n = 0; n < indices.Length; n++)
    {
        var part = shape.GetPart((uint)indices[n]);
        points[n] = new OpenCvSharp.Point2d(part.X, part.Y);
    }
    var landmarks = new MatOfPoint2d(1, 6, points);

    // camera calibration matrix for the current frame size
    var cameraMatrix = Utility.GetCameraMatrix(image.Width, image.Height);

    // zero distortion coefficients
    var coeffs = new MatOfDouble(4, 1);
    coeffs.SetTo(0);

    // solve for head rotation and translation
    Mat rotation = new MatOfDouble();
    Mat translation = new MatOfDouble();
    Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

    // store the euler angles of the head
    headRotation = Utility.GetEulerMatrix(rotation);

    // project a model point 1000 units in front of the nose into 2d
    var poseModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
    var poseProjection = new MatOfPoint2d();
    Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

    using (Graphics g = Graphics.FromImage(image))
    {
        // draw the six landmark points
        foreach (var i in indices)
        {
            var point = shape.GetPart((uint)i);
            g.FillRectangle(Brushes.LightGreen, point.X - 5, point.Y - 5, 10, 10);
        }

        // draw a line from the tip of the nose in the direction of the head pose
        var noseTip = landmarks.At<Point2d>(0);
        var target = poseProjection.At<Point2d>(0);
        var pen = new Pen(Brushes.LightGreen, 4);
        g.DrawLine(pen, (int)noseTip.X, (int)noseTip.Y, (int)target.X, (int)target.Y);
    }
}
public void Create()
{
    // a detection built from just a rectangle must store that rectangle
    var rect = new Rectangle(10, 20, 40, 50);
    using (var detection = new FullObjectDetection(rect))
    {
        var stored = detection.Rect;
        Assert.Equal(rect.Left, stored.Left);
        Assert.Equal(rect.Right, stored.Right);
        Assert.Equal(rect.Top, stored.Top);
        Assert.Equal(rect.Bottom, stored.Bottom);
    }
}
/// <summary>
/// Draw the landmark points on the specified image.
/// </summary>
/// <param name="shape">The facial landmark points.</param>
/// <param name="image">The image to draw on.</param>
public static void DrawLandmarkPoints(
    FullObjectDetection shape,
    Bitmap image)
{
    using (Graphics graphics = Graphics.FromImage(image))
    {
        // draw a 4x4 white square centered on every landmark point
        for (var index = 0u; index < shape.Parts; index++)
        {
            var point = shape.GetPart(index);
            graphics.FillRectangle(Brushes.White, point.X - 2, point.Y - 2, 4, 4);
        }
    }
}
// Liveness detection: the face counts as live once a blink of the left eye,
// a blink of the right eye, and a mouth movement have all been observed.
public bool BioAssay(FullObjectDetection face, ref QueueFixedLength<double> leftEarQueue, ref QueueFixedLength<double> rightEarQueue, ref QueueFixedLength<double> mouthQueue, ref bool leftEarFlag, ref bool rightEarFlag, ref bool mouthFlag)
{
    if (!leftEarFlag)
    {
        // push the left-eye aspect ratio and check its spread in the window
        leftEarQueue.Enqueue(getEar(face, EarEnum.Left));
        var leftRange = GetqueueMaxAndMin(leftEarQueue);
        if (leftRange.Item1 - leftRange.Item2 > 0.1)
        {
            leftEarFlag = true;
        }
    }
    if (!rightEarFlag)
    {
        // push the right-eye aspect ratio and check its spread in the window
        rightEarQueue.Enqueue(getEar(face, EarEnum.Right));
        var rightRange = GetqueueMaxAndMin(rightEarQueue);
        if (rightRange.Item1 - rightRange.Item2 > 0.1)
        {
            rightEarFlag = true;
        }
    }
    if (!mouthFlag)
    {
        // push the mouth aspect ratio and check its spread in the window
        mouthQueue.Enqueue(getEar(face, EarEnum.Mouth));
        var mouthRange = GetqueueMaxAndMin(mouthQueue);
        if (mouthRange.Item1 - mouthRange.Item2 > 0.08)
        {
            mouthFlag = true;
        }
    }
    Console.WriteLine(leftEarFlag + ":" + rightEarFlag + ":" + mouthFlag);
    return leftEarFlag && rightEarFlag && mouthFlag;
}
/// <summary>
/// Initialize the eye contour from the five consecutive eye landmarks.
/// </summary>
/// <param name="landmarks">The detected facial landmark points.</param>
public void Init(FullObjectDetection landmarks)
{
    // left-eye landmarks start at index 36, right-eye landmarks at 42
    var first = isLeftEye ? 36u : 42u;

    // collect the five consecutive eye points
    var collected = new List<OpenCvSharp.Point>();
    for (var offset = 0u; offset < 5; offset++)
    {
        collected.Add(GetPoint(landmarks, first + offset));
    }
    Points = collected.ToArray();
}
/// <summary>
/// Extract the features from a shape and place it into the <see cref="FaceData3"/>.
/// </summary>
/// <param name="shape">The facial landmark points.</param>
/// <param name="label">The emotion/label of the face.</param>
/// <returns>The extracted <see cref="FaceData3"/> feature record.</returns>
static FaceData3 GetFaceDataPoints3(ref FullObjectDetection shape, string label)
{
    // compute the average landmark position (the face center)
    float avgx = 0f, avgy = 0f;
    for (uint i = 0; i < shape.Parts; ++i)
    {
        avgx += shape.GetPart(i).X;
        avgy += shape.GetPart(i).Y;
    }
    avgx /= shape.Parts;
    avgy /= shape.Parts;

    // all distances are normalized by the squared distance from the face
    // center to the top nasal point (landmark 27)
    var middle = new Point((int)avgx, (int)avgy);
    var normalization = (float)(shape.GetPart(27) - middle).LengthSquared;

    FaceData3 fd3 = new FaceData3();
    fd3.Emotion = label;

    // FIX: the original sums double-counted one landmark per brow (18 on the
    // left, 25 on the right — which also dropped 26) while still dividing by
    // 5; each brow now averages its five distinct landmarks (17-21 / 22-26).
    fd3.LeftEyebrowDistance = (GetDistance(ref shape, middle, 17, normalization)
                             + GetDistance(ref shape, middle, 18, normalization)
                             + GetDistance(ref shape, middle, 19, normalization)
                             + GetDistance(ref shape, middle, 20, normalization)
                             + GetDistance(ref shape, middle, 21, normalization)) / 5;
    fd3.RightEyebrowDistance = (GetDistance(ref shape, middle, 22, normalization)
                              + GetDistance(ref shape, middle, 23, normalization)
                              + GetDistance(ref shape, middle, 24, normalization)
                              + GetDistance(ref shape, middle, 25, normalization)
                              + GetDistance(ref shape, middle, 26, normalization)) / 5;

    // eye and lip dimensions, all normalized
    fd3.LeftEyeWidth = GetDistanceBetween(ref shape, 36, 39, normalization);
    fd3.RightEyeWidth = GetDistanceBetween(ref shape, 42, 45, normalization);
    fd3.LeftEyeHeight = GetDistanceBetween(ref shape, 40, 38, normalization);
    fd3.RightEyeHeight = GetDistanceBetween(ref shape, 46, 44, normalization);
    fd3.OuterLipWidth = GetDistanceBetween(ref shape, 48, 54, normalization);
    fd3.InnerLipWidth = GetDistanceBetween(ref shape, 60, 64, normalization);
    fd3.OuterLipHeight = GetDistanceBetween(ref shape, 52, 58, normalization);
    fd3.InnerLipHeight = GetDistanceBetween(ref shape, 63, 67, normalization);

    // angles of the mouth corners relative to the face center
    fd3.LeftLipEdgeAngle = GetAngleBetween(ref shape, 48, middle);
    fd3.RightLipEdgeAngle = GetAngleBetween(ref shape, 54, middle);
    return fd3;
}
/// <summary>
/// Extract the features from a shape and place it into the <see cref="FaceData2"/>.
/// </summary>
/// <param name="shape">The facial landmark points.</param>
/// <param name="label">The emotion/label of the face.</param>
/// <returns>The extracted <see cref="FaceData2"/> feature record.</returns>
static FaceData2 GetFaceDataPoints2(ref FullObjectDetection shape, string label)
{
    //http://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/#more-565
    // gather the raw coordinates and the average landmark position (face center)
    float avgx = 0f, avgy = 0f;
    float[] x = new float[shape.Parts];
    float[] y = new float[shape.Parts];
    for (uint i = 0; i < shape.Parts; ++i)
    {
        avgx += shape.GetPart(i).X;
        x[i] = shape.GetPart(i).X;
        avgy += shape.GetPart(i).Y;
        y[i] = shape.GetPart(i).Y;
    }
    avgx /= shape.Parts;
    avgy /= shape.Parts;

    // FIX: removed the distToCentres array and the middle/top-nasal point
    // locals — they were computed but never read; also hoisted the Emotion
    // assignment out of the loop where it was reassigned every iteration.
    FaceData2 fd = new FaceData2();
    fd.Emotion = label;

    for (uint i = 0; i < shape.Parts; ++i)
    {
        fd.RawCoordiantesX[i] = x[i];
        fd.RawCoordiantesY[i] = y[i];

        // squared distance from the face center to this landmark
        var distance = (new Point((int)avgx, (int)avgy) - new Point((int)x[i], (int)y[i])).LengthSquared;
        fd.LengthBetweenFeatures[i] = (float)distance;

        // NOTE(review): this is the angle of the raw coordinate, not of the
        // offset from the face center; the referenced tutorial uses the
        // center-relative angle — confirm before retraining a model on this.
        fd.AngleBetweenFeatures[i] = Convert.ToSingle(Math.Atan2(y[i], x[i]) * 360 / (2 * Math.PI));
    }
    return fd;
}
/// <summary>
/// Get the list of the landmarks based on the given first and last index and the face shape object
/// </summary>
/// <param name="firstIdx">first index of the landmark (0-67)</param>
/// <param name="lastIdx">last index of the landmark (0-67)</param>
/// <param name="shape">object of the predicted face shape</param>
/// <returns>List of the landmarks, or null when <paramref name="shape"/> is null</returns>
/// <exception cref="Exception">Thrown when reading a landmark fails; the original failure is kept as InnerException.</exception>
private List<Point> GetLandmarkList(int firstIdx, int lastIdx, FullObjectDetection shape)
{
    try
    {
        // no shape means no landmarks
        if (shape == null)
        {
            return null;
        }

        List<Point> res = new List<Point>();
        for (uint i = Convert.ToUInt32(firstIdx); i <= Convert.ToUInt32(lastIdx); i++)
        {
            res.Add(shape.GetPart(i));
        }
        return res;
    }
    catch (Exception ex)
    {
        // FIX: pass the caught exception as InnerException so the original
        // stack trace is preserved instead of being discarded
        throw new Exception(string.Format("Failed to Get Landmark List from index {0} to index {1}. Exception: {2}", firstIdx, lastIdx, ex.Message), ex);
    }
}
/// <summary>
/// Called when videoPlayer receives a new frame.
/// </summary>
/// <param name="sender">The video player raising the event.</param>
/// <param name="eventArgs">The event arguments carrying the new camera frame.</param>
private void videoPlayer_NewFrameReceived(object sender, Accord.Video.NewFrameEventArgs eventArgs)
{
    // get the current camera frame
    var frame = eventArgs.Frame;

    // get the landmark points
    landmarkPoints = DetectLandmarks(frame, frameIndex);

    // do we have 68 landmark points?
    if (landmarkPoints != null && landmarkPoints.Parts == 68)
    {
        // draw the landmark points in the bottom right box
        var poseImage = new Bitmap(frame.Width, frame.Height);
        Utility.DrawLandmarkPoints(landmarkPoints, poseImage);

        // draw the eye area in the bottom left box
        var eyeImage = new Bitmap(frame.Width, frame.Height);
        Utility.DrawEyeArea(landmarkPoints, eyeImage);
        eyeBox.Image = eyeImage;

        // build a quick and dirty camera calibration matrix
        var cameraMatrix = Utility.GetCameraMatrix(eventArgs.Frame.Width, eventArgs.Frame.Height);

        // detect head angle
        // NOTE(review): the rotation/translation/coefficient matrices returned
        // here are never disposed and this handler runs per frame — confirm
        // whether they leak native memory.
        Utility.DetectHeadAngle(
            landmarkPoints,
            cameraMatrix,
            out Mat rotation,
            out Mat translation,
            out MatOfDouble coefficients);

        // draw the pose line in the bottom right box
        Utility.DrawPoseLine(rotation, translation, cameraMatrix, coefficients, landmarkPoints, poseImage);
        headBox.Image = poseImage;

        // get the euler angles
        headRotation = Utility.GetEulerMatrix(rotation);
    }

    // update frame counter
    frameIndex++;
}
/// <summary>
/// Detect the eye state from the landmark points.
/// </summary>
/// <param name="frame">The current video frame.</param>
/// <param name="shape">The current landmark points.</param>
private void DetectEyeState(System.Drawing.Bitmap frame, FullObjectDetection shape)
{
    // landmarks 36-41 outline the left eye, 42-47 the right eye
    var leftEye = from i in Enumerable.Range(36, 6)
                  let p = shape.GetPart((uint)i)
                  select new OpenCvSharp.Point(p.X, p.Y);
    var rightEye = from i in Enumerable.Range(42, 6)
                   let p = shape.GetPart((uint)i)
                   select new OpenCvSharp.Point(p.X, p.Y);

    // paint both eye outlines as white polygons on a black mask
    using (var mask = new Mat(new Size(frame.Width, frame.Height), MatType.CV_8UC1))
    {
        mask.SetTo(0);
        Cv2.FillConvexPoly(mask, leftEye, Scalar.White);
        Cv2.FillConvexPoly(mask, rightEye, Scalar.White);

        // the white pixel count is the combined surface area of both eyes
        int area = Cv2.CountNonZero(mask);

        // the maximum possible area is 40% of the surface area of both
        // eyeballs, with half the eye-corner distance as the radius
        int r1 = (shape.GetPart(39).X - shape.GetPart(36).X) / 2;
        int r2 = (shape.GetPart(45).X - shape.GetPart(42).X) / 2;
        double normalizedArea = 0.4 * Math.PI * r1 * r1 + 0.4 * Math.PI * r2 * r2;

        // scale the fractional area to 0-100 and clamp out-of-range scores
        var score = (int)(100 * area / normalizedArea - 20);
        eyeStateValue = score >= 0 && score <= 100 ? score : 0;

        // crop a generous bounding box around both eyes
        var rect = Cv2.BoundingRect(Enumerable.Union(leftEye, rightEye));
        rect.Inflate(30, 30);

        // show the cropped eye mask in the picture box
        var maskImg = BitmapConverter.ToBitmap(mask.Clone(rect));
        eyeBox.Image = maskImg;
    }
}
/// <summary>
/// Load an image from disk, detect faces on it, publish a 150x150 aligned
/// face chip for each face and the annotated image to the main window.
/// </summary>
/// <param name="imagePath">The path of the image file to process.</param>
public void GetImage(string imagePath)
{
    // FIX: the loaded Array2D and the per-face dlib objects were never
    // disposed, leaking native memory on every call
    using (Array2D<RgbPixel> image = Dlib.LoadImage<RgbPixel>(imagePath))
    {
        using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())
        {
            var faces = fd.Operator(image);
            foreach (DlibDotNet.Rectangle face in faces)
            {
                using (FullObjectDetection shape = _ShapePredictor.Detect(image, face))
                using (ChipDetails faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25))
                using (Array2D<RgbPixel> faceChip = Dlib.ExtractImageChip<RgbPixel>(image, faceChipDetail))
                {
                    // the bitmap is handed to the UI, so it is NOT disposed here
                    Bitmap bitmap1 = faceChip.ToBitmap<RgbPixel>();
                    MainWindow.main.Statusa1 = bitmap1;
                }

                // mark the detected face on the full image
                Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
            }
        }

        // publish the annotated image
        Bitmap frame = image.ToBitmap<RgbPixel>();
        MainWindow.main.Statusa = frame;
    }
}
/// <summary>
/// Draw the eye area in the specified image.
/// </summary>
/// <param name="shape">The facial landmark points.</param>
/// <param name="image">The image to draw on.</param>
public static void DrawEyeArea(
    FullObjectDetection shape,
    Bitmap image)
{
    // landmarks 36-41 outline the left eye, 42-47 the right eye
    var leftEye = new PointF[6];
    var rightEye = new PointF[6];
    for (var n = 0; n < 6; n++)
    {
        var lp = shape.GetPart((uint)(36 + n));
        leftEye[n] = new PointF(lp.X, lp.Y);
        var rp = shape.GetPart((uint)(42 + n));
        rightEye[n] = new PointF(rp.X, rp.Y);
    }

    // fill both eye outlines with white
    using (Graphics g = Graphics.FromImage(image))
    {
        g.FillClosedCurve(Brushes.White, leftEye);
        g.FillClosedCurve(Brushes.White, rightEye);
    }
}