/// <summary>
/// Draw a straight line indicating head pose on the specified image.
/// </summary>
/// <param name="rotation">The rotation matrix of the head.</param>
/// <param name="translation">The translation matrix of the head.</param>
/// <param name="cameraMatrix">The camera calibration matrix.</param>
/// <param name="coefficients">The distortion coefficient matrix.</param>
/// <param name="shape">The list of facial landmark points.</param>
/// <param name="image">The image to draw on (modified in place).</param>
public static void DrawPoseLine(
    Mat rotation,
    Mat translation,
    MatOfDouble cameraMatrix,
    MatOfDouble coefficients,
    FullObjectDetection shape,
    Bitmap image)
{
    // create a new model point 1000 units in front of the nose and project it into 2d
    var poseModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
    var poseProjection = new MatOfPoint2d();
    Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coefficients, poseProjection);

    // landmark 30 is the tip of the nose in the 68-point model
    var point = shape.GetPart(30);
    var tipOfNose = new OpenCvSharp.Point2d(point.X, point.Y);

    // draw a line from the tip of the nose pointing in the direction of head pose.
    // Pen is IDisposable — the original leaked a GDI pen on every call.
    var p = poseProjection.At<Point2d>(0);
    using (var pen = new Pen(Brushes.White, 2))
    using (Graphics g = Graphics.FromImage(image))
    {
        g.DrawLine(pen, (int)tipOfNose.X, (int)tipOfNose.Y, (int)p.X, (int)p.Y);
    }
}
/// <summary>
/// Detect the orientation of the head in the current video frame
/// by solving the perspective-n-point problem for six reference landmarks.
/// </summary>
/// <param name="shape">The landmark points to use.</param>
/// <param name="cameraMatrix">The camera calibration matrix to use.</param>
/// <param name="rotationMatrix">The detected head rotation matrix.</param>
/// <param name="translationMatrix">The detected head translation matrix.</param>
/// <param name="coefficientMatrix">The (zero) distortion coefficient matrix used.</param>
public static void DetectHeadAngle(
    FullObjectDetection shape,
    MatOfDouble cameraMatrix,
    out Mat rotationMatrix,
    out Mat translationMatrix,
    out MatOfDouble coefficientMatrix)
{
    // 3d reference model of a generic face
    var faceModel = Utility.GetFaceModel();

    // 2d positions of the six reference landmarks, in the same order as the
    // model: nose tip (30), chin (8), eye corners (36, 45), mouth corners (48, 54)
    var indices = new int[] { 30, 8, 36, 45, 48, 54 };
    var imagePoints = indices
        .Select(i => shape.GetPart((uint)i))
        .Select(p => new OpenCvSharp.Point2d(p.X, p.Y))
        .ToArray();
    var landmarkMat = new MatOfPoint2d(1, 6, imagePoints);

    // assume zero lens distortion
    var distortion = new MatOfDouble(4, 1);
    distortion.SetTo(0);

    // solve for the head rotation and translation
    Mat rotation = new MatOfDouble();
    Mat translation = new MatOfDouble();
    Cv2.SolvePnP(faceModel, landmarkMat, cameraMatrix, distortion, rotation, translation);

    // hand the results back to the caller
    rotationMatrix = rotation;
    translationMatrix = translation;
    coefficientMatrix = distortion;
}
/// <summary>
/// Detect the first face's landmarks in the given frame and compute head-pose parameters.
/// </summary>
/// <param name="frame">The image to analyse.</param>
/// <returns>
/// A 3-element array: [0] and [1] are the 2d projection of a model point
/// 1000 units in front of the nose (the head-pose direction), [2] is the
/// eye aspect ratio. All zeros when no face is detected.
/// </returns>
public double[] detectFaceLandmarks(Array2D<RgbPixel> frame)
{
    var img = frame;
    double[] headParams = new double[3];

    var faces = fd.Operator(img);
    foreach (var face in faces)
    {
        var shape = sp.Detect(img, face);

        // landmarks 36-47 cover both eyes; used for the eye-aspect-ratio (blink) metric
        var eyesPoints = (from i in new int[] { 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47 }
                          let pt = shape.GetPart((uint)i)
                          select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray();
        headParams[2] = calculateEAR(eyesPoints);

        // 3d face model and the matching six 2d landmarks:
        // nose tip (30), chin (8), eye corners (36, 45), mouth corners (48, 54)
        var model = Utility.GetFaceModel();
        var landmarks = new MatOfPoint2d(1, 6,
            (from i in new int[] { 30, 8, 36, 45, 48, 54 }
             let pt = shape.GetPart((uint)i)
             select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

        // camera intrinsics approximated from the frame size; zero distortion
        var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);
        var coeffs = new MatOfDouble(4, 1);
        coeffs.SetTo(0);

        // solve for head rotation and translation
        Mat rotation = new MatOfDouble();
        Mat translation = new MatOfDouble();
        Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

        // project a model point 1000 units in front of the nose into 2d;
        // its image coordinates indicate where the head is pointing
        var poseModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
        var poseProjection = new MatOfPoint2d();
        Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

        var p = poseProjection.At<Point2d>(0);
        headParams[0] = (double)p.X;
        headParams[1] = (double)p.Y;
    }
    return headParams;
}
/// <summary>
/// Detect the orientation of the head in the current video frame, store the
/// euler angles, and draw the key landmarks plus a pose-direction line on the image.
/// </summary>
/// <param name="image">The current video frame (drawn on in place).</param>
/// <param name="shape">The facial landmark points.</param>
private void DetectHeadPose(System.Drawing.Bitmap image, FullObjectDetection shape)
{
    // build the 3d face model
    var model = Utility.GetFaceModel();

    // matching 2d landmarks: nose tip (30), chin (8), eye corners (36, 45), mouth corners (48, 54)
    var landmarks = new MatOfPoint2d(1, 6,
        (from i in new int[] { 30, 8, 36, 45, 48, 54 }
         let p = shape.GetPart((uint)i)
         select new OpenCvSharp.Point2d(p.X, p.Y)).ToArray());

    // camera intrinsics approximated from the frame size
    var cameraMatrix = Utility.GetCameraMatrix(image.Width, image.Height);

    // assume zero lens distortion
    var coeffs = new MatOfDouble(4, 1);
    coeffs.SetTo(0);

    // find head rotation and translation
    Mat rotation = new MatOfDouble();
    Mat translation = new MatOfDouble();
    Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

    // find and store euler angles for the rest of the app
    var euler = Utility.GetEulerMatrix(rotation);
    headRotation = euler;

    // project a model point 1000 units in front of the nose into 2d
    var poseModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
    var poseProjection = new MatOfPoint2d();
    Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

    // draw the 6 landmark points and the pose line.
    // Pen is IDisposable — the original leaked a GDI pen on every frame.
    using (Graphics g = Graphics.FromImage(image))
    using (var pen = new Pen(Brushes.LightGreen, 4))
    {
        foreach (var i in new int[] { 30, 8, 36, 45, 48, 54 })
        {
            var point = shape.GetPart((uint)i);
            g.FillRectangle(Brushes.LightGreen, point.X - 5, point.Y - 5, 10, 10);
        }

        // line from the tip of the nose pointing in the direction of head pose
        var landmark = landmarks.At<Point2d>(0);
        var p = poseProjection.At<Point2d>(0);
        g.DrawLine(pen, (int)landmark.X, (int)landmark.Y, (int)p.X, (int)p.Y);
    }
}
/// <summary>
/// Initialize the form: open the default webcam, load the Dlib detectors,
/// and prepare the reusable head-pose matrices and calibration-step data.
/// </summary>
public Form1()
{
    InitializeComponent();

    // camera 0 = default webcam; frames are read into a single reusable Mat
    this.capture = new VideoCapture(0);
    this.frame = new Mat();

    // Dlib face detector + 68-point landmark predictor.
    // NOTE(review): absolute user-specific path — consider shipping the .dat
    // file next to the executable instead.
    this.fd = Dlib.GetFrontalFaceDetector();
    this.sp = ShapePredictor.Deserialize(@"C:\Users\trago\OneDrive\Desktop\OpenCV\shape_predictor_68_face_landmarks.dat");

    // reusable head-pose inputs: 3d face model, zero distortion coefficients,
    // and a model point 1000 units in front of the nose for pose projection
    this.model = Utility.GetFaceModel();
    this.coeffs = new MatOfDouble(4, 1);
    this.coeffs.SetTo(0);
    this.poseModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
    this.poseProjection = new MatOfPoint2d();

    // per-step values and on-screen instructions (Thai).
    // checker presumably holds per-step angle thresholds — verify against CheckFace.
    this.checker = new[] { 100, -10, 10, 0 };
    this.text = new[] { "1. เอาหน้าใส่กรอบ", "2. ก้มหน้าเล็กน้อย", "3. เงยหน้าเล็กน้อย", "4. หน้าตรง" };
    this.timeset = 3;
    this.size = new Size(250, 300);

    SetStart();
    SetZero();
}
/// <summary>
/// Performs perspective transformation of each element of a multi-channel input matrix.
/// </summary>
/// <param name="src">The source two-channel or three-channel floating-point array;
/// each element is a 2D/3D vector to be transformed.</param>
/// <param name="m">3x3 or 4x4 transformation matrix.</param>
/// <returns>The destination array; it will have the same size and same type as src.</returns>
/// <exception cref="ArgumentNullException"><paramref name="src"/> or <paramref name="m"/> is null.</exception>
public static Point2d[] PerspectiveTransform(IEnumerable<Point2d> src, Mat m)
{
    // nameof keeps the exception messages refactor-safe
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (m == null)
        throw new ArgumentNullException(nameof(m));

    using (var srcMat = MatOfPoint2d.FromArray(src))
    using (var dstMat = new MatOfPoint2d())
    {
        NativeMethods.core_perspectiveTransform_Mat(srcMat.CvPtr, dstMat.CvPtr, m.CvPtr);
        return dstMat.ToArray();
    }
}
/// <summary>
/// Projects points from the model coordinate space to the image coordinates.
/// Also computes derivatives of the image coordinates w.r.t the intrinsic
/// and extrinsic camera parameters.
/// </summary>
/// <param name="objectPoints">Array of object points, 3xN/Nx3 1-channel or
/// 1xN/Nx1 3-channel, where N is the number of points in the view.</param>
/// <param name="rvec">Rotation vector (3x1).</param>
/// <param name="tvec">Translation vector (3x1).</param>
/// <param name="cameraMatrix">Camera matrix (3x3).</param>
/// <param name="distCoeffs">Input vector of distortion coefficients
/// (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
/// If the vector is null, zero distortion coefficients are assumed.</param>
/// <param name="imagePoints">Output array of image points, 2xN/Nx2 1-channel
/// or 1xN/Nx1 2-channel.</param>
/// <param name="jacobian">Optional output 2Nx(10 + numDistCoeffs) jacobian matrix
/// of derivatives of image points with respect to components of the rotation vector,
/// translation vector, focal lengths, coordinates of the principal point and
/// the distortion coefficients.</param>
/// <param name="aspectRatio">Optional "fixed aspect ratio" parameter.
/// If the parameter is not 0, the function assumes that the aspect ratio (fx/fy)
/// is fixed and correspondingly adjusts the jacobian matrix.</param>
public static void ProjectPoints(IEnumerable<Point3d> objectPoints,
    double[] rvec, double[] tvec, double[,] cameraMatrix, double[] distCoeffs,
    out Point2d[] imagePoints, out double[,] jacobian, double aspectRatio = 0)
{
    if (objectPoints == null)
        throw new ArgumentNullException(nameof(objectPoints));
    if (rvec == null)
        throw new ArgumentNullException(nameof(rvec));
    if (rvec.Length != 3)
        throw new ArgumentException("rvec.Length != 3");
    if (tvec == null)
        throw new ArgumentNullException(nameof(tvec));
    if (tvec.Length != 3)
        throw new ArgumentException("tvec.Length != 3");
    if (cameraMatrix == null)
        throw new ArgumentNullException(nameof(cameraMatrix));
    if (cameraMatrix.GetLength(0) != 3 || cameraMatrix.GetLength(1) != 3)
        throw new ArgumentException("cameraMatrix must be double[3,3]");

    Point3d[] objectPointsArray = EnumerableEx.ToArray(objectPoints);

    // All native Mats are disposed via using. The original allocated an empty
    // Mat for distCoeffsM and then overwrote the reference when distCoeffs was
    // non-null (leaking the first Mat), and never disposed distCoeffsM or
    // jacobianM at all.
    using (var objectPointsM = new Mat(objectPointsArray.Length, 1, MatType.CV_64FC3, objectPointsArray))
    using (var rvecM = new Mat(3, 1, MatType.CV_64FC1, rvec))
    using (var tvecM = new Mat(3, 1, MatType.CV_64FC1, tvec))
    using (var cameraMatrixM = new Mat(3, 3, MatType.CV_64FC1, cameraMatrix))
    using (var imagePointsM = new MatOfPoint2d())
    using (var distCoeffsM = distCoeffs == null
        ? new Mat()
        : new Mat(distCoeffs.Length, 1, MatType.CV_64FC1, distCoeffs))
    using (var jacobianM = new MatOfDouble())
    {
        NativeMethods.calib3d_projectPoints_Mat(objectPointsM.CvPtr,
            rvecM.CvPtr, tvecM.CvPtr, cameraMatrixM.CvPtr, distCoeffsM.CvPtr,
            imagePointsM.CvPtr, jacobianM.CvPtr, aspectRatio);
        imagePoints = imagePointsM.ToArray();
        jacobian = jacobianM.ToRectangularArray();
    }
}
/// <summary>
/// The main program entry point: detect faces in the input image, estimate
/// each head's pose, annotate the image, and save the result.
/// </summary>
/// <param name="args">The command line arguments.</param>
static void Main(string[] args)
{
    // set up the Dlib face detector and 68-point shape predictor
    using (var detector = Dlib.GetFrontalFaceDetector())
    using (var predictor = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
    {
        // load the input image
        var img = Dlib.LoadImage<RgbPixel>(inputFilePath);

        // process every face found in the image
        foreach (var face in detector.Operator(img))
        {
            // find the landmark points for this face
            var shape = predictor.Detect(img, face);

            // 3d reference face model and the matching six 2d landmarks:
            // nose tip (30), chin (8), eye corners (36, 45), mouth corners (48, 54)
            var model = Utility.GetFaceModel();
            var keyIndices = new int[] { 30, 8, 36, 45, 48, 54 };
            var landmarks = new MatOfPoint2d(1, 6,
                keyIndices
                    .Select(i => shape.GetPart((uint)i))
                    .Select(pt => new OpenCvSharp.Point2d(pt.X, pt.Y))
                    .ToArray());

            // camera intrinsics approximated from the image size; zero distortion
            var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);
            var coeffs = new MatOfDouble(4, 1);
            coeffs.SetTo(0);

            // solve for head rotation and translation
            Mat rotation = new MatOfDouble();
            Mat translation = new MatOfDouble();
            Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

            // convert the rotation into euler angles in degrees
            var euler = Utility.GetEulerMatrix(rotation);
            var yaw = 180 * euler.At<double>(0, 2) / Math.PI;
            var pitch = 180 * euler.At<double>(0, 1) / Math.PI;
            var roll = 180 * euler.At<double>(0, 0) / Math.PI;

            // looking straight ahead wraps pitch at -180/180, so make the range smooth
            pitch = Math.Sign(pitch) * 180 - pitch;

            // the driver is facing forward when the left/right angle is within
            // -25..25 degrees and the up/down angle is within -10..10 degrees
            var facingForward =
                yaw >= -25 && yaw <= 25 &&
                pitch >= -10 && pitch <= 10;

            // project a model point 1000 units in front of the nose into 2d
            var poseModel = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
            var poseProjection = new MatOfPoint2d();
            Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

            // draw the key landmark points in yellow on the image
            foreach (var i in keyIndices)
            {
                var point = shape.GetPart((uint)i);
                var rect = new Rectangle(point);
                Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
            }

            // draw a line from the tip of the nose pointing in the direction of head pose
            var noseTip = landmarks.At<Point2d>(0);
            var target = poseProjection.At<Point2d>(0);
            Dlib.DrawLine(
                img,
                new DlibDotNet.Point((int)noseTip.X, (int)noseTip.Y),
                new DlibDotNet.Point((int)target.X, (int)target.Y),
                color: new RgbPixel(0, 255, 255));

            // draw a box around the face if it's facing forward
            if (facingForward)
            {
                Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
            }
        }

        // export the modified image
        Dlib.SaveJpeg(img, "output.jpg");
    }
}
/// <summary>
/// Per-frame timer callback: grab a webcam frame, detect faces inside the
/// guide rectangle, estimate head pose, draw landmarks, and advance the
/// calibration steps via CheckFace.
/// </summary>
/// <param name="sender">The timer raising the event.</param>
/// <param name="e">The event arguments.</param>
private void Timer1_Tick(object sender, EventArgs e)
{
    capture.Read(frame);

    // nothing captured (camera busy or disconnected) — skip this tick instead
    // of letting Cv2.Flip / ToBitmap throw on an empty Mat
    if (frame.Empty())
        return;

    // center the guide rectangle in the frame and mirror the image
    this.point = new Point((frame.Width - size.Width) / 2, (frame.Height - size.Height) / 2);
    this.rect = new Rect(point, size);
    Cv2.Flip(frame, frame, FlipMode.Y);

    if (start)
    {
        var img = ConvertToArray2D(frame);
        var faces = fd.Operator(img);
        if (faces.Any(face => IsFaceInFrame(face)))
        {
            foreach (var face in faces)
            {
                if (!IsFaceInFrame(face))
                    continue;

                var shape = sp.Detect(img, face);

                // six reference landmarks: nose tip (30), chin (8),
                // eye corners (36, 45), mouth corners (48, 54)
                var landmarks = new MatOfPoint2d(1, 6,
                    (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                     let pt = shape.GetPart((uint)i)
                     select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());
                var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);

                // solve for head rotation/translation
                Mat rotation = new MatOfDouble();
                Mat translation = new MatOfDouble();
                Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                // convert the rotation to euler angles in degrees;
                // pitch wraps at -180/180, so make the range smooth
                var euler = Utility.GetEulerMatrix(rotation);
                var yaw = 180 * euler.At<double>(0, 2) / Math.PI;
                var pitch = 180 * euler.At<double>(0, 1) / Math.PI;
                pitch = Math.Sign(pitch) * 180 - pitch;

                Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                // draw every detected landmark point on the Dlib image
                for (var i = 0; i < shape.Parts; i++)
                {
                    var point = shape.GetPart((uint)i);
                    var rect = new Rectangle(point);
                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                // NOTE(review): pitch is passed as both the first and last
                // argument — confirm CheckFace's signature really expects it twice
                CheckFace(pitch, frame, face, yaw, pitch);

                // copy the annotated image back into the frame; dispose the
                // intermediate GDI bitmap to avoid a per-frame handle leak
                using (var bmp = img.ToBitmap())
                {
                    frame = bmp.ToMat();
                }
            }
        }
        else if (this.step > 0)
        {
            // the face left the guide rectangle mid-calibration: restart
            SetZero();
            this.ErrorMsg.Visible = true;
        }
    }

    // draw the guide rectangle and show the frame in the UI
    Cv2.Rectangle(frame, rect, Scalar.Yellow, thickness: 2);
    camera.Image = frame.ToBitmap();
}