private void buscarrosto(Bitmap frame)
        {
            Image <Rgb, Byte> imageCV = new Image <Rgb, byte>(frame);

            Emgu.CV.Mat mat   = imageCV.Mat;
            var         array = new byte[mat.Width * mat.Height * mat.ElementSize];

            mat.CopyTo(array);

            using (Array2D <RgbPixel> image = Dlib.LoadImageData <RgbPixel>(array, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * mat.ElementSize)))
            {
                using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

                {
                    var faces = fd.Operator(image);
                    foreach (DlibDotNet.Rectangle face in faces)
                    {
                        FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                        ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                        Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                        MainWindow.main.Statusa1 = bitmap1;
                        Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                }
                frame = image.ToBitmap <RgbPixel>();
                MainWindow.main.Statusa = frame;
            }
        }
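The _ShapePredictor field used above is never initialized in this snippet; presumably it is deserialized once from the 68-point landmark model, as the later examples do. A minimal sketch of that setup, assuming the standard model file name:

        // One-time setup assumed elsewhere in the class (hypothetical placement);
        // the 68-point model file must be downloaded separately.
        private readonly ShapePredictor _ShapePredictor =
            ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat");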
Example #2
        private void BackgroundWorkerOnDoWork(object sender, DoWorkEventArgs doWorkEventArgs)
        {
            var path = doWorkEventArgs.Argument as string;

            if (string.IsNullOrWhiteSpace(path) || !File.Exists(path))
            {
                return;
            }

            // DlibDotNet can create an Array2D directly from a file, but this sample demonstrates
            // converting the managed image class to the dlib class and vice versa.
            using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var ms = new MemoryStream(File.ReadAllBytes(path)))
                    using (var bitmap = (Bitmap)Image.FromStream(ms))
                    {
                        using (var image = bitmap.ToArray2D <RgbPixel>())
                        {
                            var dets = faceDetector.Detect(image);
                            foreach (var r in dets)
                            {
                                Dlib.DrawRectangle(image, r, new RgbPixel {
                                    Green = 255
                                });
                            }

                            var result = image.ToBitmap();
                            this.pictureBox.Invoke(new Action(() =>
                            {
                                this.pictureBox.Image?.Dispose();
                                this.pictureBox.Image = result;
                            }));
                        }
                    }
        }
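A note on the conversions used above: ToArray2D and ToBitmap are extension methods that, as far as I know, come from the DlibDotNet.Extensions package rather than DlibDotNet itself, so this handler presumably needs using directives along these lines:

// Assumed using directives for the handler above.
using System.ComponentModel;   // BackgroundWorker, DoWorkEventArgs
using System.Drawing;          // Bitmap, Image
using System.IO;               // File, MemoryStream
using DlibDotNet;              // FrontalFaceDetector, Dlib, RgbPixel
using DlibDotNet.Extensions;   // ToArray2D<RgbPixel>(), ToBitmap()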
Example #3
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // draw the landmark points on the image
                        for (var i = 0; i < shape.Parts; i++)
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(img, "output.jpg");
                }
        }
Example #4
    static void Main(string[] args)
    {
        // Read image in
        var img = Dlib.LoadImage <RgbPixel>(imgFilePath);

        // Let's detect faces and draw rectangles around them
        FaceDetector faceDetector = new FaceDetector(facialLandmarksSerializedPredictor);

        Rectangle[] facesBoundingBoxes = faceDetector.DetectFacesBoundingBoxes(img);

        foreach (var bb in facesBoundingBoxes)
        {
            Dlib.DrawRectangle(img, bb, color: new RgbPixel(0, 0, 255), thickness: 3);
        }

        // Draw eyes bounding box for subject (i.e., largest) face
        if (facesBoundingBoxes.Length > 0)
        {
            // Example code if you wish to do this only on the largest face

            /*Rectangle subjectFaceBoundingBox = new Rectangle(0, 0);
             * foreach (var bb in facesBoundingBoxes)
             * {
             *  if (bb.Area > subjectFaceBoundingBox.Area)
             *  {
             *      subjectFaceBoundingBox = bb;
             *  }
             * }*/

            // Here we do it on all faces
            foreach (var subjectFaceBoundingBox in facesBoundingBoxes)
            {
                // Next, obtain facial landmarks
                var landmarks = faceDetector.DetectFacialLandmarks(img, subjectFaceBoundingBox);
                // We also draw them
                foreach (Point p in landmarks)
                {
                    Dlib.DrawRectangle(img, new Rectangle(p), color: new RgbPixel(255, 0, 0), thickness: 3);
                }

                // Now draw bounding box around the eyes
                var topLeft         = new Point(landmarks[FacialLandmarks.RIGHT_EYEBROW].X, landmarks[FacialLandmarks.RIGHT_EYEBROW].Y);
                var bottomRight     = new Point(landmarks[FacialLandmarks.LEFT_EYEBROW].X, landmarks[FacialLandmarks.UPPER_NOSE].Y);
                var eyesBoundingBox = new Rectangle(topLeft, bottomRight);
                Dlib.DrawRectangle(img, eyesBoundingBox, color: new RgbPixel(0, 255, 0), thickness: 3);
            }
        }

        // Create output file path (for later)
        string outFilePath;
        var    tmpStrArray = imgFilePath.Split('/');
        var    extension   = tmpStrArray[tmpStrArray.Length - 1].Split('.')[1];

        outFilePath = String.Join('/', tmpStrArray.SkipLast(1).ToArray()) + '/' +
                      tmpStrArray[tmpStrArray.Length - 1].Replace("." + extension, "_out." + extension);
        Console.WriteLine(outFilePath);

        // Write img
        faceDetector.WriteImageToFilePath(img, outFilePath);
    }
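FaceDetector and FacialLandmarks in the example above are the author's own wrappers, not DlibDotNet types. If the landmarks array follows the usual 68-point ordering, the named indices could plausibly be constants like the following (hypothetical values, shown only to make the eye-box arithmetic above readable):

    // Hypothetical sketch of the FacialLandmarks indices, assuming the standard
    // 68-point layout (eyebrows span points 17-26, the nose bridge starts at 27).
    static class FacialLandmarks
    {
        public const int RIGHT_EYEBROW = 17;  // outer corner of the right eyebrow
        public const int LEFT_EYEBROW  = 26;  // outer corner of the left eyebrow
        public const int UPPER_NOSE    = 27;  // top of the nose bridge
    }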
        public static void DetectFacesAsync(string inputFilePath, string subscriptionKey, string uriBase, IFaceClient client, string vocabularyPath)
        {
            // set up Dlib facedetector
            DirectoryInfo dir = new DirectoryInfo(inputFilePath);

            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                foreach (FileInfo files in dir.GetFiles("*.jpg"))
                {
                    string _inputFilePath = inputFilePath + files.Name;

                    // load input image
                    Array2D <RgbPixel> img = Dlib.LoadImage <RgbPixel>(_inputFilePath);

                    // find all faces in the image
                    Rectangle[] faces = fd.Operator(img);
                    if (faces.Length != 0)
                    {
                        Console.WriteLine("Picture " + files.Name + " have faces, sending data to Azure");
                        MakeAnalysisRequestAsync(_inputFilePath, subscriptionKey, uriBase, files.Name, client, vocabularyPath).Wait();
                    }

                    foreach (var face in faces)
                    {
                        // draw a rectangle for each face
                        Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                    // export the modified image
                    Dlib.SaveJpeg(img, "./Results/" + files.Name);
                }
            }
        }
 public static void DrawPointsOfLandmarks(FileInfo image)
 {
     using (var fd = Dlib.GetFrontalFaceDetector())
         using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
         {
             using (var img = Dlib.LoadImage <RgbPixel>(image.FullName))
             {
                 var faces = fd.Operator(img);
                 // for each face draw over the facial landmarks
                 foreach (var face in faces)
                 {
                     var shape = sp.Detect(img, face);
                     // draw the landmark points on the image
                     for (var i = 0; i < shape.Parts; i++)
                     {
                         var point = shape.GetPart((uint)i);
                         var rect  = new Rectangle(point);
                         if (i == 0)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 255), thickness: 8);
                         }
                         else if (i == 21 || i == 22 || i == 39 || i == 42 || i == 33 || i == 51 || i == 57 ||
                                  i == 48 ||
                                  i == 54)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 255), thickness: 4);
                         }
                         else if (i == 18 || i == 19 || i == 20 || i == 21) // left eyebrow (21 is already matched above)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 0), 6);
                         }
                         else if (i == 22 || i == 23 || i == 24 || i == 25) // right eyebrow (22 is already matched above)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 128, 0), 6);
                         }
                         else if (i == 48 || i == 49 || i == 50) // left lip
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), 2);
                         }
                         else if (i == 52 || i == 53 || i == 54) // right lip
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 128), 2);
                         }
                         else
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 0, 0), thickness: 4);
                         }
                     }
                     Dlib.SavePng(img, "output.jpg");
                 }
             }
         }
 }
 private static void DetectFacesOnImage(Array2D <RgbPixel> image)
 {
     // set up Dlib facedetector
     using (var fd = Dlib.GetFrontalFaceDetector())
     {
         // find all faces in the image
         var faces = fd.Operator(image);
         foreach (Rectangle face in faces)
         {
             // draw a rectangle for each face
             Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
         }
     }
 }
        private Bitmap ProcessImage(Bitmap image)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var sp = new ShapePredictor("shape_predictor_68_face_landmarks.dat"))
                {
                    // convert image to dlib format
                    var img = image.ToArray2D <RgbPixel>();

                    // detect faces
                    var faces = fd.Detect(img);

                    // detect facial landmarks
                    foreach (var rect in faces)
                    {
                        // detect facial landmarks
                        var shape = sp.Detect(img, rect);

                        // extract face chip
                        var chip      = Dlib.GetFaceChipDetails(shape);
                        var thumbnail = Dlib.ExtractImageChip <RgbPixel>(img, chip);

                        // add picturebox
                        var box = new PictureBox()
                        {
                            Image    = thumbnail.ToBitmap <RgbPixel>(),
                            SizeMode = PictureBoxSizeMode.Zoom,
                            Width    = 62,
                            Height   = 62
                        };
                        imagesPanel.Controls.Add(box);

                        // draw landmarks on main image
                        var lines = Dlib.RenderFaceDetections(new FullObjectDetection[] { shape });
                        foreach (var line in lines)
                        {
                            Dlib.DrawRectangle(
                                img,
                                new DlibDotNet.Rectangle(line.Point1),
                                new RgbPixel {
                                Green = 255
                            },
                                8);
                        }
                    }
                    return(img.ToBitmap <RgbPixel>());
                }
        }
Example #9
        public void FindFaces()
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(path);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }
                Dlib.SaveJpeg(img, @"D:\output.png");
            }
        }
        /// <summary>
        /// Get the image with detected faces highlighted by the rectangle
        /// </summary>
        /// <param name="image"></param>
        /// <param name="numOfFaceDetected"></param>
        /// <returns></returns>
        public Bitmap FaceDetectionFromImage(Bitmap image, out int numOfFaceDetected)
        {
            numOfFaceDetected = 0;
            if (image != null)
            {
                // set up Dlib facedetectors and shapedetectors
                using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                    using (var shapePredictor = new ShapePredictor(Configuration.SHAP_PREDICTOR_CONFIG))
                    {
                        // convert image to dlib format
                        var img = image.ToArray2D <RgbPixel>();

                        // detect faces
                        var faces = faceDetector.Detect(img);

                        // detect facial landmarks
                        foreach (var rect in faces)
                        {
                            // detect facial landmarks
                            var shape = shapePredictor.Detect(img, rect);

                            // the left eye uses landmark indices 42-47
                            Landmarks landmarkLeftEye = new Landmarks(42, 47, shape);
                            // the right eye uses landmark indices 36-41
                            Landmarks landmarkRightEye = new Landmarks(36, 41, shape);
                            // draw the face and eye rectangles
                            var leftEyeRect      = Utils.RectangleAdjust(landmarkLeftEye.GetLandmarkRectangle(), img);
                            var rightEyeRect     = Utils.RectangleAdjust(landmarkRightEye.GetLandmarkRectangle(), img);
                            var adjustedFaceRect = Utils.RectangleAdjust(rect, img);

                            Dlib.DrawRectangle(img, adjustedFaceRect, new RgbPixel {
                                Blue = 255
                            }, 5);
                            Dlib.DrawRectangle(img, leftEyeRect, new RgbPixel {
                                Green = 255
                            }, 2);
                            Dlib.DrawRectangle(img, rightEyeRect, new RgbPixel {
                                Green = 255
                            }, 2);
                        }
                        numOfFaceDetected = faces.Length;
                        return(img.ToBitmap <RgbPixel>());
                    }
            }
            return(image);
        }
Example #11
        public static void Recognize(string file)
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(file);

                // find all faces in the image
                var faces = fd.Operator(img);

                foreach (var face in faces)
                {
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }


                Dlib.SaveJpeg(img, file);
            }
        }
Example #12
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetector
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                // load input image
                var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                // export the modified image
                Dlib.SaveJpeg(img, "output.jpg");
            }
        }
        public void GetImage(string imagePath)
        {
            Array2D <RgbPixel> image = Dlib.LoadImage <RgbPixel>(imagePath);

            using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

            {
                var faces = fd.Operator(image);
                foreach (DlibDotNet.Rectangle face in faces)
                {
                    FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                    ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                    Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                    Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                    MainWindow.main.Statusa1 = bitmap1;
                    Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }
            }
            Bitmap frame = image.ToBitmap <RgbPixel>();

            MainWindow.main.Statusa = frame;
        }
Example #14
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // build the 3d face model
                        var model = Utility.GetFaceModel();

                        // get the landmark point we need
                        var landmarks = new MatOfPoint2d(1, 6,
                                                         (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                                                          let pt = shape.GetPart((uint)i)
                                                                   select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

                        // build the camera matrix
                        var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);

                        // build the coefficient matrix
                        var coeffs = new MatOfDouble(4, 1);
                        coeffs.SetTo(0);

                        // find head rotation and translation
                        Mat rotation    = new MatOfDouble();
                        Mat translation = new MatOfDouble();
                        Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                        // find euler angles
                        var euler = Utility.GetEulerMatrix(rotation);

                        // calculate head rotation in degrees
                        var yaw   = 180 * euler.At <double>(0, 2) / Math.PI;
                        var pitch = 180 * euler.At <double>(0, 1) / Math.PI;
                        var roll  = 180 * euler.At <double>(0, 0) / Math.PI;

                        // looking straight ahead wraps at -180/180, so make the range smooth
                        pitch = Math.Sign(pitch) * 180 - pitch;

                        // calculate if the driver is facing forward
                        // the left/right angle must be in the -25..25 range
                        // the up/down angle must be in the -10..10 range
                        var facingForward =
                            yaw >= -25 && yaw <= 25 &&
                            pitch >= -10 && pitch <= 10;

                        // create a new model point in front of the nose, and project it into 2d
                        var poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
                        var poseProjection = new MatOfPoint2d();
                        Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                        // draw the key landmark points in yellow on the image
                        foreach (var i in new int[] { 30, 8, 36, 45, 48, 54 })
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        // draw a line from the tip of the nose pointing in the direction of head pose
                        var landmark = landmarks.At <Point2d>(0);
                        var p        = poseProjection.At <Point2d>(0);
                        Dlib.DrawLine(
                            img,
                            new DlibDotNet.Point((int)landmark.X, (int)landmark.Y),
                            new DlibDotNet.Point((int)p.X, (int)p.Y),
                            color: new RgbPixel(0, 255, 255));

                        // draw a box around the face if it's facing forward
                        if (facingForward)
                        {
                            Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(img, "output.jpg");
                }
        }
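Utility.GetCameraMatrix is not shown in this example. A common approximation, and a plausible sketch of what it might do here, is a pinhole model that takes the image width as the focal length and the image centre as the principal point (an assumption, not the original helper):

        // Plausible sketch of Utility.GetCameraMatrix (assumption): a 3x3 pinhole
        // intrinsic matrix with focal length ~ image width and a centred principal point.
        public static MatOfDouble GetCameraMatrix(int width, int height)
        {
            double fx = width;                            // focal length in pixels (approximation)
            double cx = width / 2.0, cy = height / 2.0;   // principal point at the image centre
            var m = new MatOfDouble(3, 3);
            m.Set(0, 0, fx);  m.Set(0, 1, 0.0); m.Set(0, 2, cx);
            m.Set(1, 0, 0.0); m.Set(1, 1, fx);  m.Set(1, 2, cy);
            m.Set(2, 0, 0.0); m.Set(2, 1, 0.0); m.Set(2, 2, 1.0);
            return m;
        }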
Example #15
        private static InputDataImages GetFeaturesValuesFromImage(string str)
        {
            var returnClass = new InputDataImages();

            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(str);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    // for each face, draw the landmarks and compute the feature values
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // draw the landmark points on the image
                        for (var i = 0; i < shape.Parts; i++)
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        /////////////// WEEK 9 LAB ////////////////

                        double[] LeftEyebrowDistances  = new double[4];
                        double[] RightEyebrowDistances = new double[4];

                        float LeftEyebrowSum  = 0;
                        float RightEyebrowSum = 0;

                        //LIP VARIABLES
                        double[] LeftLipDistances  = new double[4];
                        double[] RightLipDistances = new double[4];
                        float    LeftLipSum        = 0;
                        float    RightLipSum       = 0;


                        LeftEyebrowDistances[0] = (shape.GetPart(21) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[1] = (shape.GetPart(20) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[2] = (shape.GetPart(19) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[3] = (shape.GetPart(18) - shape.GetPart(39)).Length;

                        RightEyebrowDistances[0] = (shape.GetPart(22) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[1] = (shape.GetPart(23) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[2] = (shape.GetPart(24) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[3] = (shape.GetPart(25) - shape.GetPart(42)).Length;


                        //LIP
                        LeftLipDistances[0] = (shape.GetPart(51) - shape.GetPart(33)).Length;
                        LeftLipDistances[1] = (shape.GetPart(50) - shape.GetPart(33)).Length;
                        LeftLipDistances[2] = (shape.GetPart(49) - shape.GetPart(33)).Length;
                        LeftLipDistances[3] = (shape.GetPart(48) - shape.GetPart(33)).Length;


                        RightLipDistances[0] = (shape.GetPart(51) - shape.GetPart(33)).Length;
                        RightLipDistances[1] = (shape.GetPart(52) - shape.GetPart(33)).Length;
                        RightLipDistances[2] = (shape.GetPart(53) - shape.GetPart(33)).Length;
                        RightLipDistances[3] = (shape.GetPart(54) - shape.GetPart(33)).Length;


                        for (int i = 0; i < 4; i++)
                        {
                            LeftEyebrowSum  += (float)(LeftEyebrowDistances[i] / LeftEyebrowDistances[0]);
                            RightEyebrowSum += (float)(RightEyebrowDistances[i] / RightEyebrowDistances[0]);
                        }

                        LeftLipSum += (float)(LeftLipDistances[1] / LeftLipDistances[0]);
                        LeftLipSum += (float)(LeftLipDistances[2] / LeftLipDistances[0]);
                        LeftLipSum += (float)(LeftLipDistances[3] / LeftLipDistances[0]);


                        RightLipSum += (float)(RightLipDistances[1] / RightLipDistances[0]);
                        RightLipSum += (float)(RightLipDistances[2] / RightLipDistances[0]);
                        RightLipSum += (float)(RightLipDistances[3] / RightLipDistances[0]);

                        double LipWidth  = (float)((shape.GetPart(48) - shape.GetPart(54)).Length / (shape.GetPart(33) - shape.GetPart(51)).Length);
                        double LipHeight = (float)((shape.GetPart(51) - shape.GetPart(57)).Length / (shape.GetPart(33) - shape.GetPart(51)).Length);

                        returnClass.LeftEyebrow  = LeftEyebrowSum;
                        returnClass.RightEyebrow = RightEyebrowSum;
                        returnClass.LeftLip      = LeftLipSum;
                        returnClass.RightLip     = RightLipSum;
                        returnClass.LipWidth     = (float)LipWidth;
                        returnClass.LipHeight    = (float)LipHeight;


                        // export the modified image
                        string filePath = "output" + ".jpg";
                        Dlib.SaveJpeg(img, filePath);
                    }
                }

            using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"TestingFeatureVectorValues.csv", true))
            {
                DirectoryInfo dr = new DirectoryInfo(str);
                //Console.WriteLine(dr.Parent.Name.ToString());
                string ParentFolderName = dr.Parent.Name.ToString();

                file.WriteLine(ParentFolderName + "," + returnClass.LeftEyebrow.ToString() + "," + returnClass.RightEyebrow.ToString()
                               + "," + returnClass.LeftLip.ToString() + "," + returnClass.RightLip.ToString() + "," + returnClass.LipWidth.ToString()
                               + "," + returnClass.LipHeight.ToString());
            }
            return(returnClass);
        }
        // The main program entry point
        static void Main(string[] args)
        {
            bool use_mirror = false;

            // file paths
            string[] files = Directory.GetFiles("images", "*.*", SearchOption.AllDirectories);
            List <FullObjectDetection> shapes = new List <FullObjectDetection>();
            List <string> emotions            = new List <string>();

            // Set up Dlib Face Detector
            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    for (int i = 0; i < files.Length; i++)
                    {
                        var emotion = GetEmotion(files[i]);
                        var img     = Dlib.LoadImage <RgbPixel>(files[i]);

                        // find all faces in the image
                        var faces = fd.Operator(img);
                        // for each face draw over the facial landmarks
                        foreach (var face in faces)
                        {
                            // find the landmark points for this face
                            var shape = sp.Detect(img, face);
                            shapes.Add(shape);
                            emotions.Add(emotion);
                            // draw the landmark points on the image

                            for (var i2 = 0; i2 < shape.Parts; i2++)
                            {
                                var point = shape.GetPart((uint)i2);
                                var rect  = new Rectangle(point);

                                if (point == GetPoint(shape, 40) || point == GetPoint(shape, 22))
                                {
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 255, 0), thickness: 4);
                                }
                                else
                                {
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                                }
                            }
                        }

                        // export the modified image
                        Console.WriteLine(files[i]);
                        Dlib.SaveJpeg(img, "output_" + files[i]);
                    }

                    string header = "leftEyebrow,rightEyebrow,leftLip,rightLip,lipHeight,lipWidth,emotion\n";
                    System.IO.File.WriteAllText(@"feature_vectors.csv", header);
                    for (var i = 0; i < shapes.Count; i++)
                    {
                        var shape   = shapes[i];
                        var emotion = emotions[i];
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"feature_vectors.csv", true))
                        {
                            file.WriteLine(GetLeftEyebrow(shape) + "," + GetRightEyebrow(shape) + "," +
                                           GetLeftLip(shape) + "," + GetRightLip(shape) + "," + GetLipWidth(shape) + "," + GetLipHeight(shape) +
                                           "," + emotion);
                            if (use_mirror)
                            {
                                file.WriteLine(GetRightEyebrow(shape) + "," + GetLeftEyebrow(shape) + "," +
                                               GetRightLip(shape) + "," + GetLeftLip(shape) + "," + GetLipWidth(shape) + "," + GetLipHeight(shape) +
                                               "," + emotion);
                            }
                        }
                    }
                }
        }
Example #17
        static void Main(string[] args)
        {
            // Face detection with the API
            Location[] coord = TestImage(fileName, Model.Hog);


            // Face detection with DLIB
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(fileName);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                Dlib.SaveJpeg(img, outputName);
            }


            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = Dlib.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImageAsMatrix <RgbPixel>(fileName))

                            using (var win = new ImageWindow(img))
                            {
                                var faces = new List <Matrix <RgbPixel> >();
                                foreach (var face in detector.Operator(img))
                                {
                                    var shape          = sp.Detect(img, face);
                                    var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                    var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);

                                    //faces.Add(move(face_chip));
                                    faces.Add(faceChip);

                                    win.AddOverlay(face);
                                }

                                if (!faces.Any())
                                {
                                    Console.WriteLine("No faces found in image!");
                                    return;
                                }

                                // This call asks the DNN to convert each face image in faces into a 128D vector.
                                // In this 128D vector space, images from the same person will be close to each other
                                // but vectors from different people will be far apart.  So we can use these vectors to
                                // identify if a pair of images are from the same person or from different people.
                                var faceDescriptors = net.Operator(faces);

                                // In particular, one simple thing we can do is face clustering.  This next bit of code
                                // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                // algorithm to identify how many people there are and which faces belong to whom.
                                var edges = new List <SamplePair>();
                                for (uint i = 0; i < faceDescriptors.Count; ++i)
                                {
                                    for (var j = i; j < faceDescriptors.Count; ++j)
                                    {
                                        // Faces are connected in the graph if they are close enough.  Here we check if
                                        // the distance between two face descriptors is less than 0.6, which is the
                                        // decision threshold the network was trained to use.  Although you can
                                        // certainly use any other threshold you find useful.
                                        var diff = faceDescriptors[i] - faceDescriptors[j];
                                        if (Dlib.Length(diff) < 0.6)
                                        {
                                            edges.Add(new SamplePair(i, j));
                                        }
                                    }
                                }

                                Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                // This will correctly indicate that there are 4 people in the image.
                                Console.WriteLine($"number of people found in the image: {numClusters}");


                                // Display the result in a list of image windows
                                var winClusters = new List <ImageWindow>();
                                for (var i = 0; i < numClusters; i++)
                                {
                                    winClusters.Add(new ImageWindow());
                                }
                                var tileImages = new List <Matrix <RgbPixel> >();
                                for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                {
                                    var temp = new List <Matrix <RgbPixel> >();
                                    for (var j = 0; j < labels.Length; ++j)
                                    {
                                        if (clusterId == labels[j])
                                        {
                                            temp.Add(faces[j]);
                                        }
                                    }

                                    winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                    var tileImage = Dlib.TileImages(temp);
                                    tileImages.Add(tileImage);
                                    winClusters[(int)clusterId].SetImage(tileImage);
                                }


                                // Finally, let's print one of the face descriptors to the screen.
                                using (var trans = Dlib.Trans(faceDescriptors[0]))
                                {
                                    Console.WriteLine($"face descriptor for one face: {trans}");

                                    // It should also be noted that face recognition accuracy can be improved if jittering
                                    // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                    // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                    // like so:
                                    var jitterImages = JitterImage(faces[0]).ToArray();
                                    var ret          = net.Operator(jitterImages);
                                    using (var m = Dlib.Mat(ret))
                                        using (var faceDescriptor = Dlib.Mean <float>(m))
                                            using (var t = Dlib.Trans(faceDescriptor))
                                            {
                                                Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                // If you use the model without jittering, as we did when clustering the bald guys, it
                                                // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                // procedure a little more accurate but makes face descriptor calculation slower.

                                                Console.WriteLine("hit enter to terminate");
                                                Console.ReadKey();

                                                foreach (var jitterImage in jitterImages)
                                                {
                                                    jitterImage.Dispose();
                                                }

                                                foreach (var tileImage in tileImages)
                                                {
                                                    tileImage.Dispose();
                                                }

                                                foreach (var edge in edges)
                                                {
                                                    edge.Dispose();
                                                }

                                                foreach (var descriptor in faceDescriptors)
                                                {
                                                    descriptor.Dispose();
                                                }

                                                foreach (var face in faces)
                                                {
                                                    face.Dispose();
                                                }
                                            }
                                }
                            }

            System.Console.ReadLine();
        }
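The JitterImage helper used near the end is not shown; in the stock DlibDotNet face-recognition sample it yields a number of randomly jittered copies of a face chip, whose descriptors are then averaged with Dlib.Mean. A sketch along those lines (assuming Dlib.JitterImage and Rand behave as in that sample):

        // Sketch of the missing JitterImage helper (assumption based on the stock sample):
        // produce 100 slightly perturbed copies of the chip for descriptor averaging.
        private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img)
        {
            var rnd = new Rand();
            for (var i = 0; i < 100; ++i)
                yield return Dlib.JitterImage(img, rnd);
        }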
        public static void CreateFeatureVectors()
        {
            int    faceCount = 0;
            float  leftEyebrow, rightEyebrow, leftLip, rightLip, lipHeight, lipWidth;
            string output;

            if (currentDataType == Datatype.Testing)
            {
                output = testingOutput;
            }
            else
            {
                output = trainingOutput;
            }

            string[] dirs = Directory.GetFiles(currentFilePath, "*.*", SearchOption.AllDirectories);

            // Set up Dlib Face Detector
            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    string header = "leftEyebrow,rightEyebrow,leftLip,rightLip,lipWidth,lipHeight,label\n";

                    // Create the CSV file and fill in the first line with the header
                    System.IO.File.WriteAllText(output, header);

                    foreach (string dir in dirs)
                    {
                        // call function that sets the label based on what the filename contains
                        string label = DetermineLabel(dir);

                        // load input image
                        if (!(dir.EndsWith("png") || dir.EndsWith("jpg")))
                        {
                            continue;
                        }

                        var img = Dlib.LoadImage <RgbPixel>(dir);

                        // find all faces in the image
                        var faces = fd.Operator(img);

                        // for each face draw over the facial landmarks
                        foreach (var face in faces)
                        {
                            // Write to the console displaying the progress and current emotion
                            Form1.SetProgress(faceCount, dirs.Length - 1);

                            // find the landmark points for this face
                            var shape = sp.Detect(img, face);

                            for (var i = 0; i < shape.Parts; i++)
                            {
                                RgbPixel colour = new RgbPixel(255, 255, 255);
                                var      point  = shape.GetPart((uint)i);
                                var      rect   = new DlibDotNet.Rectangle(point);
                                Dlib.DrawRectangle(img, rect, color: colour, thickness: 2);
                            }

                            SetFormImage(img);

                            leftEyebrow  = CalculateLeftEyebrow(shape);
                            rightEyebrow = CalculateRightEyebrow(shape);
                            leftLip      = CalculateLeftLip(shape);
                            rightLip     = CalculateRightLip(shape);
                            lipWidth     = CalculateLipWidth(shape);
                            lipHeight    = CalculateLipHeight(shape);

                            using (System.IO.StreamWriter file = new System.IO.StreamWriter(output, true))
                            {
                                file.WriteLine(leftEyebrow + "," + rightEyebrow + "," + leftLip + "," + rightLip + "," + lipWidth + "," + lipHeight + "," + label);
                            }

                            // Increment count used for console output
                            faceCount++;
                        }
                    }

                    if (currentDataType == Datatype.Testing)
                    {
                        var testDataView = mlContext.Data.LoadFromTextFile <FeatureInputData>(output, hasHeader: true, separatorChar: ',');
                        GenerateMetrics(testDataView);
                    }

                    Form1.HideImage();
                }
        }
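The CalculateLeftEyebrow, CalculateRightEyebrow and Calculate*Lip helpers are not shown here, but Example #15 above computes the same quantities inline, so one of them could plausibly look like this (a sketch mirroring those formulas, not the original code):

        // Sketch (assumption): sum of the distances from left-eyebrow points 18-21 to the
        // inner eye corner (point 39), normalised by the 21-39 distance so the feature is
        // scale invariant - the same computation Example #15 performs inline.
        private static float CalculateLeftEyebrow(FullObjectDetection shape)
        {
            var anchor = (shape.GetPart(21) - shape.GetPart(39)).Length;
            float sum = 0;
            for (uint i = 18; i <= 21; i++)
                sum += (float)((shape.GetPart(i) - shape.GetPart(39)).Length / anchor);
            return sum;
        }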