Example #1
        private void BackgroundWorkerOnDoWork(object sender, DoWorkEventArgs doWorkEventArgs)
        {
            var path = doWorkEventArgs.Argument as string;

            if (string.IsNullOrWhiteSpace(path) || !File.Exists(path))
            {
                return;
            }

            // DlibDotNet can create an Array2D directly from a file, but this sample demonstrates
            // converting a managed image class to a dlib class and vice versa.
            using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var ms = new MemoryStream(File.ReadAllBytes(path)))
                    using (var bitmap = (Bitmap)Image.FromStream(ms))
                    {
                        using (var image = bitmap.ToArray2D <RgbPixel>())
                        {
                            var dets = faceDetector.Detect(image);
                            foreach (var r in dets)
                            {
                                Dlib.DrawRectangle(image, r, new RgbPixel {
                                    Green = 255
                                });
                            }

                            var result = image.ToBitmap();
                            this.pictureBox.Invoke(new Action(() =>
                            {
                                this.pictureBox.Image?.Dispose();
                                this.pictureBox.Image = result;
                            }));
                        }
                    }
        }
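A handler like this only receives the image path as its worker argument; for context, a minimal wiring sketch follows. The form, button, and worker field names are assumptions and are not part of the example above.

        // Minimal wiring sketch (assumed names): subscribe once, then pass the
        // selected file path as the argument that BackgroundWorkerOnDoWork reads.
        private readonly BackgroundWorker backgroundWorker = new BackgroundWorker();

        public MainForm()
        {
            InitializeComponent();
            backgroundWorker.DoWork += BackgroundWorkerOnDoWork;
        }

        private void detectButton_Click(object sender, EventArgs e)
        {
            if (!backgroundWorker.IsBusy)
            {
                // arrives in the handler as doWorkEventArgs.Argument
                backgroundWorker.RunWorkerAsync(@"C:\images\face.jpg");
            }
        }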
Example #2
        /// <summary>
        /// Gets an appropriate cropping for a face.
        /// </summary>
        /// <param name="file">The image file to scan for faces.</param>
        /// <param name="width">The width of the source image in pixels.</param>
        /// <param name="height">The height of the source image in pixels.</param>
        /// <returns>The computed crop, or null if no faces (or too many) were detected.</returns>
        public static Dimension GetFaceCrop(FileInfo file, int width, int height)
        {
            int top       = int.MaxValue;
            int left      = int.MaxValue;
            int bottom    = int.MinValue;
            int right     = int.MinValue;
            int faceCount = 0;

            using (FrontalFaceDetector faceDetector = Dlib.GetFrontalFaceDetector())
            {
                foreach (Rectangle face in faceDetector.Operator(Dlib.LoadImage <RgbPixel>(file.FullName)))
                {
                    faceCount++;
                    top    = Math.Min(face.Top, top);
                    left   = Math.Min(face.Left, left);
                    bottom = Math.Max(face.Bottom, bottom);
                    right  = Math.Max(face.Right, right);
                }
            }

            // no faces detected, or too many
            if (top == int.MaxValue || faceCount > Configuration.Get.FaceDetectionLimit)
            {
                return(null);
            }

            int cropWidth  = right - left;
            int cropHeight = bottom - top;

            top    -= cropHeight;
            bottom += cropHeight;
            left   -= (int)((double)cropWidth / 1.5);
            right  += (int)((double)cropWidth / 1.5);

            if (top < 0)
            {
                top = 0;
            }
            if (bottom > height)
            {
                bottom = height;
            }
            if (left < 0)
            {
                left = 0;
            }
            if (right > width)
            {
                right = width;
            }

            int finalWidth  = right - left;
            int finalHeight = bottom - top;

            Dimension crop = GetCroppedThumbnailDimensions(finalWidth, finalHeight, true);

            crop.CropTop  += top;
            crop.CropLeft += left;

            return(crop);
        }
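A hedged usage sketch for GetFaceCrop follows; Dimension and GetCroppedThumbnailDimensions are this project's own types, and the file path and image size are only illustrative.

        // Usage sketch: crop around the detected faces of a 1920x1080 photo.
        // A null result means no faces were detected, or more than the configured limit.
        FileInfo photo = new FileInfo(@"photos\group.jpg");
        Dimension crop = GetFaceCrop(photo, 1920, 1080);
        if (crop == null)
        {
            Console.WriteLine($"No usable face crop for {photo.Name}");
        }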
 public ImageUtils()
 {
     detector = Dlib.GetFrontalFaceDetector();
     sp       =
         ShapePredictor.Deserialize(
             @"..\External\shape_predictor_68_face_landmarks.dat");
 }
Example #4
        public static IEnumerable <Rectangle> RunDetectorWithUpscale2(FrontalFaceDetector detector,
                                                                      Image image,
                                                                      uint upsamplingAmount)
        {
            if (detector == null)
            {
                throw new ArgumentNullException(nameof(detector));
            }
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (image.IsDisposed)
            {
                throw new ObjectDisposedException(nameof(image));
            }

            var          detectionConfidences = new List <double>();
            var          weightIndices        = new List <ulong>();
            const double adjustThreshold      = 0.0;

            return(RunDetectorWithUpscale1(detector,
                                           image,
                                           upsamplingAmount,
                                           adjustThreshold,
                                           detectionConfidences,
                                           weightIndices));
        }
        public DLibFaceIdentification(IImageRotationService imageRotationService)
        {
            this.imageRotationService = imageRotationService ?? throw new ArgumentNullException(nameof(imageRotationService));
            detector = Dlib.GetFrontalFaceDetector();

            // set up a 5-point landmark detector
            predictor = ShapePredictor.Deserialize("model/shape_predictor_5_face_landmarks.dat");

            // set up a neural network for face recognition
            dnn = DlibDotNet.Dnn.LossMetric.Deserialize("model/dlib_face_recognition_resnet_model_v1.dat");

            // create a color palette for plotting
            palette = new RgbPixel[]
            {
                new RgbPixel(0xe6, 0x19, 0x4b),
                new RgbPixel(0xf5, 0x82, 0x31),
                new RgbPixel(0xff, 0xe1, 0x19),
                new RgbPixel(0xbc, 0xf6, 0x0c),
                new RgbPixel(0x3c, 0xb4, 0x4b),
                new RgbPixel(0x46, 0xf0, 0xf0),
                new RgbPixel(0x43, 0x63, 0xd8),
                new RgbPixel(0x91, 0x1e, 0xb4),
                new RgbPixel(0xf0, 0x32, 0xe6),
                new RgbPixel(0x80, 0x80, 0x80)
            };
        }
Example #6
        public void LandmarkTest()
        {
            using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var shapePredictor = new ShapePredictor(Configuration.SHAP_PREDICTOR_CONFIG))
                {
                    // convert image to dlib format
                    var img = Utils.LoadImageAsBitmap("TestImage/pic01.jpg").ToArray2D <RgbPixel>();

                    // detect faces
                    var faces = faceDetector.Detect(img);

                    // detect facial landmarks
                    foreach (var rect in faces)
                    {
                        // detect facial landmarks
                        var shape = shapePredictor.Detect(img, rect);

                        // The left eye uses landmark indices [42, 47].
                        Landmarks landmarkLeftEye = new Landmarks(42, 47, shape);
                        Assert.AreEqual(landmarkLeftEye.LandMarkPointList.Count, 6);
                        // the valid index range is 0-67
                        Landmarks landmark2 = new Landmarks(42, 68, shape);
                    }
                }
        }
        private void buscarrosto(Bitmap frame)
        {
            Image <Rgb, Byte> imageCV = new Image <Rgb, byte>(frame);

            Emgu.CV.Mat mat   = imageCV.Mat;
            var         array = new byte[mat.Width * mat.Height * mat.ElementSize];

            mat.CopyTo(array);

            using (Array2D <RgbPixel> image = Dlib.LoadImageData <RgbPixel>(array, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * mat.ElementSize)))
            {
                using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

                {
                    var faces = fd.Operator(image);
                    foreach (DlibDotNet.Rectangle face in faces)
                    {
                        FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                        ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                        Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                        MainWindow.main.Statusa1 = bitmap1;
                        Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                }
                frame = image.ToBitmap <RgbPixel>();
                MainWindow.main.Statusa = frame;
            }
        }
Example #8
        private static void Main(string[] args)
        {
            if (args.Length == 0)
            {
                Console.WriteLine("Give some image files as arguments to this program.");
                return;
            }

            using (var win = new ImageWindow())
                using (var detector = FrontalFaceDetector.GetFrontalFaceDetector())
                    foreach (var file in args)
                    {
                        using (var img = Dlib.LoadImage <byte>(file))
                        {
                            Dlib.PyramidUp(img);

                            var dets = detector.Detect(img);
                            Console.WriteLine($"Number of faces detected: {dets.Length}");

                            win.ClearOverlay();
                            win.SetImage(img);
                            win.AddOverlay(dets, new RgbPixel {
                                Red = 255
                            });

                            Console.WriteLine("hit enter to process next frame");
                            Console.ReadKey();
                        }
                    }
        }
Example #9
        public void DetectFace()
        {
            if (this._ShapePredictor == null)
            {
                Assert.Fail("ShapePredictor is not initialized!!");
            }

            var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector();
            //Interpolation.PyramidUp(image);

            var path  = this.GetDataFile("Lenna.jpg");
            var image = Dlib.LoadImage <byte>(path.FullName);

            var dets = faceDetector.Detect(image);

            Assert.AreEqual(dets.Length, 1);

            var rects = new List <Rectangle>();

            const int offset = 1;
            var       shapes = dets.Select(r => this._ShapePredictor.Detect(image, r)).ToList();

            foreach (var shape in shapes)
            {
                var r     = shape.Rect;
                var parts = shape.Parts;
                for (uint i = 0; i < parts; i++)
                {
                    var part = shape.GetPart(i);
                    var pr   = new Rectangle(
                        part.X - offset, part.Y - offset, part.X + offset, part.Y + offset);
                    rects.Add(pr);
                }

                rects.Add(r);
            }

#if DEBUG
            using (var bmp = Image.FromFile(path.FullName))
                using (var g = Graphics.FromImage(bmp))
                    using (var p = new Pen(Color.Green, 1f))
                    {
                        // If PyramidUp was executed, the image is twice as large,
                        // which means the detected coordinates are also scaled by a factor of two.
                        foreach (var r in rects)
                        {
                            g.DrawRectangle(p, r.Left, r.Top, r.Width, r.Height);
                        }
                        //g.DrawRectangle(p, r.Left / 2f, r.Top / 2f, r.Width / 2f, r.Height / 2f);

                        bmp.Save(Path.Combine(this.GetOutDir(this.GetType().Name), "DetectFace.bmp"));
                    }
#endif

            this.DisposeAndCheckDisposedState(rects);
            this.DisposeAndCheckDisposedState(dets);
            this.DisposeAndCheckDisposedState(faceDetector);
            this.DisposeAndCheckDisposedState(image);
        }
        /// <summary>
        /// Initializes a new instance of the <see cref="FaceRecognition"/> class with the directory path that stores model files.
        /// </summary>
        /// <param name="directory">The directory path that stores model files.</param>
        /// <exception cref="FileNotFoundException">The model file is not found.</exception>
        /// <exception cref="DirectoryNotFoundException">The specified directory path is not found.</exception>
        private FaceRecognition(string directory)
        {
            if (!Directory.Exists(directory))
            {
                throw new DirectoryNotFoundException(directory);
            }

            var predictor68PointModel = Path.Combine(directory, FaceRecognitionModels.GetPosePredictorModelLocation());

            if (!File.Exists(predictor68PointModel))
            {
                throw new FileNotFoundException(predictor68PointModel);
            }

            var predictor5PointModel = Path.Combine(directory, FaceRecognitionModels.GetPosePredictorFivePointModelLocation());

            if (!File.Exists(predictor5PointModel))
            {
                throw new FileNotFoundException(predictor5PointModel);
            }

            var cnnFaceDetectionModel = Path.Combine(directory, FaceRecognitionModels.GetCnnFaceDetectorModelLocation());

            if (!File.Exists(cnnFaceDetectionModel))
            {
                throw new FileNotFoundException(cnnFaceDetectionModel);
            }

            var faceRecognitionModel = Path.Combine(directory, FaceRecognitionModels.GetFaceRecognitionModelLocation());

            if (!File.Exists(faceRecognitionModel))
            {
                throw new FileNotFoundException(faceRecognitionModel);
            }

            this._FaceDetector?.Dispose();
            this._FaceDetector = DlibDotNet.Dlib.GetFrontalFaceDetector();

            this._PosePredictor68Point?.Dispose();
            this._PosePredictor68Point = ShapePredictor.Deserialize(predictor68PointModel);

            this._PosePredictor5Point?.Dispose();
            this._PosePredictor5Point = ShapePredictor.Deserialize(predictor5PointModel);

            this._CnnFaceDetector?.Dispose();
            this._CnnFaceDetector = LossMmod.Deserialize(cnnFaceDetectionModel);

            this._FaceEncoder?.Dispose();
            this._FaceEncoder = LossMetric.Deserialize(faceRecognitionModel);

            var predictor194PointModel = Path.Combine(directory, FaceRecognitionModels.GetPosePredictor194PointModelLocation());

            if (File.Exists(predictor194PointModel))
            {
                this._PosePredictor194Point?.Dispose();
                this._PosePredictor194Point = ShapePredictor.Deserialize(predictor194PointModel);
            }
        }
        /// <summary>
        /// Called when MainForm loads.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void MainForm_Load(object sender, EventArgs e)
        {
            SetCamera();

            // initialize face detector
            faceDetector = FrontalFaceDetector.GetFrontalFaceDetector();

            // initialize shape predictor
            shapePredictor = new ShapePredictor("shape_predictor_68_face_landmarks.dat");

            // start the player
            videoPlayer.Start();
        }
        public void DetectFace()
        {
            if (this._ShapePredictor == null)
            {
                Assert.Fail("ShapePredictor is not initialized!!");
            }

            var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector();
            //Interpolation.PyramidUp(image);

            var path  = this.GetDataFile("Lenna.jpg");
            var image = Dlib.LoadImage <RgbPixel>(path.FullName);

            var dets = faceDetector.Detect(image);

            Assert.AreEqual(dets.Length, 1);

            var rects = new List <Rectangle>();

            const int offset = 1;
            var       shapes = dets.Select(r => this._ShapePredictor.Detect(image, r)).ToList();

            foreach (var shape in shapes)
            {
                var r     = shape.Rect;
                var parts = shape.Parts;
                for (uint i = 0; i < parts; i++)
                {
                    var part = shape.GetPart(i);
                    var pr   = new Rectangle(
                        part.X - offset, part.Y - offset, part.X + offset, part.Y + offset);
                    rects.Add(pr);
                }

                rects.Add(r);
            }

            foreach (var r in rects)
            {
                Dlib.DrawRectangle(image, r, new RgbPixel {
                    Green = 255
                });
            }

            Dlib.SaveBmp(image, Path.Combine(this.GetOutDir(this.GetType().Name), "DetectFace.bmp"));

            this.DisposeAndCheckDisposedState(rects);
            this.DisposeAndCheckDisposedState(dets);
            this.DisposeAndCheckDisposedState(faceDetector);
            this.DisposeAndCheckDisposedState(image);
        }
        /// <summary>
        /// Called when MainForm loads.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void MainForm_Load(object sender, EventArgs e)
        {
            SetCamera();

            // load the beard image
            beardImage = Bitmap.FromFile(@".\beard.png") as Bitmap;

            // set up our face detector and shape predictor
            faceDetector   = FrontalFaceDetector.GetFrontalFaceDetector();
            shapePredictor = new ShapePredictor("shape_predictor_68_face_landmarks.dat");

            // start the player
            videoPlayer.Start();
        }
Example #14
 private List <List <Point2f> > DetectFaces(FrontalFaceDetector detector, ShapePredictor predictor, Mat mat)
 {
     using (var image = ToArray(mat))
     {
         var points = detector.Operator(image)
                      .Select(rectangle => predictor.Detect(image, rectangle))
                      .Where(shape => shape.Parts > 2)
                      .Select(shape => Enumerable.Range(0, (int)shape.Parts)
                              .Select(i => shape.GetPart((uint)i))
                              .Select((p, i) => new Point2f(p.X, p.Y))
                              .ToList())
                      .ToList();
         return(points);
     }
 }
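Example #14 depends on a ToArray helper that is not shown on this page. A plausible minimal version for an 8-bit, 3-channel OpenCvSharp frame is sketched below; the channel-order handling is an assumption, and Dlib.LoadImageData is used the same way as in the other examples here.

 // Sketch of the missing ToArray helper: copy the Mat's pixel buffer and wrap it
 // in a dlib Array2D. Assumes an 8-bit, 3-channel (BGR) frame; convert with
 // Cv2.CvtColor first if exact RGB colors matter for your use case.
 private static Array2D <RgbPixel> ToArray(Mat mat)
 {
     int channels = mat.Channels();
     var buffer   = new byte[mat.Height * mat.Width * channels];

     System.Runtime.InteropServices.Marshal.Copy(mat.Data, buffer, 0, buffer.Length);
     return(Dlib.LoadImageData <RgbPixel>(buffer, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * channels)));
 }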
        private Bitmap ProcessImage(Bitmap image)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var sp = new ShapePredictor("shape_predictor_68_face_landmarks.dat"))
                {
                    // convert image to dlib format
                    var img = image.ToArray2D <RgbPixel>();

                    // detect faces
                    var faces = fd.Detect(img);

                    // detect facial landmarks
                    foreach (var rect in faces)
                    {
                        // detect facial landmarks
                        var shape = sp.Detect(img, rect);

                        // extract face chip
                        var chip      = Dlib.GetFaceChipDetails(shape);
                        var thumbnail = Dlib.ExtractImageChips <RgbPixel>(img, chip);

                        // add picturebox
                        var box = new PictureBox()
                        {
                            Image    = thumbnail.ToBitmap <RgbPixel>(),
                            SizeMode = PictureBoxSizeMode.Zoom,
                            Width    = 62,
                            Height   = 62
                        };
                        imagesPanel.Controls.Add(box);

                        // draw landmarks on main image
                        var lines = Dlib.RenderFaceDetections(new FullObjectDetection[] { shape });
                        foreach (var line in lines)
                        {
                            Dlib.DrawRectangle(
                                img,
                                new DlibDotNet.Rectangle(line.Point1),
                                new RgbPixel {
                                Green = 255
                            },
                                8);
                        }
                    }
                    return(img.ToBitmap <RgbPixel>());
                }
        }
        /// <summary>
        /// Called when MainForm is closed.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void MainForm_FormClosed(object sender, FormClosedEventArgs e)
        {
            videoPlayer.Stop();

            // dispose dlib resources
            if (faceDetector != null)
            {
                faceDetector.Dispose();
                faceDetector = null;
            }
            if (shapePredictor != null)
            {
                shapePredictor.Dispose();
                shapePredictor = null;
            }
        }
        /// <summary>
        /// Called when MainForm loads.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void MainForm_Load(object sender, EventArgs e)
        {
            // initialize video player
            SetCamera();

            // load the car picture
            carBox.Image = Bitmap.FromFile(@"input.jpg");

            // initialize face detector
            faceDetector = FrontalFaceDetector.GetFrontalFaceDetector();

            // initialize shape predictor to detect landmarks
            shapePredictor = new ShapePredictor("shape_predictor_68_face_landmarks.dat");

            // start the players
            videoPlayer.Start();
        }
        /// <summary>
        /// Get the image with detected faces highlighted by the rectangle
        /// </summary>
        /// <param name="image"></param>
        /// <param name="numOfFaceDetected"></param>
        /// <returns></returns>
        public Bitmap FaceDetectionFromImage(Bitmap image, out int numOfFaceDetected)
        {
            numOfFaceDetected = 0;
            if (image != null)
            {
                // set up Dlib facedetectors and shapedetectors
                using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                    using (var shapePredictor = new ShapePredictor(Configuration.SHAP_PREDICTOR_CONFIG))
                    {
                        // convert image to dlib format
                        var img = image.ToArray2D <RgbPixel>();

                        // detect faces
                        var faces = faceDetector.Detect(img);

                        // detect facial landmarks
                        foreach (var rect in faces)
                        {
                            // detect facial landmarks
                            var shape = shapePredictor.Detect(img, rect);

                            // The left eye uses landmark indices [42, 47].
                            Landmarks landmarkLeftEye = new Landmarks(42, 47, shape);
                            // The right eye uses landmark indices [36, 41].
                            Landmarks landmarkRightEye = new Landmarks(36, 41, shape);
                            // draw the landmark rectangles
                            var leftEyeRect      = Utils.RectangleAdjust(landmarkLeftEye.GetLandmarkRectangle(), img);
                            var rightEyeRect     = Utils.RectangleAdjust(landmarkRightEye.GetLandmarkRectangle(), img);
                            var adjustedFaceRect = Utils.RectangleAdjust(rect, img);

                            Dlib.DrawRectangle(img, adjustedFaceRect, new RgbPixel {
                                Blue = 255
                            }, 5);
                            Dlib.DrawRectangle(img, leftEyeRect, new RgbPixel {
                                Green = 255
                            }, 2);
                            Dlib.DrawRectangle(img, rightEyeRect, new RgbPixel {
                                Green = 255
                            }, 2);
                        }
                        numOfFaceDetected = faces.Length;
                        return(img.ToBitmap <RgbPixel>());
                    }
            }
            return(image);
        }
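A hedged call-site sketch for FaceDetectionFromImage; the input path reuses the test image name from Example #6 and is only illustrative.

        // Usage sketch: annotate a photo and report how many faces were found.
        using (var input = (Bitmap)Image.FromFile(@"TestImage\pic01.jpg"))
        {
            Bitmap annotated = FaceDetectionFromImage(input, out int numOfFaces);
            Console.WriteLine($"Faces detected: {numOfFaces}");
        }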
Example #19
        public Form1()
        {
            InitializeComponent();
            DoubleBuffered = true;

            shapes = new List <FullObjectDetection>();

            img           = new Mat();
            eyeImage      = new Bitmap(Properties.Resources.Star);
            mustacheImage = new Bitmap(Properties.Resources.Mustache);

            detector  = Dlib.GetFrontalFaceDetector();
            predictor = ShapePredictor.Deserialize("Resources\\shape_predictor_68_face_landmarks.dat");

            capture = new VideoCapture();
            capture.Open(0);
            Application.Idle += OnCameraFrame;
        }
        public void TestFrontalFaceDetector()
        {
            const string imagePath = "images\\lenna.bmp";

            using (var window = new ImageWindow())
                using (var image = new Array2dUchar())
                    using (var detector = new FrontalFaceDetector())
                    {
                        var bmp = new System.Drawing.Bitmap(imagePath);
                        image.SetBitmap(bmp);
                        image.PyramidUp();
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);
                        var rects = detector.DetectFaces(image, -0.5);
                        foreach (var rect in rects)
                        {
                            Console.WriteLine(rect);
                        }
                    }
        }
        public void GetImage(string imagePath)
        {
            Array2D <RgbPixel> image = Dlib.LoadImage <RgbPixel>(imagePath);

            using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

            {
                var faces = fd.Operator(image);
                foreach (DlibDotNet.Rectangle face in faces)
                {
                    FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                    ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                    Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                    Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                    MainWindow.main.Statusa1 = bitmap1;
                    Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }
            }
            Bitmap frame = image.ToBitmap <RgbPixel>();

            MainWindow.main.Statusa = frame;
        }
Example #22
 public Form1()
 {
     InitializeComponent();
     this.capture = new VideoCapture(0);
     this.frame   = new Mat();
     this.fd      = Dlib.GetFrontalFaceDetector();
     this.sp      = ShapePredictor.Deserialize(@"C:\Users\trago\OneDrive\Desktop\OpenCV\shape_predictor_68_face_landmarks.dat");
     this.model   = Utility.GetFaceModel();
     this.coeffs  = new MatOfDouble(4, 1);
     this.coeffs.SetTo(0);
     this.poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
     this.poseProjection = new MatOfPoint2d();
     this.checker        = new int[4] {
         100, -10, 10, 0
     };
     this.text = new string[4] {
         "1. เอาหน้าใส่กรอบ", "2. ก้มหน้าเล็กน้อย", "3. เงยหน้าเล็กน้อย", "4. หน้าตรง"
     };
     this.timeset = 3;
     this.size    = new Size(250, 300);
     SetStart();
     SetZero();
 }
Example #23
        public static IEnumerable <Rectangle> RunDetectorWithUpscale1(FrontalFaceDetector detector,
                                                                      Image img,
                                                                      uint upsamplingAmount,
                                                                      double adjustThreshold,
                                                                      List <double> detectionConfidences,
                                                                      List <ulong> weightIndices)
        {
            var rectangles = new List <Rectangle>();

            if (img.Mode == Mode.Greyscale)
            {
                var greyscaleMatrix = img.Matrix as Matrix <byte>;
                if (upsamplingAmount == 0)
                {
                    detector.Operator(greyscaleMatrix, out var rectDetections, adjustThreshold);

                    var dets = rectDetections.ToArray();
                    SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                    foreach (var rectDetection in dets)
                    {
                        rectDetection.Dispose();
                    }
                }
                else
                {
                    using (var pyr = new PyramidDown(2))
                    {
                        Matrix <byte> temp = null;

                        try
                        {
                            DlibDotNet.Dlib.PyramidUp(greyscaleMatrix, pyr, out temp);

                            var levels = upsamplingAmount - 1;
                            while (levels > 0)
                            {
                                levels--;
                                DlibDotNet.Dlib.PyramidUp(temp);
                            }

                            detector.Operator(temp, out var rectDetections, adjustThreshold);

                            var dets = rectDetections.ToArray();
                            foreach (var t in dets)
                            {
                                t.Rect = pyr.RectDown(t.Rect, upsamplingAmount);
                            }

                            SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                            foreach (var rectDetection in dets)
                            {
                                rectDetection.Dispose();
                            }
                        }
                        finally
                        {
                            temp?.Dispose();
                        }
                    }
                }

                return(rectangles);
            }
            else
            {
                var rgbMatrix = img.Matrix as Matrix <RgbPixel>;
                if (upsamplingAmount == 0)
                {
                    detector.Operator(rgbMatrix, out var rectDetections, adjustThreshold);

                    var dets = rectDetections.ToArray();
                    SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                    foreach (var rectDetection in dets)
                    {
                        rectDetection.Dispose();
                    }
                }
                else
                {
                    using (var pyr = new PyramidDown(2))
                    {
                        Matrix <RgbPixel> temp = null;

                        try
                        {
                            DlibDotNet.Dlib.PyramidUp(rgbMatrix, pyr, out temp);

                            var levels = upsamplingAmount - 1;
                            while (levels > 0)
                            {
                                levels--;
                                DlibDotNet.Dlib.PyramidUp(temp);
                            }

                            detector.Operator(temp, out var rectDetections, adjustThreshold);

                            var dets = rectDetections.ToArray();
                            foreach (var t in dets)
                            {
                                t.Rect = pyr.RectDown(t.Rect, upsamplingAmount);
                            }

                            SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                            foreach (var rectDetection in dets)
                            {
                                rectDetection.Dispose();
                            }
                        }
                        finally
                        {
                            temp?.Dispose();
                        }
                    }
                }

                return(rectangles);
            }
        }
 public void GetFrontalFaceDetector()
 {
     this._FrontalFaceDetector = Dlib.GetFrontalFaceDetector();
 }
Example #25
        public void DetectFace2()
        {
            if (this._ShapePredictor == null)
            {
                Assert.Fail("ShapePredictor is not initialized!!");
            }

            const string testName = "DetectFace2";
            var          path     = this.GetDataFile("Lenna_mini.bmp");
            var          tests    = new[]
            {
                new { Type = ImageTypes.RgbPixel, ExpectResult = true },
                new { Type = ImageTypes.UInt8, ExpectResult = true },
                new { Type = ImageTypes.UInt16, ExpectResult = true },
                new { Type = ImageTypes.Int32, ExpectResult = true },
                new { Type = ImageTypes.HsiPixel, ExpectResult = true },
                new { Type = ImageTypes.Float, ExpectResult = true },
                new { Type = ImageTypes.Double, ExpectResult = true },
                new { Type = ImageTypes.RgbAlphaPixel, ExpectResult = false }
            };

            using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                foreach (var input in tests)
                {
                    var         expectResult = input.ExpectResult;
                    var         imageObj     = DlibTest.LoadImage(input.Type, path);
                    Rectangle[] dets         = null;

                    var outputImageAction = new Func <bool, Array2DBase>(expect =>
                    {
                        dets = faceDetector.Detect(imageObj);
                        return(imageObj);
                    });

                    var successAction = new Action <Array2DBase>(image =>
                    {
                        var rects        = new List <Rectangle>();
                        const int offset = 1;
                        var shapes       = dets.Select(r => this._ShapePredictor.Detect(image, r)).ToList();
                        foreach (var shape in shapes)
                        {
                            var r     = shape.Rect;
                            var parts = shape.Parts;
                            for (uint i = 0; i < parts; i++)
                            {
                                var part = shape.GetPart(i);
                                var pr   = new Rectangle(
                                    part.X - offset, part.Y - offset, part.X + offset, part.Y + offset);
                                rects.Add(pr);
                            }

                            rects.Add(r);
                        }

                            // This test does NOT check whether the output image and the detected face area are correct.
                        //Dlib.SaveJpeg(image, $"{Path.Combine(this.GetOutDir(type, testName), $"2008_001322_{input.Type}.jpg")}");
                    });

                    var failAction = new Action(() =>
                    {
                        Assert.Fail($"{testName} should throw excption for InputType: {input.Type}.");
                    });

                    var finallyAction = new Action(() =>
                    {
                        this.DisposeAndCheckDisposedState(imageObj);
                    });

                    var exceptionAction = new Action(() =>
                    {
                        Console.WriteLine($"Failed to execute {testName} to InputType: {input.Type}.");
                    });

                    DoTest(outputImageAction, expectResult, successAction, finallyAction, failAction, exceptionAction);
                }
        }
Example #26
 public Implementation(ILogger <Implementation> logger)
 {
     _faceDetector = Dlib.GetFrontalFaceDetector();
     _logger       = logger;
 }
Example #27
        private static void Main(string[] args)
        {
            if (args.Length != 1)
            {
                Console.WriteLine("Run this example by invoking it like this: ");
                Console.WriteLine("   ./DnnFaceRecognition faces/bald_guys.jpg");
                Console.WriteLine("You will also need to get the face landmarking model file as well as ");
                Console.WriteLine("the face recognition model file.  Download and then decompress these files from: ");
                Console.WriteLine("http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2");
                Console.WriteLine("http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2");
                return;
            }

            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = FrontalFaceDetector.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = new ShapePredictor("shape_predictor_5_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImage <RgbPixel>(args[0]))
                            using (var mat = new Matrix <RgbPixel>(img))

                                // Display the raw image on the screen
                                using (var win = new ImageWindow(img))
                                {
                                    // Run the face detector on the image of our action heroes, and for each face extract a
                                    // copy that has been normalized to 150x150 pixels in size and appropriately rotated
                                    // and centered.
                                    var faces = new List <Matrix <RgbPixel> >();
                                    foreach (var face in detector.Detect(img))
                                    {
                                        var shape          = sp.Detect(img, face);
                                        var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                        var faceChip       = Dlib.ExtractImageChip <RgbPixel>(mat, faceChipDetail);

                                        //faces.Add(move(face_chip));
                                        faces.Add(faceChip);

                                        // Also put some boxes on the faces so we can see that the detector is finding
                                        // them.
                                        win.AddOverlay(face);
                                    }

                                    if (!faces.Any())
                                    {
                                        Console.WriteLine("No faces found in image!");
                                        return;
                                    }

                                    // This call asks the DNN to convert each face image in faces into a 128D vector.
                                    // In this 128D vector space, images from the same person will be close to each other
                                    // but vectors from different people will be far apart.  So we can use these vectors to
                                    // identify if a pair of images are from the same person or from different people.
                                    var faceDescriptors = net.Operator(faces);

                                    // In particular, one simple thing we can do is face clustering.  This next bit of code
                                    // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                    // algorithm to identify how many people there are and which faces belong to whom.
                                    var edges = new List <SamplePair>();
                                    for (uint i = 0; i < faceDescriptors.Count; ++i)
                                    {
                                        for (var j = i; j < faceDescriptors.Count; ++j)
                                        {
                                            // Faces are connected in the graph if they are close enough.  Here we check if
                                            // the distance between two face descriptors is less than 0.6, which is the
                                            // decision threshold the network was trained to use.  Although you can
                                            // certainly use any other threshold you find useful.
                                            var diff = faceDescriptors[i] - faceDescriptors[j];
                                            if (Dlib.Length(diff) < 0.6)
                                            {
                                                edges.Add(new SamplePair(i, j));
                                            }
                                        }
                                    }

                                    Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                    // This will correctly indicate that there are 4 people in the image.
                                    Console.WriteLine($"number of people found in the image: {numClusters}");

                                    // Now let's display the face clustering results on the screen.  You will see that it
                                    // correctly grouped all the faces.
                                    var winClusters = new List <ImageWindow>();
                                    for (var i = 0; i < numClusters; i++)
                                    {
                                        winClusters.Add(new ImageWindow());
                                    }
                                    var tileImages = new List <Matrix <RgbPixel> >();
                                    for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                    {
                                        var temp = new List <Matrix <RgbPixel> >();
                                        for (var j = 0; j < labels.Length; ++j)
                                        {
                                            if (clusterId == labels[j])
                                            {
                                                temp.Add(faces[j]);
                                            }
                                        }

                                        winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                        var tileImage = Dlib.TileImages(temp);
                                        tileImages.Add(tileImage);
                                        winClusters[(int)clusterId].SetImage(tileImage);
                                    }

                                    // Finally, let's print one of the face descriptors to the screen.
                                    using (var trans = Dlib.Trans(faceDescriptors[0]))
                                    {
                                        Console.WriteLine($"face descriptor for one face: {trans}");

                                        // It should also be noted that face recognition accuracy can be improved if jittering
                                        // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                        // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                        // like so:
                                        var jitterImages = JitterImage(faces[0]).ToArray();
                                        var ret          = net.Operator(jitterImages);
                                        using (var m = Dlib.Mat(ret))
                                            using (var faceDescriptor = Dlib.Mean <float>(m))
                                                using (var t = Dlib.Trans(faceDescriptor))
                                                {
                                                    Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                    // If you use the model without jittering, as we did when clustering the bald guys, it
                                                    // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                    // procedure a little more accurate but makes face descriptor calculation slower.

                                                    Console.WriteLine("hit enter to terminate");
                                                    Console.ReadKey();

                                                    foreach (var jitterImage in jitterImages)
                                                    {
                                                        jitterImage.Dispose();
                                                    }

                                                    foreach (var tileImage in tileImages)
                                                    {
                                                        tileImage.Dispose();
                                                    }

                                                    foreach (var edge in edges)
                                                    {
                                                        edge.Dispose();
                                                    }

                                                    foreach (var descriptor in faceDescriptors)
                                                    {
                                                        descriptor.Dispose();
                                                    }

                                                    foreach (var face in faces)
                                                    {
                                                        face.Dispose();
                                                    }
                                                }
                                    }
                                }
        }
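Example #27 calls a JitterImage helper that is not reproduced above. In the upstream DlibDotNet port of dlib's dnn_face_recognition_ex it is essentially the sketch below; it assumes your DlibDotNet version exposes Dlib.JitterImage and Rand.

        // Sketch of the JitterImage helper used above: produce 100 copies of the face
        // chip, each slightly zoomed, rotated, translated and randomly mirrored.
        private static IEnumerable <Matrix <RgbPixel> > JitterImage(Matrix <RgbPixel> img)
        {
            var rnd   = new Rand();
            var crops = new List <Matrix <RgbPixel> >();

            for (var i = 0; i < 100; ++i)
            {
                crops.Add(Dlib.JitterImage(img, rnd));
            }

            return(crops);
        }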
Example #28
        private static void Main(string[] args)
        {
            if (args.Length == 0)
            {
                Console.WriteLine("Give some image files as arguments to this program.");
                Console.WriteLine("Call this program like this:");
                Console.WriteLine("./face_landmark_detection_ex shape_predictor_68_face_landmarks.dat faces/*.jpg");
                Console.WriteLine("You can get the shape_predictor_68_face_landmarks.dat file from:");
                Console.WriteLine("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2");
                return;
            }

            using (var win = new ImageWindow())
                using (var winFaces = new ImageWindow())
                    using (var detector = FrontalFaceDetector.GetFrontalFaceDetector())
                        using (var sp = new ShapePredictor(args[0]))
                            foreach (var file in args.ToList().GetRange(1, args.Length - 1))
                            {
                                Console.WriteLine($"processing image {file}");

                                using (var img = Dlib.LoadImage <RgbPixel>(file))
                                {
                                    Dlib.PyramidUp(img);

                                    var dets = detector.Detect(img);
                                    Console.WriteLine($"Number of faces detected: {dets.Length}");

                                    var shapes = new List <FullObjectDetection>();
                                    foreach (var rect in dets)
                                    {
                                        var shape = sp.Detect(img, rect);
                                        Console.WriteLine($"number of parts: {shape.Parts}");
                                        if (shape.Parts > 2)
                                        {
                                            Console.WriteLine($"pixel position of first part:  {shape.GetPart(0)}");
                                            Console.WriteLine($"pixel position of second part: {shape.GetPart(1)}");
                                            shapes.Add(shape);
                                        }
                                    }

                                    win.ClearOverlay();
                                    win.SetImage(img);

                                    if (shapes.Any())
                                    {
                                        var lines = Dlib.RenderFaceDetections(shapes);
                                        win.AddOverlay(lines);

                                        foreach (var l in lines)
                                        {
                                            l.Dispose();
                                        }

                                        var chipLocations = Dlib.GetFaceChipDetails(shapes);
                                        using (var faceChips = Dlib.ExtractImageChips <RgbPixel>(img, chipLocations))
                                            using (var tileImage = Dlib.TileImages(faceChips))
                                                winFaces.SetImage(tileImage);

                                        foreach (var c in chipLocations)
                                        {
                                            c.Dispose();
                                        }
                                    }

                                    Console.WriteLine("hit enter to process next frame");
                                    Console.ReadKey();

                                    foreach (var s in shapes)
                                    {
                                        s.Dispose();
                                    }
                                    foreach (var r in dets)
                                    {
                                        r.Dispose();
                                    }
                                }
                            }
        }
Example #29
        private static void Preprocess(string type, string input, FrontalFaceDetector faceDetector, ShapePredictor posePredictor, string output)
        {
            var imageCount = 0;

            var r = new ulong[Size * Size];
            var g = new ulong[Size * Size];
            var b = new ulong[Size * Size];

            var csv       = ReadCsv(Path.Combine(input, $"{type}.csv"));
            var outputDir = Path.Combine(output, type);

            foreach (var kvp in csv)
            {
                using (var tmp = Dlib.LoadImageAsMatrix <RgbPixel>(Path.Combine(input, type, kvp.Key)))
                {
                    var dets = faceDetector.Operator(tmp);
                    if (!dets.Any())
                    {
                        Console.WriteLine($"Warning: Failed to detect face from '{kvp}'");
                        continue;
                    }

                    // Get the largest rectangle; it is likely to be the best face.
                    var det = dets.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V.Area > working.V.Area) ? max : working).V;
                    using (var ret = posePredictor.Detect(tmp, det))
                        using (var chip = Dlib.GetFaceChipDetails(ret, Size, 0d))
                            using (var faceChips = Dlib.ExtractImageChip <RgbPixel>(tmp, chip))
                            {
                                var dst    = Path.Combine(outputDir, kvp.Key);
                                var dstDir = Path.GetDirectoryName(dst);
                                Directory.CreateDirectory(dstDir);
                                Dlib.SaveJpeg(faceChips, Path.Combine(outputDir, kvp.Key), 100);

                                var index = 0;
                                for (var row = 0; row < Size; row++)
                                {
                                    for (var col = 0; col < Size; col++)
                                    {
                                        var rgb = faceChips[row, col];
                                        r[index] += rgb.Red;
                                        g[index] += rgb.Green;
                                        b[index] += rgb.Blue;
                                        index++;
                                    }
                                }
                            }

                    imageCount++;
                }
            }

            using (var mean = new Matrix <RgbPixel>(Size, Size))
            {
                var index = 0;
                for (var row = 0; row < Size; row++)
                {
                    for (var col = 0; col < Size; col++)
                    {
                        var red   = (double)r[index] / imageCount;
                        var green = (double)g[index] / imageCount;
                        var blue  = (double)b[index] / imageCount;

                        var newRed   = (byte)Math.Round(red);
                        var newGreen = (byte)Math.Round(green);
                        var newBlue  = (byte)Math.Round(blue);
                        mean[row, col] = new RgbPixel(newRed, newGreen, newBlue);

                        index++;
                    }
                }

                Dlib.SaveBmp(mean, Path.Combine(output, $"{type}.mean.bmp"));
            }
        }
    /// <summary>
    /// Extract features from an image and store them in <see cref="FaceData1"/>.
    /// </summary>
    /// <param name="imageFileInfo">File info of the image.</param>
    /// <param name="sp">The shape predictor used to locate facial landmarks.</param>
    /// <param name="fd">The frontal face detector used to find faces.</param>
    /// <param name="getLabel">Whether to get the label or not. False if not using for prediction.</param>
    /// <returns>The extracted face data, or null if no face was found.</returns>
    /// <seealso cref="GetFaceDataPoints1"/>
    static FaceData1 GetFaceData1FromImage(FileInfo imageFileInfo, ShapePredictor sp, FrontalFaceDetector fd, bool getLabel = true)
    {
        // load input image
        using (var img = Dlib.LoadImage <RgbPixel>(imageFileInfo.FullName))
        {
            var faces = fd.Operator(img);
            foreach (var face in faces)
            {
                var shape = sp.Detect(img, face);

                return(GetFaceDataPoints1(ref shape,
                                          getLabel
                        ? GetLabel(imageFileInfo)
                        : "Not getting label, see argument this function was called with."));
            }
        }
        Debug.WriteLine($"Unable to get facial feature from {imageFileInfo.Name} as no faces were found!");
        return(null);
    }
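A hedged usage sketch for GetFaceData1FromImage, reusing the detector and predictor setup seen elsewhere on this page; the image path and model file name are assumptions.

    // Usage sketch: load the models once, then extract features for a single image.
    using (var fd = Dlib.GetFrontalFaceDetector())
        using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
        {
            FaceData1 faceData = GetFaceData1FromImage(new FileInfo(@"faces\person01.jpg"), sp, fd, getLabel: false);
            if (faceData == null)
            {
                Debug.WriteLine("No face found, nothing to extract.");
            }
        }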