private void buscarrosto(Bitmap frame)
        {
            Image <Rgb, byte> imageCV = new Image <Rgb, byte>(frame);

            Emgu.CV.Mat mat   = imageCV.Mat;
            var         array = new byte[mat.Width * mat.Height * mat.ElementSize];

            mat.CopyTo(array);

            using (Array2D <RgbPixel> image = Dlib.LoadImageData <RgbPixel>(array, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * mat.ElementSize)))
            {
                using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())
                {
                    var faces = fd.Operator(image);
                    foreach (DlibDotNet.Rectangle face in faces)
                    {
                        FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                        ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                        Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                        MainWindow.main.Statusa1 = bitmap1;
                        Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                }
                frame = image.ToBitmap <RgbPixel>();
                MainWindow.main.Statusa = frame;
            }
        }
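The _ShapePredictor field used above is initialized elsewhere in that class; a minimal sketch of a plausible initialization, assuming the standard 68-point model file that the other examples in this collection load:

        // assumed initialization for the _ShapePredictor field used by buscarrosto;
        // the model file name follows the other examples in this collection
        private static readonly ShapePredictor _ShapePredictor =
            ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat");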
Example #2
        /// <summary>
        /// Called when videoPlayer receives a new frame.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="image"></param>
        private void videoPlayer_NewFrameReceived(object sender, Accord.Video.NewFrameEventArgs eventArgs)
        {
            // convert image to dlib format
            var img = eventArgs.Frame.ToArray2D <RgbPixel>();

            // detect face every 4 frames
            if (frameIndex % 4 == 0)
            {
                var faces = faceDetector.Detect(img);
                if (faces.Length > 0)
                {
                    currentFace = faces.First();
                }
            }

            // abort if we don't have a face at this point
            if (currentFace == default(DlibDotNet.Rectangle))
            {
                return;
            }

            // detect facial landmarks
            var shape = shapePredictor.Detect(img, currentFace);

            // detect head pose
            if (shape.Parts == 68)
            {
                DetectHeadPose(eventArgs.Frame, shape);
            }

            // update frame counter
            frameIndex++;
        }
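This handler relies on instance state that is not shown. A minimal sketch of the assumed members, with names taken from the code above and initialization hedged (the 68-point model file is an assumption, consistent with the shape.Parts == 68 check):

        // assumed instance members used by videoPlayer_NewFrameReceived
        private FrontalFaceDetector faceDetector = Dlib.GetFrontalFaceDetector();
        private ShapePredictor shapePredictor = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat");
        private DlibDotNet.Rectangle currentFace;   // last detected face, reused between detection frames
        private int frameIndex;                     // frame counter driving the every-4-frames detection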
Example #3
        public void DetectEyes(Array2D <byte> image, ref System.Drawing.Point[][] eyes, ref bool IsDetected)
        {
            //ImageWindow win = new ImageWindow(image);
            //win.Show();

            var dets = detector.Operator(image);

            eyes[0] = new Point[6];
            eyes[1] = new Point[6];
            if (dets.Length > 0)
            {
                var shape = sp.Detect(image, dets[0]);
                if (shape.Parts > 60)
                {
                    for (int ii = 0; ii < 6; ii++)
                    {
                        var temp = shape.GetPart(36 + (uint)ii);
                        eyes[0][ii] = new Point(temp.X, temp.Y);

                        temp        = shape.GetPart(42 + (uint)ii);
                        eyes[1][ii] = new Point(temp.X, temp.Y);
                    }
                    IsDetected = true;
                    //var chipLocations = Dlib.GetFaceChipDetails(shapes);
                }
                else
                {
                    IsDetected = false;
                }
            }
            else
            {
                // no face was detected, so report no eyes either
                IsDetected = false;
            }
        }
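A usage sketch for DetectEyes: the caller passes a two-element jagged array, which the method fills with the six landmarks of each eye (the grayImage variable and the surrounding code are illustrative):

            // usage sketch: eyes[0] receives landmarks 36-41, eyes[1] receives 42-47
            var eyes = new System.Drawing.Point[2][];
            var detected = false;
            DetectEyes(grayImage, ref eyes, ref detected);
            if (detected)
            {
                // use eyes[0] and eyes[1], e.g. to compute eye aspect ratios
            }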
Example #4
        public void LandmarkTest()
        {
            using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var shapePredictor = new ShapePredictor(Configuration.SHAP_PREDICTOR_CONFIG))
                {
                    // convert image to dlib format
                    var img = Utils.LoadImageAsBitmap("TestImage/pic01.jpg").ToArray2D <RgbPixel>();

                    // detect faces
                    var faces = faceDetector.Detect(img);

                    // detect facial landmarks
                    foreach (var rect in faces)
                    {
                        // detect facial landmarks
                        var shape = shapePredictor.Detect(img, rect);

                        //The left eye uses landmark indices [42, 47].
                        Landmarks landmarkLeftEye = new Landmarks(42, 47, shape);
                        Assert.AreEqual(landmarkLeftEye.LandMarkPointList.Count, 6);
                        //the valid index range is 0-67, so this call exercises the out-of-range case
                        Landmarks landmark2 = new Landmarks(42, 68, shape);
                    }
                }
        }
Example #5
        /// <summary>
        /// Called when videoPlayer receives a new frame.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="image"></param>
        private void videoPlayer_NewFrame(object sender, ref System.Drawing.Bitmap image)
        {
            // convert the frame to grayscale
            var grayscale = new GrayscaleBT709();
            var grayImage = grayscale.Apply(image);

            // convert image to dlib format
            var img = grayImage.ToArray2D <RgbPixel>();

            // detect face every 4 frames
            if (frameIndex % 4 == 0)
            {
                var faces = faceDetector.Detect(img);
                if (faces.Length > 0)
                {
                    currentFace = faces.First();
                }
            }

            // abort if we don't have a face at this point
            if (currentFace == default(DlibDotNet.Rectangle))
            {
                return;
            }

            // detect facial landmarks
            var shape = shapePredictor.Detect(img, currentFace);

            // detect eye state
            DetectEyeState(image, shape);

            // update frame counter
            frameIndex++;
        }
Example #6
        public double[] detectFaceLandmarks(Array2D <RgbPixel> frame)
        {
            var img = frame;

            double[] headParams = new double[3];
            var      faces      = fd.Operator(img);

            foreach (var face in faces)
            {
                var shape = sp.Detect(img, face);

                var eyesPoints =
                    (from i in new int[] { 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47 }
                     let pt = shape.GetPart((uint)i)
                              select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray();

                headParams[2] = calculateEAR(eyesPoints);

                var model = Utility.GetFaceModel();

                var landmarks = new MatOfPoint2d(1, 6,
                                                 (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                                                  let pt = shape.GetPart((uint)i)
                                                           select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

                var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);

                var coeffs = new MatOfDouble(4, 1);
                coeffs.SetTo(0);

                Mat rotation    = new MatOfDouble();
                Mat translation = new MatOfDouble();

                Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                /* var euler = Utility.GetEulerMatrix(rotation);
                 *
                 * var yaw = 180 * euler.At<double>(0, 2) / Math.PI;
                 * var pitch = 180 * euler.At<double>(0, 1) / Math.PI;
                 * var roll = 180 * euler.At<double>(0, 0) / Math.PI;
                 *
                 * pitch = Math.Sign(pitch) * 180 - pitch;
                 */
                var poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
                var poseProjection = new MatOfPoint2d();
                Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);
                var landmark = landmarks.At <Point2d>(0);
                var p        = poseProjection.At <Point2d>(0);
                headParams[0] = (double)p.X;
                headParams[1] = (double)p.Y;
            }

            return(headParams);
        }
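The calculateEAR helper is not shown. A minimal sketch of the usual eye-aspect-ratio computation over the twelve points gathered above (offsets 0-5 map to landmarks 36-41, offsets 6-11 to landmarks 42-47), averaging both eyes; the original implementation may differ:

        // a sketch of the eye aspect ratio: EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|),
        // computed per eye and averaged over both eyes
        private double calculateEAR(OpenCvSharp.Point2d[] eyesPoints)
        {
            double Ear(int o) =>
                (Dist(eyesPoints[o + 1], eyesPoints[o + 5]) + Dist(eyesPoints[o + 2], eyesPoints[o + 4]))
                / (2.0 * Dist(eyesPoints[o], eyesPoints[o + 3]));

            return (Ear(0) + Ear(6)) / 2.0;
        }

        private static double Dist(OpenCvSharp.Point2d a, OpenCvSharp.Point2d b)
        {
            var dx = a.X - b.X;
            var dy = a.Y - b.Y;
            return Math.Sqrt(dx * dx + dy * dy);
        }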
Example #7
        /// <summary>
        /// Called when videoPlayer receives a new frame.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="image"></param>
        private void videoPlayer_NewFrame(object sender, ref Bitmap image)
        {
            // convert image to dlib format
            var img = image.ToArray2D <RgbPixel>();

            // find the face
            // note that we only detect faces every 4 frames
            if (faceRect == default(DlibDotNet.Rectangle) || frameIndex++ % 4 == 0)
            {
                var faces = faceDetector.Detect(img);
                faceRect = faces.FirstOrDefault();
            }

            // abort if we found no face
            if (faceRect == default(DlibDotNet.Rectangle))
            {
                return;
            }

            // find face landmark points
            var shape          = shapePredictor.Detect(img, faceRect);
            var landmarkPoints = BeardHelper.GetLandmarkPoints(shape);

            // find beard landmark points
            var beardPoints = BeardHelper.GetBeardPoints();

            // calculate Delaunay triangles
            var triangles = Utility.GetDelaunayTriangles(landmarkPoints);

            // get transformations to warp the beard onto the face
            var warps = Utility.GetWarps(beardPoints, landmarkPoints, triangles);

            // split the beard image into an alpha mask and an RGB part
            var beard = BitmapConverter.ToMat(beardImage);

            BeardHelper.SplitChannels(beard, out var beardMask, out var beardRgb);

            // warp the beard RGB image
            var warpedBeard = Utility.ApplyWarps(BitmapConverter.ToBitmap(beardRgb), image.Width, image.Height, warps);

            // warp the beard alpha mask
            var warpedBeardMask = Utility.ApplyWarps(BitmapConverter.ToBitmap(beardMask), image.Width, image.Height, warps);

            // blend the beard onto the camera frame by using the mask
            var frame  = BitmapConverter.ToMat(image);
            var result = BeardHelper.Blend(warpedBeard, warpedBeardMask, frame);

            // return result
            image = BitmapConverter.ToBitmap(result);
        }
Example #8
 private List <List <Point2f> > DetectFaces(FrontalFaceDetector detector, ShapePredictor predictor, Mat mat)
 {
     using (var image = ToArray(mat))
     {
         var points = detector.Operator(image)
                      .Select(rectangle => predictor.Detect(image, rectangle))
                      .Where(shape => shape.Parts > 2)
                      .Select(shape => Enumerable.Range(0, (int)shape.Parts)
                              .Select(i => shape.GetPart((uint)i))
                              .Select((p, i) => new Point2f(p.X, p.Y))
                              .ToList())
                      .ToList();
         return(points);
     }
 }
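The ToArray helper is not shown. A plausible sketch, mirroring the Mat-to-Array2D conversion used in the OnCameraFrame example later in this collection (it assumes a continuous 3-channel Mat and System.Runtime.InteropServices for Marshal; note that OpenCV stores pixels as BGR while the data is loaded as RgbPixel, exactly as the other examples here do):

 // assumed helper: copy the Mat's pixel data into a dlib Array2D<RgbPixel>
 private static Array2D <RgbPixel> ToArray(Mat mat)
 {
     var array = new byte[mat.Cols * mat.Rows * mat.ElemSize()];
     Marshal.Copy(mat.Data, array, 0, array.Length);
     return Dlib.LoadImageData <RgbPixel>(array, (uint)mat.Rows, (uint)mat.Cols, (uint)(mat.Cols * mat.ElemSize()));
 }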
Example #9
        private Bitmap ProcessImage(Bitmap image)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var sp = new ShapePredictor("shape_predictor_68_face_landmarks.dat"))
                {
                    // convert image to dlib format
                    var img = image.ToArray2D <RgbPixel>();

                    // detect faces
                    var faces = fd.Detect(img);

                    // detect facial landmarks
                    foreach (var rect in faces)
                    {
                        // detect facial landmarks
                        var shape = sp.Detect(img, rect);

                        // extract face chip
                        var chip      = Dlib.GetFaceChipDetails(shape);
                        var thumbnail = Dlib.ExtractImageChips <RgbPixel>(img, chip);

                        // add picturebox
                        var box = new PictureBox()
                        {
                            Image    = thumbnail.ToBitmap <RgbPixel>(),
                            SizeMode = PictureBoxSizeMode.Zoom,
                            Width    = 62,
                            Height   = 62
                        };
                        imagesPanel.Controls.Add(box);

                        // draw landmarks on main image
                        var lines = Dlib.RenderFaceDetections(new FullObjectDetection[] { shape });
                        foreach (var line in lines)
                        {
                            Dlib.DrawRectangle(
                                img,
                                new DlibDotNet.Rectangle(line.Point1),
                                new RgbPixel {
                                Green = 255
                            },
                                8);
                        }
                    }
                    return(img.ToBitmap <RgbPixel>());
                }
        }
Example #10
        /// <summary>
        /// Get the image with detected faces highlighted by the rectangle
        /// </summary>
        /// <param name="image"></param>
        /// <param name="numOfFaceDetected"></param>
        /// <returns></returns>
        public Bitmap FaceDetectionFromImage(Bitmap image, out int numOfFaceDetected)
        {
            numOfFaceDetected = 0;
            if (image != null)
            {
                // set up Dlib facedetectors and shapedetectors
                using (var faceDetector = FrontalFaceDetector.GetFrontalFaceDetector())
                    using (var shapePredictor = new ShapePredictor(Configuration.SHAP_PREDICTOR_CONFIG))
                    {
                        // convert image to dlib format
                        var img = image.ToArray2D <RgbPixel>();

                        // detect faces
                        var faces = faceDetector.Detect(img);

                        // detect facial landmarks
                        foreach (var rect in faces)
                        {
                            // detect facial landmarks
                            var shape = shapePredictor.Detect(img, rect);

                            //The left eye uses landmark indices [42, 47].
                            Landmarks landmarkLeftEye = new Landmarks(42, 47, shape);
                            //The right eye uses landmark indices [36, 41].
                            Landmarks landmarkRightEye = new Landmarks(36, 41, shape);
                            //draw landmark rectangle
                            var leftEyeRect      = Utils.RectangleAdjust(landmarkLeftEye.GetLandmarkRectangle(), img);
                            var rightEyeRect     = Utils.RectangleAdjust(landmarkRightEye.GetLandmarkRectangle(), img);
                            var adjustedFaceRect = Utils.RectangleAdjust(rect, img);

                            Dlib.DrawRectangle(img, adjustedFaceRect, new RgbPixel {
                                Blue = 255
                            }, 5);
                            Dlib.DrawRectangle(img, leftEyeRect, new RgbPixel {
                                Green = 255
                            }, 2);
                            Dlib.DrawRectangle(img, rightEyeRect, new RgbPixel {
                                Green = 255
                            }, 2);
                        }
                        numOfFaceDetected = faces.Length;
                        return(img.ToBitmap <RgbPixel>());
                    }
            }
            return(image);
        }
Example #11
        public static bool ExtractFaceDataFromImage(
            Array2D <RgbPixel> rgb_array2d_img,
            ref Array2D <RgbPixel> face_array2d_img,
            ref Array2D <RgbPixel> left_eye_array2d_img,
            ref Array2D <RgbPixel> right_eye_array2d_img,
            ref float[] face_grid)
        {
            var ycbcr_array2d_img = RgbToYCbCr(rgb_array2d_img);

            //Dlib.SaveJpeg(rgb_array2d_img, "face_rgb.jpg");
            //Dlib.SaveJpeg(ycbcr_array2d_img, "face_ycbcr.jpg");

            var face_rects = detector.Operator(rgb_array2d_img);

            if (face_rects.Length != 1)
            {
                return(false);
            }

            foreach (var face_rect in face_rects)
            {
                var shape = sp.Detect(rgb_array2d_img, face_rect);

                var left_eye_rect  = GetRect(shape, LEFT_EYE_START, LEFT_EYE_END);
                var right_eye_rect = GetRect(shape, RIGHT_EYE_START, RIGHT_EYE_END);

                var left_eye_rect_normalized  = GetEyeRectSizeNormalized(face_rect, left_eye_rect);
                var right_eye_rect_normalized = GetEyeRectSizeNormalized(face_rect, right_eye_rect);

                DrawDebugDataOnImage(rgb_array2d_img, shape, face_rect, left_eye_rect, right_eye_rect);

                GenerateInputs(
                    ycbcr_array2d_img,
                    face_rect,
                    left_eye_rect_normalized,
                    right_eye_rect_normalized,
                    ref face_array2d_img,
                    ref left_eye_array2d_img,
                    ref right_eye_array2d_img,
                    ref face_grid);
            }

            return(true);
        }
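The GetRect helper is assumed to build an axis-aligned bounding box over a run of landmark indices; a minimal sketch consistent with how it is called above (the uint index parameters and the Rectangle left/top/right/bottom constructor are assumptions):

        // assumed helper: bounding rectangle of the shape parts in [start, end]
        private static DlibDotNet.Rectangle GetRect(FullObjectDetection shape, uint start, uint end)
        {
            int left = int.MaxValue, top = int.MaxValue, right = int.MinValue, bottom = int.MinValue;
            for (var i = start; i <= end; i++)
            {
                var p = shape.GetPart(i);
                left   = Math.Min(left, p.X);
                top    = Math.Min(top, p.Y);
                right  = Math.Max(right, p.X);
                bottom = Math.Max(bottom, p.Y);
            }
            return new DlibDotNet.Rectangle(left, top, right, bottom);
        }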
Example #12
    /// <summary>
    /// Extract features from an image and store it in <see cref="FaceData1"/>.
    /// </summary>
    /// <param name="imageFileInfo">File info of the image.</param>
    /// <param name="sp"></param>
    /// <param name="fd"></param>
    /// <param name="getLabel">>Whether to get the label or not. False if not using for prediction.</param>
    /// <returns></returns>
    /// <seealso cref="GetFaceDataPoints1"/>
    static FaceData1 GetFaceData1FromImage(FileInfo imageFileInfo, ShapePredictor sp, FrontalFaceDetector fd, bool getLabel = true)
    {
        // load input image
        using (var img = Dlib.LoadImage <RgbPixel>(imageFileInfo.FullName))
        {
            var faces = fd.Operator(img);
            foreach (var face in faces)
            {
                var shape = sp.Detect(img, face);

                return(GetFaceDataPoints1(ref shape,
                                          getLabel
                        ? GetLabel(imageFileInfo)
                        : "Not getting label, see argument this function was called with."));
            }
        }
        Debug.WriteLine($"Unable to get facial feature from {imageFileInfo.Name} as no faces were found!");
        return(null);
    }
Example #13
        private void OnCameraFrame(object sender, EventArgs e)
        {
            img = capture.RetrieveMat();
            Cv2.Flip(img, img, FlipMode.Y);

            var array = new byte[img.Cols * img.Rows * img.ElemSize()];

            Marshal.Copy(img.Data, array, 0, array.Length);
            var image = Dlib.LoadImageData <RgbPixel>(array, (uint)img.Rows, (uint)img.Cols, (uint)(img.Cols * img.ElemSize()));

            faces = detector.Operator(image);

            shapes.Clear();
            foreach (var rect in faces)
            {
                DlibDotNet.Rectangle face = rect;
                shapes.Add(predictor.Detect(image, face));
            }


            Invalidate();
        }
Example #14
        public async Task <MatrixFloatDto[]> GetFaceDescriptors(string filename, System.Drawing.Rectangle[] faces)
        {
            var inputFilename = filename;
            var chips         = new List <Matrix <RgbPixel> >();

            using var img = await DlibHelpers.LoadRotatedImage(imageRotationService, inputFilename);

            foreach (var face in faces.Select(x => new Rectangle(x.Left, x.Top, x.Right, x.Bottom)))
            {
                // detect landmarks
                var shape = predictor.Detect(img, face);

                // extract normalized and rotated 150x150 face chip
                var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);

                // convert the chip to a matrix and store
                var matrix = new Matrix <RgbPixel>(faceChip);
                chips.Add(matrix);
            }

            if (!chips.Any())
            {
                return(Array.Empty <MatrixFloatDto>());
            }

            // put each face in a 128D embedding space
            // similar faces will be placed close together
            var descriptors = dnn.Operator(chips);

            return(descriptors
                   .Select(x => new MatrixFloatDto
            {
                Data = x.ToArray(),
                Row = x.Rows,
                Columns = x.Columns,
            })
                   .ToArray());
        }
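The returned descriptors live in the same 128D space used by the clustering example later in this collection, so two faces can be compared by Euclidean distance. A usage sketch with the 0.6 decision threshold the network was trained for (variable names are illustrative):

            // usage sketch: descriptors closer than 0.6 likely belong to the same person
            var dtos = await GetFaceDescriptors(filename, faceRects);
            var a = new Matrix <float>(dtos[0].Data, dtos[0].Row, dtos[0].Columns);
            var b = new Matrix <float>(dtos[1].Data, dtos[1].Row, dtos[1].Columns);
            using (var diff = a - b)
            {
                var samePerson = Dlib.Length(diff) < 0.6;
            }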
Example #15
        private void GetLandmarks(OpenCvSharp.Mat frame, OpenCvSharp.Rect face, List <System.Drawing.Rectangle> rfaces)
        {
            EyePoints      rightEye  = new EyePoints(leftEye: true);
            EyePoints      leftEye   = new EyePoints(leftEye: false);
            ShapePredictor predictor = ShapePredictor.Deserialize(shapePredictorDataFile);

            //Scalar eyecolor = new Scalar(0, 0, 255);
            Array2D <byte>      gray      = ConvertMatToDlib2DArray(frame);
            FullObjectDetection landmarks = predictor.Detect(gray, ConvertToDlib(face));

            InitializeEyes(landmarks, leftEye, rightEye);
            //DrawEye(que, landmarks, leftEye);
            //DrawEye(que, landmarks, rightEye);
            Rect leftboundingBox = BoundingBoxAroundEye(leftEye, 0);

            rfaces.Add(FromOpenCvRect(leftboundingBox));
            //DrawRect(frame, leftboundingBox);
            OpenCvSharp.Point centerOfLeftEye = DetectCenterOfEye(frame, leftboundingBox);
            centerOfLeftEye.X += leftboundingBox.X;

            Rect rightboundingBox = BoundingBoxAroundEye(rightEye, 0);

            rfaces.Add(FromOpenCvRect(rightboundingBox));
            //DrawRect(frame, rightboundingBox);
            OpenCvSharp.Point centerOfRightEye = DetectCenterOfEye(frame, rightboundingBox);
            centerOfRightEye.X += rightboundingBox.X;

            EyeDirection leftEyeDirection  = leftEye.GetEyePosition(centerOfLeftEye);
            EyeDirection rightEyeDirection = rightEye.GetEyePosition(centerOfRightEye);

            //EyeDirection eyeDirection = EyeDirection.unknown;
            //if (leftEyeDirection == EyeDirection.center || rightEyeDirection == EyeDirection.center) eyeDirection = EyeDirection.center;
            //else if (leftEyeDirection == EyeDirection.left) eyeDirection = EyeDirection.left;
            //else if (rightEyeDirection == EyeDirection.right) eyeDirection = EyeDirection.right;

            //OpenCvSharp.Point position = new OpenCvSharp.Point(50, 50);
            //Cv2.PutText(img: frame, text: eyeDirection.ToDisplay(), org: position, fontFace: HersheyFonts.HersheySimplex, fontScale: 2, new Scalar(0, 0, 255));
        }
Example #16
        /// <summary>
        /// Detect all 68 landmarks on the face on camera
        /// </summary>
        /// <param name="image">The current camera frame to analyze</param>
        /// <param name="frameIndex">The index number of the current camera frame</param>
        /// <returns>A FullObjectDetection object containing all 68 facial landmark points</returns>
        private FullObjectDetection DetectLandmarks(Bitmap image, int frameIndex)
        {
            // convert image to dlib format
            var dlibImage = image.ToArray2D <RgbPixel>();

            // detect faces every 5 frames
            if (frameIndex % 5 == 0)
            {
                var faces = faceDetector.Detect(dlibImage);
                if (faces.Length > 0)
                {
                    // grab the first face
                    currentFace = faces.First();
                }
            }

            // detect all 68 facial landmarks on the face
            if (currentFace != default(DlibDotNet.Rectangle))
            {
                return(shapePredictor.Detect(dlibImage, currentFace));
            }
            return(null);
        }
Example #17
        /// <summary>
        /// Run the actual landmark computation
        /// </summary>
        /// <param name="bitmap"></param>
        /// <returns></returns>
        public List <FullObjectDetection> Face(Bitmap bitmap)
        {
            // load the model file
            if (sp == null)
            {
                var basePath = AppDomain.CurrentDomain.BaseDirectory;
                sp = ShapePredictor.Deserialize(basePath + "ShapeModel/shape_predictor_68_face_landmarks.dat");
            }

            //var link = new ImageWindow.OverlayLine[0];
            var shapes = new List <FullObjectDetection>();

            using (var detector = Dlib.GetFrontalFaceDetector())
            {
                using (var img = bitmap.ToArray2D <RgbPixel>())
                {
                    var dets = detector.Operator(img);

                    foreach (var rect in dets)
                    {
                        var shape = sp.Detect(img, rect);
                        if (shape.Parts > 2)
                        {
                            shapes.Add(shape);
                        }
                    }
                    //if (shapes.Any())
                    //{
                    //    //this is the one
                    //    var lines = Dlib.RenderFaceDetections(shapes);
                    //    link = lines;
                    //}
                }
            }
            return(shapes);
        }
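The commented-out code above hints at rendering; a usage sketch for the returned shapes, following the rendering done in the console example later in this collection (the instance name is illustrative):

            // usage sketch: render the returned landmark shapes as overlay lines
            var shapes = faceService.Face(bitmap);
            if (shapes.Any())
            {
                var lines = Dlib.RenderFaceDetections(shapes);
                // e.g. display them in an ImageWindow via win.AddOverlay(lines)
            }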
Example #18
        private void Timer1_Tick(object sender, EventArgs e)
        {
            capture.Read(frame);

            this.point = new Point((frame.Width - size.Width) / 2, (frame.Height - size.Height) / 2);
            this.rect  = new Rect(point, size);

            Cv2.Flip(frame, frame, FlipMode.Y);

            if (!frame.Empty() && start)
            {
                var img = ConvertToArray2D(frame);

                var faces = fd.Operator(img);

                if (faces.Any(face => IsFaceInFrame(face)))
                {
                    foreach (var face in faces)
                    {
                        if (IsFaceInFrame(face))
                        {
                            //Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                            var shape = sp.Detect(img, face);

                            var landmarks = new MatOfPoint2d(1, 6,
                                                             (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                                                              let pt = shape.GetPart((uint)i)
                                                                       select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

                            var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);

                            Mat rotation    = new MatOfDouble();
                            Mat translation = new MatOfDouble();
                            Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                            var euler = Utility.GetEulerMatrix(rotation);

                            var yaw   = 180 * euler.At <double>(0, 2) / Math.PI;
                            var pitch = 180 * euler.At <double>(0, 1) / Math.PI;
                            pitch = Math.Sign(pitch) * 180 - pitch;

                            Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                            //var landmark = landmarks.At<Point2d>(0);
                            //var p = poseProjection.At<Point2d>(0);
                            //Dlib.DrawLine(
                            //    img,
                            //    new DlibDotNet.Point((int)landmark.X, (int)landmark.Y),
                            //    new DlibDotNet.Point((int)p.X, (int)p.Y),
                            //    color: new RgbPixel(0, 255, 255));

                            //foreach (var i in new int[] { 30, 8, 36, 45, 48, 54 })
                            //{
                            //    var point = shape.GetPart((uint)i);
                            //    var rect = new Rectangle(point);
                            //    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                            //}
                            for (var i = 0; i < shape.Parts; i++)
                            {
                                var point = shape.GetPart((uint)i);
                                var rect  = new Rectangle(point);
                                Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 255, 255), thickness: 4);
                            }

                            CheckFace(pitch, frame, face, yaw, pitch);
                            frame = img.ToBitmap().ToMat();
                        }
                    }
                }
                else if (this.step > 0)
                {
                    SetZero();
                    this.ErrorMsg.Visible = true;
                }
            }

            Cv2.Rectangle(frame, rect, Scalar.Yellow, thickness: 2);
            camera.Image = frame.ToBitmap();
        }
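Utility.GetCameraMatrix appears in several examples here without its body. A minimal sketch, assuming the common pinhole approximation where the focal length is taken as the frame width and the optical center as the frame center; the real helper may differ:

        // assumed pinhole-camera approximation: fx = fy = frame width, center = frame center
        public static Mat GetCameraMatrix(int width, int height)
        {
            var m = new MatOfDouble(3, 3);
            m.SetTo(0);
            m.Set <double>(0, 0, width);        // fx
            m.Set <double>(1, 1, width);        // fy
            m.Set <double>(0, 2, width / 2.0);  // cx
            m.Set <double>(1, 2, height / 2.0); // cy
            m.Set <double>(2, 2, 1.0);
            return m;
        }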
Example #19
        private Bitmap ProcessImage(Bitmap image, Bitmap newImage)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = FrontalFaceDetector.GetFrontalFaceDetector())
                using (var sp = new ShapePredictor("shape_predictor_68_face_landmarks.dat"))
                {
                    // convert image to dlib format
                    var img = image.ToArray2D <RgbPixel>();

                    // find Bradley's face in the image
                    var faces   = fd.Detect(img);
                    var bradley = faces[0];

                    // get bradley's landmark points
                    var bradleyShape  = sp.Detect(img, bradley);
                    var bradleyPoints = (from i in Enumerable.Range(0, (int)bradleyShape.Parts)
                                         let p = bradleyShape.GetPart((uint)i)
                                                 select new OpenCvSharp.Point(p.X, p.Y)).ToArray();

                    // get convex hull of bradley's points
                    var hull        = Cv2.ConvexHullIndices(bradleyPoints);
                    var bradleyHull = from i in hull
                                      select bradleyPoints[i];

                    // find landmark points in face to swap
                    var imgMark    = newImage.ToArray2D <RgbPixel>();
                    var faces2     = fd.Detect(imgMark);
                    var mark       = faces2[0];
                    var markShape  = sp.Detect(imgMark, mark);
                    var markPoints = (from i in Enumerable.Range(0, (int)markShape.Parts)
                                      let p = markShape.GetPart((uint)i)
                                              select new OpenCvSharp.Point(p.X, p.Y)).ToArray();

                    // get the convex hull of Mark's points, reusing Bradley's hull
                    // indices so both hulls index corresponding landmarks
                    var hull2    = Cv2.ConvexHullIndices(bradleyPoints);
                    var markHull = from i in hull2
                                   select markPoints[i];

                    // calculate Delaunay triangles
                    var triangles = Utility.GetDelaunayTriangles(bradleyHull);

                    // get transformations to warp the new face onto Bradley's face
                    var warps = Utility.GetWarps(markHull, bradleyHull, triangles);

                    // apply the warps to the new face to prep it for insertion into the main image
                    var warpedImg = Utility.ApplyWarps(newImage, image.Width, image.Height, warps);

                    // prepare a mask for the warped image
                    var mask = new Mat(image.Height, image.Width, MatType.CV_8UC3);
                    mask.SetTo(0);
                    Cv2.FillConvexPoly(mask, bradleyHull, new Scalar(255, 255, 255), LineTypes.Link8);

                    // find the center of the warped face
                    var r      = Cv2.BoundingRect(bradleyHull);
                    var center = new OpenCvSharp.Point(r.Left + r.Width / 2, r.Top + r.Height / 2);

                    // blend the warped face into the main image
                    var selfie = BitmapConverter.ToMat(image);
                    var blend  = new Mat(selfie.Size(), selfie.Type());
                    Cv2.SeamlessClone(warpedImg, selfie, mask, center, blend, SeamlessCloneMethods.NormalClone);

                    // return the modified main image
                    return(BitmapConverter.ToBitmap(blend));
                }
        }
Example #20
        private static void Main(string[] args)
        {
            if (args.Length != 1)
            {
                Console.WriteLine("Run this example by invoking it like this: ");
                Console.WriteLine("   ./DnnFaceRecognition faces/bald_guys.jpg");
                Console.WriteLine("You will also need to get the face landmarking model file as well as ");
                Console.WriteLine("the face recognition model file.  Download and then decompress these files from: ");
                Console.WriteLine("http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2");
                Console.WriteLine("http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2");
                return;
            }

            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = FrontalFaceDetector.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = new ShapePredictor("shape_predictor_5_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImage <RgbPixel>(args[0]))
                            using (var mat = new Matrix <RgbPixel>(img))

                                // Display the raw image on the screen
                                using (var win = new ImageWindow(img))
                                {
                                    // Run the face detector on the image of our action heroes, and for each face extract a
                                    // copy that has been normalized to 150x150 pixels in size and appropriately rotated
                                    // and centered.
                                    var faces = new List <Matrix <RgbPixel> >();
                                    foreach (var face in detector.Detect(img))
                                    {
                                        var shape          = sp.Detect(img, face);
                                        var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                        var faceChip       = Dlib.ExtractImageChip <RgbPixel>(mat, faceChipDetail);

                                        //faces.Add(move(face_chip));
                                        faces.Add(faceChip);

                                        // Also put some boxes on the faces so we can see that the detector is finding
                                        // them.
                                        win.AddOverlay(face);
                                    }

                                    if (!faces.Any())
                                    {
                                        Console.WriteLine("No faces found in image!");
                                        return;
                                    }

                                    // This call asks the DNN to convert each face image in faces into a 128D vector.
                                    // In this 128D vector space, images from the same person will be close to each other
                                    // but vectors from different people will be far apart.  So we can use these vectors to
                                    // identify if a pair of images are from the same person or from different people.
                                    var faceDescriptors = net.Operator(faces);

                                    // In particular, one simple thing we can do is face clustering.  This next bit of code
                                    // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                    // algorithm to identify how many people there are and which faces belong to whom.
                                    var edges = new List <SamplePair>();
                                    for (uint i = 0; i < faceDescriptors.Count; ++i)
                                    {
                                        for (var j = i; j < faceDescriptors.Count; ++j)
                                        {
                                            // Faces are connected in the graph if they are close enough.  Here we check if
                                            // the distance between two face descriptors is less than 0.6, which is the
                                            // decision threshold the network was trained to use.  Although you can
                                            // certainly use any other threshold you find useful.
                                            var diff = faceDescriptors[i] - faceDescriptors[j];
                                            if (Dlib.Length(diff) < 0.6)
                                            {
                                                edges.Add(new SamplePair(i, j));
                                            }
                                        }
                                    }

                                    Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                    // This will correctly indicate that there are 4 people in the image.
                                    Console.WriteLine($"number of people found in the image: {numClusters}");

                                    // Now let's display the face clustering results on the screen.  You will see that it
                                    // correctly grouped all the faces.
                                    var winClusters = new List <ImageWindow>();
                                    for (var i = 0; i < numClusters; i++)
                                    {
                                        winClusters.Add(new ImageWindow());
                                    }
                                    var tileImages = new List <Matrix <RgbPixel> >();
                                    for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                    {
                                        var temp = new List <Matrix <RgbPixel> >();
                                        for (var j = 0; j < labels.Length; ++j)
                                        {
                                            if (clusterId == labels[j])
                                            {
                                                temp.Add(faces[j]);
                                            }
                                        }

                                        winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                        var tileImage = Dlib.TileImages(temp);
                                        tileImages.Add(tileImage);
                                        winClusters[(int)clusterId].SetImage(tileImage);
                                    }

                                    // Finally, let's print one of the face descriptors to the screen.
                                    using (var trans = Dlib.Trans(faceDescriptors[0]))
                                    {
                                        Console.WriteLine($"face descriptor for one face: {trans}");

                                        // It should also be noted that face recognition accuracy can be improved if jittering
                                        // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                        // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                        // like so:
                                        var jitterImages = JitterImage(faces[0]).ToArray();
                                        var ret          = net.Operator(jitterImages);
                                        using (var m = Dlib.Mat(ret))
                                            using (var faceDescriptor = Dlib.Mean <float>(m))
                                                using (var t = Dlib.Trans(faceDescriptor))
                                                {
                                                    Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                    // If you use the model without jittering, as we did when clustering the bald guys, it
                                                    // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                    // procedure a little more accurate but makes face descriptor calculation slower.

                                                    Console.WriteLine("hit enter to terminate");
                                                    Console.ReadKey();

                                                    foreach (var jitterImage in jitterImages)
                                                    {
                                                        jitterImage.Dispose();
                                                    }

                                                    foreach (var tileImage in tileImages)
                                                    {
                                                        tileImage.Dispose();
                                                    }

                                                    foreach (var edge in edges)
                                                    {
                                                        edge.Dispose();
                                                    }

                                                    foreach (var descriptor in faceDescriptors)
                                                    {
                                                        descriptor.Dispose();
                                                    }

                                                    foreach (var face in faces)
                                                    {
                                                        face.Dispose();
                                                    }
                                                }
                                    }
                                }
        }
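The JitterImage helper is not shown above. A sketch following dlib's dnn_face_recognition_ex sample, which produces 100 randomly jittered copies of the face chip (the count of 100 is the sample's default; the original helper may differ):

        // assumed helper: yield 100 randomly jittered copies of the face chip
        private static IEnumerable <Matrix <RgbPixel> > JitterImage(Matrix <RgbPixel> img)
        {
            var rnd = new Rand();
            for (var i = 0; i < 100; ++i)
            {
                yield return Dlib.JitterImage(img, rnd);
            }
        }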
Example #21
        private static void Main(string[] args)
        {
            if (args.Length == 0)
            {
                Console.WriteLine("Give some image files as arguments to this program.");
                Console.WriteLine("Call this program like this:");
                Console.WriteLine("./face_landmark_detection_ex shape_predictor_68_face_landmarks.dat faces/*.jpg");
                Console.WriteLine("You can get the shape_predictor_68_face_landmarks.dat file from:");
                Console.WriteLine("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2");
                return;
            }

            using (var win = new ImageWindow())
                using (var winFaces = new ImageWindow())
                    using (var detector = FrontalFaceDetector.GetFrontalFaceDetector())
                        using (var sp = new ShapePredictor(args[0]))
                            foreach (var file in args.ToList().GetRange(1, args.Length - 1))
                            {
                                Console.WriteLine($"processing image {file}");

                                using (var img = Dlib.LoadImage <RgbPixel>(file))
                                {
                                    Dlib.PyramidUp(img);

                                    var dets = detector.Detect(img);
                                    Console.WriteLine($"Number of faces detected: {dets.Length}");

                                    var shapes = new List <FullObjectDetection>();
                                    foreach (var rect in dets)
                                    {
                                        var shape = sp.Detect(img, rect);
                                        Console.WriteLine($"number of parts: {shape.Parts}");
                                        if (shape.Parts > 2)
                                        {
                                            Console.WriteLine($"pixel position of first part:  {shape.GetPart(0)}");
                                            Console.WriteLine($"pixel position of second part: {shape.GetPart(1)}");
                                            shapes.Add(shape);
                                        }
                                    }

                                    win.ClearOverlay();
                                    win.SetImage(img);

                                    if (shapes.Any())
                                    {
                                        var lines = Dlib.RenderFaceDetections(shapes);
                                        win.AddOverlay(lines);

                                        foreach (var l in lines)
                                        {
                                            l.Dispose();
                                        }

                                        var chipLocations = Dlib.GetFaceChipDetails(shapes);
                                        using (var faceChips = Dlib.ExtractImageChips <RgbPixel>(img, chipLocations))
                                            using (var tileImage = Dlib.TileImages(faceChips))
                                                winFaces.SetImage(tileImage);

                                        foreach (var c in chipLocations)
                                        {
                                            c.Dispose();
                                        }
                                    }

                                    Console.WriteLine("hit enter to process next frame");
                                    Console.ReadKey();

                                    foreach (var s in shapes)
                                    {
                                        s.Dispose();
                                    }
                                    foreach (var r in dets)
                                    {
                                        r.Dispose();
                                    }
                                }
                            }
        }
Example #22
        private static void Preprocess(string type, string input, FrontalFaceDetector faceDetector, ShapePredictor posePredictor, string output)
        {
            var imageCount = 0;

            var r = new ulong[Size * Size];
            var g = new ulong[Size * Size];
            var b = new ulong[Size * Size];

            var csv       = ReadCsv(Path.Combine(input, $"{type}.csv"));
            var outputDir = Path.Combine(output, type);

            foreach (var kvp in csv)
            {
                using (var tmp = Dlib.LoadImageAsMatrix <RgbPixel>(Path.Combine(input, type, kvp.Key)))
                {
                    var dets = faceDetector.Operator(tmp);
                    if (!dets.Any())
                    {
                        Console.WriteLine($"Warning: Failed to detect face from '{kvp}'");
                        continue;
                    }

                    // Get the max size rectangle; it is likely the best face.
                    var det = dets.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V.Area > working.V.Area) ? max : working).V;
                    using (var ret = posePredictor.Detect(tmp, det))
                        using (var chip = Dlib.GetFaceChipDetails(ret, Size, 0d))
                            using (var faceChips = Dlib.ExtractImageChip <RgbPixel>(tmp, chip))
                            {
                                var dst    = Path.Combine(outputDir, kvp.Key);
                                var dstDir = Path.GetDirectoryName(dst);
                                Directory.CreateDirectory(dstDir);
                                Dlib.SaveJpeg(faceChips, dst, 100);

                                var index = 0;
                                for (var row = 0; row < Size; row++)
                                {
                                    for (var col = 0; col < Size; col++)
                                    {
                                        var rgb = faceChips[row, col];
                                        r[index] += rgb.Red;
                                        g[index] += rgb.Green;
                                        b[index] += rgb.Blue;
                                        index++;
                                    }
                                }
                            }

                    imageCount++;
                }
            }

            using (var mean = new Matrix <RgbPixel>(Size, Size))
            {
                var index = 0;
                for (var row = 0; row < Size; row++)
                {
                    for (var col = 0; col < Size; col++)
                    {
                        var red   = (double)r[index] / imageCount;
                        var green = (double)g[index] / imageCount;
                        var blue  = (double)b[index] / imageCount;

                        var newRed   = (byte)Math.Round(red);
                        var newGreen = (byte)Math.Round(green);
                        var newBlue  = (byte)Math.Round(blue);
                        mean[row, col] = new RgbPixel(newRed, newGreen, newBlue);

                        index++;
                    }
                }

                Dlib.SaveBmp(mean, Path.Combine(output, $"{type}.mean.bmp"));
            }
        }
Example #23
        private void StartWebCam(BackgroundWorker worker = null)
        {
            if (cap == null)
            {
                cap = new VideoCapture(0);
            }
            if (!cap.Open(0))
            {
                return;
            }
            OpenCvSharp.Cv2.NamedWindow("Video", WindowMode.AutoSize);
            int       cnt      = 0;
            Mat       frame    = new Mat();
            EyePoints rightEye = new EyePoints(true);
            EyePoints leftEye  = new EyePoints(false);

            IsRunning = true;
            while (IsRunning)
            {
                bool result = cap.Read(frame);
                if (!result)
                {
                    worker?.CancelAsync();
                    IsRunning = false;
                }
                if (frame != null && (frame.Rows * frame.Cols > 0))
                {
                    cnt++;
                    if (cnt % frameskip == 0)
                    {
                        FrameQueue.Enqueue(frame);
                        cnt = 0;
                    }
                }
                while (FrameQueue.Count > 0)
                {
                    Mat    que   = FrameQueue.Dequeue();
                    Rect[] faces = GetFaces(que, 1);
                    for (int i = 0; i < faces.Length; i++)
                    {
                        //GetFaceInRect(faces[i], que, i);
                        Scalar              eyecolor  = new Scalar(0, 0, 255);
                        Array2D <byte>      gray      = ConvertMatToDlib2DArray(que);
                        FullObjectDetection landmarks = predictor.Detect(gray, ConvertToDlib(faces[i]));
                        InitializeEyes(landmarks, leftEye, rightEye);
                        //DrawEye(que, landmarks, leftEye);
                        //DrawEye(que, landmarks, rightEye);
                        Rect leftboundingBox = BoundingBoxAroundEye(leftEye, 0);
                        DrawRect(que, leftboundingBox);
                        OpenCvSharp.Point centerOfLeftEye = DetectCenterOfEye(que, leftboundingBox);
                        centerOfLeftEye.X += leftboundingBox.X;

                        Rect rightboundingBox = BoundingBoxAroundEye(rightEye, 0);
                        DrawRect(que, rightboundingBox);
                        OpenCvSharp.Point centerOfRightEye = DetectCenterOfEye(que, rightboundingBox);
                        centerOfRightEye.X += rightboundingBox.X;

                        EyeDirection leftEyeDirection  = leftEye.GetEyePosition(centerOfLeftEye);
                        EyeDirection rightEyeDirection = rightEye.GetEyePosition(centerOfRightEye);

                        EyeDirection eyeDirection = EyeDirection.unknown;
                        if (leftEyeDirection == EyeDirection.center || rightEyeDirection == EyeDirection.center)
                        {
                            eyeDirection = EyeDirection.center;
                        }
                        else if (leftEyeDirection == EyeDirection.left)
                        {
                            eyeDirection = EyeDirection.left;
                        }
                        else if (rightEyeDirection == EyeDirection.right)
                        {
                            eyeDirection = EyeDirection.right;
                        }

                        OpenCvSharp.Point position = new OpenCvSharp.Point(50, 50);
                        Cv2.PutText(img: que, text: eyeDirection.ToDisplay(), org: position, fontFace: HersheyFonts.HersheySimplex, fontScale: 2, new Scalar(0, 0, 255));
                    }
                    //BitmapImage bmi = ConvertToBMI(frame, cnt, "D:/junk/TestCamImages");
                    if (worker != null)
                    {
                        //worker.ReportProgress(cnt, bmi);
                        try
                        {
                            OpenCvSharp.Cv2.ImShow("Video", que);
                            int key = Cv2.WaitKey(10);   // as in 10 milliseconds
                            if (key == 27)
                            {
                                worker.CancelAsync();
                                IsRunning = false;
                            }
                        }
                        catch (Exception ex)
                        {
                            string msg = ex.Message;
                        }
                    }
                    if (worker != null && worker.CancellationPending)
                    {
                        Cv2.DestroyWindow("Video");
                        break;
                    }
                }
            }
        }
Example #24
        public async Task ProcessAsync(string[] inputFilenames)
        {
            var chips        = new List <Matrix <RgbPixel> >();
            var faces        = new List <Rectangle>();
            var filename     = new List <string>();
            var jsonFilename = inputFilenames.First() + ".json";

            foreach (var inputFilename in inputFilenames)
            {
                if (!File.Exists(inputFilename))
                {
                    break;
                }

                if (File.Exists(jsonFilename))
                {
                    continue;
                }

                // load the image
                using var img = await DlibHelpers.LoadRotatedImage(imageRotationService, inputFilename);

                // Dlib.SaveJpeg(img, inputFilename + "__1.jpg", 25);
                // Dlib.SaveJpeg(img, inputFilename + "__2.jpg", 25);

                // detect all faces
                foreach (var face in detector.Operator(img))
                {
                    // detect landmarks
                    var shape = predictor.Detect(img, face);

                    // extract normalized and rotated 150x150 face chip
                    var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                    var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);

                    // convert the chip to a matrix and store
                    var matrix = new Matrix <RgbPixel>(faceChip);
                    chips.Add(matrix);
                    faces.Add(face);
                    filename.Add(inputFilename);
                }
            }

            if (!File.Exists(jsonFilename))
            {
                var ffd = new FoundFacesData
                {
                    // Chips = chips,
                    Faces     = faces,
                    Filenames = filename,
                };

                OutputLabels <Matrix <float> > descriptors = null;
                if (chips.Any())
                {
                    // put each face in a 128D embedding space
                    // similar faces will be placed close together
                    // Console.WriteLine("Recognizing faces...");
                    descriptors     = dnn.Operator(chips);
                    ffd.Descriptors = descriptors.ToList();
                }
                else
                {
                    ffd.Descriptors = new List <Matrix <float> >(0);
                }

                var dto = new FoundFacesDataDto
                {
                    Faces = ffd.Faces
                            .Select(f => new RectangleDto
                    {
                        Bottom = f.Bottom,
                        Left   = f.Left,
                        Top    = f.Top,
                        Right  = f.Right,
                    })
                            .ToList(),

                    Filenames = ffd.Filenames,

                    Descriptors = ffd.Descriptors
                                  .Select(x => new MatrixFloatDto
                    {
                        Data    = x.ToArray(),
                        Row     = x.Rows,
                        Columns = x.Columns,
                    })
                                  .ToList()
                };

                var json = JsonConvert.SerializeObject(dto);
                File.WriteAllText(jsonFilename, json);
            }

            FoundFacesData items;

            using (var r = new StreamReader(jsonFilename))
            {
                var json     = r.ReadToEnd();
                var itemsdto = JsonConvert.DeserializeObject <FoundFacesDataDto>(json);
                items = new FoundFacesData
                {
                    Faces       = itemsdto.Faces.Select(f => new Rectangle(f.Left, f.Top, f.Right, f.Bottom)).ToList(),
                    Filenames   = itemsdto.Filenames.ToList(),
                    Descriptors = itemsdto.Descriptors.Select(d => new Matrix <float>(d.Data, d.Row, d.Columns)).ToList(),
                };
            }

            if (items.Faces.Count <= 0)
            {
                return;
            }

            // compare each face with all other faces
            var edges = new List <SamplePair>();
            for (var i = 0; i < items.Descriptors.Count; ++i)
            {
                for (var j = i; j < items.Descriptors.Count; ++j)
                {
                    // record every pair of two similar faces
                    // faces are similar if they are less than 0.4 apart in the 128D embedding space
                    if (Dlib.Length(items.Descriptors[i] - items.Descriptors[j]) < 0.4)
                    {
                        edges.Add(new SamplePair((uint)i, (uint)j));
                    }
                }
            }

            // use the chinese whispers algorithm to find all face clusters
            Dlib.ChineseWhispers(edges, 100, out var clusters2, out var labels2);
            // Console.WriteLine($"   Found {clusters} unique person(s) in the image");

            // draw rectangles on each face using the cluster color
            for (var i = 0; i < items.Faces.Count; i++)
            {
                var color = palette[0];
                if (labels2[i] < palette.Length)
                {
                    color = palette[labels2[i]];
                }

                if (!File.Exists(items.Filenames[i] + $"_x{labels2[i]}.jpg"))
                {
                    using var img2 = await DlibHelpers.LoadRotatedImage(imageRotationService, items.Filenames[i]);

                    Dlib.SaveJpeg(img2, items.Filenames[i] + $"_x{labels2[i]}.jpg", 25);
                }

                using var img = Dlib.LoadImage <RgbPixel>(items.Filenames[i] + $"_x{labels2[i]}.jpg");
                Dlib.DrawRectangle(img, items.Faces[i], color: color, thickness: 4);
                Dlib.SaveJpeg(img, items.Filenames[i] + $"_x{labels2[i]}.jpg", 25);
            }

        }
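
This example assumes three long-lived fields: detector, predictor, and dnn. A plausible initialization with the standard dlib model files (a sketch; the field names come from the snippet, the model paths are the usual dlib downloads and may differ):

        // Sketch only: the fields ProcessAsync relies on. ShapePredictor.Deserialize
        // and LossMetric.Deserialize (from DlibDotNet.Dnn) are real DlibDotNet APIs;
        // the model file paths are assumptions.
        private readonly FrontalFaceDetector detector = Dlib.GetFrontalFaceDetector();
        private readonly ShapePredictor predictor = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat");
        private readonly LossMetric dnn = LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat");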
Example #25
        private void StartWebCam()
        {
            bool useHaarcascade = true;

            if (cap == null)
            {
                cap = new VideoCapture(0);
            }
            if (!cap.IsOpened())
            {
                // could not open the default camera
                return;
            }
            OpenCvSharp.Cv2.NamedWindow("Video", WindowMode.AutoSize);

            EyePoints rightEye = new EyePoints(true);
            EyePoints leftEye  = new EyePoints(false);

            IsRunning = true;
            while (IsRunning)
            {
                frame = new Mat();
                bool result = cap.Read(frame);
                if (!result)
                {
                    frame.Dispose();
                    IsRunning = false;
                    break;
                }

                Rect[] faces = useHaarcascade ? GetHaarcascadeFaces(frame, 1) : GetDnnFaces(frame, 1);
                if (faces == null)
                {
                    frame.Dispose();
                    continue;
                }

                for (int i = 0; i < faces.Length; i++)
                {
                    //GetFaceInRect(faces[i], que, i);
                    Scalar              eyecolor  = new Scalar(0, 0, 255);
                    Array2D <byte>      gray      = ConvertMatToDlib2DArray(frame);
                    FullObjectDetection landmarks = predictor.Detect(gray, ConvertToDlib(faces[i]));
                    InitializeEyes(landmarks, leftEye, rightEye);
                    //DrawEye(que, landmarks, leftEye);
                    //DrawEye(que, landmarks, rightEye);
                    Rect leftboundingBox = BoundingBoxAroundEye(leftEye, 0);
                    DrawRect(frame, leftboundingBox);
                    OpenCvSharp.Point centerOfLeftEye = DetectCenterOfEye(frame, leftboundingBox);
                    centerOfLeftEye.X += leftboundingBox.X;

                    Rect rightboundingBox = BoundingBoxAroundEye(rightEye, 0);
                    DrawRect(frame, rightboundingBox);
                    OpenCvSharp.Point centerOfRightEye = DetectCenterOfEye(frame, rightboundingBox);
                    centerOfRightEye.X += rightboundingBox.X;

                    EyeDirection leftEyeDirection  = leftEye.GetEyePosition(centerOfLeftEye);
                    EyeDirection rightEyeDirection = rightEye.GetEyePosition(centerOfRightEye);

                    EyeDirection eyeDirection = EyeDirection.unknown;
                    if (leftEyeDirection == EyeDirection.center || rightEyeDirection == EyeDirection.center)
                    {
                        eyeDirection = EyeDirection.center;
                    }
                    else if (leftEyeDirection == EyeDirection.left)
                    {
                        eyeDirection = EyeDirection.left;
                    }
                    else if (rightEyeDirection == EyeDirection.right)
                    {
                        eyeDirection = EyeDirection.right;
                    }

                    OpenCvSharp.Point position = new OpenCvSharp.Point(50, 50);
                    Cv2.PutText(img: frame, text: eyeDirection.ToDisplay(), org: position, fontFace: HersheyFonts.HersheySimplex, fontScale: 2, color: eyecolor);
                }

                try
                {
                    OpenCvSharp.Cv2.ImShow("Video", frame);
                    int key = Cv2.WaitKey(10);   // as in 10 milliseconds
                    if (key == 27)
                    {
                        IsRunning = false;
                    }
                }
                catch (Exception ex)
                {
                    // swallow rendering errors so the capture loop keeps running
                    string msg = ex.Message;
                }
                frame.Dispose();
            }
            Cv2.DestroyWindow("Video");
        }
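
ConvertToDlib and ConvertMatToDlib2DArray are not shown in the snippet. Hedged sketches of what they plausibly do, mapping OpenCvSharp types onto dlib ones:

        // Sketch only: map an OpenCvSharp Rect onto a dlib rectangle.
        private static DlibDotNet.Rectangle ConvertToDlib(Rect r)
        {
            return new DlibDotNet.Rectangle(r.Left, r.Top, r.Right, r.Bottom);
        }

        // Sketch only: grayscale the frame and hand the raw bytes to dlib.
        private static Array2D <byte> ConvertMatToDlib2DArray(Mat frame)
        {
            using (var gray = new Mat())
            {
                Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
                var bytes = new byte[gray.Rows * gray.Cols];
                System.Runtime.InteropServices.Marshal.Copy(gray.Data, bytes, 0, bytes.Length);
                // single-channel image, so the row stride equals the column count
                return Dlib.LoadImageData <byte>(bytes, (uint)gray.Rows, (uint)gray.Cols, (uint)gray.Cols);
            }
        }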
Example #26
        private void ProcessFrame(object sender, EventArgs e)
        {
            Stopwatch SW = new Stopwatch();

            SW.Start();

            try
            {
                Mat temp = new Mat();
                _capture.Read(temp);

                var array = new byte[temp.Width * temp.Height * temp.ElementSize];
                temp.CopyTo(array);

                Array2D <RgbPixel> cimg = Dlib.LoadImageData <RgbPixel>(array, (uint)temp.Height, (uint)temp.Width, (uint)(temp.Width * temp.ElementSize));

                Rectangle[] faces = detector.Operator(cimg);

                if (faces.Any())
                {
                    FullObjectDetection        det    = poseModel.Detect(cimg, faces[0]);
                    List <FullObjectDetection> shapes = new List <FullObjectDetection>();
                    shapes.Add(det);
                    FullObjectDetection       shape = shapes[0];
                    ImageWindow.OverlayLine[] lines = Dlib.RenderFaceDetections(shapes);

                    if (chbShowLineOnly.Checked)
                    {
                        cimg = new Array2D <RgbPixel>(cimg.Rows, cimg.Columns);
                    }

                    foreach (var line in lines)
                    {
                        Dlib.DrawLine(cimg, line.Point1, line.Point2, new RgbPixel {
                            Green = 255
                        });
                    }

                    pictureBoxImage.Image?.Dispose();
                    pictureBoxImage.Image = cimg.ToBitmap();

                    foreach (var line in lines)
                    {
                        line.Dispose();
                    }

                    // reset the list so landmarks from previous frames do not accumulate
                    landmarkPoint.Clear();
                    for (uint i = 0; i < shape.Parts; i++)
                    {
                        landmarkPoint.Insert((int)i, new Point(shape.GetPart(i).X, shape.GetPart(i).Y));
                    }

                    GetFacialBlendShape();

                    foreach (var s in shapes)
                    {
                        s.Dispose();
                    }
                }
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.StackTrace);
            }

            SW.Stop();
            if (SW.ElapsedMilliseconds > 0)
            {
                // guard against division by zero on sub-millisecond frames
                Debug.WriteLine(string.Format("FPS: {0}", 1000 / SW.ElapsedMilliseconds));
            }
        }
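
One caveat in this frame pipeline: the camera delivers BGR frames, while RgbPixel expects RGB, so red and blue end up swapped in the rendered bitmap. If the colors matter, converting in place before the byte copy fixes it (a sketch, assuming Emgu.CV as used in this snippet):

                // Sketch only: swap BGR to RGB before Dlib.LoadImageData<RgbPixel>
                // so the pixel channels line up with the actual colors.
                CvInvoke.CvtColor(temp, temp, Emgu.CV.CvEnum.ColorConversion.Bgr2Rgb);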
        private Bitmap ProcessImage(Bitmap image, Bitmap newImage)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // convert images to opencv format
                    var selfie = BitmapConverter.ToMat(image);
                    var mark   = BitmapConverter.ToMat(newImage);

                    // convert image to dlib format
                    var img = image.ToArray2D <RgbPixel>();

                    // find all faces in image
                    var faces = fd.Operator(img);

                    // get bradley's landmark points (index 1 assumes his face is
                    // the second detection in the image)
                    var bradleyShape  = sp.Detect(img, faces[1]);
                    var bradleyPoints = (from i in Enumerable.Range(0, (int)bradleyShape.Parts)
                                         let p = bradleyShape.GetPart((uint)i)
                                                 select new OpenCvSharp.Point(p.X, p.Y)).ToArray();

                    // *** WEBINAR STEP 1: draw landmarks on bradley's face
                    // Utility.DrawLandmarks(selfie, bradleyPoints);
                    // return BitmapConverter.ToBitmap(selfie);

                    // get convex hull of bradley's points
                    var hull        = Cv2.ConvexHullIndices(bradleyPoints);
                    var bradleyHull = from i in hull
                                      select bradleyPoints[i];

                    // *** WEBINAR STEP 2a: draw convex hull for bradley
                    // Utility.DrawLandmarks(selfie, bradleyHull);
                    // Utility.DrawHull(selfie, bradleyHull);
                    // return BitmapConverter.ToBitmap(selfie);

                    // find landmark points in face to swap
                    var imgMark    = newImage.ToArray2D <RgbPixel>();
                    var faces2     = fd.Operator(imgMark);
                    var markShape  = sp.Detect(imgMark, faces2[0]);
                    var markPoints = (from i in Enumerable.Range(0, (int)markShape.Parts)
                                      let p = markShape.GetPart((uint)i)
                                              select new OpenCvSharp.Point(p.X, p.Y)).ToArray();

                    // get convex hull of mark's points, reusing bradley's hull indices
                    // so both hulls contain the same number of corresponding points
                    var hull2    = Cv2.ConvexHullIndices(bradleyPoints);
                    var markHull = from i in hull2
                                   select markPoints[i];

                    // *** WEBINAR STEP 2b: draw convex hull for mark
                    // Utility.DrawLandmarks(mark, markHull);
                    // Utility.DrawHull(mark, markHull);
                    // return BitmapConverter.ToBitmap(mark);

                    // calculate Delaunay triangles
                    var triangles = Utility.GetDelaunayTriangles(bradleyHull);

                    // *** WEBINAR STEP 3: draw delaunay triangles for bradley
                    // Utility.DrawTriangles(selfie, triangles);
                    // return BitmapConverter.ToBitmap(selfie);

                    // get transformations to warp the new face onto Bradley's face
                    var warps = Utility.GetWarps(markHull, bradleyHull, triangles);

                    // *** WEBINAR STEP 6: demonstrate triangulation with all landmark points
                    // triangles = Utility.GetDelaunayTriangles(bradleyPoints);
                    // warps = Utility.GetWarps(markPoints, bradleyPoints, triangles);

                    // apply the warps to the new face to prep it for insertion into the main image
                    var warpedImg = Utility.ApplyWarps(newImage, image.Width, image.Height, warps);

                    // *** WEBINAR STEP 4: warp triangles from mark to bradley
                    // return BitmapConverter.ToBitmap(warpedImg);

                    // prepare a mask for the warped image
                    var mask = new Mat(image.Height, image.Width, MatType.CV_8UC3);
                    mask.SetTo(0);
                    Cv2.FillConvexPoly(mask, bradleyHull, new Scalar(255, 255, 255), LineTypes.Link8);

                    // *** WEBINAR STEP 5a: show blend without seamless clone
                    // Cv2.Multiply(selfie, new Scalar(1, 1, 1) - mask, selfie);
                    // Cv2.Add(selfie, warpedImg, selfie);
                    // return BitmapConverter.ToBitmap(selfie);

                    // find the center of the warped face
                    var r      = Cv2.BoundingRect(bradleyHull);
                    var center = new OpenCvSharp.Point(r.Left + r.Width / 2, r.Top + r.Height / 2);

                    // blend the warped face into the main image
                    var blend = new Mat(selfie.Size(), selfie.Type());
                    Cv2.SeamlessClone(warpedImg, selfie, mask, center, blend, SeamlessCloneMethods.NormalClone);

                    // *** WEBINAR STEP 5b: show blend with seamless clone
                    return BitmapConverter.ToBitmap(blend);
                }
        }
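
The Utility helpers carry the interesting geometry here. For orientation, a minimal sketch of GetDelaunayTriangles built on OpenCvSharp's Subdiv2D (the helper name comes from the snippet; the body is an assumption, not the original):

        // Sketch only: Delaunay-triangulate a point set with Subdiv2D, keeping
        // triangles whose vertices all lie inside the bounding rectangle
        // (Subdiv2D also emits triangles touching its virtual outer vertices).
        public static IEnumerable <OpenCvSharp.Point[]> GetDelaunayTriangles(IEnumerable <OpenCvSharp.Point> points)
        {
            var pts  = points.ToArray();
            var rect = Cv2.BoundingRect(pts);
            using (var subdiv = new Subdiv2D(rect))
            {
                foreach (var p in pts)
                {
                    subdiv.Insert(new Point2f(p.X, p.Y));
                }
                foreach (var t in subdiv.GetTriangleList())
                {
                    var tri = new[]
                    {
                        new OpenCvSharp.Point((int)t.Item0, (int)t.Item1),
                        new OpenCvSharp.Point((int)t.Item2, (int)t.Item3),
                        new OpenCvSharp.Point((int)t.Item4, (int)t.Item5),
                    };
                    if (tri.All(v => rect.Contains(v)))
                    {
                        yield return tri;
                    }
                }
            }
        }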