Example #1
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up the Dlib face detector and shape predictor
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // draw the landmark points on the image
                        for (var i = 0; i < shape.Parts; i++)
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(img, "output.jpg");
                }
        }
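Note: inputFilePath is assumed to be declared elsewhere in the containing class, and the 68-point landmark model file must sit next to the executable. A minimal sketch of the assumed declaration (name and value are hypothetical):

        // Hypothetical field assumed by the example above; point it at a real image.
        private const string inputFilePath = "input.jpg";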
Example #2
        private void BackgroundWorkerOnDoWork(object sender, DoWorkEventArgs doWorkEventArgs)
        {
            var path = doWorkEventArgs.Argument as string;

            if (string.IsNullOrWhiteSpace(path) || !File.Exists(path))
            {
                return;
            }

            // DlibDotNet can create an Array2D directly from a file, but this sample demonstrates
            // converting a managed image class to a dlib class and vice versa.
            using (var faceDetector = Dlib.GetFrontalFaceDetector())
                using (var ms = new MemoryStream(File.ReadAllBytes(path)))
                    using (var bitmap = (Bitmap)Image.FromStream(ms))
                    {
                        using (var image = bitmap.ToArray2D <RgbPixel>())
                        {
                            var dets = faceDetector.Detect(image);
                            foreach (var r in dets)
                            {
                                Dlib.DrawRectangle(image, r, new RgbPixel {
                                    Green = 255
                                });
                            }

                            var result = image.ToBitmap();
                            this.pictureBox.Invoke(new Action(() =>
                            {
                                this.pictureBox.Image?.Dispose();
                                this.pictureBox.Image = result;
                            }));
                        }
                    }
        }
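For context, a handler like this is typically attached to a BackgroundWorker and started with the image path as its argument. A minimal wiring sketch (the caller name is assumed, not part of the original sample):

        // Hypothetical caller: runs the detection handler on a background thread.
        private void StartDetection(string path)
        {
            var worker = new BackgroundWorker();
            worker.DoWork += BackgroundWorkerOnDoWork;
            worker.RunWorkerAsync(path); // delivered as doWorkEventArgs.Argument
        }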
Example #3
        public static void DetectFacesAsync(string inputFilePath, string subscriptionKey, string uriBase, IFaceClient client, string vocabularyPath)
        {
            // set up Dlib facedetector
            DirectoryInfo dir = new DirectoryInfo(inputFilePath);

            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                foreach (FileInfo files in dir.GetFiles("*.jpg"))
                {
                    string _inputFilePath = Path.Combine(inputFilePath, files.Name);

                    // load input image
                    Array2D <RgbPixel> img = Dlib.LoadImage <RgbPixel>(_inputFilePath);

                    // find all faces in the image
                    Rectangle[] faces = fd.Operator(img);
                    if (faces.Length != 0)
                    {
                        Console.WriteLine("Picture " + files.Name + " have faces, sending data to Azure");
                        MakeAnalysisRequestAsync(_inputFilePath, subscriptionKey, uriBase, files.Name, client, vocabularyPath).Wait();
                    }

                    foreach (var face in faces)
                    {
                        // draw a rectangle for each face
                        Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                    // export the modified image
                    Dlib.SaveJpeg(img, "./Results/" + files.Name);
                }
            }
        }
Example #4
        private void buscarrosto(Bitmap frame)
        {
            Image <Rgb, Byte> imageCV = new Image <Rgb, byte>(frame);

            Emgu.CV.Mat mat   = imageCV.Mat;
            var         array = new byte[mat.Width * mat.Height * mat.ElementSize];

            mat.CopyTo(array);

            using (Array2D <RgbPixel> image = Dlib.LoadImageData <RgbPixel>(array, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * mat.ElementSize)))
            {
                using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

                {
                    var faces = fd.Operator(image);
                    foreach (DlibDotNet.Rectangle face in faces)
                    {
                        FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                        ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                        Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                        MainWindow.main.Statusa1 = bitmap1;
                        Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                }
                frame = image.ToBitmap <RgbPixel>();
                MainWindow.main.Statusa = frame;
            }
        }
Example #5
        private static List <(OutputLabels <Matrix <float> >, Rectangle[])> GetData(List <Bitmap> bitmaps, bool isAFace = false)
        {
            var datas = new List <(OutputLabels <Matrix <float> >, Rectangle[])>();

            try
            {
                foreach (var bitmap in bitmaps)
                {
                    var faces = new List <Matrix <RgbPixel> >();
                    var dets  = new Rectangle[0];
                    // to find faces in the image we need a face detector:
                    using (var detector = Dlib.GetFrontalFaceDetector())
                    {
                        using (var img = bitmap.ToMatrix <RgbPixel>())
                        {
                            // sort the detected faces by area, largest first
                            dets = detector.Operator(img).OrderByDescending(x => x.Area).ToArray();
                            // whether to use only the largest face
                            if (isAFace && dets.Length > 0)
                            {
                                var shape          = _SP.Detect(img, dets[0]);
                                var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);
                                faces.Add(faceChip);
                            }
                            else
                            {
                                foreach (var face in dets)
                                {
                                    var shape          = _SP.Detect(img, face);
                                    var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                    var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);
                                    faces.Add(faceChip);
                                }
                            }
                            if (!faces.Any())
                            {
                                datas.Add((null, null));
                            }
                            else
                            {
                                //This call asks the DNN to convert each face image into a 128D vector.
                                //In this 128D vector space, images from the same person will be close
                                //to each other, but vectors from different people will be far apart.
                                //So we can use these vectors to tell whether a pair of images comes
                                //from the same person or from different people.
                                datas.Add((_NET.Operator(faces), dets));
                            }
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                LogHelperNLog.Error(ex);
            }
            return(datas);
        }
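The 128D descriptors returned by GetData can be compared directly: dlib's convention (see Example #26) is that a Euclidean distance below 0.6 means the two faces likely belong to the same person. A comparison sketch, assuming two descriptors taken from the GetData results:

        // Hypothetical helper comparing two 128D face descriptors.
        private static bool IsSamePerson(Matrix<float> descriptor1, Matrix<float> descriptor2)
        {
            using (var diff = descriptor1 - descriptor2)
            {
                return Dlib.Length(diff) < 0.6;
            }
        }

Example #6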
 public static void DrawPointsOfLandmarks(FileInfo image)
 {
     using (var fd = Dlib.GetFrontalFaceDetector())
         using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
         {
             using (var img = Dlib.LoadImage <RgbPixel>(image.FullName))
             {
                 var faces = fd.Operator(img);
                 // for each face draw over the facial landmarks
                 foreach (var face in faces)
                 {
                     var shape = sp.Detect(img, face);
                     // draw the landmark points on the image
                     for (var i = 0; i < shape.Parts; i++)
                     {
                         var point = shape.GetPart((uint)i);
                         var rect  = new Rectangle(point);
                         if (i == 0)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 255), thickness: 8);
                         }
                         else if (i == 21 || i == 22 || i == 39 || i == 42 || i == 33 || i == 51 || i == 57 ||
                                  i == 48 ||
                                  i == 54)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 255), thickness: 4);
                         }
                          else if (i == 18 || i == 19 || i == 20 || i == 21) // left eyebrow (21 is already handled above)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 0), 6);
                         }
                          else if (i == 22 || i == 23 || i == 24 || i == 25) // right eyebrow (22 is already handled above)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 128, 0), 6);
                         }
                         else if (i == 48 || i == 49 || i == 50) // left lip
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), 2);
                         }
                         else if (i == 52 || i == 53 || i == 54) // right lip
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 128), 2);
                         }
                         else
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 0, 0), thickness: 4);
                         }
                     }
                     Dlib.SavePng(img, "output.jpg");
                 }
             }
         }
 }
Example #7
        /// <summary>
        /// Performs the actual detection
        /// </summary>
        /// <param name="bitmap"></param>
        /// <returns></returns>
        public Rectangle[] Face(Bitmap bitmap)
        {
            var dets = new Rectangle[0];

            using (var detector = Dlib.GetFrontalFaceDetector())
                //using (var img = Dlib.LoadImage<RgbPixel>("png.png"))
                using (var img = bitmap.ToArray2D <RgbPixel>())
                {
                    dets = detector.Operator(img);
                }
            return(dets);
        }
Example #8
        public Rectangle[] DetectFacesBoundingBoxes(Array2D <RgbPixel> img)
        {
            Rectangle[] facesBoundingBoxes;

            // Set up and apply face detector
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                facesBoundingBoxes = fd.Operator(img);
            }

            return(facesBoundingBoxes);
        }
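Note that GetFrontalFaceDetector constructs a new detector on every call, which is relatively expensive. When scanning many images, one option is to cache a single instance; a sketch under that assumption (dlib detectors are not safe for concurrent calls, so add a lock if used from multiple threads):

        // Hypothetical cached detector; dispose it when the owning type shuts down.
        private static readonly FrontalFaceDetector CachedDetector = Dlib.GetFrontalFaceDetector();

        public Rectangle[] DetectFacesBoundingBoxesCached(Array2D<RgbPixel> img)
        {
            return CachedDetector.Operator(img);
        }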
Example #9
        /// <summary>
        /// Gets face location data from a file path
        /// </summary>
        /// <param name="url"></param>
        /// <returns></returns>
        public static Rectangle[] GetResult(string url)
        {
            var dets = new Rectangle[0];

            url = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, url);
            using (var detector = Dlib.GetFrontalFaceDetector())
                //using (var img = Dlib.LoadImage<RgbPixel>("png.png"))
                using (var img = Dlib.LoadImage <RgbPixel>(url))
                {
                    dets = detector.Operator(img);
                }
            return(dets);
        }
Example #10
 private static void DetectFacesOnImage(Array2D <RgbPixel> image)
 {
     // set up Dlib facedetector
     using (var fd = Dlib.GetFrontalFaceDetector())
     {
         // find all faces in the image
         var faces = fd.Operator(image);
         foreach (Rectangle face in faces)
         {
             // draw a rectangle for each face
             Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
         }
     }
 }
Example #11
        public static void DetectFacesOnImage(string sourceImagePath, string destImagePath)
        {
            // set up Dlib facedetector
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                // load input image
                var image = Dlib.LoadImage <RgbPixel>(sourceImagePath);

                DetectFacesOnImage(image);

                // export the modified image
                Dlib.SaveJpeg(image, destImagePath);
            }
        }
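Two details are worth noting in the wrapper above: the outer face detector is never used (the Array2D overload creates its own), and the Array2D returned by Dlib.LoadImage is IDisposable but never disposed. A leaner sketch of the same wrapper (name hypothetical):

        // Hypothetical variant: no redundant detector, deterministic image cleanup.
        public static void DetectFacesOnImageFile(string sourceImagePath, string destImagePath)
        {
            using (var image = Dlib.LoadImage<RgbPixel>(sourceImagePath))
            {
                DetectFacesOnImage(image);
                Dlib.SaveJpeg(image, destImagePath);
            }
        }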
Example #12
        public void FindFaces()
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(path);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }
                Dlib.SaveJpeg(img, @"D:\output.png");
            }
        }
Example #13
        /// <summary>
        /// Performs the actual landmark extraction
        /// </summary>
        /// <param name="bitmaps"></param>
        /// <returns></returns>
        private static List <List <Landmark68ViewModler> > GetDate(List <Bitmap> bitmaps)
        {
            List <List <FullObjectDetection> > Detection = new List <List <FullObjectDetection> >();


            List <List <Landmark68ViewModler> > landmark68s = new List <List <Landmark68ViewModler> >();


            // face detector
            using (var detector = Dlib.GetFrontalFaceDetector())
            {
                foreach (var bitmap in bitmaps)
                {
                    var shapes = new List <FullObjectDetection>();
                    // convert the bitmap
                    using (var img = bitmap.ToArray2D <RgbPixel>())
                    {
                        // get the face locations
                        var dets = detector.Operator(img);

                        // loop over the detected faces
                        foreach (var rect in dets)
                        {
                            // landmark detection
                            var shape = _SP.Detect(img, rect);
                            if (shape.Parts > 2)
                            {
                                shapes.Add(shape);
                            }
                            List <Landmark68ViewModler> landmark68 = new List <Landmark68ViewModler>();
                            for (uint i = 0; i < shape.Parts; i++)
                            {
                                var item = shape.GetPart(i);
                                landmark68.Add(new Landmark68ViewModler()
                                {
                                    X = item.X,
                                    Y = item.Y,
                                });
                            }
                            landmark68s.Add(landmark68);
                        }
                    }
                    Detection.Add(shapes);
                }
            }
            return(landmark68s);
        }
Example #14
        public static int Number(string file)
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(file);

                int number = 0;

                var faces = fd.Operator(img);

                foreach (var face in faces)
                {
                    number += 1;
                }
                return(number);
            }
        }
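Since Operator returns a Rectangle[], the counting loop can be collapsed to the array's length. An equivalent sketch (name hypothetical) that also disposes the loaded image:

        // Hypothetical equivalent: the face count is the length of the returned array.
        public static int NumberOfFaces(string file)
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            using (var img = Dlib.LoadImage<RgbPixel>(file))
            {
                return fd.Operator(img).Length;
            }
        }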
Example #15
        public static void Recognize(string file)
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(file);

                // find all faces in the image
                var faces = fd.Operator(img);

                foreach (var face in faces)
                {
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }


                Dlib.SaveJpeg(img, file);
            }
        }
Example #16
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetector
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                // load input image
                var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                // export the modified image
                Dlib.SaveJpeg(img, "output.jpg");
            }
        }
Example #17
        /// <summary>
        /// Performs the actual detection
        /// </summary>
        /// <param name="bitmaps"></param>
        /// <returns></returns>
        private static List <Rectangle[]> GetDate(List <Bitmap> bitmaps)
        {
            List <Rectangle[]> rectangles = new List <Rectangle[]>();

            // face detector
            using (var detector = Dlib.GetFrontalFaceDetector())
            {
                // loop over all images
                foreach (var bitmap in bitmaps)
                {
                    // convert the image format
                    using (var img = bitmap.ToArray2D <RgbPixel>())
                    {
                        // get the face locations
                        var dets = detector.Operator(img);
                        rectangles.Add(dets);
                    }
                }
            }
            return(rectangles);
        }
Example #18
        public void GetImage(string imagePath)
        {
            Array2D <RgbPixel> image = Dlib.LoadImage <RgbPixel>(imagePath);

            using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

            {
                var faces = fd.Operator(image);
                foreach (DlibDotNet.Rectangle face in faces)
                {
                    FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                    ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                    Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                    Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                    MainWindow.main.Statusa1 = bitmap1;
                    Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }
            }
            Bitmap frame = image.ToBitmap <RgbPixel>();

            MainWindow.main.Statusa = frame;
        }
Example #19
        public static string TestCustomImage(string dir)
        {
            DataViewSchema predictionPipelineSchema;
            ITransformer   predictionPipeline = mlContext.Model.Load("model.zip", out predictionPipelineSchema);
            PredictionEngine <FeatureInputData, ExpressionPrediction> predictionEngine = mlContext.Model.CreatePredictionEngine <FeatureInputData, ExpressionPrediction>(predictionPipeline);
            var img = Dlib.LoadImage <RgbPixel>(dir);

            // Set up Dlib Face Detector
            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // find all faces in the image
                    var faces = fd.Operator(img);

                    // for each face draw over the facial landmarks
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        FeatureInputData inputData = new FeatureInputData
                        {
                            leftEyebrow  = CalculateLeftEyebrow(shape),
                            rightEyebrow = CalculateRightEyebrow(shape),
                            leftLip      = CalculateLeftLip(shape),
                            rightLip     = CalculateRightLip(shape),
                            lipWidth     = CalculateLipWidth(shape),
                            lipHeight    = CalculateLipHeight(shape)
                        };

                        ExpressionPrediction prediction = predictionEngine.Predict(inputData);

                        return(prediction.expression.ToString());
                    }
                }
            return("N/A");
        }
Example #20
        /// <summary>
        /// Performs the actual landmark detection
        /// </summary>
        /// <param name="bitmap"></param>
        /// <param name="sp"></param>
        /// <returns></returns>
        public List <FullObjectDetection> Face(Bitmap bitmap)
        {
            // load the model file on first use
            if (sp == null)
            {
                var basePath = AppDomain.CurrentDomain.BaseDirectory;
                sp = ShapePredictor.Deserialize(basePath + "ShapeModel/shape_predictor_68_face_landmarks.dat");
            }

            //var link = new ImageWindow.OverlayLine[0];
            var shapes = new List <FullObjectDetection>();

            using (var detector = Dlib.GetFrontalFaceDetector())
            {
                using (var img = bitmap.ToArray2D <RgbPixel>())
                {
                    var dets = detector.Operator(img);

                    foreach (var rect in dets)
                    {
                        var shape = sp.Detect(img, rect);
                        if (shape.Parts > 2)
                        {
                            shapes.Add(shape);
                        }
                    }
                    //if (shapes.Any())
                    //{
                    //    // this is it
                    //    var lines = Dlib.RenderFaceDetections(shapes);
                    //    link = lines;
                    //}
                }
            }
            return(shapes);
        }
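The commented-out block hints at Dlib.RenderFaceDetections, which converts the collected shapes into overlay lines for display. A usage sketch, assuming the caller wants to visualize the result (the window code is not part of the original sample):

        // Hypothetical visualization of the detected landmark shapes.
        using (var img = bitmap.ToArray2D<RgbPixel>())
        using (var win = new ImageWindow(img))
        {
            win.AddOverlay(Dlib.RenderFaceDetections(shapes));
            win.WaitUntilClosed();
        }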
Example #21
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up the Dlib face detector and shape predictor
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // build the 3d face model
                        var model = Utility.GetFaceModel();

                        // get the landmark point we need
                        var landmarks = new MatOfPoint2d(1, 6,
                                                         (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                                                          let pt = shape.GetPart((uint)i)
                                                                   select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

                        // build the camera matrix
                        var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);

                        // build the coefficient matrix
                        var coeffs = new MatOfDouble(4, 1);
                        coeffs.SetTo(0);

                        // find head rotation and translation
                        Mat rotation    = new MatOfDouble();
                        Mat translation = new MatOfDouble();
                        Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                        // find euler angles
                        var euler = Utility.GetEulerMatrix(rotation);

                        // calculate head rotation in degrees
                        var yaw   = 180 * euler.At <double>(0, 2) / Math.PI;
                        var pitch = 180 * euler.At <double>(0, 1) / Math.PI;
                        var roll  = 180 * euler.At <double>(0, 0) / Math.PI;

                        // looking straight ahead wraps at -180/180, so make the range smooth
                        pitch = Math.Sign(pitch) * 180 - pitch;

                        // calculate if the driver is facing forward
                        // the left/right angle must be in the -25..25 range
                        // the up/down angle must be in the -10..10 range
                        var facingForward =
                            yaw >= -25 && yaw <= 25 &&
                            pitch >= -10 && pitch <= 10;

                        // create a new model point in front of the nose, and project it into 2d
                        var poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
                        var poseProjection = new MatOfPoint2d();
                        Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                        // draw the key landmark points in yellow on the image
                        foreach (var i in new int[] { 30, 8, 36, 45, 48, 54 })
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        // draw a line from the tip of the nose pointing in the direction of head pose
                        var landmark = landmarks.At <Point2d>(0);
                        var p        = poseProjection.At <Point2d>(0);
                        Dlib.DrawLine(
                            img,
                            new DlibDotNet.Point((int)landmark.X, (int)landmark.Y),
                            new DlibDotNet.Point((int)p.X, (int)p.Y),
                            color: new RgbPixel(0, 255, 255));

                        // draw a box around the face if it's facing forward
                        if (facingForward)
                        {
                            Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(img, "output.jpg");
                }
        }
Example #22
        private void Test(Bitmap bitmap)
        {
            var rvec = new double[] { 0, 0, 0 };
            var tvec = new double[] { 0, 0, 0 };


            //FullObjectDetection shape = null;
            using (var detector = Dlib.GetFrontalFaceDetector())
                using (var img = bitmap.ToArray2D <RgbPixel>())
                {
                    // get the face locations
                    var dets = detector.Operator(img);

                    // loop over the detected faces
                    foreach (var rect in dets)
                    {
                        // landmark detection
                        var shape = _SP.Detect(img, rect);



                        var focal_length = (double)bitmap.Width;                                      // approximate focal length: the image width
                        var center       = ((double)bitmap.Width / 2.0, (double)bitmap.Height / 2.0); // image center (width/2, height/2)



                        var p1 = shape.GetPart(0);
                        var p2 = shape.GetPart(16);

                        var dx = (double)(p2.X - p1.X);
                        var dy = (double)(p2.Y - p1.Y);

                        // in-plane roll angle of the line through jaw points 0 and 16, in degrees
                        var nA = Math.Atan2(dy, dx) * 180 / Math.PI;



                        // approximate camera intrinsics (replace with calibrated values if available)
                        var cameraMatrix = new double[3, 3]
                        {
                            { focal_length, 0, center.Item1 },
                            { 0, focal_length, center.Item2 },
                            { 0, 0, 1 }
                        };


                        var dist = new double[] { 0, 0, 0, 0, 0 };



                        // 3D model reference points
                        var objPts = new Point3f[]
                        {
                            new Point3f(0.0f, 0.0f, 0.0f),          // Nose tip
                            new Point3f(0.0f, -330.0f, -65.0f),     // Chin
                            new Point3f(-225.0f, 170.0f, -135.0f),  // Left eye left corner
                            new Point3f(225.0f, 170.0f, -135.0f),   // Right eye right corner
                            new Point3f(-150.0f, -150.0f, -125.0f), // Left Mouth corner
                            new Point3f(150.0f, -150.0f, -125.0f)   // Right mouth corner
                        };


                        // output Jacobian buffer for ProjectPoints (not used further)
                        double[,] jacobian = new double[4, 1];

                        // corresponding 2D landmark positions in the image
                        var imgPts = new Point2f[]
                        {
                            new Point2f(shape.GetPart(30).X, shape.GetPart(30).Y), // Nose tip
                            new Point2f(shape.GetPart(8).X, shape.GetPart(8).Y),   // Chin
                            new Point2f(shape.GetPart(36).X, shape.GetPart(36).Y), // Left eye left corner
                            new Point2f(shape.GetPart(45).X, shape.GetPart(45).Y), // Right eye right corner
                            new Point2f(shape.GetPart(48).X, shape.GetPart(48).Y), // Left Mouth corner
                            new Point2f(shape.GetPart(54).X, shape.GetPart(54).Y), // Right mouth corner
                        };


                        //var imgPts = new Point2f[]
                        //{
                        //    new Point2f(359, 391),     // Nose tip
                        //    new Point2f(399, 561),     // Chin
                        //    new Point2f(337, 297),     // Left eye left corner
                        //    new Point2f(513, 301),     // Right eye right corne
                        //    new Point2f(345, 465),     // Left Mouth corner
                        //    new Point2f(453, 469)      // Right mouth corner
                        //};

                        Cv2.SolvePnP(objPts, imgPts, cameraMatrix, dist, out rvec, out tvec, flags: SolvePnPFlags.Iterative);

                        GetEulerAngle(rvec);



                        var arr = new List <Point3f>()
                        {
                            new Point3f(0.0f, 0.0f, 1000.0f)
                        };


                        Cv2.ProjectPoints(arr, rvec, tvec, cameraMatrix, dist, out imgPts, out jacobian);



                        Cv2.Rodrigues(rvec, out cameraMatrix);



                        var im = Cv2.ImRead("headPose.jpg");
                        Cv2.Line(im, (int)shape.GetPart(30).X, (int)shape.GetPart(30).Y, (int)imgPts[0].X, (int)imgPts[0].Y, Scalar.Blue, 2, LineTypes.Link8);

                        Cv2.ImShow("output", im);
                        Cv2.WaitKey(0);
                        //Cv2.Line(im, 0, 0, 0, 0, Scalar.All(255));
                    }
                }
        }
Example #23
        private static InputDataImages GetFeaturesValuesFromImage(string str)
        {
            var returnClass = new InputDataImages();

            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(str);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    // for each face draw over the facial landmarks


                    // compute the feature values for each detected face
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // draw the landmark points on the image
                        for (var i = 0; i < shape.Parts; i++)
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        /////////////// WEEK 9 LAB ////////////////

                        double[] LeftEyebrowDistances  = new double[4];
                        double[] RightEyebrowDistances = new double[4];

                        float LeftEyebrowSum  = 0;
                        float RightEyebrowSum = 0;

                        //LIP VARIABLES
                        double[] LeftLipDistances  = new double[4];
                        double[] RightLipDistances = new double[4];
                        float    LeftLipSum        = 0;
                        float    RightLipSum       = 0;


                        LeftEyebrowDistances[0] = (shape.GetPart(21) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[1] = (shape.GetPart(20) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[2] = (shape.GetPart(19) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[3] = (shape.GetPart(18) - shape.GetPart(39)).Length;

                        RightEyebrowDistances[0] = (shape.GetPart(22) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[1] = (shape.GetPart(23) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[2] = (shape.GetPart(24) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[3] = (shape.GetPart(25) - shape.GetPart(42)).Length;


                        //LIP
                        LeftLipDistances[0] = (shape.GetPart(51) - shape.GetPart(33)).Length;
                        LeftLipDistances[1] = (shape.GetPart(50) - shape.GetPart(33)).Length;
                        LeftLipDistances[2] = (shape.GetPart(49) - shape.GetPart(33)).Length;
                        LeftLipDistances[3] = (shape.GetPart(48) - shape.GetPart(33)).Length;


                        RightLipDistances[0] = (shape.GetPart(51) - shape.GetPart(33)).Length;
                        RightLipDistances[1] = (shape.GetPart(52) - shape.GetPart(33)).Length;
                        RightLipDistances[2] = (shape.GetPart(53) - shape.GetPart(33)).Length;
                        RightLipDistances[3] = (shape.GetPart(54) - shape.GetPart(33)).Length;


                        for (int i = 0; i < 4; i++)
                        {
                            LeftEyebrowSum  += (float)(LeftEyebrowDistances[i] / LeftEyebrowDistances[0]);
                            RightEyebrowSum += (float)(RightEyebrowDistances[i] / RightEyebrowDistances[0]);
                        }

                        LeftLipSum += (float)(LeftLipDistances[1] / LeftLipDistances[0]);
                        LeftLipSum += (float)(LeftLipDistances[2] / LeftLipDistances[0]);
                        LeftLipSum += (float)(LeftLipDistances[3] / LeftLipDistances[0]);


                        RightLipSum += (float)(RightLipDistances[1] / RightLipDistances[0]);
                        RightLipSum += (float)(RightLipDistances[2] / RightLipDistances[0]);
                        RightLipSum += (float)(RightLipDistances[3] / RightLipDistances[0]);

                        double LipWidth  = (float)((shape.GetPart(48) - shape.GetPart(54)).Length / (shape.GetPart(33) - shape.GetPart(51)).Length);
                        double LipHeight = (float)((shape.GetPart(51) - shape.GetPart(57)).Length / (shape.GetPart(33) - shape.GetPart(51)).Length);

                        returnClass.LeftEyebrow  = LeftEyebrowSum;
                        returnClass.RightEyebrow = RightEyebrowSum;
                        returnClass.LeftLip      = LeftLipSum;
                        returnClass.RightLip     = RightLipSum;
                        returnClass.LipWidth     = (float)LipWidth;
                        returnClass.LipHeight    = (float)LipHeight;


                        // export the modified image
                        string filePath = "output" + ".jpg";
                        Dlib.SaveJpeg(img, filePath);
                    }
                }

            using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"TestingFeatureVectorValues.csv", true))
            {
                DirectoryInfo dr = new DirectoryInfo(str);
                //Console.WriteLine(dr.Parent.Name.ToString());
                string ParentFolderName = dr.Parent.Name.ToString();

                file.WriteLine(ParentFolderName + "," + returnClass.LeftEyebrow.ToString() + "," + returnClass.RightEyebrow.ToString()
                               + "," + returnClass.LeftLip.ToString() + "," + returnClass.RightLip.ToString() + "," + returnClass.LipWidth.ToString()
                               + "," + returnClass.LipHeight.ToString());
            }
            return(returnClass);
        }
Example #24
        // The main program entry point
        static void Main(string[] args)
        {
            bool use_mirror = false;

            // file paths
            string[] files = Directory.GetFiles("images", "*.*", SearchOption.AllDirectories);
            List <FullObjectDetection> shapes = new List <FullObjectDetection>();
            List <string> emotions            = new List <string>();

            // Set up Dlib Face Detector
            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    for (int i = 0; i < files.Length; i++)
                    {
                        var emotion = GetEmotion(files[i]);
                        var img     = Dlib.LoadImage <RgbPixel>(files[i]);

                        // find all faces in the image
                        var faces = fd.Operator(img);
                        // for each face draw over the facial landmarks
                        foreach (var face in faces)
                        {
                            // find the landmark points for this face
                            var shape = sp.Detect(img, face);
                            shapes.Add(shape);
                            emotions.Add(emotion);
                            // draw the landmark points on the image

                            for (var i2 = 0; i2 < shape.Parts; i2++)
                            {
                                var point = shape.GetPart((uint)i2);
                                var rect  = new Rectangle(point);

                                if (point == GetPoint(shape, 40) || point == GetPoint(shape, 22))
                                {
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 255, 0), thickness: 4);
                                }
                                else
                                {
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                                }
                            }
                        }

                        // export the modified image
                        Console.WriteLine(files[i]);
                        Dlib.SaveJpeg(img, "output_" + files[i]);
                    }

                    string header = "leftEyebrow,rightEyebrow,leftLip,rightLip,lipHeight,lipWidth,emotion\n";
                    System.IO.File.WriteAllText(@"feature_vectors.csv", header);
                    for (var i = 0; i < shapes.Count; i++)
                    {
                        var shape   = shapes[i];
                        var emotion = emotions[i];
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"feature_vectors.csv", true))
                        {
                            file.WriteLine(GetLeftEyebrow(shape) + "," + GetRightEyebrow(shape) + "," +
                                           GetLeftLip(shape) + "," + GetRightLip(shape) + "," + GetLipWidth(shape) + "," + GetLipHeight(shape) +
                                           "," + emotion);
                            if (use_mirror)
                            {
                                file.WriteLine(GetRightEyebrow(shape) + "," + GetLeftEyebrow(shape) + "," +
                                               GetRightLip(shape) + "," + GetLeftLip(shape) + "," + GetLipWidth(shape) + "," + GetLipHeight(shape) +
                                               "," + emotion);
                            }
                        }
                    }
                }
        }
Example #25
    /// <summary>
    /// Predict the emotion of an image.
    /// </summary>
    /// <param name="imageFileInfo"><see cref="FileInfo"/> of the image file.</param>
    /// <param name="TFaceData">Type of face data that the parameters should be used.</param>
    /// <param name="predictedEmotion">The emotion that was predicted.</param>
    /// <param name="predictedEmotionWithAllLabels">All the other emotions with their scores appended after.</param>
    public static void PredictEmotion(FileInfo imageFileInfo, Type TFaceData, out FaceOutput predictedEmotion, out Dictionary <string, float> predictedEmotionWithAllLabels)
    {
        if (mlContext == null)
        {
            mlContext = new MLContext();
        }

        if (model == null)
        {
            model = mlContext.Model.Load(GetModelZipFileName(TFaceData), out var inputSchema);
        }

        predictedEmotion = null;
        predictedEmotionWithAllLabels = null;

        // Not using generics because different function calls are required anyway.
        if (TFaceData == typeof(FaceData1))
        {
            using (var predictor = mlContext.Model.CreatePredictionEngine <FaceData1, FaceOutput>(model))
            {
                using (var fd = Dlib.GetFrontalFaceDetector())
                    using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
                    {
                        var faceDataFromImage = GetFaceData1FromImage(imageFileInfo, sp, fd, false);
                        faceDataFromImage.Emotion = ""; // Get rid of label, as this is what we want to know.

                        predictedEmotion = predictor.Predict(faceDataFromImage);
                    }

                // Prediction with all labels.
                predictedEmotionWithAllLabels = new Dictionary <string, float>();
                var slotNames = new VBuffer <ReadOnlyMemory <char> >();
                predictor.OutputSchema.GetColumnOrNull("Label")?.GetKeyValues(ref slotNames);
                var names = new string[slotNames.Length];
                var num   = 0;
                foreach (var denseValue in slotNames.DenseValues())
                {
                    predictedEmotionWithAllLabels.Add(denseValue.ToString(), predictedEmotion.Scores[num++]);
                }

                Console.WriteLine("Predicted Emotion: " + predictedEmotion.PredictedEmotion);
                Console.WriteLine($"Scores: {string.Join(" ", predictedEmotion.Scores)}");
            }
        }
        else if (TFaceData == typeof(FaceData2))
        {
            using (var predictor = mlContext.Model.CreatePredictionEngine <FaceData2, FaceOutput>(model))
            {
                using (var fd = Dlib.GetFrontalFaceDetector())
                    using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
                    {
                        var faceDataFromImage = GetFaceData2FromImage(imageFileInfo, sp, fd, false);
                        faceDataFromImage.Emotion = ""; // Get rid of label, as this is what we want to know.

                        predictedEmotion = predictor.Predict(faceDataFromImage);
                    }

                // Prediction with all labels.
                predictedEmotionWithAllLabels = new Dictionary <string, float>();
                var slotNames = new VBuffer <ReadOnlyMemory <char> >();
                predictor.OutputSchema.GetColumnOrNull("Label")?.GetKeyValues(ref slotNames);
                var names = new string[slotNames.Length];
                var num   = 0;
                foreach (var denseValue in slotNames.DenseValues())
                {
                    predictedEmotionWithAllLabels.Add(denseValue.ToString(), predictedEmotion.Scores[num++]);
                }

                Console.WriteLine("Predicted Emotion: " + predictedEmotion.PredictedEmotion);
                Console.WriteLine($"Scores: {string.Join(" ", predictedEmotion.Scores)}");
            }
        }
        else if (TFaceData == typeof(FaceData3))
        {
            using (var predictor = mlContext.Model.CreatePredictionEngine <FaceData3, FaceOutput>(model))
            {
                using (var fd = Dlib.GetFrontalFaceDetector())
                    using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
                    {
                        var faceDataFromImage = GetFaceData3FromImage(imageFileInfo, sp, fd, false);
                        faceDataFromImage.Emotion = ""; // Get rid of label, as this is what we want to know.

                        predictedEmotion = predictor.Predict(faceDataFromImage);
                    }

                // Prediction with all labels.
                predictedEmotionWithAllLabels = new Dictionary <string, float>();
                var slotNames = new VBuffer <ReadOnlyMemory <char> >();
                predictor.OutputSchema.GetColumnOrNull("Label")?.GetKeyValues(ref slotNames);
                var names = new string[slotNames.Length];
                var num   = 0;
                foreach (var denseValue in slotNames.DenseValues())
                {
                    predictedEmotionWithAllLabels.Add(denseValue.ToString(), predictedEmotion.Scores[num++]);
                }

                Console.WriteLine("Predicted Emotion: " + predictedEmotion.PredictedEmotion);
                Console.WriteLine($"Scores: {string.Join(" ", predictedEmotion.Scores)}");
            }
        }
    }
Example #26
        static void Main(string[] args)
        {
            // Face detection with the API
            Location[] coord = TestImage(fileName, Model.Hog);


            // Face detection with DLIB
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(fileName);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                Dlib.SaveJpeg(img, outputName);
            }


            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = Dlib.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImageAsMatrix <RgbPixel>(fileName))

                            using (var win = new ImageWindow(img))
                            {
                                var faces = new List <Matrix <RgbPixel> >();
                                foreach (var face in detector.Operator(img))
                                {
                                    var shape          = sp.Detect(img, face);
                                    var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                    var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);

                                    //faces.Add(move(face_chip));
                                    faces.Add(faceChip);

                                    win.AddOverlay(face);
                                }

                                if (!faces.Any())
                                {
                                    Console.WriteLine("No faces found in image!");
                                    return;
                                }

                                // This call asks the DNN to convert each face image in faces into a 128D vector.
                                // In this 128D vector space, images from the same person will be close to each other
                                // but vectors from different people will be far apart.  So we can use these vectors to
                                // identify if a pair of images are from the same person or from different people.
                                var faceDescriptors = net.Operator(faces);

                                // In particular, one simple thing we can do is face clustering.  This next bit of code
                                // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                // algorithm to identify how many people there are and which faces belong to whom.
                                var edges = new List <SamplePair>();
                                for (uint i = 0; i < faceDescriptors.Count; ++i)
                                {
                                    for (var j = i; j < faceDescriptors.Count; ++j)
                                    {
                                        // Faces are connected in the graph if they are close enough.  Here we check if
                                        // the distance between two face descriptors is less than 0.6, which is the
                                        // decision threshold the network was trained to use.  Although you can
                                        // certainly use any other threshold you find useful.
                                        var diff = faceDescriptors[i] - faceDescriptors[j];
                                        if (Dlib.Length(diff) < 0.6)
                                        {
                                            edges.Add(new SamplePair(i, j));
                                        }
                                    }
                                }

                                Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                // This will correctly indicate that there are 4 people in the image.
                                Console.WriteLine($"number of people found in the image: {numClusters}");


                                // Display the results in a list of image windows
                                var winClusters = new List <ImageWindow>();
                                for (var i = 0; i < numClusters; i++)
                                {
                                    winClusters.Add(new ImageWindow());
                                }
                                var tileImages = new List <Matrix <RgbPixel> >();
                                for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                {
                                    var temp = new List <Matrix <RgbPixel> >();
                                    for (var j = 0; j < labels.Length; ++j)
                                    {
                                        if (clusterId == labels[j])
                                        {
                                            temp.Add(faces[j]);
                                        }
                                    }

                                    winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                    var tileImage = Dlib.TileImages(temp);
                                    tileImages.Add(tileImage);
                                    winClusters[(int)clusterId].SetImage(tileImage);
                                }


                                // Finally, let's print one of the face descriptors to the screen.
                                using (var trans = Dlib.Trans(faceDescriptors[0]))
                                {
                                    Console.WriteLine($"face descriptor for one face: {trans}");

                                    // It should also be noted that face recognition accuracy can be improved if jittering
                                    // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                    // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                    // like so:
                                    var jitterImages = JitterImage(faces[0]).ToArray();
                                    var ret          = net.Operator(jitterImages);
                                    using (var m = Dlib.Mat(ret))
                                        using (var faceDescriptor = Dlib.Mean <float>(m))
                                            using (var t = Dlib.Trans(faceDescriptor))
                                            {
                                                Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                // If you use the model without jittering, as we did when clustering the bald guys, it
                                                // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                // procedure a little more accurate but makes face descriptor calculation slower.

                                                Console.WriteLine("hit enter to terminate");
                                                Console.ReadKey();

                                                foreach (var jitterImage in jitterImages)
                                                {
                                                    jitterImage.Dispose();
                                                }

                                                foreach (var tileImage in tileImages)
                                                {
                                                    tileImage.Dispose();
                                                }

                                                foreach (var edge in edges)
                                                {
                                                    edge.Dispose();
                                                }

                                                foreach (var descriptor in faceDescriptors)
                                                {
                                                    descriptor.Dispose();
                                                }

                                                foreach (var face in faces)
                                                {
                                                    face.Dispose();
                                                }
                                            }
                                }
                            }

            Console.ReadLine();
        }
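
        // The JitterImage helper used above is not shown in this listing. A minimal
        // sketch, assuming the Dlib.JitterImage(Matrix<RgbPixel>, Rand) overload and
        // the usings from the DlibDotNet face-recognition sample:
        private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img)
        {
            // Make 100 copies of img, each slightly zoomed, rotated, and translated,
            // and randomly mirrored left to right.
            var rnd = new Rand();
            for (var i = 0; i < 100; ++i)
                yield return Dlib.JitterImage(img, rnd);
        }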
        public static void CreateFeatureVectors()
        {
            int    faceCount = 0;
            float  leftEyebrow, rightEyebrow, leftLip, rightLip, lipHeight, lipWidth;
            string output;

            if (currentDataType == Datatype.Testing)
            {
                output = testingOutput;
            }
            else
            {
                output = trainingOutput;
            }

            string[] filePaths = Directory.GetFiles(currentFilePath, "*.*", SearchOption.AllDirectories);

            // Set up Dlib Face Detector
            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    string header = "leftEyebrow,rightEyebrow,leftLip,rightLip,lipWidth,lipHeight,label\n";

                    // Create the CSV file and fill in the first line with the header
                    System.IO.File.WriteAllText(output, header);

                    foreach (string filePath in filePaths)
                    {
                        // derive the label from the file name
                        string label = DetermineLabel(filePath);

                        // skip files that are not png or jpg images
                        if (!(filePath.EndsWith("png") || filePath.EndsWith("jpg")))
                        {
                            continue;
                        }

                        // load the input image
                        var img = Dlib.LoadImage <RgbPixel>(filePath);

                        // find all faces in the image
                        var faces = fd.Operator(img);

                        // for each face draw over the facial landmarks
                        foreach (var face in faces)
                        {
                            // Update the progress display on the form
                            Form1.SetProgress(faceCount, filePaths.Length - 1);

                            // find the landmark points for this face
                            var shape = sp.Detect(img, face);

                            for (var i = 0; i < shape.Parts; i++)
                            {
                                RgbPixel colour = new RgbPixel(255, 255, 255);
                                var      point  = shape.GetPart((uint)i);
                                var      rect   = new DlibDotNet.Rectangle(point);
                                Dlib.DrawRectangle(img, rect, color: colour, thickness: 2);
                            }

                            SetFormImage(img);

                            leftEyebrow  = CalculateLeftEyebrow(shape);
                            rightEyebrow = CalculateRightEyebrow(shape);
                            leftLip      = CalculateLeftLip(shape);
                            rightLip     = CalculateRightLip(shape);
                            lipWidth     = CalculateLipWidth(shape);
                            lipHeight    = CalculateLipHeight(shape);

                            // Append one CSV row per detected face. Note: opening the writer once,
                            // outside the face loop, would avoid reopening the file for every face.
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter(output, true))
                            {
                                file.WriteLine(leftEyebrow + "," + rightEyebrow + "," + leftLip + "," + rightLip + "," + lipWidth + "," + lipHeight + "," + label);
                            }

                            // Increment the count used for the progress display
                            faceCount++;
                        }
                    }

                    if (currentDataType == Datatype.Testing)
                    {
                        var testDataView = mlContext.Data.LoadFromTextFile <FeatureInputData>(output, hasHeader: true, separatorChar: ',');
                        GenerateMetrics(testDataView);
                    }

                    Form1.HideImage();
                }
        }
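
        // DetermineLabel and FeatureInputData are used above but not shown in this
        // listing. Minimal sketches, under the assumption that the label is encoded
        // in the file name and that the CSV columns follow the header written above
        // (both the emotion list and the column mapping are hypothetical):
        private static string DetermineLabel(string filePath)
        {
            // Hypothetical mapping: assume the emotion name appears somewhere in
            // the file name, e.g. "happy_001.png" -> "happy".
            var name = Path.GetFileName(filePath).ToLowerInvariant();
            foreach (var emotion in new[] { "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" })
            {
                if (name.Contains(emotion))
                {
                    return emotion;
                }
            }
            return "unknown";
        }

        // Hypothetical ML.NET schema matching the CSV column order written above
        // (requires Microsoft.ML.Data for the LoadColumn attribute).
        public class FeatureInputData
        {
            [LoadColumn(0)] public float LeftEyebrow { get; set; }
            [LoadColumn(1)] public float RightEyebrow { get; set; }
            [LoadColumn(2)] public float LeftLip { get; set; }
            [LoadColumn(3)] public float RightLip { get; set; }
            [LoadColumn(4)] public float LipWidth { get; set; }
            [LoadColumn(5)] public float LipHeight { get; set; }
            [LoadColumn(6)] public string Label { get; set; }
        }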
Example No. 28
        private static int Main(string[] args)
        {
            var app = new CommandLineApplication(false);

            app.Name        = nameof(AgeTraining);
            app.Description = "Trains an age classifier on the Adience OUI Unfiltered faces gender/age dataset";
            app.HelpOption("-h|--help");

            app.Command("train", command =>
            {
                const uint epochDefault             = 300;
                const double learningRateDefault    = 0.001d;
                const double minLearningRateDefault = 0.00001d;
                const uint minBatchSizeDefault      = 256;
                const uint validationDefault        = 30;

                var datasetOption         = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
                var epochOption           = command.Option("-e|--epoch", $"The epoch. Default is {epochDefault}", CommandOptionType.SingleValue);
                var learningRateOption    = command.Option("-l|--lr", $"The learning rate. Default is {learningRateDefault}", CommandOptionType.SingleValue);
                var minLearningRateOption = command.Option("-m|--min-lr", $"The minimum learning rate. Default is {minLearningRateDefault}", CommandOptionType.SingleValue);
                var minBatchSizeOption    = command.Option("-b|--min-batchsize", $"The minimum batch size. Default is {minBatchSizeDefault}", CommandOptionType.SingleValue);
                var validationOption      = command.Option("-v|--validation-interval", $"The interval of validation. Default is {validationDefault}", CommandOptionType.SingleValue);
                var useMeanOption         = command.Option("-u|--use-mean", "Use mean image", CommandOptionType.NoValue);

                command.OnExecute(() =>
                {
                    var dataset = datasetOption.Value();
                    if (!datasetOption.HasValue() || !Directory.Exists(dataset))
                    {
                        Console.WriteLine("dataset does not exist");
                        return(-1);
                    }

                    var epoch = epochDefault;
                    if (epochOption.HasValue() && !uint.TryParse(epochOption.Value(), out epoch))
                    {
                        Console.WriteLine("epoch is invalid value");
                        return(-1);
                    }

                    var learningRate = learningRateDefault;
                    if (learningRateOption.HasValue() && !double.TryParse(learningRateOption.Value(), NumberStyles.Float, Thread.CurrentThread.CurrentCulture.NumberFormat, out learningRate))
                    {
                        Console.WriteLine("learning rate is invalid value");
                        return(-1);
                    }

                    var minLearningRate = minLearningRateDefault;
                    if (minLearningRateOption.HasValue() && !double.TryParse(minLearningRateOption.Value(), NumberStyles.Float, Thread.CurrentThread.CurrentCulture.NumberFormat, out minLearningRate))
                    {
                        Console.WriteLine("minimum learning rate is invalid value");
                        return(-1);
                    }

                    var minBatchSize = minBatchSizeDefault;
                    if (minBatchSizeOption.HasValue() && !uint.TryParse(minBatchSizeOption.Value(), out minBatchSize))
                    {
                        Console.WriteLine("minimum batch size is invalid value");
                        return(-1);
                    }

                    var validation = validationDefault;
                    if ((validationOption.HasValue() && !uint.TryParse(validationOption.Value(), out validation)) || validation == 0)
                    {
                        Console.WriteLine("validation interval is invalid value");
                        return(-1);
                    }

                    var useMean = useMeanOption.HasValue();

                    Console.WriteLine($"            Dataset: {dataset}");
                    Console.WriteLine($"              Epoch: {epoch}");
                    Console.WriteLine($"      Learning Rate: {learningRate}");
                    Console.WriteLine($"  Min Learning Rate: {minLearningRate}");
                    Console.WriteLine($"     Min Batch Size: {minBatchSize}");
                    Console.WriteLine($"Validation Interval: {validation}");
                    Console.WriteLine($"           Use Mean: {useMean}");
                    Console.WriteLine();

                    var baseName = $"adience-age-network_{epoch}_{learningRate}_{minLearningRate}_{minBatchSize}_{useMean}";
                    Train(baseName, dataset, epoch, learningRate, minLearningRate, minBatchSize, validation, useMean);

                    return(0);
                });
            });

            app.Command("test", command =>
            {
                var datasetOption = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
                var modelOption   = command.Option("-m|--model", "The model file.", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    var dataset = datasetOption.Value();
                    if (!datasetOption.HasValue() || !Directory.Exists(dataset))
                    {
                        Console.WriteLine("dataset does not exist");
                        return(-1);
                    }

                    var model = modelOption.Value();
                    if (!modelOption.HasValue() || !File.Exists(model))
                    {
                        Console.WriteLine("model does not exist");
                        return(-1);
                    }

                    Console.WriteLine($"Dataset: {dataset}");
                    Console.WriteLine($"  Model: {model}");
                    Console.WriteLine();

                    Test(dataset, model);

                    return(0);
                });
            });

            app.Command("preprocess", command =>
            {
                var datasetOption = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
                var outputOption  = command.Option("-o|--output", "The path to output preprocessed dataset", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    var dataset = datasetOption.Value();
                    if (!datasetOption.HasValue() || !Directory.Exists(dataset))
                    {
                        Console.WriteLine("dataset does not exist");
                        return(-1);
                    }

                    var output = outputOption.Value();
                    if (!outputOption.HasValue())
                    {
                        Console.WriteLine("output does not specify");
                        return(-1);
                    }

                    Directory.CreateDirectory(output);

                    var types = new[]
                    {
                        "train", "test"
                    };

                    foreach (var type in types)
                    {
                        var imageDir = Path.Combine(dataset, type);
                        if (!Directory.Exists(imageDir))
                        {
                            Console.WriteLine($"{imageDir} does not exist");
                            return(-1);
                        }

                        var csv = Path.Combine(dataset, $"{type}.csv");
                        if (!File.Exists(csv))
                        {
                            Console.WriteLine($"{csv} does not exist");
                            return(-1);
                        }

                        File.Copy(csv, Path.Combine(output, $"{type}.csv"), true);

                        Directory.CreateDirectory(Path.Combine(output, type));
                    }

                    Console.WriteLine($"Dataset: {dataset}");
                    Console.WriteLine($" Output: {output}");
                    Console.WriteLine();

                    using (var posePredictor = ShapePredictor.Deserialize("shape_predictor_5_face_landmarks.dat"))
                        using (var faceDetector = Dlib.GetFrontalFaceDetector())
                        {
                            foreach (var type in types)
                            {
                                Preprocess(type, dataset, faceDetector, posePredictor, output);
                            }
                        }

                    return(0);
                });
            });

            return(app.Execute(args));
        }
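
        // Example invocations (assumption: the compiled assembly is AgeTraining.dll;
        // the model file name follows the baseName pattern built in the train command):
        //   dotnet AgeTraining.dll preprocess -d ./adience -o ./adience-preprocessed
        //   dotnet AgeTraining.dll train -d ./adience-preprocessed -e 300 -l 0.001
        //   dotnet AgeTraining.dll test -d ./adience-preprocessed -m <model file>

        // The Preprocess method called above is not shown in this listing. A minimal
        // sketch, assuming it aligns each detected face to a fixed-size chip via the
        // 5-point landmark model and saves it under the output directory (chip size,
        // padding, and the jpg-only filter are illustrative choices):
        private static void Preprocess(string type, string dataset, FrontalFaceDetector detector, ShapePredictor predictor, string output)
        {
            foreach (var path in Directory.GetFiles(Path.Combine(dataset, type), "*.jpg"))
            {
                using (var img = Dlib.LoadImage<RgbPixel>(path))
                {
                    foreach (var rect in detector.Operator(img))
                    {
                        using (var shape = predictor.Detect(img, rect))
                        using (var chipDetail = Dlib.GetFaceChipDetails(shape, 227, 0.25))
                        using (var chip = Dlib.ExtractImageChip<RgbPixel>(img, chipDetail))
                        {
                            Dlib.SaveJpeg(chip, Path.Combine(output, type, Path.GetFileName(path)));
                        }

                        break; // assumption: keep only the first detected face per image
                    }
                }
            }
        }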
Example No. 29
        private void BackgroundWorkerOnDoWork(object sender, DoWorkEventArgs doWorkEventArgs)
        {
            var path = doWorkEventArgs.Argument as string;

            if (string.IsNullOrWhiteSpace(path) || !File.Exists(path))
            {
                return;
            }

            using (var faceDetector = Dlib.GetFrontalFaceDetector())
                using (var img = Dlib.LoadImage <RgbPixel>(path))
                {
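                    // Upsample the image so that smaller faces become large enough to detect.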
                    Dlib.PyramidUp(img);

                    var dets = faceDetector.Operator(img);

                    var shapes = new List <FullObjectDetection>();
                    foreach (var rect in dets)
                    {
                        var shape = this._ShapePredictor.Detect(img, rect);
                        if (shape.Parts <= 2)
                        {
                            continue;
                        }
                        shapes.Add(shape);
                    }

                    if (shapes.Any())
                    {
                        var lines = Dlib.RenderFaceDetections(shapes);
                        foreach (var line in lines)
                        {
                            Dlib.DrawLine(img, line.Point1, line.Point2, new RgbPixel
                            {
                                Green = 255
                            });
                        }

                        var bitmap = img.ToBitmap();
                        this.pictureBoxImage.Image?.Dispose();
                        this.pictureBoxImage.Image = bitmap;

                        foreach (var l in lines)
                        {
                            l.Dispose();
                        }

                        // Compute aligned face-chip locations from the detected landmarks.
                        var chipLocations = Dlib.GetFaceChipDetails(shapes);
                        using (var faceChips = Dlib.ExtractImageChips <RgbPixel>(img, chipLocations))
                            using (var tileImage = Dlib.TileImages(faceChips))
                            {
                                // It is NOT necessary to round-trip the image through a Bitmap here.
                                // This sample simply demonstrates converting a managed image
                                // class to a dlib class and vice versa.
                                using (var tile = tileImage.ToBitmap())
                                    using (var mat = tile.ToMatrix <RgbPixel>())
                                    {
                                        var tile2 = mat.ToBitmap();
                                        this.pictureBoxTileImage.Image?.Dispose();
                                        this.pictureBoxTileImage.Image = tile2;
                                    }
                            }

                        foreach (var c in chipLocations)
                        {
                            c.Dispose();
                        }
                    }

                    foreach (var s in shapes)
                    {
                        s.Dispose();
                    }
                }
        }
    /// <summary>
    /// Extract features from images, and store them in a csv file at the .exe directory.
    /// </summary>
    /// <param name="faceData">Type of face data that will be stored.</param>
    /// <seealso cref="FaceData1"/>
    /// <seealso cref="FaceData2"/>
    /// <seealso cref="FaceData3"/>
    public static void ExtractFeatures(Type faceData)
    {
        // Setup CSV file
        string header = $"Emotion, Mode: {faceData.ToString()}\n";

        File.WriteAllText(GetFacialFeaturesFileName(faceData), header);

        // Note: wrapping this writer in a using statement (or try/finally) would
        // guarantee the file is closed even if an exception interrupts the loop below.
        StreamWriter csvFile = new StreamWriter(GetFacialFeaturesFileName(faceData), true);

        using (var fd = Dlib.GetFrontalFaceDetector())
            using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
            {
                // Get all images
                DirectoryInfo mugImgDirInfo         = GetDirectory("MUG Images");
                DirectoryInfo googleImgDirInfo      = GetDirectory("Google Set");
                DirectoryInfo cohnKanadeImgDirInfo  = GetDirectory("Cohn-Kanade Images");

                List <FileInfo> imageFiles = new List <FileInfo>();
                foreach (var directoryInfo in cohnKanadeImgDirInfo.GetDirectories())
                {
                    imageFiles.AddRange(directoryInfo.GetFiles("*.png"));
                }
                //foreach (var directoryInfo in googleImgDirInfo.GetDirectories())
                //{
                //    imageFiles.AddRange(directoryInfo.GetFiles("*.jpg"));
                //}
                //imageFiles.AddRange(mugImgDirInfo.GetFiles("*.jpg"));

                foreach (var imageFile in imageFiles)
                {
                    if (faceData == typeof(FaceData1))
                    {
                        var faceData1 = GetFaceData1FromImage(imageFile, sp, fd);
                        if (faceData1 == null)
                        {
                            continue;
                        }

                        // Write to CSV
                        csvFile.WriteLine(faceData1.Emotion + "," + faceData1.LeftEyebrow + "," + faceData1.RightEyebrow +
                                          "," + faceData1.LeftLip + "," + faceData1.RightLip +
                                          "," + faceData1.LipHeight + "," + faceData1.LipWidth);
                    }
                    else if (faceData == typeof(FaceData2))
                    {
                        var faceData2 = GetFaceData2FromImage(imageFile, sp, fd);
                        if (faceData2 == null)
                        {
                            continue;
                        }

                        csvFile.Write($"{faceData2.Emotion},");

                        foreach (var rawCoordsX in faceData2.RawCoordiantesX)
                        {
                            csvFile.Write($"{rawCoordsX},");
                        }

                        foreach (var rawCoordsY in faceData2.RawCoordiantesY)
                        {
                            csvFile.Write($"{rawCoordsY},");
                        }

                        foreach (var angleOfFeature in faceData2.AngleBetweenFeatures)
                        {
                            csvFile.Write($"{angleOfFeature},");
                        }

                        foreach (var lengthOfFeature in faceData2.LengthBetweenFeatures)
                        {
                            csvFile.Write($"{lengthOfFeature},");
                        }

                        csvFile.Write("\n");
                    }
                    else if (faceData == typeof(FaceData3))
                    {
                        var faceData3 = GetFaceData3FromImage(imageFile, sp, fd);
                        if (faceData3 == null)
                        {
                            continue;
                        }

                        csvFile.WriteLine(
                            $"{faceData3.Emotion},{faceData3.LeftEyebrowDistance},{faceData3.RightEyebrowDistance},{faceData3.LeftEyeWidth},{faceData3.RightEyeWidth},{faceData3.LeftEyeHeight},{faceData3.RightEyeHeight},{faceData3.OuterLipWidth},{faceData3.InnerLipWidth},{faceData3.OuterLipHeight},{faceData3.InnerLipHeight},{faceData3.LeftLipEdgeAngle},{faceData3.RightLipEdgeAngle}");
                    }
                    else
                    {
                        throw new ArgumentException("Invalid face data type.", nameof(faceData));
                    }
                }
                // Close file
                csvFile.Close();
            }
    }
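
    // FaceData1 is referenced above but defined elsewhere. A minimal sketch of the
    // shape implied by the CSV row written for it (the real class may carry more):
    public class FaceData1
    {
        public string Emotion { get; set; }
        public float LeftEyebrow { get; set; }
        public float RightEyebrow { get; set; }
        public float LeftLip { get; set; }
        public float RightLip { get; set; }
        public float LipHeight { get; set; }
        public float LipWidth { get; set; }
    }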