Example #1
0
        /// <summary>
        /// The main program entry point: detects all faces in the input image,
        /// draws a rectangle at each 68-point landmark, and saves the result
        /// as "output.jpg".
        /// </summary>
        /// <param name="args">The command line arguments (unused here).</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    // load input image; the Array2D is a native resource and was
                    // previously leaked, so it now lives in a using block too
                    using (var img = Dlib.LoadImage <RgbPixel>(inputFilePath))
                    {
                        // find all faces in the image
                        var faces = fd.Operator(img);
                        foreach (var face in faces)
                        {
                            // find the landmark points for this face; the detection is
                            // a native resource, so dispose it once its parts are read
                            using (var shape = sp.Detect(img, face))
                            {
                                // draw the landmark points on the image
                                for (var i = 0; i < shape.Parts; i++)
                                {
                                    var point = shape.GetPart((uint)i);
                                    var rect  = new Rectangle(point);
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                                }
                            }
                        }

                        // export the modified image
                        Dlib.SaveJpeg(img, "output.jpg");
                    }
        }
        // The main program entry point: detect faces, mark each landmark with a
        // rectangle, and write the annotated image to "output.jpg".
        static void Main(string[] args)
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    // Load image from file; the Array2D is a native resource and was
                    // previously leaked, so it is now disposed with the detectors.
                    using (var img = Dlib.LoadImage <RgbPixel>(inputFilePath))
                    {
                        // Detect all faces
                        var faces = fd.Operator(img);

                        foreach (var face in faces)
                        {
                            // Find the landmark points for this face; dispose the native
                            // detection object once its parts have been read.
                            using (var shape = sp.Detect(img, face))
                            {
                                // Loop through detected landmarks
                                for (int i = 0; i < shape.Parts; i++)
                                {
                                    var point = shape.GetPart((uint)i);
                                    var rect  = new Rectangle(point);
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                                }
                            }
                        }

                        // Save the result
                        Dlib.SaveJpeg(img, "output.jpg");
                    }
        }
Example #3
0
 public ImageUtils()
 {
     // Create the frontal face detector and load the 68-point landmark
     // model shipped under the External folder.
     detector = Dlib.GetFrontalFaceDetector();
     sp       = ShapePredictor.Deserialize(@"..\External\shape_predictor_68_face_landmarks.dat");
 }
Example #4
0
        public void CreateShapePredictor2()
        {
            // Deserialize a predictor from an in-memory byte array (rather than a
            // file path) and verify it disposes cleanly.
            var modelFile  = this.GetDataFile("shape_predictor_68_face_landmarks.dat");
            var modelBytes = File.ReadAllBytes(modelFile.FullName);
            var predictor  = ShapePredictor.Deserialize(modelBytes);

            this.DisposeAndCheckDisposedState(predictor);
        }
        public DLibFaceIdentification(IImageRotationService imageRotationService)
        {
            this.imageRotationService = imageRotationService ?? throw new ArgumentNullException(nameof(imageRotationService));

            // Frontal face detector used to locate faces in the input.
            detector = Dlib.GetFrontalFaceDetector();

            // Load the 5-point landmark model.
            predictor = ShapePredictor.Deserialize("model/shape_predictor_5_face_landmarks.dat");

            // Load the neural network used for face recognition.
            dnn = DlibDotNet.Dnn.LossMetric.Deserialize("model/dlib_face_recognition_resnet_model_v1.dat");

            // Fixed color palette for plotting (colors inferred from RGB values).
            palette = new[]
            {
                new RgbPixel(0xe6, 0x19, 0x4b), // red
                new RgbPixel(0xf5, 0x82, 0x31), // orange
                new RgbPixel(0xff, 0xe1, 0x19), // yellow
                new RgbPixel(0xbc, 0xf6, 0x0c), // lime
                new RgbPixel(0x3c, 0xb4, 0x4b), // green
                new RgbPixel(0x46, 0xf0, 0xf0), // cyan
                new RgbPixel(0x43, 0x63, 0xd8), // blue
                new RgbPixel(0x91, 0x1e, 0xb4), // purple
                new RgbPixel(0xf0, 0x32, 0xe6), // magenta
                new RgbPixel(0x80, 0x80, 0x80)  // grey
            };
        }
Example #6
0
 public MainForm()
 {
     this.InitializeComponent();

     // Load the 68-point landmark model once at startup, and prepare a
     // background worker so the heavy work runs off the UI thread.
     this._ShapePredictor = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat");

     this._BackgroundWorker         = new BackgroundWorker();
     this._BackgroundWorker.DoWork += this.BackgroundWorkerOnDoWork;
 }
Example #7
0
 public void GetEyePixels(Image img)
 {
     // NOTE(review): this method is currently a stub — the detector and
     // predictor are created and disposed without ever being used.
     using (var fd = Dlib.GetFrontalFaceDetector())
     using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
     {
         //Dlib.LoadImageData(ImagePixelFormat.Bgr,  img.W, img.H, 3)
     }
 }
        /// <summary>
        /// Initializes a new instance of the <see cref="FaceRecognition"/> class with the directory path that stores model files.
        /// </summary>
        /// <param name="directory">The directory path that stores model files.</param>
        /// <exception cref="FileNotFoundException">The model file is not found.</exception>
        /// <exception cref="DirectoryNotFoundException">The specified directory path is not found.</exception>
        private FaceRecognition(string directory)
        {
            if (!Directory.Exists(directory))
            {
                throw new DirectoryNotFoundException(directory);
            }

            // Resolves a model file inside 'directory', throwing FileNotFoundException
            // (with the full path) when it does not exist.  Replaces four copies of
            // the same combine/check/throw pattern.
            string RequireModel(string relativePath)
            {
                var fullPath = Path.Combine(directory, relativePath);
                if (!File.Exists(fullPath))
                {
                    throw new FileNotFoundException(fullPath);
                }

                return fullPath;
            }

            // Validate all required models up front, in the original order.
            var predictor68PointModel = RequireModel(FaceRecognitionModels.GetPosePredictorModelLocation());
            var predictor5PointModel  = RequireModel(FaceRecognitionModels.GetPosePredictorFivePointModelLocation());
            var cnnFaceDetectionModel = RequireModel(FaceRecognitionModels.GetCnnFaceDetectorModelLocation());
            var faceRecognitionModel  = RequireModel(FaceRecognitionModels.GetFaceRecognitionModelLocation());

            // Dispose any previous instances before replacing them.
            // NOTE(review): in a constructor these fields should normally still be
            // null; the null-conditional Dispose calls are kept for safety.
            this._FaceDetector?.Dispose();
            this._FaceDetector = DlibDotNet.Dlib.GetFrontalFaceDetector();

            this._PosePredictor68Point?.Dispose();
            this._PosePredictor68Point = ShapePredictor.Deserialize(predictor68PointModel);

            this._PosePredictor5Point?.Dispose();
            this._PosePredictor5Point = ShapePredictor.Deserialize(predictor5PointModel);

            this._CnnFaceDetector?.Dispose();
            this._CnnFaceDetector = LossMmod.Deserialize(cnnFaceDetectionModel);

            this._FaceEncoder?.Dispose();
            this._FaceEncoder = LossMetric.Deserialize(faceRecognitionModel);

            // The 194-point model is optional: load it only when the file exists.
            var predictor194PointModel = Path.Combine(directory, FaceRecognitionModels.GetPosePredictor194PointModelLocation());

            if (File.Exists(predictor194PointModel))
            {
                this._PosePredictor194Point?.Dispose();
                this._PosePredictor194Point = ShapePredictor.Deserialize(predictor194PointModel);
            }
        }
Example #9
0
        /// <summary>
        /// Initializes a new instance of the <see cref="HelenFaceLandmarkDetector"/> class with the model file path that this detector uses.
        /// </summary>
        /// <param name="modelPath">The model file path that this detector uses.</param>
        /// <exception cref="FileNotFoundException">The model file is not found.</exception>
        public HelenFaceLandmarkDetector(string modelPath)
        {
            // Fail fast with the offending path when the model file is missing;
            // otherwise deserialize the predictor directly into the field.
            this._Predictor = File.Exists(modelPath)
                ? ShapePredictor.Deserialize(modelPath)
                : throw new FileNotFoundException(modelPath);
        }
        public DLibFaceIdentificationService(IImageRotationService imageRotationService)
        {
            this.imageRotationService = imageRotationService ?? throw new ArgumentNullException(nameof(imageRotationService));

            // Load the 5-point landmark model from the model directory.
            predictor = ShapePredictor.Deserialize("model/shape_predictor_5_face_landmarks.dat");

            // Load the neural network used for face recognition.
            dnn = DlibDotNet.Dnn.LossMetric.Deserialize("model/dlib_face_recognition_resnet_model_v1.dat");
        }
Example #11
0
 public void computeKeyPoint(string imgPath, bool isSource)
 {
     // Detects exactly one face in the image at 'imgPath' and fills either
     // sourceKeyPoint or targetKeyPoint with its 68 landmark coordinates.
     using (var detector = Dlib.GetFrontalFaceDetector())
     using (var sp = ShapePredictor.Deserialize("../../../shape_predictor_68_face_landmarks.dat"))
     using (var img = Dlib.LoadImage <RgbPixel>(imgPath))
     {
         // Upsample so small faces are detected; the landmark coordinates are
         // divided by 2 below to map back to the original image scale.
         Dlib.PyramidUp(img);
         var dets = detector.Operator(img);
         if (dets.Length == 0)
         {
             MessageBox.Show("图中未检测到人脸", "Warning");
             return;
         }
         else if (dets.Length > 1)
         {
             // NOTE(review): the message says one of the detected faces will be
             // used, but the method then returns without computing any key
             // points — confirm whether this early return is intended.
             MessageBox.Show("图中检测到多张人脸,取其中一张进行变换", "Warning");
             return;
         }
         var shape = sp.Detect(img, dets[0]);

         // Reuse the existing list when present, otherwise allocate one sized
         // for the 68 landmarks and store it in the matching field.  This
         // replaces two verbatim copies of the same branch.
         var keyPoints = isSource ? sourceKeyPoint : targetKeyPoint;
         if (keyPoints != null)
         {
             keyPoints.Clear();
         }
         else
         {
             keyPoints = new List <PointF>(68);
             if (isSource)
             {
                 sourceKeyPoint = keyPoints;
             }
             else
             {
                 targetKeyPoint = keyPoints;
             }
         }

         for (uint i = 0; i < 68; ++i)
         {
             keyPoints.Add(new PointF((float)shape.GetPart(i).X / 2, (float)shape.GetPart(i).Y / 2));
         }
     }
 }
Example #12
0
        private static void Main()
        {
            try
            {
                // You can get this file from http://dlib.net/files/mmod_front_and_rear_end_vehicle_detector.dat.bz2
                // This network was produced by the dnn_mmod_train_find_cars_ex.cpp example program.
                // The file also bundles a separately trained shape_predictor; for a
                // generic example of training one, see train_shape_predictor_ex.cpp.
                using (var deserialize = new ProxyDeserialize("mmod_front_and_rear_end_vehicle_detector.dat"))
                using (var net = LossMmod.Deserialize(deserialize, 1))
                using (var sp = ShapePredictor.Deserialize(deserialize))
                using (var img = Dlib.LoadImageAsMatrix <RgbPixel>("mmod_cars_test_image2.jpg"))
                using (var win = new ImageWindow())
                {
                    win.SetImage(img);

                    // Run the detector on the image and overlay every detection.
                    var dets = net.Operator(img).First();
                    foreach (var d in dets)
                    {
                        // The shape_predictor here is trained to output the 4 corner
                        // points of the detection box, so the rectangle that tightly
                        // contains those points is the refined detection position.
                        var fd   = sp.Detect(img, d);
                        var rect = Rectangle.Empty;
                        for (var j = 0u; j < fd.Parts; ++j)
                        {
                            rect += fd.GetPart(j);
                        }

                        // Rear-facing detections are drawn red, everything else yellow.
                        var color = d.Label == "rear"
                            ? new RgbPixel(255, 0, 0)
                            : new RgbPixel(255, 255, 0);
                        win.AddOverlay(rect, color, d.Label);
                    }

                    Console.WriteLine("Hit enter to end program");
                    Console.ReadKey();
                }
            }
            catch (ImageLoadException ile)
            {
                Console.WriteLine(ile.Message);
                Console.WriteLine("The test image is located in the examples folder.  So you should run this program from a sub folder so that the relative path is correct.");
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
 public static void DrawPointsOfLandmarks(FileInfo image)
 {
     // Detects every face in the image, draws a rectangle at each landmark
     // point (color-coded by the branch chain below), and writes the result
     // to "output.jpg".
     using (var fd = Dlib.GetFrontalFaceDetector())
         using (var sp = ShapePredictor.Deserialize(GetFile(ShapePredictorFileName).FullName))
         {
             using (var img = Dlib.LoadImage <RgbPixel>(image.FullName))
             {
                 var faces = fd.Operator(img);
                 // for each face draw over the facial landmarks
                 foreach (var face in faces)
                 {
                     var shape = sp.Detect(img, face);
                     // draw the landmark points on the image
                     for (var i = 0; i < shape.Parts; i++)
                     {
                         var point = shape.GetPart((uint)i);
                         var rect  = new Rectangle(point);
                         if (i == 0)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 255), thickness: 8);
                         }
                         else if (i == 21 || i == 22 || i == 39 || i == 42 || i == 33 || i == 51 || i == 57 ||
                                  i == 48 ||
                                  i == 54)
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 255), thickness: 4);
                         }
                         // NOTE(review): i == 21 is matched by the branch above, so it can
                         // never reach this branch; also confirm the "left eye" label —
                         // these indices may belong to a different landmark group.
                         else if (i == 18 || i == 19 || i == 20 || i == 21) // left eye
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 0), 6);
                         }
                         // NOTE(review): i == 22 is likewise unreachable here (matched above).
                         else if (i == 22 || i == 23 || i == 24 || i == 25) // right eye
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 128, 0), 6);
                         }
                         // NOTE(review): i == 48 is unreachable here (matched above).
                         else if (i == 48 || i == 49 || i == 50) // left lip
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), 2);
                         }
                         // NOTE(review): i == 54 is unreachable here (matched above).
                         else if (i == 52 || i == 53 || i == 54) // right lip
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 0, 128), 2);
                         }
                         else
                         {
                             Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 0, 0), thickness: 4);
                         }
                     }
                     // NOTE(review): PNG-encoded data is written to a file named
                     // "output.jpg", and this runs once per detected face, rewriting
                     // the same file each time — confirm both are intended.
                     Dlib.SavePng(img, "output.jpg");
                 }
             }
         }
 }
Example #14
0
        public void DetectFace4()
        {
            // Verifies that a predictor loaded from a file path and one loaded from
            // an in-memory byte array produce identical landmark positions.
            // (Removed unused locals from the original: testName, tests, r1, r2, parts2.)
            var model = this.GetDataFile("shape_predictor_68_face_landmarks.dat");

            var predictor1 = ShapePredictor.Deserialize(model.FullName);
            var predictor2 = ShapePredictor.Deserialize(File.ReadAllBytes(model.FullName));

            if (this._ShapePredictor == null)
            {
                Assert.True(false, "ShapePredictor is not initialized!!");
            }

            var path = this.GetDataFile("Lenna_mini.bmp");

            using (var faceDetector = Dlib.GetFrontalFaceDetector())
                using (var matrix = Dlib.LoadImageAsMatrix <RgbPixel>(path.FullName))
                {
                    var dets    = faceDetector.Operator(matrix);
                    var shapes1 = dets.Select(r => predictor1.Detect(matrix, r)).ToList();
                    var shapes2 = dets.Select(r => predictor2.Detect(matrix, r)).ToList();
                    Assert.Equal(shapes1.Count, shapes2.Count);

                    for (var index = 0; index < shapes1.Count; index++)
                    {
                        var shape1 = shapes1[index];
                        var shape2 = shapes2[index];

                        // Both shapes must agree on every landmark point.
                        var parts1 = shape1.Parts;
                        for (uint i = 0; i < parts1; i++)
                        {
                            var part1 = shape1.GetPart(i);
                            var part2 = shape2.GetPart(i);

                            Assert.Equal(part1.X, part2.X);
                            Assert.Equal(part1.Y, part2.Y);
                        }
                    }
                }

            this.DisposeAndCheckDisposedState(predictor2);
            this.DisposeAndCheckDisposedState(predictor1);
        }
Example #15
0
        public void Operator()
        {
            // The same metric network deserialized from a file path and from raw
            // bytes must produce identical outputs for every detected face.
            var image = this.GetDataFile("Lenna.jpg");
            var path1 = Path.Combine(this.ModelDirectory, "dlib_face_recognition_resnet_model_v1.dat");
            var path2 = Path.Combine(this.ModelDirectory, "shape_predictor_5_face_landmarks.dat");

            using (var net1 = LossMetric.Deserialize(path1))
            using (var net2 = LossMetric.Deserialize(File.ReadAllBytes(path1)))
            using (var sp = ShapePredictor.Deserialize(path2))
            using (var matrix = Dlib.LoadImageAsMatrix <RgbPixel>(image.FullName))
            using (var detector = Dlib.GetFrontalFaceDetector())
            {
                // Extract an aligned 150x150 chip for each detected face.
                var faces = new List <Matrix <RgbPixel> >();
                foreach (var face in detector.Operator(matrix))
                {
                    var shape   = sp.Detect(matrix, face);
                    var details = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                    var chip    = Dlib.ExtractImageChip <RgbPixel>(matrix, details);
                    faces.Add(chip);
                }

                foreach (var face in faces)
                {
                    using (var ret1 = net1.Operator(face))
                    using (var ret2 = net2.Operator(face))
                    {
                        Assert.Equal(1, ret1.Count);
                        Assert.Equal(1, ret2.Count);

                        var r1 = ret1[0];
                        var r2 = ret2[0];

                        Assert.Equal(r1.Columns, r2.Columns);
                        Assert.Equal(r1.Rows, r2.Rows);

                        // Compare the two output matrices element by element.
                        for (var c = 0; c < r1.Columns; c++)
                        {
                            for (var r = 0; r < r1.Rows; r++)
                            {
                                Assert.Equal(r1[r, c], r2[r, c]);
                            }
                        }
                    }

                    face.Dispose();
                }
            }
        }
Example #16
0
        public Form1()
        {
            InitializeComponent();
            DoubleBuffered = true;

            shapes = new List <FullObjectDetection>();

            // Frame buffer plus the overlay images drawn on detected faces.
            img           = new Mat();
            eyeImage      = new Bitmap(Properties.Resources.Star);
            mustacheImage = new Bitmap(Properties.Resources.Mustache);

            // Face detector and the 68-point landmark model from Resources.
            detector  = Dlib.GetFrontalFaceDetector();
            predictor = ShapePredictor.Deserialize("Resources\\shape_predictor_68_face_landmarks.dat");

            // Open the default camera and process frames whenever the UI is idle.
            capture = new VideoCapture();
            capture.Open(0);
            Application.Idle += OnCameraFrame;
        }
        public static string TestCustomImage(string dir)
        {
            // Loads the trained ML.NET model, extracts facial-geometry features from
            // the first face found in the image at 'dir', and returns the predicted
            // expression name, or "N/A" when no face is detected.
            DataViewSchema predictionPipelineSchema;
            ITransformer   predictionPipeline = mlContext.Model.Load("model.zip", out predictionPipelineSchema);
            PredictionEngine <FeatureInputData, ExpressionPrediction> predictionEngine = mlContext.Model.CreatePredictionEngine <FeatureInputData, ExpressionPrediction>(predictionPipeline);

            // Set up Dlib Face Detector and Shape Detector.  The loaded image is a
            // native resource that the original code never disposed, so it is now
            // part of the using chain.
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    using (var img = Dlib.LoadImage <RgbPixel>(dir))
                    {
                        // find all faces in the image
                        var faces = fd.Operator(img);

                        // classify the first detected face only (the loop returns
                        // on its first iteration; remaining faces are ignored)
                        foreach (var face in faces)
                        {
                            // find the landmark points for this face
                            var shape = sp.Detect(img, face);

                            FeatureInputData inputData = new FeatureInputData
                            {
                                leftEyebrow  = CalculateLeftEyebrow(shape),
                                rightEyebrow = CalculateRightEyebrow(shape),
                                leftLip      = CalculateLeftLip(shape),
                                rightLip     = CalculateRightLip(shape),
                                lipWidth     = CalculateLipWidth(shape),
                                lipHeight    = CalculateLipHeight(shape)
                            };

                            ExpressionPrediction prediction = predictionEngine.Predict(inputData);

                            return(prediction.expression.ToString());
                        }
                    }
            return("N/A");
        }
Example #18
0
        private void GetLandmarks(OpenCvSharp.Mat frame, OpenCvSharp.Rect face, List <System.Drawing.Rectangle> rfaces)
        {
            // Locates both eyes inside the given face rectangle, appends their
            // bounding boxes to 'rfaces', and computes each eye's gaze direction.
            // NOTE(review): rightEye is constructed with leftEye: true and vice
            // versa — confirm whether this swap is intentional (e.g. mirrored input).
            EyePoints rightEye = new EyePoints(leftEye: true);
            EyePoints leftEye  = new EyePoints(leftEye: false);

            // The predictor and the grayscale conversion are native resources that
            // the original code leaked; both are now disposed via using.
            // NOTE(review): deserializing the model file on every call is expensive —
            // consider caching the predictor in a field.
            using (ShapePredictor predictor = ShapePredictor.Deserialize(shapePredictorDataFile))
            using (Array2D <byte> gray = ConvertMatToDlib2DArray(frame))
            {
                FullObjectDetection landmarks = predictor.Detect(gray, ConvertToDlib(face));

                InitializeEyes(landmarks, leftEye, rightEye);
                //DrawEye(que, landmarks, leftEye);
                //DrawEye(que, landmarks, rightEye);

                // Left eye: bounding box, pupil center, then gaze direction.
                Rect leftboundingBox = BoundingBoxAroundEye(leftEye, 0);
                rfaces.Add(FromOpenCvRect(leftboundingBox));
                //DrawRect(frame, leftboundingBox);
                OpenCvSharp.Point centerOfLeftEye = DetectCenterOfEye(frame, leftboundingBox);
                centerOfLeftEye.X += leftboundingBox.X;

                // Right eye: same steps.
                Rect rightboundingBox = BoundingBoxAroundEye(rightEye, 0);
                rfaces.Add(FromOpenCvRect(rightboundingBox));
                //DrawRect(frame, rightboundingBox);
                OpenCvSharp.Point centerOfRightEye = DetectCenterOfEye(frame, rightboundingBox);
                centerOfRightEye.X += rightboundingBox.X;

                EyeDirection leftEyeDirection  = leftEye.GetEyePosition(centerOfLeftEye);
                EyeDirection rightEyeDirection = rightEye.GetEyePosition(centerOfRightEye);

                //EyeDirection eyeDirection = EyeDirection.unknown;
                //if (leftEyeDirection == EyeDirection.center || rightEyeDirection == EyeDirection.center) eyeDirection = EyeDirection.center;
                //else if (leftEyeDirection == EyeDirection.left) eyeDirection = EyeDirection.left;
                //else if (rightEyeDirection == EyeDirection.right) eyeDirection = EyeDirection.right;

                //OpenCvSharp.Point position = new OpenCvSharp.Point(50, 50);
                //Cv2.PutText(img: frame, text: eyeDirection.ToDisplay(), org: position, fontFace: HersheyFonts.HersheySimplex, fontScale: 2, new Scalar(0, 0, 255));
            }
        }
Example #19
0
 public static void Detect(Array2D <RgbPixel> image)
 {
     // Detects faces in the image, runs the 68-point landmark predictor on
     // each one, and logs the part count plus the first two landmark positions.
     using (var detector = Dlib.GetFrontalFaceDetector())
         using (var sp =
                    ShapePredictor.Deserialize(
                        @"C:\Users\Felix\source\repos\BlinkDetect\External\shape_predictor_68_face_landmarks.dat"))
         {
             var dets   = detector.Operator(image);
             var shapes = new List <FullObjectDetection>();
             foreach (var rect in dets)
             {
                 var shape = sp.Detect(image, rect);
                 Console.WriteLine($"number of parts: {shape.Parts}");
                 if (shape.Parts > 2)
                 {
                     Console.WriteLine($"pixel position of first part:  {shape.GetPart(0)}");
                     Console.WriteLine($"pixel position of second part: {shape.GetPart(1)}");
                     shapes.Add(shape);
                 }
                 // NOTE(review): recomputed on every loop iteration and the result
                 // is never used — this looks like it was meant to run once, after
                 // the loop finishes.  Confirm before moving it.
                 var chipLocations = Dlib.GetFaceChipDetails(shapes);
             }
         }
 }
Example #20
0
 public Form1()
 {
     InitializeComponent();

     // Camera capture from device 0 and the working frame buffer.
     this.capture = new VideoCapture(0);
     this.frame   = new Mat();

     // Dlib face detector plus the 68-point landmark model (absolute path —
     // this will only work on the original author's machine).
     this.fd      = Dlib.GetFrontalFaceDetector();
     this.sp      = ShapePredictor.Deserialize(@"C:\Users\trago\OneDrive\Desktop\OpenCV\shape_predictor_68_face_landmarks.dat");

     // 3D face model and zeroed coefficients; poseModel/poseProjection suggest
     // these feed a head-pose estimation step — confirm against their usage.
     this.model   = Utility.GetFaceModel();
     this.coeffs  = new MatOfDouble(4, 1);
     this.coeffs.SetTo(0);
     this.poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
     this.poseProjection = new MatOfPoint2d();

     // Thresholds and the Thai prompts shown for each capture step.
     this.checker        = new int[4] {
         100, -10, 10, 0
     };
     this.text = new string[4] {
         "1. เอาหน้าใส่กรอบ", "2. ก้มหน้าเล็กน้อย", "3. เงยหน้าเล็กน้อย", "4. หน้าตรง"
     };
     this.timeset = 3;
     this.size    = new Size(250, 300);

     // Reset UI/state before the first frame is processed.
     SetStart();
     SetZero();
 }
        /// <summary>
        /// Detects every face in the bitmap and returns the landmark shape for each.
        /// </summary>
        /// <param name="bitmap">The source image to scan for faces.</param>
        /// <returns>One <see cref="FullObjectDetection"/> per face whose shape has more than two parts.</returns>
        public List <FullObjectDetection> Face(Bitmap bitmap)
        {
            // Lazily load the 68-point landmark model on first use.
            // NOTE(review): not thread-safe if called concurrently — confirm callers.
            if (sp == null)
            {
                var basePath = AppDomain.CurrentDomain.BaseDirectory;
                sp = ShapePredictor.Deserialize(basePath + "ShapeModel/shape_predictor_68_face_landmarks.dat");
            }

            //var link = new ImageWindow.OverlayLine[0];
            var shapes = new List <FullObjectDetection>();

            using (var detector = Dlib.GetFrontalFaceDetector())
            {
                using (var img = bitmap.ToArray2D <RgbPixel>())
                {
                    var dets = detector.Operator(img);

                    // Keep only shapes with more than two landmark parts.
                    foreach (var rect in dets)
                    {
                        var shape = sp.Detect(img, rect);
                        if (shape.Parts > 2)
                        {
                            shapes.Add(shape);
                        }
                    }
                    //if (shapes.Any())
                    //{
                    //    // this is the one
                    //    var lines = Dlib.RenderFaceDetections(shapes);
                    //    link = lines;
                    //}
                }
            }
            return(shapes);
        }
Example #22
0
        private async Task <ILookup <Mat, List <Point2f> > > DetectFaces(IEnumerable <Mat> images)
        {
            // Downloads the 68-point landmark model on first use, then groups the
            // faces detected in each image by their source image.
            const string faceModelPath = "facemodel.dat";

            if (!File.Exists(faceModelPath))
            {
                var modelUrl = "https://github.com/AKSHAYUBHAT/TensorFace/raw/master/" +
                               "openface/models/dlib/shape_predictor_68_face_landmarks.dat";

                // The HttpClient was previously created and never disposed; release
                // it together with the two streams.
                // NOTE(review): a partially-written file left by a failed download
                // would be treated as valid on the next call — consider writing to a
                // temp file and renaming on success.
                using (var httpClient = new HttpClient())
                    using (var modelStream = await httpClient.GetStreamAsync(modelUrl))
                        using (var fileStream = File.OpenWrite(faceModelPath))
                        {
                            await modelStream.CopyToAsync(fileStream);
                        }
            }

            using (var detector = Dlib.GetFrontalFaceDetector())
                using (var predictor = ShapePredictor.Deserialize(faceModelPath))
                {
                    // Pair each image with every face found in it, grouped by image.
                    var faces = images
                                .SelectMany(image => DetectFaces(detector, predictor, image), (image, face) => (image, face))
                                .ToLookup(t => t.image, t => t.face);
                    return(faces);
                }
        }
Example #23
0
        public ShapePredictorTest()
        {
            // Load the shared 68-point predictor used by every test in this class.
            var modelFile = this.GetDataFile("shape_predictor_68_face_landmarks.dat");

            this._ShapePredictor = ShapePredictor.Deserialize(modelFile.FullName);
        }
Example #24
0
        /// <summary>
        /// Entry point of the helen-dataset training tool. Exposes three sub commands:
        /// 'generate' (build a dlib training XML from the helen annotation/image archives),
        /// 'train' (train a shape predictor from the generated XML) and
        /// 'demo' (run a trained model against a single test image).
        /// </summary>
        /// <param name="args">The command line arguments.</param>
        private static void Main(string[] args)
        {
            var app = new CommandLineApplication(false);

            app.Name        = nameof(HelenTraining);
            app.Description = "The program for training helen dataset";
            app.HelpOption("-h|--help");

            app.Command("generate", command =>
            {
                command.HelpOption("-?|-h|--help");
                var paddingOption = command.Option("-p|--padding", "padding of detected face", CommandOptionType.SingleValue);
                var modelsOption  = command.Option("-m|--model", "model files directory path", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    // Validate required options before doing any work.
                    if (!modelsOption.HasValue())
                    {
                        Console.WriteLine("model option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    if (!paddingOption.HasValue())
                    {
                        Console.WriteLine("padding option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    var directory = modelsOption.Value();
                    if (!Directory.Exists(directory))
                    {
                        Console.WriteLine($"'{directory}' is not found");
                        app.ShowHelp();
                        return(-1);
                    }

                    if (!int.TryParse(paddingOption.Value(), out var padding))
                    {
                        Console.WriteLine($"padding '{paddingOption.Value()}' is not integer");
                        app.ShowHelp();
                        return(-1);
                    }

                    Console.WriteLine($"Model: {directory}");
                    Console.WriteLine($"Padding: {padding}");

                    _FaceRecognition = FaceRecognition.Create(directory);

                    // The helen dataset ships as one annotation archive plus five image archives.
                    const string extractPath = "helen";
                    var zips = new[]
                    {
                        new{ Zip = "annotation.zip", IsImage = false, Directory = "annotation" },
                        new{ Zip = "helen_1.zip", IsImage = true, Directory = "helen_1" },
                        new{ Zip = "helen_2.zip", IsImage = true, Directory = "helen_2" },
                        new{ Zip = "helen_3.zip", IsImage = true, Directory = "helen_3" },
                        new{ Zip = "helen_4.zip", IsImage = true, Directory = "helen_4" },
                        new{ Zip = "helen_5.zip", IsImage = true, Directory = "helen_5" }
                    };

                    Directory.CreateDirectory(extractPath);

                    // Extract each archive only once.
                    foreach (var zip in zips)
                    {
                        if (!Directory.Exists(Path.Combine(extractPath, zip.Directory)))
                        {
                            ZipFile.ExtractToDirectory(zip.Zip, extractPath);
                        }
                    }

                    var annotation = zips.FirstOrDefault(arg => !arg.IsImage);
                    var imageZips  = zips.Where(arg => arg.IsImage).ToArray();
                    if (annotation == null)
                    {
                        return(-1);
                    }

                    var images = new List <Image>();
                    foreach (var file in Directory.EnumerateFiles(Path.Combine(extractPath, annotation.Directory)))
                    {
                        Console.WriteLine($"Process: '{file}'");

                        var txt      = File.ReadAllLines(file);
                        var filename = txt[0];
                        // The first line of each annotation file is the image base name;
                        // the matching image is "<name>.jpg" in one of the helen_N folders.
                        // BUG FIX: this previously used a constant placeholder name, so no
                        // annotation ever matched an image and the dataset came out empty.
                        var jpg      = $"{filename}.jpg";
                        foreach (var imageZip in imageZips)
                        {
                            var found = false;
                            var path  = Path.Combine(extractPath, imageZip.Directory, jpg);
                            if (File.Exists(path))
                            {
                                found = true;
                                using (var fi = FaceRecognition.LoadImageFile(path))
                                {
                                    // Only keep images with exactly one detected face so the
                                    // annotation-to-face association is unambiguous.
                                    var locations = _FaceRecognition.FaceLocations(fi, 1, Model.Hog).ToArray();
                                    if (locations.Length != 1)
                                    {
                                        Console.WriteLine($"\t'{path}' has {locations.Length} faces.");
                                    }
                                    else
                                    {
                                        var location = locations.First();
                                        // Remaining lines are "x, y" landmark coordinates.
                                        var parts    = new List <Part>();
                                        for (var i = 1; i < txt.Length; i++)
                                        {
                                            var tmp = txt[i].Split(',').Select(s => s.Trim()).Select(float.Parse).Select(s => (int)s).ToArray();
                                            parts.Add(new Part {
                                                X = tmp[0], Y = tmp[1], Name = $"{i - 1}"
                                            });
                                        }

                                        // Grow the detected face box by the requested padding on all sides.
                                        var image = new Image
                                        {
                                            File = Path.Combine(imageZip.Directory, jpg),
                                            Box  = new Box
                                            {
                                                Left   = location.Left - padding,
                                                Top    = location.Top - padding,
                                                Width  = location.Right - location.Left + 1 + padding * 2,
                                                Height = location.Bottom - location.Top + 1 + padding * 2,
                                                Part   = parts.ToArray()
                                            }
                                        };

                                        // Render a visual check image (red box + landmark dots).
                                        using (var bitmap = System.Drawing.Image.FromFile(path))
                                        {
                                            var b = image.Box;
                                            using (var g = Graphics.FromImage(bitmap))
                                            {
                                                using (var p = new Pen(Color.Red, bitmap.Width / 400f))
                                                    g.DrawRectangle(p, b.Left, b.Top, b.Width, b.Height);

                                                foreach (var part in b.Part)
                                                {
                                                    g.FillEllipse(Brushes.GreenYellow, part.X, part.Y, 5, 5);
                                                }
                                            }

                                            var result = Path.Combine(extractPath, "Result");
                                            Directory.CreateDirectory(result);

                                            bitmap.Save(Path.Combine(result, jpg), ImageFormat.Jpeg);
                                        }

                                        images.Add(image);
                                    }
                                }
                            }

                            // Stop searching the other archives once the image was found.
                            if (found)
                            {
                                break;
                            }
                        }
                    }

                    var dataset = new Dataset
                    {
                        Name    = "helen dataset",
                        Comment = "Created by Takuya Takeuchi.",
                        Images  = images.ToArray()
                    };

                    // Serialize the dataset in dlib's imglab XML format.
                    var settings = new XmlWriterSettings();
                    using (var sw = new StreamWriter(Path.Combine(extractPath, "helen-dataset.xml"), false, new System.Text.UTF8Encoding(false)))
                        using (var writer = XmlWriter.Create(sw, settings))
                        {
                            writer.WriteProcessingInstruction("xml-stylesheet", @"type=""text/xsl"" href=""image_metadata_stylesheet.xsl""");
                            var serializer = new XmlSerializer(typeof(Dataset));
                            serializer.Serialize(writer, dataset);
                        }

                    return(0);
                });
            });

            app.Command("train", command =>
            {
                command.HelpOption("-?|-h|--help");
                var threadOption = command.Option("-t|--threads", "number of threads", CommandOptionType.SingleValue);
                var xmlOption    = command.Option("-x|--xml", "generated xml file from helen dataset", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    if (!xmlOption.HasValue())
                    {
                        Console.WriteLine("xml option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    if (!threadOption.HasValue())
                    {
                        Console.WriteLine("thread option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    var xmlFile = xmlOption.Value();
                    if (!File.Exists(xmlFile))
                    {
                        Console.WriteLine($"'{xmlFile}' is not found");
                        app.ShowHelp();
                        return(-1);
                    }

                    if (!uint.TryParse(threadOption.Value(), out var thread))
                    {
                        Console.WriteLine($"thread '{threadOption.Value()}' is not integer");
                        app.ShowHelp();
                        return(-1);
                    }

                    // Load the images and their face boxes from the generated XML.
                    Dlib.LoadImageDataset(xmlFile, out Array <Array2D <byte> > imagesTrain, out var facesTrain);

                    using (var trainer = new ShapePredictorTrainer())
                    {
                        trainer.NumThreads = thread;
                        trainer.BeVerbose();

                        Console.WriteLine("Start training");
                        using (var predictor = trainer.Train(imagesTrain, facesTrain))
                        {
                            Console.WriteLine("Finish training");

                            // Write the trained model next to the XML file, same base name.
                            var directory = Path.GetDirectoryName(xmlFile);
                            var output    = Path.Combine(directory, $"{Path.GetFileNameWithoutExtension(xmlFile)}.dat");
                            ShapePredictor.Serialize(predictor, output);
                        }
                    }

                    return(0);
                });
            });

            app.Command("demo", command =>
            {
                command.HelpOption("-?|-h|--help");
                var imageOption     = command.Option("-i|--image", "test image file", CommandOptionType.SingleValue);
                var modelOption     = command.Option("-m|--model", "model file", CommandOptionType.SingleValue);
                var directoryOption = command.Option("-d|--directory", "model files directory path", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    if (!imageOption.HasValue())
                    {
                        Console.WriteLine("image option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    if (!directoryOption.HasValue())
                    {
                        Console.WriteLine("directory option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    if (!modelOption.HasValue())
                    {
                        Console.WriteLine("model option is missing");
                        app.ShowHelp();
                        return(-1);
                    }

                    var modelFile = modelOption.Value();
                    if (!File.Exists(modelFile))
                    {
                        Console.WriteLine($"'{modelFile}' is not found");
                        app.ShowHelp();
                        return(-1);
                    }

                    var imageFile = imageOption.Value();
                    if (!File.Exists(imageFile))
                    {
                        Console.WriteLine($"'{imageFile}' is not found");
                        app.ShowHelp();
                        return(-1);
                    }

                    var directory = directoryOption.Value();
                    if (!Directory.Exists(directory))
                    {
                        Console.WriteLine($"'{directory}' is not found");
                        app.ShowHelp();
                        return(-1);
                    }

                    _FaceRecognition = FaceRecognition.Create(directory);

                    using (var predictor = ShapePredictor.Deserialize(modelFile))
                        using (var image = FaceRecognition.LoadImageFile(imageFile))
                            using (var mat = Dlib.LoadImageAsMatrix <RgbPixel>(imageFile))
                                using (var bitmap = (Bitmap)System.Drawing.Image.FromFile(imageFile))
                                    using (var white = new Bitmap(bitmap.Width, bitmap.Height))
                                        using (var g = Graphics.FromImage(bitmap))
                                            using (var gw = Graphics.FromImage(white))
                                            {
                                                // Demo only handles the first detected face.
                                                var loc = _FaceRecognition.FaceLocations(image).FirstOrDefault();
                                                if (loc == null)
                                                {
                                                    Console.WriteLine("No face is detected");
                                                    return(0);
                                                }

                                                var b         = new DlibDotNet.Rectangle(loc.Left, loc.Top, loc.Right, loc.Bottom);
                                                var detection = predictor.Detect(mat, b);

                                                // Draw the face box on both the photo and the white reference sheet.
                                                // (Consistently use the box 'b'; b.Left equals loc.Left by construction.)
                                                using (var p = new Pen(Color.Red, bitmap.Width / 200f))
                                                {
                                                    g.DrawRectangle(p, b.Left, b.Top, b.Width, b.Height);
                                                    gw.Clear(Color.White);
                                                    gw.DrawRectangle(p, b.Left, b.Top, b.Width, b.Height);
                                                }

                                                // Dots on the photo, numbered labels on the white sheet.
                                                for (int i = 0, parts = (int)detection.Parts; i < parts; i++)
                                                {
                                                    var part = detection.GetPart((uint)i);
                                                    g.FillEllipse(Brushes.GreenYellow, part.X, part.Y, 15, 15);
                                                    gw.DrawString($"{i}", SystemFonts.DefaultFont, Brushes.Black, part.X, part.Y);
                                                }

                                                bitmap.Save("demo.jpg", ImageFormat.Jpeg);
                                                white.Save("white.jpg", ImageFormat.Jpeg);
                                            }

                    return(0);
                });
            });

            app.Execute(args);
        }
 public MainViewModel()
 {
     // Load the pretrained 68-point facial landmark model when the view model is created.
     const string modelFile = "shape_predictor_68_face_landmarks.dat";

     this._ShapePredictor = ShapePredictor.Deserialize(modelFile);
 }
Example #26
0
 public FaceDetector(string serializedFacialLandmarkPredictor)
 {
     // Deserialize the dlib shape-predictor model once at construction time.
     var predictor = ShapePredictor.Deserialize(serializedFacialLandmarkPredictor);

     this.facialLandmarkPredictor = predictor;
 }
Example #27
0
        /// <summary>
        /// Detects facial landmarks in the given image file, records key feature
        /// points (mouth corners, eye centers, inner eye corners, midline points)
        /// into the shared feature-point collections, and returns the image as a
        /// BitmapImage for display.
        /// </summary>
        /// <param name="filename">Path of the image file to process.</param>
        /// <returns>The loaded image converted for WPF display.</returns>
        public BitmapImage faceDetect(string filename)
        {
            // BUG FIX: the detector was declared with the method's own name
            // ("faceDetect") as its type, which does not compile. Also dispose the
            // native detector and predictor when detection is finished.
            using (var fd = Dlib.GetFrontalFaceDetector())
            using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
            {
                // load input image (the dead "new Mat()" that was immediately
                // overwritten has been removed)
                var input = Cv2.ImRead(filename, ImreadModes.AnyColor);
                original_img = input;
                var image = input;

                var img = Mat2array2D(image);

                // find all faces in the image
                var faces = fd.Operator(img);

                foreach (var face in faces)
                {
                    // find the landmark points for this face
                    var shape = sp.Detect(img, face);

                    // mouth corners (landmark parts 48 and 54)
                    OpenCvSharp.Point Mpoint1;
                    var pointm1 = shape.GetPart((uint)48);
                    Mpoint1.X = pointm1.X;
                    Mpoint1.Y = pointm1.Y;
                    fp.mouse.Add(Mpoint1);

                    OpenCvSharp.Point Mpoint2;
                    var pointm2 = shape.GetPart((uint)54);
                    Mpoint2.X = pointm2.X;
                    Mpoint2.Y = pointm2.Y;
                    fp.mouse.Add(Mpoint2);

                    // eye centers: midpoint of the outer (36/45) and inner (39/42) corners
                    OpenCvSharp.Point cpointe1;
                    var pointe1 = shape.GetPart((uint)36);
                    var pointe2 = shape.GetPart((uint)39);
                    cpointe1.X = (pointe1.X + pointe2.X) / 2;
                    cpointe1.Y = (pointe1.Y + pointe2.Y) / 2;
                    fp.eye.Add(cpointe1);

                    OpenCvSharp.Point cpointe2;
                    var pointe3 = shape.GetPart((uint)42);
                    var pointe4 = shape.GetPart((uint)45);
                    cpointe2.X = (pointe3.X + pointe4.X) / 2;
                    cpointe2.Y = (pointe3.Y + pointe4.Y) / 2;
                    fp.eye.Add(cpointe2);

                    // inner eye corners (landmark parts 39 and 42)
                    OpenCvSharp.Point Npointe1;
                    var pointn1 = shape.GetPart((uint)39);
                    Npointe1.X = pointn1.X;
                    Npointe1.Y = pointn1.Y;
                    fp.nose.Add(Npointe1);

                    OpenCvSharp.Point Npointe2;
                    var pointn2 = shape.GetPart((uint)42);
                    Npointe2.X = pointn2.X;
                    Npointe2.Y = pointn2.Y;
                    fp.nose.Add(Npointe2);

                    // midline points: mouth center and point between the eyes
                    OpenCvSharp.Point Mcpointe;
                    Mcpointe.X = (Mpoint1.X + Mpoint2.X) / 2;
                    Mcpointe.Y = (Mpoint1.Y + Mpoint2.Y) / 2;
                    fp.midline.Add(Mcpointe);

                    OpenCvSharp.Point Ecpointe;
                    Ecpointe.X = (cpointe1.X + cpointe2.X) / 2;
                    Ecpointe.Y = (cpointe1.Y + cpointe2.Y) / 2;
                    fp.midline.Add(Ecpointe);
                }

                // convert back to a BitmapImage for WPF display
                BitmapImage control = Mat2Bmp(image);

                control.DecodePixelHeight = 1100;

                return(control);
            }
        }
Example #28
0
        /// <summary>
        /// Runs dlib's face landmark detection example: the first argument is the
        /// shape-predictor model file, the remaining arguments are images to process.
        /// </summary>
        private static void Main(string[] args)
        {
            if (args.Length == 0)
            {
                Console.WriteLine("Give some image files as arguments to this program.");
                Console.WriteLine("Call this program like this:");
                Console.WriteLine("./face_landmark_detection_ex shape_predictor_68_face_landmarks.dat faces/*.jpg");
                Console.WriteLine("You can get the shape_predictor_68_face_landmarks.dat file from:");
                Console.WriteLine("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2");
                return;
            }

            using (var mainWindow = new ImageWindow())
                using (var chipWindow = new ImageWindow())
                    using (var faceDetector = Dlib.GetFrontalFaceDetector())
                        using (var landmarkPredictor = ShapePredictor.Deserialize(args[0]))
                            foreach (var imageFile in args.Skip(1))
                            {
                                Console.WriteLine($"processing image {imageFile}");

                                using (var image = Dlib.LoadImage <RgbPixel>(imageFile))
                                {
                                    // Upsample once so smaller faces become detectable.
                                    Dlib.PyramidUp(image);

                                    var faceRects = faceDetector.Operator(image);
                                    Console.WriteLine($"Number of faces detected: {faceRects.Length}");

                                    // Fit the landmark model to each detected face rectangle.
                                    var detections = new List <FullObjectDetection>();
                                    foreach (var faceRect in faceRects)
                                    {
                                        var detection = landmarkPredictor.Detect(image, faceRect);
                                        Console.WriteLine($"number of parts: {detection.Parts}");
                                        if (detection.Parts > 2)
                                        {
                                            Console.WriteLine($"pixel position of first part:  {detection.GetPart(0)}");
                                            Console.WriteLine($"pixel position of second part: {detection.GetPart(1)}");
                                            detections.Add(detection);
                                        }
                                    }

                                    mainWindow.ClearOverlay();
                                    mainWindow.SetImage(image);

                                    if (detections.Count > 0)
                                    {
                                        // Overlay the rendered landmark lines on the main window.
                                        var renderedLines = Dlib.RenderFaceDetections(detections);
                                        mainWindow.AddOverlay(renderedLines);

                                        foreach (var line in renderedLines)
                                        {
                                            line.Dispose();
                                        }

                                        // Show the aligned face chips tiled in the second window.
                                        var chipDetails = Dlib.GetFaceChipDetails(detections);
                                        using (var chips = Dlib.ExtractImageChips <RgbPixel>(image, chipDetails))
                                            using (var tiled = Dlib.TileImages(chips))
                                                chipWindow.SetImage(tiled);

                                        foreach (var detail in chipDetails)
                                        {
                                            detail.Dispose();
                                        }
                                    }

                                    Console.WriteLine("hit enter to process next frame");
                                    Console.ReadKey();

                                    foreach (var detection in detections)
                                    {
                                        detection.Dispose();
                                    }
                                }
                            }
        }
Example #29
0
        /// <summary>
        /// The main program entry point: estimates the head pose of every detected
        /// face and highlights faces that are looking straight ahead.
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // Landmark indices used for pose estimation: nose tip (30), chin (8),
            // outer eye corners (36, 45) and mouth corners (48, 54).
            var poseLandmarkIndices = new int[] { 30, 8, 36, 45, 48, 54 };

            // set up Dlib facedetectors and shapedetectors
            using (var faceDetector = Dlib.GetFrontalFaceDetector())
                using (var shapePredictor = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var image = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var detectedFaces = faceDetector.Operator(image);
                    foreach (var face in detectedFaces)
                    {
                        // fit the 68 landmark points to this face
                        var shape = shapePredictor.Detect(image, face);

                        // reference 3d face model
                        var model = Utility.GetFaceModel();

                        // collect the 2d positions of the six pose landmarks
                        var landmarks = new MatOfPoint2d(1, 6,
                                                         poseLandmarkIndices
                                                         .Select(idx => shape.GetPart((uint)idx))
                                                         .Select(pt => new OpenCvSharp.Point2d(pt.X, pt.Y))
                                                         .ToArray());

                        // camera intrinsics derived from the image dimensions
                        var cameraMatrix = Utility.GetCameraMatrix((int)image.Rect.Width, (int)image.Rect.Height);

                        // distortion coefficients: assume no lens distortion
                        var coeffs = new MatOfDouble(4, 1);
                        coeffs.SetTo(0);

                        // solve for the head rotation and translation
                        Mat rotation    = new MatOfDouble();
                        Mat translation = new MatOfDouble();
                        Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                        // convert the rotation to euler angles
                        var euler = Utility.GetEulerMatrix(rotation);

                        // head rotation expressed in degrees
                        var yaw   = 180 * euler.At <double>(0, 2) / Math.PI;
                        var pitch = 180 * euler.At <double>(0, 1) / Math.PI;
                        var roll  = 180 * euler.At <double>(0, 0) / Math.PI;

                        // looking straight ahead wraps at -180/180, so make the range smooth
                        pitch = Math.Sign(pitch) * 180 - pitch;

                        // facing forward means yaw within -25..25 degrees
                        // and pitch within -10..10 degrees
                        var facingForward =
                            yaw >= -25 && yaw <= 25 &&
                            pitch >= -10 && pitch <= 10;

                        // project a model point 1000 units in front of the nose into 2d
                        var poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
                        var poseProjection = new MatOfPoint2d();
                        Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                        // highlight the six key landmarks in yellow
                        foreach (var idx in poseLandmarkIndices)
                        {
                            var point = shape.GetPart((uint)idx);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(image, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        // draw a line from the tip of the nose pointing in the direction of head pose
                        var noseLandmark = landmarks.At <Point2d>(0);
                        var projected    = poseProjection.At <Point2d>(0);
                        Dlib.DrawLine(
                            image,
                            new DlibDotNet.Point((int)noseLandmark.X, (int)noseLandmark.Y),
                            new DlibDotNet.Point((int)projected.X, (int)projected.Y),
                            color: new RgbPixel(0, 255, 255));

                        // draw a box around the face if it's facing forward
                        if (facingForward)
                        {
                            Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(image, "output.jpg");
                }
        }
Example #30
0
        private static int Main(string[] args)
        {
            var app = new CommandLineApplication(false);

            app.Name        = nameof(AgeTraining);
            app.Description = "The program for training Adience OUI Unfiltered faces for gender and age classification dataset";
            app.HelpOption("-h|--help");

            app.Command("train", command =>
            {
                const uint epochDefault             = 300;
                const double learningRateDefault    = 0.001d;
                const double minLearningRateDefault = 0.00001d;
                const uint minBatchSizeDefault      = 256;
                const uint validationDefault        = 30;

                var datasetOption         = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
                var epochOption           = command.Option("-e|--epoch", $"The epoch. Default is {epochDefault}", CommandOptionType.SingleValue);
                var learningRateOption    = command.Option("-l|--lr", $"The learning rate. Default is {learningRateDefault}", CommandOptionType.SingleValue);
                var minLearningRateOption = command.Option("-m|--min-lr", $"The minimum learning rate. Default is {minLearningRateDefault}", CommandOptionType.SingleValue);
                var minBatchSizeOption    = command.Option("-b|--min-batchsize", $"The minimum batch size. Default is {minBatchSizeDefault}", CommandOptionType.SingleValue);
                var validationOption      = command.Option("-v|--validation-interval", $"The interval of validation. Default is {validationDefault}", CommandOptionType.SingleValue);
                var useMeanOption         = command.Option("-u|--use-mean", "Use mean image", CommandOptionType.NoValue);

                command.OnExecute(() =>
                {
                    var dataset = datasetOption.Value();
                    if (!datasetOption.HasValue() || !Directory.Exists(dataset))
                    {
                        Console.WriteLine("dataset does not exist");
                        return(-1);
                    }

                    var epoch = epochDefault;
                    if (epochOption.HasValue() && !uint.TryParse(epochOption.Value(), out epoch))
                    {
                        Console.WriteLine("epoch is invalid value");
                        return(-1);
                    }

                    var learningRate = learningRateDefault;
                    if (learningRateOption.HasValue() && !double.TryParse(learningRateOption.Value(), NumberStyles.Float, Thread.CurrentThread.CurrentCulture.NumberFormat, out learningRate))
                    {
                        Console.WriteLine("learning rate is invalid value");
                        return(-1);
                    }

                    // Optional: minimum learning rate at which training stops decaying.
                    // Parsed with the current thread's culture so decimal separators match the console locale.
                    var minLearningRate = minLearningRateDefault;
                    if (minLearningRateOption.HasValue() && !double.TryParse(minLearningRateOption.Value(), NumberStyles.Float, Thread.CurrentThread.CurrentCulture.NumberFormat, out minLearningRate))
                    {
                        Console.WriteLine("minimum learning rate is invalid value");
                        return(-1);
                    }

                    // Optional: minimum mini-batch size passed to the trainer.
                    var minBatchSize = minBatchSizeDefault;
                    if (minBatchSizeOption.HasValue() && !uint.TryParse(minBatchSizeOption.Value(), out minBatchSize))
                    {
                        Console.WriteLine("minimum batch size is invalid value");
                        return(-1);
                    }

                    // Optional: validation interval; zero is rejected as it would disable validation.
                    // NOTE(review): `&&` binds tighter than `||`, so this reads as
                    // (HasValue && !TryParse) || validation == 0 — if validationDefault were 0,
                    // the command would fail even when the option is omitted. Confirm intent.
                    var validation = validationDefault;
                    if (validationOption.HasValue() && !uint.TryParse(validationOption.Value(), out validation) || validation == 0)
                    {
                        Console.WriteLine("validation interval is invalid value");
                        return(-1);
                    }

                    // Flag option: presence alone enables mean subtraction.
                    var useMean = useMeanOption.HasValue();

                    // Echo the effective configuration before starting the (long-running) training.
                    Console.WriteLine($"            Dataset: {dataset}");
                    Console.WriteLine($"              Epoch: {epoch}");
                    Console.WriteLine($"      Learning Rate: {learningRate}");
                    Console.WriteLine($"  Min Learning Rate: {minLearningRate}");
                    Console.WriteLine($"     Min Batch Size: {minBatchSize}");
                    Console.WriteLine($"Validation Interval: {validation}");
                    Console.WriteLine($"           Use Mean: {useMean}");
                    Console.WriteLine();

                    // The hyperparameters are baked into the output file name so runs don't overwrite each other.
                    var baseName = $"adience-age-network_{epoch}_{learningRate}_{minLearningRate}_{minBatchSize}_{useMean}";
                    Train(baseName, dataset, epoch, learningRate, minLearningRate, minBatchSize, validation, useMean);

                    return(0);
                });
            });

            // "test" subcommand: evaluates a trained model against a dataset directory.
            app.Command("test", command =>
            {
                var datasetOption = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
                var modelOption   = command.Option("-m|--model", "The model file.", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    // Both options are required; missing or nonexistent paths abort with exit code -1.
                    var dataset = datasetOption.Value();
                    if (!datasetOption.HasValue() || !Directory.Exists(dataset))
                    {
                        Console.WriteLine("dataset does not exist");
                        return(-1);
                    }

                    var model = modelOption.Value();
                    // Fixed copy-paste bug: this guard previously re-checked datasetOption.HasValue()
                    // instead of modelOption.HasValue(), so a missing -m/--model was only caught
                    // incidentally by File.Exists(null) returning false.
                    if (!modelOption.HasValue() || !File.Exists(model))
                    {
                        Console.WriteLine("model does not exist");
                        return(-1);
                    }

                    // Echo the effective configuration before evaluation starts.
                    Console.WriteLine($"Dataset: {dataset}");
                    Console.WriteLine($"  Model: {model}");
                    Console.WriteLine();

                    Test(dataset, model);

                    return(0);
                });
            });

            // "preprocess" subcommand: crops/aligns faces from the raw dataset into an output
            // directory, mirroring the train/test split layout and copying the CSV label files.
            app.Command("preprocess", command =>
            {
                var datasetOption = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
                var outputOption  = command.Option("-o|--output", "The path to output preprocessed dataset", CommandOptionType.SingleValue);

                command.OnExecute(() =>
                {
                    // Guard: the source dataset directory must be supplied and must exist.
                    var datasetPath = datasetOption.Value();
                    if (!datasetOption.HasValue() || !Directory.Exists(datasetPath))
                    {
                        Console.WriteLine("dataset does not exist");
                        return(-1);
                    }

                    // Guard: an output path must be supplied (it is created if absent).
                    var outputPath = outputOption.Value();
                    if (!outputOption.HasValue())
                    {
                        Console.WriteLine("output does not specify");
                        return(-1);
                    }

                    Directory.CreateDirectory(outputPath);

                    // The dataset is expected to contain one image directory and one CSV per split.
                    var splits = new[]
                    {
                        "train", "test"
                    };

                    // Validate each split, copy its label CSV, and create its output directory
                    // before any expensive image processing begins.
                    foreach (var split in splits)
                    {
                        var imagesDirectory = Path.Combine(datasetPath, split);
                        if (!Directory.Exists(imagesDirectory))
                        {
                            Console.WriteLine($"{imagesDirectory} does not exist");
                            return(-1);
                        }

                        var csvPath = Path.Combine(datasetPath, $"{split}.csv");
                        if (!File.Exists(csvPath))
                        {
                            Console.WriteLine($"{csvPath} does not exist");
                            return(-1);
                        }

                        File.Copy(csvPath, Path.Combine(outputPath, $"{split}.csv"), true);
                        Directory.CreateDirectory(Path.Combine(outputPath, split));
                    }

                    // Echo the effective configuration before processing starts.
                    Console.WriteLine($"Dataset: {datasetPath}");
                    Console.WriteLine($" Output: {outputPath}");
                    Console.WriteLine();

                    // The 5-point landmark model and the frontal face detector are shared
                    // across both splits and disposed together when processing completes.
                    using (var shapePredictor = ShapePredictor.Deserialize("shape_predictor_5_face_landmarks.dat"))
                    using (var detector = Dlib.GetFrontalFaceDetector())
                    {
                        foreach (var split in splits)
                            Preprocess(split, datasetPath, detector, shapePredictor, outputPath);
                    }

                    return(0);
                });
            });

            // Dispatch to the subcommand matching args and propagate its exit code.
            return(app.Execute(args));
        }