/// <summary>
/// Called when videoPlayer receives a new frame.
/// </summary>
/// <param name="sender">The event sender.</param>
/// <param name="eventArgs">The event arguments holding the new frame.</param>
private void videoPlayer_NewFrameReceived(object sender, Accord.Video.NewFrameEventArgs eventArgs)
{
    // convert image to dlib format
    var img = eventArgs.Frame.ToArray2D<RgbPixel>();

    // detect face every 4 frames
    if (frameIndex % 4 == 0)
    {
        var faces = faceDetector.Detect(img);
        if (faces.Length > 0)
        {
            currentFace = faces.First();
        }
    }

    // abort if we don't have a face at this point
    if (currentFace == default(DlibDotNet.Rectangle))
    {
        return;
    }

    // detect facial landmarks
    var shape = shapePredictor.Detect(img, currentFace);

    // detect head pose
    if (shape.Parts == 68)
    {
        DetectHeadPose(eventArgs.Frame, shape);
    }

    // update frame counter
    frameIndex++;
}
/// <summary>
/// Called when videoPlayer receives a new frame.
/// </summary>
/// <param name="sender">The event sender.</param>
/// <param name="image">The new camera frame.</param>
private void videoPlayer_NewFrame(object sender, ref System.Drawing.Bitmap image)
{
    // convert frame to grayscale
    var grayscale = new GrayscaleBT709();
    var grayImage = grayscale.Apply(image);

    // convert image to dlib format
    var img = grayImage.ToArray2D<RgbPixel>();

    // detect face every 4 frames
    if (frameIndex % 4 == 0)
    {
        var faces = faceDetector.Detect(img);
        if (faces.Length > 0)
        {
            currentFace = faces.First();
        }
    }

    // abort if we don't have a face at this point
    if (currentFace == default(DlibDotNet.Rectangle))
    {
        return;
    }

    // detect facial landmarks
    var shape = shapePredictor.Detect(img, currentFace);

    // detect eye state
    DetectEyeState(image, shape);

    // update frame counter
    frameIndex++;
}
private void CheckFace(Mat frame, Rectangle face, double yaw, double pitch)
{
    // note: the original signature took the pitch twice (once misspelled as
    // "picth"); the duplicate parameter has been removed
    if (this.countdown == 0)
    {
        this.picture.Image = frame.ToBitmap();
        this.SuccessMsg.Visible = true;
        SetStart();
        SetZero();
    }
    else if (this.step == this.checker.Length)
    {
        this.stopwatch.Start();
        CountDown(face);
        if (!IsForntFace(yaw, pitch))
        {
            SetZero();
            this.ErrorMsg.Visible = true;
        }
    }
    else if ((this.step == 0 && IsFaceInFrame(face)) || Math.Abs(this.checker[this.step] - pitch) <= 5)
    {
        checkedListBox.SetItemChecked(this.step, true);
        this.step++;
    }
}
private void CountDown(Rectangle face)
{
    var time = this.stopwatch.Elapsed.TotalSeconds;
    this.countdown = this.timeset - (int)time;

    // refresh the countdown text on the fourth checklist item
    checkedListBox.Items.RemoveAt(3);
    checkedListBox.Items.Insert(3, $"4. Look straight ahead ({this.countdown} seconds)");
    checkedListBox.SetItemChecked(3, true);
}
private static List<(OutputLabels<Matrix<float>>, Rectangle[])> GetData(List<Bitmap> bitmaps, bool isAFace = false)
{
    var datas = new List<(OutputLabels<Matrix<float>>, Rectangle[])>();
    try
    {
        foreach (var bitmap in bitmaps)
        {
            var faces = new List<Matrix<RgbPixel>>();
            var dets = new Rectangle[0];

            // to find faces in the image we need a face detector:
            using (var detector = Dlib.GetFrontalFaceDetector())
            using (var img = bitmap.ToMatrix<RgbPixel>())
            {
                // sort the detected faces by area, largest first
                dets = detector.Operator(img).OrderByDescending(x => x.Area).ToArray();

                // either process only the largest face, or all of them
                if (isAFace)
                {
                    // guard against an empty detection result before indexing
                    if (dets.Length > 0)
                    {
                        var shape = _SP.Detect(img, dets[0]);
                        var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        var faceChip = Dlib.ExtractImageChip<RgbPixel>(img, faceChipDetail);
                        faces.Add(faceChip);
                    }
                }
                else
                {
                    foreach (var face in dets)
                    {
                        var shape = _SP.Detect(img, face);
                        var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        var faceChip = Dlib.ExtractImageChip<RgbPixel>(img, faceChipDetail);
                        faces.Add(faceChip);
                    }
                }

                if (!faces.Any())
                {
                    datas.Add((null, null));
                }
                else
                {
                    // this call asks the DNN to convert each face image into a 128D vector.
                    // in this 128D vector space, images of the same person are close to
                    // each other, while vectors from different people are far apart, so
                    // these vectors can tell whether two images show the same person.
                    datas.Add((_NET.Operator(faces), dets));
                }
            }
        }
    }
    catch (Exception ex)
    {
        LogHelperNLog.Error(ex);
    }

    return datas;
}
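As the comments above note, the network maps each face to a 128D embedding in which distance indicates identity. A minimal sketch of the comparison step, assuming the two embeddings have already been copied into float arrays; IsSamePerson is a hypothetical name, and the 0.6 cutoff is dlib's commonly cited default threshold for its face recognition model:

private static bool IsSamePerson(float[] embeddingA, float[] embeddingB, double threshold = 0.6)
{
    // Euclidean distance between the two 128D embedding vectors
    var sum = 0.0;
    for (var i = 0; i < embeddingA.Length; i++)
    {
        var d = embeddingA[i] - embeddingB[i];
        sum += d * d;
    }
    return Math.Sqrt(sum) < threshold;
}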
/// <summary>
/// Adjust the rectangle to make sure it is inside the range of the image.
/// </summary>
/// <param name="rect">rectangle to be adjusted</param>
/// <param name="img">image for reference</param>
/// <returns>The rectangle clamped to the image bounds.</returns>
public static DlibDotNet.Rectangle RectangleAdjust(DlibDotNet.Rectangle rect, Array2D<RgbPixel> img)
{
    DlibDotNet.Rectangle fitRect = new DlibDotNet.Rectangle();
    fitRect.Right = rect.Right < img.Rect.Right ? rect.Right : img.Rect.Right;
    fitRect.Left = rect.Left > img.Rect.Left ? rect.Left : img.Rect.Left;
    fitRect.Top = rect.Top > img.Rect.Top ? rect.Top : img.Rect.Top;
    fitRect.Bottom = rect.Bottom < img.Rect.Bottom ? rect.Bottom : img.Rect.Bottom;
    return fitRect;
}
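The ternary expressions above are plain min/max clamps. The same adjustment can be written more compactly with Math.Max/Math.Min; this is a sketch with a hypothetical name (RectangleAdjustCompact), not part of the original code:

public static DlibDotNet.Rectangle RectangleAdjustCompact(DlibDotNet.Rectangle rect, Array2D<RgbPixel> img)
{
    // clamp each edge to the image bounds; behavior matches RectangleAdjust above
    return new DlibDotNet.Rectangle(
        Math.Max(rect.Left, img.Rect.Left),
        Math.Max(rect.Top, img.Rect.Top),
        Math.Min(rect.Right, img.Rect.Right),
        Math.Min(rect.Bottom, img.Rect.Bottom));
}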
/// <summary>
/// Perform the actual face detection.
/// </summary>
/// <param name="bitmap">The image to search for faces.</param>
/// <returns>The detected face rectangles.</returns>
public Rectangle[] Face(Bitmap bitmap)
{
    var dets = new Rectangle[0];
    using (var detector = Dlib.GetFrontalFaceDetector())
    using (var img = bitmap.ToArray2D<RgbPixel>())
    {
        dets = detector.Operator(img);
    }

    return dets;
}
private DlibDotNet.Rectangle ConvertToDlib(Rect rect)
{
    // map an OpenCvSharp Rect (x, y, width, height) onto dlib's
    // left/top/right/bottom representation
    DlibDotNet.Rectangle dlibRect = new DlibDotNet.Rectangle()
    {
        Left = rect.X,
        Top = rect.Y,
        Right = rect.X + rect.Width,
        Bottom = rect.Y + rect.Height
    };
    return dlibRect;
}
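Note that dlib rectangles treat the right and bottom edges as inclusive (Width is Right - Left + 1), so a round trip through this converter can differ by one pixel. A sketch of the reverse mapping, with ConvertToRect as a hypothetical name:

private Rect ConvertToRect(DlibDotNet.Rectangle rect)
{
    // dlib's Width/Height already account for the inclusive right/bottom edges
    return new Rect(rect.Left, rect.Top, (int)rect.Width, (int)rect.Height);
}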
private bool IsFaceInFrame(Rectangle face)
{
    // ratio of the detected face area to the target frame area
    var percent = (double)face.Area / (this.size.Width * this.size.Height);

    // the face center must lie inside the guide rectangle, and the face must
    // be neither much smaller nor much larger than the guide
    var isFaceInFrame =
        this.rect.Left < face.Center.X && face.Center.X < this.rect.Right &&
        this.rect.Top < face.Center.Y && face.Center.Y < this.rect.Bottom &&
        -0.5 < percent - 1 && percent - 1 < 1;

    this.ErrorMsg.Visible = !isFaceInFrame;
    return isFaceInFrame;
}
/// <summary>
/// Get face locations from an image file path.
/// </summary>
/// <param name="url">The image path, relative to the application base directory.</param>
/// <returns>The detected face rectangles.</returns>
public static Rectangle[] GetResult(string url)
{
    var dets = new Rectangle[0];
    url = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, url);
    using (var detector = Dlib.GetFrontalFaceDetector())
    using (var img = Dlib.LoadImage<RgbPixel>(url))
    {
        dets = detector.Operator(img);
    }

    return dets;
}
/// <summary>
/// Called when videoPlayer receives a new frame.
/// </summary>
/// <param name="sender">The event sender.</param>
/// <param name="image">The new camera frame.</param>
private void videoPlayer_NewFrame(object sender, ref Bitmap image)
{
    // convert image to dlib format
    var img = image.ToArray2D<RgbPixel>();

    // find the face
    // note that we only detect faces every 4 frames
    if (faceRect == default(DlibDotNet.Rectangle) || frameIndex++ % 4 == 0)
    {
        var faces = faceDetector.Detect(img);
        faceRect = faces.FirstOrDefault();
    }

    // abort if we found no face
    if (faceRect == default(DlibDotNet.Rectangle))
    {
        return;
    }

    // find face landmark points
    var shape = shapePredictor.Detect(img, faceRect);
    var landmarkPoints = BeardHelper.GetLandmarkPoints(shape);

    // find beard landmark points
    var beardPoints = BeardHelper.GetBeardPoints();

    // calculate Delaunay triangles
    var triangles = Utility.GetDelaunayTriangles(landmarkPoints);

    // get transformations to warp the beard onto the face
    var warps = Utility.GetWarps(beardPoints, landmarkPoints, triangles);

    // split the beard image into an alpha mask and an RGB part
    var beard = BitmapConverter.ToMat(beardImage);
    BeardHelper.SplitChannels(beard, out var beardMask, out var beardRgb);

    // warp the beard RGB image
    var warpedBeard = Utility.ApplyWarps(BitmapConverter.ToBitmap(beardRgb), image.Width, image.Height, warps);

    // warp the beard alpha mask
    var warpedBeardMask = Utility.ApplyWarps(BitmapConverter.ToBitmap(beardMask), image.Width, image.Height, warps);

    // blend the beard onto the camera frame by using the mask
    var frame = BitmapConverter.ToMat(image);
    var result = BeardHelper.Blend(warpedBeard, warpedBeardMask, frame);

    // return result
    image = BitmapConverter.ToBitmap(result);
}
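BeardHelper.SplitChannels is not shown here. As an assumption about what such a helper might do, a 4-channel BGRA overlay could be split into a 3-channel alpha mask and a color image with OpenCvSharp as follows (SplitChannelsSketch is a hypothetical stand-in, not the original helper):

private static void SplitChannelsSketch(Mat bgra, out Mat mask, out Mat color)
{
    // separate the B, G, R and A planes
    var planes = Cv2.Split(bgra);

    // replicate the alpha plane into a 3-channel mask for blending
    mask = new Mat();
    Cv2.Merge(new[] { planes[3], planes[3], planes[3] }, mask);

    // recombine the color planes without alpha
    color = new Mat();
    Cv2.Merge(new[] { planes[0], planes[1], planes[2] }, color);
}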
protected override void Demo(FaceRecognition faceRecognition, string modelFile, string imageFile, Image image, Location location)
{
    var networkId = SetupNetwork();
    using (var net = LossMulticlassLog.Deserialize(modelFile, networkId))
    using (var bitmap = (Bitmap)System.Drawing.Image.FromFile(imageFile))
    using (var org = new Bitmap(bitmap.Width, bitmap.Height))
    using (var g = Graphics.FromImage(org))
    {
        g.DrawImage(bitmap,
                    new System.Drawing.Rectangle(0, 0, org.Width, org.Height),
                    new System.Drawing.Rectangle(0, 0, bitmap.Width, bitmap.Height),
                    GraphicsUnit.Pixel);

        var rect = new Rectangle(location.Left, location.Top, location.Right, location.Bottom);
        var dPoint = new[]
        {
            new DPoint(rect.Left, rect.Top),
            new DPoint(rect.Right, rect.Top),
            new DPoint(rect.Left, rect.Bottom),
            new DPoint(rect.Right, rect.Bottom),
        };

        using (var tmp = Dlib.LoadImageAsMatrix<byte>(imageFile))
        using (var face = Dlib.ExtractImage4Points(tmp, dPoint, this.Size, this.Size))
        {
            this.SetEvalMode(networkId, net);

            var results = net.Probability(face, 1).ToArray();
            var labels = net.GetLabels();
            var dictionary = new Dictionary<string, float>();
            for (var index = 0; index < labels.Length; index++)
            {
                dictionary.Add(labels[index], results[0][index]);
            }

            // pick the label with the highest probability
            var maxResult = dictionary.Aggregate((max, working) => (max.Value > working.Value) ? max : working);
            var emotion = maxResult.Key;
            var probability = maxResult.Value;

            using (var p = new Pen(Color.Red, bitmap.Width / 200f))
            using (var b = new SolidBrush(Color.Blue))
            using (var font = new Font("Calibri", 16))
            {
                g.DrawRectangle(p, rect.Left, rect.Top, rect.Width, rect.Height);
                g.DrawString($"{emotion}\n({probability})", font, b, new PointF(rect.Left + 10, rect.Top + 10));
            }

            org.Save("demo.jpg");
        }
    }
}
private static byte[] ExtractFace(Rect face, Bitmap source)
{
    using (var target = new Bitmap((int)face.Width, (int)face.Height))
    {
        using (Graphics g = Graphics.FromImage(target))
        {
            g.DrawImage(source,
                        new Rectangle(0, 0, target.Width, target.Height),
                        new Rectangle(face.Left, face.Top, (int)face.Width, (int)face.Height),
                        GraphicsUnit.Pixel);
        }

        using (var memoryStream = new MemoryStream())
        {
            target.Save(memoryStream, ImageFormat.Png);
            return memoryStream.ToArray();
        }
    }
}
private void OnCameraFrame(object sender, EventArgs e)
{
    // grab the next camera frame and mirror it horizontally
    img = capture.RetrieveMat();
    Cv2.Flip(img, img, FlipMode.Y);

    // copy the raw pixel data into a dlib image
    var array = new byte[img.Cols * img.Rows * img.ElemSize()];
    Marshal.Copy(img.Data, array, 0, array.Length);
    var image = Dlib.LoadImageData<RgbPixel>(array, (uint)img.Rows, (uint)img.Cols, (uint)(img.Cols * img.ElemSize()));

    // detect all faces and their landmarks
    faces = detector.Operator(image);
    shapes.Clear();
    foreach (var rect in faces)
    {
        DlibDotNet.Rectangle face = rect;
        shapes.Add(predictor.Detect(image, face));
    }

    Invalidate();
}
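OpenCV stores frames in BGR order, so copying raw Mat bytes into RgbPixel data as above swaps the red and blue channels. For HOG face detection this usually does not matter, but when exact colors do, the frame can be converted first. A minimal sketch, with ToDlibRgb as a hypothetical helper name:

private static Array2D<RgbPixel> ToDlibRgb(Mat bgr)
{
    using (var rgb = new Mat())
    {
        // reorder the channels so RgbPixel sees true RGB data
        Cv2.CvtColor(bgr, rgb, ColorConversionCodes.BGR2RGB);
        var bytes = new byte[rgb.Cols * rgb.Rows * rgb.ElemSize()];
        Marshal.Copy(rgb.Data, bytes, 0, bytes.Length);
        return Dlib.LoadImageData<RgbPixel>(bytes, (uint)rgb.Rows, (uint)rgb.Cols, (uint)(rgb.Cols * rgb.ElemSize()));
    }
}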
/// <summary>
/// Detect all 68 landmarks on the face on camera.
/// </summary>
/// <param name="image">The current camera frame to analyze</param>
/// <param name="frameIndex">The index number of the current camera frame</param>
/// <returns>A FullObjectDetection object containing all 68 facial landmark points</returns>
private FullObjectDetection DetectLandmarks(Bitmap image, int frameIndex)
{
    // convert image to dlib format
    var dlibImage = image.ToArray2D<RgbPixel>();

    // detect faces every 5 frames
    if (frameIndex % 5 == 0)
    {
        var faces = faceDetector.Detect(dlibImage);
        if (faces.Length > 0)
        {
            // grab the first face
            currentFace = faces.First();
        }
    }

    // detect all 68 facial landmarks on the face
    if (currentFace != default(DlibDotNet.Rectangle))
    {
        return shapePredictor.Detect(dlibImage, currentFace);
    }

    return null;
}
private void Timer1_Tick(object sender, EventArgs e)
{
    capture.Read(frame);

    // center the guide rectangle in the frame
    this.point = new Point((frame.Width - size.Width) / 2, (frame.Height - size.Height) / 2);
    this.rect = new Rect(point, size);
    Cv2.Flip(frame, frame, FlipMode.Y);

    if (!frame.Empty() && start)
    {
        var img = ConvertToArray2D(frame);
        var faces = fd.Operator(img);
        if (faces.Any(face => IsFaceInFrame(face)))
        {
            foreach (var face in faces)
            {
                if (IsFaceInFrame(face))
                {
                    //Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);

                    // detect the landmarks and keep the six points used for pose estimation
                    var shape = sp.Detect(img, face);
                    var landmarks = new MatOfPoint2d(1, 6,
                        (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                         let pt = shape.GetPart((uint)i)
                         select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

                    // solve for the head pose
                    var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);
                    Mat rotation = new MatOfDouble();
                    Mat translation = new MatOfDouble();
                    Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                    // convert the rotation to yaw and pitch in degrees
                    var euler = Utility.GetEulerMatrix(rotation);
                    var yaw = 180 * euler.At<double>(0, 2) / Math.PI;
                    var pitch = 180 * euler.At<double>(0, 1) / Math.PI;
                    pitch = Math.Sign(pitch) * 180 - pitch;

                    Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                    //var landmark = landmarks.At<Point2d>(0);
                    //var p = poseProjection.At<Point2d>(0);
                    //Dlib.DrawLine(
                    //    img,
                    //    new DlibDotNet.Point((int)landmark.X, (int)landmark.Y),
                    //    new DlibDotNet.Point((int)p.X, (int)p.Y),
                    //    color: new RgbPixel(0, 255, 255));

                    //foreach (var i in new int[] { 30, 8, 36, 45, 48, 54 })
                    //{
                    //    var point = shape.GetPart((uint)i);
                    //    var rect = new Rectangle(point);
                    //    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                    //}

                    // draw all landmarks on the image
                    for (var i = 0; i < shape.Parts; i++)
                    {
                        var point = shape.GetPart((uint)i);
                        var rect = new Rectangle(point);
                        Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }

                    // the duplicate pitch argument was removed from CheckFace
                    CheckFace(frame, face, yaw, pitch);
                    frame = img.ToBitmap().ToMat();
                }
            }
        }
        else if (this.step > 0)
        {
            SetZero();
            this.ErrorMsg.Visible = true;
        }
    }

    Cv2.Rectangle(frame, rect, Scalar.Yellow, thickness: 2);
    camera.Image = frame.ToBitmap();
}
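Utility.GetCameraMatrix is not shown above. A common approximation for webcam pose estimation, offered here as an assumption rather than the original implementation, is a pinhole intrinsic matrix with the focal length set to the image width and the principal point at the image center (GetCameraMatrixSketch is a hypothetical name):

public static Mat GetCameraMatrixSketch(int width, int height)
{
    // pinhole intrinsics: focal length ~ image width, square pixels assumed,
    // principal point at the image center
    var matrix = new Mat(3, 3, MatType.CV_64FC1);
    matrix.Set<double>(0, 0, width);        // fx
    matrix.Set<double>(0, 1, 0);
    matrix.Set<double>(0, 2, width / 2.0);  // cx
    matrix.Set<double>(1, 0, 0);
    matrix.Set<double>(1, 1, width);        // fy
    matrix.Set<double>(1, 2, height / 2.0); // cy
    matrix.Set<double>(2, 0, 0);
    matrix.Set<double>(2, 1, 0);
    matrix.Set<double>(2, 2, 1);
    return matrix;
}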
private static Location TrimBound(Rectangle location, int width, int height)
{
    return new Location(Math.Max(location.Left, 0),
                        Math.Max(location.Top, 0),
                        Math.Min(location.Right, width),
                        Math.Min(location.Bottom, height));
}
public int Start(string[] args)
{
    var app = new CommandLineApplication(false);
    app.Name = this._Name;
    app.Description = this._Description;
    app.HelpOption("-h|--help");

    app.Command("clean", command =>
    {
        var outputOption = command.Option("-o|--output", "The output directory path.", CommandOptionType.SingleValue);
        command.OnExecute(() =>
        {
            if (!outputOption.HasValue() || !Directory.Exists(outputOption.Value()))
            {
                Logger.Error($"'{outputOption.Value()}' is missing or output option is not specified");
                return -1;
            }

            Logger.Info($" Output: {outputOption.Value()}");
            Logger.Info("");

            Clean(outputOption.Value());
            return 0;
        });
    });

    app.Command("train", command =>
    {
        const uint epochDefault = 300;
        const double learningRateDefault = 0.001d;
        const double minLearningRateDefault = 0.00001d;
        const uint minBatchSizeDefault = 256;
        const uint validationDefault = 30;

        var datasetOption = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
        var epochOption = command.Option("-e|--epoch", $"The epoch. Default is {epochDefault}", CommandOptionType.SingleValue);
        var learningRateOption = command.Option("-l|--lr", $"The learning rate. Default is {learningRateDefault}", CommandOptionType.SingleValue);
        var minLearningRateOption = command.Option("-m|--min-lr", $"The minimum learning rate. Default is {minLearningRateDefault}", CommandOptionType.SingleValue);
        var minBatchSizeOption = command.Option("-b|--min-batchsize", $"The minimum batch size. Default is {minBatchSizeDefault}", CommandOptionType.SingleValue);
        var validationOption = command.Option("-v|--validation-interval", $"The interval of validation. Default is {validationDefault}", CommandOptionType.SingleValue);
        var useMeanOption = command.Option("-u|--use-mean", "Use mean image", CommandOptionType.NoValue);
        var outputOption = command.Option("-o|--output", "The output directory path.", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            var dataset = datasetOption.Value();
            if (!datasetOption.HasValue() || !Directory.Exists(dataset))
            {
                Logger.Error("dataset does not exist");
                return -1;
            }

            var epoch = epochDefault;
            if (epochOption.HasValue() && !uint.TryParse(epochOption.Value(), out epoch))
            {
                Logger.Error("epoch is invalid value");
                return -1;
            }

            var learningRate = learningRateDefault;
            if (learningRateOption.HasValue() && !double.TryParse(learningRateOption.Value(), NumberStyles.Float, Thread.CurrentThread.CurrentCulture.NumberFormat, out learningRate))
            {
                Logger.Error("learning rate is invalid value");
                return -1;
            }

            var minLearningRate = minLearningRateDefault;
            if (minLearningRateOption.HasValue() && !double.TryParse(minLearningRateOption.Value(), NumberStyles.Float, Thread.CurrentThread.CurrentCulture.NumberFormat, out minLearningRate))
            {
                Logger.Error("minimum learning rate is invalid value");
                return -1;
            }

            var minBatchSize = minBatchSizeDefault;
            if (minBatchSizeOption.HasValue() && !uint.TryParse(minBatchSizeOption.Value(), out minBatchSize))
            {
                Logger.Error("minimum batch size is invalid value");
                return -1;
            }

            var validation = validationDefault;
            if (validationOption.HasValue() && !uint.TryParse(validationOption.Value(), out validation) || validation == 0)
            {
                Logger.Error("validation interval is invalid value");
                return -1;
            }

            var output = "result";
            if (outputOption.HasValue())
            {
                output = outputOption.Value();
            }

            Directory.CreateDirectory(output);

            var useMean = useMeanOption.HasValue();

            Logger.Info($"            Dataset: {dataset}");
            Logger.Info($"              Epoch: {epoch}");
            Logger.Info($"      Learning Rate: {learningRate}");
            Logger.Info($"  Min Learning Rate: {minLearningRate}");
            Logger.Info($"     Min Batch Size: {minBatchSize}");
            Logger.Info($"Validation Interval: {validation}");
            Logger.Info($"           Use Mean: {useMean}");
            Logger.Info($"             Output: {output}");
            Logger.Info("");

            var name = this.GetBaseName(epoch, learningRate, minLearningRate, minBatchSize);
            var baseName = Path.Combine(output, name);
            var parameter = new Parameter
            {
                BaseName = baseName,
                Dataset = dataset,
                Output = output,
                Epoch = epoch,
                LearningRate = learningRate,
                MinLearningRate = minLearningRate,
                MiniBatchSize = minBatchSize,
                Validation = validation
            };

            Train(parameter);
            return 0;
        });
    });

    app.Command("test", command =>
    {
        var datasetOption = command.Option("-d|--dataset", "The directory of dataset", CommandOptionType.SingleValue);
        var modelOption = command.Option("-m|--model", "The model file path", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            var dataset = datasetOption.Value();
            if (!datasetOption.HasValue() || !Directory.Exists(dataset))
            {
                Logger.Error("dataset does not exist");
                return -1;
            }

            var model = modelOption.Value();
            if (!modelOption.HasValue() || !File.Exists(model))
            {
                Logger.Error("model does not exist");
                return -1;
            }

            Logger.Info($"Dataset: {dataset}");
            Logger.Info($"  Model: {model}");
            Logger.Info("");

            var parameter = new Parameter
            {
                Dataset = dataset,
                Model = model
            };

            Test(parameter);
            return 0;
        });
    });

    app.Command("eval", command =>
    {
        var imageOption = command.Option("-i|--image", "The image file.", CommandOptionType.SingleValue);
        var modelOption = command.Option("-m|--model", "The model file path", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            var image = imageOption.Value();
            if (!imageOption.HasValue() || !File.Exists(image))
            {
                Logger.Error("image does not exist");
                return -1;
            }

            var model = modelOption.Value();
            if (!modelOption.HasValue() || !File.Exists(model))
            {
                Logger.Error("model file does not exist");
                return -1;
            }

            Logger.Info($"Image File: {image}");
            Logger.Info($"     Model: {model}");
            Logger.Info("");

            var networkId = SetupNetwork();
            using (var net = LossMulticlassLog.Deserialize(model, networkId))
            using (var fr = FaceRecognition.Create("Models"))
            using (var img = FaceRecognition.LoadImageFile(image))
            {
                var location = fr.FaceLocations(img).FirstOrDefault();
                if (location == null)
                {
                    Logger.Info("Missing face");
                    return -1;
                }

                var rect = new Rectangle(location.Left, location.Top, location.Right, location.Bottom);
                var dPoint = new[]
                {
                    new DPoint(rect.Left, rect.Top),
                    new DPoint(rect.Right, rect.Top),
                    new DPoint(rect.Left, rect.Bottom),
                    new DPoint(rect.Right, rect.Bottom),
                };

                using (var tmp = Dlib.LoadImageAsMatrix<byte>(image))
                using (var face = Dlib.ExtractImage4Points(tmp, dPoint, this.Size, this.Size))
                {
                    this.SetEvalMode(networkId, net);
                    using (var predictedLabels = net.Operator(face))
                        Logger.Info($"{this.Cast(predictedLabels[0])}");
                }
            }

            return 0;
        });
    });

    app.Command("demo", command =>
    {
        command.HelpOption("-?|-h|--help");

        var imageOption = command.Option("-i|--image", "test image file", CommandOptionType.SingleValue);
        var modelOption = command.Option("-m|--model", "model file", CommandOptionType.SingleValue);
        var directoryOption = command.Option("-d|--directory", "model files directory path", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            if (!imageOption.HasValue())
            {
                Console.WriteLine("image option is missing");
                app.ShowHelp();
                return -1;
            }

            if (!directoryOption.HasValue())
            {
                Console.WriteLine("directory option is missing");
                app.ShowHelp();
                return -1;
            }

            if (!modelOption.HasValue())
            {
                Console.WriteLine("model option is missing");
                app.ShowHelp();
                return -1;
            }

            var modelFile = modelOption.Value();
            if (!File.Exists(modelFile))
            {
                Console.WriteLine($"'{modelFile}' is not found");
                app.ShowHelp();
                return -1;
            }

            var imageFile = imageOption.Value();
            if (!File.Exists(imageFile))
            {
                Console.WriteLine($"'{imageFile}' is not found");
                app.ShowHelp();
                return -1;
            }

            var directory = directoryOption.Value();
            if (!Directory.Exists(directory))
            {
                Console.WriteLine($"'{directory}' is not found");
                app.ShowHelp();
                return -1;
            }

            using (var fr = FaceRecognition.Create(directory))
            using (var image = FaceRecognition.LoadImageFile(imageFile))
            {
                var loc = fr.FaceLocations(image).FirstOrDefault();
                if (loc == null)
                {
                    Console.WriteLine("No face is detected");
                    return 0;
                }

                this.Demo(fr, modelFile, imageFile, image, loc);
            }

            return 0;
        });
    });

    return app.Execute(args);
}
private static void Main(string[] args)
{
    var app = new CommandLineApplication(false);
    app.Name = nameof(HelenTraining);
    app.Description = "The program for training helen dataset";
    app.HelpOption("-h|--help");

    app.Command("generate", command =>
    {
        command.HelpOption("-?|-h|--help");

        var paddingOption = command.Option("-p|--padding", "padding of detected face", CommandOptionType.SingleValue);
        var modelsOption = command.Option("-m|--model", "model files directory path", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            if (!modelsOption.HasValue())
            {
                Console.WriteLine("model option is missing");
                app.ShowHelp();
                return -1;
            }

            if (!paddingOption.HasValue())
            {
                Console.WriteLine("padding option is missing");
                app.ShowHelp();
                return -1;
            }

            var directory = modelsOption.Value();
            if (!Directory.Exists(directory))
            {
                Console.WriteLine($"'{directory}' is not found");
                app.ShowHelp();
                return -1;
            }

            if (!int.TryParse(paddingOption.Value(), out var padding))
            {
                Console.WriteLine($"padding '{paddingOption.Value()}' is not integer");
                app.ShowHelp();
                return -1;
            }

            Console.WriteLine($"Model: {directory}");
            Console.WriteLine($"Padding: {padding}");

            _FaceRecognition = FaceRecognition.Create(directory);

            const string extractPath = "helen";
            var zips = new[]
            {
                new { Zip = "annotation.zip", IsImage = false, Directory = "annotation" },
                new { Zip = "helen_1.zip", IsImage = true, Directory = "helen_1" },
                new { Zip = "helen_2.zip", IsImage = true, Directory = "helen_2" },
                new { Zip = "helen_3.zip", IsImage = true, Directory = "helen_3" },
                new { Zip = "helen_4.zip", IsImage = true, Directory = "helen_4" },
                new { Zip = "helen_5.zip", IsImage = true, Directory = "helen_5" }
            };

            Directory.CreateDirectory(extractPath);

            foreach (var zip in zips)
            {
                if (!Directory.Exists(Path.Combine(extractPath, zip.Directory)))
                {
                    ZipFile.ExtractToDirectory(zip.Zip, extractPath);
                }
            }

            var annotation = zips.FirstOrDefault(arg => !arg.IsImage);
            var imageZips = zips.Where(arg => arg.IsImage).ToArray();
            if (annotation == null)
            {
                return -1;
            }

            var images = new List<Image>();
            foreach (var file in Directory.EnumerateFiles(Path.Combine(extractPath, annotation.Directory)))
            {
                Console.WriteLine($"Process: '{file}'");

                var txt = File.ReadAllLines(file);
                var filename = txt[0];
                var jpg = $"{filename}.jpg";
                foreach (var imageZip in imageZips)
                {
                    var found = false;
                    var path = Path.Combine(Path.Combine(extractPath, imageZip.Directory, jpg));
                    if (File.Exists(path))
                    {
                        found = true;

                        using (var fi = FaceRecognition.LoadImageFile(path))
                        {
                            var locations = _FaceRecognition.FaceLocations(fi, 1, Model.Hog).ToArray();
                            if (locations.Length != 1)
                            {
                                Console.WriteLine($"\t'{path}' has {locations.Length} faces.");
                            }
                            else
                            {
                                var location = locations.First();
                                var parts = new List<Part>();
                                for (var i = 1; i < txt.Length; i++)
                                {
                                    var tmp = txt[i].Split(',').Select(s => s.Trim()).Select(float.Parse).Select(s => (int)s).ToArray();
                                    parts.Add(new Part { X = tmp[0], Y = tmp[1], Name = $"{i - 1}" });
                                }

                                var image = new Image
                                {
                                    File = Path.Combine(imageZip.Directory, jpg),
                                    Box = new Box
                                    {
                                        Left = location.Left - padding,
                                        Top = location.Top - padding,
                                        Width = location.Right - location.Left + 1 + padding * 2,
                                        Height = location.Bottom - location.Top + 1 + padding * 2,
                                        Part = parts.ToArray()
                                    }
                                };

                                using (var bitmap = System.Drawing.Image.FromFile(path))
                                {
                                    var b = image.Box;
                                    using (var g = Graphics.FromImage(bitmap))
                                    {
                                        using (var p = new Pen(Color.Red, bitmap.Width / 400f))
                                            g.DrawRectangle(p, b.Left, b.Top, b.Width, b.Height);

                                        foreach (var part in b.Part)
                                        {
                                            g.FillEllipse(Brushes.GreenYellow, part.X, part.Y, 5, 5);
                                        }
                                    }

                                    var result = Path.Combine(extractPath, "Result");
                                    Directory.CreateDirectory(result);
                                    bitmap.Save(Path.Combine(result, jpg), ImageFormat.Jpeg);
                                }

                                images.Add(image);
                            }
                        }
                    }

                    if (found)
                    {
                        break;
                    }
                }
            }

            var dataset = new Dataset
            {
                Name = "helen dataset",
                Comment = "Created by Takuya Takeuchi.",
                Images = images.ToArray()
            };

            var settings = new XmlWriterSettings();
            using (var sw = new StreamWriter(Path.Combine(extractPath, "helen-dataset.xml"), false, new System.Text.UTF8Encoding(false)))
            using (var writer = XmlWriter.Create(sw, settings))
            {
                writer.WriteProcessingInstruction("xml-stylesheet", @"type=""text/xsl"" href=""image_metadata_stylesheet.xsl""");
                var serializer = new XmlSerializer(typeof(Dataset));
                serializer.Serialize(writer, dataset);
            }

            return 0;
        });
    });

    app.Command("train", command =>
    {
        command.HelpOption("-?|-h|--help");

        var threadOption = command.Option("-t|--threads", "number of threads", CommandOptionType.SingleValue);
        var xmlOption = command.Option("-x|--xml", "generated xml file from helen dataset", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            if (!xmlOption.HasValue())
            {
                Console.WriteLine("xml option is missing");
                app.ShowHelp();
                return -1;
            }

            if (!threadOption.HasValue())
            {
                Console.WriteLine("thread option is missing");
                app.ShowHelp();
                return -1;
            }

            var xmlFile = xmlOption.Value();
            if (!File.Exists(xmlFile))
            {
                Console.WriteLine($"'{xmlFile}' is not found");
                app.ShowHelp();
                return -1;
            }

            if (!uint.TryParse(threadOption.Value(), out var thread))
            {
                Console.WriteLine($"thread '{threadOption.Value()}' is not integer");
                app.ShowHelp();
                return -1;
            }

            Dlib.LoadImageDataset(xmlFile, out Array<Array2D<byte>> imagesTrain, out var facesTrain);

            using (var trainer = new ShapePredictorTrainer())
            {
                trainer.NumThreads = thread;
                trainer.BeVerbose();

                Console.WriteLine("Start training");
                using (var predictor = trainer.Train(imagesTrain, facesTrain))
                {
                    Console.WriteLine("Finish training");

                    var directory = Path.GetDirectoryName(xmlFile);
                    var output = Path.Combine(directory, $"{Path.GetFileNameWithoutExtension(xmlFile)}.dat");
                    ShapePredictor.Serialize(predictor, output);
                }
            }

            return 0;
        });
    });

    app.Command("demo", command =>
    {
        command.HelpOption("-?|-h|--help");

        var imageOption = command.Option("-i|--image", "test image file", CommandOptionType.SingleValue);
        var modelOption = command.Option("-m|--model", "model file", CommandOptionType.SingleValue);
        var directoryOption = command.Option("-d|--directory", "model files directory path", CommandOptionType.SingleValue);

        command.OnExecute(() =>
        {
            if (!imageOption.HasValue())
            {
                Console.WriteLine("image option is missing");
                app.ShowHelp();
                return -1;
            }

            if (!directoryOption.HasValue())
            {
                Console.WriteLine("directory option is missing");
                app.ShowHelp();
                return -1;
            }

            if (!modelOption.HasValue())
            {
                Console.WriteLine("model option is missing");
                app.ShowHelp();
                return -1;
            }

            var modelFile = modelOption.Value();
            if (!File.Exists(modelFile))
            {
                Console.WriteLine($"'{modelFile}' is not found");
                app.ShowHelp();
                return -1;
            }

            var imageFile = imageOption.Value();
            if (!File.Exists(imageFile))
            {
                Console.WriteLine($"'{imageFile}' is not found");
                app.ShowHelp();
                return -1;
            }

            var directory = directoryOption.Value();
            if (!Directory.Exists(directory))
            {
                Console.WriteLine($"'{directory}' is not found");
                app.ShowHelp();
                return -1;
            }

            _FaceRecognition = FaceRecognition.Create(directory);

            using (var predictor = ShapePredictor.Deserialize(modelFile))
            using (var image = FaceRecognition.LoadImageFile(imageFile))
            using (var mat = Dlib.LoadImageAsMatrix<RgbPixel>(imageFile))
            using (var bitmap = (Bitmap)System.Drawing.Image.FromFile(imageFile))
            using (var white = new Bitmap(bitmap.Width, bitmap.Height))
            using (var g = Graphics.FromImage(bitmap))
            using (var gw = Graphics.FromImage(white))
            {
                var loc = _FaceRecognition.FaceLocations(image).FirstOrDefault();
                if (loc == null)
                {
                    Console.WriteLine("No face is detected");
                    return 0;
                }

                var b = new DlibDotNet.Rectangle(loc.Left, loc.Top, loc.Right, loc.Bottom);
                var detection = predictor.Detect(mat, b);

                using (var p = new Pen(Color.Red, bitmap.Width / 200f))
                {
                    g.DrawRectangle(p, loc.Left, b.Top, b.Width, b.Height);
                    gw.Clear(Color.White);
                    gw.DrawRectangle(p, loc.Left, b.Top, b.Width, b.Height);
                }

                for (int i = 0, parts = (int)detection.Parts; i < parts; i++)
                {
                    var part = detection.GetPart((uint)i);
                    g.FillEllipse(Brushes.GreenYellow, part.X, part.Y, 15, 15);
                    gw.DrawString($"{i}", SystemFonts.DefaultFont, Brushes.Black, part.X, part.Y);
                }

                bitmap.Save("demo.jpg", ImageFormat.Jpeg);
                white.Save("white.jpg", ImageFormat.Jpeg);
            }

            return 0;
        });
    });

    app.Execute(args);
}
public static void CreateFeatureVectors()
{
    int faceCount = 0;
    float leftEyebrow, rightEyebrow, leftLip, rightLip, lipHeight, lipWidth;
    string output;

    if (currentDataType == Datatype.Testing)
    {
        output = testingOutput;
    }
    else
    {
        output = trainingOutput;
    }

    string[] dirs = Directory.GetFiles(currentFilePath, "*.*", SearchOption.AllDirectories);

    // set up the Dlib face detector
    using (var fd = Dlib.GetFrontalFaceDetector())
    // ... and the Dlib shape detector
    using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
    {
        string header = "leftEyebrow,rightEyebrow,leftLip,rightLip,lipWidth,lipHeight,label\n";

        // create the CSV file and fill in the first line with the header
        System.IO.File.WriteAllText(output, header);

        foreach (string dir in dirs)
        {
            // call function that sets the label based on what the filename contains
            string label = DetermineLabel(dir);

            // load input image, skipping anything that is not a png or jpg
            if (!(dir.EndsWith("png") || dir.EndsWith("jpg")))
            {
                continue;
            }
            var img = Dlib.LoadImage<RgbPixel>(dir);

            // find all faces in the image
            var faces = fd.Operator(img);

            // for each face draw over the facial landmarks
            foreach (var face in faces)
            {
                // write to the console displaying the progress and current emotion
                Form1.SetProgress(faceCount, dirs.Length - 1);

                // find the landmark points for this face
                var shape = sp.Detect(img, face);
                for (var i = 0; i < shape.Parts; i++)
                {
                    RgbPixel colour = new RgbPixel(255, 255, 255);
                    var point = shape.GetPart((uint)i);
                    var rect = new DlibDotNet.Rectangle(point);
                    Dlib.DrawRectangle(img, rect, color: colour, thickness: 2);
                }

                SetFormImage(img);

                // compute the six geometric features for this face
                leftEyebrow = CalculateLeftEyebrow(shape);
                rightEyebrow = CalculateRightEyebrow(shape);
                leftLip = CalculateLeftLip(shape);
                rightLip = CalculateRightLip(shape);
                lipWidth = CalculateLipWidth(shape);
                lipHeight = CalculateLipHeight(shape);

                // append the feature vector and its label to the CSV file
                using (System.IO.StreamWriter file = new System.IO.StreamWriter(output, true))
                {
                    file.WriteLine(leftEyebrow + "," + rightEyebrow + "," + leftLip + "," + rightLip + "," + lipWidth + "," + lipHeight + "," + label);
                }

                // increment count used for console output
                faceCount++;
            }
        }

        if (currentDataType == Datatype.Testing)
        {
            var testDataView = mlContext.Data.LoadFromTextFile<FeatureInputData>(output, hasHeader: true, separatorChar: ',');
            GenerateMetrics(testDataView);
        }

        Form1.HideImage();
    }
}
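The Calculate* feature helpers are not included above. As a hypothetical illustration of the kind of geometry such a helper might compute (not the original implementation), the sketch below averages the vertical distance from the left eyebrow landmarks (points 17-21 in the 68-point scheme) to the outer eye corner (point 36) and normalizes by the inter-ocular distance so the feature is scale-invariant:

private static float LeftEyebrowFeatureSketch(FullObjectDetection shape)
{
    // outer corners of the two eyes in the 68-point landmark scheme
    var leftEyeOuter = shape.GetPart(36);
    var rightEyeOuter = shape.GetPart(45);

    // average height of the eyebrow above the eye corner (y grows downward)
    var sum = 0.0;
    for (uint i = 17; i <= 21; i++)
        sum += leftEyeOuter.Y - shape.GetPart(i).Y;

    // normalize by the inter-ocular distance for scale invariance
    var dx = (double)(rightEyeOuter.X - leftEyeOuter.X);
    var dy = (double)(rightEyeOuter.Y - leftEyeOuter.Y);
    var interOcular = Math.Sqrt(dx * dx + dy * dy);

    return (float)(sum / 5.0 / interOcular);
}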