public void ResizeInMemoryImages()
{
    var mlContext = new MLContext(seed: 1);
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var dataObjects = InMemoryImage.LoadFromTsv(mlContext, dataFile, imageFolder);

    var dataView = mlContext.Data.LoadFromEnumerable<InMemoryImage>(dataObjects);
    var pipeline = mlContext.Transforms.ResizeImages("ResizedImage", 100, 100, nameof(InMemoryImage.LoadedImage));

    // Check that the output is resized, and that it didn't resize the original image object
    var model = pipeline.Fit(dataView);
    var resizedDV = model.Transform(dataView);
    var rowView = resizedDV.Preview().RowView;
    var resizedImage = (Bitmap)rowView.First().Values.Last().Value;
    Assert.Equal(100, resizedImage.Height);
    Assert.NotEqual(100, dataObjects[0].LoadedImage.Height);

    // Also check usage of the prediction engine,
    // and that the references to the original image objects aren't lost
    var predEngine = mlContext.Model.CreatePredictionEngine<InMemoryImage, InMemoryImageOutput>(model);

    for (int i = 0; i < dataObjects.Count(); i++)
    {
        var prediction = predEngine.Predict(dataObjects[i]);
        Assert.Equal(100, prediction.ResizedImage.Height);
        Assert.NotEqual(100, prediction.LoadedImage.Height);
        Assert.True(prediction.LoadedImage == dataObjects[i].LoadedImage);
        Assert.False(prediction.ResizedImage == dataObjects[i].LoadedImage);
    }

    // Check that the last in-memory image hasn't been disposed
    // by running ResizeImageTransformer (see https://github.com/dotnet/machinelearning/issues/4126)
    bool disposed = false;
    try
    {
        int i = dataObjects.Last().LoadedImage.Height;
    }
    catch
    {
        disposed = true;
    }
    Assert.False(disposed, "The last in-memory image had been disposed by running ResizeImageTransformer");
}
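// For context, a minimal sketch of the companion types the test above assumes.
// This is not the test suite's exact definition: the TSV layout (imagePath<TAB>label),
// the ImageType dimensions, and the helper body are all illustrative.
// Requires Microsoft.ML.Data (LoadColumn), Microsoft.ML.Transforms.Image (ImageType),
// and System.Drawing (Bitmap).
private class InMemoryImage
{
    [LoadColumn(0)]
    public string Label;

    [ImageType(100, 100)] // hypothetical dimensions; declares the type of the in-memory Bitmap column
    public Bitmap LoadedImage;

    // Hypothetical helper matching the call site above; mlContext is unused here
    // but kept so the signature lines up with the test.
    public static List<InMemoryImage> LoadFromTsv(MLContext mlContext, string dataFile, string imageFolder)
    {
        return File.ReadLines(dataFile)
            .Select(line => line.Split('\t'))
            .Select(cols => new InMemoryImage
            {
                // Assumed column order: image path first, label second.
                LoadedImage = new Bitmap(Path.Combine(imageFolder, cols[0])),
                Label = cols[1]
            })
            .ToList();
    }
}

private class InMemoryImageOutput : InMemoryImage
{
    [ImageType(100, 100)] // matches the 100x100 resize requested by the pipeline
    public Bitmap ResizedImage;
}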
public static IEnumerable<byte[]> DetectAndParseFaces(byte[] imagedata, int minSize, int marginInPercent)
{
    using (Devmasters.Imaging.InMemoryImage image = new InMemoryImage(imagedata))
    {
        if (marginInPercent > 99 || marginInPercent < 0)
        {
            throw new ArgumentOutOfRangeException(nameof(marginInPercent), "must be between 0 and 99");
        }

        List<byte[]> facesImg = new List<byte[]>();

        // CascadeClassifier and Emgu CV images are IDisposable, so wrap them in usings
        using (CascadeClassifier _cascadeClassifier = new CascadeClassifier(Lib.Init.WebAppDataPath + "haarcascade_frontalface_default.xml"))
        using (Image<Bgr, byte> img = new Image<Bgr, byte>(image.Image))
        using (Image<Gray, byte> grayframe = img.Convert<Gray, byte>())
        {
            // The actual face detection happens here
            var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.1, 10, Size.Empty);

            foreach (var face in faces)
            {
                // Skip detections smaller than the requested minimum size
                if (face.Width < minSize || face.Height < minSize)
                {
                    continue;
                }

                // Grow the detected rectangle by marginInPercent, centered on the face,
                // then clamp it to the image bounds
                int changeX = (int)Math.Round(face.Width * ((double)marginInPercent / 100D));
                int changeY = (int)Math.Round(face.Height * ((double)marginInPercent / 100D));

                int newX = Math.Max(0, face.X - changeX / 2);
                int newY = Math.Max(0, face.Y - changeY / 2);
                int newWidth = face.Width + changeX;
                int newHeight = face.Height + changeY;

                if (newX + newWidth > image.Image.Width)
                {
                    newWidth = image.Image.Width - newX;
                }
                if (newY + newHeight > image.Image.Height)
                {
                    newHeight = image.Image.Height - newY;
                }

                Rectangle newFaceRect = new Rectangle(newX, newY, newWidth, newHeight);
                //img.Draw(newFaceRect, new Bgr(Color.BurlyWood), 3); // optionally highlight the detected face with a box

                // Re-encode the original image, crop it to the expanded face rectangle,
                // and store the crop as a JPEG byte array
                using (System.IO.MemoryStream ms = new System.IO.MemoryStream())
                {
                    image.SaveAsJPEG(ms, 95);
                    using (InMemoryImage imi = new InMemoryImage(ms.ToArray()))
                    {
                        imi.Crop(newFaceRect);
                        using (System.IO.MemoryStream lms = new System.IO.MemoryStream())
                        {
                            imi.SaveAsJPEG(lms, 95);
                            facesImg.Add(lms.ToArray());
                        }
                    }
                }
            }
        }

        return facesImg;
    }
}
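// A hedged usage sketch for the method above. The input file name, margin,
// minimum size, and output naming are all illustrative; it assumes
// Lib.Init.WebAppDataPath already points at the folder containing
// haarcascade_frontalface_default.xml.
public static void SaveDetectedFaces()
{
    byte[] photo = System.IO.File.ReadAllBytes("photo.jpg"); // hypothetical input file

    // Keep only faces of at least 60x60 px and pad each crop by 20% of the face size.
    int faceIndex = 0;
    foreach (byte[] faceJpeg in DetectAndParseFaces(photo, minSize: 60, marginInPercent: 20))
    {
        System.IO.File.WriteAllBytes($"face_{faceIndex++}.jpg", faceJpeg);
    }
}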