public void TensorFlowTransformInceptionTest()
{
    // Runs the Inception TensorFlow graph over loaded, resized and
    // pixel-extracted images, then drains the cursor so both the input
    // and output getters are exercised for every row.
    var modelLocation = @"C:\models\TensorFlow\tensorflow_inception_graph.pb";
    var mlContext = new MLContext(seed: 1, conc: 1);
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);

    var loader = mlContext.CreateLoader("Text{col=ImagePath:TX:0 col=Name:TX:1}", new MultiFileSource(dataFile));
    var loadedImages = new ImageLoadingTransformer(mlContext, imageFolder, ("ImageReal", "ImagePath")).Transform(loader);
    var resized = new ImageResizingTransformer(mlContext, "ImageCropped", 224, 224, "ImageReal").Transform(loadedImages);
    var extracted = new ImagePixelExtractingTransformer(mlContext, "input", "ImageCropped").Transform(resized);
    var scored = new TensorFlowTransformer(mlContext, modelLocation, "softmax2_pre_activation", "input").Transform(extracted);

    scored.Schema.TryGetColumnIndex("input", out int inputColIndex);
    scored.Schema.TryGetColumnIndex("softmax2_pre_activation", out int outputColIndex);

    using (var cursor = scored.GetRowCursor(scored.Schema["input"], scored.Schema["softmax2_pre_activation"]))
    {
        var outputGetter = cursor.GetGetter<VBuffer<float>>(outputColIndex);
        var inputGetter = cursor.GetGetter<VBuffer<float>>(inputColIndex);
        var outputValues = default(VBuffer<float>);
        var inputValues = default(VBuffer<float>);
        while (cursor.MoveNext())
        {
            inputGetter(ref inputValues);
            outputGetter(ref outputValues);
        }
    }
}
public void TestSaveImages()
{
    // Loads images, resizes them to 100x100 with IsoPad, and saves each
    // cropped bitmap to the output directory as a .cropped.jpg file.
    var env = new MLContext();
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var loader = TextLoader.Create(env, new TextLoader.Options()
    {
        Columns = new[]
        {
            new TextLoader.Column("ImagePath", DataKind.TX, 0),
            new TextLoader.Column("Name", DataKind.TX, 1),
        }
    }, new MultiFileSource(dataFile));

    var loadedImages = new ImageLoadingTransformer(env, imageFolder, ("ImageReal", "ImagePath")).Transform(loader);
    var resized = new ImageResizingTransformer(env, "ImageCropped", 100, 100, "ImageReal", ImageResizingEstimator.ResizingKind.IsoPad).Transform(loadedImages);

    resized.Schema.TryGetColumnIndex("ImagePath", out int pathIndex);
    resized.Schema.TryGetColumnIndex("ImageCropped", out int croppedIndex);

    using (var cursor = resized.GetRowCursorForAllColumns())
    {
        var pathGetter = cursor.GetGetter<ReadOnlyMemory<char>>(pathIndex);
        var croppedGetter = cursor.GetGetter<Bitmap>(croppedIndex);
        ReadOnlyMemory<char> imagePath = default;
        Bitmap croppedImage = default;
        while (cursor.MoveNext())
        {
            pathGetter(ref imagePath);
            croppedGetter(ref croppedImage);
            Assert.NotNull(croppedImage);
            // Name the saved file after the source image.
            var fileToSave = GetOutputPath(Path.GetFileNameWithoutExtension(imagePath.ToString()) + ".cropped.jpg");
            croppedImage.Save(fileToSave, System.Drawing.Imaging.ImageFormat.Jpeg);
        }
    }
    Done();
}
public void TensorFlowTransformCifarInvalidShape()
{
    // Feeds 28x28 pixel data into the CIFAR frozen model and verifies the
    // TensorFlow transform rejects the mismatched input shape.
    var modelLocation = "cifar_model/frozen_model.pb";
    var mlContext = new MLContext(seed: 1, conc: 1);
    var imageHeight = 28;
    var imageWidth = 28;
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var data = mlContext.Data.ReadFromTextFile(dataFile, columns: new[]
    {
        new TextLoader.Column("ImagePath", DataKind.TX, 0),
        new TextLoader.Column("Name", DataKind.TX, 1),
    });
    var images = new ImageLoadingTransformer(mlContext, imageFolder, ("ImageReal", "ImagePath")).Transform(data);
    var cropped = new ImageResizingTransformer(mlContext, "ImageCropped", imageWidth, imageHeight, "ImageReal").Transform(images);
    var pixels = new ImagePixelExtractingTransformer(mlContext, "Input", "ImageCropped").Transform(cropped);

    // Fix: use xUnit's exception assertion instead of a hand-rolled
    // try/catch-all + bool flag. ThrowsAny matches the original bare `catch`
    // (any exception type) and produces a clearer failure when nothing throws.
    Assert.ThrowsAny<Exception>(
        () => new TensorFlowTransformer(mlContext, modelLocation, "Output", "Input").Transform(pixels));
}
public void TestBackAndForthConversionWithoutAlphaNoInterleaveNoOffset()
{
    // Round-trips images through pixel extraction and back to bitmaps (no
    // alpha, no interleave, no offset), saves and reloads the pipeline, then
    // checks every restored pixel matches the cropped original channel-for-channel.
    IHostEnvironment env = new MLContext();
    const int imageHeight = 100;
    const int imageWidth = 130;
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var loader = TextLoader.Create(env, new TextLoader.Options()
    {
        Columns = new[]
        {
            new TextLoader.Column("ImagePath", DataKind.String, 0),
            new TextLoader.Column("Name", DataKind.String, 1),
        }
    }, new MultiFileSource(dataFile));

    var loadedImages = new ImageLoadingTransformer(env, imageFolder, ("ImageReal", "ImagePath")).Transform(loader);
    var resized = new ImageResizingTransformer(env, "ImageCropped", imageWidth, imageHeight, "ImageReal").Transform(loadedImages);
    var pixels = new ImagePixelExtractingTransformer(env, "ImagePixels", "ImageCropped").Transform(resized);
    IDataView backToBitmaps = new VectorToImageConvertingTransformer(env, "ImageRestored", imageHeight, imageWidth, "ImagePixels").Transform(pixels);

    // Persist the pipeline and reload it so the save/load path is covered too.
    var fname = nameof(TestBackAndForthConversionWithoutAlphaNoInterleaveNoOffset) + "_model.zip";
    var fh = env.CreateOutputFile(fname);
    using (var ch = env.Start("save"))
        TrainUtils.SaveModel(env, ch, fh, null, new RoleMappedData(backToBitmaps));
    backToBitmaps = ModelFileUtils.LoadPipeline(env, fh.OpenReadStream(), new MultiFileSource(dataFile));
    DeleteOutputPath(fname);

    backToBitmaps.Schema.TryGetColumnIndex("ImageRestored", out int restoredIndex);
    backToBitmaps.Schema.TryGetColumnIndex("ImageCropped", out int croppedIndex);

    using (var cursor = backToBitmaps.GetRowCursorForAllColumns())
    {
        var restoredGetter = cursor.GetGetter<Bitmap>(restoredIndex);
        var croppedGetter = cursor.GetGetter<Bitmap>(croppedIndex);
        Bitmap restoredBitmap = default;
        Bitmap croppedBitmap = default;
        while (cursor.MoveNext())
        {
            restoredGetter(ref restoredBitmap);
            Assert.NotNull(restoredBitmap);
            croppedGetter(ref croppedBitmap);
            Assert.NotNull(croppedBitmap);
            for (int x = 0; x < imageWidth; x++)
            {
                for (int y = 0; y < imageHeight; y++)
                {
                    // Alpha is not extracted in this configuration, so only
                    // R, G and B are expected to survive the round trip.
                    var expected = croppedBitmap.GetPixel(x, y);
                    var actual = restoredBitmap.GetPixel(x, y);
                    Assert.True(expected.R == actual.R && expected.G == actual.G && expected.B == actual.B);
                }
            }
        }
        Done();
    }
}
public void TestGreyscaleTransformImages()
{
    // Converts images to greyscale, round-trips the pipeline through
    // save/load, and verifies each pixel has equal R, G and B channels.
    // Skipped on macOS — NOTE(review): presumably due to System.Drawing
    // platform limitations there; confirm against CI configuration.
    if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
    {
        return;
    }
    IHostEnvironment env = new MLContext();
    var imageHeight = 150;
    var imageWidth = 100;
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var data = TextLoader.Create(env, new TextLoader.Options()
    {
        Columns = new[]
        {
            new TextLoader.Column("ImagePath", DataKind.String, 0),
            new TextLoader.Column("Name", DataKind.String, 1),
        }
    }, new MultiFileSource(dataFile));
    var images = new ImageLoadingTransformer(env, imageFolder, ("ImageReal", "ImagePath")).Transform(data);
    var cropped = new ImageResizingTransformer(env, "ImageCropped", imageWidth, imageHeight, "ImageReal").Transform(images);
    IDataView grey = new ImageGrayscalingTransformer(env, ("ImageGrey", "ImageCropped")).Transform(cropped);

    // Save and reload the pipeline so serialization of the greyscale
    // transform is covered as well.
    var fname = nameof(TestGreyscaleTransformImages) + "_model.zip";
    var fh = env.CreateOutputFile(fname);
    using (var ch = env.Start("save"))
        TrainUtils.SaveModel(env, ch, fh, null, new RoleMappedData(grey));
    grey = ModelFileUtils.LoadPipeline(env, fh.OpenReadStream(), new MultiFileSource(dataFile));
    DeleteOutputPath(fname);

    // Fix: removed the dead `TryGetColumnIndex("ImageGrey", out int greyColumn)`
    // call — its result was never used; the getter below resolves the column
    // directly from the schema.
    using (var cursor = grey.GetRowCursorForAllColumns())
    {
        var bitmapGetter = cursor.GetGetter<Bitmap>(grey.Schema["ImageGrey"]);
        Bitmap bitmap = default;
        while (cursor.MoveNext())
        {
            bitmapGetter(ref bitmap);
            Assert.NotNull(bitmap);
            for (int x = 0; x < imageWidth; x++)
            {
                for (int y = 0; y < imageHeight; y++)
                {
                    var pixel = bitmap.GetPixel(x, y);
                    // greyscale image has same values for R,G and B
                    Assert.True(pixel.R == pixel.G && pixel.G == pixel.B);
                }
            }
        }
    }
    Done();
}
public static CommonOutputs.TransformOutput ImageResizer(IHostEnvironment env, ImageResizingTransformer.Arguments input)
{
    // Entry-point wrapper: validate the arguments, create the image resizing
    // transform, and package it together with its transformed output data.
    var host = EntryPointUtils.CheckArgsAndCreateHost(env, "ImageResizerTransform", input);
    var transformed = ImageResizingTransformer.Create(host, input, input.Data);
    return new CommonOutputs.TransformOutput()
    {
        Model = new TransformModelImpl(host, transformed, input.Data),
        OutputData = transformed
    };
}
[ConditionalFact(typeof(Environment), nameof(Environment.Is64BitProcess))] // TensorFlow is 64-bit only
public void TensorFlowTransformCifarSavedModel()
{
    // Scores the test images through the CIFAR SavedModel. The required input
    // dimensions are read from the model's input schema rather than hard-coded.
    var modelLocation = "cifar_saved_model";
    var mlContext = new MLContext(seed: 1, conc: 1);
    var tensorFlowModel = TensorFlowUtils.LoadTensorFlowModel(mlContext, modelLocation);
    var schema = tensorFlowModel.GetInputSchema();
    Assert.True(schema.TryGetColumnIndex("Input", out int column));
    var type = (VectorType)schema[column].Type;
    var imageHeight = type.Dimensions[0];
    var imageWidth = type.Dimensions[1];

    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var data = mlContext.Data.ReadFromTextFile(dataFile, columns: new[]
    {
        new TextLoader.Column("ImagePath", DataKind.TX, 0),
        new TextLoader.Column("Name", DataKind.TX, 1),
    });
    var images = new ImageLoadingTransformer(mlContext, imageFolder, ("ImageReal", "ImagePath")).Transform(data);
    var cropped = new ImageResizingTransformer(mlContext, "ImageCropped", imageWidth, imageHeight, "ImageReal").Transform(images);
    var pixels = new ImagePixelExtractingTransformer(mlContext, "Input", "ImageCropped", interleave: true).Transform(cropped);
    IDataView trans = new TensorFlowTransformer(mlContext, tensorFlowModel, "Output", "Input").Transform(pixels);

    // Fix: assert the lookup result, matching how the "Input" lookup above is
    // checked, instead of silently ignoring a failed column resolution.
    Assert.True(trans.Schema.TryGetColumnIndex("Output", out int output));
    using (var cursor = trans.GetRowCursorForAllColumns())
    {
        var buffer = default(VBuffer<float>);
        var getter = cursor.GetGetter<VBuffer<float>>(output);
        var numRows = 0;
        while (cursor.MoveNext())
        {
            getter(ref buffer);
            // One score per CIFAR class.
            Assert.Equal(10, buffer.Length);
            numRows += 1;
        }
        // images.tsv is expected to supply exactly 4 rows.
        Assert.Equal(4, numRows);
    }
}
public void TensorFlowTransformObjectDetectionTest()
{
    // Runs the SSD MobileNet object-detection graph and drains every
    // detection output (boxes, scores, detection count, classes) per row.
    var modelLocation = @"C:\models\TensorFlow\ssd_mobilenet_v1_coco_2018_01_28\frozen_inference_graph.pb";
    var mlContext = new MLContext(seed: 1, conc: 1);
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var loader = mlContext.CreateLoader("Text{col=ImagePath:TX:0 col=Name:TX:1}", new MultiFileSource(dataFile));
    var loadedImages = new ImageLoadingTransformer(mlContext, imageFolder, ("ImageReal", "ImagePath")).Transform(loader);
    var resized = new ImageResizingTransformer(mlContext, "ImageCropped", 32, 32, "ImageReal").Transform(loadedImages);
    // asFloat: false keeps the extracted pixels as bytes for image_tensor.
    var pixels = new ImagePixelExtractingTransformer(mlContext, "image_tensor", "ImageCropped", asFloat: false).Transform(resized);
    var scored = new TensorFlowTransformer(
        mlContext,
        modelLocation,
        new[] { "detection_boxes", "detection_scores", "num_detections", "detection_classes" },
        new[] { "image_tensor" }).Transform(pixels);

    scored.Schema.TryGetColumnIndex("image_tensor", out int inputIndex);
    scored.Schema.TryGetColumnIndex("detection_boxes", out int boxesIndex);
    scored.Schema.TryGetColumnIndex("detection_scores", out int scoresIndex);
    scored.Schema.TryGetColumnIndex("num_detections", out int numIndex);
    scored.Schema.TryGetColumnIndex("detection_classes", out int classesIndex);

    using (var cursor = scored.GetRowCursor(
        scored.Schema["image_tensor"],
        scored.Schema["detection_boxes"],
        scored.Schema["detection_scores"],
        scored.Schema["detection_classes"],
        scored.Schema["num_detections"]))
    {
        var getInput = cursor.GetGetter<VBuffer<byte>>(inputIndex);
        var getBoxes = cursor.GetGetter<VBuffer<float>>(boxesIndex);
        var getScores = cursor.GetGetter<VBuffer<float>>(scoresIndex);
        var getNum = cursor.GetGetter<VBuffer<float>>(numIndex);
        var getClasses = cursor.GetGetter<VBuffer<float>>(classesIndex);
        var outputValues = default(VBuffer<float>);
        var inputValues = default(VBuffer<byte>);
        while (cursor.MoveNext())
        {
            getInput(ref inputValues);
            getBoxes(ref outputValues);
            getScores(ref outputValues);
            getNum(ref outputValues);
            getClasses(ref outputValues);
        }
    }
}
public void TestBackAndForthConversionWithDifferentOrder()
{
    // Round-trips images through pixel extraction and back using the ABRG
    // channel order on both sides, saves/reloads the pipeline, and verifies
    // each restored pixel equals the cropped original.
    // Skipped on macOS — NOTE(review): presumably a System.Drawing platform
    // limitation; confirm against CI configuration.
    if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
    {
        return;
    }
    IHostEnvironment env = new MLContext();
    const int imageHeight = 100;
    const int imageWidth = 130;
    var dataFile = GetDataPath("images/images.tsv");
    var imageFolder = Path.GetDirectoryName(dataFile);
    var data = TextLoader.Create(env, new TextLoader.Options()
    {
        Columns = new[]
        {
            new TextLoader.Column("ImagePath", DataKind.String, 0),
            new TextLoader.Column("Name", DataKind.String, 1),
        }
    }, new MultiFileSource(dataFile));
    var images = new ImageLoadingTransformer(env, imageFolder, ("ImageReal", "ImagePath")).Transform(data);
    var cropped = new ImageResizingTransformer(env, "ImageCropped", imageWidth, imageHeight, "ImageReal").Transform(images);
    // Extract and restore with the same non-default channel order (ABRG) so a
    // mismatch in order handling would corrupt the round trip.
    var pixels = new ImagePixelExtractingTransformer(env, "ImagePixels", "ImageCropped", ImagePixelExtractingEstimator.ColorBits.All, orderOfExtraction: ImagePixelExtractingEstimator.ColorsOrder.ABRG).Transform(cropped);
    IDataView backToBitmaps = new VectorToImageConvertingTransformer(env, "ImageRestored", imageHeight, imageWidth, "ImagePixels", ImagePixelExtractingEstimator.ColorBits.All, orderOfColors: ImagePixelExtractingEstimator.ColorsOrder.ABRG).Transform(pixels);

    // Persist the pipeline and reload it so the save/load path is covered.
    var fname = nameof(TestBackAndForthConversionWithDifferentOrder) + "_model.zip";
    var fh = env.CreateOutputFile(fname);
    using (var ch = env.Start("save"))
        TrainUtils.SaveModel(env, ch, fh, null, new RoleMappedData(backToBitmaps));
    backToBitmaps = ModelFileUtils.LoadPipeline(env, fh.OpenReadStream(), new MultiFileSource(dataFile));
    DeleteOutputPath(fname);

    using (var cursor = backToBitmaps.GetRowCursorForAllColumns())
    {
        var bitmapGetter = cursor.GetGetter<Bitmap>(backToBitmaps.Schema["ImageRestored"]);
        Bitmap restoredBitmap = default;
        var bitmapCropGetter = cursor.GetGetter<Bitmap>(backToBitmaps.Schema["ImageCropped"]);
        Bitmap croppedBitmap = default;
        while (cursor.MoveNext())
        {
            bitmapGetter(ref restoredBitmap);
            Assert.NotNull(restoredBitmap);
            bitmapCropGetter(ref croppedBitmap);
            Assert.NotNull(croppedBitmap);
            for (int x = 0; x < imageWidth; x++)
            {
                for (int y = 0; y < imageHeight; y++)
                {
                    var c = croppedBitmap.GetPixel(x, y);
                    var r = restoredBitmap.GetPixel(x, y);
                    // Fix: collapsed the redundant pair
                    //   `if (c != r) { Assert.False(true); }` + `Assert.True(c == r);`
                    // into one Assert.Equal, which also reports both color
                    // values when the comparison fails.
                    Assert.Equal(c, r);
                }
            }
        }
    }
    Done();
}