/// <summary>
/// Verifies that each configured noise type (Gaussian and impulse) changes the
/// loaded image and that the noisy result can be saved to disk, for every
/// image in the test folder.
/// </summary>
public void Noise_Is_Applied()
{
    foreach (FileInfo file in TestImages.GetTestImagesFromTestFolder(""))
    {
        // Base output name without the original extension; the noise type
        // name is appended per filter below.
        string outputFileName = $"{OutputPath}{file.Name.Substring(0, file.Name.LastIndexOf('.'))}";

        using (var imageLoader = new ImageLoader())
        {
            var noises = new List<INoise>
            {
                new GaussNoise(new Normal(0, 0.25)),
                new ImpulseNoise(0.025, 0.025)
            };

            imageLoader.Load(file.FullName);
            Image image = imageLoader.Image;

            foreach (INoise noise in noises)
            {
                string noiseFileName = $"{outputFileName}_{noise.GetType().Name.ToLower()}{file.Extension}";

                imageLoader.AddNoise(noise);
                imageLoader.Save(noiseFileName);

                Assert.IsTrue(File.Exists(noiseFileName));
                ImageAssert.AssertImagesAreDifferent(image, imageLoader.Image);

                // Fix: clean up the per-noise output file so repeated runs start
                // clean, matching Image_Is_Saved_To_File. The old commented-out
                // delete targeted "outputFileName", a file that is never created.
                File.Delete(noiseFileName);

                // Restore the unmodified image before applying the next noise type.
                imageLoader.Image = image;
            }
        }
    }
}
/// <summary>
/// Documentation example: builds an integral image (including squared and
/// tilted representations) from the Lena test picture and checks the
/// rectangular region sums against known values.
/// </summary>
public void lena_test()
{
    string localPath = NUnit.Framework.TestContext.CurrentContext.TestDirectory;

    #region doc_lena
    // In this example, we will compute an integral image
    // representation of Lena Söderberg's famous picture:
    TestImages testImages = new TestImages(path: localPath);
    Bitmap lena = testImages["lena.bmp"]; // get the image

    // Create a new Integral Image (squared and tilted) from Lena's picture:
    IntegralImage2 ii = IntegralImage2.FromBitmap(lena, computeTilted: true);

    // Let's say we would like to get the summed area in the rectangular region
    // delimited by pixel (34, 50) until pixels (60, 105). This is equivalent to
    // the region under the rectangle (34, 50, 34+60, 50+105) = (34, 50, 94, 155):
    long sum = ii.GetSum(34, 50, 94, 155); // this is the sum of values (1760032)

    // Now let's say we would like to get the squared sum and tilted sum as well:
    long ssum = ii.GetSum2(34, 50, 94, 155); // this is the sum of squared values (229508896)
    long tsum = ii.GetSumT(34, 50, 94, 155); // this is the sum of tilted values (-593600)
    #endregion

    // Pin the documented values so the example above stays correct.
    Assert.AreEqual(1760032, sum);
    Assert.AreEqual(229508896, ssum);
    Assert.AreEqual(-593600, tsum);
}
/// <summary>
/// Documentation example: extracts SURF interest-point descriptors from the
/// Lena image and verifies the descriptor count and a checksum of the
/// raw feature vectors.
/// </summary>
public void doc_test()
{
    string localPath = TestContext.CurrentContext.TestDirectory;

    #region doc_apply
    // Let's load an example image, such as Lena,
    // from a standard dataset of example images:
    var images = new TestImages(path: localPath);
    Bitmap lena = images["lena.bmp"];

    // Create a new SURF with the default parameter values:
    var surf = new SpeededUpRobustFeaturesDetector(threshold: 0.0002f, octaves: 5, initial: 2);

    // Use it to extract the SURF point descriptors from the Lena image:
    List<SpeededUpRobustFeaturePoint> descriptors = surf.ProcessImage(lena);

    // We can obtain the actual double[] descriptors using
    double[][] features = descriptors.Apply(d => d.Descriptor);

    // Now those descriptors can be used to represent the image itself, such
    // as for example, in the Bag-of-Visual-Words approach for classification.
    #endregion

    Assert.AreEqual(523, descriptors.Count);

    // Checksum over every descriptor component, with a tolerance for
    // floating-point accumulation order.
    double total = features.Sum(x => x.Sum());
    Assert.AreEqual(2340.9402310500964, total, 1e-10);
}
/// <summary>
/// Documentation example: binarizes the Lena picture with Niblack's threshold
/// and applies the 3x3 binary dilation filter, verifying that a result of
/// the same dimensions is produced.
/// </summary>
public void BinaryDilation3x3Test1()
{
    string basePath = NUnit.Framework.TestContext.CurrentContext.TestDirectory;

    #region doc_binary_dilation_3x3
    // Let's start with one of the default
    // color test images in the framework:
    var test = new TestImages(basePath);

    // Let's get Lena's picture
    Bitmap bmp = test["lena.bmp"];

    // And transform it to a binary mask
    // using Niblack's threshold class
    var niblack = new NiblackThreshold();
    Bitmap binary = niblack.Apply(bmp);

    // The result can be seen below:
    // ImageBox.Show(binary);

    // Now, let's finally apply the dilation
    // filter to the binarized image below:
    var dil3x3 = new BinaryDilation3x3();
    Bitmap result = dil3x3.Apply(binary);

    // The result can be seen below:
    // ImageBox.Show(result);
    #endregion

    // Fix: the test previously contained no assertions at all (only
    // commented-out Save calls), so it could never fail. At minimum the
    // filter must produce an image with the input's dimensions.
    Assert.IsNotNull(result);
    Assert.AreEqual(binary.Width, result.Width);
    Assert.AreEqual(binary.Height, result.Height);
}
/// <summary>
/// Ensures the bundled "lena.bmp" test image loads as a grayscale bitmap.
/// </summary>
public void test_images_test()
{
    string localPath = TestContext.CurrentContext.TestDirectory;

    var dataset = new TestImages(path: localPath);
    Bitmap lena = dataset["lena.bmp"];

    Assert.IsTrue(lena.IsGrayscale());
}
/// <summary>
/// Runs the trainer's prediction over all test images and updates
/// <see cref="Score"/> with the fraction of correct predictions.
/// </summary>
private void PredictAll()
{
    _trainer.PredictAll(TestImages);

    // Count(predicate) replaces Where(...).Count() — same result without the
    // intermediate filtered enumeration. The cast to double forces
    // floating-point (not integer) division.
    // NOTE(review): assumes TestImageCount > 0 — confirm callers guarantee this,
    // otherwise Score becomes Infinity/NaN.
    Score = (double)TestImages.Count(i => i.PredictionCorrect) / TestImageCount;
}
/// <summary>
/// Verifies that ImageLoader.Load records the source path and produces a
/// non-null image for every file in the test image folder.
/// </summary>
public void Image_Is_Loaded_From_File()
{
    foreach (var file in TestImages.GetTestImagesFromTestFolder(""))
    {
        using (var imageLoader = new ImageLoader())
        {
            imageLoader.Load(file.FullName);

            // Fix: NUnit's Assert.AreEqual signature is (expected, actual);
            // the original call had the arguments reversed, which produces
            // misleading failure messages.
            Assert.AreEqual(file.FullName, imageLoader.ImagePath);
            Assert.NotNull(imageLoader.Image);
        }
    }
}
/// <summary>
/// Round-trips every test image through ImageLoader.Save and confirms the
/// output file appears on disk, cleaning it up afterwards.
/// </summary>
public void Image_Is_Saved_To_File()
{
    foreach (var file in TestImages.GetTestImagesFromTestFolder(""))
    {
        string savedPath = $"{OutputPath}{file.Name}";

        using (var loader = new ImageLoader())
        {
            loader.Load(file.FullName);
            loader.Save(savedPath);

            Assert.IsTrue(File.Exists(savedPath));

            // Remove the artifact so subsequent runs start clean.
            File.Delete(savedPath);
        }
    }
}
/// <summary>
/// Documentation example: segments the "airplane.png" test image by clustering
/// pixel colors with Mean-Shift and rebuilding the image from the cluster
/// modes. No assertions — this test only exercises the documented example.
/// </summary>
public void meanShift()
{
    string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "kmeans");
    Directory.CreateDirectory(basePath);

    #region doc_meanshift
    // Load a test image (shown in a picture box below)
    var sampleImages = new TestImages(path: basePath);
    Bitmap image = sampleImages.GetImage("airplane.png");

    // ImageBox.Show("Original", image).Hold();

    // Create converters to convert between Bitmap images and double[] arrays
    var imageToArray = new ImageToArray(min: -1, max: +1);
    var arrayToImage = new ArrayToImage(image.Width, image.Height, min: -1, max: +1);

    // Transform the image into an array of pixel values
    double[][] pixels; imageToArray.Convert(image, out pixels);

    // Create a MeanShift algorithm using given bandwidth
    // and a Gaussian density kernel as kernel function.
    MeanShift meanShift = new MeanShift()
    {
        Kernel = new GaussianKernel(3),
        Bandwidth = 0.06,

        // We will compute the mean-shift algorithm until the means
        // change less than 0.05 between two iterations of the algorithm
        Tolerance = 0.05,
        MaxIterations = 10
    };

    // Learn the clusters from the data
    var clusters = meanShift.Learn(pixels);

    // Use clusters to decide class labels
    int[] labels = clusters.Decide(pixels);

    // Replace every pixel with its corresponding centroid
    double[][] replaced = pixels.Apply((x, i) => clusters.Modes[labels[i]]);

    // Retrieve the resulting image (shown in a picture box)
    Bitmap result; arrayToImage.Convert(replaced, out result);

    // ImageBox.Show("Mean-Shift clustering", result).Hold();
    #endregion
}
/// <summary>
/// Documentation example: segments the "airplane.png" test image by clustering
/// pixel colors with K-Means (k = 5) and rebuilding the image from the
/// cluster centroids. No assertions — this test only exercises the example.
/// </summary>
public void kmeans()
{
    string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "kmeans");
    Directory.CreateDirectory(basePath);

    #region doc_kmeans
    // Load a test image (shown in a picture box below)
    var sampleImages = new TestImages(path: basePath);
    Bitmap image = sampleImages.GetImage("airplane.png");

    // ImageBox.Show("Original", image).Hold();

    // Create converters to convert between Bitmap images and double[] arrays
    var imageToArray = new ImageToArray(min: -1, max: +1);
    var arrayToImage = new ArrayToImage(image.Width, image.Height, min: -1, max: +1);

    // Transform the image into an array of pixel values
    double[][] pixels; imageToArray.Convert(image, out pixels);

    // Create a K-Means algorithm using given k and a
    // square Euclidean distance as distance metric.
    KMeans kmeans = new KMeans(k: 5)
    {
        Distance = new SquareEuclidean(),

        // We will compute the K-Means algorithm until cluster centroids
        // change less than 0.05 between two iterations of the algorithm
        // (the comment previously said 0.5, which did not match the code).
        Tolerance = 0.05
    };

    // Learn the clusters from the data
    var clusters = kmeans.Learn(pixels);

    // Use clusters to decide class labels
    int[] labels = clusters.Decide(pixels);

    // Replace every pixel with its corresponding centroid
    double[][] replaced = pixels.Apply((x, i) => clusters.Centroids[labels[i]]);

    // Retrieve the resulting image (shown in a picture box)
    Bitmap result; arrayToImage.Convert(replaced, out result);

    // ImageBox.Show("k-Means clustering", result).Hold();
    #endregion
}
/// <summary>
/// Form load handler: configures all picture boxes to zoom-scale their
/// contents and loads the source bitmaps from disk into the two source boxes.
/// </summary>
private void Form1_Load(object sender, EventArgs e)
{
    sourcebox1.SizeMode = PictureBoxSizeMode.Zoom;
    sourcebox2.SizeMode = PictureBoxSizeMode.Zoom;
    resultbox1.SizeMode = PictureBoxSizeMode.Zoom;
    resultbox2.SizeMode = PictureBoxSizeMode.Zoom;

    // Fix: removed the unused local `TestImages t = new TestImages();`
    // — it was never referenced.

    // NOTE(review): both paths point at the same file ("janre1.png"); if the
    // second box is meant to show a different image, path2 should probably
    // reference another file — confirm intent.
    string path1 = System.IO.Path.GetFullPath(@"../../janre1.png");
    string path2 = System.IO.Path.GetFullPath(@"../../janre1.png");

    var bitmap1 = new Bitmap(path1);
    var bitmap2 = new Bitmap(path2);

    sourcebox1.Image = bitmap1;
    sourcebox2.Image = bitmap2;
}
/// <summary>
/// Applies a 5x5 Gaussian filter (sigma = 1.4) to every test image, saves the
/// result, and verifies the output exists and differs from the original image.
/// </summary>
public void GaussFilter_Is_Applied()
{
    foreach (FileInfo file in TestImages.GetTestImagesFromTestFolder(""))
    {
        // Base output name without the original extension.
        string outputFileName = $"{OutputPath}{file.Name.Substring(0, file.Name.LastIndexOf('.'))}";

        using (var imageLoader = new ImageLoader())
        {
            imageLoader.Load(file.FullName);
            Image image = imageLoader.Image;

            string fileName = $"{outputFileName}_gaussFilter{file.Extension}";
            imageLoader.AddGaussFilter(5, 1.4);
            imageLoader.Save(fileName);

            Assert.IsTrue(File.Exists(fileName));
            ImageAssert.AssertImagesAreDifferent(image, imageLoader.Image);

            // Fix: remove the saved artifact so repeated runs start clean,
            // matching the cleanup done in Image_Is_Saved_To_File.
            File.Delete(fileName);
        }
    }
}
/// <summary>
/// Documentation example: extracts FAST corner points from the Lena image
/// and verifies the expected number of detected points.
/// </summary>
public void doc_test()
{
    string localPath = TestContext.CurrentContext.TestDirectory;

    #region doc_apply
    // Let's load an example image, such as Lena,
    // from a standard dataset of example images:
    var images = new TestImages(path: localPath);
    Bitmap lena = images["lena.bmp"];

    // Create FAST with the default parameter values:
    var fast = new FastCornersDetector(threshold: 20);

    // Use it to extract interest points from the Lena image:
    List<IntPoint> descriptors = fast.ProcessImage(lena);

    // Now those descriptors can be used to represent the image itself, such
    // as for example, in the Bag-of-Visual-Words approach for classification.
    #endregion

    // Pin the documented point count so the example above stays correct.
    Assert.AreEqual(1144, descriptors.Count);
}
/// <summary>
/// Documentation example: extracts Histogram-of-Oriented-Gradients descriptors
/// from the Lena image and verifies the descriptor count and a checksum of
/// their values.
/// </summary>
public void doc_test()
{
    string localPath = TestContext.CurrentContext.TestDirectory;

    #region doc_apply
    // Let's load an example image, such as Lena,
    // from a standard dataset of example images:
    var images = new TestImages(path: localPath);
    Bitmap lena = images["lena.bmp"];

    // Create a new Histogram of Oriented Gradients with the default parameter values:
    var hog = new HistogramsOfOrientedGradients(numberOfBins: 9, blockSize: 3, cellSize: 6);

    // Use it to extract descriptors from the Lena image:
    List<double[]> descriptors = hog.ProcessImage(lena);

    // Now those descriptors can be used to represent the image itself, such
    // as for example, in the Bag-of-Visual-Words approach for classification.
    #endregion

    Assert.AreEqual(784, descriptors.Count);

    // Checksum over every descriptor component (looser tolerance than the
    // other feature tests, matching the original expectations).
    double total = descriptors.Sum(x => x.Sum());
    Assert.AreEqual(3359.1014569812564, total, 1e-3);
}
/// <summary>
/// Documentation example: extracts Local Binary Pattern descriptors from the
/// Lena image and verifies the descriptor count and a checksum of their values.
/// </summary>
public void doc_test()
{
    string localPath = TestContext.CurrentContext.TestDirectory;

    #region doc_apply
    // Let's load an example image, such as Lena,
    // from a standard dataset of example images:
    var images = new TestImages(path: localPath);
    Bitmap lena = images["lena.bmp"];

    // Create a new Local Binary Pattern with default values:
    var lbp = new LocalBinaryPattern(blockSize: 3, cellSize: 6);

    // Use it to extract descriptors from the Lena image:
    List<double[]> descriptors = lbp.ProcessImage(lena);

    // Now those descriptors can be used to represent the image itself, such
    // as for example, in the Bag-of-Visual-Words approach for classification.
    #endregion

    Assert.AreEqual(784, descriptors.Count);

    // Checksum over every descriptor component.
    double total = descriptors.Sum(x => x.Sum());
    Assert.AreEqual(6094.543992693033, total, 1e-10);
}
/// <summary>
/// Checks that the dataset's "lena.bmp" matches the embedded lena512 resource
/// in dimensions and grayscale-ness, and pins the expected intensity extremes
/// of each source.
/// </summary>
public void load_true_grayscale_test()
{
    string localPath = TestContext.CurrentContext.TestDirectory;

    var dataset = new TestImages(path: localPath);
    Bitmap fromDataset = dataset["lena.bmp"];
    Bitmap fromResource = Accord.Imaging.Image.Clone(Resources.lena512);

    // Both images should have identical dimensions and be true grayscale.
    Assert.AreEqual(fromDataset.Width, fromResource.Width);
    Assert.AreEqual(fromDataset.Height, fromResource.Height);
    Assert.IsTrue(fromDataset.IsGrayscale());
    Assert.IsTrue(fromResource.IsGrayscale());

    int datasetMax = fromDataset.Max();
    int resourceMax = fromResource.Max();
    int datasetMin = fromDataset.Min();
    int resourceMin = fromResource.Min();

    // The pixel extrema are expected to differ slightly between the two sources.
    Assert.AreEqual(244, datasetMax);
    Assert.AreEqual(245, resourceMax);
    Assert.AreEqual(28, datasetMin);
    Assert.AreEqual(25, resourceMin);
}
/// <summary>
/// Renders the upload-test view for a posted <see cref="TestImages"/> model.
/// </summary>
public ActionResult UploadTest(TestImages model) => View(model);
/// <summary>
/// Renders the upload-test view with a fresh, empty <see cref="TestImages"/> model.
/// </summary>
public ActionResult UploadTest() => View(new TestImages());