/// <summary>
/// Perform the enhancements in the frequency domain
/// </summary>
static void frequencyEnhance()
{
    // Load the damaged image and the clean reference, converting both to HSV
    HsvImage damaged = new HsvImage(new RgbImage(Resources.dogDistorted));
    HsvImage reference = new HsvImage(new RgbImage(Resources.dogOriginal));

    // FFT sizes are padded up to the next power of two
    int fftWidth = HsvImage.nextPow2(reference.Width);
    int fftHeight = HsvImage.nextPow2(reference.Height);

    // Take the forward Fourier transform of each image
    ComplexF[] damagedSpectrum = damaged.FFT(2,FourierDirection.Forward);
    ComplexF[] referenceSpectrum = reference.FFT(2, FourierDirection.Forward);

    // Render the two spectra as viewable images
    HsvImage damagedSpectrumView = HsvImage.visualiseFFT(damagedSpectrum, fftWidth, fftHeight);
    HsvImage referenceSpectrumView = HsvImage.visualiseFFT(referenceSpectrum, fftWidth, fftHeight);

    // Mask out the vertical bands of the spectrum that carry the distortion
    ObscureMask mask = new ObscureMask();
    mask.addRegion(new RectangleF(30, 0, 40, 512));
    mask.addRegion(new RectangleF(130, 0, 40, 512));
    mask.addRegion(new RectangleF(340, 0, 40, 512));
    mask.addRegion(new RectangleF(440, 0, 40, 512));
    ComplexF[] maskedSpectrum = mask.apply(damagedSpectrum, fftWidth);
    HsvImage maskedSpectrumView = HsvImage.visualiseFFT(maskedSpectrum, fftWidth, fftHeight);

    // Write the three spectrum visualisations to disk
    damagedSpectrumView.toBitmap().Save(@"c:\output\fourierdV.bmp");
    referenceSpectrumView.toBitmap().Save(@"c:\output\fourieroV.bmp");
    maskedSpectrumView.toBitmap().Save(@"c:\output\fourierdObs.bmp");
}
/// <summary>
/// Perform the enhancement on the waterfall image in HSV space
/// </summary>
static void hsvEnhance()
{
    // Load the image
    HsvImage distorted = new HsvImage(new RgbImage(Resources.waterfall));

    // Perform the 2 enhancements.
    // balanceHistogram takes a channel index (0 = H, 1 = S, 2 = V).
    // The previous code passed the char literals 'V' and 'S', which implicitly
    // convert to the ints 86 and 83 — indices that match no channel, so the
    // "balanced" images were silently unchanged copies. Pass the real indices.
    HsvImage vBalanced = distorted.balanceHistogram(2);
    HsvImage sBalanced = vBalanced.balanceHistogram(1);

    // Save out the 2 enhancements
    vBalanced.toBitmap().Save(@"c:\output\vBalanced.bmp");
    sBalanced.toBitmap().Save(@"c:\output\sBalanced.bmp");
}
/// <summary>
/// Calculates the Root Mean Squared deviation between the RGB values of two images
/// </summary>
/// <param name="comparison">The image to compare to</param>
/// <returns>The RMSD between the RGB values of the inputs</returns>
public int compareBitmaps(HsvImage comparison)
{
    // Convert both operands to RGB and delegate to RgbImage's comparison
    RgbImage thisAsRgb = new RgbImage(this);
    RgbImage otherAsRgb = new RgbImage(comparison);
    return thisAsRgb.compareBitmaps(otherAsRgb);
}
/// <summary>
/// Generates a second image based off of the first, with a balanced (equalised) histogram
/// </summary>
/// <param name="property">
/// The channel to balance: 0 is H, 1 is S, 2 is V. For convenience the char
/// codes 'H', 'S' and 'V' are also accepted and mapped to the matching index
/// (char arguments convert implicitly to int in C#, so a call such as
/// balanceHistogram('V') previously compiled but silently balanced nothing).
/// </param>
/// <returns>a second image with better contrast</returns>
public HsvImage balanceHistogram(int property)
{
    // Accept 'H'/'S'/'V' as aliases for the channel indices 0/1/2
    switch (property)
    {
        case 'H': property = 0; break;
        case 'S': property = 1; break;
        case 'V': property = 2; break;
    }

    SortedDictionary<int, int> histogram = generateHistogram(property);
    SortedDictionary<int, int> culmulative = new SortedDictionary<int, int>();
    SortedDictionary<int, int> balanced = new SortedDictionary<int, int>();
    HsvImage output = new HsvImage(Height, Width);
    int runningTotal = 0;

    // Cycle through the histogram, turning absolute counts into cumulative totals
    foreach (KeyValuePair<int, int> i in histogram)
    {
        runningTotal += i.Value;
        culmulative.Add(i.Key, runningTotal);
    }

    // The smallest cumulative count anchors the rescaling so the darkest
    // occupied level maps to 0 (loop-invariant, so computed once)
    int cdfMin = culmulative.First().Value;

    // Balance the cumulative values on to the 0..255 range using the formula
    // given in the lecture notes
    foreach (KeyValuePair<int, int> i in culmulative)
    {
        int v = (int)Math.Round((i.Value - cdfMin) / (Width * Height - 1.0) * 255);
        balanced.Add(i.Key, v);
    }

    // Reassign the selected channel to its equalised value; copy the other
    // channels through untouched
    for (int x = 0; x < Width; x++)
    {
        for (int y = 0; y < Height; y++)
        {
            for (int p = 0; p < Image[0, 0].Length; p++)
            {
                if (p == property)
                    output.Image[x, y][p] = balanced[Image[x, y][p]];
                else
                    output.Image[x, y][p] = Image[x, y][p];
            }
        }
    }
    return output;
}
/// <summary>
/// Turn a ComplexF[] in to an Image for visualisation
/// </summary>
/// <param name="data">The ComplexF[] to transform</param>
/// <param name="w">The width of the image to output</param>
/// <param name="h">The height of the image to output</param>
/// <returns>
/// An HSV image whose V channel holds the normalised values, or null when
/// w * h does not match the number of samples in <paramref name="data"/>
/// </returns>
public static HsvImage visualiseFFT(ComplexF[] data, int w, int h)
{
    // The image must hold exactly one pixel per sample. The old guard only
    // rejected h * w < data.Length; an image LARGER than the data slipped
    // through and crashed below with an IndexOutOfRangeException once
    // stream[y * w + x] ran past the end of the stream.
    if (h * w != data.Length)
        return null;

    // Turn the stream in to values suitable for displaying
    int[] stream = NormaliseStream(data);

    // Cycle through all the pixels, outputing the normalised value as the V
    // channel (H and S are zeroed, giving a greyscale visualisation)
    HsvImage output = new HsvImage(h, w);
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            output.Image[x, y][0] = 0;
            output.Image[x, y][1] = 0;
            output.Image[x, y][2] = stream[y * w + x];
        }
    }
    return output;
}
/// <summary>
/// Perform the enhancement on the dog image in the spatial domain
/// </summary>
static void spatialEnhance()
{
    // Load the damaged image and the clean reference, converting both to HSV
    HsvImage damaged = new HsvImage(new RgbImage(Resources.dogDistorted));
    HsvImage reference = new HsvImage(new RgbImage(Resources.dogOriginal));

    // Run both smoothing filters over the V channel (index 2)
    HsvImage smoothedGaussian = damaged.applyFilter(SpatialFilter.gaussian(7), 2);
    HsvImage smoothedAverage = damaged.applyFilter(SpatialFilter.neighbourhoodAverage(5), 2);

    // Write both results to disk
    smoothedGaussian.toBitmap().Save(@"c:\output\guassian.bmp");
    smoothedAverage.toBitmap().Save(@"c:\output\neighbour.bmp");

    // Report how far each result deviates from the clean reference
    System.Console.WriteLine("The difference between the Guassian and original is: {0}", reference.compareBitmaps(smoothedGaussian));
    System.Console.WriteLine("The difference between the neighbourd avg and original is: {0}", reference.compareBitmaps(smoothedAverage));
}
/// <summary>
/// Create an RGB image from an HSV representation using the method listed here
/// http://en.wikipedia.org/wiki/HSL_and_HSV#From_HSV
/// </summary>
/// <param name="input">An image in the HSV color space</param>
public RgbImage(HsvImage input)
{
    // Dimensions carry over from the source image
    Height = input.Height;
    Width = input.Width;
    Image = new int[Width, Height][];

    // Convert each pixel independently
    for (int col = 0; col < Width; col++)
    {
        for (int row = 0; row < Height; row++)
        {
            // Read this pixel's hue, saturation and value channels
            int hue = input.Image[col, row][0];
            int sat = input.Image[col, row][1];
            int val = input.Image[col, row][2];

            // Chroma, hue sector position, and the intermediate component,
            // exactly as on the wiki page cited above
            int chroma = (val * sat) / 255;
            float huePrime = (float)(hue / 60.0);
            int secondary = (int)(chroma * (1 - Math.Abs((huePrime % 2) - 1)));

            // Pick the (r, g, b) ordering for this hue sector; a zero chroma
            // means the pixel is achromatic (grey)
            int r, g, b;
            if (chroma == 0)       { r = 0;         g = 0;         b = 0; }
            else if (huePrime < 1) { r = chroma;    g = secondary; b = 0; }
            else if (huePrime < 2) { r = secondary; g = chroma;    b = 0; }
            else if (huePrime < 3) { r = 0;         g = chroma;    b = secondary; }
            else if (huePrime < 4) { r = 0;         g = secondary; b = chroma; }
            else if (huePrime < 5) { r = secondary; g = 0;         b = chroma; }
            else                   { r = chroma;    g = 0;         b = secondary; }

            // Lift all three components so the brightness matches the value channel
            int offset = val - chroma;
            Image[col, row] = new int[] { r + offset, g + offset, b + offset };
        }
    }
}
/// <summary>
/// Apply the filter to an HSV image
/// </summary>
/// <param name="subject">the image to apply to</param>
/// <param name="property">the channel index to apply to (0 is H, 1 is S, 2 is V)</param>
/// <returns>the transformed image, or null when the channel index is invalid</returns>
public HsvImage apply(HsvImage subject, int property)
{
    // Validate the channel index. The old check used '>' so property ==
    // Length (one past the last valid index) slipped through, and negative
    // values were never rejected — both crashed later with an
    // IndexOutOfRangeException when the pixel arrays were indexed.
    if (property < 0 || property >= subject.Image[0, 0].Length)
    {
        return null;
    }

    HsvImage output = new HsvImage(subject.Height, subject.Width);

    // Cycle through every pixel of the image
    for (int y = 0; y < subject.Height; y++)
    {
        for (int x = 0; x < subject.Width; x++)
        {
            int newVal = 0;

            // Cycle through every row of the filter matrix
            for (int y2 = 0; y2 < Height; y2++)
            {
                // Work out which image row it refers to (Cy/Cx centre the
                // matrix on the current pixel)
                int refY = y2 - Cy + y;

                // Skip rows that fall outside the image
                if (refY >= 0 && refY < subject.Height)
                {
                    // Cycle through every column in the matrix
                    for (int x2 = 0; x2 < Width; x2++)
                    {
                        // Work out which image column it refers to
                        int refX = x2 - Cx + x;

                        // Skip columns that fall outside the image
                        if (refX >= 0 && refX < subject.Width)
                        {
                            // Apply the multiplier to the pixel value, and add
                            // it to the running total
                            newVal += Filter[x2, y2] * subject.Image[refX, refY][property];
                        }
                    }
                }
            }

            // Normalise the running total by the filter's weight total
            newVal /= Total;

            // Write the filtered channel; copy the other channels through
            for (int p = 0; p < subject.Image[0, 0].Length; p++)
            {
                if (p == property)
                    output.Image[x, y][p] = newVal;
                else
                    output.Image[x, y][p] = subject.Image[x, y][p];
            }
        }
    }
    return output;
}