/// <summary>
/// Applies a sepia filter directly to the image bytes.
/// (Reddish-brown tone associated with old photographs)
/// </summary>
/// <param name="bitmap">Image to process</param>
/// <exception cref="ArgumentException">Thrown when the image is not color.</exception>
public static void ToSepiaDirect(ref Bitmap bitmap)
{
    byte[] buffer = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        if (!bmpData.IsColor())
        {
            throw new ArgumentException("Image is not color, sepia filter cannot be applied.");
        }

        // Rows may contain padding bytes for CPU addressing,
        // so only traverse the pixel-data portion of each row.
        int rowStride = bmpData.Stride;
        int rowDataLength = rowStride - bmpData.GetStridePaddingLength();
        int rowCount = bmpData.Height;
        int safeLimit = bmpData.GetSafeArrayLimitForImage(buffer);

        // 3 bytes per pixel (BGR), or 4 when an alpha byte is present
        int bytesPerPixel = bmpData.GetPixelDepth();

        for (int row = 0; row < rowCount; row++)
        {
            // Use stride to land on the correct byte when moving between rows
            int rowStart = row * rowStride;

            for (int col = 0; col < rowDataLength; col += bytesPerPixel)
            {
                int i = rowStart + col;

                if (i < safeLimit)
                {
                    // Capture the original BGR values before overwriting them
                    byte blue = buffer[i];
                    byte green = buffer[i + 1];
                    byte red = buffer[i + 2];

                    // Sepia transform coefficients (Source: Microsoft)
                    buffer[i + 2] = ConvertToByte((0.393 * red) + (0.769 * green) + (0.189 * blue));
                    buffer[i + 1] = ConvertToByte((0.349 * red) + (0.686 * green) + (0.168 * blue));
                    buffer[i] = ConvertToByte((0.272 * red) + (0.534 * green) + (0.131 * blue));
                }
            }
        }
    }
    finally
    {
        // Always release the bitmap lock
        ImageEdit.End(bitmap, bmpData, buffer);
    }
}
/// <summary>
/// Any pixel values outside the threshold will be changed to min/max.
/// </summary>
/// <param name="bitmap">Image to process</param>
/// <param name="minValue">Minimum threshold value</param>
/// <param name="maxValue">Maximum threshold value</param>
public static void ApplyMinMaxDirect(ref Bitmap bitmap, byte minValue, byte maxValue)
{
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        ApplyMinMaxDirect(imageBytes, bmpData.GetPixelDepth(), minValue, maxValue);
    }
    finally
    {
        // Ensure the bitmap lock is released even if processing throws
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Any pixel values below threshold will be changed to 0 (black).
/// Any pixel values above (or equal to) threshold will be changed to 255 (white).
/// </summary>
/// <param name="bitmap">Reference to bitmap</param>
/// <param name="threshold">Threshold value</param>
public static void ApplyDirect(ref Bitmap bitmap, byte threshold)
{
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        // Apply threshold value to image.
        ApplyDirect(imageBytes, bmpData, threshold);
    }
    finally
    {
        // Ensure the bitmap lock is released even if processing throws
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Stretches image contrast to specified min/max values.
/// </summary>
/// <param name="bitmap">Image to process</param>
/// <param name="min">Minimum contrast value</param>
/// <param name="max">Maximum contrast value</param>
public static void StretchDirect(ref Bitmap bitmap, byte min, byte max)
{
    // Lock image for processing
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        StretchDirect(imageBytes, bmpData, min, max);
    }
    finally
    {
        // Copy modified array back to image, and release lock
        // (in a finally so the lock is released even if processing throws)
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Applies convolution matrix/kernel to image.
/// i.e. edge detection.
/// </summary>
/// <param name="bitmap">Image to apply kernel to</param>
/// <param name="kernelMatrix">Kernel matrix</param>
public static void ApplyKernelDirect(ref Bitmap bitmap, int[,] kernelMatrix)
{
    // Lock image for processing
    byte[] rgbValues = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        // ApplyKernel returns a new array containing the convolved result
        rgbValues = ApplyKernel(rgbValues, bmpData, kernelMatrix);
    }
    finally
    {
        // Copy (possibly replaced) array back to image, and release lock.
        // If ApplyKernel threw, the original bytes are written back unchanged.
        ImageEdit.End(bitmap, bmpData, rgbValues);
    }
}
/// <summary>
/// Any pixel values above threshold will be changed to max value.
/// (Low-pass filter)
/// </summary>
/// <param name="bitmap">Image to process</param>
/// <param name="maxValue">Max value to retain</param>
public static void ApplyMaxDirect(ref Bitmap bitmap, byte maxValue)
{
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        // Determine whether color
        int pixelDepth = bmpData.GetPixelDepth();

        ApplyMaxDirect(imageBytes, pixelDepth, maxValue);
    }
    finally
    {
        // Ensure the bitmap lock is released even if processing throws
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Histogram Equalization will enhance general contrast
/// by distributing grey levels wider and more evenly.
/// </summary>
/// <param name="bitmap">Image to equalize</param>
public static void HistogramEqualizationDirect(ref Bitmap bitmap)
{
    // Get image bytes
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        int pixelDepth = bmpData.GetPixelDepth();

        HistogramEqualizationDirect(imageBytes, pixelDepth, bitmap.Width, bitmap.Height);
    }
    finally
    {
        // Ensure the bitmap lock is released even if processing throws
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Applies thresholding directly to image using Otsu's Method.
/// Returned image will consist of black (0) and white (255) values only.
/// </summary>
/// <param name="bitmap">Image to process</param>
public static void ApplyOtsuMethodDirect(ref Bitmap bitmap)
{
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        // Determine whether color
        int pixelDepth = bmpData.GetPixelDepth();

        // Get threshold using Otsu
        byte threshold = GetByOtsuMethod(imageBytes, pixelDepth);

        // Apply using BitmapData to account for stride padding
        ApplyDirect(imageBytes, bmpData, threshold);
    }
    finally
    {
        // Ensure the bitmap lock is released even if processing throws
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Inverts image to negative directly.
/// </summary>
/// <param name="bitmap">Image to process</param>
public static void ToNegativeDirect(ref Bitmap bitmap)
{
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        // Monochrome (1bpp) images pack 8 pixels per byte,
        // so they need their own inversion routine.
        if (bmpData.PixelFormat == PixelFormat.Format1bppIndexed)
        {
            MonochromeToNegative(imageBytes);
        }
        else
        {
            ToNegative(imageBytes);
        }
    }
    finally
    {
        // Ensure the bitmap lock is released even if processing throws
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Copies pixels from source image to destination.
/// </summary>
/// <param name="imageSource">Image to copy pixels from</param>
/// <param name="imageDestination">Image to copy pixels to</param>
public static void FromSourceToDestination(Bitmap imageSource, Bitmap imageDestination)
{
    // Get bytes to copy
    byte[] sourceBytes = ImageBytes.FromImage(imageSource);

    // Lock destination during copy/write
    byte[] destinationBytes = ImageEdit.Begin(imageDestination, out BitmapData bmpDataDest);

    try
    {
        // Copy as many bytes of the image as possible
        int limit = Math.Min(sourceBytes.Length, destinationBytes.Length);

        Array.Copy(sourceBytes, destinationBytes, limit);
    }
    finally
    {
        // Release lock on destination even if the copy throws
        ImageEdit.End(imageDestination, bmpDataDest, destinationBytes);
    }
}
/// <summary>
/// Applies color filter to image.
/// </summary>
/// <param name="bitmap">Image to process</param>
/// <param name="r">Red component to apply</param>
/// <param name="g">Green component to apply</param>
/// <param name="b">Blue component to apply</param>
/// <exception cref="ArgumentException">Thrown when the image is not color.</exception>
public static void ApplyFilterDirectRGB(ref Bitmap bitmap, byte r, byte g, byte b)
{
    // Get image bytes and info
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    try
    {
        // Guard: a color filter only makes sense on a color image
        if (!bmpData.IsColor())
        {
            throw new ArgumentException("Image is not color, RGB filter cannot be applied.");
        }

        ApplyFilterDirectRGB(imageBytes, r, g, b, bmpData);
    }
    finally
    {
        // Always release the bitmap lock
        ImageEdit.End(bitmap, bmpData, imageBytes);
    }
}
/// <summary>
/// Applies localized/adaptive region thresholding to image using Chow &amp; Kaneko method.
/// The image is divided into regions, each region gets a threshold via Otsu's method,
/// and every pixel is then thresholded against a distance-weighted blend of the
/// thresholds of its nearest regions.
/// </summary>
/// <param name="bitmap">Image to process</param>
/// <param name="horizontalRegions">Number of horizonal regions to apply</param>
/// <param name="verticalRegions">Number of vertical regions to apply</param>
public static void ApplyChowKanekoMethodDirect(ref Bitmap bitmap, int horizontalRegions, int verticalRegions)
{
    // Ensure at least 1 region
    if (horizontalRegions < 1 || verticalRegions < 1)
    {
        // NOTE(review): this string is passed as the exception's paramName argument,
        // so it will be reported as a parameter name rather than a message.
        throw new ArgumentOutOfRangeException("Chow & Kaneko requires at least one region.");
    }

    // We will use Otsu's method to determine threshold for each region
    ThresholdRegionData[] regionThresholds = new ThresholdRegionData[horizontalRegions * verticalRegions];

    // Get image bytes and info
    byte[] imageBytes = ImageEdit.Begin(bitmap, out BitmapData bmpData);

    // Use stride to ensure correct row length
    int stride = Math.Abs(bmpData.Stride);
    int imageHeight = bmpData.Height;
    int imageWidth = bmpData.Width;

    // Get pixels per region
    // (Round up to ensure thresholding applied to every pixel)
    int horizontalPPZ = (int)Math.Ceiling((double)imageWidth / horizontalRegions);
    int verticalPPZ = (int)Math.Ceiling((double)imageHeight / verticalRegions);

    // Determine whether color
    int pixelDepth = bmpData.GetPixelDepth();

    // Variables for region loop
    byte[] regionBytes;
    int regionY1, regionY2, regionHeight;
    int regionX1, regionX2, regionWidth;
    int yOffset;
    int imageOffset, regionOffset;
    int y, x, i;
    ThresholdRegionData rd;

    // Apply Otsu to obtain localized threshold for each region
    for (y = 0; y < verticalRegions; y++)
    {
        // Get y positions, clamped to the bottom edge of the image
        regionY1 = y * verticalPPZ;
        regionY2 = Math.Min(imageHeight, regionY1 + verticalPPZ);
        regionHeight = regionY2 - regionY1;
        yOffset = stride * regionY1;

        for (x = 0; x < horizontalRegions; x++)
        {
            // Get current region position, limit to edge of image.
            // regionX1 is a byte offset within the row (pixels * pixelDepth).
            regionX1 = x * horizontalPPZ * pixelDepth;

            // NOTE(review): suspected unit mismatch — regionX1 (already scaled by
            // pixelDepth) is added to horizontalPPZ and compared with imageWidth,
            // both of which are in pixels, before the Min result is scaled by
            // pixelDepth again. Verify region boundaries for pixelDepth > 1 images.
            regionX2 = Math.Min(imageWidth, regionX1 + horizontalPPZ) * pixelDepth;
            regionWidth = regionX2 - regionX1;

            // Allocate bytes
            regionBytes = new byte[regionWidth * regionHeight];

            // Copy bytes from image to region array
            // Copy row by row (region bytes not consecutive)
            for (i = 0; i < regionHeight; i++)
            {
                imageOffset = yOffset + (stride * i) + regionX1;
                regionOffset = regionWidth * i;
                Buffer.BlockCopy(imageBytes, imageOffset, regionBytes, regionOffset, regionWidth);
            }

            // Create struct with threshold data.
            // NOTE(review): CenterX appears to be in byte units while the pixel loop
            // below passes pixel coordinates to CalculateDistance — confirm against
            // ThresholdRegionData.CalculateDistance expectations.
            rd = new ThresholdRegionData()
            {
                X = x,
                Y = y,
                CenterX = regionX1 + (regionWidth / 2),
                CenterY = (y * regionHeight) + (regionHeight / 2)
            };

            // Apply Otsu to localized region
            rd.Threshold = GetByOtsuMethod(regionBytes, pixelDepth);

            // Assign threshold data to region
            regionThresholds[(y * horizontalRegions) + x] = rd;
        }
    }

    // Variables for pixel loop
    int pixelSum;
    bool belowThreshold;
    byte threshold;
    double totalDistance;
    ThresholdRegionData[] nearestRegions;
    int py, px, n, j;
    const int NumberOfRegions = 4;

    // Go through each pixel calculating threshold
    for (py = 0; py < imageHeight; py++)
    {
        for (px = 0; px < imageWidth; px++)
        {
            // Calculate distances from pixel to each region
            for (n = 0; n < regionThresholds.Length; n++)
            {
                regionThresholds[n].CalculateDistance(px, py);
            }

            // Find the nearest 4 regions to pixel, then get their thresholds
            nearestRegions = regionThresholds.OrderBy(r => r.Distance).Take(NumberOfRegions).ToArray();

            // Sum all distances so we have reference for each relative distance
            totalDistance = nearestRegions.Sum(r => r.Distance);

            // Weight bias the threshold of each nearest region based on proximity to pixel
            // (Shorter distance has higher weight bias)
            // Sum of each weight bias should add up to 1.
            if (nearestRegions.Length > 1)
            {
                threshold = (byte)nearestRegions.Sum(r => ((1 - (r.Distance / totalDistance)) / (nearestRegions.Length - 1)) * r.Threshold);
            }
            else
            {
                threshold = nearestRegions.First().Threshold;
            }

            // Get pixel index within image bytes
            i = (px * pixelDepth) + (py * stride);
            pixelSum = 0;

            // Sum each pixel component for color images
            for (j = 0; j < pixelDepth && i + j < imageBytes.Length; j++)
            {
                pixelSum += imageBytes[i + j];
            }

            // Compare average to threshold
            belowThreshold = (pixelSum / pixelDepth) < threshold;

            // Apply threshold (black when below, white otherwise)
            for (j = 0; j < pixelDepth && i + j < imageBytes.Length; j++)
            {
                imageBytes[i + j] = belowThreshold ? byte.MinValue : byte.MaxValue;
            }
        }
    }

    // End edit, write bytes back to image
    ImageEdit.End(bitmap, bmpData, imageBytes);
}
/// <summary>
/// Combines multiple images together.
/// (Pixel values combined via bitwise or, not averaged)
/// </summary>
/// <typeparam name="T">Image-derived type</typeparam>
/// <param name="images">Images to combine</param>
/// <returns>New combined image as bitmap, or null when no images are provided</returns>
/// <exception cref="ArgumentException">Thrown when images differ in color depth.</exception>
public static Bitmap All<T>(IEnumerable<T> images) where T : Image
{
    // Materialize the sequence once: the original enumerated it three times
    // (Any, ElementAt, Skip), which re-runs any deferred query each time.
    List<T> imageList = images.ToList();

    // Check have at least 1 image
    if (imageList.Count == 0)
    {
        return null;
    }

    // Use first image as starting point
    Bitmap combinedImage = ImageFormatting.ToBitmap(imageList[0]);

    // Get bytes for image
    byte[] rgbValues1 = ImageEdit.Begin(combinedImage, out BitmapData bmpData1);

    try
    {
        // Get image 1 data
        int pixelDepth1 = bmpData1.GetPixelDepth();
        int pixelDepthWithoutAlpha = Math.Min(pixelDepth1, Constants.PixelDepthRGB);
        int stride1 = bmpData1.Stride;
        int width1 = bmpData1.GetStrideWithoutPadding();
        int height1 = bmpData1.Height;

        // Add additional images to first
        foreach (Image image in imageList.Skip(1))
        {
            // Only reading this image
            byte[] rgbValues2 = ImageBytes.FromImage(image, out BitmapData bmpData2);

            // Check both images are color or B&W
            if (pixelDepth1 != bmpData2.GetPixelDepth())
            {
                throw new ArgumentException("Not all images have the same color depth");
            }

            // Protect against different sized images
            int limit = Math.Min(bmpData1.GetSafeArrayLimitForImage(rgbValues1), bmpData2.GetSafeArrayLimitForImage(rgbValues2));
            int minHeight = Math.Min(height1, bmpData2.Height);
            int minStride = Math.Min(stride1, bmpData2.Stride);
            int minWidth = Math.Min(width1, bmpData2.GetStrideWithoutPadding());

            for (int y = 0; y < minHeight; y++)
            {
                // Images may have extra bytes per row to pad for CPU addressing,
                // so need to ensure we traverse to the correct byte when moving between rows.
                int offset = y * minStride;

                for (int x = 0; x < minWidth; x += pixelDepth1)
                {
                    int i = offset + x;

                    if (i < limit)
                    {
                        // Combine images (skipping any alpha byte)
                        for (int j = 0; j < pixelDepthWithoutAlpha; j++)
                        {
                            rgbValues1[i + j] |= rgbValues2[i + j];
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }
    }
    finally
    {
        // Release combined image
        ImageEdit.End(combinedImage, bmpData1, rgbValues1);
    }

    return combinedImage;
}