// Get grayscale image out of the specified one public static void ConvertToGrayscale( UnmanagedImage source, UnmanagedImage destination ) { if ( source.PixelFormat != PixelFormat.Format8bppIndexed ) { Grayscale.CommonAlgorithms.BT709.Apply( source, destination ); } else { source.Copy( destination ); } }
/// <summary> /// Process the filter on the specified image. /// </summary> /// /// <param name="sourceData">Source image data.</param> /// <param name="destinationData">Destination image data.</param> /// protected unsafe override void ProcessFilter(UnmanagedImage sourceData, UnmanagedImage destinationData) { int width = sourceData.Width; int height = sourceData.Height; PixelFormat format = sourceData.PixelFormat; int pixelSize = System.Drawing.Bitmap.GetPixelFormatSize(format) / 8; UnmanagedImage temp = UnmanagedImage.Create(width, height, format); int lineWidth = width * pixelSize; int srcStride = temp.Stride; int srcOffset = srcStride - lineWidth; int dstStride = destinationData.Stride; int dstOffset = dstStride - lineWidth; byte* srcStart = (byte*)temp.ImageData.ToPointer(); byte* dstStart = (byte*)destinationData.ImageData.ToPointer(); // apply the first mask directly into the destination Convolution c = new Convolution(masks[0]); c.Apply(sourceData, destinationData); // apply the remaining masks, keeping the per-pixel maximum response for (int i = 1; i < masks.Length; i++) { c.Kernel = masks[i]; c.Apply(sourceData, temp); byte* src = srcStart; byte* dst = dstStart; for (int y = 0; y < height; y++) { for (int x = 0; x < lineWidth; x++, src++, dst++) { if (*src > *dst) *dst = *src; } dst += dstOffset; src += srcOffset; } } // release the temporary image allocated above temp.Dispose(); }
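The filter above convolves the source with every kernel in masks and keeps the per-pixel maximum response, which is the usual compass edge detection scheme. A minimal usage sketch follows; the class name KirschEdgeDetector and the input bitmap are assumptions, not taken from the code above.

// Convert the input to 8 bpp grayscale first, then apply the compass filter;
// the strongest of the directional responses is kept for every pixel.
Bitmap grayscale = Grayscale.CommonAlgorithms.BT709.Apply(sourceBitmap);
Bitmap edges = new KirschEdgeDetector().Apply(grayscale);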
/// <summary> /// Process image looking for corners. /// </summary> /// /// <param name="image">Source image data to process.</param> /// /// <returns>Returns list of found corners (X-Y coordinates).</returns> /// /// <exception cref="UnsupportedImageFormatException"> /// The source image has incorrect pixel format. /// </exception> /// public List<IntPoint> ProcessImage(UnmanagedImage image) { // check image format if ( (image.PixelFormat != PixelFormat.Format8bppIndexed) && (image.PixelFormat != PixelFormat.Format24bppRgb) && (image.PixelFormat != PixelFormat.Format32bppRgb) && (image.PixelFormat != PixelFormat.Format32bppArgb) ) { throw new UnsupportedImageFormatException("Unsupported pixel format of the source image."); } // make sure we have grayscale image UnmanagedImage grayImage = null; if (image.PixelFormat == PixelFormat.Format8bppIndexed) { grayImage = image; } else { // create temporary grayscale image grayImage = Grayscale.CommonAlgorithms.BT709.Apply(image); } // get source image size int width = grayImage.Width; int height = grayImage.Height; int stride = grayImage.Stride; int offset = stride - width; // 1. Calculate partial differences float[,] diffx = new float[height, width]; float[,] diffy = new float[height, width]; float[,] diffxy = new float[height, width]; unsafe { fixed (float* pdx = diffx, pdy = diffy, pdxy = diffxy) { // Begin skipping first line byte* src = (byte*)grayImage.ImageData.ToPointer() + stride; float* dx = pdx + width; float* dy = pdy + width; float* dxy = pdxy + width; // for each line for (int y = 1; y < height - 1; y++) { // skip first column dx++; dy++; dxy++; src++; // for each inner pixel in line (skipping first and last) for (int x = 1; x < width - 1; x++, src++, dx++, dy++, dxy++) { // Retrieve the pixel neighborhood byte a11 = src[+stride + 1], a12 = src[+1], a13 = src[-stride + 1]; byte a21 = src[+stride + 0], /* a22 */ a23 = src[-stride + 0]; byte a31 = src[+stride - 1], a32 = src[-1], a33 = src[-stride - 1]; // Convolution with horizontal differentiation kernel mask float h = ((a11 + a12 + a13) - (a31 + a32 + a33)) * 0.166666667f; // Convolution with vertical differentiation kernel mask float v = ((a11 + a21 + a31) - (a13 + a23 + a33)) * 0.166666667f; // Store squared differences directly *dx = h * h; *dy = v * v; *dxy = h * v; } // Skip last column dx++; dy++; dxy++; src += offset + 1; } // Free some resources which wont be needed anymore if (image.PixelFormat != PixelFormat.Format8bppIndexed) grayImage.Dispose(); } } // 2. Smooth the diff images if (sigma > 0.0) { float[,] temp = new float[height, width]; // Convolve with Gaussian kernel convolve(diffx, temp, kernel); convolve(diffy, temp, kernel); convolve(diffxy, temp, kernel); } // 3. Compute Harris Corner Response Map float[,] map = new float[height, width]; unsafe { fixed (float* pdx = diffx, pdy = diffy, pdxy = diffxy, pmap = map) { float* dx = pdx; float* dy = pdy; float* dxy = pdxy; float* H = pmap; float M, A, B, C; for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++, dx++, dy++, dxy++, H++) { A = *dx; B = *dy; C = *dxy; if (measure == HarrisCornerMeasure.Harris) { // Original Harris corner measure M = (A * B - C * C) - (k * ((A + B) * (A + B))); } else { // Harris-Noble corner measure M = (A * B - C * C) / (A + B + Constants.SingleEpsilon); } if (M > threshold) { *H = M; // insert value in the map } } } } } // 4. 
Suppress non-maximum points List<IntPoint> cornersList = new List<IntPoint>(); // for each row for (int y = r, maxY = height - r; y < maxY; y++) { // for each pixel for (int x = r, maxX = width - r; x < maxX; x++) { float currentValue = map[y, x]; // for each windows' row for (int i = -r; (currentValue != 0) && (i <= r); i++) { // for each windows' pixel for (int j = -r; j <= r; j++) { if (map[y + i, x + j] > currentValue) { currentValue = 0; break; } } } // check if this point is really interesting if (currentValue != 0) { cornersList.Add(new IntPoint(x, y)); } } } return cornersList; }
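A minimal usage sketch for the corner detector above, assuming it is Accord's HarrisCornersDetector and that grayBitmap is an 8 bpp grayscale Bitmap (both are assumptions):

// Detect Harris corners and list their coordinates.
var harris = new HarrisCornersDetector();               // default measure, k, threshold and sigma
List<IntPoint> corners = harris.ProcessImage(grayBitmap);
foreach (IntPoint corner in corners)
    Console.WriteLine("corner at ({0}, {1})", corner.X, corner.Y);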
/// <summary> /// Computes the Gray-level Difference Method (GLDM) /// Histogram for the given source image. /// </summary> /// /// <param name="source">The source image.</param> /// /// <returns>An histogram containing co-occurrences /// for every gray level in <paramref name="source"/>.</returns> /// public int[] Compute(UnmanagedImage source) { int width = source.Width; int height = source.Height; int stride = source.Stride; int offset = stride - width; int maxGray = 255; int[] hist; unsafe { byte* src = (byte*)source.ImageData.ToPointer(); if (autoGray) maxGray = max(width, height, offset, src); hist = new int[maxGray + 1]; switch (degree) { case CooccurrenceDegree.Degree0: for (int y = 0; y < height; y++) { for (int x = 1; x < width; x++) { byte a = src[stride * y + (x - 1)]; byte b = src[stride * y + x]; int bin = Math.Abs(a - b); hist[bin]++; } } break; case CooccurrenceDegree.Degree45: for (int y = 1; y < height; y++) { for (int x = 0; x < width - 1; x++) { byte a = src[stride * y + x]; byte b = src[stride * (y - 1) + (x + 1)]; int bin = Math.Abs(a - b); hist[bin]++; } } break; case CooccurrenceDegree.Degree90: for (int y = 1; y < height; y++) { for (int x = 0; x < width; x++) { byte a = src[stride * (y - 1) + x]; byte b = src[stride * y + x]; int bin = Math.Abs(a - b); hist[bin]++; } } break; case CooccurrenceDegree.Degree135: for (int y = 1; y < height; y++) { int steps = width - 1; for (int x = 0; x < width - 1; x++) { byte a = src[stride * y + (steps - x)]; byte b = src[stride * (y - 1) + (steps - 1 - x)]; int bin = Math.Abs(a - b); hist[bin]++; } } break; } } return hist; }
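A short usage sketch for Compute, assuming the enclosing class is Accord's GrayLevelDifferenceMethod and that a constructor taking the co-occurrence degree exists (both are assumptions):

// Build the gray-level difference histogram along the horizontal (0 degree) direction.
var gldm = new GrayLevelDifferenceMethod(CooccurrenceDegree.Degree0);
int[] histogram = gldm.Compute(UnmanagedImage.FromManagedImage(grayBitmap));
// histogram[d] counts pixel pairs whose absolute gray-level difference equals d.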
/// <summary> /// Process an image building Hough map. /// </summary> /// /// <param name="image">Source unmanaged image to process.</param> /// /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception> /// public void ProcessImage(UnmanagedImage image) { if (image.PixelFormat != PixelFormat.Format8bppIndexed) { throw new UnsupportedImageFormatException("Unsupported pixel format of the source image."); } // get source image size width = image.Width; height = image.Height; int srcOffset = image.Stride - width; // allocate Hough map of the same size like image houghMap = new short[height, width]; // do the job unsafe { byte* src = (byte*)image.ImageData.ToPointer(); // for each row for (int y = 0; y < height; y++) { // for each pixel for (int x = 0; x < width; x++, src++) { if (*src != 0) { DrawHoughCircle(x, y); } } src += srcOffset; } } // find max value in Hough map maxMapIntensity = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (houghMap[i, j] > maxMapIntensity) { maxMapIntensity = houghMap[i, j]; } } } CollectCircles(); }
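ProcessImage above accumulates votes for one fixed circle radius. A typical call sequence, assuming the class is the AForge/Accord HoughCircleTransformation; the radius and intensity threshold are illustrative values:

// Look for circles of radius 35 in a binarized edge image.
HoughCircleTransformation circleTransform = new HoughCircleTransformation(35);
circleTransform.ProcessImage(binaryEdgeBitmap);
// Keep only circles whose accumulator intensity is at least half of the maximum.
HoughCircle[] circles = circleTransform.GetCirclesByRelativeIntensity(0.5);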
/// <summary> /// Process the filter on the specified image. /// </summary> /// /// <param name="sourceData">Source image data.</param> /// <param name="destinationData">Destination image data.</param> /// protected override unsafe void ProcessFilter(UnmanagedImage sourceData, UnmanagedImage destinationData) { int width = sourceData.Width; int height = sourceData.Height; int pixelSize = System.Drawing.Image.GetPixelFormatSize(sourceData.PixelFormat) / 8; int srcStride = sourceData.Stride; int dstStride = destinationData.Stride; int srcOffset = srcStride - width * pixelSize; int dstOffset = dstStride - width * pixelSize; byte* src = (byte*)sourceData.ImageData.ToPointer(); byte* dst = (byte*)destinationData.ImageData.ToPointer(); // TODO: Move or cache the creation of those filters int[,] kernel = Accord.Math.Matrix.Create(radius * 2 + 1, radius * 2 + 1, 1); Convolution conv = new Convolution(kernel); FastVariance fv = new FastVariance(radius); // Mean filter UnmanagedImage mean = conv.Apply(sourceData); // Variance filter UnmanagedImage var = fv.Apply(sourceData); // do the processing job if (sourceData.PixelFormat == PixelFormat.Format8bppIndexed) { byte* srcVar = (byte*)var.ImageData.ToPointer(); byte* srcMean = (byte*)mean.ImageData.ToPointer(); // Store maximum value from variance. int maxV = Max(width, height, srcVar, srcOffset); // Store minimum value from image. int minG = Min(width, height, src, srcOffset); for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++, src++, srcMean++, srcVar++, dst++) { double mP = *srcMean; double vP = *srcVar; double threshold = (mP + k * ((Math.Sqrt(vP) / (double)maxV - 1.0) * (mP - (double)minG))); *dst = (byte)(*src > threshold ? 255 : 0); } src += srcOffset; srcMean += srcOffset; srcVar += srcOffset; dst += dstOffset; } } }
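The per-pixel threshold above combines the local mean and standard deviation with the global minimum gray value and the maximum deviation, which matches the Wolf-Jolion binarization formula. A hedged usage sketch, assuming the surrounding class is Accord's WolfJolionThreshold filter (the class name and input are assumptions):

// Binarize an 8 bpp grayscale bitmap with a locally adaptive threshold.
var wolf = new WolfJolionThreshold();       // default radius and k
Bitmap binary = wolf.Apply(grayBitmap);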
/// <summary> /// Computes the new image size. /// </summary> /// protected override Size CalculateNewImageSize(UnmanagedImage sourceData) { // Calculate source size float w = sourceData.Width; float h = sourceData.Height; // Get the four corners and the center of the image PointF[] corners = { new PointF(0, 0), new PointF(w, 0), new PointF(0, h), new PointF(w, h), new PointF(w / 2f, h / 2f) }; // Project those points corners = homography.Inverse().TransformPoints(corners); // Recalculate image size float[] px = { corners[0].X, corners[1].X, corners[2].X, corners[3].X }; float[] py = { corners[0].Y, corners[1].Y, corners[2].Y, corners[3].Y }; float maxX = Matrix.Max(px); float minX = Matrix.Min(px); float newWidth = Math.Max(maxX, overlayImage.Width) - Math.Min(0, minX); float maxY = Accord.Math.Matrix.Max(py); float minY = Accord.Math.Matrix.Min(py); float newHeight = Math.Max(maxY, overlayImage.Height) - Math.Min(0, minY); // Store overlay image size this.imageSize = new Size((int)Math.Round(maxX - minX), (int)Math.Round(maxY - minY)); // Store image center this.center = Point.Round(corners[4]); // Calculate and store image offset int offsetX = 0, offsetY = 0; if (minX < 0) offsetX = (int)Math.Round(minX); if (minY < 0) offsetY = (int)Math.Round(minY); this.offset = new Point(offsetX, offsetY); if (Double.IsNaN(newWidth) || newWidth == 0) newWidth = 1; if (Double.IsNaN(newHeight) || newHeight == 0) newHeight = 1; // Return the final image size return new Size((int)Math.Ceiling(newWidth), (int)Math.Ceiling(newHeight)); }
/// <summary> /// Reset motion detector to initial state. /// </summary> /// /// <remarks><para>The method resets motion detection and motion processing algorithms by calling /// their <see cref="IMotionDetector.Reset"/> and <see cref="IMotionProcessing.Reset"/> methods.</para> /// </remarks> /// public void Reset( ) { lock ( sync ) { if ( detector != null ) { detector.Reset( ); } if ( processor != null ) { processor.Reset( ); } videoWidth = 0; videoHeight = 0; if ( zonesFrame != null ) { zonesFrame.Dispose( ); zonesFrame = null; } } }
/// <summary> /// Process the filter on the specified image. /// </summary> /// /// <param name="image">Source image data.</param> /// protected unsafe override void ProcessFilter(UnmanagedImage image) { int pixelSize = System.Drawing.Image.GetPixelFormatSize(image.PixelFormat) / 8; // get source image size int width = image.Width; int height = image.Height; // check that the image has the expected size if (image.Height != baseHeight || image.Width != baseWidth) throw new InvalidImagePropertiesException("Image does not have expected dimensions.", "image"); // determine channel depth from the pixel format (16-bit channels for 16/48/64 bpp formats, 8-bit otherwise); // note: the copy loops below assume the strides contain no padding (stride == width * pixelSize) int channelDepth = (image.PixelFormat == PixelFormat.Format16bppGrayScale || image.PixelFormat == PixelFormat.Format48bppRgb || image.PixelFormat == PixelFormat.Format64bppArgb || image.PixelFormat == PixelFormat.Format64bppPArgb) ? 16 : 8; if (channelDepth == 8) { // for each channel for (int c = 0; c < channels.Length; c++) { byte* dst = (byte*)image.ImageData.ToPointer() + c; byte* src = (byte*)channels[c].ImageData.ToPointer(); // copy channel into the interleaved image for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++, dst += pixelSize, src++) { *dst = *src; } } } } else { int samplesPerPixel = pixelSize / 2; // 16-bit samples per pixel // for each channel for (int c = 0; c < channels.Length; c++) { short* dst = (short*)image.ImageData.ToPointer() + c; short* src = (short*)channels[c].ImageData.ToPointer(); // copy channel into the interleaved image for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++, dst += samplesPerPixel, src++) { *dst = *src; } } } } }
private void BtnCalcAccord_Click(object sender, RoutedEventArgs e) { if (imageLoaded) { Stopwatch stopwatch = new Stopwatch(); stopwatch.Start(); Accord.Imaging.UnmanagedImage unmanagedImage1 = Accord.Imaging.UnmanagedImage.FromManagedImage(GrayScaleImage); Accord.Imaging.BlobCounter bc = new Accord.Imaging.BlobCounter { BackgroundThreshold = Color.Black, CoupledSizeFiltering = true, FilterBlobs = true, MinHeight = 30, MinWidth = 30, MaxHeight = 100, MaxWidth = 100 }; bc.ProcessImage(GrayScaleImage); Bitmap indexMap = AForge.Imaging.Image.Clone(GrayScaleImage); for (int x = 0; x < indexMap.Width; x++) { for (int y = 0; y < indexMap.Height; y++) { indexMap.SetPixel(x, y, System.Drawing.Color.Black); } } System.Drawing.Rectangle[] rects = bc.GetObjectsRectangles(); // process blobs BreadBlob[] breadBlob1 = new BreadBlob[bc.ObjectsCount]; int blobArrayIndex = 0; int blobPt = Convert.ToInt16(txbBlobNum.Text); int blobThreshold = Convert.ToInt16(txbBlobThreshold.Text); if (blobPt > bc.ObjectsCount) { blobPt = bc.ObjectsCount - 1; } StaticsCalculator MuffinStatistics = new StaticsCalculator(); Graphics g = Graphics.FromImage(indexMap); List <Accord.Imaging.Blob> blobList = new List <Accord.Imaging.Blob>(); foreach (Accord.Imaging.Blob blob in bc.GetObjects(GrayScaleImage, false)) { blobList.Add(blob); breadBlob1[blobArrayIndex] = new BreadBlob(); breadBlob1[blobArrayIndex].TopDownThreshold = blobThreshold; byte[,] blobArray = new byte[blob.Rectangle.Width, blob.Rectangle.Height]; for (int x = blob.Rectangle.Left; x < blob.Rectangle.Right; x++) { for (int y = blob.Rectangle.Top; y < blob.Rectangle.Bottom; y++) { System.Drawing.Color tempPixelColor = GrayScaleImage.GetPixel(x, y); blobArray[x - blob.Rectangle.Left, y - blob.Rectangle.Top] = tempPixelColor.G; } } breadBlob1[blobArrayIndex].PixelArray = blobArray; breadBlob1[blobArrayIndex].X = blob.Rectangle.X; breadBlob1[blobArrayIndex].Y = blob.Rectangle.Y; if (blobArrayIndex == blobPt) { System.Drawing.Rectangle tempRect = blob.Rectangle; tempRect.X -= 1; tempRect.Y -= 1; tempRect.Width += 2; tempRect.Height += 2; Accord.Imaging.Drawing.Rectangle(unmanagedImage1, tempRect, System.Drawing.Color.Yellow); } if (breadBlob1[blobArrayIndex].IsTop()) { Accord.Imaging.Drawing.Rectangle(unmanagedImage1, blob.Rectangle, System.Drawing.Color.Green); } else { Accord.Imaging.Drawing.Rectangle(unmanagedImage1, blob.Rectangle, System.Drawing.Color.Red); } RectangleF rectf = new RectangleF(blob.Rectangle.X, blob.Rectangle.Y, blob.Rectangle.Width, blob.Rectangle.Height); g.SmoothingMode = SmoothingMode.AntiAlias; g.InterpolationMode = InterpolationMode.HighQualityBicubic; g.PixelOffsetMode = PixelOffsetMode.HighQuality; g.DrawString(Convert.ToString(blob.ID - 1), new Font("Arial", 5), System.Drawing.Brushes.White, rectf); lblBlobHeight.Content = blob.Rectangle.Height; lblBlobWidth.Content = blob.Rectangle.Width; blobArrayIndex++; } lblAccordStdDev.Content = blobList[blobPt].ColorStdDev.B; BitmapImage indexMap_temp = ToBitmapImage(indexMap); g.Flush(); // conver to managed image if it is required to display it at some point of time Bitmap managedImage = unmanagedImage1.ToManagedImage(); // create filter Add filter = new Add(indexMap); // apply the filter Bitmap resultImage = filter.Apply(managedImage); BitmapImage GrayImage_temp = ToBitmapImage(resultImage); imgGray.Source = GrayImage_temp; stopwatch.Stop(); lblTime.Content = stopwatch.ElapsedMilliseconds; lblBlobCount.Content = bc.ObjectsCount; lblLib.Content = "Accord"; lblVariance.Content = 
breadBlob1[blobPt].GetVariance(BreadBlob.VarianceType.All); lblX.Content = breadBlob1[blobPt].X; lblY.Content = breadBlob1[blobPt].Y; lblQ1Variance.Content = ""; lblQ2Variance.Content = ""; lblQ3Variance.Content = ""; lblQ4Variance.Content = ""; lblQAverage.Content = breadBlob1[blobPt].GetVariance(BreadBlob.VarianceType.QAverage); //lblAllMuffinStat.Content = MuffinStatistics.StandardDeviation; } }
List <FeatureDescriptor> IFeatureDetector <FeatureDescriptor, double[]> .ProcessImage(UnmanagedImage image) { return(ProcessImage(image).ConvertAll(p => new FeatureDescriptor(p))); }
/// <summary> /// Process image looking for interest points. /// </summary> /// /// <param name="image">Source image data to process.</param> /// /// <returns>Returns list of found features points.</returns> /// /// <exception cref="UnsupportedImageFormatException"> /// The source image has incorrect pixel format. /// </exception> /// public List <double[]> ProcessImage(UnmanagedImage image) { // check image format if ( (image.PixelFormat != PixelFormat.Format8bppIndexed) && (image.PixelFormat != PixelFormat.Format24bppRgb) && (image.PixelFormat != PixelFormat.Format32bppRgb) && (image.PixelFormat != PixelFormat.Format32bppArgb) ) { throw new UnsupportedImageFormatException("Unsupported pixel format of the source image."); } // make sure we have grayscale image UnmanagedImage grayImage = null; if (image.PixelFormat == PixelFormat.Format8bppIndexed) { grayImage = image; } else { // create temporary grayscale image grayImage = Grayscale.CommonAlgorithms.BT709.Apply(image); } // get source image size int width = grayImage.Width; int height = grayImage.Height; int stride = grayImage.Stride; int offset = stride - width; // 1. Calculate 8-pixel neighborhood binary patterns patterns = new int[height, width]; unsafe { fixed(int *ptrPatterns = patterns) { // Begin skipping first line byte *src = (byte *)grayImage.ImageData.ToPointer() + stride; int * neighbors = ptrPatterns + width; // for each line for (int y = 1; y < height - 1; y++) { // skip first column neighbors++; src++; // for each inner pixel in line (skipping first and last) for (int x = 1; x < width - 1; x++, src++, neighbors++) { // Retrieve the pixel neighborhood byte a11 = src[+stride + 1], a12 = src[+1], a13 = src[-stride + 1]; byte a21 = src[+stride + 0], a22 = src[0], a23 = src[-stride + 0]; byte a31 = src[+stride - 1], a32 = src[-1], a33 = src[-stride - 1]; int sum = 0; if (a22 < a11) { sum += 1 << 0; } if (a22 < a12) { sum += 1 << 1; } if (a22 < a13) { sum += 1 << 2; } if (a22 < a21) { sum += 1 << 3; } if (a22 < a23) { sum += 1 << 4; } if (a22 < a31) { sum += 1 << 5; } if (a22 < a32) { sum += 1 << 6; } if (a22 < a33) { sum += 1 << 7; } *neighbors = sum; } // Skip last column neighbors++; src += offset + 1; } } } // Free some resources which wont be needed anymore if (image.PixelFormat != PixelFormat.Format8bppIndexed) { grayImage.Dispose(); } // 2. Compute cell histograms int cellCountX; int cellCountY; if (cellSize > 0) { cellCountX = (int)Math.Floor(width / (double)cellSize); cellCountY = (int)Math.Floor(height / (double)cellSize); histograms = new int[cellCountX, cellCountY][]; // For each cell for (int i = 0; i < cellCountX; i++) { for (int j = 0; j < cellCountY; j++) { // Compute the histogram int[] histogram = new int[numberOfBins]; int startCellX = i * cellSize; int startCellY = j * cellSize; // for each pixel in the cell for (int x = 0; x < cellSize; x++) { for (int y = 0; y < cellSize; y++) { histogram[patterns[startCellY + y, startCellX + x]]++; } } histograms[i, j] = histogram; } } } else { cellCountX = cellCountY = 1; int[] histogram = new int[numberOfBins]; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { histogram[patterns[i, j]]++; } } histograms = new int[, ][] { { histogram } }; } // 3. 
Group the cells into larger, normalized blocks int blocksCountX; int blocksCountY; if (blockSize > 0) { blocksCountX = (int)Math.Floor(cellCountX / (double)blockSize); blocksCountY = (int)Math.Floor(cellCountY / (double)blockSize); } else { blockSize = blocksCountX = blocksCountY = 1; } List <double[]> blocks = new List <double[]>(); for (int i = 0; i < blocksCountX; i++) { for (int j = 0; j < blocksCountY; j++) { double[] v = new double[blockSize * blockSize * numberOfBins]; int startBlockX = i * blockSize; int startBlockY = j * blockSize; int c = 0; // for each cell in the block for (int x = 0; x < blockSize; x++) { for (int y = 0; y < blockSize; y++) { int[] histogram = histograms[startBlockX + x, startBlockY + y]; // Copy all histograms to the block vector for (int k = 0; k < histogram.Length; k++) { v[c++] = histogram[k]; } } } double[] block = (normalize) ? v.Divide(v.Euclidean() + epsilon) : v; blocks.Add(block); } } return(blocks); }
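A minimal usage sketch of the feature extractor above, assuming it is Accord's LocalBinaryPattern descriptor (an assumption); each returned vector is one block of concatenated, optionally normalized cell histograms:

// Extract LBP block descriptors from a grayscale image.
var lbp = new LocalBinaryPattern();
List<double[]> descriptors = lbp.ProcessImage(UnmanagedImage.FromManagedImage(grayBitmap));
Console.WriteLine("{0} blocks of length {1}", descriptors.Count, descriptors[0].Length);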
/// <summary> /// Constructs a new Integral image from an unmanaged image. /// </summary> /// /// <param name="image">The source image from where the integral image should be computed.</param> /// <param name="channel">The image channel to consider in the computations. Default is 0.</param> /// <param name="computeTilted"><c>True</c> to compute the tilted version of the integral image, /// <c>false</c> otherwise. Default is false.</param> /// /// <returns> /// The <see cref="IntegralImage2"/> representation of /// the <paramref name="image">source image</paramref>.</returns> /// public static IntegralImage2 FromBitmap(UnmanagedImage image, int channel, bool computeTilted) { // check image format if (!(image.PixelFormat == PixelFormat.Format8bppIndexed || image.PixelFormat == PixelFormat.Format24bppRgb || image.PixelFormat == PixelFormat.Format32bppArgb || image.PixelFormat == PixelFormat.Format32bppRgb)) { throw new UnsupportedImageFormatException("Only grayscale, 24 and 32 bpp RGB images are supported."); } int pixelSize = System.Drawing.Image.GetPixelFormatSize(image.PixelFormat) / 8; // get source image size int width = image.Width; int height = image.Height; int stride = image.Stride; int offset = stride - width * pixelSize; // create integral image IntegralImage2 im = new IntegralImage2(width, height, computeTilted); long * nSum = im.nSum; long * sSum = im.sSum; long * tSum = im.tSum; int nWidth = im.nWidth; int tWidth = im.tWidth; if (image.PixelFormat == PixelFormat.Format8bppIndexed && channel != 0) { throw new ArgumentException("Only the first channel is available for 8 bpp images.", "channel"); } byte *srcStart = (byte *)image.ImageData.ToPointer() + channel; // do the job byte *src = srcStart; // for each line for (int y = 1; y <= height; y++) { int yy = nWidth * (y); int y1 = nWidth * (y - 1); // for each pixel for (int x = 1; x <= width; x++, src += pixelSize) { long p1 = *src; long p2 = p1 * p1; int r = yy + (x); int a = yy + (x - 1); int b = y1 + (x); int c = y1 + (x - 1); nSum[r] = p1 + nSum[a] + nSum[b] - nSum[c]; sSum[r] = p2 + sSum[a] + sSum[b] - sSum[c]; } src += offset; } if (computeTilted) { src = srcStart; // Left-to-right, top-to-bottom pass for (int y = 1; y <= height; y++, src += offset) { int yy = tWidth * (y); int y1 = tWidth * (y - 1); for (int x = 2; x < width + 2; x++, src += pixelSize) { int a = y1 + (x - 1); int b = yy + (x - 1); int c = y1 + (x - 2); int r = yy + (x); tSum[r] = *src + tSum[a] + tSum[b] - tSum[c]; } } { int yy = tWidth * (height); int y1 = tWidth * (height + 1); for (int x = 2; x < width + 2; x++, src += pixelSize) { int a = yy + (x - 1); int c = yy + (x - 2); int b = y1 + (x - 1); int r = y1 + (x); tSum[r] = tSum[a] + tSum[b] - tSum[c]; } } // Right-to-left, bottom-to-top pass for (int y = height; y >= 0; y--) { int yy = tWidth * (y); int y1 = tWidth * (y + 1); for (int x = width + 1; x >= 1; x--) { int r = yy + (x); int b = y1 + (x - 1); tSum[r] += tSum[b]; } } for (int y = height + 1; y >= 0; y--) { int yy = tWidth * (y); for (int x = width + 1; x >= 2; x--) { int r = yy + (x); int b = yy + (x - 2); tSum[r] -= tSum[b]; } } } return(im); }
/// <summary> /// Constructs a new Integral image from an unmanaged image. /// </summary> /// /// <param name="image">The source image from where the integral image should be computed.</param> /// <param name="computeTilted"><c>True</c> to compute the tilted version of the integral image, /// <c>false</c> otherwise. Default is false.</param> /// /// <returns> /// The <see cref="IntegralImage2"/> representation of /// the <paramref name="image">source image</paramref>.</returns> /// public static IntegralImage2 FromBitmap(UnmanagedImage image, bool computeTilted) { return(FromBitmap(image, 0, computeTilted)); }
/// <summary> /// Computes the center moments for the specified image. /// </summary> /// /// <param name="image">The image whose moments should be computed.</param> /// <param name="area">The region of interest in the image to compute moments for.</param> /// public override void Compute(UnmanagedImage image, Rectangle area) { this.Compute(new RawMoments(image, area, Order)); }
/// <summary> /// Reset motion detector to initial state. /// </summary> /// /// <remarks><para>Resets internal state and variables of motion detection algorithm. /// Usually this is required to be done before processing new video source, but /// may be also done at any time to restart motion detection algorithm.</para> /// </remarks> /// public void Reset( ) { lock ( sync ) { if ( backgroundFrame != null ) { backgroundFrame.Dispose( ); backgroundFrame = null; } if ( motionFrame != null ) { motionFrame.Dispose( ); motionFrame = null; } if ( tempFrame != null ) { tempFrame.Dispose( ); tempFrame = null; } framesCounter = 0; } }
/// <summary> /// Process the filter on the specified image. /// </summary> /// /// <param name="image">Source image data.</param> /// protected unsafe override void ProcessFilter(UnmanagedImage image) { int width = image.Width; int height = image.Height; int pixelSize = System.Drawing.Image.GetPixelFormatSize(image.PixelFormat) / 8; int stride = image.Stride; int offset = stride - image.Width * pixelSize; byte* src = (byte*)image.ImageData.ToPointer(); // Get maximum value of each color channel over the whole image int maxR = 0, maxG = 0, maxB = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++, src += pixelSize) { if (src[RGB.R] > maxR) maxR = src[RGB.R]; if (src[RGB.G] > maxG) maxG = src[RGB.G]; if (src[RGB.B] > maxB) maxB = src[RGB.B]; } src += offset; } double kr = maxR > 0 ? (255.0 / maxR) : 0; double kg = maxG > 0 ? (255.0 / maxG) : 0; double kb = maxB > 0 ? (255.0 / maxB) : 0; src = (byte*)image.ImageData.ToPointer(); for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++, src += pixelSize) { double r = kr * src[RGB.R]; double g = kg * src[RGB.G]; double b = kb * src[RGB.B]; src[RGB.R] = (byte)(r > 255 ? 255 : r); src[RGB.G] = (byte)(g > 255 ? 255 : g); src[RGB.B] = (byte)(b > 255 ? 255 : b); } src += offset; } }
/// <summary> /// Constructs a new Integral image from an unmanaged image. /// </summary> /// /// <param name="image">The source image from where the integral image should be computed.</param> /// /// <returns> /// The <see cref="IntegralImage2"/> representation of /// the <paramref name="image">source image</paramref>.</returns> /// public static IntegralImage2 FromBitmap(UnmanagedImage image) { return(FromBitmap(image, 0, false)); }
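Once built, the integral image answers rectangular sum queries in constant time. A small sketch using the overloads above; GetSum is assumed to take the rectangle's top-left corner and its size:

// Build the integral image and query the sum of a 10x10 window at (x, y).
IntegralImage2 ii = IntegralImage2.FromBitmap(grayImage);   // grayImage: 8 bpp UnmanagedImage
long windowSum = ii.GetSum(x, y, 10, 10);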
/// <summary> /// Process image looking for corners. /// </summary> /// /// <param name="image">Unmanaged source image to process.</param> /// /// <returns>Returns array of found corners (X-Y coordinates).</returns> /// /// <exception cref="UnsupportedImageFormatException">The source image has incorrect pixel format.</exception> /// public List<IntPoint> ProcessImage(UnmanagedImage image) { // check image format if ( (image.PixelFormat != PixelFormat.Format8bppIndexed) && (image.PixelFormat != PixelFormat.Format24bppRgb) && (image.PixelFormat != PixelFormat.Format32bppRgb) && (image.PixelFormat != PixelFormat.Format32bppArgb) ) { throw new UnsupportedImageFormatException("Unsupported pixel format of the source image."); } // get source image size int width = image.Width; int height = image.Height; int stride = image.Stride; int pixelSize = Bitmap.GetPixelFormatSize(image.PixelFormat) / 8; // window radius int windowRadius = windowSize / 2; // offset int offset = stride - windowSize * pixelSize; // create moravec cornerness map int[,] moravecMap = new int[height, width]; // do the job unsafe { byte* ptr = (byte*)image.ImageData.ToPointer(); // for each row for (int y = windowRadius, maxY = height - windowRadius; y < maxY; y++) { // for each pixel for (int x = windowRadius, maxX = width - windowRadius; x < maxX; x++) { int minSum = int.MaxValue; // go through 8 possible shifting directions for (int k = 0; k < 8; k++) { // calculate center of shifted window int sy = y + yDelta[k]; int sx = x + xDelta[k]; // check if shifted window is within the image if ( (sy < windowRadius) || (sy >= maxY) || (sx < windowRadius) || (sx >= maxX) ) { // skip this shifted window continue; } int sum = 0; byte* ptr1 = ptr + (y - windowRadius) * stride + (x - windowRadius) * pixelSize; byte* ptr2 = ptr + (sy - windowRadius) * stride + (sx - windowRadius) * pixelSize; // for each windows' rows for (int i = 0; i < windowSize; i++) { // for each windows' pixels for (int j = 0, maxJ = windowSize * pixelSize; j < maxJ; j++, ptr1++, ptr2++) { int dif = *ptr1 - *ptr2; sum += dif * dif; } ptr1 += offset; ptr2 += offset; } // check if the sum is mimimal if (sum < minSum) { minSum = sum; } } // threshold the minimum sum if (minSum < threshold) { minSum = 0; } moravecMap[y, x] = minSum; } } } // collect interesting points - only those points, which are local maximums List<IntPoint> cornersList = new List<IntPoint>(); // for each row for (int y = windowRadius, maxY = height - windowRadius; y < maxY; y++) { // for each pixel for (int x = windowRadius, maxX = width - windowRadius; x < maxX; x++) { int currentValue = moravecMap[y, x]; // for each windows' rows for (int i = -windowRadius; (currentValue != 0) && (i <= windowRadius); i++) { // for each windows' pixels for (int j = -windowRadius; j <= windowRadius; j++) { if (moravecMap[y + i, x + j] > currentValue) { currentValue = 0; break; } } } // check if this point is really interesting if (currentValue != 0) { cornersList.Add(new IntPoint(x, y)); } } } return cornersList; }
// Create motion zones' image private unsafe void CreateMotionZonesFrame( ) { lock ( sync ) { // free previous motion zones frame if ( zonesFrame != null ) { zonesFrame.Dispose( ); zonesFrame = null; } // create motion zones frame only if the algorithm has processed at least one frame if ( ( motionZones != null ) && ( motionZones.Length != 0 ) && ( videoWidth != 0 ) ) { zonesFrame = UnmanagedImage.Create( videoWidth, videoHeight, PixelFormat.Format8bppIndexed ); Rectangle imageRect = new Rectangle( 0, 0, videoWidth, videoHeight ); // draw all motion zones on motion frame foreach ( Rectangle rect in motionZones ) { rect.Intersect( imageRect ); // rectangle's dimensions int rectWidth = rect.Width; int rectHeight = rect.Height; // start pointer int stride = zonesFrame.Stride; byte* ptr = (byte*) zonesFrame.ImageData.ToPointer( ) + rect.Y * stride + rect.X; for ( int y = 0; y < rectHeight; y++ ) { Accord.SystemTools.SetUnmanagedMemory( ptr, 255, rectWidth ); ptr += stride; } } } } }
private void source_NewFrame(object sender, NewFrameEventArgs eventArgs) { if (requestedToStop) return; if (!IsTracking && !IsDetecting) return; lock (syncObject) { // skip first frames during initialization if (skip < 10) { skip++; return; } Bitmap frame = eventArgs.Frame; int width = frame.Width; int height = frame.Height; BitmapData data = frame.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, frame.PixelFormat); UnmanagedImage image = new UnmanagedImage(data); if (IsDetecting) { // Reduce frame size to process it faster float xscale = (float)width / resize.NewWidth; float yscale = (float)height / resize.NewHeight; UnmanagedImage downsample = resize.Apply(image); // Process the face detector in the downsampled image Rectangle[] regions = detector.ProcessFrame(downsample); // Check if the face has been steady 5 frames in a row if (detector.Steady >= 5) { // Yes, so track the face Rectangle face = regions[0]; // Reduce the face size to avoid tracking background Rectangle window = new Rectangle( (int)((face.X + face.Width / 2f) * xscale), (int)((face.Y + face.Height / 2f) * yscale), 1, 1); window.Inflate((int)(0.25f * face.Width * xscale), (int)(0.40f * face.Height * yscale)); // Re-initialize tracker tracker.Reset(); tracker.SearchWindow = window; tracker.ProcessFrame(image); // Update initial position computeCurrentPosition(); OnHeadEnter(new HeadEventArgs(currentX, currentY, currentAngle, currentScale)); } } else if (IsTracking) { tracker.Extract = (NewFrame != null); // Track the object tracker.ProcessFrame(image); // Get the object position TrackingObject obj = tracker.TrackingObject; // Update current position computeCurrentPosition(); if (obj.IsEmpty || !tracker.IsSteady) { OnHeadLeave(EventArgs.Empty); } else { OnHeadMove(new HeadEventArgs(currentX, currentY, currentAngle, currentScale)); if (NewFrame != null && obj.Image != null) { Bitmap headFrame = obj.Image.ToManagedImage(); NewFrame(this, new NewFrameEventArgs(headFrame)); } } } frame.UnlockBits(data); } }
/// <summary> /// Process the image filter. /// </summary> /// protected override void ProcessFilter(UnmanagedImage sourceData, UnmanagedImage destinationData) { // Locks the overlay image BitmapData overlayData = overlayImage.LockBits( new Rectangle(0, 0, overlayImage.Width, overlayImage.Height), ImageLockMode.ReadOnly, overlayImage.PixelFormat); // get source image size int width = sourceData.Width; int height = sourceData.Height; // get destination image size int newWidth = destinationData.Width; int newHeight = destinationData.Height; int srcPixelSize = System.Drawing.Image.GetPixelFormatSize(sourceData.PixelFormat) / 8; int orgPixelSize = System.Drawing.Image.GetPixelFormatSize(overlayData.PixelFormat) / 8; int srcStride = sourceData.Stride; int dstOffset = destinationData.Stride - newWidth * 4; // destination always 32bpp argb // Get center of first image Point center1 = new Point((int)(overlayImage.Width / 2f), (int)(overlayImage.Height / 2f)); // Get center of second image Point center2 = this.center; // Compute maximum center distances float dmax1 = Math.Min( distance(center1.X, center1.Y, center2.X - imageSize.Width / 2f, center1.Y), distance(center1.X, center1.Y, center1.X, center1.Y + overlayImage.Height / 2f)); float dmax2 = Math.Min( distance(center2.X, center2.Y, center2.X + imageSize.Width / 2f, center2.Y), distance(center2.X, center2.Y, center2.X, center2.Y + imageSize.Height / 2f)); float dmax = -System.Math.Abs(dmax2 - dmax1); // fill values byte fillR = fillColor.R; byte fillG = fillColor.G; byte fillB = fillColor.B; byte fillA = 0;//fillColor.A; // Retrieve homography matrix as float array float[,] H = (float[,])homography; // do the job unsafe { byte* org = (byte*)overlayData.Scan0.ToPointer(); byte* src = (byte*)sourceData.ImageData.ToPointer(); byte* dst = (byte*)destinationData.ImageData.ToPointer(); // destination pixel's coordinate relative to image center double cx, cy; // destination pixel's homogenous coordinate double hx, hy, hw; // source pixel's coordinates int ox, oy; // Copy the overlay image for (int y = 0; y < newHeight; y++) { for (int x = 0; x < newWidth; x++, dst += 4) { ox = (int)(x + offset.X); oy = (int)(y + offset.Y); // validate source pixel's coordinates if ((ox < 0) || (oy < 0) || (ox >= overlayData.Width) || (oy >= overlayData.Height)) { // fill destination image with filler dst[0] = fillB; dst[1] = fillG; dst[2] = fillR; dst[3] = fillA; } else { int c = oy * overlayData.Stride + ox * orgPixelSize; // fill destination image with pixel from original image if (orgPixelSize == 3) { // 24 bpp dst[0] = org[c + 0]; dst[1] = org[c + 1]; dst[2] = org[c + 2]; dst[3] = (byte)255; } else if (orgPixelSize == 4) { // 32 bpp dst[0] = org[c + 0]; dst[1] = org[c + 1]; dst[2] = org[c + 2]; dst[3] = org[c + 3]; } else { // 8 bpp dst[0] = org[c]; dst[1] = org[c]; dst[2] = org[c]; dst[3] = org[c]; } } } dst += dstOffset; } org = (byte*)overlayData.Scan0.ToPointer(); src = (byte*)sourceData.ImageData.ToPointer(); dst = (byte*)destinationData.ImageData.ToPointer(); // Project and blend the second image for (int y = 0; y < newHeight; y++) { for (int x = 0; x < newWidth; x++, dst += 4) { cx = x + offset.X; cy = y + offset.Y; // projection using homogenous coordinates hw = (H[2, 0] * cx + H[2, 1] * cy + H[2, 2]); hx = (H[0, 0] * cx + H[0, 1] * cy + H[0, 2]) / hw; hy = (H[1, 0] * cx + H[1, 1] * cy + H[1, 2]) / hw; // coordinate of the nearest point ox = (int)(hx); oy = (int)(hy); // validate source pixel's coordinates if ((ox >= 0) && (oy >= 0) && (ox < width) && (oy 
< height)) { int c = oy * srcStride + ox * srcPixelSize; // fill destination image with pixel from source image if (srcPixelSize == 4 && src[c + 3] == 0) { // source pixel is fully transparent, nothing to copy } else if (dst[3] > 0) { float f1 = 0.5f, f2 = 0.5f; if (Gradient) { // there is a pixel from the other image here, blend float d1 = distance(x, y, center1.X, center1.Y); float d2 = distance(x, y, center2.X, center2.Y); f1 = Vector.Scale(d1 - d2, 0, dmax, 0, 1); if (f1 < 0) f1 = 0f; if (f1 > 1) f1 = 1f; f2 = (1f - f1); } if (!AlphaOnly) { if (srcPixelSize == 3) { // 24 bpp dst[0] = (byte)(src[c + 0] * f2 + dst[0] * f1); dst[1] = (byte)(src[c + 1] * f2 + dst[1] * f1); dst[2] = (byte)(src[c + 2] * f2 + dst[2] * f1); dst[3] = (byte)255; } else if (srcPixelSize == 4) { // 32 bpp dst[0] = (byte)(src[c + 0] * f2 + dst[0] * f1); dst[1] = (byte)(src[c + 1] * f2 + dst[1] * f1); dst[2] = (byte)(src[c + 2] * f2 + dst[2] * f1); dst[3] = (byte)(src[c + 3] * f2 + dst[3] * f1); } else { // 8 bpp dst[0] = (byte)(src[c] * f2 + dst[0] * f1); dst[1] = (byte)(src[c] * f2 + dst[1] * f1); dst[2] = (byte)(src[c] * f2 + dst[2] * f1); dst[3] = (byte)255; } } else { if (srcPixelSize == 3) { // 24 bpp dst[0] = (byte)(src[c + 0]); dst[1] = (byte)(src[c + 1]); dst[2] = (byte)(src[c + 2]); } else if (srcPixelSize == 4) { // 32 bpp dst[0] = (byte)(src[c + 0]); dst[1] = (byte)(src[c + 1]); dst[2] = (byte)(src[c + 2]); } else { // 8 bpp dst[0] = (byte)(src[c]); dst[1] = (byte)(src[c]); dst[2] = (byte)(src[c]); } } } else { // just copy the source into the destination image if (srcPixelSize == 3) { // 24bpp dst[0] = src[c + 0]; dst[1] = src[c + 1]; dst[2] = src[c + 2]; dst[3] = (byte)255; } else if (srcPixelSize == 4) { // 32bpp dst[0] = src[c + 0]; dst[1] = src[c + 1]; dst[2] = src[c + 2]; dst[3] = src[c + 3]; } else { // 8bpp dst[0] = src[c]; dst[1] = src[c]; dst[2] = src[c]; dst[3] = 0; } } } } dst += dstOffset; } } overlayImage.UnlockBits(overlayData); }
/// <summary> /// Clone the unmanaged images. /// </summary> /// /// <returns>Returns clone of the unmanaged image.</returns> /// /// <remarks><para>The method does complete cloning of the object.</para></remarks> /// public UnmanagedImage Clone() { // allocate memory for the image IntPtr newImageData = System.Runtime.InteropServices.Marshal.AllocHGlobal(stride * height); System.GC.AddMemoryPressure(stride * height); UnmanagedImage newImage = new UnmanagedImage(newImageData, width, height, stride, pixelFormat); newImage.mustBeDisposed = true; Accord.SystemTools.CopyUnmanagedMemory(newImageData, imageData, stride * height); return newImage; }
/// <summary> /// Copy unmanaged image. /// </summary> /// /// <param name="destImage">Destination image to copy this image to.</param> /// /// <remarks><para>The method copies current unmanaged image to the specified image. /// Size and pixel format of the destination image must be exactly the same.</para></remarks> /// /// <exception cref="InvalidImagePropertiesException">Destination image has different size or pixel format.</exception> /// public void Copy(UnmanagedImage destImage) { if ( (width != destImage.width) || (height != destImage.height) || (pixelFormat != destImage.pixelFormat)) { throw new InvalidImagePropertiesException("Destination image has different size or pixel format."); } if (stride == destImage.stride) { // copy entire image Accord.SystemTools.CopyUnmanagedMemory(destImage.imageData, imageData, stride * height); } else { unsafe { int dstStride = destImage.stride; int copyLength = (stride < dstStride) ? stride : dstStride; byte* src = (byte*)imageData.ToPointer(); byte* dst = (byte*)destImage.imageData.ToPointer(); // copy line by line for (int i = 0; i < height; i++) { Accord.SystemTools.CopyUnmanagedMemory(dst, src, copyLength); dst += dstStride; src += stride; } } } }
/// <summary> /// Allocate new image in unmanaged memory. /// </summary> /// /// <param name="width">Image width.</param> /// <param name="height">Image height.</param> /// <param name="pixelFormat">Image pixel format.</param> /// /// <returns>Returns image allocated in unmanaged memory.</returns> /// /// <remarks><para>Allocates a new image with the specified attributes in unmanaged memory.</para> /// /// <para><note>The method supports only /// <see cref="System.Drawing.Imaging.PixelFormat">Format8bppIndexed</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format16bppGrayScale</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format24bppRgb</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format32bppRgb</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format32bppArgb</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format32bppPArgb</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format48bppRgb</see>, /// <see cref="System.Drawing.Imaging.PixelFormat">Format64bppArgb</see> and /// <see cref="System.Drawing.Imaging.PixelFormat">Format64bppPArgb</see> pixel formats. /// If the <see cref="System.Drawing.Imaging.PixelFormat">Format8bppIndexed</see> /// format is specified, a palette is not created for the image (it is assumed to be an /// 8 bpp grayscale image). /// </note></para> /// </remarks> /// /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format was specified.</exception> /// <exception cref="InvalidImagePropertiesException">Invalid image size was specified.</exception> /// public static UnmanagedImage Create(int width, int height, PixelFormat pixelFormat) { int bytesPerPixel = 0; // calculate bytes per pixel switch (pixelFormat) { case PixelFormat.Format8bppIndexed: bytesPerPixel = 1; break; case PixelFormat.Format16bppGrayScale: bytesPerPixel = 2; break; case PixelFormat.Format24bppRgb: bytesPerPixel = 3; break; case PixelFormat.Format32bppRgb: case PixelFormat.Format32bppArgb: case PixelFormat.Format32bppPArgb: bytesPerPixel = 4; break; case PixelFormat.Format48bppRgb: bytesPerPixel = 6; break; case PixelFormat.Format64bppArgb: case PixelFormat.Format64bppPArgb: bytesPerPixel = 8; break; default: throw new UnsupportedImageFormatException("Cannot create image with specified pixel format."); } // check image size if ((width <= 0) || (height <= 0)) { throw new InvalidImagePropertiesException("Invalid image size specified."); } // calculate stride int stride = width * bytesPerPixel; if (stride % 4 != 0) { stride += (4 - (stride % 4)); } // allocate memory for the image IntPtr imageData = System.Runtime.InteropServices.Marshal.AllocHGlobal(stride * height); Accord.SystemTools.SetUnmanagedMemory(imageData, 0, stride * height); System.GC.AddMemoryPressure(stride * height); UnmanagedImage image = new UnmanagedImage(imageData, width, height, stride, pixelFormat); image.mustBeDisposed = true; return image; }
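Because Create allocates unmanaged memory, images produced this way should be disposed explicitly. A small sketch combining Create with the Copy method shown earlier:

// Allocate two 8 bpp grayscale images, copy one into the other, then release both.
UnmanagedImage a = UnmanagedImage.Create(320, 240, PixelFormat.Format8bppIndexed);
UnmanagedImage b = UnmanagedImage.Create(320, 240, PixelFormat.Format8bppIndexed);
a.Copy(b);      // same size and pixel format, so the copy succeeds
a.Dispose();
b.Dispose();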
/// <summary> /// Extracts the contour from a single object in a grayscale image. /// </summary> /// /// <param name="image">A grayscale image.</param> /// <returns>A list of <see cref="IntPoint"/>s defining a contour.</returns> /// public List<IntPoint> FindContour(UnmanagedImage image) { CheckPixelFormat(image.PixelFormat); int width = image.Width; int height = image.Height; int stride = image.Stride; List<IntPoint> contour = new List<IntPoint>(); unsafe { byte* src = (byte*)image.ImageData.ToPointer(); byte* start = null; IntPoint prevPosition = new IntPoint(); // 1. Find the lowest point in the image // The lowest point is searched first by lowest X, then lowest Y, to use // the same ordering of AForge.NET's GrahamConvexHull. Unfortunately, this // means we have to search our image by inspecting columns rather than rows. bool found = false; byte* col = src; for (int x = 0; x < width && !found; x++, col++) { byte* row = col; for (int y = 0; y < height && !found; y++, row += stride) { if (*row > Threshold) { start = row; prevPosition = new IntPoint(x, y); contour.Add(prevPosition); found = true; } } } if (contour.Count == 0) { // Empty image return contour; } // 2. Beginning on the first point, starting from left // neighbor and going into counter-clockwise direction, // find a neighbor pixel which is black. int[] windowOffset = { +1, // 0: Right -stride + 1, // 1: Top-Right -stride, // 2: Top -stride - 1, // 3: Top-Left -1, // 4: Left +stride - 1, // 5: Bottom-Left +stride, // 6: Bottom +stride + 1, // 7: Bottom-Right }; int direction = 4; // 4: Left byte* current = start; byte* previous = null; do // Search until we find a dead end (or the starting pixel) { found = false; // Search in the neighborhood window for (int i = 0; i < windowOffset.Length; i++) { // Find the next candidate neighbor point IntPoint next = prevPosition + positionOffset[direction]; // Check if it is inside the blob area if (next.X < 0 || next.X >= width || next.Y < 0 || next.Y >= height) { // It isn't. Change direction and continue. direction = (direction + 1) % windowOffset.Length; continue; } // Find the next candidate neighbor pixel byte* neighbor = unchecked(current + windowOffset[direction]); // Check if it is a colored pixel if (*neighbor <= Threshold) { // It isn't. Change direction and continue. direction = (direction + 1) % windowOffset.Length; continue; } // Check if it is a previously found pixel if (neighbor == previous || neighbor == start) { // We found a dead end. found = false; break; } // If we reached until here, we have // found a neighboring black pixel. found = true; break; } if (found) { // Navigate to neighbor pixel previous = current; current = unchecked(current + windowOffset[direction]); // Add to the contour prevPosition += positionOffset[direction]; contour.Add(prevPosition); // Continue counter-clockwise search // from the most promising direction direction = nextDirection[direction]; } } while (found); } return contour; }
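A brief usage sketch for FindContour, assuming the enclosing class is Accord's BorderFollowing contour extractor (an assumption); the input is expected to contain a single object brighter than Threshold:

// Trace the outer contour of the single blob in a grayscale image.
var borderFollowing = new BorderFollowing();
List<IntPoint> contour = borderFollowing.FindContour(UnmanagedImage.FromManagedImage(blobBitmap));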
/// <summary> /// Create unmanaged image from the specified managed image. /// </summary> /// /// <param name="imageData">Source locked image data.</param> /// /// <returns>Returns new unmanaged image, which is a copy of source managed image.</returns> /// /// <remarks><para>The method creates an exact copy of specified managed image, but allocated /// in unmanaged memory. This means that managed image may be unlocked right after call to this /// method.</para></remarks> /// /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of source image.</exception> /// public static UnmanagedImage FromManagedImage(BitmapData imageData) { PixelFormat pixelFormat = imageData.PixelFormat; // check source pixel format if ( (pixelFormat != PixelFormat.Format8bppIndexed) && (pixelFormat != PixelFormat.Format16bppGrayScale) && (pixelFormat != PixelFormat.Format24bppRgb) && (pixelFormat != PixelFormat.Format32bppRgb) && (pixelFormat != PixelFormat.Format32bppArgb) && (pixelFormat != PixelFormat.Format32bppPArgb) && (pixelFormat != PixelFormat.Format48bppRgb) && (pixelFormat != PixelFormat.Format64bppArgb) && (pixelFormat != PixelFormat.Format64bppPArgb)) { throw new UnsupportedImageFormatException("Unsupported pixel format of the source image."); } // allocate memory for the image IntPtr dstImageData = System.Runtime.InteropServices.Marshal.AllocHGlobal(imageData.Stride * imageData.Height); System.GC.AddMemoryPressure(imageData.Stride * imageData.Height); UnmanagedImage image = new UnmanagedImage(dstImageData, imageData.Width, imageData.Height, imageData.Stride, pixelFormat); Accord.SystemTools.CopyUnmanagedMemory(dstImageData, imageData.Scan0, imageData.Stride * imageData.Height); image.mustBeDisposed = true; return image; }
/// <summary> /// Process the filter on the specified image. /// </summary> /// /// <param name="sourceData">Source image data.</param> /// <param name="destinationData">Destination image data.</param> /// protected override void ProcessFilter(UnmanagedImage sourceData, UnmanagedImage destinationData) { convolution.Apply(sourceData, destinationData); }
/// <summary> /// Construct integral image from source grayscale image. /// </summary> /// /// <param name="image">Source unmanaged image.</param> /// /// <returns>Returns integral image.</returns> /// /// <exception cref="UnsupportedImageFormatException">The source image has incorrect pixel format.</exception> /// public static IntegralImage FromBitmap(UnmanagedImage image) { // check image format if (image.PixelFormat != PixelFormat.Format8bppIndexed) { throw new UnsupportedImageFormatException("Source image must be a grayscale (8 bpp indexed) image."); } // get source image size int width = image.Width; int height = image.Height; int offset = image.Stride - width; // create integral image var im = new IntegralImage(width, height); uint[,] integralImage = im.integralImage; // do the job unsafe { byte* src = (byte*)image.ImageData.ToPointer(); // for each line for (int y = 1; y <= height; y++) { uint rowSum = 0; // for each pixel for (int x = 1; x <= width; x++, src++) { rowSum += *src; integralImage[y, x] = rowSum + integralImage[y - 1, x]; } src += offset; } } return im; }
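A short usage sketch for the simpler (plain sums only) integral image above; GetRectangleSum is assumed to take inclusive corner coordinates, following the AForge convention:

// Build the integral image and query the sum inside the rectangle (x1, y1)-(x2, y2).
IntegralImage integral = IntegralImage.FromBitmap(grayImage);   // grayImage: 8 bpp UnmanagedImage
uint rectSum = integral.GetRectangleSum(x1, y1, x2, y2);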
/// <summary> /// Initializes a new instance of the <see cref="CentralMoments"/> class. /// </summary> /// /// <param name="order">The maximum order for the moments.</param> /// <param name="image">The image whose moments should be computed.</param> /// <param name="area">The region of interest in the image to compute moments for.</param> /// public CentralMoments(UnmanagedImage image, Rectangle area, int order = DefaultOrder) : base(image, area, order) { }
/// <summary> /// Initializes a new instance of the <see cref="RecursiveBlobCounter"/> class. /// </summary> /// /// <param name="image">Unmanaged image to look for objects in.</param> /// public RecursiveBlobCounter(UnmanagedImage image) : base(image) { }
/// <summary> /// Process new video frame. /// </summary> /// /// <param name="videoFrame">Video frame to process (detect motion in).</param> /// /// <remarks><para>Processes new frame from video source and detects motion in it.</para> /// /// <para>Check <see cref="MotionLevel"/> property to get information about amount of motion /// (changes) in the processed frame.</para> /// </remarks> /// public void ProcessFrame( UnmanagedImage videoFrame ) { lock ( sync ) { // check background frame if ( backgroundFrame == null ) { lastTimeMeasurment = DateTime.Now; // save image dimension width = videoFrame.Width; height = videoFrame.Height; // alocate memory for previous and current frames backgroundFrame = UnmanagedImage.Create( width, height, PixelFormat.Format8bppIndexed ); motionFrame = UnmanagedImage.Create( width, height, PixelFormat.Format8bppIndexed ); frameSize = motionFrame.Stride * height; // temporary buffer if ( suppressNoise ) { tempFrame = UnmanagedImage.Create( width, height, PixelFormat.Format8bppIndexed ); } // convert source frame to grayscale Accord.Vision.Tools.ConvertToGrayscale(videoFrame, backgroundFrame); return; } // check image dimension if ( ( videoFrame.Width != width ) || ( videoFrame.Height != height ) ) return; // convert current image to grayscale Accord.Vision.Tools.ConvertToGrayscale(videoFrame, motionFrame); unsafe { // pointers to background and current frames byte* backFrame; byte* currFrame; int diff; // update background frame if ( millisecondsPerBackgroundUpdate == 0 ) { // update background frame using frame counter as a base if ( ++framesCounter == framesPerBackgroundUpdate ) { framesCounter = 0; backFrame = (byte*) backgroundFrame.ImageData.ToPointer( ); currFrame = (byte*) motionFrame.ImageData.ToPointer( ); for ( int i = 0; i < frameSize; i++, backFrame++, currFrame++ ) { diff = *currFrame - *backFrame; if ( diff > 0 ) { ( *backFrame )++; } else if ( diff < 0 ) { ( *backFrame )--; } } } } else { // update background frame using timer as a base // get current time and calculate difference DateTime currentTime = DateTime.Now; TimeSpan timeDff = currentTime - lastTimeMeasurment; // save current time as the last measurment lastTimeMeasurment = currentTime; int millisonds = (int) timeDff.TotalMilliseconds + millisecondsLeftUnprocessed; // save remainder so it could be taken into account in the future millisecondsLeftUnprocessed = millisonds % millisecondsPerBackgroundUpdate; // get amount for background update int updateAmount = (int) ( millisonds / millisecondsPerBackgroundUpdate ); backFrame = (byte*) backgroundFrame.ImageData.ToPointer( ); currFrame = (byte*) motionFrame.ImageData.ToPointer( ); for ( int i = 0; i < frameSize; i++, backFrame++, currFrame++ ) { diff = *currFrame - *backFrame; if ( diff > 0 ) { ( *backFrame ) += (byte) ( ( diff < updateAmount ) ? diff : updateAmount ); } else if ( diff < 0 ) { ( *backFrame ) += (byte) ( ( -diff < updateAmount ) ? diff : -updateAmount ); } } } backFrame = (byte*) backgroundFrame.ImageData.ToPointer( ); currFrame = (byte*) motionFrame.ImageData.ToPointer( ); // 1 - get difference between frames // 2 - threshold the difference for ( int i = 0; i < frameSize; i++, backFrame++, currFrame++ ) { // difference diff = (int) *currFrame - (int) *backFrame; // treshold *currFrame = ( ( diff >= differenceThreshold ) || ( diff <= differenceThresholdNeg ) ) ? 
(byte) 255 : (byte) 0; } if ( suppressNoise ) { // suppress noise and calculate motion amount Accord.SystemTools.CopyUnmanagedMemory( tempFrame.ImageData, motionFrame.ImageData, frameSize ); erosionFilter.Apply( tempFrame, motionFrame ); if ( keepObjectEdges ) { Accord.SystemTools.CopyUnmanagedMemory( tempFrame.ImageData, motionFrame.ImageData, frameSize ); dilatationFilter.Apply( tempFrame, motionFrame ); } } // calculate amount of motion pixels pixelsChanged = 0; byte* motion = (byte*) motionFrame.ImageData.ToPointer( ); for ( int i = 0; i < frameSize; i++, motion++ ) { pixelsChanged += ( *motion & 1 ); } } } }
/// <summary> /// Actual objects map building. /// </summary> /// /// <param name="image">Unmanaged image to process.</param> /// /// <remarks>The method supports 8 bpp indexed grayscale images and 24/32 bpp color images.</remarks> /// /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception> /// protected override void BuildObjectsMap(UnmanagedImage image) { this.stride = image.Stride; // check pixel format if ((image.PixelFormat != PixelFormat.Format8bppIndexed) && (image.PixelFormat != PixelFormat.Format24bppRgb) && (image.PixelFormat != PixelFormat.Format32bppRgb) && (image.PixelFormat != PixelFormat.Format32bppArgb) && (image.PixelFormat != PixelFormat.Format32bppPArgb)) { throw new UnsupportedImageFormatException("Unsupported pixel format of the source image."); } // allocate temporary labels array tempLabels = new int[(ImageWidth + 2) * (ImageHeight + 2)]; // fill boundaries with reserved value for (int x = 0, mx = ImageWidth + 2; x < mx; x++) { tempLabels[x] = -1; tempLabels[x + (ImageHeight + 1) * (ImageWidth + 2)] = -1; } for (int y = 0, my = ImageHeight + 2; y < my; y++) { tempLabels[y * (ImageWidth + 2)] = -1; tempLabels[y * (ImageWidth + 2) + ImageWidth + 1] = -1; } // initial objects count ObjectsCount = 0; // do the job unsafe { byte* src = (byte*)image.ImageData.ToPointer(); int p = ImageWidth + 2 + 1; if (image.PixelFormat == PixelFormat.Format8bppIndexed) { int offset = stride - ImageWidth; // for each line for (int y = 0; y < ImageHeight; y++) { // for each pixel for (int x = 0; x < ImageWidth; x++, src++, p++) { // check for non-labeled pixel if ((*src > backgroundThresholdG) && (tempLabels[p] == 0)) { ObjectsCount++; LabelPixel(src, p); } } src += offset; p += 2; } } else { pixelSize = Bitmap.GetPixelFormatSize(image.PixelFormat) / 8; int offset = stride - ImageWidth * pixelSize; // for each line for (int y = 0; y < ImageHeight; y++) { // for each pixel for (int x = 0; x < ImageWidth; x++, src += pixelSize, p++) { // check for non-labeled pixel if (( (src[RGB.R] > backgroundThresholdR) || (src[RGB.G] > backgroundThresholdG) || (src[RGB.B] > backgroundThresholdB) ) && (tempLabels[p] == 0)) { ObjectsCount++; LabelColorPixel(src, p); } } src += offset; p += 2; } } } // allocate labels array ObjectLabels = new int[ImageWidth * ImageHeight]; for (int y = 0; y < ImageHeight; y++) Array.Copy(tempLabels, (y + 1) * (ImageWidth + 2) + 1, ObjectLabels, y * ImageWidth, ImageWidth); }
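BuildObjectsMap is called internally by ProcessImage; client code normally goes through the public BlobCounterBase API, as sketched below (the input image is an assumption):

// Label connected components and fetch their bounding rectangles.
var blobCounter = new RecursiveBlobCounter();
blobCounter.ProcessImage(unmanagedImage);
Rectangle[] rectangles = blobCounter.GetObjectsRectangles();
Console.WriteLine("{0} objects found", blobCounter.ObjectsCount);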
/// <summary> /// Initializes a new instance of the <see cref="FastRetinaKeypointDescriptor"/> class. /// </summary> /// /// <param name="image">The source image from which the keypoints were extracted.</param> /// <param name="integral">The integral image computed from the source image.</param> /// <param name="pattern">The FREAK sampling pattern used to compute the descriptors.</param> /// internal FastRetinaKeypointDescriptor(UnmanagedImage image, IntegralImage integral, FastRetinaKeypointPattern pattern) { this.Extended = false; this.IsOrientationNormal = true; this.IsScaleNormal = true; this.Image = image; this.Integral = integral; this.pattern = pattern; }
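Because the constructor is internal, user code does not create the descriptor directly; it is built by the surrounding FREAK detector from the image, its integral image and the sampling pattern. The sketch below assumes the detector-side API (FastRetinaKeypointDetector, GetDescriptor and Compute are assumed entry points, not shown in this excerpt).

// Assumed usage through the detector that owns the descriptor.
var freak = new FastRetinaKeypointDetector();
List<FastRetinaKeypoint> points = freak.ProcessImage(image);

FastRetinaKeypointDescriptor descriptor = freak.GetDescriptor();
descriptor.Extended = true;     // opt into extended descriptors (the constructor above defaults this to false)
descriptor.Compute(points);     // fills in the descriptor data for each keypoint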
/// <summary> /// Process new video frame. /// </summary> /// /// <param name="videoFrame">Video frame to process (detect motion in).</param> /// /// <returns>Returns the amount of motion, as provided by the <see cref="IMotionDetector.MotionLevel"/> /// property of the <see cref="MotionDetectionAlgorithm">motion detection algorithm in use</see>.</returns> /// /// <remarks><para>The method first applies the motion detection algorithm to the specified video /// frame to calculate the <see cref="IMotionDetector.MotionLevel">motion level</see> and /// <see cref="IMotionDetector.MotionFrame">motion frame</see>. It then applies the motion processing algorithm /// (if one was set) to do further post-processing, like highlighting motion areas, counting moving /// objects, etc.</para> /// /// <para><note>If the <see cref="MotionZones"/> property is set, this method performs /// motion filtering right after the motion algorithm completes and before the motion frame is passed to the motion /// processing algorithm. The filtering is done directly on the motion frame produced /// by the motion detection algorithm. The method also recalculates the motion level and returns the /// new value, which takes motion zones into account (but the new value is not written back to the motion detection /// algorithm's <see cref="IMotionDetector.MotionLevel"/> property). /// </note></para> /// </remarks> /// public float ProcessFrame( UnmanagedImage videoFrame ) { lock ( sync ) { if ( detector == null ) return 0; videoWidth = videoFrame.Width; videoHeight = videoFrame.Height; float motionLevel = 0; // call motion detection detector.ProcessFrame( videoFrame ); motionLevel = detector.MotionLevel; // check if motion zones are specified if ( motionZones != null ) { if ( zonesFrame == null ) { CreateMotionZonesFrame( ); } if ( ( videoWidth == zonesFrame.Width ) && ( videoHeight == zonesFrame.Height ) ) { unsafe { // pointers to motion zones frame and motion frame byte* zonesPtr = (byte*) zonesFrame.ImageData.ToPointer( ); byte* motionPtr = (byte*) detector.MotionFrame.ImageData.ToPointer( ); motionLevel = 0; for ( int i = 0, frameSize = zonesFrame.Stride * videoHeight; i < frameSize; i++, zonesPtr++, motionPtr++ ) { *motionPtr &= *zonesPtr; motionLevel += ( *motionPtr & 1 ); } motionLevel /= ( videoWidth * videoHeight ); } } } // call motion post processing if ( ( processor != null ) && ( detector.MotionFrame != null ) ) { processor.ProcessFrame( videoFrame, detector.MotionFrame ); } return motionLevel; } }
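A typical way to exercise this method is to pair a detection algorithm with a processing algorithm and, optionally, restrict detection to zones. The sketch below is illustrative: the zone rectangle and alarm threshold are arbitrary, and it assumes this method belongs to the MotionDetector wrapper that exposes the MotionZones property referenced in the remarks.

// Assumed wiring of a motion detection + post-processing pipeline.
var motion = new MotionDetector(
    new TwoFramesDifferenceDetector(),    // IMotionDetector implementation
    new MotionAreaHighlighting());        // IMotionProcessing implementation

// limit detection to a region of interest (example coordinates)
motion.MotionZones = new Rectangle[] { new Rectangle(0, 0, 320, 240) };

float level = motion.ProcessFrame(videoFrame);
if (level > 0.02f)                        // example alarm threshold
{
    // react to motion detected inside the zones
}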
/// <summary> /// Process video and motion frames, doing further post-processing after /// motion detection has been performed. /// </summary> /// /// <param name="videoFrame">Original video frame.</param> /// <param name="motionFrame">Motion frame provided by motion detection /// algorithm (see <see cref="IMotionDetector"/>).</param> /// /// <remarks><para>Processes the provided motion frame and highlights motion areas /// on the original video frame with the <see cref="HighlightColor">specified color</see>.</para> /// </remarks> /// /// <exception cref="InvalidImagePropertiesException">Motion frame is not an 8 bpp image, but it must be.</exception> /// <exception cref="UnsupportedImageFormatException">Video frame must be an 8 bpp grayscale image or a 24/32 bpp color image.</exception> /// public void ProcessFrame( UnmanagedImage videoFrame, UnmanagedImage motionFrame ) { if ( motionFrame.PixelFormat != PixelFormat.Format8bppIndexed ) { throw new InvalidImagePropertiesException( "Motion frame must be an 8 bpp image." ); } if ( ( videoFrame.PixelFormat != PixelFormat.Format8bppIndexed ) && ( videoFrame.PixelFormat != PixelFormat.Format24bppRgb ) && ( videoFrame.PixelFormat != PixelFormat.Format32bppRgb ) && ( videoFrame.PixelFormat != PixelFormat.Format32bppArgb ) ) { throw new UnsupportedImageFormatException( "Video frame must be an 8 bpp grayscale image or a 24/32 bpp color image." ); } int width = videoFrame.Width; int height = videoFrame.Height; int pixelSize = Bitmap.GetPixelFormatSize( videoFrame.PixelFormat ) / 8; if ( ( motionFrame.Width != width ) || ( motionFrame.Height != height ) ) return; unsafe { byte* src = (byte*) videoFrame.ImageData.ToPointer( ); byte* motion = (byte*) motionFrame.ImageData.ToPointer( ); int srcOffset = videoFrame.Stride - width * pixelSize; int motionOffset = motionFrame.Stride - width; if ( pixelSize == 1 ) { // grayscale case byte fillG = (byte) ( 0.2125 * highlightColor.R + 0.7154 * highlightColor.G + 0.0721 * highlightColor.B ); for ( int y = 0; y < height; y++ ) { for ( int x = 0; x < width; x++, motion++, src++ ) { if ( ( *motion != 0 ) && ( ( ( x + y ) & 1 ) == 0 ) ) { *src = fillG; } } src += srcOffset; motion += motionOffset; } } else { // color case byte fillR = highlightColor.R; byte fillG = highlightColor.G; byte fillB = highlightColor.B; for ( int y = 0; y < height; y++ ) { for ( int x = 0; x < width; x++, motion++, src += pixelSize ) { if ( ( *motion != 0 ) && ( ( ( x + y ) & 1 ) == 0 ) ) { src[RGB.R] = fillR; src[RGB.G] = fillG; src[RGB.B] = fillB; } } src += srcOffset; motion += motionOffset; } } } }
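When used on its own, the highlighter only needs a color and the pair of frames; the motion frame acts as a mask and the video frame is modified in place. A brief sketch follows, assuming the method above belongs to the MotionAreaHighlighting class with the HighlightColor property it references.

// Assumed stand-alone use of the highlighting post-processor.
var highlighter = new MotionAreaHighlighting();
highlighter.HighlightColor = Color.Red;

// videoFrame:  8 bpp grayscale or 24/32 bpp color frame (modified in place)
// motionFrame: 8 bpp mask produced by an IMotionDetector
highlighter.ProcessFrame(videoFrame, motionFrame);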
/// <summary> /// Constructs a new Integral image from an unmanaged image. /// </summary> /// /// <param name="image">The source image from where the integral image should be computed.</param> /// <param name="channel">The image channel to consider in the computations. Default is 0.</param> /// /// <returns> /// The <see cref="IntegralImage2"/> representation of /// the <paramref name="image">source image</paramref>.</returns> /// public static IntegralImage2 FromBitmap(UnmanagedImage image, int channel) { return FromBitmap(image, channel, false); }
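Once constructed, the integral image allows rectangular pixel sums to be queried in constant time, which is what makes it useful for Haar-like features and fast box filters. A minimal sketch follows; the window coordinates are arbitrary, and the GetSum call (assumed signature: x, y, width, height) is taken to return the sum of the selected channel over the given rectangle.

// Build the integral image from channel 0 of the source and query a window.
IntegralImage2 ii = IntegralImage2.FromBitmap(image, 0);

long sum = ii.GetSum(10, 10, 32, 32);     // sum of pixel values in a 32x32 window at (10, 10)
double mean = sum / (32.0 * 32.0);        // average intensity inside that window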