Example 1
        /// <summary>
        /// Checks values and produces a mask where non-zero values mean that the values at those indices are within the range.
        /// </summary>
        /// <param name="img">Input image.</param>
        /// <param name="min">Minimal value.</param>
        /// <param name="max">Maximal value.</param>
        /// <param name="valueToSet">Value to set in the result mask.</param>
        /// <param name="channelIndicies">Which channel indices to check. If omitted, all channels are checked.</param>
        /// <returns>Mask.</returns>
        public static Gray<byte>[,] InRange<TColor>(this TColor[,] img, TColor min, TColor max, byte valueToSet = 255, params int[] channelIndicies)
        where TColor : struct, IColor<byte>
        {
            var minArr = min.ColorToArray<TColor, byte>();
            var maxArr = max.ColorToArray<TColor, byte>();

            if (channelIndicies == null || channelIndicies.Length == 0)
            {
                channelIndicies = Enumerable.Range(0, img.ColorInfo().ChannelCount).ToArray();
            }

            if (channelIndicies.Length > img.ColorInfo().ChannelCount)
            {
                throw new Exception("Number of processed channels must not exceed the number of available image channels!");
            }

            var destMask = new Gray<byte>[img.Height(), img.Width()];
            destMask.SetValue<Gray<byte>>(Byte.MaxValue);

            using (var uImg = img.Lock())
            using (var uDestMask = destMask.Lock())
            {
                inRangeByte(uImg, minArr, maxArr, channelIndicies, uDestMask, valueToSet);
            }

            return destMask;
        }
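
A minimal usage sketch (not from the original source): the `frame` variable and its size are placeholders, the library namespaces are assumed to be imported, and the `Bgr<byte>(b, g, r)` constructor order is an assumption.

        // Hypothetical usage: mask the pixels whose B, G and R values all fall in range.
        Bgr<byte>[,] frame = new Bgr<byte>[480, 640];
        Gray<byte>[,] redMask = frame.InRange(new Bgr<byte>(0, 0, 100), new Bgr<byte>(80, 80, 255));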
Example 2
 /// <summary>
 /// Performs non-maxima suppression on the given gray image. Can be useful for filtering detections (e.g. post-processing the output of the Harris detector).
 /// </summary>
 /// <param name="img">Image.</param>
 /// <param name="dest">Destination image. Must have the same size as the source image.</param>
 /// <param name="radius">Non-maxima suppression radius (kernel radius).</param>
 /// <param name="discardValue">The value to be discarded (0 for black).</param>
 public static void SupressNonMaxima(this Gray<float>[,] img, Gray<float>[,] dest, int radius = 3, int discardValue = 0)
 {
     using (var uImg = img.Lock())
     using (var uDest = dest.Lock())
     {
         supressNonMaxima_Float(uImg, uDest, radius, discardValue);
     }
 }
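
A minimal usage sketch (not from the original source; the image contents and sizes are placeholders). The destination image must be allocated by the caller with the same size as the source.

     // Hypothetical usage: keep only local maxima of a detector response image.
     Gray<float>[,] response = new Gray<float>[480, 640];
     Gray<float>[,] maxima = new Gray<float>[480, 640]; // same size as the source
     response.SupressNonMaxima(maxima, radius: 3);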
 /// <summary>
 /// Replaces the selected image channel with the specified channel.
 /// </summary>
 /// <typeparam name="TSrcColor">Source color type.</typeparam>
 /// <typeparam name="TDepth">Channel depth type.</typeparam>
 /// <param name="image">Image.</param>
 /// <param name="channel">Channel.</param>
 /// <param name="channelIndex">Index of a channel to replace.</param>
 public static void ReplaceChannel<TSrcColor, TDepth>(this TSrcColor[,] image, Gray<TDepth>[,] channel, int channelIndex)
     where TSrcColor : struct, IColor<TDepth>
     where TDepth : struct
 {
     using (var im = image.Lock())
     using (var ch = channel.Lock())
     {
         replaceChannel<TSrcColor, TDepth>(im, ch, channelIndex);
     }
 }
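
A minimal usage sketch (not from the original source): the image is a placeholder, and the assumption that Bgr channels are indexed B=0, G=1, R=2 is for illustration only.

 // Hypothetical usage: zero out the green channel of a Bgr<byte> image.
 Bgr<byte>[,] image = new Bgr<byte>[480, 640];
 Gray<byte>[,] zeros = new Gray<byte>[480, 640]; // default-initialized to zero
 image.ReplaceChannel<Bgr<byte>, byte>(zeros, channelIndex: 1);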
        /// <summary>
        /// Converts an image to a bitmap.
        /// </summary>
        /// <param name="img">Input image.</param>
        /// <returns>Bitmap.</returns>
        public static Bitmap ToBitmap(this Gray<short>[,] img)
        {
            Bitmap bmp = null;

            using (var uImg = img.Lock())
            {
                bmp = toBitmap(uImg, PixelFormat.Format16bppGrayScale);
            }
            return bmp;
        }
        /// <summary>
        /// Converts an image to a bitmap.
        /// </summary>
        /// <param name="img">Input image.</param>
        /// <returns>Bitmap.</returns>
        public static Bitmap ToBitmap(this Gray<byte>[,] img)
        {
            Bitmap bmp = null;

            using (var uImg = img.Lock())
            {
                bmp = toBitmap(uImg, PixelFormat.Format8bppIndexed);
            }
            return bmp;
        }
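
A minimal usage sketch (not from the original source; the image is a placeholder):

        // Hypothetical usage: convert a grayscale image to a System.Drawing bitmap.
        Gray<byte>[,] gray = new Gray<byte>[480, 640];
        Bitmap bmp = gray.ToBitmap(); // 8 bpp indexed bitmap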
Example 7
        /// <summary>
        /// Process image looking for corners.
        /// </summary>
        /// <param name="cornerDetector">Corner detection algorithm instance.</param>
        /// <param name="image">Source image to process.</param>
        /// <returns>Returns list of found corners (X-Y coordinates).</returns>
        public static List<Point> ProcessImage(this ICornersDetector cornerDetector, Gray<byte>[,] image)
        {
            List<Point> points = null;

            using (var uImg = image.Lock())
            {
                points = cornerDetector.ProcessImage(uImg.AsAForgeImage());
            }

            return points;
        }
        /// <summary>
        /// Process image looking for corners.
        /// </summary>
        /// <param name="cornerDetector">Corner detection algorithm instance.</param>
        /// <param name="image">Source image to process.</param>
        /// <returns>Returns list of found corners (X-Y coordinates).</returns>
        public static List<Point> ProcessImage(this ICornersDetector cornerDetector, Gray<byte>[,] image)
        {
            List<Point> points = null;
            using (var uImg = image.Lock())
            {
                points = cornerDetector.ProcessImage(uImg.AsAForgeImage())
                                       .Select(x => x.ToPoint())
                                       .ToList();
            }

            return points;
        }
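
A minimal usage sketch (not from the original source): the detector instance and the image are placeholders; it assumes Accord.NET's FastCornersDetector, which implements ICornersDetector.

        // Hypothetical usage: detect corners through the ICornersDetector interface.
        Gray<byte>[,] image = new Gray<byte>[480, 640];
        ICornersDetector detector = new FastCornersDetector(threshold: 20);
        List<Point> corners = detector.ProcessImage(image);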
Example 9
        /// <summary>
        /// Gets the specified image portion.
        /// If the coordinates are not whole numbers, the pixel values are interpolated.
        /// </summary>
        /// <param name="source">Image.</param>
        /// <param name="area">Requested area.</param>
        /// <returns>Interpolated image area.</returns>
        public static Gray<float>[,] GetRectSubPix(this Gray<float>[,] source, RectangleF area)
        {
            var destination = new Gray<float>[(int)area.Height, (int)area.Width];

            using (var srcImg = source.Lock())
            using (var dstImg = destination.Lock())
            {
                getRectSubPix_Float(srcImg, area.Location, dstImg);
            }

            return destination;
        }
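
A minimal usage sketch (not from the original source; the source image is a placeholder):

        // Hypothetical usage: extract a 50x50 patch anchored at a sub-pixel location.
        Gray<float>[,] source = new Gray<float>[480, 640];
        Gray<float>[,] patch = source.GetRectSubPix(new RectangleF(10.5f, 20.25f, 50, 50));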
Example 10
        /// <summary>
        ///  Gray-Level Difference Method (GLDM).
        ///  <para>Computes a gray-level histogram of difference values between adjacent pixels in an image.</para>
        ///  <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.GrayLevelDifferenceMethod">Gray-Level Difference Method</see> for details.</para>
        /// </summary>
        /// <param name="image">The source image.</param>
        /// <param name="autoGray">Whether the maximum value of gray should be automatically computed from the image.</param>
        /// <param name="degree">The direction at which the co-occurrence should be found.</param>
        /// <returns>A histogram containing co-occurrences for every gray level in <paramref name="image"/>.</returns>
        public static int[] GrayLevelDifferenceMethod(this Gray<byte>[,] image, CooccurrenceDegree degree, bool autoGray = true)
        {
            GrayLevelDifferenceMethod gldm = new GrayLevelDifferenceMethod(degree, autoGray);

            int[] hist;
            using (var uImg = image.Lock())
            {
                hist = gldm.Compute(uImg.AsAForgeImage());
            }

            return hist;
        }
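
A minimal usage sketch (not from the original source): the image is a placeholder, and Accord.NET's CooccurrenceDegree.Degree0 (horizontal direction) is assumed.

        // Hypothetical usage: histogram of horizontal (0-degree) gray-level differences.
        Gray<byte>[,] image = new Gray<byte>[480, 640];
        int[] histogram = image.GrayLevelDifferenceMethod(CooccurrenceDegree.Degree0);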
        /// <summary>
        /// Process image looking for interest points.
        /// </summary>
        /// <typeparam name="TPoint">The type of returned feature points.</typeparam>
        /// <typeparam name="TFeature">The type of extracted features.</typeparam>
        /// <param name="featureDetector">Feature detector.</param>
        /// <param name="image">Source image data to process.</param>
        /// <returns>Returns list of found interest points.</returns>
        public static List<TPoint> ProcessImage<TPoint, TFeature>(this IFeatureDetector<TPoint, TFeature> featureDetector, Gray<byte>[,] image)
            where TPoint : IFeatureDescriptor<TFeature>
        {
            List<TPoint> points;

            using (var uImg = image.Lock())
            {
                points = featureDetector.ProcessImage(uImg.AsAForgeImage());
            }

            return points;
        }
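
A minimal usage sketch (not from the original source): it assumes Accord.NET's SpeededUpRobustFeaturesDetector satisfies IFeatureDetector<SpeededUpRobustFeaturePoint, double[]>; the image is a placeholder.

        // Hypothetical usage: extract SURF interest points through the generic wrapper.
        Gray<byte>[,] image = new Gray<byte>[480, 640];
        var surf = new SpeededUpRobustFeaturesDetector();
        List<SpeededUpRobustFeaturePoint> points = surf.ProcessImage<SpeededUpRobustFeaturePoint, double[]>(image);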
Example 12
        /// <summary>
        /// Features from Accelerated Segment Test (FAST) corners detector.
        /// <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.FastCornersDetector"/> for details.</para>
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="threshold">The suppression threshold. Decreasing this value increases the number of points detected by the algorithm.</param>
        /// <returns>Interest point locations.</returns>
        public static List<IntPoint> CornerFeaturesDetector(this Gray<byte>[,] im, int threshold = 20)
        {
            FastCornersDetector fast = new FastCornersDetector(threshold);

            List<IntPoint> points;

            using (var uImg = im.Lock())
            {
                points = fast.ProcessImage(uImg.AsAForgeImage());
            }

            return points;
        }
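
A minimal usage sketch (not from the original source; the image is a placeholder):

        // Hypothetical usage: FAST corners with a lower threshold (detects more points).
        Gray<byte>[,] image = new Gray<byte>[480, 640];
        List<IntPoint> corners = image.CornerFeaturesDetector(threshold: 10);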
Example 13
        /// <summary>
        /// Harris Corners Detector.
        /// <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.HarrisCornersDetector"/> for details.</para>
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="measure">Corners measure.</param>
        /// <param name="threshold">Harris threshold.</param>
        /// <param name="sigma">Gaussian smoothing sigma.</param>
        /// <param name="suppression">Non-maximum suppression window radius.</param>
        /// <returns>Interest point locations.</returns>
        public static List<IntPoint> HarrisCorners<TDepth>(this Gray<byte>[,] im, HarrisCornerMeasure measure = HarrisCornerMeasure.Harris, float threshold = 20000f, double sigma = 1.2, int suppression = 3)
        {
            HarrisCornersDetector harris = new HarrisCornersDetector(measure, threshold, sigma, suppression);

            List<IntPoint> points;

            using (var uImg = im.Lock())
            {
                points = harris.ProcessImage(uImg.AsAForgeImage());
            }

            return points;
        }
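
A minimal usage sketch (not from the original source): note that the TDepth type parameter is unused by the method body, so any type argument works; the image is a placeholder.

        // Hypothetical usage: Harris corners with the default measure and suppression.
        Gray<byte>[,] image = new Gray<byte>[480, 640];
        List<IntPoint> corners = image.HarrisCorners<byte>(threshold: 20000f, sigma: 1.2);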
Example 14
        /// <summary>
        ///  Maximum cross-correlation feature point matching algorithm.
        /// </summary>
        /// <param name="image1">First image.</param>
        /// <param name="image2">Second image.</param>
        /// <param name="points1">Points from the first image.</param>
        /// <param name="points2">Points from the second image.</param>
        /// <param name="windowSize">The size of the correlation window.</param>
        /// <param name="maxDistance">The maximum distance to consider points as correlated.</param>
        /// <returns>Matched point-pairs.</returns>
        public static Point[][] Match(Gray<byte>[,] image1, Gray<byte>[,] image2, Point[] points1, Point[] points2, int windowSize, int maxDistance)
        {
            Point[][] matches = null;

            using (var uImg1 = image1.Lock())
            using (var uImg2 = image2.Lock())
            {
                var correlationMatching = new CorrelationMatching(windowSize, maxDistance, uImg1.AsBitmap(), uImg2.AsBitmap());
                matches = correlationMatching.Match(points1.ToPoints(), points2.ToPoints()).ToPoints();
            }

            return matches;
        }
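
A minimal usage sketch (not from the original source): the call is shown unqualified, as if made from inside the containing static class; the images and point sets are placeholders.

        // Hypothetical usage: correlate corner points detected in two frames.
        Gray<byte>[,] frameA = new Gray<byte>[480, 640];
        Gray<byte>[,] frameB = new Gray<byte>[480, 640];
        Point[] ptsA = new Point[0], ptsB = new Point[0]; // e.g. from a corner detector
        Point[][] pairs = Match(frameA, frameB, ptsA, ptsB, windowSize: 9, maxDistance: 50);
        // pairs[0] and pairs[1] hold the matched points from the first and second image.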
Example 16
        /// <summary>
        /// Extracts the contour from a single object in a grayscale image. (uses Accord built-in function)
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="minGradientStrength">The pixel value threshold above which a pixel
        /// is considered black (belonging to the object). Default is zero.</param>
        public static List<Point> FindContour(this Gray<byte>[,] im, byte minGradientStrength = 0)
        {
            BorderFollowing bf = new BorderFollowing(minGradientStrength);

            List<Point> points;

            using (var uImg = im.Lock())
            {
                points = bf.FindContour(uImg.AsAForgeImage());
            }

            return points;
        }
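
A minimal usage sketch (not from the original source; the blob image is a placeholder):

        // Hypothetical usage: trace the outer contour of a single object.
        Gray<byte>[,] blob = new Gray<byte>[480, 640];
        List<Point> contour = blob.FindContour(minGradientStrength: 1);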
Example 17
        /// <summary>
        /// Find non-zero locations in the image.
        /// </summary>
        /// <param name="img">Image.</param>
        /// <param name="values">Found non-zero values at the returned positions.</param>
        /// <returns>List of found non-zero locations.</returns>
        public static List<Point> FindNonZero(this Gray<float>[,] img, out IList<float> values)
        {
            List<Point> locationsPatch;
            IList valuesPatch;

            using (var uImg = img.Lock())
            {
                findNonZero_Float(uImg, out locationsPatch, out valuesPatch);
            }

            values = valuesPatch as IList<float>;
            return locationsPatch;
        }
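
A minimal usage sketch (not from the original source; the response image is a placeholder):

        // Hypothetical usage: locate non-zero responses and read back their values.
        Gray<float>[,] response = new Gray<float>[480, 640];
        IList<float> values;
        List<Point> locations = response.FindNonZero(out values);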
Example 18
        /// <summary>
        /// The blending filter is able to blend two images using a homography matrix.
        /// A linear alpha gradient is used to smooth out differences between the two
        /// images, effectively blending them into a single image. The gradient is computed
        /// considering the distance between the centers of the two images.
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="overlayIm">The overlay image (also called the anchor).</param>
        /// <param name="homography">Homography matrix used to map an image passed to
        /// the filter to the overlay image specified at filter creation.</param>
        /// <param name="fillColor">The filling color used to fill blank spaces. The filling color will only be visible after the image is converted
        /// to 24bpp. The alpha channel will be used internally by the filter.</param>
        /// <param name="gradient">A value indicating whether to blend using a linear
        ///  gradient or just superimpose the two images with equal weights.</param>
        /// <param name="alphaOnly">A value indicating whether only the alpha channel
        /// should be blended. This can be used together with a transparency
        /// mask to selectively blend only portions of the image.</param>
        /// <returns>Blended image.</returns>
        public static Bgra<byte>[,] Blend(this Gray<byte>[,] im, Gray<byte>[,] overlayIm, MatrixH homography, Bgra<byte> fillColor, bool gradient = true, bool alphaOnly = false)
        {
            Bgra<byte>[,] resultImage = null;

            using (var uOverlayIm = overlayIm.Lock())
            {
                Blend blend = new Blend(homography, uOverlayIm.AsBitmap());
                blend.AlphaOnly = alphaOnly;
                blend.Gradient = gradient;
                blend.FillColor = fillColor.ToColor();

                resultImage = im.ApplyBaseTransformationFilter<Gray<byte>, Bgra<byte>>(blend);
            }

            return resultImage;
        }
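
A minimal usage sketch (not from the original source): the images are placeholders, the homography is left null because constructing a meaningful one (e.g. from point-pair estimation) is outside the scope of this snippet, and the Bgra<byte>(b, g, r, a) constructor order is an assumption.

        // Hypothetical usage: blend two grayscale frames related by a known homography.
        Gray<byte>[,] imageA = new Gray<byte>[480, 640];
        Gray<byte>[,] imageB = new Gray<byte>[480, 640];
        MatrixH homography = null; // placeholder; supply a real transform here
        Bgra<byte>[,] blended = imageA.Blend(imageB, homography, new Bgra<byte>(0, 0, 0, 255));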
Example 20
        /// <summary>
        /// Extracts a single image channel.
        /// </summary>
        /// <typeparam name="TSrcColor">Source color type.</typeparam>
        /// <typeparam name="TDepth">Channel depth type.</typeparam>
        /// <param name="image">Image.</param>
        /// <param name="area">Working area.</param>
        /// <param name="channelIndex">Channel index.</param>
        /// <returns>Extracted channel.</returns>
        public static unsafe Gray<TDepth>[,] GetChannel<TSrcColor, TDepth>(this TSrcColor[,] image, Rectangle area, int channelIndex)
        where TSrcColor : struct, IColor<TDepth>
        where TDepth : struct
        {
            int width = area.Width;
            int height = area.Height;

            var dest = new Gray<TDepth>[area.Height, area.Width];

            using (var lockedImage = image.Lock())
            using (var dstImg = dest.Lock())
            {
                var srcImg = lockedImage.GetSubRect(area);
                int channelSize = srcImg.ColorInfo.ChannelSize;
                int colorSize = srcImg.ColorInfo.Size;

                byte* srcPtr = (byte*)srcImg.ImageData + channelIndex * srcImg.ColorInfo.ChannelSize;
                byte* dstPtr = (byte*)dstImg.ImageData;

                for (int row = 0; row < height; row++)
                {
                    byte* srcColPtr = srcPtr;
                    byte* dstColPtr = dstPtr;
                    for (int col = 0; col < width; col++)
                    {
                        // copy the channel byte-per-byte
                        for (int partIdx = 0; partIdx < channelSize; partIdx++)
                        {
                            dstColPtr[partIdx] = srcColPtr[partIdx];
                        }

                        srcColPtr += colorSize;   // move to the next column
                        dstColPtr += channelSize;
                    }

                    srcPtr += srcImg.Stride;
                    dstPtr += dstImg.Stride;
                }
            }

            return dest;
        }
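
A minimal usage sketch (not from the original source): the image is a placeholder, and the assumption that Bgr channels are indexed B=0, G=1, R=2 is for illustration only.

        // Hypothetical usage: extract the red channel over the whole image area.
        Bgr<byte>[,] image = new Bgr<byte>[480, 640];
        var fullArea = new Rectangle(0, 0, 640, 480);
        Gray<byte>[,] red = image.GetChannel<Bgr<byte>, byte>(fullArea, channelIndex: 2);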
        private unsafe void calculateLinearMapForNeighbour(Gray<byte>[,] responseMap, int neigbourRow, int neighbourCol,
                                                           Gray<byte>[,] linearMap)
        {
            using (var uResponseMap = responseMap.Lock())
            using (var uLinearMap = linearMap.Lock())
            {
                int neigborhood = this.NeigborhoodSize;

                byte* linMapPtr = (byte*)uLinearMap.ImageData;
                int linMapStride = uLinearMap.Stride;

                int width = uResponseMap.Width;
                int height = uResponseMap.Height;
                int stride = uResponseMap.Stride;
                byte* responseMapPtr = (byte*)uResponseMap.GetData(neigbourRow);

                //Two loops copy every T-th pixel into the linear memory
                for (int r = neigbourRow; r < height; r += neigborhood)
                {
                    int linMapIdx = 0;
                    for (int c = neighbourCol; c < width; c += neigborhood)
                    {
                        linMapPtr[linMapIdx] = responseMapPtr[c];
                        linMapIdx++;
                    }

                    responseMapPtr += stride * neigborhood; //skip neighborhood rows
                    linMapPtr += linMapStride;
                }
            }
        }
Example 22
        /// <summary>
        /// Keeps only those orientations that have at least MINIMAL_NUM_OF_SAME_ORIENTED_PIXELS equal orientations in their 3x3 neighborhood.
        /// Also transforms the angle labels into binary form ([0..7] -> [1, 2, 4, 8, ..., 128]).
        /// </summary>
        /// <param name="qunatizedOrientionImg">Quantized orientation image where angles are represented by labels [0..GlobalParameters.NUM_OF_QUNATIZED_ORIENTATIONS] (invalid orientation label included).</param>
        /// <param name="minSameOrientations">Minimal number of equal orientations in the 3x3 neighborhood. The valid range is [0..9].</param>
        private static Gray<byte>[,] RetainImportantQuantizedOrientations(Gray<byte>[,] qunatizedOrientionImg, int minSameOrientations)
        {
            if (minSameOrientations < 0 || minSameOrientations > 9 /*3x3 neighborhood*/)
                throw new Exception("Minimal number of same orientations should be in: [0..9].");

            var quantizedFilteredOrient = qunatizedOrientionImg.CopyBlank();

            using (var uQunatizedOrientionImg = qunatizedOrientionImg.Lock())
            using (var uQuantizedFilteredOrient = quantizedFilteredOrient.Lock())
            {
                //debugImg = new Image<Hsv, byte>(orientDegImg.Width, orientDegImg.Height);
                //debugImg = null;
                int qOrinetStride = uQunatizedOrientionImg.Stride;
                int qOrinetAllign = uQunatizedOrientionImg.Stride - uQunatizedOrientionImg.Width;

                byte* qOrinetUnfilteredPtr = (byte*)uQunatizedOrientionImg.ImageData + qOrinetStride + 1;
                byte* qOrinetFilteredPtr = (byte*)uQuantizedFilteredOrient.ImageData + qOrinetStride + 1;

                //Debug.Assert(qunatizedOrientionImg.Stride == quantizedFilteredOrient.Stride);

                int imgWidth = uQunatizedOrientionImg.Width;
                int imgHeight = uQunatizedOrientionImg.Height;

                for (int j = 1; j < imgHeight - 1; j++)
                {
                    for (int i = 1; i < imgWidth - 1; i++)
                    {
                        if (*qOrinetUnfilteredPtr != INVALID_QUANTIZED_ORIENTATION)
                        {
                            byte[] histogram = new byte[INVALID_QUANTIZED_ORIENTATION + 1]; // the 3x3 neighborhood is examined

                            histogram[qOrinetUnfilteredPtr[-qOrinetStride - 1]]++; histogram[qOrinetUnfilteredPtr[-qOrinetStride + 0]]++; histogram[qOrinetUnfilteredPtr[-qOrinetStride + 1]]++;
                            histogram[qOrinetUnfilteredPtr[-1]]++; histogram[qOrinetUnfilteredPtr[0]]++; histogram[qOrinetUnfilteredPtr[+1]]++;
                            histogram[qOrinetUnfilteredPtr[+qOrinetStride - 1]]++; histogram[qOrinetUnfilteredPtr[+qOrinetStride + 0]]++; histogram[qOrinetUnfilteredPtr[+qOrinetStride + 1]]++;

                            int maxBinVotes = 0; byte quantizedAngle = 0;
                            for (byte histBinIdx = 0; histBinIdx < GlobalParameters.NUM_OF_QUNATIZED_ORIENTATIONS /*discard invalid orientation*/; histBinIdx++)
                            {
                                if (histogram[histBinIdx] > maxBinVotes)
                                {
                                    maxBinVotes = histogram[histBinIdx];
                                    quantizedAngle = histBinIdx;
                                }
                            }

                            if (maxBinVotes >= minSameOrientations)
                                *qOrinetFilteredPtr = (byte)(1 << quantizedAngle); //[1,2,4,8...128] (8 orientations)

                            //*qOrinetFilteredPtr = (byte)(1 << *qOrinetUnfilteredPtr); //[1,2,4,8...128] (8 orientations)
                            //debugImg[j, i] = new Hsv((*qOrinetFilteredPtr-1) * 35, 100, 100);
                        }

                        qOrinetUnfilteredPtr++;
                        qOrinetFilteredPtr++;
                    }

                    qOrinetUnfilteredPtr += 1 + qOrinetAllign + 1;
                    qOrinetFilteredPtr += 1 + qOrinetAllign + 1; // skip the last pixel, the stride padding, and the first pixel
                }
            }

            //magnitudeImg.Save("magnitude.bmp");
            //quantizedFilteredOrient.Save("quantizedImg.bmp");
            return quantizedFilteredOrient;
        }
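
A short standalone sketch (not from the original source) of the label-to-binary encoding performed above: each quantized angle label [0..7] becomes a single bit, which is what later allows orientations to be combined with a bitwise OR when they are spread over a neighborhood (Example 26).

        byte label = 3;                   // quantized orientation label in [0..7]
        byte binary = (byte)(1 << label); // -> 8, i.e. one of [1, 2, 4, ..., 128]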
Example 23
        private static Gray<short>[,] calculateSimilarityMap(ITemplate template, LinearizedMaps maps, Rectangle searchArea)
        {
            Debug.Assert(searchArea.Right <= maps.ImageSize.Width && 
                         searchArea.Bottom <= maps.ImageSize.Height);
            Debug.Assert(template.Size.Width + searchArea.X < maps.ImageSize.Width &&
                         template.Size.Height + searchArea.Y < maps.ImageSize.Height);

            int width = searchArea.Width / maps.NeigborhoodSize;
            int height = searchArea.Height / maps.NeigborhoodSize;

            Gray<short>[,] similarityMap = new Gray<short>[height, width]; //performance penalty (alloc, dealloc)!!!
            Gray<byte>[,] buffer = new Gray<byte>[height, width];

            using (var uSimilarityMap = similarityMap.Lock())
            using (var uBuffer = buffer.Lock())
            {
                int nAddsInBuffer = 0;
                foreach (var feature in template.Features)
                {
                    var position = new Point(feature.X + searchArea.X, feature.Y + searchArea.Y); //shifted position

                    Point mapPoint;
                    var neighbourMap = maps.GetMapElement(position, feature.AngleIndex, out mapPoint);

                    neighbourMap.AddTo(uBuffer, mapPoint);
                    nAddsInBuffer++;

                    // integer division trick: non-zero exactly when nAddsInBuffer reaches
                    // the maximum number of byte-valued additions that fit without overflow
                    if (nAddsInBuffer / GlobalParameters.MAX_SUPPORTED_NUM_OF_FEATURES_ADDDED_AS_BYTE != 0)
                    {
                        uBuffer.AddTo(uSimilarityMap);
                        buffer.Clear(); //clear buffer

                        nAddsInBuffer = 0;
                    }
                }

                bool finalAdd = template.Features.Length % GlobalParameters.MAX_SUPPORTED_NUM_OF_FEATURES_ADDDED_AS_BYTE != 0;
                if (finalAdd)
                {
                    uBuffer.AddTo(uSimilarityMap);
                }
            }

            return similarityMap;
        }    
        private static List<Feature> ExtractTemplate(Gray<byte>[,] orientationImage, int maxNumOfFeatures, Func<Feature, int> featureImportanceFunc)
        {
            List<Feature> candidates = new List<Feature>();

            using (var uOrientationImage = orientationImage.Lock())
            {
                byte* orientImgPtr = (byte*)uOrientationImage.ImageData;
                int orientImgStride = uOrientationImage.Stride;

                int imgWidth = uOrientationImage.Width;
                int imgHeight = uOrientationImage.Height;

                for (int row = 0; row < imgHeight; row++)
                {
                    for (int col = 0; col < imgWidth; col++)
                    {
                        if (orientImgPtr[col] == 0) //quantized orientations are: [1,2,4,8,...,128];
                            continue;

                        var candidate = new Feature(x: col, y: row, angleBinaryRepresentation: orientImgPtr[col]);
                        candidates.Add(candidate);
                    }

                    orientImgPtr += orientImgStride;
                }
            }

            candidates = candidates.OrderByDescending(featureImportanceFunc).ToList(); //order descending
            return FilterScatteredFeatures(candidates, maxNumOfFeatures, 5); //candidates.Count must be >= MIN_NUM_OF_FEATURES
        }
        private Image<Gray<byte>>[,] linearizeResponseMap(Gray<byte>[,] responseMap)
        {
            var linearizedMaps = new Image<Gray<byte>>[NeigborhoodSize, NeigborhoodSize];

            //Outer two for loops iterate over top-left T^2 starting pixels
            for (int rowNeigbor = 0; rowNeigbor < this.NeigborhoodSize; rowNeigbor++)
            {
                for (int colNeigbor = 0; colNeigbor < this.NeigborhoodSize; colNeigbor++)
                {
                    var linearMap = new Gray<byte>[this.LinearMapSize.Height, this.LinearMapSize.Width];
                    calculateLinearMapForNeighbour(responseMap, rowNeigbor, colNeigbor, linearMap);
                    linearizedMaps[rowNeigbor, colNeigbor] = linearMap.Lock();
                }
            }

            return linearizedMaps;
        }
Example 26
        private static Gray<byte>[,] SpreadOrientations(Gray<byte>[,] quantizedOrientationImage, int neghborhood)
        {
            var destImg = quantizedOrientationImage.CopyBlank();

            using (var uQuantizedOrientationImage = quantizedOrientationImage.Lock())
            using (var uDestImg = destImg.Lock())
            {
                byte* srcImgPtr = (byte*)uQuantizedOrientationImage.ImageData;
                int imgStride = uQuantizedOrientationImage.Stride;
                byte* destImgPtr = (byte*)uDestImg.ImageData;

                int imgHeight = uDestImg.Height;
                int imgWidth = uDestImg.Width;

                for (int row = 0; row < neghborhood; row++)
                {
                    int subImageHeight = imgHeight - row;
                    for (int col = 0; col < neghborhood; col++)
                    {
                        OrImageBits(&srcImgPtr[col], destImgPtr,
                                    imgStride,
                                    imgWidth - col, subImageHeight);
                    }

                    srcImgPtr += imgStride;
                }
            }
           
            return destImg;
        }
Example 27
        private static List<Point> searchSimilarityMap(Gray<short>[,] similarityMap, int minValue, out List<short> values)
        {
            List<Point> positions = new List<Point>();
            values = new List<short>();

            using (var uSimilarityMap = similarityMap.Lock())
            {
                int width = uSimilarityMap.Width;
                int height = uSimilarityMap.Height;
                int stride = uSimilarityMap.Stride; //stride should be == width * sizeof(short) (see linearized maps)
                short* similarityMapPtr = (short*)uSimilarityMap.ImageData;

                for (int row = 0; row < height; row++)
                {
                    for (int col = 0; col < width; col++)
                    {
                        if (similarityMapPtr[col] >= minValue)
                        {
                            positions.Add(new Point(col, row));
                            values.Add(similarityMapPtr[col]);
                        }
                    }

                    similarityMapPtr = (short*)((byte*)similarityMapPtr + stride);
                }
            }

            return positions;
        }
Example 28
        private static Gray<byte>[,] QuantizeOrientations(Gray<int>[,] orientDegImg)
        {
            var quantizedUnfilteredOrient = new Gray<byte>[orientDegImg.Height(), orientDegImg.Width()];

            using (var uOrientDegImg = orientDegImg.Lock())
            using (var uQuantizedUnfilteredOrient = quantizedUnfilteredOrient.Lock())
            {
                int* orientDegImgPtr = (int*)uOrientDegImg.ImageData;
                byte* qOrinetUnfilteredPtr = (byte*)uQuantizedUnfilteredOrient.ImageData;
                int qOrinetUnfilteredStride = uQuantizedUnfilteredOrient.Stride;

                int imgWidth = uOrientDegImg.Width;
                int imgHeight = uOrientDegImg.Height;

                for (int j = 0; j < imgHeight; j++)
                {
                    for (int i = 0; i < imgWidth; i++)
                    {
                        int angle = orientDegImgPtr[i];
                        qOrinetUnfilteredPtr[i] = AngleQuantizationTable[angle]; //[0-360] -> [...] -> [0-7] (for mapping see "CalculateAngleQuantizationTable()")
                    }

                    orientDegImgPtr += imgWidth; // Gray<int> image rows are always aligned
                    qOrinetUnfilteredPtr += qOrinetUnfilteredStride;
                }
            }

            //quantizedUnfilteredOrient.Mul(36).Save("quantizedUnfilteredImg.bmp");
            return quantizedUnfilteredOrient;
        }
        /// <summary>
        /// Computes gradient orientations from the grayscale image, along with the squared gradient magnitudes.
        /// </summary>
        /// <param name="frame">Image.</param>
        /// <param name="magnitudeSqrImage">Squared magnitude image.</param>
        /// <param name="minValidMagnitude">Minimal valid magnitude.</param>
        /// <returns>Orientation image (angles are in degrees).</returns>
        public static unsafe Gray<int>[,] Compute(Gray<byte>[,] frame, out Gray<int>[,] magnitudeSqrImage, int minValidMagnitude)
        {
            var minSqrMagnitude = minValidMagnitude * minValidMagnitude;

            var orientationImage = new Gray<int>[frame.Height(), frame.Width()];
            var _magnitudeSqrImage = orientationImage.CopyBlank();

            using (var uFrame = frame.Lock())
            {
                ParallelLauncher.Launch(thread =>
                {
                    computeGray(thread, (byte*)uFrame.ImageData, uFrame.Stride, orientationImage, _magnitudeSqrImage, minSqrMagnitude);
                },
                frame.Width() - 2 * kernelRadius, frame.Height() - 2 * kernelRadius);
            }

            magnitudeSqrImage = _magnitudeSqrImage;
            return orientationImage;
        }
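
A minimal usage sketch (not from the original source): the call is shown unqualified, as if made from inside the containing class; the frame is a placeholder.

        // Hypothetical usage: per-pixel gradient orientations (in degrees) and squared
        // magnitudes; gradients weaker than the given magnitude are treated as invalid.
        Gray<byte>[,] frame = new Gray<byte>[480, 640];
        Gray<int>[,] magnitudeSqr;
        Gray<int>[,] orientations = Compute(frame, out magnitudeSqr, minValidMagnitude: 35);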
        private Gray<byte>[,] computeResponseMap(Gray<byte>[,] sprededQuantizedImage, int orientationIndex)
        {
            int width = this.ImageValidSize.Width;
            int height = this.ImageValidSize.Height;
            var responseMap = new Gray<byte>[height, width];

            using (var uSprededQuantizedImage = sprededQuantizedImage.Lock())
            {
                int srcStride = uSprededQuantizedImage.Stride;
                byte* srcPtr = (byte*)uSprededQuantizedImage.ImageData;
           
                using (var uResponseMap = responseMap.Lock())
                {
                    int dstStride = uResponseMap.Stride;
                    byte* dstPtr = (byte*)uResponseMap.ImageData;

                    // for the whole image
                    fixed (byte* angleTablePtr = &SimilarityAngleTable[0, 0])
                    {
                        byte* orientPtr = angleTablePtr + orientationIndex * SimilarityAngleTable.GetLength(1);

                        for (int row = 0; row < height; row++)
                        {
                            for (int col = 0; col < width; col++)
                            {
                                //destPtr[col] = SimilarityAngleTable[orient, srcPtr[col]];
                                dstPtr[col] = orientPtr[srcPtr[col]]; //faster way
                            }

                            srcPtr += srcStride;
                            dstPtr += dstStride;
                        }
                    }
                }
            }

            return responseMap;
        }