Exemplo n.º 1
1
        /// <summary>
        /// Runs the mean-shift procedure over a probability (back-projection) map: iteratively moves
        /// the search window toward the centroid of the probability mass inside it until the
        /// termination criteria fire or the window collapses, then computes second-order moments
        /// for the final window.
        /// </summary>
        /// <param name="probabilityMap">Gray probability map (e.g. a histogram back-projection).</param>
        /// <param name="roi">Initial search window.</param>
        /// <param name="termCriteria">Termination criteria (iteration count and/or minimum shift).</param>
        /// <param name="centralMoments">Central moments of the final window (consumed by cam-shift).</param>
        /// <returns>The converged search window, clamped to the image area.</returns>
        private static Rectangle process(Gray<byte>[,] probabilityMap, Rectangle roi, TermCriteria termCriteria, out CentralMoments centralMoments)
        {
            Rectangle imageArea = new Rectangle(0, 0, probabilityMap.Width(), probabilityMap.Height());

            Rectangle searchWindow = roi;
            RawMoments moments = new RawMoments(order: 1);

            // Mean shift with fixed number of iterations
            int i = 0;
            double shift = Byte.MaxValue; // start large so the first ShouldTerminate check cannot trip on shift
            while (termCriteria.ShouldTerminate(i, shift) == false && !searchWindow.IsEmptyArea())
            {
                // Locate first order moments
                moments.Compute(probabilityMap, searchWindow);

                // Offset from the window center to the centroid of the probability mass
                int shiftX = (int)(moments.CenterX - searchWindow.Width / 2f);
                int shiftY = (int)(moments.CenterY - searchWindow.Height / 2f);

                // Shift the mean (centroid)
                searchWindow.X += shiftX;
                searchWindow.Y += shiftY;

                // Keep the search window inside the image
                searchWindow.Intersect(imageArea);
                
                shift = System.Math.Abs((double)shiftX) + System.Math.Abs((double)shiftY); //for term criteria only
                i++;
            }

            if (searchWindow.IsEmptyArea() == false)
            {
                // Locate second order moments and perform final shift
                moments.Order = 2;
                moments.Compute(probabilityMap, searchWindow);

                searchWindow.X += (int)(moments.CenterX - searchWindow.Width / 2f);
                searchWindow.Y += (int)(moments.CenterY - searchWindow.Height / 2f);

                // Keep the search window inside the image
                searchWindow.Intersect(imageArea);
            }

            centralMoments = new CentralMoments(moments); // moments to be used by camshift
            return searchWindow;
        }
        /// <summary>
        /// Per-pixel kernel: applies the 3x3 Sobel operators to each of the three interleaved color
        /// channels at the thread's position and keeps the gradient (orientation + squared magnitude)
        /// of the channel with the strongest response.
        /// </summary>
        /// <param name="thread">Kernel thread position (X, Y) within the processing grid.</param>
        /// <param name="frame">Pointer to source frame data; 3 bytes per pixel (Bgr assumed per the sizeof comment below).</param>
        /// <param name="frameStride">Source frame row stride in bytes.</param>
        /// <param name="orientationImage">Output orientation image; written at a kernelRadius offset.</param>
        /// <param name="magnitudeSqrImage">Output squared-magnitude image; written at a kernelRadius offset.</param>
        /// <param name="minSqrMagnitude">Minimum squared magnitude; weaker gradients are marked invalid.</param>
        private unsafe static void computeColor(KernelThread thread, byte* frame, int frameStride, Gray<int>[,] orientationImage, Gray<int>[,] magnitudeSqrImage, int minSqrMagnitude)
        {
            // Move to the top-left corner of this thread's 3x3 neighborhood.
            frame = frame + frameStride * thread.Y + thread.X * 3 /*sizeof(Bgr<byte>)*/; 

            int maxMagSqr = 0, maxDx = 0, maxDy = 0;
            for (int ch = 0; ch < 3; ch++)
            {
                var srcPtr = frame + ch;

                // Convolve this channel with the horizontal (X) and vertical (Y) Sobel kernels.
                int sumX = 0, sumY = 0;
                for (int r = 0; r < 3; r++)
                {
                    var chPtr = srcPtr;
                    for (int c = 0; c < 3; c++)
                    {
                        sumX += *chPtr * Sobel_3x3_X[r, c];
                        sumY += *chPtr * Sobel_3x3_Y[r, c];

                        chPtr += 3 * sizeof(byte); // advance one pixel (3 interleaved channel bytes)
                    }

                    srcPtr = (byte*)srcPtr + frameStride; // next row
                }
                //sumX >>= 3; sumY >>= 3; //divide by 8 (normalize kernel)

                // Track the channel with the strongest (squared) gradient magnitude.
                var grad = sumX * sumX + sumY * sumY;
                if (grad > maxMagSqr)
                {
                    maxMagSqr = grad;
                    maxDx = sumX;
                    maxDy = sumY;
                }
            }

            if (maxMagSqr < minSqrMagnitude)
            {
                // Gradient too weak: only the orientation needs marking (magnitude stays at its default).
                //magnitudeSqrImage[thread.Y + kernelRadius, thread.X + kernelRadius] = 0;  //redundant
                orientationImage[thread.Y + kernelRadius, thread.X + kernelRadius] = FeatureMap.INVALID_ORIENTATION;
            }
            else
            {
                magnitudeSqrImage[thread.Y + kernelRadius, thread.X + kernelRadius] = maxMagSqr;
                orientationImage[thread.Y + kernelRadius, thread.X + kernelRadius] = MathExtensions.Atan2Aprox(maxDy, maxDx);
            }
        }
        /// <summary>
        /// Rectification filter for projective transformation.
        /// <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.Filters.Rectification"/> for details.</para>
        /// </summary>
        /// <param name="img">Image.</param>
        /// <param name="homography">The homography matrix used to map a image passed to the filter to the overlay image.</param>
        /// <param name="fillColor">The filling color used to fill blank spaces.</param>
        /// <returns>Rectified image.</returns>
        public static Gray<byte>[,] Rectification(this Gray<byte>[,] img, double[,] homography, Gray<byte> fillColor) 
        {
            // Configure the Accord.NET rectification filter and run it over the image.
            var filter = new Rectification(homography)
            {
                FillColor = fillColor.ToColor()
            };

            return img.ApplyBaseTransformationFilter(filter);
        }
        private unsafe static void goodFeaturesToTrack(Gray<float>[,] integralDxx, Gray<float>[,] integralDxy, Gray<float>[,] integralDyy,
                                                       int winSize, float minEigValue, Gray<float>[,] strengthImg)
        {
            // Clamp the threshold away from zero so degenerate responses are never accepted.
            minEigValue = System.Math.Max(1E-3f, minEigValue);

            int normFactor = winSize * winSize * 255;
            int halfWin = winSize / 2;

            int lastRow = integralDxx.Height() - winSize;
            int lastCol = integralDxx.Width() - winSize;

            for (int y = 0; y < lastRow; y++)
            {
                for (int x = 0; x < lastCol; x++)
                {
                    // Window sums of the squared-derivative products, via the integral images.
                    var sumDxx = integralDxx.GetSum(x, y, winSize, winSize);
                    var sumDxy = integralDxy.GetSum(x, y, winSize, winSize);
                    var sumDyy = integralDyy.GetSum(x, y, winSize, winSize);

                    // Normalized minimum eigenvalue of the structure matrix (Shi-Tomasi measure).
                    var response = calcMinEigenVal(sumDxx, sumDxy, sumDyy) / normFactor;

                    if (response > minEigValue)
                    {
                        // Write the response at the window center.
                        strengthImg[halfWin + y, halfWin + x] = response;
                    }
                }
            }
        }
Exemplo n.º 5
0
 /// <summary>
 /// Does non-maxima supression for the following gray image. Can be useful for detections filtering (e.g. post-processing output from Harris detector).
 /// </summary>
 /// <param name="img">Image.</param>
 /// <param name="dest">Destination image. Must have the same size as source image.</param>
 /// <param name="radius">Non-maxima supression radius (kernel radius).</param>
 /// <param name="discardValue">The value will be discarded (0 - for black).</param>
 public static void SupressNonMaxima(this Gray <float>[,] img, Gray <float>[,] dest, int radius = 3, int discardValue = 0)
 {
     // Lock both images for unmanaged access, then run the float implementation.
     using (var lockedSrc = img.Lock())
     using (var lockedDst = dest.Lock())
     {
         supressNonMaxima_Float(lockedSrc, lockedDst, radius, discardValue);
     }
 }
 /// <summary>
 /// Does non-maxima supression for the following gray image. Can be useful for detections filtering (e.g. post-processing output from Harris detector).
 /// </summary>
 /// <param name="img">Image.</param>
 /// <param name="dest">Destination image. Must have the same size as source image.</param>
 /// <param name="radius">Non-maxima supression radius (kernel radius).</param>
 /// <param name="discardValue">The value will be discarded (0 - for black).</param>
 public static void SupressNonMaxima(this Gray<float>[,] img, Gray<float>[,] dest, int radius = 3, int discardValue = 0)
 {
     // Pin the source, then the destination, and delegate to the float implementation.
     using (var srcView = img.Lock())
     {
         using (var dstView = dest.Lock())
         {
             supressNonMaxima_Float(srcView, dstView, radius, discardValue);
         }
     }
 }
Exemplo n.º 7
0
        /// <summary>
        /// Does non-maxima supression for the following gray image. Can be useful for detections filtering (e.g. post-processing output from Harris detector).
        /// </summary>
        /// <param name="img">Image.</param>
        /// <param name="radius">Non-maxima supression radius (kernel radius).</param>
        /// <param name="discardValue">The value will be discarded (0 - for black).</param>
        /// <returns>Processed image.</returns>
        /// <summary>
        /// Does non-maxima suppression on a gray image, returning the result as a new image.
        /// </summary>
        /// <param name="img">Image.</param>
        /// <param name="radius">Non-maxima suppression radius (kernel radius).</param>
        /// <param name="discardValue">The value that will be written for discarded pixels (0 - for black).</param>
        /// <returns>Processed image.</returns>
        public static Gray <float>[,] SupressNonMaxima(this Gray <float>[,] img, int radius = 3, int discardValue = 0)
        {
            var dest = img.CopyBlank();

            // BUG FIX: discardValue was previously dropped when delegating, so a caller-supplied
            // value was silently replaced by the overload's default.
            SupressNonMaxima(img, dest, radius, discardValue);

            return(dest);
        }
Exemplo n.º 8
0
        /// <summary>
        /// Converts a Bgr color to its gray intensity using the integer approximation
        /// (2*R + 5*G + 1*B) / 8.
        /// </summary>
        /// <param name="bgr">Source color.</param>
        /// <param name="gray">Destination gray value.</param>
        public static void Convert(ref Bgr <byte> bgr, ref Gray <byte> gray)
        {
            // Weighted sum approximating luma, then divide by 8 via a right shift.
            int weighted = 2 * bgr.R + 5 * bgr.G + bgr.B;
            gray.Intensity = (byte)(weighted >> 3);
        }
        /// <summary>
        /// Converts an image to an bitmap.
        /// </summary>
        /// <param name="img">Input image.</param>
        /// <returns>Bitmap</returns>
        public static Bitmap ToBitmap(this Gray <short>[,] img)
        {
            // Lock the managed array and convert it to a 16bpp grayscale bitmap.
            using (var lockedImg = img.Lock())
            {
                return toBitmap(lockedImg, PixelFormat.Format16bppGrayScale);
            }
        }
        /// <summary>
        /// Converts an image to an bitmap.
        /// </summary>
        /// <param name="img">Input image.</param>
        /// <returns>Bitmap</returns>
        public static Bitmap ToBitmap(this Gray <byte>[,] img)
        {
            // Lock the managed array and convert it to an 8bpp indexed (grayscale palette) bitmap.
            using (var lockedImg = img.Lock())
            {
                return toBitmap(lockedImg, PixelFormat.Format8bppIndexed);
            }
        }
        /// <summary>
        /// Creates template from the input image by using provided parameters.
        /// </summary>
        /// <param name="sourceImage">Input image.</param>
        /// <param name="minFeatureStrength">Minimum gradient value for the feature.</param>
        /// <param name="maxNumberOfFeatures">Maximum number of features per template. The features will be extracted so that their locations are semi-uniformly spread.</param>
        /// <param name="classLabel">Template class label.</param>
        /// <summary>
        /// Creates a template from the input image and derives the binary mask from the
        /// template's bounding rectangle.
        /// </summary>
        /// <param name="sourceImage">Input image.</param>
        /// <param name="minFeatureStrength">Minimum gradient value for the feature.</param>
        /// <param name="maxNumberOfFeatures">Maximum number of features per template. The features will be extracted so that their locations are semi-uniformly spread.</param>
        /// <param name="classLabel">Template class label.</param>
        public override void Initialize(Gray<byte>[,] sourceImage, int minFeatureStrength, int maxNumberOfFeatures, string classLabel)
        {
            base.Initialize(sourceImage, minFeatureStrength, maxNumberOfFeatures, classLabel);

            this.BinaryMask = sourceImage.Clone(BoundingRect);

            // The background must be black; invert if the top-left (background) pixel is lit.
            if (this.BinaryMask[0, 0].Intensity != 0)
            {
                this.BinaryMask = this.BinaryMask.Not();
            }

            // Suppress faint values left by a possible Gaussian pre-blur (anything below 75% of full white).
            this.BinaryMask = this.BinaryMask.ThresholdToZero((byte)(255 * 0.75), (byte)255);
        }
        /// <summary>
        /// Process image looking for corners.
        /// </summary>
        /// <param name="cornerDetector">Corner detection algorithm instance.</param>
        /// <param name="image">Source image to process.</param>
        /// <returns>Returns list of found corners (X-Y coordinates).</returns>
        /// <summary>
        /// Process image looking for corners.
        /// </summary>
        /// <param name="cornerDetector">Corner detection algorithm instance.</param>
        /// <param name="image">Source image to process.</param>
        /// <returns>Returns list of found corners (X-Y coordinates).</returns>
        public static List<Point> ProcessImage(this ICornersDetector cornerDetector, Gray<byte>[,] image)
        {
            using (var lockedImg = image.Lock())
            {
                // Run the detector on the AForge view and convert each corner to a Point.
                return cornerDetector.ProcessImage(lockedImg.AsAForgeImage())
                                     .Select(corner => corner.ToPoint())
                                     .ToList();
            }
        }
Exemplo n.º 13
0
        /// <summary>
        /// Gets specified image portion.
        /// If the coordinates are not the rounded, they will be interpolated.
        /// </summary>
        /// <param name="source">Image.</param>
        /// <param name="area">Requested area.</param>
        /// <returns>Interpolated image area.</returns>
        /// <summary>
        /// Gets the specified image portion; non-integer coordinates are interpolated.
        /// </summary>
        /// <param name="source">Image.</param>
        /// <param name="area">Requested area.</param>
        /// <returns>Interpolated image area.</returns>
        public static Gray <float>[,] GetRectSubPix(this Gray <float>[,] source, RectangleF area)
        {
            // Destination is [rows, cols]; fractional sizes are truncated.
            var destination = new Gray <float> [(int)area.Height, (int)area.Width];

            using (var srcView = source.Lock())
            using (var dstView = destination.Lock())
            {
                getRectSubPix_Float(srcView, area.Location, dstView);
            }

            return destination;
        }
Exemplo n.º 14
0
        /// <summary>
        ///  Gray-Level Difference Method (GLDM).
        ///  <para>Computes an gray-level histogram of difference values between adjacent pixels in an image.</para>
        ///  <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.GrayLevelDifferenceMethod">Gray-Level Difference Method</see> for details.</para>
        /// </summary>
        /// <param name="image">The source image.</param>
        /// <param name="autoGray">Whether the maximum value of gray should be automatically computed from the image. </param>
        /// <param name="degree">The direction at which the co-occurrence should be found.</param>
        /// <returns>An histogram containing co-occurrences for every gray level in <paramref name="image"/>.</returns>
        public static int[] GrayLevelDifferenceMethod(this Gray <byte>[,] image, CooccurrenceDegree degree, bool autoGray = true)
        {
            // Delegate to the Accord.NET GLDM implementation over a locked AForge view.
            var gldm = new GrayLevelDifferenceMethod(degree, autoGray);

            using (var lockedImg = image.Lock())
            {
                return gldm.Compute(lockedImg.AsAForgeImage());
            }
        }
        /// <summary>
        ///  Maximum cross-correlation feature point matching algorithm.
        /// </summary>
        /// <param name="image1">First image.</param>
        /// <param name="image2">Second image.</param>
        /// <param name="points1">Points from the first image.</param>
        /// <param name="points2">Points from the second image.</param>
        /// <param name="windowSize">The size of the correlation window.</param>
        /// <param name="maxDistance">The maximum distance to consider points as correlated.</param>
        /// <returns>Matched point-pairs.</returns>
        public static Point[][] Match(Gray<byte>[,] image1, Gray<byte>[,] image2, Point[] points1, Point[] points2, int windowSize, int maxDistance)
        {
            // Lock both images, build the matcher over their bitmap views, and match the point sets.
            using (var lockedFirst = image1.Lock())
            using (var lockedSecond = image2.Lock())
            {
                var matcher = new CorrelationMatching(windowSize, maxDistance, lockedFirst.AsBitmap(), lockedSecond.AsBitmap());
                return matcher.Match(points1.ToPoints(), points2.ToPoints()).ToPoints();
            }
        }
Exemplo n.º 16
0
        /// <summary>
        /// Features from Accelerated Segment Test (FAST) corners detector.
        /// <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.FastCornersDetector"/> for details.</para>
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="threshold">The suppression threshold. Decreasing this value increases the number of points detected by the algorithm.</param>
        /// <returns>Interest point locations.</returns>
        public static List <IntPoint> CornerFeaturesDetector(this Gray <byte>[,] im, int threshold = 20)
        {
            // Run Accord.NET's FAST detector on a locked AForge view of the image.
            var fast = new FastCornersDetector(threshold);

            using (var lockedImg = im.Lock())
            {
                return fast.ProcessImage(lockedImg.AsAForgeImage());
            }
        }
        /// <summary>
        /// Back-projects (creates probability map) from histogram values.
        /// </summary>
        /// <param name="srcs">Image channels.</param>
        /// <returns>Back-projection image (probability image) </returns>
        /// <summary>
        /// Back-projects (creates probability map) from histogram values.
        /// </summary>
        /// <param name="srcs">Image channels.</param>
        /// <returns>Back-projection image (probability image).</returns>
        public Gray<byte>[,] BackProject(Gray<byte>[][,] srcs)
        {
            var destImg = srcs.First().CopyBlank();

            using (var uDestImg = destImg.Lock())
            {
                var uChannels = srcs.Select(x => x.Lock()).ToArray();
                try
                {
                    backProjectByte(this, uChannels, uDestImg);
                }
                finally
                {
                    // Release channel locks even if back-projection throws (previously leaked on error).
                    uChannels.ForEach(x => x.Dispose());
                }
            }

            return destImg;
        }
Exemplo n.º 18
0
        /// <summary>
        /// Harris Corners Detector.
        /// <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.HarrisCornersDetector"/> for details.</para>
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="measure">Corners measures.</param>
        /// <param name="threshold">Harris threshold.</param>
        /// <param name="sigma">Gaussian smoothing sigma.</param>
        /// <param name="suppression">Non-maximum suppression window radius.</param>
        /// <returns>Interest point locations.</returns>
        public static List <IntPoint> HarrisCorners <TDepth>(this Gray <byte>[,] im, HarrisCornerMeasure measure = HarrisCornerMeasure.Harris, float threshold = 20000f, double sigma = 1.2, int suppression = 3)
        {
            // NOTE(review): the TDepth type parameter is unused; it is kept only so existing
            // call sites that specify it keep compiling.
            var harris = new HarrisCornersDetector(measure, threshold, sigma, suppression);

            using (var lockedImg = im.Lock())
            {
                return harris.ProcessImage(lockedImg.AsAForgeImage());
            }
        }
Exemplo n.º 19
0
        /// <summary>
        /// Extracts the contour from a single object in a grayscale image. (uses Accord built-in function)
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="minGradientStrength">The pixel value threshold above which a pixel
        /// is considered black (belonging to the object). Default is zero.</param>
        public static List <Point> FindContour(this Gray <byte>[,] im, byte minGradientStrength = 0)
        {
            // Trace the object's border with Accord.NET's border-following algorithm.
            var borderFollowing = new BorderFollowing(minGradientStrength);

            using (var lockedImg = im.Lock())
            {
                return borderFollowing.FindContour(lockedImg.AsAForgeImage());
            }
        }
Exemplo n.º 20
0
        /// <summary>
        /// Find non-zero locations in the image.
        /// </summary>
        /// <param name="img">Image.</param>
        /// <param name="values">Found non-zero values at the returned positions.</param>
        /// <returns>List of found non-zero locations.</returns>
        public static List <Point> FindNonZero(this Gray <float>[,] img, out IList <float> values)
        {
            List <Point> locations;
            IList rawValues;

            using (var lockedImg = img.Lock())
            {
                findNonZero_Float(lockedImg, out locations, out rawValues);
            }

            // Soft cast on purpose: a non-IList<float> result yields null rather than throwing.
            values = rawValues as IList <float>;
            return locations;
        }
Exemplo n.º 21
0
        /// <summary>
        /// Back-projects (creates probability map) from histogram values.
        /// </summary>
        /// <param name="srcs">Image channels.</param>
        /// <returns>Back-projection image (probability image) </returns>
        /// <summary>
        /// Back-projects (creates probability map) from histogram values.
        /// </summary>
        /// <param name="srcs">Image channels.</param>
        /// <returns>Back-projection image (probability image).</returns>
        public Gray <byte>[,] BackProject(Gray <byte>[][,] srcs)
        {
            var destImg = srcs.First().CopyBlank();

            using (var uDestImg = destImg.Lock())
            {
                var uChannels = srcs.Select(x => x.Lock()).ToArray();
                try
                {
                    backProjectByte(this, uChannels, uDestImg);
                }
                finally
                {
                    // Release channel locks even if back-projection throws (previously leaked on error).
                    uChannels.ForEach(x => x.Dispose());
                }
            }

            return(destImg);
        }
Exemplo n.º 22
0
        /// <summary>
        ///  Maximum cross-correlation feature point matching algorithm.
        /// </summary>
        /// <param name="image1">First image.</param>
        /// <param name="image2">Second image.</param>
        /// <param name="points1">Points from the first image.</param>
        /// <param name="points2">Points from the second image.</param>
        /// <param name="windowSize">The size of the correlation window.</param>
        /// <param name="maxDistance">The maximum distance to consider points as correlated.</param>
        /// <returns>Matched point-pairs.</returns>
        public static Point[][] Match(Gray <byte>[,] image1, Gray <byte>[,] image2, Point[] points1, Point[] points2, int windowSize, int maxDistance)
        {
            Point[][] pairs;

            // Lock both images so their bitmap views stay valid while matching runs.
            using (var lockedA = image1.Lock())
            using (var lockedB = image2.Lock())
            {
                var matcher = new CorrelationMatching(windowSize, maxDistance, lockedA.AsBitmap(), lockedB.AsBitmap());
                pairs = matcher.Match(points1.ToPoints(), points2.ToPoints()).ToPoints();
            }

            return pairs;
        }
Exemplo n.º 23
0
        /// <summary>
        /// The blending filter is able to blend two images using a homography matrix.
        /// A linear alpha gradient is used to smooth out differences between the two
        /// images, effectively blending them in two images. The gradient is computed
        /// considering the distance between the centers of the two images.
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="overlayIm">The overlay image (also called the anchor).</param>
        /// <param name="homography">Homography matrix used to map a image passed to
        /// the filter to the overlay image specified at filter creation.</param>
        /// <param name="fillColor">The filling color used to fill blank spaces. The filling color will only be visible after the image is converted
        /// to 24bpp. The alpha channel will be used internally by the filter.</param>
        /// <param name="gradient">A value indicating whether to blend using a linear
        ///  gradient or just superimpose the two images with equal weights.</param>
        /// <param name="alphaOnly">A value indicating whether only the alpha channel
        /// should be blended. This can be used together with a transparency
        /// mask to selectively blend only portions of the image.</param>
        /// <returns>Blended image.</returns>
        public static Bgra <byte>[,] Blend(this Gray <byte>[,] im, Gray <byte>[,] overlayIm, MatrixH homography, Bgra <byte> fillColor, bool gradient = true, bool alphaOnly = false)
        {
            // The overlay must stay locked while the filter runs over its bitmap view.
            using (var lockedOverlay = overlayIm.Lock())
            {
                var blend = new Blend(homography, lockedOverlay.AsBitmap())
                {
                    AlphaOnly = alphaOnly,
                    Gradient  = gradient,
                    FillColor = fillColor.ToColor()
                };

                return im.ApplyBaseTransformationFilter <Gray <byte>, Bgra <byte> >(blend);
            }
        }
Exemplo n.º 24
0
        /// <summary>
        /// The blending filter is able to blend two images using a homography matrix.
        /// A linear alpha gradient is used to smooth out differences between the two
        /// images, effectively blending them in two images. The gradient is computed
        /// considering the distance between the centers of the two images.
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="overlayIm">The overlay image (also called the anchor).</param>
        /// <param name="homography">Homography matrix used to map a image passed to
        /// the filter to the overlay image specified at filter creation.</param>
        /// <param name="fillColor">The filling color used to fill blank spaces. The filling color will only be visible after the image is converted
        /// to 24bpp. The alpha channel will be used internally by the filter.</param>
        /// <param name="gradient">A value indicating whether to blend using a linear
        ///  gradient or just superimpose the two images with equal weights.</param>
        /// <param name="alphaOnly">A value indicating whether only the alpha channel
        /// should be blended. This can be used together with a transparency
        /// mask to selectively blend only portions of the image.</param>
        /// <returns>Blended image.</returns>
        public static Bgra<byte>[,] Blend(this Gray<byte>[,] im, Gray<byte>[,] overlayIm, MatrixH homography, Bgra<byte> fillColor, bool gradient = true, bool alphaOnly = false)
        {
            // Keep the overlay locked for the lifetime of the filter, which reads its bitmap view.
            using (var lockedOverlay = overlayIm.Lock())
            {
                var filter = new Blend(homography, lockedOverlay.AsBitmap())
                {
                    AlphaOnly = alphaOnly,
                    Gradient = gradient,
                    FillColor = fillColor.ToColor()
                };

                return im.ApplyBaseTransformationFilter<Gray<byte>, Bgra<byte>>(filter);
            }
        }
        /// <summary>
        /// Computes the Bag of Words model.
        /// </summary>
        /// <typeparam name="TPoint">
        /// The <see cref="Accord.Imaging.IFeaturePoint{TFeature}"/> type to be used with this class,
        /// such as <see cref="Accord.Imaging.SpeededUpRobustFeaturePoint"/>.
        /// </typeparam>
        /// <typeparam name="TFeature">
        /// The feature type of the <typeparamref name="TPoint"/>, such
        /// as <see cref="T:double[]"/>.
        /// </typeparam>
        /// <param name="bow">Bag of Visual Words.</param>
        /// <param name="images">The set of images to initialize the model.</param>
        /// <param name="threshold">Convergence rate for the k-means algorithm. Default is 1e-5.</param>
        /// <returns>The list of feature points detected in all images.</returns>
        public static List <TPoint>[] Compute <TPoint, TFeature>(this BagOfVisualWords <TPoint, TFeature> bow,
                                                                 Gray <byte>[][,] images, double threshold = 1e-5)
            where TPoint : IFeatureDescriptor <TFeature>
        {
            // BUG FIX: the lazy Select(x => x.Lock()) was enumerated twice (once by Compute, once
            // by the Dispose pass), locking every image twice and never disposing the first set of
            // locks. Materialize once, and dispose in a finally so locks are released on error too.
            var uImages = images.Select(x => x.Lock()).ToArray();

            try
            {
                return bow.Compute
                       (
                           uImages.Select(x => x.AsBitmap()).ToArray(),
                           threshold
                       );
            }
            finally
            {
                uImages.ForEach(x => x.Dispose());
            }
        }
Exemplo n.º 26
0
        /// <summary>
        /// Extracts the specified image channels.
        /// </summary>
        /// <typeparam name="TSrcColor">Source color type.</typeparam>
        /// <typeparam name="TDepth">Channel depth type.</typeparam>
        /// <param name="image">Image.</param>
        /// <param name="area">Working area.</param>
        /// <param name="channelIndices">Channel indicies to extract. If null, all channels are extracted.</param>
        /// <returns>Channel collection.</returns>
        public static unsafe Gray <TDepth>[][,] SplitChannels <TSrcColor, TDepth>(this TSrcColor[,] image, Rectangle area, params int[] channelIndices)
        where TSrcColor : struct, IColor <TDepth>
        where TDepth : struct
        {
            // Default to every channel of the source color when none are explicitly requested.
            if (channelIndices == null || channelIndices.Length == 0)
            {
                channelIndices = Enumerable.Range(0, ColorInfo.GetInfo <TSrcColor>().ChannelCount).ToArray();
            }

            // Extract each requested channel over the same working area.
            return channelIndices
                   .Select(channelIdx => GetChannel <TSrcColor, TDepth>(image, area, channelIdx))
                   .ToArray();
        }
Exemplo n.º 27
0
        /// <summary>
        /// Mean-shift over a probability (back-projection) map: repeatedly re-centers the search
        /// window on the centroid of the probability mass it covers, then extracts second-order
        /// moments from the converged window for cam-shift.
        /// </summary>
        /// <param name="probabilityMap">Gray probability map (e.g. a histogram back-projection).</param>
        /// <param name="roi">Initial search window.</param>
        /// <param name="termCriteria">Termination criteria (iteration count and/or minimum shift).</param>
        /// <param name="centralMoments">Central moments of the final window.</param>
        /// <returns>The converged search window, clamped to the image area.</returns>
        private static Rectangle process(Gray <byte>[,] probabilityMap, Rectangle roi, TermCriteria termCriteria, out CentralMoments centralMoments)
        {
            var imageArea = new Rectangle(0, 0, probabilityMap.Width(), probabilityMap.Height());

            var window = roi;
            var rawMoments = new RawMoments(order: 1);

            int iteration = 0;
            double delta = Byte.MaxValue; // large initial value so the shift criterion cannot fire immediately

            while (!termCriteria.ShouldTerminate(iteration, delta) && !window.IsEmptyArea())
            {
                // First-order moments give the centroid of the probability mass in the window.
                rawMoments.Compute(probabilityMap, window);

                int dx = (int)(rawMoments.CenterX - window.Width / 2f);
                int dy = (int)(rawMoments.CenterY - window.Height / 2f);

                // Re-center the window on the centroid, clamped to the image.
                window.X += dx;
                window.Y += dy;
                window.Intersect(imageArea);

                delta = System.Math.Abs((double)dx) + System.Math.Abs((double)dy); // for the termination criteria only
                iteration++;
            }

            if (!window.IsEmptyArea())
            {
                // Second-order moments for the final (cam-shift) estimate, plus one last shift.
                rawMoments.Order = 2;
                rawMoments.Compute(probabilityMap, window);

                window.X += (int)(rawMoments.CenterX - window.Width / 2f);
                window.Y += (int)(rawMoments.CenterY - window.Height / 2f);
                window.Intersect(imageArea);
            }

            centralMoments = new CentralMoments(rawMoments); // consumed by cam-shift
            return window;
        }
        /// <summary>
        /// Builds one template per (scale, rotation) pair from the template image's contour and
        /// adds each to the dictionary keyed by its model parameters.
        /// </summary>
        /// <param name="dict">Destination template dictionary.</param>
        /// <param name="templateImg">Template image to extract the contour from.</param>
        /// <param name="templateIdx">Template index used in the dictionary key.</param>
        /// <param name="scaleRange">Scales to generate.</param>
        /// <param name="rotationRange">Rotations to generate.</param>
        /// <param name="label">Template class label.</param>
        private static void createRange(Dictionary<ModelParams, ITemplate> dict, Gray<byte>[,] templateImg,
                                        int templateIdx, IEnumerable<int> scaleRange, IEnumerable<int> rotationRange, string label = "")
        {
            // Sample the contour uniformly and normalize it once, up front.
            var samplePoints = findContour(templateImg)
                               .GetEqualyDistributedPoints(N_SAMPLE_POINTS, treatAsClosed: false)
                               .Normalize();

            foreach (var scale in scaleRange)
            {
                foreach (var rotation in rotationRange)
                {
                    var key = new ModelParams(templateIdx, (short)scale, (short)rotation);
                    dict.Add(key, create(samplePoints, scale, rotation, label));
                }
            }
        }
Exemplo n.º 29
0
        /// <summary>
        /// Extracts a single image channel.
        /// </summary>
        /// <typeparam name="TSrcColor">Source color type.</typeparam>
        /// <typeparam name="TDepth">Channel depth type.</typeparam>
        /// <param name="image">Image.</param>
        /// <param name="area">Working area.</param>
        /// <param name="channelIndex">Channel index.</param>
        /// <returns>Extracted channel.</returns>
        public static unsafe Gray <TDepth>[,] GetChannel <TSrcColor, TDepth>(this TSrcColor[,] image, Rectangle area, int channelIndex)
        where TSrcColor : struct, IColor <TDepth>
        where TDepth : struct
        {
            int width  = area.Width;
            int height = area.Height;

            // Destination: a single-channel image of the requested area ([rows, cols] layout).
            var dest = new Gray <TDepth> [area.Height, area.Width];

            using (var lockedImage = image.Lock())
                using (var dstImg = dest.Lock())
                {
                    var srcImg      = lockedImage.GetSubRect(area);
                    int channelSize = srcImg.ColorInfo.ChannelSize; // bytes per channel element
                    int colorSize   = srcImg.ColorInfo.Size;        // bytes per whole pixel

                    // Start at the requested channel's byte offset within the first pixel.
                    byte *srcPtr = (byte *)srcImg.ImageData + channelIndex * srcImg.ColorInfo.ChannelSize;
                    byte *dstPtr = (byte *)dstImg.ImageData;

                    for (int row = 0; row < height; row++)
                    {
                        byte *srcColPtr = srcPtr;
                        byte *dstColPtr = dstPtr;
                        for (int col = 0; col < width; col++)
                        {
                            /********** copy channel byte-per-byte ************/
                            for (int partIdx = 0; partIdx < channelSize; partIdx++)
                            {
                                dstColPtr[partIdx] = srcColPtr[partIdx];
                            }

                            srcColPtr += colorSize; //move to the next column
                            dstColPtr += channelSize;
                            /********** copy channel byte-per-byte ************/
                        }

                        // Advance by each image's own row stride (strides may include padding).
                        srcPtr += srcImg.Stride;
                        dstPtr += dstImg.Stride;
                    }
                }

            return(dest);
        }
        /// <summary>
        /// Calculates histogram.
        /// </summary>
        /// <param name="channels">Image channels.</param>
        /// <param name="accumulate">Accumulate or erase histogram before.</param>
        /// <param name="mask">Mask for image color locations.</param>
        /// <param name="maskOffset">The location offset for the mask. The mask area will be [offsetX, offsetY, channelWidth, channelHeight].</param>
        public void Calculate(Gray<byte>[][,] channels, bool accumulate, Gray<byte>[,] mask, Point maskOffset)
        {
            if (!accumulate)
                Array.Clear(histogram, 0, this.NumberOfElements);

            if (mask == null)
            {
                //2D image arrays are indexed as [row (height), column (width)] - the dimensions were swapped here before
                mask = new Gray<byte>[channels[0].Height(), channels[0].Width()];
                mask.SetValue<Gray<byte>>(Byte.MaxValue);
            }

            var maskArea = new Rectangle(maskOffset, channels.First().Size());
            using (var uMask = mask.Lock(maskArea))
            {
                var uChannels = channels.Select(x => x.Lock()).ToArray();
                try
                {
                    calculateHistByte(this, uChannels, uMask);
                }
                finally
                {
                    //release the locked channels even if the calculation throws
                    uChannels.ForEach(x => x.Dispose());
                }
            }
        }
        /// <summary>
        /// Computes gradient orientations from the color image. Orientation from the channel which has the maximum gradient magnitude is taken as the orientation for a location.
        /// </summary>
        /// <param name="frame">Image.</param>
        /// <param name="magnitudeSqrImage">Squared magnitude image.</param>
        /// <param name="minValidMagnitude">Minimal valid magnitude.</param>
        /// <returns>Orientation image (angles are in degrees).</returns>
        public static unsafe Gray<int>[,] Compute(Bgr<byte>[,] frame, out Gray<int>[,] magnitudeSqrImage, int minValidMagnitude)
        {
            //the per-pixel kernel compares squared magnitudes, so square the threshold once
            int sqrThreshold = minValidMagnitude * minValidMagnitude;

            var orientations = new Gray<int>[frame.Height(), frame.Width()];
            var sqrMagnitudes = orientations.CopyBlank();

            using (var uFrame = frame.Lock())
            {
                //launch over the interior only; the kernel border stays untouched
                ParallelLauncher.Launch(
                    thread => computeColor(thread, (byte*)uFrame.ImageData, uFrame.Stride, orientations, sqrMagnitudes, sqrThreshold),
                    frame.Width() - 2 * kernelRadius,
                    frame.Height() - 2 * kernelRadius);
            }

            magnitudeSqrImage = sqrMagnitudes;
            return orientations;
        }
Exemplo n.º 32
0
        /// <summary>
        /// Convolves the image with the given kernels by multiplying spectra in the frequency domain.
        /// </summary>
        /// <param name="image">Source image.</param>
        /// <param name="kernels">Convolution kernels.</param>
        /// <param name="options">Border handling options.</param>
        /// <returns>Convolution result cropped back to the source image size.</returns>
        private static Gray <float>[,] convolve(Gray <float>[,] image, IList <float[, ]> kernels, ConvolutionBorder options)
        {
            //pad the image so that the largest kernel fits
            int maxKernelWidth, maxKernelHeight;
            getTheBiggestSize(kernels, out maxKernelWidth, out maxKernelHeight);

            int offsetX, offsetY;
            var result = prepareImage(image, maxKernelWidth, maxKernelHeight, options, out offsetX, out offsetY);

            //apply each kernel via complex multiplication
            foreach (var kernel in kernels)
            {
                result = result.MulComplex(prepareKernel(kernel, result.Size()), inPlace: false);
            }

            //crop back to the original image area
            return getConvolutionResult(result, offsetX, offsetY, image.Size());
        }
        /// <summary>
        /// Finds the template contour, cuts away the run of points lying on the image's bottom border
        /// and shifts the contour beginning to the first non-border point.
        /// </summary>
        /// <param name="templateImg">Template image.</param>
        /// <returns>Contour points (bottom-border run removed).</returns>
        private static IList<PointF> findContour(Gray<byte>[,] templateImg)
        {
            var contour = templateImg.FindContour(minGradientStrength: 150).Select(x => (PointF)x).ToList();

            /*********** cut bottom border and shift contour beginning to the first non-border point ***************/
            int firstIdx = -1;
            int lastIdx = -1;
            int bottomRow = templateImg.Height() - 1;

            for (int i = 0; i < contour.Count; i++)
            {
                if (contour[i].Y == bottomRow)
                {
                    if (firstIdx == -1) firstIdx = i;
                    lastIdx = i;
                }
            }

            //no bottom-border points => nothing to cut
            //(previously GetRange was called with index -1 in this case)
            if (firstIdx == -1)
                return contour;

            return new CircularList<PointF>(contour).GetRange(lastIdx, contour.Count - (lastIdx - firstIdx + 1));
        }
Exemplo n.º 34
0
        /// <summary>
        /// Builds the back-projection probability map for the frame and runs Camshift to locate the tracked object.
        /// </summary>
        /// <param name="frame">Current video frame.</param>
        /// <param name="probabilityMap">Back-projected probability map constrained by the user's V-channel limits.</param>
        /// <param name="prevSearchArea">Search area used for this step (value before the update).</param>
        /// <param name="foundBox">Object position, size and angle found by Camshift.</param>
        private void processImage(Bgr<byte>[,] frame, out Gray<byte>[,] probabilityMap, out Rectangle prevSearchArea, out Box2D foundBox)
        {
            prevSearchArea = searchArea;

            //HSV space is used for histogram back-projection
            var hsvImg = frame.ToHsv();

            //probability map from the ratio histogram (H and S channels)
            probabilityMap = ratioHist.BackProject(hsvImg.SplitChannels<Hsv<byte>, byte>(0, 1)); //or new Image<Gray<byte>>[]{ hsvImg[0], hsvImg[1]...} 

            //apply the user-selected value (brightness) constraints
            Gray<byte>[,] mask = hsvImg.InRange(new Hsv<byte>(0, 0, (byte)minV), new Hsv<byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);
            probabilityMap.AndByte(mask, inPlace: true);

            //locate the object (position, size and angle)
            foundBox = Camshift.Process(probabilityMap, searchArea);

            //slightly inflate the found area to obtain the next search window
            searchArea = Rectangle.Round(foundBox.GetMinArea()).Inflate(0.05, 0.05, frame.Size());

            //an empty search area means the object is lost => reset tracking
            if (searchArea.IsEmpty)
                isROISelected = false;
        }
Exemplo n.º 35
0
        /// <summary>
        /// Camshift algorithm
        /// </summary>
        /// <param name="probabilityMap">Probability map [0-255].</param>
        /// <param name="roi">Initial Search area</param>
        /// <param name="termCriteria">Mean shift termination criteria (PLEASE DO NOT REMOVE (but you can move it) THIS CLASS; PLEASE!!!)</param>
        /// <param name="centralMoments">Calculated central moments (up to order 2).</param>
        /// <returns>Object position, size and angle packed into a structure.</returns>
        public static Box2D Process(Gray <byte>[,] probabilityMap, Rectangle roi, TermCriteria termCriteria, out CentralMoments centralMoments)
        {
            //mean shift yields the object location; the moments describe its shape
            var objArea = Meanshift.Process(probabilityMap, roi, termCriteria, out centralMoments);

            //size and angle come from the moment-fitted ellipse, centered at the found area
            var ellipse = centralMoments.GetEllipse();
            ellipse.Center = objArea.Center();

            //an invalid or degenerate ellipse means the object is lost => return an empty structure
            var size = ellipse.Size;
            bool isLost = Single.IsNaN(size.Width) || Single.IsNaN(size.Height) ||
                          size.Width < 1 || size.Height < 1;

            return isLost ? Box2D.Empty : (Box2D)ellipse;
        }
Exemplo n.º 36
0
        /// <summary>
        /// Calculates histogram.
        /// </summary>
        /// <param name="channels">Image channels.</param>
        /// <param name="accumulate">Accumulate or erase histogram before.</param>
        /// <param name="mask">Mask for image color locations.</param>
        /// <param name="maskOffset">The location offset for the mask. The mask area will be [offsetX, offsetY, channelWidth, channelHeight].</param>
        public void Calculate(Gray <byte>[][,] channels, bool accumulate, Gray <byte>[,] mask, Point maskOffset)
        {
            if (!accumulate)
            {
                Array.Clear(histogram, 0, this.NumberOfElements);
            }

            if (mask == null)
            {
                //2D image arrays are indexed as [row (height), column (width)] - the dimensions were swapped here before
                mask = new Gray <byte> [channels[0].Height(), channels[0].Width()];
                mask.SetValue <Gray <byte> >(Byte.MaxValue);
            }

            var maskArea = new Rectangle(maskOffset, channels.First().Size());

            using (var uMask = mask.Lock(maskArea))
            {
                var uChannels = channels.Select(x => x.Lock()).ToArray();
                try
                {
                    calculateHistByte(this, uChannels, uMask);
                }
                finally
                {
                    //release the locked channels even if the calculation throws
                    uChannels.ForEach(x => x.Dispose());
                }
            }
        }
        public void TestLKFlow()
        {
            //two synthetic frames: a bright rectangle shifted by (+5, +1) pixels
            var prevFrame = new Gray<float>[480, 640];
            prevFrame.SetValue<Gray<float>>(System.Byte.MaxValue, new Accord.Extensions.Rectangle(272, 82, 116, 64));

            var currFrame = new Gray<float>[480, 640];
            currFrame.SetValue<Gray<float>>(System.Byte.MaxValue, new Accord.Extensions.Rectangle(277, 83, 116, 64));

            //track the rectangle's top-left corner; expected to move 272,82 -> 277,83
            var trackedPoints = new List<PointF> { new PointF(272, 82) };

            PointF[] currFeatures;
            KLTFeatureStatus[] featureStatus;
            PyrLKOpticalFlow<Gray<float>>.EstimateFlow(prevFrame, currFrame, trackedPoints.ToArray(),
                                                       out currFeatures, out featureStatus);

            Assert.IsTrue(featureStatus[0] == KLTFeatureStatus.Success);
            Assert.IsTrue(Math.Round(currFeatures[0].X) == 277 && Math.Round(currFeatures[0].Y) == 83);
        }
Exemplo n.º 38
0
        /// <summary>
        /// Initializes the tracker from the user-selected region of interest:
        /// builds the object and background hue-saturation histograms and their ratio histogram.
        /// </summary>
        /// <param name="frame">Frame from which to extract the object's color model.</param>
        private void initTracking(Bgr <byte>[,] frame)
        {
            //histograms are built in HSV space
            var hsvImg = frame.ToHsv();

            //user constraints on the V (brightness) channel
            Gray <byte>[,] mask = hsvImg.InRange(new Hsv <byte>(0, 0, (byte)minV), new Hsv <byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);

            //object histogram (H and S channels), normalized by the ROI area
            //('!false' replaced by the equivalent literal 'true'; NOTE(review): accumulate=true assumes the histogram is empty here - confirm)
            originalObjHist.Calculate(hsvImg.SplitChannels <Hsv <byte>, byte>(roi, 0, 1), true, mask, roi.Location);
            originalObjHist.Scale((float)1 / roi.Area());

            //background histogram from the inflated ROI, normalized by its area
            var backgroundArea = roi.Inflate(1.5, 1.5, frame.Size());
            backgroundHist.Calculate(hsvImg.SplitChannels <Hsv <byte>, byte>(backgroundArea, 0, 1), true, mask, backgroundArea.Location);
            backgroundHist.Scale((float)1 / backgroundArea.Area());

            //the ratio histogram suppresses colors that also appear in the background
            ratioHist = originalObjHist.CreateRatioHistogram(backgroundHist, Byte.MaxValue, 10);

            searchArea = roi;
            roi        = Rectangle.Empty;
        }
Exemplo n.º 39
0
        /// <summary>
        /// Builds the back-projection probability map for the frame and runs Camshift to locate the tracked object.
        /// </summary>
        /// <param name="frame">Current video frame.</param>
        /// <param name="probabilityMap">Back-projected probability map constrained by the user's V-channel limits.</param>
        /// <param name="prevSearchArea">Search area used for this step (value before the update).</param>
        /// <param name="foundBox">Object position, size and angle found by Camshift.</param>
        private void processImage(Bgr <byte>[,] frame, out Gray <byte>[,] probabilityMap, out Rectangle prevSearchArea, out Box2D foundBox)
        {
            prevSearchArea = searchArea;

            //HSV space is used for histogram back-projection
            var hsvImg = frame.ToHsv();

            //probability map from the ratio histogram (H and S channels)
            probabilityMap = ratioHist.BackProject(hsvImg.SplitChannels <Hsv <byte>, byte>(0, 1)); //or new Image<Gray<byte>>[]{ hsvImg[0], hsvImg[1]...}

            //apply the user-selected value (brightness) constraints
            Gray <byte>[,] mask = hsvImg.InRange(new Hsv <byte>(0, 0, (byte)minV), new Hsv <byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);
            probabilityMap.AndByte(mask, inPlace: true);

            //locate the object (position, size and angle)
            foundBox = Camshift.Process(probabilityMap, searchArea);

            //slightly inflate the found area to obtain the next search window
            searchArea = Rectangle.Round(foundBox.GetMinArea()).Inflate(0.05, 0.05, frame.Size());

            //an empty search area means the object is lost => reset tracking
            if (searchArea.IsEmpty)
            {
                isROISelected = false;
            }
        }
Exemplo n.º 40
0
 /// <summary>
 /// Resizes an image using specified interpolation mode.
 /// </summary>
 /// <param name="img">Input image.</param>
 /// <param name="scale">Non-negative image size scale factor. If 1 the new size will be equal.</param>
 /// <param name="mode">Interpolation mode.</param>
 /// <returns>Resized image.</returns>
 public static Gray <byte>[,] Resize(this Gray <byte>[,] img, float scale, InterpolationMode mode)
 {
     //delegate to the shared generic implementation
     return ResizeExtensionsBase.Resize <Gray <byte> >(img, scale, mode);
 }
Exemplo n.º 41
0
 /// <summary>
 /// Meanshift algorithm
 /// </summary>
 /// <param name="probabilityMap">Probability map [0-1].</param>
 /// <param name="roi">Initial search area</param>
 /// <param name="termCriteria">Mean shift termination criteria.</param>
 /// <returns>Object area.</returns>
 public static Rectangle Process(Gray<byte>[,] probabilityMap, Rectangle roi, TermCriteria termCriteria)
 {
     //central moments are computed internally but not needed by this overload
     CentralMoments ignoredMoments;
     return process(probabilityMap, roi, termCriteria, out ignoredMoments);
 }
Exemplo n.º 42
0
 /// <summary>
 /// Calculates phase using Atan2 (secondImage / firstImage).
 /// </summary>
 /// <param name="imageX">First image.</param>
 /// <param name="imageY">Second image.</param>
 /// <returns>Phase.</returns>
 public static Gray <double>[,] Phase(this  Gray <double>[,] imageX, Gray <double>[,] imageY)
 {
     //element-wise phase computation via the double-precision kernel
     var result = imageX.Calculate(imageY, phase_Double, inPlace: false);
     return result;
 }
Exemplo n.º 43
0
 /// <summary>
 /// Calculates phase using Atan2 (secondImage / firstImage).
 /// </summary>
 /// <param name="imageX">First image.</param>
 /// <param name="imageY">Second image.</param>
 /// <returns>Phase.</returns>
 public static Gray <float>[,] Phase(this  Gray <float>[,] imageX, Gray <float>[,] imageY)
 {
     //element-wise phase computation via the single-precision kernel
     var result = imageX.Calculate(imageY, phase_Float, inPlace: false);
     return result;
 }
Exemplo n.º 44
0
 /// <summary>
 /// Calculates phase using Atan2 (secondImage / firstImage).
 /// </summary>
 /// <param name="imageX">First image.</param>
 /// <param name="imageY">Second image.</param>
 /// <returns>Phase.</returns>
 public static Gray<double>[,] Phase(this  Gray<double>[,] imageX, Gray<double>[,] imageY)
 {
     //element-wise phase computation via the double-precision kernel
     return (imageX.Calculate(imageY, phase_Double, inPlace: false));
 }
Exemplo n.º 45
0
        /// <summary>
        /// Camshift algorithm
        /// </summary>
        /// <param name="probabilityMap">Probability map [0-1].</param>
        /// <param name="roi">Initial Search area</param>
        /// <returns>Object position, size and angle packed into a structure.</returns>
        public static Box2D Process(Gray <byte>[,] probabilityMap, Rectangle roi)
        {
            //delegate to the full overload using the default termination criteria
            CentralMoments unusedMoments;
            return Process(probabilityMap, roi, Meanshift.DEFAULT_TERM, out unusedMoments);
        }
        /// <summary>
        /// Per-pixel kernel: computes the gradient orientation and squared magnitude at
        /// (thread.X, thread.Y) of a single-channel byte image using a 3x3 Sobel operator.
        /// Results are written with a <c>kernelRadius</c> offset so that border pixels stay untouched.
        /// </summary>
        /// <param name="thread">Kernel thread carrying the (X, Y) coordinate to process.</param>
        /// <param name="frame">Pointer to the top-left pixel of the source grayscale image.</param>
        /// <param name="frameStride">Source image stride in bytes.</param>
        /// <param name="orientationImage">Destination orientation image (via Atan2 approximation).</param>
        /// <param name="magnitudeSqrImage">Destination squared gradient magnitude image.</param>
        /// <param name="minSqrMagnitude">Minimal squared magnitude for a valid orientation.</param>
        private static unsafe void computeGray(KernelThread thread, byte* frame, int frameStride, Gray<int>[,] orientationImage, Gray<int>[,] magnitudeSqrImage, int minSqrMagnitude)
        {
            // position at the top-left corner of this thread's 3x3 neighborhood (1 byte per pixel)
            frame = frame + frameStride * thread.Y + thread.X;
            var srcPtr = frame;

            // accumulate horizontal (X) and vertical (Y) Sobel responses over the 3x3 window
            int sumX = 0, sumY = 0;
            for (int r = 0; r < 3; r++)
            {
                for (int c = 0; c < 3; c++)
                {
                    sumX += srcPtr[c] * Sobel_3x3_X[r, c];
                    sumY += srcPtr[c] * Sobel_3x3_Y[r, c];
                }

                srcPtr = (byte*)srcPtr + frameStride;
            }
            //sumX >>= 3; sumY >>= 3; //divide by 8 (normalize kernel) //without this

            // squared gradient magnitude (no square root needed for thresholding)
            var grad = sumX * sumX + sumY * sumY;
            if (grad < minSqrMagnitude)
            {
                // too weak: mark orientation invalid; magnitude image is left as-is
                //magnitudeSqrImage[thread.Y + kernelRadius, thread.X + kernelRadius] = 0;  //redundant
                orientationImage[thread.Y + kernelRadius, thread.X + kernelRadius] = FeatureMap.INVALID_ORIENTATION;
            }
            else
            {
                magnitudeSqrImage[thread.Y + kernelRadius, thread.X + kernelRadius] = grad;
                orientationImage[thread.Y + kernelRadius, thread.X + kernelRadius] = MathExtensions.Atan2Aprox(sumY, sumX);
            }
        }
Exemplo n.º 47
0
 /// <summary>
 /// The blending filter is able to blend two images using a homography matrix.
 /// A linear alpha gradient is used to smooth out differences between the two
 /// images, effectively blending them in two images. The gradient is computed
 /// considering the distance between the centers of the two images.
 /// <para>Homography matrix is set to identity.</para>
 /// <para>Fill color is set to black with alpha set to 0 (all zeros).</para>
 /// </summary>
 /// <param name="im">Image.</param>
 /// <param name="overlayIm">The overlay image (also called the anchor).</param>
 /// <param name="gradient">A value indicating whether to blend using a linear
 ///  gradient or just superimpose the two images with equal weights.</param>
 /// <param name="alphaOnly">A value indicating whether only the alpha channel
 /// should be blended. This can be used together with a transparency
 /// mask to selectively blend only portions of the image.</param>
 /// <returns>Blended image.</returns>
 public static Bgra <byte>[,] Blend(this Gray <byte>[,] im, Gray <byte>[,] overlayIm, bool gradient = true, bool alphaOnly = false)
 {
     //identity homography; transparent-black fill color (all zeros)
     var homography = new MatrixH(Matrix.Identity(3));
     var fillColor  = new Bgra <byte>();
     return Blend(im, overlayIm, homography, fillColor, gradient, alphaOnly);
 }
        /// <summary>
        /// Runs one Camshift step on the given frame and validates the result.
        /// </summary>
        /// <param name="frame">Current video frame.</param>
        /// <param name="searchArea">Area in which to search for the object.</param>
        /// <param name="probabilityMap">Back-projected probability map constrained by the user's V-channel limits.</param>
        /// <param name="foundBox">Found object box; empty if the detection is rejected.</param>
        private void trackCamshift(Bgr<byte>[,] frame, Rectangle searchArea, out Gray<byte>[,] probabilityMap, out Box2D foundBox)
        {
            const int PROBABILITY_MIN_VAL = (int)(0.3f * Byte.MaxValue);

            //HSV space is used for histogram back-projection
            var hsvImg = frame.ToHsv();

            //probability map from the ratio histogram (H and S channels)
            probabilityMap = ratioHist.BackProject(hsvImg.SplitChannels<Hsv<byte>, byte>(0, 1)); //or new Image<Gray<byte>>[]{ hsvImg[0], hsvImg[1]...} 

            //apply the user-selected value (brightness) constraints
            Gray<byte>[,] mask = hsvImg.InRange(new Hsv<byte>(0, 0, (byte)minV), new Hsv<byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);
            probabilityMap.AndByte(mask, inPlace: true);

            //locate the object (position, size and angle)
            CentralMoments centralMoments;
            foundBox = Camshift.Process(probabilityMap, searchArea, Meanshift.DEFAULT_TERM, out centralMoments);

            //reject weak or degenerate detections
            float avgIntensity = centralMoments.Mu00 / (foundBox.Size.Area() + Single.Epsilon);
            bool tooWeak = avgIntensity < PROBABILITY_MIN_VAL;
            if (tooWeak || foundBox.Size.IsEmpty || foundBox.Size.Height < 12)
            {
                foundBox = Box2D.Empty; //invalid box
            }
        }
        /// <summary>
        /// Performs one tracking step: Kalman-predicts the search area, runs Camshift,
        /// and either corrects the Kalman filter by the measurement or falls back to the
        /// predicted position while counting frames the object has been invisible.
        /// Resets tracking after 100 consecutive invisible frames.
        /// </summary>
        /// <param name="frame">Current video frame.</param>
        /// <param name="probabilityMap">Back-projected probability map produced by the Camshift step.</param>
        /// <param name="foundBox">Found object box; empty if the object was not detected.</param>
        private void trackOneStep(Bgr<byte>[,] frame, out Gray<byte>[,] probabilityMap, out Box2D foundBox)
        {
            const float SEARCH_AREA_INFLATE_FACTOR = 0.05f;

            /**************************** KALMAN predict **************************/
            kalman.Predict(); 
            searchArea = createRect(kalman.State.Position, searchArea.Size, frame.Size());
            /**************************** KALMAN predict **************************/

            trackCamshift(frame, searchArea, out probabilityMap, out foundBox);

            if (!foundBox.IsEmpty)
            {
                /**************************** KALMAN correct **************************/
                kalman.Correct(new PointF(foundBox.Center.X, foundBox.Center.Y)); //correct predicted state by measurement
                /**************************** KALMAN correct **************************/
            
                var foundArea = Rectangle.Round(foundBox.GetMinArea());
                searchArea = foundArea.Inflate(SEARCH_AREA_INFLATE_FACTOR, SEARCH_AREA_INFLATE_FACTOR, frame.Size()); //inflate found area for search (X factor)...
                nonVisibleCount = 0;
            }
            else
            {
                // object not found: keep following the Kalman prediction
                nonVisibleCount++;
                if (nonVisibleCount == 1) //for the first time 
                {
                    searchArea = searchArea.Inflate(-SEARCH_AREA_INFLATE_FACTOR * 1.5, -SEARCH_AREA_INFLATE_FACTOR * 1.5, frame.Size()); //shrink (hysteresis)
                }

                searchArea = createRect(kalman.State.Position, searchArea.Size, frame.Size()); 
            }

            if (nonVisibleCount > 100) //if not visible for a longer time => reset tracking
            {
                nonVisibleCount = 0;
                isROISelected = false;
            }
        }
        /// <summary>
        /// Collects candidate features from every non-zero pixel of the quantized orientation image,
        /// orders them by the provided importance function (descending), and keeps up to
        /// <paramref name="maxNumOfFeatures"/> spatially scattered features.
        /// </summary>
        /// <param name="orientationImage">Quantized orientation image (one byte per pixel).</param>
        /// <param name="maxNumOfFeatures">Maximum number of features to keep.</param>
        /// <param name="featureImportanceFunc">Function which returns a feature's strength.</param>
        /// <returns>Filtered, scattered feature list.</returns>
        private static List<Feature> ExtractTemplate(Gray<byte>[,] orientationImage, int maxNumOfFeatures, Func<Feature, int> featureImportanceFunc)
        {
            List<Feature> candidates = new List<Feature>();

            using (var uOrientationImage = orientationImage.Lock())
            {
                byte* orientImgPtr = (byte*)uOrientationImage.ImageData;
                int orientImgStride = uOrientationImage.Stride;

                int imgWidth = uOrientationImage.Width;
                int imgHeight = uOrientationImage.Height;

                // scan row by row; every non-zero byte is a valid quantized orientation
                for (int row = 0; row < imgHeight; row++)
                {
                    for (int col = 0; col < imgWidth; col++)
                    {
                        if (orientImgPtr[col] == 0) //quantized orientations are: [1,2,4,8,...,128];
                            continue;

                        var candidate = new Feature(x: col, y: row, angleBinaryRepresentation: orientImgPtr[col]);
                        candidates.Add(candidate);
                    }

                    orientImgPtr += orientImgStride;
                }
            }

            candidates = candidates.OrderByDescending(featureImportanceFunc).ToList(); //order descending
            return FilterScatteredFeatures(candidates, maxNumOfFeatures, 5); //candidates.Count must be >= MIN_NUM_OF_FEATURES
        }
Exemplo n.º 51
0
 /// <summary>
 /// Calculates phase using Atan2 (secondImage / firstImage).
 /// </summary>
 /// <param name="imageX">First image.</param>
 /// <param name="imageY">Second image.</param>
 /// <returns>Phase.</returns>
 public static Gray<float>[,] Phase(this  Gray<float>[,] imageX, Gray<float>[,] imageY)
 {
     //element-wise phase computation via the single-precision kernel
     return (imageX.Calculate(imageY, phase_Float, inPlace: false));
 }
Exemplo n.º 52
0
        /// <summary>
        /// Adjusts pixels' contrast value by increasing RGB values of bright pixel and decreasing
        /// pixel values of dark pixels (or vise versa if contrast needs to be decreased).
        /// </summary>
        /// <param name="im">Image.</param>
        /// <param name="factor">Factor which is used to adjust contrast. Factor values greater than
        /// 0 increase contrast making light areas lighter and dark areas darker. Factor values
        /// less than 0 decrease contrast - decreasing variety of contrast.</param>
        /// <param name="inPlace">Process in place or make not. If in place is set to true, returned value may be discarded.</param>
        /// <returns>Corrected image.</returns>
        public static Gray <byte>[,] CorrectContrast(this Gray <byte>[,] im, int factor = 10, bool inPlace = false)
        {
            //delegate to the ContrastCorrection filter
            var contrastCorrection = new ContrastCorrection(factor);
            return im.ApplyFilter(contrastCorrection, inPlace);
        }
        /// <summary>
        /// Process image looking for corners.
        /// </summary>
        /// <param name="cornerDetector">Corner detection algorithm instance.</param>
        /// <param name="image">Source image to process.</param>
        /// <returns>Returns list of found corners (X-Y coordinates).</returns>
        public static List <Point> ProcessImage(this ICornersDetector cornerDetector, Gray <byte>[,] image)
        {
            using (var uImg = image.Lock())
            {
                //run the detector on the locked image and convert the corner coordinates
                return cornerDetector.ProcessImage(uImg.AsAForgeImage())
                                     .Select(x => x.ToPoint())
                                     .ToList();
            }
        }
Exemplo n.º 54
0
 /// <summary>
 /// Camshift algorithm
 /// </summary>
 /// <param name="probabilityMap">Probability map [0-1].</param>
 /// <param name="roi">Initial Search area</param>
 /// <returns>Object position, size and angle packed into a structure.</returns>
 public static Box2D Process(Gray<byte>[,] probabilityMap, Rectangle roi)
 {
     //delegate to the full overload using the default termination criteria
     CentralMoments unusedMoments;
     return Process(probabilityMap, roi, Meanshift.DEFAULT_TERM, out unusedMoments);
 }
Exemplo n.º 55
0
        /// <summary>
        /// Camshift algorithm
        /// </summary>
        /// <param name="probabilityMap">Probability map [0-255].</param>
        /// <param name="roi">Initial Search area</param>
        /// <param name="termCriteria">Mean shift termination criteria (PLEASE DO NOT REMOVE (but you can move it) THIS CLASS; PLEASE!!!)</param>
        /// <param name="centralMoments">Calculated central moments (up to order 2).</param>
        /// <returns>Object position, size and angle packed into a structure.</returns>
        public static Box2D Process(Gray<byte>[,] probabilityMap, Rectangle roi, TermCriteria termCriteria, out CentralMoments centralMoments)
        {
            //mean shift yields the object location; the moments describe its shape
            var objArea = Meanshift.Process(probabilityMap, roi, termCriteria, out centralMoments);

            //size and angle come from the moment-fitted ellipse, centered at the found area
            var ellipse = centralMoments.GetEllipse();
            ellipse.Center = objArea.Center();

            //an invalid or degenerate ellipse means the object is lost => return an empty structure
            var size = ellipse.Size;
            if (Single.IsNaN(size.Width) || Single.IsNaN(size.Height))
                return Box2D.Empty;
            if (size.Width < 1 || size.Height < 1)
                return Box2D.Empty;

            return (Box2D)ellipse;
        }
Exemplo n.º 56
0
 /// <summary>
 /// The blending filter is able to blend two images using a homography matrix.
 /// A linear alpha gradient is used to smooth out differences between the two
 /// images, effectively blending them in two images. The gradient is computed
 /// considering the distance between the centers of the two images.
 /// <para>Homography matrix is set to identity.</para>
 /// <para>Fill color is set to black with alpha set to 0 (all zeros).</para>
 /// </summary>
 /// <param name="im">Image.</param>
 /// <param name="overlayIm">The overlay image (also called the anchor).</param>
 /// <param name="gradient">A value indicating whether to blend using a linear
 ///  gradient or just superimpose the two images with equal weights.</param>
 /// <param name="alphaOnly">A value indicating whether only the alpha channel
 /// should be blended. This can be used together with a transparency
 /// mask to selectively blend only portions of the image.</param>
 /// <returns>Blended image.</returns>
 public static Bgra<byte>[,] Blend(this Gray<byte>[,] im, Gray<byte>[,] overlayIm, bool gradient = true, bool alphaOnly = false)
 {
     //identity homography; transparent-black fill color (all zeros)
     var identity = new MatrixH(Matrix.Identity(3));
     return Blend(im, overlayIm, identity, new Bgra<byte>(), gradient, alphaOnly);
 }
 /// <summary>
 /// Calculates histogram.
 /// </summary>
 /// <param name="channel">Image channel.</param>
 /// <param name="accumulate">Accumulate or erase histogram before.</param>
 /// <param name="mask">Mask for image color locations.</param>
 /// <param name="maskOffset">The location offset for the mask. The mask area will be [offsetX, offsetY, channelWidth, channelHeight].</param>
 public void Calculate(Gray<byte>[,] channel, bool accumulate, Gray<byte>[,] mask, Point maskOffset)
 {
     //wrap the single channel and delegate to the multi-channel overload
     var channels = new Gray<byte>[][,] { channel };
     Calculate(channels, accumulate, mask, maskOffset);
 }
        /// <summary>
        /// Creates template from the input image by using provided parameters.
        /// </summary>
        /// <param name="sourceImage">Input image.</param>
        /// <param name="minFeatureStrength">Minimum gradient value for the feature.</param>
        /// <param name="maxNumberOfFeatures">Maximum number of features per template. The features will be extracted so that their locations are semi-uniformly spread.</param>
        /// <param name="classLabel">Template class label.</param>
        public virtual void Initialize(Gray<byte>[,] sourceImage, int minFeatureStrength, int maxNumberOfFeatures, string classLabel)
        {
            //gradient orientations; squared magnitudes serve as feature strengths
            Gray<int>[,] sqrMagnitudes;
            Gray<int>[,] orientations = GradientComputation.Compute(sourceImage, out sqrMagnitudes, minFeatureStrength);

            Func<Feature, int> strengthOf = f => sqrMagnitudes[f.Y, f.X].Intensity;

            Initialize(orientations, maxNumberOfFeatures, classLabel, strengthOf);
        }
        /// <summary>
        /// Creates template from the input image by using provided parameters.
        /// </summary>
        /// <param name="orientation">Orientation image.</param>
        /// <param name="maxNumberOfFeatures">Maximum number of features per template. The features will be extracted so that their locations are semi-uniformly spread.</param>
        /// <param name="classLabel">Template class label.</param>
        /// <param name="featureImportanceFunc">Function which returns feature's strength.</param>
        public void Initialize(Gray<int>[,] orientation, int maxNumberOfFeatures, string classLabel, Func<Feature, int> featureImportanceFunc = null)
        {
            //clamp the requested number of features to the supported range
            maxNumberOfFeatures = System.Math.Max(0, System.Math.Min(maxNumberOfFeatures, GlobalParameters.MAX_NUM_OF_FEATURES));
            featureImportanceFunc = featureImportanceFunc ?? new Func<Feature, int>(feature => 0);

            //quantize orientations and pick the strongest, spatially scattered features
            Gray<byte>[,] importantQuantizedOrient = FeatureMap.Calculate(orientation, 0);
            List<Feature> features = ExtractTemplate(importantQuantizedOrient, maxNumberOfFeatures, featureImportanceFunc);

            BoundingRect = GetBoundingRectangle(features);

            //express feature coordinates relative to the bounding rectangle
            for (int i = 0; i < features.Count; i++)
            {
                features[i].X -= BoundingRect.X;
                features[i].Y -= BoundingRect.Y;

                //clamp slightly negative coordinates to zero
                //NOTE(review): this masks an inconsistency between the bounding rectangle and
                //feature positions (original marked it "PATCH!!!") - the root cause is worth investigating
                features[i].X = System.Math.Max(0, features[i].X);
                features[i].Y = System.Math.Max(0, features[i].Y);
            }

            this.Features = features.ToArray();
            this.Size = BoundingRect.Size;
            this.ClassLabel = classLabel;
        }
Exemplo n.º 60
0
 /// <summary>
 /// Meanshift algorithm
 /// </summary>
 /// <param name="probabilityMap">Probability map [0-1].</param>
 /// <param name="roi">Initial search area</param>
 /// <returns>Object area.</returns>
 public static Rectangle Process(Gray<byte>[,] probabilityMap, Rectangle roi)
 {
     //delegate to the core implementation using the default termination criteria
     CentralMoments ignoredMoments;
     return process(probabilityMap, roi, DEFAULT_TERM, out ignoredMoments);
 }