public Mat Describe(Mat image, out KeyPoint[] keypoints)
        {
            Mat descriptors = new Mat();

            // Detect keypoints and compute their descriptors in a single pass
            _featureDetector.DetectAndCompute(image, null, out keypoints, descriptors);
            return descriptors;
        }
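A minimal usage sketch for the method above, assuming OpenCvSharp and a hypothetical _featureDetector field created elsewhere (e.g. via ORB.Create()); the file name is a placeholder:

        // Hypothetical caller for Describe
        using (Mat image = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
        {
            KeyPoint[] keypoints;
            using (Mat descriptors = Describe(image, out keypoints))
            {
                Console.WriteLine("{0} keypoints, {1}x{2} descriptor matrix",
                                  keypoints.Length, descriptors.Rows, descriptors.Cols);
            }
        }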
Example #2
        public StopSignDetector(IInputArray stopSignModel)
        {
            _detector = new KAZE();
            using (Mat redMask = new Mat())
            {
                // keep only the red pixels; stop signs are predominantly red
                GetRedPixelMask(stopSignModel, redMask);
                _modelKeypoints   = new VectorOfKeyPoint();
                _modelDescriptors = new Mat();
                _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
                if (_modelKeypoints.Size == 0)
                {
                    throw new Exception("No image features were found in the stop sign model");
                }
            }

            _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
            _modelDescriptorMatcher.Add(_modelDescriptors);

            // reference octagon contour, compared against candidate shapes via MatchShapes
            _octagon = new VectorOfPoint(
                new Point[]
            {
                new Point(1, 0),
                new Point(2, 0),
                new Point(3, 1),
                new Point(3, 2),
                new Point(2, 3),
                new Point(1, 3),
                new Point(0, 2),
                new Point(0, 1)
            });
        }
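The constructor above calls a GetRedPixelMask helper that is not shown. A minimal sketch of what such a helper might look like, assuming BGR input and the common trick of OR-ing the two red hue bands in HSV space; the threshold values are assumptions, not the sample's actual numbers:

        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            using (Mat hsv = new Mat())
            using (Mat lowerRed = new Mat())
            using (Mat upperRed = new Mat())
            {
                CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                // red wraps around the hue axis, so threshold both ends and OR them
                using (ScalarArray lo1 = new ScalarArray(new MCvScalar(0, 120, 70)))
                using (ScalarArray hi1 = new ScalarArray(new MCvScalar(10, 255, 255)))
                    CvInvoke.InRange(hsv, lo1, hi1, lowerRed);
                using (ScalarArray lo2 = new ScalarArray(new MCvScalar(160, 120, 70)))
                using (ScalarArray hi2 = new ScalarArray(new MCvScalar(180, 255, 255)))
                    CvInvoke.InRange(hsv, lo2, hi2, upperRed);
                CvInvoke.BitwiseOr(lowerRed, upperRed, mask);
            }
        }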
Example #3
        private void FindStopSign(Mat img, List<Mat> stopSignList, List<Rectangle> boxList, VectorOfVectorOfPoint contours, int[,] hierarchy, int idx)
        {
            // walk the contours on this level; hierarchy[idx, 0] is the next sibling
            for (; idx >= 0; idx = hierarchy[idx, 0])
            {
                using (VectorOfPoint c = contours[idx])
                    using (VectorOfPoint approx = new VectorOfPoint())
                    {
                        CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * 0.02, true);
                        double area = CvInvoke.ContourArea(approx);
                        if (area > 200)
                        {
                            double ratio = CvInvoke.MatchShapes(_octagon, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);

                            if (ratio > 0.1) //not a good match for the octagon shape
                            {
                                //descend into the child contours instead
                                if (hierarchy[idx, 2] >= 0)
                                {
                                    FindStopSign(img, stopSignList, boxList, contours, hierarchy, hierarchy[idx, 2]);
                                }
                                continue;
                            }

                            Rectangle box = CvInvoke.BoundingRectangle(c);

                            Mat candidate = new Mat();
                            using (Mat tmp = new Mat(img, box))
                                CvInvoke.CvtColor(tmp, candidate, ColorConversion.Bgr2Gray);

                            //set the value of pixels not in the contour region to zero
                            using (Mat mask = new Mat(candidate.Rows, candidate.Cols, DepthType.Cv8U, 1))
                            {
                                mask.SetTo(new MCvScalar(0));
                                CvInvoke.DrawContours(mask, contours, idx, new MCvScalar(255), -1, LineType.EightConnected, null, int.MaxValue, new Point(-box.X, -box.Y));

                                double mean = CvInvoke.Mean(candidate, mask).V0;
                                CvInvoke.Threshold(candidate, candidate, mean, 255, ThresholdType.Binary);
                                CvInvoke.BitwiseNot(candidate, candidate);
                                CvInvoke.BitwiseNot(mask, mask);

                                candidate.SetTo(new MCvScalar(0), mask);
                            }

                            int              minMatchCount       = 8;
                            double           uniquenessThreshold = 0.8;
                            VectorOfKeyPoint observedKeypoints   = new VectorOfKeyPoint();
                            Mat              observedDescriptors = new Mat();
                            _detector.DetectAndCompute(candidate, null, observedKeypoints, observedDescriptors, false);

                            if (observedKeypoints.Size >= minMatchCount)
                            {
                                int k = 2;

                                Mat mask;

                                using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                                {
                                    _modelDescriptorMatcher.KnnMatch(observedDescriptors, matches, k, null);
                                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                    mask.SetTo(new MCvScalar(255));
                                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                                }

                                int nonZeroCount = CvInvoke.CountNonZero(mask);
                                if (nonZeroCount >= minMatchCount)
                                {
                                    boxList.Add(box);
                                    stopSignList.Add(candidate);
                                }
                            }
                        }
                    }
            }
        }
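FindStopSign walks a contour tree, so a hypothetical entry point could build one from the red mask roughly as follows. The blur kernel and Canny thresholds are assumptions; CvInvoke.FindContourTree supplies the int[,] hierarchy the method expects:

        public void DetectStopSign(Mat img, List<Mat> stopSignList, List<Rectangle> boxList)
        {
            using (Mat smoothed = new Mat())
            using (Mat redMask = new Mat())
            using (Mat canny = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                // smooth first so the red mask has cleaner contours
                CvInvoke.GaussianBlur(img, smoothed, new Size(5, 5), 1.5);
                GetRedPixelMask(smoothed, redMask);
                CvInvoke.Canny(redMask, canny, 100, 50);
                int[,] hierarchy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);
                if (hierarchy.GetLength(0) > 0)
                    FindStopSign(img, stopSignList, boxList, contours, hierarchy, 0);
            }
        }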
Example #4
        public static void FindMatch(
            Mat modelImage,
            Mat observedImage,
            Feature2D featureDetectorExtractor,
            out long matchTime,
            out VectorOfKeyPoint modelKeyPoints,
            out VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches,
            out Mat mask,
            out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    //extract features from the object image
                    Mat modelDescriptors = new Mat();
                    featureDetectorExtractor.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    Mat observedDescriptors = new Mat();
                    featureDetectorExtractor.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // FLANN with a linear index performs exhaustive (brute-force) matching: slower but exact.
                    // A KD-tree index is faster at a slight cost in accuracy.
                    using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                        using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                            using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                            {
                                matcher.Add(modelDescriptors);

                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                int nonZeroCount = CvInvoke.CountNonZero(mask);
                                if (nonZeroCount >= 4) // a homography needs at least 4 point correspondences
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                               matches, mask, 1.5, 20);
                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                              observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
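A possible way to drive FindMatch, assuming a KAZE extractor and visualizing the result with Features2DToolbox.DrawMatches; the file names are placeholders:

        using (Mat modelImage = CvInvoke.Imread("box.png", ImreadModes.Grayscale))
        using (Mat observedImage = CvInvoke.Imread("box_in_scene.png", ImreadModes.Grayscale))
        using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
        using (KAZE kaze = new KAZE())
        {
            long matchTime;
            VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
            Mat mask, homography;
            FindMatch(modelImage, observedImage, kaze, out matchTime,
                      out modelKeyPoints, out observedKeyPoints, matches, out mask, out homography);

            using (Mat result = new Mat())
            {
                // draw the surviving matches side by side
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                              matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
            }
        }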
Example #5
        public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
        {
            //for (int k = 0; k < 1; k++)
            {
                Feature2D feature2D = null;
                if (keyPointDetector == descriptorGenerator)
                {
                    // the same object can detect and describe in a single call
                    feature2D = keyPointDetector;
                }

                Mat modelImage = EmguAssert.LoadMat("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                Mat modelDescriptors            = new Mat();
                if (feature2D != null)
                {
                    feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
                }
                else
                {
                    keyPointDetector.DetectRaw(modelImage, modelKeypoints);
                    descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
                }
                stopwatch.Stop();
                EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image<Gray, Byte> observedImage = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset();
                stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                using (Mat observedDescriptors = new Mat())
                {
                    if (feature2D != null)
                    {
                        feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
                    }
                    else
                    {
                        keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                        descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
                    }

                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                    #endregion

                    //Merge the object image and the observed image into one big image for display
                    Image<Gray, Byte> res = modelImage.ToImage<Gray, Byte>().ConcateVertical(observedImage);

                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    Mat homography = null;

                    stopwatch.Reset();
                    stopwatch.Start();

                    int          k  = 2;
                    // binary descriptors (8U) are compared with Hamming distance, float descriptors with L2
                    DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
                    //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
                    //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        using (BFMatcher matcher = new BFMatcher(dt))
                        {
                            //ParamDef[] parameterDefs = matcher.GetParams();
                            matcher.Add(modelDescriptors);
                            matcher.KnnMatch(observedDescriptors, matches, k, null);

                            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                            mask.SetTo(new MCvScalar(255));
                            //mask.SetValue(255);
                            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                {
                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                                }
                            }
                        }
                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                    bool success = false;
                    if (homography != null)
                    {
                        PointF[] points = pts.Clone() as PointF[];
                        points = CvInvoke.PerspectiveTransform(points, homography);
                        //homography.ProjectPoints(points);

                        for (int i = 0; i < points.Length; i++)
                        {
                            points[i].Y += modelImage.Height;
                        }

                        res.DrawPolyline(
#if NETFX_CORE
                            Extensions.
#else
                            Array.
#endif
                            ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                        success = true;
                    }
                    //Emgu.CV.UI.ImageViewer.Show(res);
                    return success;
                }



            }
        }
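An illustrative call, not part of the original test suite, using KAZE as both detector and descriptor extractor:

        using (KAZE kaze = new KAZE())
        {
            bool matched = TestFeature2DTracker(kaze, kaze);
            EmguAssert.WriteLine(matched ? "Homography found" : "No homography");
        }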
Example #6
        private static bool FindDescriptors(Feature2D feature2D, VectorOfKeyPoint modelKeyPoints, UMat uModelImage, out UMat modelDescriptors)
        {
            modelDescriptors = new UMat();
            feature2D.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
            // succeed only when at least one descriptor was computed (non-empty matrix)
            return modelDescriptors.Size.Height + modelDescriptors.Size.Width > 1;
        }
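A short caller sketch for the helper above, reusing the box.png model image from the tests earlier:

        using (Mat image = EmguAssert.LoadMat("box.png"))
        using (UMat uImage = image.GetUMat(AccessType.Read))
        using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
        using (KAZE kaze = new KAZE())
        {
            UMat descriptors;
            if (FindDescriptors(kaze, keyPoints, uImage, out descriptors))
                EmguAssert.WriteLine(String.Format("{0} descriptors extracted", descriptors.Size.Height));
        }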