Example #1
        public StopSignDetector(IInputArray stopSignModel)
        {
            _detector = new KAZE();
            using (Mat redMask = new Mat())
            {
                GetRedPixelMask(stopSignModel, redMask);
                _modelKeypoints   = new VectorOfKeyPoint();
                _modelDescriptors = new Mat();
                _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
                if (_modelKeypoints.Size == 0)
                {
                    throw new Exception("No image feature has been found in the stop sign model");
                }
            }

            _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
            _modelDescriptorMatcher.Add(_modelDescriptors);

            _octagon = new VectorOfPoint(
                new Point[]
            {
                new Point(1, 0),
                new Point(2, 0),
                new Point(3, 1),
                new Point(3, 2),
                new Point(2, 3),
                new Point(1, 3),
                new Point(0, 2),
                new Point(0, 1)
            });
        }
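For context, a minimal sketch of how the matcher prepared in this constructor could later be queried against a candidate image, following the KnnMatch / VoteForUniqueness pattern used in the examples further below. The method name, the k = 2 neighbour count and the 0.8 uniqueness threshold are assumptions, not part of the original class.

        // Hypothetical query against the model prepared above (name and thresholds are assumptions)
        public bool MatchesStopSignModel(IInputArray candidate)
        {
            using (Mat redMask = new Mat())
            using (Mat observedDescriptors = new Mat())
            using (VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint())
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                // same red-channel mask and KAZE features as used for the model image
                GetRedPixelMask(candidate, redMask);
                _detector.DetectAndCompute(redMask, null, observedKeypoints, observedDescriptors, false);
                if (observedKeypoints.Size == 0)
                {
                    return false;
                }

                _modelDescriptorMatcher.KnnMatch(observedDescriptors, matches, 2, null);
                using (Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1))
                {
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
                    return CvInvoke.CountNonZero(mask) >= 4;
                }
            }
        }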
Example #2
 public CVObjectDetector()
 {
     // Brisk, KAZE, AKAZE, ORBDetector, HarrisLaplaceFeatureDetector, FastDetector
     featureDetector = new Brisk();
     // Brisk, ORBDetector, BriefDescriptorExtractor, Freak
     featureDescriptor = new Brisk();
     featureMatcher    = new BFMatcher(DistanceType.L2);
 }
Example #3
        public static void Init()
        {
            _featureDetector = new SURF(500);
            //_featureDetector = new ORBDetector();
            //_featureDetector = new FastDetector(10, true);
            //_featureDetector = new SIFT();

            //_descriptorsComputer = _featureDetector;
            _descriptorsComputer = new Freak();
        }
Example #4
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <param name="featureDetectorExtractor">The feature detector extractor</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Mat Draw(Mat modelImage, Mat observedImage, Feature2D featureDetectorExtractor, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;

            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, featureDetectorExtractor, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                          out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                              matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                #region draw the projected region on the image

                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    Point[] points = new Point[pts.Length];
                    for (int i = 0; i < points.Length; i++)
                    {
                        points[i] = Point.Round(pts[i]);
                    }

                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }
                }
                #endregion

                return(result);
            }
        }
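A minimal usage sketch for Draw, assuming Emgu CV and the box.png / box_in_scene.png pair used elsewhere in these examples; the file names and the KAZE detector choice are placeholders.

        // Hypothetical usage of Draw (file names and detector choice are placeholders)
        using (Mat modelImage = CvInvoke.Imread("box.png"))
        using (Mat observedImage = CvInvoke.Imread("box_in_scene.png"))
        using (KAZE detector = new KAZE())
        {
            long matchTime;
            using (Mat result = Draw(modelImage, observedImage, detector, out matchTime))
            {
                CvInvoke.Imwrite("match_result.png", result);
            }
        }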
Example #5
        public TestForm()
        {
            InitializeComponent();
            updateTimer          = new Timer();
            updateTimer.Tick    += UpdateTimer_Tick;
            capture              = new VideoCapture();
            fpsSw                = new Stopwatch();
            perfSw               = new Stopwatch();
            updateTimer.Interval = 100;
            //featureDetector = new ORBDetector(500);
            featureDetector = new Emgu.CV.Features2D.GFTTDetector(500);

            angleHistory = new CircularBuffer(historyLength);
            avgXHistory  = new CircularBuffer(historyLength);
            avgYHistory  = new CircularBuffer(historyLength);
        }
Example #6
        public static Image <Arthmetic, double> K(List <Mat> mats, Feature2D detector, Feature2D descriptor, DistanceType distanceType, double maxDistance)
        {
            List <Image <Arthmetic, double> > Fs = new List <Image <Arthmetic, double> >();

            for (int i = 0; i < mats.Count - 1; i += 2)
            {
                var match = MatchImagePair.Match(mats[i], mats[i + 1], detector, descriptor, distanceType, maxDistance);
                var F     = ComputeMatrix.F(match.LeftPoints, match.RightPoints);
                if (F == null)
                {
                    continue;
                }
                Fs.Add(F);
            }
            return(K(Fs, mats[0].Width, mats[0].Height));
        }
Example #7
        /// <summary>
        /// Detects features in each of the given images using the provided features finder.
        /// </summary>
        /// <param name="featuresFinder">The 2D features finder (detector/extractor)</param>
        /// <param name="images">Source images</param>
        /// <param name="masks">Optional masks, one per source image</param>
        public static ImageFeatures[] ComputeImageFeatures(
            Feature2D featuresFinder,
            IEnumerable <Mat> images,
            IEnumerable <Mat>?masks = null)
        {
            if (featuresFinder == null)
            {
                throw new ArgumentNullException(nameof(featuresFinder));
            }
            if (images == null)
            {
                throw new ArgumentNullException(nameof(images));
            }
            featuresFinder.ThrowIfDisposed();

            var imagesArray = images as Mat[] ?? images.ToArray();

            if (imagesArray.Length == 0)
            {
                throw new ArgumentException("Empty array", nameof(images));
            }

            var imagesPointers = imagesArray.Select(i => i.CvPtr).ToArray();
            var masksPointers  = masks?.Select(i => i.CvPtr).ToArray();

            if (masksPointers != null && imagesPointers.Length != masksPointers.Length)
            {
                throw new ArgumentException("size of images != size of masks");
            }

            using var wImageFeaturesVec = new VectorOfImageFeatures();
            NativeMethods.HandleException(
                NativeMethods.stitching_computeImageFeatures1(
                    featuresFinder.CvPtr,
                    imagesPointers,
                    imagesPointers.Length,
                    wImageFeaturesVec.CvPtr,
                    masksPointers));

            GC.KeepAlive(featuresFinder);
            GC.KeepAlive(images);
            GC.KeepAlive(masks);
            GC.KeepAlive(imagesPointers);

            return(wImageFeaturesVec.ToArray());
        }
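A hedged usage sketch for the batch ComputeImageFeatures above, assuming OpenCvSharp, placeholder input files, and that the call is made from the same class that defines the method.

        // Hypothetical usage (file names are placeholders)
        using var orb = ORB.Create();
        var images = new[] { Cv2.ImRead("left.jpg"), Cv2.ImRead("right.jpg") };
        ImageFeatures[] features = ComputeImageFeatures(orb, images);
        Console.WriteLine($"Computed features for {features.Length} images");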
Example #8
        public void ProcessImages(Mat left, Mat right, Feature2D detector, Feature2D descriptor, DistanceType distanceType, double takeBest)
        {
            var match = MatchImagePair.Match(left, right, detector, descriptor, distanceType, 20.0);

            DrawFeatures(left, right, match, takeBest);

            var lps = match.LeftPointsList.Take((int)(match.LeftPoints.Size * takeBest));
            var rps = match.RightPointsList.Take((int)(match.RightPoints.Size * takeBest));

            var F = ComputeMatrix.F(new VectorOfPointF(lps.ToArray()), new VectorOfPointF(rps.ToArray()));
            var K = EstimateCameraFromImagePair.K(F, left.Width, right.Height);
            var E = ComputeMatrix.E(F, K);

            FindTransformation.DecomposeToRTAndTriangulate(lps.ToList(), rps.ToList(), K, E,
                                                           out Image <Arthmetic, double> R, out Image <Arthmetic, double> t, out Image <Arthmetic, double> X);
            PrintMatricesInfo(E, K, R, t);
        }
Example #9
        /// <summary>
        /// Detects features in the given image using the provided features finder.
        /// </summary>
        /// <param name="featuresFinder">The 2D features finder (detector/extractor)</param>
        /// <param name="image">Source image</param>
        /// <param name="features">The detected image features</param>
        /// <param name="mask">Optional mask</param>
        public static void ComputeImageFeatures(
            Feature2D featuresFinder,
            InputArray image,
            out ImageFeatures features,
            InputArray?mask = null)
        {
            if (featuresFinder == null)
            {
                throw new ArgumentNullException(nameof(featuresFinder));
            }
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            featuresFinder.ThrowIfDisposed();
            image.ThrowIfDisposed();

            var descriptorsMat = new Mat();
            var keypointsVec   = new VectorOfKeyPoint();
            var wImageFeatures = new WImageFeatures
            {
                Keypoints   = keypointsVec.CvPtr,
                Descriptors = descriptorsMat.CvPtr
            };

            unsafe
            {
                NativeMethods.HandleException(
                    NativeMethods.stitching_computeImageFeatures2(
                        featuresFinder.CvPtr, image.CvPtr, &wImageFeatures, mask?.CvPtr ?? IntPtr.Zero));
            }

            features = new ImageFeatures(
                wImageFeatures.ImgIdx,
                wImageFeatures.ImgSize,
                keypointsVec.ToArray(),
                descriptorsMat);

            GC.KeepAlive(featuresFinder);
            GC.KeepAlive(image);
            GC.KeepAlive(mask);
            GC.KeepAlive(descriptorsMat);
        }
Example #10
        static object[] DetectAndCompute2(UMat img, Feature2D detector, Feature2D computer) // detects keypoints and computes the image descriptors
        {
            // detect keypoints first, then compute their descriptors
            UMat             descriptors = new UMat();
            MKeyPoint[]      mkp         = detector.Detect(img);
            VectorOfKeyPoint keypoints   = new VectorOfKeyPoint(mkp);

            computer.Compute(img, keypoints, descriptors);
            return(new object[] { keypoints, descriptors });
        }
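Because DetectAndCompute2 returns an untyped object[], a caller has to cast the elements back to their real types; a minimal sketch:

        // Hypothetical caller of DetectAndCompute2
        object[] result      = DetectAndCompute2(img, detector, computer);
        var      keypoints   = (VectorOfKeyPoint)result[0];
        var      descriptors = (UMat)result[1];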
Example #11
        private static void BowTest()
        {
            DescriptorMatcher matcher   = new BFMatcher();
            Feature2D         extractor = AKAZE.Create();
            Feature2D         detector  = AKAZE.Create();

            TermCriteria              criteria               = new TermCriteria(CriteriaType.Count | CriteriaType.Eps, 10, 0.001);
            BOWKMeansTrainer          bowTrainer             = new BOWKMeansTrainer(200, criteria, 1);
            BOWImgDescriptorExtractor bowDescriptorExtractor = new BOWImgDescriptorExtractor(extractor, matcher);

            Mat img = null; // placeholder: a real training image would be loaded here

            KeyPoint[] keypoint = detector.Detect(img);
            Mat        features = new Mat();

            extractor.Compute(img, ref keypoint, features);
            bowTrainer.Add(features);

            throw new NotImplementedException();
        }
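BowTest stops right after collecting descriptors; a hedged sketch of how the bag-of-words pipeline would typically continue, assuming OpenCvSharp's BOWKMeansTrainer / BOWImgDescriptorExtractor API.

        // Hypothetical continuation of the BoW pipeline (API details are assumptions)
        Mat vocabulary = bowTrainer.Cluster();            // k-means over all added descriptors -> 200 visual words
        bowDescriptorExtractor.SetVocabulary(vocabulary); // histogram bins correspond to visual words
        // a query image would then be encoded by bowDescriptorExtractor.Compute(...) into a
        // 1 x 200 visual-word histogram (the exact Compute overload varies between versions)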
Example #12
        public static OdometerFrame GetOdometerFrame(Mat left, Mat right, Feature2D detector, Feature2D descriptor, DistanceType distanceType, double maxDistance, Image <Arthmetic, double> K, double takeBest = 1.0)
        {
            var match = MatchImagePair.Match(left, right, detector, descriptor, distanceType, maxDistance);

            var lps = match.LeftPointsList.Take((int)(match.LeftPoints.Size * takeBest));
            var rps = match.RightPointsList.Take((int)(match.RightPoints.Size * takeBest));

            var lps_n = lps.ToList();
            var rps_n = rps.ToList();
            var H     = EstimateHomography(lps_n, rps_n, K);

            if (IsPureRotation(H))
            {
                OdometerFrame odometerFrame = new OdometerFrame();
                odometerFrame.Rotation       = RotationConverter.MatrixToEulerXYZ(H);
                odometerFrame.RotationMatrix = RotationConverter.EulerXYZToMatrix(odometerFrame.Rotation);
                odometerFrame.MatK           = K;
                odometerFrame.Match          = match;
                odometerFrame.Translation    = new Image <Arthmetic, double>(1, 3);
                return(odometerFrame);
            }
            else
            {
                if (!FindTwoViewsMatrices(lps_n, rps_n, K, out var F, out var E, out var R, out var t, out var X))
                {
                    return(null);
                }

                OdometerFrame odometerFrame = new OdometerFrame();
                odometerFrame.Rotation       = RotationConverter.MatrixToEulerXYZ(R);
                odometerFrame.RotationMatrix = R;
                odometerFrame.MatK           = K;
                odometerFrame.Match          = match;

                Image <Arthmetic, double> C = R.T().Multiply(t).Mul(-1);
                odometerFrame.Translation = C.Mul(1.0 / C.Norm);
                return(odometerFrame);
            }
        }
Example #13
        /*
         * public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
         * {
         * TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
         * }
         *
         * [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
         * private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);
         *
         * /// <summary>
         * /// Implements the chamfer matching algorithm on images taking into account both distance from
         * /// the template pixels to the nearest pixels and orientation alignment between template and image
         * /// contours.
         * /// </summary>
         * /// <param name="img">The edge image where search is performed</param>
         * /// <param name="templ">The template (an edge image)</param>
         * /// <param name="contours">The output contours</param>
         * /// <param name="cost">The cost associated with the matching</param>
         * /// <param name="templScale">The template scale</param>
         * /// <param name="maxMatches">The maximum number of matches</param>
         * /// <param name="minMatchDistance">The minimum match distance</param>
         * /// <param name="padX">PadX</param>
         * /// <param name="padY">PadY</param>
         * /// <param name="scales">Scales</param>
         * /// <param name="minScale">Minimum scale</param>
         * /// <param name="maxScale">Maximum scale</param>
         * /// <param name="orientationWeight">Orientation weight</param>
         * /// <param name="truncate">Truncate</param>
         * /// <returns>The number of matches</returns>
         * public static int ChamferMatching(Mat img, Mat templ,
         * out Point[][] contours, out float[] cost,
         * double templScale = 1, int maxMatches = 20,
         * double minMatchDistance = 1.0, int padX = 3,
         * int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
         * double orientationWeight = 0.5, double truncate = 20)
         * {
         * using (Emgu.CV.Util.VectorOfVectorOfPoint vecOfVecOfPoint = new Util.VectorOfVectorOfPoint())
         * using (Emgu.CV.Util.VectorOfFloat vecOfFloat = new Util.VectorOfFloat())
         * {
         *    int count = cveChamferMatching(img, templ, vecOfVecOfPoint, vecOfFloat, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
         *    contours = vecOfVecOfPoint.ToArrayOfArray();
         *    cost = vecOfFloat.ToArray();
         *    return count;
         * }
         * }
         * [DllImport(ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
         * private static extern int cveChamferMatching(
         * IntPtr img, IntPtr templ,
         * IntPtr results, IntPtr cost,
         * double templScale, int maxMatches,
         * double minMatchDistance, int padX,
         * int padY, int scales, double minScale, double maxScale,
         * double orientationWeight, double truncate);
         */

        /// <summary>
        /// Finds centers in the grid of circles
        /// </summary>
        /// <param name="image">Source chessboard view</param>
        /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
        /// <param name="flags">Various operation flags</param>
        /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
        /// <returns>The center of circles detected if the chess board pattern is found, otherwise null is returned</returns>
        public static PointF[] FindCirclesGrid(Image <Gray, Byte> image, Size patternSize, CvEnum.CalibCgType flags, Feature2D featureDetector)
        {
            using (Util.VectorOfPointF vec = new Util.VectorOfPointF())
            {
                bool patternFound =
                    FindCirclesGrid(
                        image,
                        patternSize,
                        vec,
                        flags,
                        featureDetector
                        );
                return(patternFound ? vec.ToArray() : null);
            }
        }
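A minimal usage sketch for this FindCirclesGrid overload, assuming Emgu CV and the standard 4 x 11 asymmetric circles grid from the OpenCV samples; the file name, pattern size and flag value are placeholders.

        // Hypothetical usage (file name, pattern size and flag are placeholders)
        using (Image<Gray, Byte> board = new Image<Gray, Byte>("circles_grid.png"))
        using (SimpleBlobDetector blobDetector = new SimpleBlobDetector())
        {
            PointF[] centers = FindCirclesGrid(board, new Size(4, 11), CvEnum.CalibCgType.AsymmetricGrid, blobDetector);
            if (centers != null)
            {
                CvInvoke.DrawChessboardCorners(board, new Size(4, 11), new VectorOfPointF(centers), true);
            }
        }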
Example #14
 /// <summary>
 /// Finds centers in the grid of circles
 /// </summary>
 /// <param name="image">Source chessboard view</param>
 /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
 /// <param name="flags">Various operation flags</param>
 /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
 /// <param name="centers">output array of detected centers.</param>
 /// <returns>True if grid found.</returns>
 public static bool FindCirclesGrid(IInputArray image, Size patternSize, IOutputArray centers, CvEnum.CalibCgType flags, Feature2D featureDetector)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCenters = centers.GetOutputArray())
             return(cveFindCirclesGrid(iaImage, ref patternSize, oaCenters, flags, featureDetector.Feature2DPtr));
 }
Example #15
        public static OdometerFrame GetOdometerFrame3(
            Mat left, Mat middle, Mat right, double lastScale, out double thisScale,
            Feature2D detector, Feature2D descriptor, DistanceType distanceType, double maxDistance,
            Image <Arthmetic, double> K, double takeBest = 1.0)
        {
            thisScale = lastScale;

            var match12 = MatchImagePair.Match(left, middle, detector, descriptor, distanceType, maxDistance);
            var match23 = MatchImagePair.Match(middle, right, detector, descriptor, distanceType, maxDistance);
            var match13 = MatchImagePair.Match(left, right, detector, descriptor, distanceType, maxDistance);

            var left1    = match12.LeftPoints;
            var right1   = match12.RightPoints;
            var left2    = match23.LeftPoints;
            var left2_X  = MatchClosePoints.SortByX(match23.LeftPoints);
            var right2   = match23.RightPoints;
            var left3    = match13.LeftPoints;
            var right3   = match13.RightPoints;
            var right3_X = MatchClosePoints.SortByX(match13.LeftPoints);

            TripletMatch tmatch = new TripletMatch();

            List <MDMatch> m12 = new List <MDMatch>();
            List <MDMatch> m23 = new List <MDMatch>();

            for (int idx12 = 0; idx12 < left1.Size; ++idx12)
            {
                var p1    = left1[idx12];
                var p2    = right1[idx12];
                int idx23 = IndexOf_X(left2_X, p2);
                if (idx23 != -1)
                {
                    var p3    = right2[idx23];
                    int idx13 = IndexOf_X(right3_X, p1);
                    if (idx13 != -1)
                    {
                        if (AreEqual(left1[idx12], left3[idx13], maxDistance))
                        {
                            tmatch.Left.Add(p1);
                            tmatch.Middle.Add(p2);
                            tmatch.Right.Add(p3);

                            m12.Add(match12.Matches[idx12]);
                            m23.Add(match23.Matches[idx23]);
                        }
                    }
                }
            }

            match12.Matches = new VectorOfDMatch(m12.ToArray());
            match23.Matches = new VectorOfDMatch(m23.ToArray());

            var F12 = ComputeMatrix.F(new VectorOfPointF(tmatch.Left.ToArray()), new VectorOfPointF(tmatch.Middle.ToArray()));
            //  var F23 = ComputeMatrix.F(new VectorOfPointF(tmatch.Middle.ToArray()), new VectorOfPointF(tmatch.Right.ToArray()));
            var F13 = ComputeMatrix.F(new VectorOfPointF(tmatch.Left.ToArray()), new VectorOfPointF(tmatch.Right.ToArray()));

            if (F12 == null || F13 == null)
            {
                return(null);
            }

            var Es = new List <Image <Arthmetic, double> >
            {
                ComputeMatrix.E(F12, K),
                //  ComputeMatrix.E(F23, K),
                ComputeMatrix.E(F13, K)
            };

            FindTransformation.DecomposeToRTAndTriangulate(tmatch.Left, tmatch.Middle, K, Es[0],
                                                           out Image <Arthmetic, double> R12, out Image <Arthmetic, double> t12, out Image <Arthmetic, double> X12);
            // FindTransformation.DecomposeToRT(Es[1], out Image<Arthmetic, double> R23, out Image<Arthmetic, double> t23);
            FindTransformation.DecomposeToRTAndTriangulate(tmatch.Left, tmatch.Right, K, Es[1],
                                                           out Image <Arthmetic, double> R13, out Image <Arthmetic, double> t13, out Image <Arthmetic, double> X13);

            var Rs = new List <Image <Arthmetic, double> >
            {
                R12,
                R13
            };
            var ts = new List <Image <Arthmetic, double> >
            {
                t12,
                t13
            };

            var cc = ComputeCameraCenter3(K, Rs, ts, tmatch);

            OdometerFrame odometerFrame = new OdometerFrame();

            odometerFrame.Rotation       = RotationConverter.MatrixToEulerXYZ(Rs[0]);
            odometerFrame.RotationMatrix = Rs[0];
            odometerFrame.MatK           = K;
            odometerFrame.Match          = match12;

            //    Image<Arthmetic, double> C = ComputeCameraCenter(R, t, K, match);
            //  odometerFrame.Translation = R.Multiply(C);
            //   odometerFrame.Translation = R.T().Multiply(odometerFrame.Translation);
            odometerFrame.Translation = ts[0].Mul(lastScale / ts[0].Norm);
            odometerFrame.Center      = lastScale * cc.C12;
            thisScale = cc.Ratio3To2;

            return(odometerFrame);
        }
Example #16
 public SURFDescriptor()
 {
     //_featureDetector = ORB.Create(nFeatures: 200, edgeThreshold: 40, patchSize: 40, wtaK: 3);
     _featureDetector = SURF.Create(1200); // better but a lot slower
 }
Example #17
        public static double hessianThresh       = 300;                                                                                           // SURF tuning parameter
        public static void DetectAndCompute(Mat img, out VectorOfKeyPoint keypoints, out Mat descriptors, Feature2D detector, Feature2D computer) // detects keypoints and computes the image descriptors
        {
            keypoints   = null;
            descriptors = new Mat();
            try
            {
                var mkp = detector.Detect(img, null);
                keypoints = new VectorOfKeyPoint(mkp);
            }
            catch (Exception)
            {
                throw; // rethrow without resetting the stack trace
            }

            try
            {
                computer.Compute(img, keypoints, descriptors);
            }
            catch (Exception)
            {
                throw; // rethrow without resetting the stack trace
            }
        }
Example #18
 private static bool FindDescriptors(Feature2D feature2D, VectorOfKeyPoint modelKeyPoints, UMat uModelImage, out UMat modelDescriptors)
 {
     modelDescriptors = new UMat();
     feature2D.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
     return(modelDescriptors.Size.Height + modelDescriptors.Size.Width > 1);
 }
Example #19
        public static void FindMatchWM(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, Feature2D computer, Feature2D detector)
        {
            Stopwatch watch;

            modelKeyPoints    = new VectorOfKeyPoint(); // key points on the model image
            observedKeyPoints = new VectorOfKeyPoint(); // key points on the larger (observed) image
            homography        = null;
            int k = 2;


            using (Mat uModelImage = modelImage.Clone())
                using (Mat uObservedImage = observedImage.Clone())
                {
                    //get the descriptors from the first image
                    Mat modelDescriptors = new Mat();
                    DetectAndCompute(uModelImage, out modelKeyPoints, out modelDescriptors, detector, computer);

                    watch = Stopwatch.StartNew();

                    // ... from the second image
                    Mat observedDescriptors = new Mat();
                    DetectAndCompute(uObservedImage, out observedKeyPoints, out observedDescriptors, detector, computer);


                    BFMatcher matcher = new BFMatcher(DistanceType.L2); // matches descriptors between the two images
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null); // matching
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, 0.8, mask); // build the mask (see below)

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, // estimate the region where the model should be placed
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #20
        public static void FindMatch(
            Mat modelImage,
            Mat observedImage,
            Feature2D featureDetectorExtractor,
            out long matchTime,
            out VectorOfKeyPoint modelKeyPoints,
            out VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches,
            out Mat mask,
            out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    //extract features from the object image
                    Mat modelDescriptors = new Mat();
                    featureDetectorExtractor.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    Mat observedDescriptors = new Mat();
                    featureDetectorExtractor.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // Brute force, slower but more accurate
                    // You can use KDTree for faster matching with slight loss in accuracy
                    using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                        using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                            using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                            {
                                matcher.Add(modelDescriptors);

                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                int nonZeroCount = CvInvoke.CountNonZero(mask);
                                if (nonZeroCount >= 4)
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                               matches, mask, 1.5, 20);
                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                              observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #21
        public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
        {
            //for (int k = 0; k < 1; k++)
            {
                Feature2D feature2D = null;
                if (keyPointDetector == descriptorGenerator)
                {
                    feature2D = keyPointDetector as Feature2D;
                }

                Mat modelImage = EmguAssert.LoadMat("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                Mat modelDescriptors            = new Mat();
                if (feature2D != null)
                {
                    feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
                }
                else
                {
                    keyPointDetector.DetectRaw(modelImage, modelKeypoints);
                    descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
                }
                stopwatch.Stop();
                EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image <Gray, Byte> observedImage = EmguAssert.LoadImage <Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset();
                stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                using (Mat observedDescriptors = new Mat())
                {
                    if (feature2D != null)
                    {
                        feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
                    }
                    else
                    {
                        keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                        descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
                    }

                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                    #endregion

                    //Merge the object image and the observed image into one big image for display
                    Image <Gray, Byte> res = modelImage.ToImage <Gray, Byte>().ConcateVertical(observedImage);

                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    Mat homography = null;

                    stopwatch.Reset();
                    stopwatch.Start();

                    int          k  = 2;
                    DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
                    //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
                    //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        using (BFMatcher matcher = new BFMatcher(dt))
                        {
                            //ParamDef[] parameterDefs = matcher.GetParams();
                            matcher.Add(modelDescriptors);
                            matcher.KnnMatch(observedDescriptors, matches, k, null);

                            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                            mask.SetTo(new MCvScalar(255));
                            //mask.SetValue(255);
                            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                {
                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                                }
                            }
                        }
                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                    bool success = false;
                    if (homography != null)
                    {
                        PointF[] points = pts.Clone() as PointF[];
                        points = CvInvoke.PerspectiveTransform(points, homography);
                        //homography.ProjectPoints(points);

                        for (int i = 0; i < points.Length; i++)
                        {
                            points[i].Y += modelImage.Height;
                        }

                        res.DrawPolyline(
#if NETFX_CORE
                            Extensions.
#else
                            Array.
#endif
                            ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                        success = true;
                    }
                    //Emgu.CV.UI.ImageViewer.Show(res);
                    return(success);
                }



                /*
                 * stopwatch.Reset(); stopwatch.Start();
                 * //set the initial region to be the whole image
                 * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                 * {
                 * priorMask.SetValue(1.0);
                 * homography = tracker.CamShiftTrack(
                 *    observedFeatures,
                 *    (RectangleF)observedImage.ROI,
                 *    priorMask);
                 * }
                 * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                 *
                 * if (homography != null) //set the initial tracking window to be the whole image
                 * {
                 * PointF[] points = pts.Clone() as PointF[];
                 * homography.ProjectPoints(points);
                 *
                 * for (int i = 0; i < points.Length; i++)
                 *    points[i].Y += modelImage.Height;
                 * res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                 * return true;
                 * }
                 * else
                 * {
                 * return false;
                 * }*/
            }
        }
Example #22
        /// <summary>
        /// Detects features in each of the given images using the provided features finder.
        /// </summary>
        /// <param name="featuresFinder">The 2D features finder (detector/extractor)</param>
        /// <param name="images">Source images</param>
        /// <param name="features">The detected features, one entry per image</param>
        /// <param name="masks">Optional masks, one per source image</param>
        public static void ComputeImageFeatures(
            Feature2D featuresFinder,
            IEnumerable <Mat> images,
            out ImageFeatures[] features,
            IEnumerable <Mat>?masks = null)
        {
            if (featuresFinder == null)
            {
                throw new ArgumentNullException(nameof(featuresFinder));
            }
            if (images == null)
            {
                throw new ArgumentNullException(nameof(images));
            }
            featuresFinder.ThrowIfDisposed();

            var imagesArray = images as Mat[] ?? images.ToArray();

            if (imagesArray.Length == 0)
            {
                throw new ArgumentException("Empty array", nameof(images));
            }

            var descriptorsMat = new Mat[imagesArray.Length];
            var keypointsVec   = new VectorOfKeyPoint[imagesArray.Length];
            var wImageFeatures = new WImageFeatures[imagesArray.Length];

            for (int i = 0; i < imagesArray.Length; i++)
            {
                descriptorsMat[i] = new Mat();
                keypointsVec[i]   = new VectorOfKeyPoint();
                wImageFeatures[i] = new WImageFeatures
                {
                    Keypoints   = keypointsVec[i].CvPtr,
                    Descriptors = descriptorsMat[i].CvPtr
                };
            }

            var imagesPointers = imagesArray.Select(i => i.CvPtr).ToArray();
            var masksPointers  = masks?.Select(i => i.CvPtr).ToArray();

            if (masksPointers != null && imagesPointers.Length != masksPointers.Length)
            {
                throw new ArgumentException("size of images != size of masks");
            }

            var wImageFeaturesPointers = new GCHandle[wImageFeatures.Length];

            try
            {
                for (int i = 0; i < wImageFeatures.Length; i++)
                {
                    wImageFeaturesPointers[i] = GCHandle.Alloc(wImageFeatures[i], GCHandleType.Pinned);
                }

                if (masksPointers == null)
                {
                    NativeMethods.HandleException(
                        NativeMethods.stitching_computeImageFeatures1_2(
                            featuresFinder.CvPtr,
                            imagesPointers,
                            imagesPointers.Length,
                            wImageFeaturesPointers.Select(gch => gch.AddrOfPinnedObject()).ToArray(),
                            IntPtr.Zero));
                }
                else
                {
                    NativeMethods.HandleException(
                        NativeMethods.stitching_computeImageFeatures1_1(
                            featuresFinder.CvPtr,
                            imagesPointers,
                            imagesPointers.Length,
                            wImageFeaturesPointers.Select(gch => gch.AddrOfPinnedObject()).ToArray(),
                            masksPointers));
                }
            }
            finally
            {
                for (int i = 0; i < wImageFeaturesPointers.Length; i++)
                {
                    wImageFeaturesPointers[i].Free();
                }
            }

            features = new ImageFeatures[wImageFeatures.Length];
            for (int i = 0; i < wImageFeaturesPointers.Length; i++)
            {
                features[i] = new ImageFeatures(
                    wImageFeatures[i].ImgIdx,
                    wImageFeatures[i].ImgSize,
                    keypointsVec[i].ToArray(),
                    descriptorsMat[i]);
            }

            GC.KeepAlive(featuresFinder);
            GC.KeepAlive(images);
            GC.KeepAlive(masks);
            GC.KeepAlive(descriptorsMat);
            GC.KeepAlive(imagesPointers);
        }
Example #23
        static Bitmap FindPoints(Bitmap Imodel, Bitmap Iobserved, Feature2D computer, Feature2D detector, out string matchTime, out string mCount, out BitmapImage model, [InField(PVal = "White")] Brush PointColor) // combine all of the collected data
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            long             time;

            var modelImage    = new Image <Bgr, byte>(Imodel);
            var observedImage = new Image <Bgr, byte>(Iobserved);

            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                SURFMethods.FindMatchWM(modelImage.Mat, observedImage.Mat, out time, out modelKeyPoints, out observedKeyPoints, matches,
                                        out mask, out homography, computer, detector);

                int miw = Imodel.Width;
                int oiw = Iobserved.Width;
                int oih = Iobserved.Height;

                var size = new System.Drawing.Size(miw + oiw, oih);

                Image <Bgr, byte> Combined = new Image <Bgr, byte>(size);
                var g = Graphics.FromImage(Combined.Bitmap);
                g.DrawImage(new Bitmap(Iobserved), PointF.Empty);
                g.DrawImage(new Bitmap(Imodel), new PointF(oiw, 0));


                Mat   result = new Mat();
                Color color  = new Pen(PointColor).Color;
                Features2DToolbox.DrawMatches(modelImage.Mat, modelKeyPoints, observedImage.Mat, observedKeyPoints, matches, result, new MCvScalar(color.B, color.G, color.R), new MCvScalar(color.B, color.G, color.R), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);


                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(System.Drawing.Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    System.Drawing.Point[] points = Array.ConvertAll <PointF, System.Drawing.Point>(pts, System.Drawing.Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(0, 0, 255, 255), 1);
                    }
                }


                var resc = result.ToImage <Bgr, byte>();
                model = B2BI(new Bitmap(resc.Bitmap));


                matchTime = "Time: " + time + " ms";
                mCount    = "Amount: " + matches.Size;

                return(result.Bitmap);
            }
        }
Example #24
 public CreateProjectViewModel()
 {
     _detector   = GetNativeDetector(SelectedDetector);
     _descripter = GetNativeDescripter(SelectedDescripter);
 }
Example #25
        public void ProcessImages(Mat left, Mat middle, Mat right, Feature2D detector, Feature2D descriptor, DistanceType distance)
        {
            double maxDistance = 20.0;
            var    match12     = MatchImagePair.Match(left, middle, detector, descriptor, distance, maxDistance);
            var    match23     = MatchImagePair.Match(middle, right, detector, descriptor, distance, maxDistance);
            var    match13     = MatchImagePair.Match(left, right, detector, descriptor, distance, maxDistance);

            TripletMatch tmatch = new TripletMatch();

            List <MDMatch> m12 = new List <MDMatch>();
            List <MDMatch> m23 = new List <MDMatch>();

            var left1    = match12.LeftPoints;
            var right1   = match12.RightPoints;
            var left2    = match23.LeftPoints;
            var left2_X  = MatchClosePoints.SortByX(match23.LeftPoints);
            var right2   = match23.RightPoints;
            var left3    = match13.LeftPoints;
            var right3   = match13.RightPoints;
            var right3_X = MatchClosePoints.SortByX(match13.LeftPoints);

            for (int idx12 = 0; idx12 < left1.Size; ++idx12)
            {
                var p1    = left1[idx12];
                var p2    = right1[idx12];
                int idx23 = IndexOf_X(left2_X, p2);
                if (idx23 != -1)
                {
                    var p3    = right2[idx23];
                    int idx13 = IndexOf_X(right3_X, p1);
                    if (idx13 != -1)
                    {
                        if (AreEqual(left1[idx12], left3[idx13]))
                        {
                            tmatch.Left.Add(p1);
                            tmatch.Middle.Add(p2);
                            tmatch.Right.Add(p3);

                            m12.Add(match12.Matches[idx12]);
                            m23.Add(match23.Matches[idx23]);
                        }
                    }
                }
            }

            match12.Matches = new VectorOfDMatch(m12.ToArray());
            match23.Matches = new VectorOfDMatch(m23.ToArray());

            MatchDrawer.DrawFeatures(left, right, match12, 1.0, bottomView);
            MatchDrawer.DrawFeatures(left, right, match23, 1.0, upperView);

            var F12 = ComputeMatrix.F(new VectorOfPointF(tmatch.Left.ToArray()), new VectorOfPointF(tmatch.Middle.ToArray()));
            var F23 = ComputeMatrix.F(new VectorOfPointF(tmatch.Middle.ToArray()), new VectorOfPointF(tmatch.Right.ToArray()));
            var F13 = ComputeMatrix.F(new VectorOfPointF(tmatch.Left.ToArray()), new VectorOfPointF(tmatch.Right.ToArray()));

            if (F12 == null || F23 == null || F13 == null)
            {
                info.Text = "Too few matches";
                return;
            }

            var Fs = new List <Image <Arthmetic, double> > {
                F12, F23, F13
            };

            var K = EstimateCameraFromImageSequence.K(Fs, left.Width, right.Height);

            var Es = new List <Image <Arthmetic, double> >
            {
                ComputeMatrix.E(F12, K),
                ComputeMatrix.E(F23, K),
                ComputeMatrix.E(F13, K)
            };

            FindTransformation.DecomposeToRTAndTriangulate(tmatch.Left, tmatch.Middle, K, Es[0],
                                                           out Image <Arthmetic, double> R12, out Image <Arthmetic, double> t12, out Image <Arthmetic, double> X12);
            FindTransformation.DecomposeToRTAndTriangulate(tmatch.Middle, tmatch.Right, K, Es[1],
                                                           out Image <Arthmetic, double> R23, out Image <Arthmetic, double> t23, out Image <Arthmetic, double> X23);
            FindTransformation.DecomposeToRTAndTriangulate(tmatch.Left, tmatch.Right, K, Es[2],
                                                           out Image <Arthmetic, double> R13, out Image <Arthmetic, double> t13, out Image <Arthmetic, double> X13);

            var Rs = new List <Image <Arthmetic, double> >
            {
                RotationConverter.MatrixToEulerXYZ(R12),
                RotationConverter.MatrixToEulerXYZ(R23),
                RotationConverter.MatrixToEulerXYZ(R13)
            };
            var ts = new List <Image <Arthmetic, double> >
            {
                t12,
                t23,
                t13
            };

            PrintMatricesInfo(Es, K, Rs, ts);
        }