/// <summary>
/// Compute descriptors for the supplied key point locations on the given image.
/// </summary>
/// <param name="extractor">The descriptor extractor</param>
/// <param name="image">The image the descriptors are computed from</param>
/// <param name="mask">The optional operation mask, can be null if not needed</param>
/// <param name="keyPoints">The key points at which the descriptors are computed</param>
/// <returns>The descriptors found at the key point locations</returns>
public static ImageFeature <TDepth>[] ComputeDescriptors <TDepth>(this IDescriptorExtractor <TDepth> extractor, Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
    where TDepth : struct
{
    // Nothing to compute for an empty key point set.
    if (keyPoints.Length == 0)
    {
        return new ImageFeature<TDepth>[0];
    }

    using (VectorOfKeyPoint keyPointVector = new VectorOfKeyPoint())
    {
        keyPointVector.Push(keyPoints);
        using (Matrix<TDepth> rawDescriptors = extractor.ComputeDescriptorsRaw(image, mask, keyPointVector))
        {
            return ImageFeature<TDepth>.ConvertFromRaw(keyPointVector, rawDescriptors);
        }
    }
}
Esempio n. 2
0
        /// <summary>
        /// Computes the descriptors for a set of keypoints in an image.
        /// </summary>
        /// <param name="extractor">The descriptor extractor used to compute keypoint descriptors.</param>
        /// <param name="image">The image from which to extract keypoint descriptors.</param>
        /// <param name="keyPoints">
        /// The keypoints for which to extract descriptors. Keypoints for which a
        /// descriptor cannot be computed are removed.
        /// </param>
        /// <returns>The matrix of descriptors computed for the specified set of keypoints, one row per keypoint.</returns>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="extractor"/> or <paramref name="keyPoints"/> is null.
        /// </exception>
        public static Mat Compute(this IDescriptorExtractor extractor, Arr image, KeyPointCollection keyPoints)
        {
            if (extractor == null)
            {
                // nameof keeps the parameter name correct under rename refactorings.
                throw new ArgumentNullException(nameof(extractor));
            }

            if (keyPoints == null)
            {
                throw new ArgumentNullException(nameof(keyPoints));
            }

            // Preallocate one row per keypoint; the extractor may drop keypoints it
            // cannot describe, in which case the result is trimmed below.
            var descriptors = new Mat(keyPoints.Count, extractor.DescriptorSize, extractor.DescriptorType);

            extractor.Compute(image, keyPoints, descriptors);
            if (descriptors.Rows != keyPoints.Count)
            {
                // Keep only the rows that correspond to the surviving keypoints.
                descriptors = descriptors.GetSubRect(new Rect(0, 0, descriptors.Cols, keyPoints.Count));
            }

            return descriptors;
        }
Esempio n. 3
0
 /// <summary>
 /// Create a Bag-of-Words (BOW) image descriptor extractor from the given
 /// descriptor extractor and descriptor matcher.
 /// </summary>
 /// <param name="descriptorExtractor">Descriptor extractor that is used to compute descriptors for an input image and its key points.</param>
 /// <param name="descriptorMatcher">Descriptor matcher that is used to find the nearest word of the trained vocabulary for each key point descriptor of the image.</param>
 public BOWImgDescriptorExtractor(IDescriptorExtractor descriptorExtractor, DescriptorMatcher descriptorMatcher)
 {
     // Delegates construction to the native OpenCV BOWImgDescriptorExtractor.
     _ptr = BOWImgDescriptorExtractorInvoke.CvBOWImgDescriptorExtractorCreate(descriptorExtractor.DescriptorExtratorPtr, descriptorMatcher);
 }
Esempio n. 4
0
 /// <summary>
 /// Create a Bag-of-Words (BOW) image descriptor extractor from the given
 /// descriptor extractor and descriptor matcher.
 /// </summary>
 /// <param name="descriptorExtractor">Descriptor extractor that is used to compute descriptors for an input image and its keypoints.</param>
 /// <param name="descriptorMatcher">Descriptor matcher that is used to find the nearest word of the trained vocabulary for each keypoint descriptor of the image.</param>
 public BOWImgDescriptorExtractor(IDescriptorExtractor <Gray, T> descriptorExtractor, DescriptorMatcher <T> descriptorMatcher)
 {
     // Delegates construction to the native OpenCV BOWImgDescriptorExtractor.
     _ptr = CvInvoke.CvBOWImgDescriptorExtractorCreate(descriptorExtractor.DescriptorExtratorPtr, descriptorMatcher);
 }
Esempio n. 5
0
 /// <summary>
 /// Compute the descriptors on the image at the given key point locations.
 /// </summary>
 /// <param name="extractor">The descriptor extractor</param>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keyPoints">The key points where the descriptor computation is performed</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>The descriptors for the given key points, or null when no key points remain</returns>
 public static Matrix <TDescriptor> ComputeDescriptorsRaw <TColor, TDescriptor>(this IDescriptorExtractor <TColor, TDescriptor> extractor, Image <TColor, Byte> image, Image <Gray, Byte> mask, VectorOfKeyPoint keyPoints)
     where TColor : struct, IColor
     where TDescriptor : struct
 {
     using (Mat computed = new Mat())
     {
         CvInvoke.CvDescriptorExtractorCompute(extractor.DescriptorExtratorPtr, image, keyPoints, computed);
         // The native call may discard key points it cannot describe; the size
         // check therefore has to happen after the compute call.
         if (keyPoints.Size == 0)
         {
             return null;
         }
         Matrix<TDescriptor> copy = new Matrix<TDescriptor>(computed.Size);
         CvInvoke.cvMatCopyToCvArr(computed, copy);
         return copy;
     }
 }
Esempio n. 6
0
 /// <summary>
 /// Get the number of elements in the descriptor.
 /// </summary>
 /// <typeparam name="TColor">The type of image the descriptor extractor operates on</typeparam>
 /// <typeparam name="TDescriptor">The depth of the type of descriptor</typeparam>
 /// <param name="extractor">The descriptor extractor</param>
 /// <returns>The number of elements in the descriptor</returns>
 public static int GetDescriptorSize <TColor, TDescriptor>(this IDescriptorExtractor <TColor, TDescriptor> extractor)
     where TColor : struct, IColor
     where TDescriptor : struct
     => CvInvoke.CvDescriptorExtractorGetDescriptorSize(extractor.DescriptorExtratorPtr);
Esempio n. 7
0
 /// <summary>
 /// Get the number of elements in the descriptor.
 /// </summary>
 /// <param name="extractor">The descriptor extractor</param>
 /// <returns>The number of elements in the descriptor</returns>
 public static int GetDescriptorSize(this IDescriptorExtractor extractor)
     => CvDescriptorExtractorGetDescriptorSize(extractor.DescriptorExtratorPtr);
Esempio n. 8
0
        /*
         * /// <summary>
         * /// Compute the descriptor given the image and the point location
         * /// </summary>
         * /// <param name="extractor">The descriptor extractor</param>
         * /// <param name="image">The image where the descriptor will be computed from</param>
         * /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
         * /// <returns>The descriptors founded on the keypoint location</returns>
         * public static ImageFeature<TDescriptor>[] Compute<TColor, TDescriptor>(this IDescriptorExtractor<TColor, TDescriptor> extractor, Image<TColor, Byte> image, MKeyPoint[] keyPoints)
         * where TColor : struct, IColor
         * where TDescriptor : struct
         * {
         * if (keyPoints.Length == 0) return new ImageFeature<TDescriptor>[0];
         * using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
         * {
         *    kpts.Push(keyPoints);
         *    using (Matrix<TDescriptor> descriptor = extractor.Compute(image, kpts))
         *    {
         *       return ImageFeature<TDescriptor>.ConvertFromRaw(kpts, descriptor);
         *    }
         * }
         * }*/

        /// <summary>
        /// Compute the descriptors on the image at the given key point locations.
        /// </summary>
        /// <param name="extractor">The descriptor extractor</param>
        /// <param name="image">The image to compute descriptors from</param>
        /// <param name="keyPoints">The key points where the descriptor computation is performed</param>
        /// <param name="descriptors">The output array that receives the computed descriptors</param>
        public static void Compute(this IDescriptorExtractor extractor, IInputArray image, VectorOfKeyPoint keyPoints, IOutputArray descriptors)
        {
            using (InputArray inputImage = image.GetInputArray())
            {
                using (OutputArray outputDescriptors = descriptors.GetOutputArray())
                {
                    CvDescriptorExtractorCompute(extractor.DescriptorExtratorPtr, inputImage, keyPoints.Ptr, outputDescriptors);
                }
            }
        }
 /// <summary>
 /// Create a opponent Color descriptor extractor that wraps the given base extractor.
 /// </summary>
 /// <param name="extractor">The base descriptor extractor</param>
 public OpponentColorDescriptorExtractor(IDescriptorExtractor <Gray, TDescriptor> extractor)
 {
     // Keep a managed reference to the base extractor (presumably so it is not
     // garbage collected/disposed while the native wrapper still uses it — TODO confirm).
     _baseExtractor = extractor;
     _ptr           = CvInvoke.CvOpponentColorDescriptorExtractorCreate(extractor.DescriptorExtratorPtr);
 }
Esempio n. 10
0
        /// <summary>
        /// Smoke test: extracts features from a model image ("box.png") and a scene image
        /// ("box_in_scene.png"), matches their descriptors with a brute-force KNN matcher,
        /// and attempts to recover the homography that locates the model in the scene.
        /// </summary>
        /// <param name="keyPointDetector">The key point detector under test</param>
        /// <param name="descriptorGenerator">The descriptor extractor under test</param>
        /// <returns>True when a homography was found (and the projected outline drawn); false otherwise</returns>
        public static bool TestFeature2DTracker(IFeatureDetector keyPointDetector, IDescriptorExtractor descriptorGenerator)
        {
            //for (int k = 0; k < 1; k++)
            {
                Feature2D feature2D = null;
                // When a single object both detects and describes (a Feature2D), use
                // its combined DetectAndCompute path instead of two separate calls.
                if (keyPointDetector == descriptorGenerator)
                {
                    feature2D = keyPointDetector as Feature2D;
                }

                Image <Gray, Byte> modelImage = EmguAssert.LoadImage <Gray, byte>("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                Mat modelDescriptors            = new Mat();
                if (feature2D != null)
                {
                    feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
                }
                else
                {
                    keyPointDetector.DetectRaw(modelImage, modelKeypoints);
                    descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
                }
                stopwatch.Stop();
                EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image <Gray, Byte> observedImage = EmguAssert.LoadImage <Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset();
                stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                using (Mat observedDescriptors = new Mat())
                {
                    if (feature2D != null)
                    {
                        feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
                    }
                    else
                    {
                        keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                        descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
                    }

                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                    #endregion

                    //Merge the object image and the observed image into one big image for display
                    Image <Gray, Byte> res = modelImage.ConcateVertical(observedImage);

                    // Corners of the model image, used later to draw its projected outline.
                    Rectangle rect = modelImage.ROI;
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    HomographyMatrix homography = null;

                    stopwatch.Reset();
                    stopwatch.Start();

                    // Binary (8U) descriptors are matched with Hamming distance,
                    // floating-point descriptors with L2.
                    int          k  = 2;
                    DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
                    //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
                    //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        using (BruteForceMatcher matcher = new BruteForceMatcher(dt))
                        {
                            matcher.Add(modelDescriptors);
                            matcher.KnnMatch(observedDescriptors, matches, k, null);

                            // Start with every match enabled; the voting steps below
                            // clear the mask entries of rejected matches.
                            Matrix <byte> mask = new Matrix <byte>(matches.Size, 1);
                            mask.SetValue(255);
                            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                            // At least 4 point correspondences are needed to estimate a homography.
                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                {
                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                                }
                            }
                        }
                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                    bool success = false;
                    // A homography was recovered: project the model outline into the
                    // observed half of the composite image and draw it there.
                    if (homography != null)
                    {
                        PointF[] points = pts.Clone() as PointF[];
                        homography.ProjectPoints(points);

                        // Shift into the lower (observed) half of the vertically
                        // concatenated display image.
                        for (int i = 0; i < points.Length; i++)
                        {
                            points[i].Y += modelImage.Height;
                        }

                        res.DrawPolyline(
#if NETFX_CORE
                            Extensions.
#else
                            Array.
#endif
                            ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                        success = true;
                    }
                    //Emgu.CV.UI.ImageViewer.Show(res);
                    return(success);
                }



                /*
                 * stopwatch.Reset(); stopwatch.Start();
                 * //set the initial region to be the whole image
                 * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                 * {
                 * priorMask.SetValue(1.0);
                 * homography = tracker.CamShiftTrack(
                 *    observedFeatures,
                 *    (RectangleF)observedImage.ROI,
                 *    priorMask);
                 * }
                 * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                 *
                 * if (homography != null) //set the initial tracking window to be the whole image
                 * {
                 * PointF[] points = pts.Clone() as PointF[];
                 * homography.ProjectPoints(points);
                 *
                 * for (int i = 0; i < points.Length; i++)
                 *    points[i].Y += modelImage.Height;
                 * res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                 * return true;
                 * }
                 * else
                 * {
                 * return false;
                 * }*/
            }
        }