Example no. 1
0
 /// <summary>
 /// Detect image features from the given image
 /// </summary>
 /// <param name="image">The image to detect features from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>The image features detected from the given image</returns>
 public ImageFeature[] DetectFeatures(Image <Gray, Byte> image, Image <Gray, byte> mask)
 {
     // Detect the raw keypoints first, then compute their descriptors,
     // and package both into the managed ImageFeature representation.
     using (VectorOfKeyPoint keyPoints = DetectKeyPointsRaw(image, mask))
     using (Matrix <float> descriptors = ComputeDescriptorsRaw(image, mask, keyPoints))
     {
         return Features2DTracker.ConvertToImageFeature(keyPoints, descriptors);
     }
 }
Example no. 2
0
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            // Guard against an empty keypoint set so ComputeDescriptorsRaw is
            // never asked to fill a descriptor matrix for zero points.
            if (keyPoints.Length == 0)
            {
                return new ImageFeature[0];
            }

            // NOTE(review): the previous version computed an unused local
            // (_surfParams.Extended ? 128 : 64); the descriptor size is
            // determined inside ComputeDescriptorsRaw, so it was removed.
            using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
            {
                pts.Push(keyPoints);
                using (Matrix <float> descriptors = ComputeDescriptorsRaw(image, mask, pts))
                {
                    return Features2DTracker.ConvertToImageFeature(pts, descriptors);
                }
            }
        }
Example no. 3
0
        /*
         * /// <summary>
         * /// Compute the descriptor given the bgr image and the point location, using opponent color (CGIV 2008 "Color Descriptors for Object Category Recognition").
         * /// </summary>
         * /// <param name="image">The image where the descriptor will be computed from</param>
         * /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
         * /// <returns>The descriptors found at the keypoint locations</returns>
         * public Matrix<float> ComputeDescriptorsRaw(Image<Bgr, Byte> image, VectorOfKeyPoint keyPoints)
         * {
         * int count = keyPoints.Size;
         * if (count == 0) return null;
         * Matrix<float> descriptors = new Matrix<float>(count, DescriptorSize * 3, 1);
         * CvSIFTDetectorComputeDescriptorsBGR(_ptr, image, keyPoints, descriptors);
         * return descriptors;
         * }*/

        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            // Nothing to describe when no keypoints were supplied.
            if (keyPoints.Length == 0)
            {
                return new ImageFeature[0];
            }

            using (VectorOfKeyPoint keyPointVector = new VectorOfKeyPoint())
            {
                // Copy the managed keypoints into the native vector, compute
                // their descriptors, and convert both to ImageFeature form.
                keyPointVector.Push(keyPoints);
                using (Matrix <float> descriptorMatrix = ComputeDescriptorsRaw(image, mask, keyPointVector))
                {
                    return Features2DTracker.ConvertToImageFeature(keyPointVector, descriptorMatrix);
                }
            }
        }