Exemplo n.º 1
0
 /// <summary>
 /// Release all the unmanaged memory associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     // Nothing to release when the native handle was never created or is already gone.
     if (_ptr == IntPtr.Zero)
     {
         return;
     }

     // The native release call resets _ptr to IntPtr.Zero through the ref parameter.
     Features2DInvoke.cveBOWImgDescriptorExtractorRelease(ref _ptr);
 }
Exemplo n.º 2
0
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
     using (InputArray iaImg = image.GetInputArray())
     {
         // A null mask maps to the empty InputArray expected by the native layer.
         using (InputArray iaMsk = mask?.GetInputArray() ?? InputArray.GetEmpty())
         {
             using (OutputArray oaDesc = descriptors.GetOutputArray())
             {
                 Features2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImg, iaMsk, keyPoints, oaDesc, useProvidedKeyPoints);
             }
         }
     }
 }
Exemplo n.º 3
0
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observedImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 /// <param name="result">The image where the model and observed images are displayed side by side. Matches are drawn as indicated by the flag.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public static void DrawMatches(
     IInputArray modelImage,
     VectorOfKeyPoint modelKeypoints,
     IInputArray observedImage,
     VectorOfKeyPoint observedKeyPoints,
     VectorOfVectorOfDMatch matches,
     IInputOutputArray result,
     MCvScalar matchColor,
     MCvScalar singlePointColor,
     VectorOfVectorOfByte mask = null,
     KeypointDrawType flags = KeypointDrawType.Default)
 {
     using (InputArray iaModel = modelImage.GetInputArray())
     {
         using (InputArray iaObserved = observedImage.GetInputArray())
         {
             using (InputOutputArray ioaResult = result.GetInputOutputArray())
             {
                 // Note: the native function takes the observed image/keypoints first,
                 // then the model image/keypoints.
                 Features2DInvoke.drawMatchedFeatures2(
                     iaObserved,
                     observedKeyPoints,
                     iaModel,
                     modelKeypoints,
                     matches,
                     ioaResult,
                     ref matchColor,
                     ref singlePointColor,
                     mask,
                     flags);
             }
         }
     }
 }
Exemplo n.º 4
0
 /// <summary>
 /// Release all the unmanaged memory associated with this simple blob detector parameter.
 /// </summary>
 protected override void DisposeObject()
 {
     // Skip the native call when there is no live handle to release.
     if (IntPtr.Zero == _ptr)
     {
         return;
     }

     // The ref parameter lets the native side zero out _ptr after freeing it.
     Features2DInvoke.cveSimpleBlobDetectorParamsRelease(ref _ptr);
 }
Exemplo n.º 5
0
 /// <summary>
 /// Release all the unmanaged memory associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     // Only release a handle that is actually allocated.
     if (_ptr == IntPtr.Zero)
     {
         return;
     }

     // The native call frees the trainer and resets _ptr via the ref parameter.
     Features2DInvoke.cveBOWKMeansTrainerRelease(ref _ptr);
 }
Exemplo n.º 6
0
 /// <summary>
 /// Release the unmanaged resource associated with the BFMatcher.
 /// </summary>
 protected override void DisposeObject()
 {
     bool hasNativeHandle = _ptr != IntPtr.Zero;
     if (hasNativeHandle)
     {
         // The native call frees the matcher and zeroes _ptr through the ref parameter.
         Features2DInvoke.cveBFMatcherRelease(ref _ptr);
     }

     // Let the base class release its own resources as well.
     base.DisposeObject();
 }
Exemplo n.º 7
0
Arquivo: SIFT.cs Projeto: v5chn/emgucv
 /// <summary>
 /// Create a SIFT using the specific values
 /// </summary>
 /// <param name="nFeatures">The desired number of features. Use 0 for un-restricted number of features</param>
 /// <param name="nOctaveLayers">The number of octave layers. Use 3 for default</param>
 /// <param name="contrastThreshold">Contrast threshold. Use 0.04 as default</param>
 /// <param name="edgeThreshold">Detector parameter. Use 10.0 as default</param>
 /// <param name="sigma">Use 1.6 as default</param>
 public SIFT(
     int nFeatures            = 0, int nOctaveLayers = 3,
     double contrastThreshold = 0.04, double edgeThreshold = 10.0,
     double sigma             = 1.6)
 {
     // The native create call also populates _feature2D and _sharedPtr, which
     // manage the lifetime of the underlying cv::SIFT object.
     _ptr = Features2DInvoke.cveSIFTCreate(nFeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma,
                                           ref _feature2D, ref _sharedPtr);
 }
Exemplo n.º 8
0
 /// <summary>
 /// Create a new BOWKmeans trainer
 /// </summary>
 /// <param name="clusterCount">Number of clusters to split the set by.</param>
 /// <param name="termcrit">Specifies maximum number of iterations and/or accuracy (distance the centers move by between the subsequent iterations). Use empty termcrit for default.</param>
 /// <param name="attempts">The number of attempts. Use 3 for default</param>
 /// <param name="flags">Kmeans initialization flag. Use PPCenters for default.</param>
 public BOWKMeansTrainer(
     int clusterCount,
     MCvTermCriteria termcrit,
     int attempts = 3,
     CvEnum.KMeansInitType flags = KMeansInitType.PPCenters)
 {
     // termcrit is a value type passed by ref so the native side can read it in place.
     _ptr = Features2DInvoke.cveBOWKMeansTrainerCreate(clusterCount, ref termcrit, attempts, flags);
 }
Exemplo n.º 9
0
 /// <summary>
 /// Release the unmanaged resources associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     bool hasSharedPtr = _sharedPtr != IntPtr.Zero;
     if (hasSharedPtr)
     {
         // Unlike most siblings, this detector is released through its shared
         // pointer; the native call resets _sharedPtr via the ref parameter.
         Features2DInvoke.cveAgastFeatureDetectorRelease(ref _sharedPtr);
     }

     // Allow the base class to clean up its resources too.
     base.DisposeObject();
 }
Exemplo n.º 10
0
 /// <summary>
 /// Release the unmanaged memory associated with this Flann based matcher.
 /// </summary>
 protected override void DisposeObject()
 {
     bool hasNativeHandle = _ptr != IntPtr.Zero;
     if (hasNativeHandle)
     {
         // The ref parameter lets the native call reset _ptr after freeing it.
         Features2DInvoke.cveFlannBasedMatcherRelease(ref _ptr);
     }

     // Base class disposal runs regardless of whether a handle existed.
     base.DisposeObject();
 }
Exemplo n.º 11
0
 /// <summary>
 /// Create AGAST using the specific values
 /// </summary>
 /// <param name="threshold">Threshold</param>
 /// <param name="nonmaxSuppression">Non maximum suppression</param>
 /// <param name="type">Type</param>
 public AgastFeatureDetector(
     int threshold          = 10,
     bool nonmaxSuppression = true,
     Type type = Type.OAST_9_16)
 {
     // The native create call also fills in _feature2D and _sharedPtr, which
     // manage the lifetime of the underlying cv::AgastFeatureDetector.
     _ptr = Features2DInvoke.cveAgastFeatureDetectorCreate(
         threshold, nonmaxSuppression, type,
         ref _feature2D, ref _sharedPtr);
 }
Exemplo n.º 12
0
 /// <summary>
 /// Eliminate the matched features whose scale and rotation do not agree with the majority's scale and rotation.
 /// </summary>
 /// <param name="rotationBins">The numbers of bins for rotation, a good value might be 20 (which means each bin covers 18 degrees)</param>
 /// <param name="scaleIncrement">This determines the difference in scale for neighborhood bins, a good value might be 1.5 (which means matched features in bin i+1 is scaled 1.5 times larger than matched features in bin i)</param>
 /// <param name="modelKeyPoints">The keypoints from the model image</param>
 /// <param name="observedKeyPoints">The keypoints from the observed image</param>
 /// <param name="mask">This is both input and output. This matrix indicates which row is valid for the matches.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <returns>The number of non-zero elements in the resulting mask</returns>
 public static int VoteForSizeAndOrientation(
     VectorOfKeyPoint modelKeyPoints,
     VectorOfKeyPoint observedKeyPoints,
     VectorOfVectorOfDMatch matches,
     Mat mask,
     double scaleIncrement,
     int rotationBins)
 {
     // Delegate directly to the native voting routine; it updates the mask in place
     // and reports how many matches survived.
     return Features2DInvoke.voteForSizeAndOrientation(
         modelKeyPoints,
         observedKeyPoints,
         matches,
         mask,
         scaleIncrement,
         rotationBins);
 }
Exemplo n.º 13
0
 /// <summary>
 /// Find the k-nearest match
 /// </summary>
 /// <param name="queryDescriptor">An n x m matrix of descriptors to be query for nearest neighbours. n is the number of descriptor and m is the size of the descriptor</param>
 /// <param name="k">Number of nearest neighbors to search for</param>
 /// <param name="mask">Can be null if not needed. An n x 1 matrix. If 0, the query descriptor in the corresponding row will be ignored.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="compactResult">
 /// Parameter used when the mask (or masks) is not empty. If compactResult is
 /// false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
 /// the matches vector does not contain matches for fully masked-out query descriptors.
 /// </param>
 public void KnnMatch(
     IInputArray queryDescriptor,
     VectorOfVectorOfDMatch matches,
     int k,
     IInputArray mask   = null,
     bool compactResult = false)
 {
     using (InputArray iaQueryDescriptor = queryDescriptor.GetInputArray())
     {
         // A null mask is translated into the empty InputArray the native API expects.
         using (InputArray iaMask = mask?.GetInputArray() ?? InputArray.GetEmpty())
         {
             Features2DInvoke.cveDescriptorMatcherKnnMatch2(_descriptorMatcherPtr, iaQueryDescriptor, matches, k, iaMask, compactResult);
         }
     }
 }
Exemplo n.º 14
0
 /// <summary>
 /// Create a simple blob detector
 /// </summary>
 /// <param name="parameters">The parameters for creating a simple blob detector; pass null to use the native defaults</param>
 public SimpleBlobDetector(SimpleBlobDetectorParams parameters = null)
 {
     // Choose the parameterless native constructor when no parameters are supplied;
     // both variants also populate _feature2D and _sharedPtr for lifetime management.
     _ptr = parameters == null
         ? Features2DInvoke.cveSimpleBlobDetectorCreate(ref _feature2D, ref _sharedPtr)
         : Features2DInvoke.cveSimpleBlobDetectorCreateWithParams(ref _feature2D, parameters, ref _sharedPtr);
 }
Exemplo n.º 15
0
 /// <summary>
 /// Finds the best match for each descriptor from a query set. Train descriptors collection that was set by the Add function is used.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="matches">If a query descriptor is masked out in mask, no match is added for this descriptor. So, matches size may be smaller than the query descriptors count.</param>
 /// <param name="masks">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 public void Match(
     IInputArray queryDescriptors,
     VectorOfDMatch matches,
     IInputArrayOfArrays masks = null
     )
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
     {
         // null masks become the empty InputArray expected by the native call.
         using (InputArray iaMasks = masks?.GetInputArray() ?? InputArray.GetEmpty())
         {
             Features2DInvoke.cveDescriptorMatcherMatch2(_descriptorMatcherPtr, iaQueryDescriptors, matches, iaMasks);
         }
     }
 }
Exemplo n.º 16
0
 /// <summary>
 /// Create KAZE using the specific values
 /// </summary>
 /// <param name="extended">Set to enable extraction of extended (128-byte) descriptor.</param>
 /// <param name="upright">Set to enable use of upright descriptors (non rotation-invariant).</param>
 /// <param name="threshold">Detector response threshold to accept point</param>
 /// <param name="octaves">Maximum octave evolution of the image</param>
 /// <param name="sublevels">Default number of sublevels per scale level</param>
 /// <param name="diffusivity">Diffusivity type.</param>
 public KAZE(
     bool extended           = false,
     bool upright            = false,
     float threshold         = 0.001f,
     int octaves             = 4,
     int sublevels           = 4,
     Diffusivity diffusivity = Diffusivity.PmG2)
 {
     // The native create call also populates _feature2D and _sharedPtr, which
     // manage the lifetime of the underlying cv::KAZE object.
     _ptr = Features2DInvoke.cveKAZEDetectorCreate(
         extended, upright, threshold, octaves, sublevels, diffusivity,
         ref _feature2D, ref _sharedPtr);
 }
Exemplo n.º 17
0
 /// <summary>
 /// For each query descriptor, finds the training descriptors not farther than the specified distance.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="matches">Found matches.</param>
 /// <param name="maxDistance">Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!</param>
 /// <param name="masks">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 public void RadiusMatch(
     IInputArray queryDescriptors,
     VectorOfVectorOfDMatch matches,
     float maxDistance,
     IInputArray masks  = null,
     bool compactResult = false)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
     {
         // A null mask argument maps to the empty InputArray the native API expects.
         using (InputArray iaMasks = masks?.GetInputArray() ?? InputArray.GetEmpty())
         {
             Features2DInvoke.cveDescriptorMatcherRadiusMatch2(_descriptorMatcherPtr, iaQueryDescriptors, matches, maxDistance, iaMasks, compactResult);
         }
     }
 }
Exemplo n.º 18
0
 /// <summary>
 /// Finds the best match for each descriptor from a query set.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">If a query descriptor is masked out in mask, no match is added for this descriptor. So, matches size may be smaller than the query descriptors count.</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 public void Match(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     VectorOfDMatch matches,
     IInputArray mask = null)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
     {
         using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray())
         {
             // A null mask is passed down as the empty InputArray.
             using (InputArray iaMask = mask?.GetInputArray() ?? InputArray.GetEmpty())
             {
                 Features2DInvoke.cveDescriptorMatcherMatch1(_descriptorMatcherPtr, iaQueryDescriptors, iaTrainDescriptors, matches, iaMask);
             }
         }
     }
 }
Exemplo n.º 19
0
        /// <summary>
        /// Draw the keypoints found on the image.
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="keypoints">The keypoints to be drawn</param>
        /// <param name="color">The color used to draw the keypoints</param>
        /// <param name="type">The drawing type</param>
        /// <param name="outImage">The image with the keypoints drawn</param>
        public static void DrawKeypoints(
            IInputArray image,
            VectorOfKeyPoint keypoints,
            IInputOutputArray outImage,
            Bgr color,
            Features2DToolbox.KeypointDrawType type = KeypointDrawType.Default)
        {
            // The native call takes an MCvScalar by ref, so extract it into a local first.
            MCvScalar drawColor = color.MCvScalar;

            using (InputArray iaImage = image.GetInputArray())
            {
                using (InputOutputArray ioaOutImage = outImage.GetInputOutputArray())
                {
                    Features2DInvoke.drawKeypoints(iaImage, keypoints, ioaOutImage, ref drawColor, type);
                }
            }
        }
Exemplo n.º 20
0
 /// <summary>
 /// Create AKAZE using the specific values
 /// </summary>
 /// <param name="descriptorType">Type of the extracted descriptor</param>
 /// <param name="descriptorSize">Size of the descriptor in bits. 0 -> Full size</param>
 /// <param name="descriptorChannels">Number of channels in the descriptor (1, 2, 3)</param>
 /// <param name="threshold">Detector response threshold to accept point</param>
 /// <param name="nOctaveLayers"> Default number of sublevels per scale level</param>
 /// <param name="nOctaves">Maximum octave evolution of the image</param>
 /// <param name="diffusivity">Diffusivity type</param>
 public AKAZE(
     DescriptorType descriptorType = DescriptorType.Mldb,
     int descriptorSize            = 0,
     int descriptorChannels        = 3,
     float threshold              = 0.001f,
     int nOctaves                 = 4,
     int nOctaveLayers            = 4,
     KAZE.Diffusivity diffusivity = KAZE.Diffusivity.PmG2)
 {
     // The native create call also populates _feature2D and _sharedPtr, which
     // manage the lifetime of the underlying cv::AKAZE object.
     _ptr = Features2DInvoke.cveAKAZEDetectorCreate(
         descriptorType, descriptorSize, descriptorChannels,
         threshold, nOctaves, nOctaveLayers, diffusivity,
         ref _feature2D, ref _sharedPtr);
 }
Exemplo n.º 21
0
 /// <summary>
 /// Create a MSER detector using the specific parameters
 /// </summary>
 /// <param name="delta">In the code, it compares (size_{i}-size_{i-delta})/size_{i-delta}</param>
 /// <param name="maxArea">Prune the area which bigger than max_area</param>
 /// <param name="minArea">Prune the area which smaller than min_area</param>
 /// <param name="maxVariation">Prune the area have similar size to its children</param>
 /// <param name="minDiversity">Trace back to cut off mser with diversity &lt; min_diversity</param>
 /// <param name="maxEvolution">For color image, the evolution steps</param>
 /// <param name="areaThreshold">The area threshold to cause re-initialize</param>
 /// <param name="minMargin">Ignore too small margin</param>
 /// <param name="edgeBlurSize">The aperture size for edge blur</param>
 public MSER(
     int delta        = 5, int minArea = 60, int maxArea = 14400, double maxVariation = 0.25, double minDiversity = 0.2,
     int maxEvolution = 200, double areaThreshold = 1.01, double minMargin = 0.003, int edgeBlurSize = 5)
 {
     // The native create call also populates _feature2D and _sharedPtr, which
     // manage the lifetime of the underlying cv::MSER object.
     _ptr = Features2DInvoke.cveMserCreate(
         delta,
         minArea,
         maxArea,
         maxVariation,
         minDiversity,
         maxEvolution,
         areaThreshold,
         minMargin,
         edgeBlurSize,
         ref _feature2D,
         ref _sharedPtr);
 }
Exemplo n.º 22
0
        /// <summary>
        /// Recover the homography matrix using RANSAC. If the matrix cannot be recovered, null is returned.
        /// </summary>
        /// <param name="model">The model keypoints</param>
        /// <param name="observed">The observed keypoints</param>
        /// <param name="ransacReprojThreshold">
        /// The maximum allowed reprojection error to treat a point pair as an inlier.
        /// If srcPoints and dstPoints are measured in pixels, it usually makes sense to set this parameter somewhere in the range 1 to 10.
        /// </param>
        /// <param name="mask">
        /// The mask matrix of which the value might be modified by the function.
        /// As input, if the value is 0, the corresponding match will be ignored when computing the homography matrix.
        /// If the value is 1 and RANSAC determine the match is an outlier, the value will be set to 0.
        /// </param>
        /// <returns>The homography matrix, if it cannot be found, null is returned</returns>
        /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
        public static Mat GetHomographyMatrixFromMatchedFeatures(VectorOfKeyPoint model,
                                                                 VectorOfKeyPoint observed, VectorOfVectorOfDMatch matches, Mat mask, double ransacReprojThreshold)
        {
            Mat homography = new Mat();

            bool found = Features2DInvoke.getHomographyMatrixFromMatchedFeatures(
                model, observed, matches, mask, ransacReprojThreshold, homography);
            if (found)
            {
                return homography;
            }

            // No valid homography: free the temporary Mat and signal failure with null.
            homography.Dispose();
            return null;
        }
 /// <summary>
 /// Create a BOWImgDescriptorExtractor
 /// </summary>
 /// <param name="descriptorExtractor">Descriptor extractor that is used to compute descriptors for an input image and its key points.</param>
 /// <param name="descriptorMatcher">Descriptor matcher that is used to find the nearest word of the trained vocabulary for each key point descriptor of the image.</param>
 public BOWImgDescriptorExtractor(Feature2D descriptorExtractor, DescriptorMatcher descriptorMatcher)
 {
     // The extractor's inner Feature2D pointer is passed, not the wrapper itself;
     // the matcher wrapper is marshalled directly.
     _ptr = Features2DInvoke.cveBOWImgDescriptorExtractorCreate(descriptorExtractor.Feature2DPtr, descriptorMatcher);
 }
Exemplo n.º 24
0
 /// <summary>
 /// Add the model descriptors
 /// </summary>
 /// <param name="modelDescriptors">The model descriptors</param>
 public void Add(IInputArray modelDescriptors)
 {
     using (InputArray iaDescriptors = modelDescriptors.GetInputArray())
     {
         Features2DInvoke.cveDescriptorMatcherAdd(_descriptorMatcherPtr, iaDescriptors);
     }
 }
 /// <summary>
 /// Sets a visual vocabulary.
 /// </summary>
 /// <param name="vocabulary">The vocabulary</param>
 public void SetVocabulary(Mat vocabulary)
 {
     // Forward the vocabulary matrix to the native extractor.
     Features2DInvoke.cveBOWImgDescriptorExtractorSetVocabulary(_ptr, vocabulary);
 }
Exemplo n.º 26
0
 /// <summary>
 /// Create parameters for simple blob detector and use default values.
 /// </summary>
 public SimpleBlobDetectorParams()
 {
     // The native call allocates a SimpleBlobDetector::Params with OpenCV defaults.
     _ptr = Features2DInvoke.cveSimpleBlobDetectorParamsCreate();
 }
Exemplo n.º 27
0
 /// <summary>
 /// Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
 /// train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)
 /// have an empty implementation of this method. Other matchers really train their inner structures (for
 /// example, FlannBasedMatcher trains flann::Index).
 /// </summary>
 public void Train()
 {
     Features2DInvoke.cveDescriptorMatcherTrain(_descriptorMatcherPtr);
 }
Exemplo n.º 28
0
 /// <summary>
 /// Clears the train descriptor collections.
 /// </summary>
 public void Clear()
 {
     Features2DInvoke.cveDescriptorMatcherClear(_descriptorMatcherPtr);
 }
 /// <summary>
 /// Computes an image descriptor using the set visual vocabulary.
 /// </summary>
 /// <param name="image">Image, for which the descriptor is computed</param>
 /// <param name="keypoints">Key points detected in the input image.</param>
 /// <param name="imgDescriptors">The output image descriptors.</param>
 public void Compute(IInputArray image, VectorOfKeyPoint keypoints, Mat imgDescriptors)
 {
     using (InputArray iaImg = image.GetInputArray())
     {
         Features2DInvoke.cveBOWImgDescriptorExtractorCompute(_ptr, iaImg, keypoints, imgDescriptors);
     }
 }
Exemplo n.º 30
0
 /// <summary>
 /// Cluster the descriptors and return the cluster centers
 /// </summary>
 /// <param name="cluster">The cluster centers</param>
 public void Cluster(IOutputArray cluster)
 {
     using (OutputArray oaCenters = cluster.GetOutputArray())
     {
         Features2DInvoke.cveBOWKMeansTrainerCluster(_ptr, oaCenters);
     }
 }