コード例 #1
0
ファイル: SIFT.cs プロジェクト: sunsunsun000/captchaReg
 /// <summary>
 /// Create a SIFT detector/extractor with the given parameters.
 /// </summary>
 /// <param name="nFeatures">The desired number of features; use 0 for an un-restricted number of features.</param>
 /// <param name="nOctaveLayers">The number of octave layers; 3 is the default.</param>
 /// <param name="contrastThreshold">Contrast threshold; 0.04 is the default.</param>
 /// <param name="edgeThreshold">Detector parameter; 10.0 is the default.</param>
 /// <param name="sigma">Gaussian sigma; 1.6 is the default.</param>
 public SIFT(
     int nFeatures = 0, int nOctaveLayers = 3,
     double contrastThreshold = 0.04, double edgeThreshold = 10.0,
     double sigma = 1.6)
 {
     // The native create call also populates the base Feature2D pointer.
     _ptr = ContribInvoke.cveSIFTCreate(
         nFeatures,
         nOctaveLayers,
         contrastThreshold,
         edgeThreshold,
         sigma,
         ref _feature2D);
 }
コード例 #2
0
        /// <summary>
        /// Compute the descriptors for the given keypoint locations in the image.
        /// </summary>
        /// <param name="image">The image the descriptors will be computed from</param>
        /// <param name="mask">The optional mask; can be null if not needed</param>
        /// <param name="keyPoints">The keypoints the descriptors will be computed from. The order of the keypoints might be changed unless the GPU_SURF detector is UP-RIGHT.</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public GpuMat ComputeDescriptorsRaw(GpuMat image, GpuMat mask, GpuMat keyPoints)
        {
            GpuMat result = new GpuMat();
            ContribInvoke.cudaSURFDetectorCompute(_ptr, image, mask, keyPoints, result, true);
            return result;
        }
コード例 #3
0
        /// <summary>
        /// Detect keypoints in the CudaImage.
        /// </summary>
        /// <param name="img">The image where keypoints will be detected from</param>
        /// <param name="mask">The optional mask; can be null if not needed</param>
        /// <returns>
        /// A keypoints GpuMat with 1 row.
        /// keypoints.at&lt;float[6]&gt;(1, i) contains the i'th keypoint,
        /// format: (x, y, size, response, angle, octave)
        /// </returns>
        public GpuMat DetectKeyPointsRaw(GpuMat img, GpuMat mask = null)
        {
            GpuMat keyPoints = new GpuMat();
            ContribInvoke.cudaSURFDetectorDetectKeyPoints(_ptr, img, mask, keyPoints);
            return keyPoints;
        }
コード例 #4
0
ファイル: VGG.cs プロジェクト: vinchu/emgucv
 /// <summary>
 /// Create a VGG (trained convex-optimization) descriptor extractor using the specified values.
 /// </summary>
 /// <param name="desc">Type/size of the descriptor to compute — presumably one of the VGG variants (e.g. 120/80/64/48 floats); confirm against the cveVGGCreate native documentation.</param>
 /// <param name="isigma">Gaussian kernel sigma for image sampling — NOTE(review): assumed from OpenCV xfeatures2d::VGG; verify.</param>
 /// <param name="imgNormalize">Whether to use image-sample intensity normalization — assumed from OpenCV xfeatures2d::VGG; verify.</param>
 /// <param name="useScaleOrientation">Whether to sample patterns using the keypoint's scale and orientation — assumed; verify.</param>
 /// <param name="scaleFactor">Adjusts the sampling window around detected keypoints — assumed; verify.</param>
 /// <param name="dscNormalize">Whether to normalize the resulting descriptor — assumed; verify.</param>
 public VGG(
     int desc, float isigma,
     bool imgNormalize, bool useScaleOrientation,
     float scaleFactor, bool dscNormalize)
 {
     // The native create call also populates the base Feature2D pointer.
     _ptr = ContribInvoke.cveVGGCreate(desc, isigma, imgNormalize, useScaleOrientation, scaleFactor, dscNormalize,
                                       ref _feature2D);
 }
コード例 #5
0
 /// <summary>
 /// Release all the unmanaged resources associated with this BRIEF descriptor extractor.
 /// </summary>
 protected override void DisposeObject()
 {
     // Only release the native handle if it has not been released already.
     if (IntPtr.Zero != _ptr)
     {
         ContribInvoke.cveBriefDescriptorExtractorRelease(ref _ptr);
     }
     base.DisposeObject();
 }
コード例 #6
0
 /// <summary>
 /// Create a DAISY descriptor extractor.
 /// </summary>
 /// <param name="radius">Radius of the descriptor at the initial scale.</param>
 /// <param name="qRadius">Amount of radial range division quantity.</param>
 /// <param name="qTheta">Amount of angular range division quantity.</param>
 /// <param name="qHist">Amount of gradient orientations range division quantity.</param>
 /// <param name="norm">Descriptor normalization type.</param>
 /// <param name="H">Optional 3x3 homography matrix used to warp the grid of daisy; sampling keypoints remains unwarped on the image. Pass null when no warping is needed.</param>
 /// <param name="interpolation">Switch to disable interpolation for a speed improvement at minor quality loss.</param>
 /// <param name="useOrientation">Sample patterns using keypoint orientation; disabled by default.</param>
 public DAISY(float radius       = 15, int qRadius           = 3, int qTheta = 8,
              int qHist          = 8, NormalizationType norm = NormalizationType.None, IInputArray H = null,
              bool interpolation = true, bool useOrientation = false)
 {
     // A null homography is forwarded to the native layer as an empty InputArray.
     using (InputArray iaH = (H != null) ? H.GetInputArray() : InputArray.GetEmpty())
     {
         _ptr = ContribInvoke.cveDAISYCreate(radius, qRadius, qTheta, qHist, norm, iaH, interpolation, useOrientation,
                                             ref _feature2D);
     }
 }
コード例 #7
0
 /// <summary>
 /// Release the unmanaged memory associated with this star detector.
 /// </summary>
 protected override void DisposeObject()
 {
     bool hasNativePtr = _ptr != IntPtr.Zero;
     if (hasNativePtr)
     {
         ContribInvoke.cveStarDetectorRelease(ref _ptr);
     }
     base.DisposeObject();
 }
コード例 #8
0
        /*
         * /// <summary>
         * /// Create a Cuda SURF detector using the specific parameters
         * /// </summary>
         * /// <param name="detector">The surf detector where the parameters will be borrow from</param>
         * /// <param name="featuresRatio">Max features = featuresRatio * img.size().area().</param>
         * public CudaSURFDetector(MCvSURFParams detector, float featuresRatio = 0.01f)
         * : this((float)detector.HessianThreshold, detector.NOctaves, detector.NOctaveLayers, (detector.Extended != 0), featuresRatio, (detector.Upright != 0))
         * {
         * }*/

        /// <summary>
        /// Create a Cuda SURF detector.
        /// </summary>
        /// <param name="hessianThreshold">The interest operator threshold.</param>
        /// <param name="nOctaves">The number of octaves to process.</param>
        /// <param name="nOctaveLayers">The number of layers in each octave.</param>
        /// <param name="extended">True to generate 128-length descriptors, false for 64-length descriptors.</param>
        /// <param name="featuresRatio">Max features = featuresRatio * img.size().area().</param>
        /// <param name="upright">If set to true, the orientation is not computed for the keypoints.</param>
        public CudaSURFDetector(
            float hessianThreshold = 100.0f,
            int nOctaves = 4,
            int nOctaveLayers = 2,
            bool extended = true,
            float featuresRatio = 0.01f,
            bool upright = false)
        {
            _ptr = ContribInvoke.cudaSURFDetectorCreate(
                hessianThreshold, nOctaves, nOctaveLayers, extended, featuresRatio, upright);
        }
コード例 #9
0
ファイル: SURF.cs プロジェクト: vinchu/emgucv
 /// <summary>
 /// Create a SURF detector using the specified values.
 /// </summary>
 /// <param name="hessianThresh">
 /// Only features with keypoint.hessian larger than this are extracted.
 /// A good default value is ~300-500 (it can depend on the average local contrast and sharpness of the image).
 /// The user can further filter out some features based on their hessian values and other characteristics.
 /// </param>
 /// <param name="nOctaves">
 /// The number of octaves to be used for extraction.
 /// With each next octave the feature size is doubled.
 /// </param>
 /// <param name="nOctaveLayers">
 /// The number of layers within each octave.
 /// </param>
 /// <param name="extended">
 /// False means basic descriptors (64 elements each),
 /// true means extended descriptors (128 elements each).
 /// </param>
 /// <param name="upright">
 /// False means the detector computes the orientation of each feature.
 /// True means the orientation is not computed (which is much, much faster).
 /// For example, when matching images from a stereo pair, or doing image stitching, the matched features
 /// likely have very similar angles, so feature extraction can be sped up by setting upright=true.
 /// </param>
 public SURF(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true,
             bool upright = false)
 {
     // The native create call also populates the base Feature2D pointer.
     _ptr = ContribInvoke.cveSURFCreate(
         hessianThresh, nOctaves, nOctaveLayers, extended, upright, ref _feature2D);
 }
コード例 #10
0
 /// <summary>
 /// Create a BRIEF descriptor extractor.
 /// </summary>
 /// <param name="descriptorSize">The descriptor size in bytes; valid values are 16, 32 or 64.</param>
 public BriefDescriptorExtractor(int descriptorSize = 32)
 {
     _ptr = ContribInvoke.cveBriefDescriptorExtractorCreate(
         descriptorSize,
         ref _feature2D);
 }
コード例 #11
0
 /// <summary>
 /// Create a LUCID (locally uniform comparison image) descriptor extractor.
 /// </summary>
 /// <param name="lucidKernel">Kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth.</param>
 /// <param name="blurKernel">Kernel for blurring the image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth.</param>
 public LUCID(int lucidKernel = 1, int blurKernel = 2)
 {
     _ptr = ContribInvoke.cveLUCIDCreate(
         lucidKernel,
         blurKernel,
         ref _feature2D);
 }
コード例 #12
0
 /// <summary>
 /// Create a LATCH descriptor extractor.
 /// </summary>
 /// <param name="bytes">The size of the descriptor; can be 64, 32, 16, 8, 4, 2 or 1.</param>
 /// <param name="rotationInvariance">Whether the descriptor should compensate for orientation changes.</param>
 /// <param name="halfSsdSize">Half the size of the mini-patches. For example, to compare triplets of
 /// 7x7 patches, half_ssd_size should be (7-1)/2 = 3.</param>
 public LATCH(int bytes = 32, bool rotationInvariance = true, int halfSsdSize = 3)
 {
     _ptr = ContribInvoke.cveLATCHCreate(
         bytes,
         rotationInvariance,
         halfSsdSize,
         ref _feature2D);
 }
コード例 #13
0
 /// <summary>
 /// Release the unmanaged resource associated with the detector.
 /// </summary>
 protected override void DisposeObject()
 {
     // Guard against releasing an already-released (zero) native handle, for
     // consistency with the other DisposeObject implementations in this library
     // that check _ptr before calling the native release function.
     if (_ptr != IntPtr.Zero)
     {
         ContribInvoke.cudaSURFDetectorRelease(ref _ptr);
     }
 }
コード例 #14
0
 /// <summary>
 /// Obtain a GpuMat from the keypoints array.
 /// </summary>
 /// <param name="src">The keypoints array to upload</param>
 /// <param name="dst">A GpuMat that represents the keypoints</param>
 public void UploadKeypoints(VectorOfKeyPoint src, GpuMat dst)
 {
     ContribInvoke.cudaSURFUploadKeypoints(
         _ptr,
         src,
         dst);
 }
コード例 #15
0
ファイル: Freak.cs プロジェクト: formylover/emgucv-1
 /// <summary>
 /// Create a FREAK descriptor extractor.
 /// </summary>
 /// <param name="orientationNormalized">Enable orientation normalization.</param>
 /// <param name="scaleNormalized">Enable scale normalization.</param>
 /// <param name="patternScale">Scaling of the description pattern.</param>
 /// <param name="nOctaves">Number of octaves covered by the detected keypoints.</param>
 public Freak(bool orientationNormalized = true, bool scaleNormalized = true, float patternScale = 22.0f,
              int nOctaves = 4)
 {
     // The native create call also populates the base Feature2D pointer.
     _ptr = ContribInvoke.cveFreakCreate(
         orientationNormalized,
         scaleNormalized,
         patternScale,
         nOctaves,
         ref _feature2D);
 }
コード例 #16
0
 /// <summary>
 /// Create a star detector with the specified parameters.
 /// </summary>
 /// <param name="maxSize">
 /// Maximum size of the features. The following values of the parameter are supported:
 /// 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128
 /// </param>
 /// <param name="responseThreshold">
 /// Threshold for the approximated laplacian, used to eliminate weak features.
 /// The larger it is, the fewer features will be retrieved.
 /// </param>
 /// <param name="lineThresholdProjected">
 /// Another threshold for the laplacian to eliminate edges.
 /// The larger the threshold, the more points you get.
 /// </param>
 /// <param name="lineThresholdBinarized">
 /// Another threshold for the feature size to eliminate edges.
 /// The larger the threshold, the more points you get.
 /// </param>
 /// <param name="suppressNonmaxSize">
 /// Presumably the window size used for non-maximum suppression of detected
 /// features — NOTE(review): not documented in the original; confirm against
 /// the cveStarDetectorCreate native documentation.
 /// </param>
 public StarDetector(int maxSize = 45, int responseThreshold = 30, int lineThresholdProjected = 10, int lineThresholdBinarized = 8, int suppressNonmaxSize = 5)
 {
     // The native create call also populates the base Feature2D pointer.
     _ptr = ContribInvoke.cveStarDetectorCreate(
         maxSize,
         responseThreshold,
         lineThresholdProjected,
         lineThresholdBinarized,
         suppressNonmaxSize,
         ref _feature2D);
 }
コード例 #17
0
 /// <summary>
 /// Obtain the keypoints array from a GpuMat.
 /// </summary>
 /// <param name="src">The keypoints obtained from DetectKeyPointsRaw</param>
 /// <param name="dst">The vector of keypoints to fill</param>
 public void DownloadKeypoints(GpuMat src, VectorOfKeyPoint dst)
 {
     ContribInvoke.cudaSURFDownloadKeypoints(
         _ptr,
         src,
         dst);
 }
コード例 #18
0
ファイル: BoostDesc.cs プロジェクト: vinchu/emgucv
 /// <summary>
 /// Create a BoostDesc (boosted binary/learned) descriptor extractor using the specified values.
 /// </summary>
 /// <param name="desc">Type of descriptor to compute — presumably one of the BoostDesc variants (e.g. BGM, LBGM, BinBoost); confirm against the cveBoostDescCreate native documentation.</param>
 /// <param name="useScaleOrientation">Whether to sample patterns using the keypoint's scale and orientation — NOTE(review): assumed from OpenCV xfeatures2d::BoostDesc; verify.</param>
 /// <param name="scalefactor">Adjusts the sampling window around detected keypoints — assumed from OpenCV xfeatures2d::BoostDesc; verify.</param>
 public BoostDesc(int desc, bool useScaleOrientation, float scalefactor)
 {
     _ptr = ContribInvoke.cveBoostDescCreate(desc, useScaleOrientation, scalefactor, ref _feature2D);
 }