Example #1
 /// <summary>
 /// Release the unmanaged memory associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         XImgprocInvoke.cveGraphSegmentationRelease(ref _ptr, ref _sharedPtr);
     }
 }
Example #2
 /// <summary>
 /// Release the unmanaged memory associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         XImgprocInvoke.cveSuperpixelLSCRelease(ref _ptr);
     }
 }
Example #3
        /// <summary>
        /// Finds lines in the input image.
        /// </summary>
        /// <param name="image">Image to detect lines in.</param>
        /// <returns>The detected line segments</returns>
        public LineSegment2DF[] Detect(IInputArray image)
        {
            using (InputArray iaImage = image.GetInputArray())
                using (Mat matLines = new Mat())
                    using (OutputArray oaLines = matLines.GetOutputArray())
                    {
                        // Process image
                        XImgprocInvoke.cveFastLineDetectorDetect(_ptr, iaImage, oaLines);

                        // Convert data in Mat to list of LineSegment2DF objects
                        float[]          pointData = new float[matLines.Total.ToInt32() * matLines.ElementSize / 4];
                        LineSegment2DF[] lines     = new LineSegment2DF[pointData.Length / 4];
                        matLines.CopyTo(pointData);

                        // Each line is represented by 4 floats
                        for (int i = 0; i < pointData.Length / 4; i++)
                        {
                            lines[i] = new LineSegment2DF(
                                new PointF(pointData[i * 4], pointData[(i * 4) + 1]),
                                new PointF(pointData[(i * 4) + 2], pointData[(i * 4) + 3]));
                        }

                        return(lines);
                    }
        }
Example #4
 /// <summary>
 /// Release the unmanaged memory associated with this object
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         XImgprocInvoke.cveDTFilterRelease(ref _ptr);
     }
 }
Example #5
 /// <summary>
 /// Release the unmanaged memory associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         XImgprocInvoke.cveSelectiveSearchSegmentationRelease(ref _ptr);
     }
 }
Example #6
 /// <summary>
 /// Create an EdgeBoxes object.
 /// </summary>
 /// <param name="alpha">Step size of sliding window search.</param>
 /// <param name="beta">Nms threshold for object proposals.</param>
 /// <param name="eta">Adaptation rate for nms threshold.</param>
 /// <param name="minScore">Min score of boxes to detect.</param>
 /// <param name="maxBoxes">Max number of boxes to detect.</param>
 /// <param name="edgeMinMag">Edge min magnitude. Increase to trade off accuracy for speed.</param>
 /// <param name="edgeMergeThr">Edge merge threshold. Increase to trade off accuracy for speed.</param>
 /// <param name="clusterMinMag">Cluster min magnitude. Increase to trade off accuracy for speed.</param>
 /// <param name="maxAspectRatio">Max aspect ratio of boxes.</param>
 /// <param name="minBoxArea">Minimum area of boxes.</param>
 /// <param name="gamma">Affinity sensitivity.</param>
 /// <param name="kappa">Scale sensitivity.</param>
 public EdgeBoxes(
     float alpha          = 0.65f,
     float beta           = 0.75f,
     float eta            = 1,
     float minScore       = 0.01f,
     int maxBoxes         = 10000,
     float edgeMinMag     = 1.0f,
     float edgeMergeThr   = 0.5f,
     float clusterMinMag  = 0.5f,
     float maxAspectRatio = 3f,
     float minBoxArea     = 1000f,
     float gamma          = 2f,
     float kappa          = 1.5f)
 {
     _ptr = XImgprocInvoke.cveEdgeBoxesCreate(
         alpha,
         beta,
         eta,
         minScore,
         maxBoxes,
         edgeMinMag,
         edgeMergeThr,
         clusterMinMag,
         maxAspectRatio,
         minBoxArea,
         gamma,
         kappa,
         ref _algorithm,
         ref _sharedPtr);
 }
Example #7
 /// <summary>
 /// Release the unmanaged memory associated with this RFFeatureGetter.
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         XImgprocInvoke.cveRFFeatureGetterRelease(ref _ptr, ref _sharedPtr);
     }
 }
Example #8
 /// <summary>
 /// Release the unmanaged memory associated with this object.
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         XImgprocInvoke.cveStructuredEdgeDetectionRelease(ref _ptr, ref _sharedPtr);
     }
 }
Example #9
 /// <summary>
 /// Based on all images, graph segmentations and strategies, computes all possible rects and returns them.
 /// </summary>
 /// <returns>The list of rects. The first ones are more relevant than the last ones.</returns>
 public Rectangle[] Process()
 {
     using (VectorOfRect vr = new VectorOfRect())
     {
         XImgprocInvoke.cveSelectiveSearchSegmentationProcess(_ptr, vr);
         return(vr.ToArray());
     }
 }
Example #10
 /// <inheritdoc />
 protected override void DisposeObject()
 {
     if (_sharedPtr != IntPtr.Zero)
     {
         XImgprocInvoke.cveFastLineDetectorRelease(ref _sharedPtr);
         _ptr = IntPtr.Zero;
     }
 }
Example #11
 /// <summary>
 /// Creates an instance of DisparityWLSFilter and sets up all the relevant filter parameters automatically based on the matcher instance. Currently supports only StereoBM and StereoSGBM.
 /// </summary>
 /// <param name="matcherLeft">stereo matcher instance that will be used with the filter</param>
 public DisparityWLSFilter(IStereoMatcher matcherLeft)
 {
     _ptr = XImgprocInvoke.cveCreateDisparityWLSFilter(
         matcherLeft.StereoMatcherPtr,
         ref _disparityFilterPtr,
         ref _algorithm,
         ref _sharedPtr);
 }
Example #12
 /// <summary>
 /// Produce domain transform filtering operation on source image.
 /// </summary>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="dDepth">Optional depth of the output image. dDepth can be set to Default, which will be equivalent to src.depth().</param>
 public void Filter(IInputArray src, IOutputArray dst, DepthType dDepth = DepthType.Default)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
         {
             XImgprocInvoke.cveDTFilterFilter(_ptr, iaSrc, oaDst, dDepth);
         }
 }
Example #13
 /// <inheritdoc />
 protected override void DisposeObject()
 {
     if (_sharedPtr != IntPtr.Zero)
     {
         XImgprocInvoke.cveEdgeBoxesRelease(ref _sharedPtr);
         _ptr = IntPtr.Zero;
     }
 }
Example #14
 /// <summary>
 /// Creates an instance of DisparityWLSFilter and executes basic initialization routines. When using this constructor you will need to set up the ROI, matchers and other parameters yourself.
 /// </summary>
 /// <param name="useConfidence">Filtering with confidence requires two disparity maps (for the left and right views) and is approximately two times slower. However, quality is typically significantly better.</param>
 public DisparityWLSFilter(bool useConfidence)
 {
     _ptr = XImgprocInvoke.cveCreateDisparityWLSFilterGeneric(
         useConfidence,
         ref _disparityFilterPtr,
         ref _algorithm,
         ref _sharedPtr);
 }
Example #15
 /// <inheritdoc />
 protected override void DisposeObject()
 {
     if (_sharedPtr != IntPtr.Zero)
     {
         XImgprocInvoke.cveRidgeDetectionFilterRelease(ref _sharedPtr);
         _ptr = IntPtr.Zero;
     }
 }
Example #16
 /// <summary>
 /// Performs non-maximum suppression (edge NMS) on the edge image, suppressing edges where the edge is stronger in the orthogonal direction.
 /// </summary>
 /// <param name="edgeImage">edge image from DetectEdges function.</param>
 /// <param name="orientationImage">orientation image from ComputeOrientation function.</param>
 /// <param name="dst">Suppressed image (grayscale, float, in [0;1])</param>
 /// <param name="r">Radius for NMS suppression.</param>
 /// <param name="s">Radius for boundary suppression.</param>
 /// <param name="m">Multiplier for conservative suppression.</param>
 /// <param name="isParallel">Enables/disables parallel computing.</param>
 public void EdgesNms(IInputArray edgeImage, IInputArray orientationImage, IOutputArray dst, int r = 2, int s = 0, float m = 1, bool isParallel = true)
 {
     using (InputArray iaEdgeImage = edgeImage.GetInputArray())
         using (InputArray iaOrientationImage = orientationImage.GetInputArray())
             using (OutputArray oaDst = dst.GetOutputArray())
             {
                 XImgprocInvoke.cveStructuredEdgeDetectionEdgesNms(_ptr, iaEdgeImage, iaOrientationImage, oaDst, r, s, m, isParallel);
             }
 }
Example #17
 /// <inheritdoc />
 protected override void DisposeObject()
 {
     if (_sharedPtr != IntPtr.Zero)
     {
         XImgprocInvoke.cveScanSegmentRelease(ref _sharedPtr);
         _algorithm = IntPtr.Zero;
         _ptr       = IntPtr.Zero;
     }
 }
Example #18
 /// <summary>
 /// Release the unmanaged memory associated with this DisparityWLSFilter
 /// </summary>
 protected override void DisposeObject()
 {
     if (_sharedPtr != IntPtr.Zero)
     {
         XImgprocInvoke.cveDisparityWLSFilterRelease(ref _sharedPtr);
         _ptr                = IntPtr.Zero;
         _algorithm          = IntPtr.Zero;
         _disparityFilterPtr = IntPtr.Zero;
     }
 }
Example #19
 /// <summary>
 /// Returns array containing proposal boxes.
 /// </summary>
 /// <param name="edgeMap">edge image.</param>
 /// <param name="orientationMap">orientation map.</param>
 /// <returns>Proposal boxes.</returns>
 public Rectangle[] GetBoundingBoxes(IInputArray edgeMap, IInputArray orientationMap)
 {
     using (InputArray iaEdgeMap = edgeMap.GetInputArray())
         using (InputArray iaOrientationMap = orientationMap.GetInputArray())
             using (VectorOfRect vr = new VectorOfRect())
             {
                 XImgprocInvoke.cveEdgeBoxesGetBoundingBoxes(_ptr, iaEdgeMap, iaOrientationMap, vr);
                 return(vr.ToArray());
             }
 }
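The edge map and orientation map passed to GetBoundingBoxes normally come from a trained StructuredEdgeDetection model (see the DetectEdges and ComputeOrientation references in the EdgesNms documentation above). The following is only a sketch of that pipeline: the model path "model.yml.gz", the StructuredEdgeDetection constructor and the DetectEdges/ComputeOrientation signatures are assumptions, not shown in these examples.

 // Sketch only: the model path and the StructuredEdgeDetection calls below are
 // assumptions based on the doc comments above, not taken from these examples.
 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.XImgproc;

 using (Mat src = CvInvoke.Imread("scene.png", ImreadModes.Color))
 using (Mat srcF = new Mat())
 using (Mat edges = new Mat())
 using (Mat orientation = new Mat())
 using (StructuredEdgeDetection edgeDetector = new StructuredEdgeDetection("model.yml.gz"))
 using (EdgeBoxes edgeBoxes = new EdgeBoxes(maxBoxes: 50))
 {
     src.ConvertTo(srcF, DepthType.Cv32F, 1.0 / 255.0);      // the model expects a float image in [0, 1]
     edgeDetector.DetectEdges(srcF, edges);                   // assumed (input, output) signature
     edgeDetector.ComputeOrientation(edges, orientation);     // assumed (input, output) signature
     Rectangle[] boxes = edgeBoxes.GetBoundingBoxes(edges, orientation);
 }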
Example #20
 /// <summary>
 /// The function initializes a SuperpixelSEEDS object for the input image.
 /// </summary>
 /// <param name="imageWidth">Image width</param>
 /// <param name="imageHeight">Image height</param>
 /// <param name="imageChannels">Number of channels of the image.</param>
 /// <param name="numSuperpixels">Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to get the actual number.</param>
 /// <param name="numLevels">Number of block levels. The more levels, the more accurate is the segmentation, but needs more memory and CPU time.</param>
 /// <param name="prior">Enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior must be in the range [0, 5].</param>
 /// <param name="histogramBins">Number of histogram bins.</param>
 /// <param name="doubleStep">If true, iterate each block level twice for higher accuracy.</param>
 public SupperpixelSEEDS(int imageWidth, int imageHeight, int imageChannels,
                         int numSuperpixels, int numLevels, int prior,
                         int histogramBins,
                         bool doubleStep)
 {
     _ptr = XImgprocInvoke.cveSuperpixelSEEDSCreate(
         imageWidth, imageHeight, imageChannels,
         numSuperpixels, numLevels, prior,
         histogramBins, doubleStep, ref _sharedPtr);
 }
Example #21
 /// <summary>
 /// Initializes a ScanSegment object.
 /// </summary>
 /// <param name="imageWidth">Image width.</param>
 /// <param name="imageHeight">Image height.</param>
 /// <param name="numSuperpixels">Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size). Use NumberOfSuperpixels to get the actual number.</param>
 /// <param name="slices">Number of processing threads for parallelisation. Setting -1 uses the maximum number of threads. In practice, four threads is enough for smaller images and eight threads for larger ones.</param>
 /// <param name="mergeSmall">Merge small segments to give the desired number of superpixels. Processing is much faster without merging, but many small segments will be left in the image.</param>
 public ScanSegment(
     int imageWidth,
     int imageHeight,
     int numSuperpixels,
     int slices      = 8,
     bool mergeSmall = true)
 {
     _ptr = XImgprocInvoke.cveScanSegmentCreate(
         imageWidth,
         imageHeight,
         numSuperpixels,
         slices,
         mergeSmall,
         ref _algorithm,
         ref _sharedPtr);
 }
Example #22
 /// <summary>
 /// Initializes a new instance of the FastLineDetector object.
 /// </summary>
 /// <param name="lengthThreshold">Segment shorter than this will be discarded.</param>
 /// <param name="distanceThreshold">A point placed from a hypothesis line segment farther than this will be regarded as an outlier.</param>
 /// <param name="cannyThreshold1">First threshold for hysteresis procedure in Canny().</param>
 /// <param name="cannyThreshold2">Second threshold for hysteresis procedure in Canny().</param>
 /// <param name="cannyApertureSize">Aperture size for the Sobel operator in Canny().</param>
 /// <param name="doMerge">If true, incremental merging of segments will be performed </param>
 public FastLineDetector(
     int lengthThreshold     = 10,
     float distanceThreshold = 1.414213562f,
     double cannyThreshold1  = 50.0,
     double cannyThreshold2  = 50.0,
     int cannyApertureSize   = 3,
     bool doMerge            = false)
 {
     _ptr = XImgprocInvoke.cveFastLineDetectorCreate(
         lengthThreshold,
         distanceThreshold,
         cannyThreshold1,
         cannyThreshold2,
         cannyApertureSize,
         doMerge,
         ref _sharedPtr);
 }
Example #23
 /// <summary>
 /// Draws the line segments on a given image.
 /// </summary>
 /// <param name="image">The image, where the lines will be drawn. Should be bigger or equal to the image, where the lines were found.</param>
 /// <param name="lines">A vector of the lines that needed to be drawn.</param>
 /// <param name="drawArrows">If true, arrow heads will be drawn.</param>
 public void DrawSegments(IInputOutputArray image, LineSegment2DF[] lines, bool drawArrows = false)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (Mat matLines = new Mat(lines.Length, 1, DepthType.Cv32F, 4))
         {
             float[] pointData = new float[lines.Length * 4];
             for (int i = 0; i < lines.Length; i++)
             {
                 pointData[i * 4]       = lines[i].P1.X;
                 pointData[(i * 4) + 1] = lines[i].P1.Y;
                 pointData[(i * 4) + 2] = lines[i].P2.X;
                 pointData[(i * 4) + 3] = lines[i].P2.Y;
             }
             matLines.SetTo(pointData);
             using (InputArray iaLines = matLines.GetInputArray())
             {
                 XImgprocInvoke.cveFastLineDetectorDrawSegments(_ptr, ioaImage, iaLines, drawArrows);
             }
         }
 }
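Putting the FastLineDetector constructor, Detect and DrawSegments examples above together, a minimal usage sketch (the file name and parameter values are illustrative assumptions):

 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;
 using Emgu.CV.XImgproc;

 // Detect line segments in a grayscale image and draw them back onto it.
 using (Mat gray = CvInvoke.Imread("building.png", ImreadModes.Grayscale))
 using (FastLineDetector fld = new FastLineDetector(lengthThreshold: 15, doMerge: true))
 {
     LineSegment2DF[] lines = fld.Detect(gray);
     fld.DrawSegments(gray, lines, drawArrows: true);
     CvInvoke.Imwrite("building_lines.png", gray);
 }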
Example #24
 /// <summary>
 /// Create a Ridge detection filter.
 /// </summary>
 /// <param name="dDepthType">Specifies output image depth.</param>
 /// <param name="dChannels">Specifies output image channel.</param>
 /// <param name="dx">Order of derivative x</param>
 /// <param name="dy">Order of derivative y</param>
 /// <param name="ksize">Sobel kernel size</param>
 /// <param name="outDepthType">Converted format for output</param>
 /// <param name="outChannels">Converted format for output</param>
 /// <param name="scale">Optional scale value for derivative values</param>
 /// <param name="delta">Optional bias added to output</param>
 /// <param name="borderType">Pixel extrapolation method</param>
 public RidgeDetectionFilter(
     CvEnum.DepthType dDepthType = CvEnum.DepthType.Cv32F,
     int dChannels = 1,
     int dx        = 1,
     int dy        = 1,
     int ksize     = 3,
     CvEnum.DepthType outDepthType = CvEnum.DepthType.Cv8U,
     int outChannels = 1,
     double scale    = 1,
     double delta    = 0,
     Emgu.CV.CvEnum.BorderType borderType = Emgu.CV.CvEnum.BorderType.Default)
 {
     _ptr = XImgprocInvoke.cveRidgeDetectionFilterCreate(
         CvInvoke.MakeType(dDepthType, dChannels),
         dx,
         dy,
         ksize,
         CvInvoke.MakeType(outDepthType, outChannels),
         scale,
         delta,
         borderType,
         ref _algorithm,
         ref _sharedPtr);
 }
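A minimal usage sketch for the ridge filter. The call to GetRidgeFilteredImage is an assumption based on the underlying OpenCV class (it is not shown in these examples), and the file name is illustrative:

 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.XImgproc;

 using (Mat src = CvInvoke.Imread("vessels.png", ImreadModes.Grayscale))
 using (Mat ridges = new Mat())
 using (RidgeDetectionFilter ridgeFilter = new RidgeDetectionFilter())
 {
     // Assumed wrapper of OpenCV's getRidgeFilteredImage method.
     ridgeFilter.GetRidgeFilteredImage(src, ridges);
     CvInvoke.Imwrite("vessels_ridges.png", ridges);
 }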
Example #25
 /// <summary>
 /// Set up the matcher for computing the right-view disparity map that is required in case of filtering with confidence.
 /// </summary>
 /// <param name="matcherLeft">Main stereo matcher instance that will be used with the filter</param>
 public RightMatcher(IStereoMatcher matcherLeft)
 {
     _ptr = XImgprocInvoke.cveCreateRightMatcher(matcherLeft.StereoMatcherPtr, ref _sharedPtr);
 }
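The two DisparityWLSFilter constructors and RightMatcher are typically used together: the left matcher drives the filter's parameters, and the right matcher produces the second disparity map needed for confidence-based filtering. A construction-only sketch, assuming StereoBM's constructor takes the number of disparities and the block size (the filtering call itself, inherited from DisparityFilter, is not shown in these examples):

 using Emgu.CV;
 using Emgu.CV.XImgproc;

 // Construction-only sketch; the StereoBM parameter values are illustrative assumptions.
 using (StereoBM leftMatcher = new StereoBM(64, 15))
 using (DisparityWLSFilter wlsFilter = new DisparityWLSFilter(leftMatcher))
 using (RightMatcher rightMatcher = new RightMatcher(leftMatcher))
 {
     // leftMatcher and rightMatcher each compute a disparity map for their view;
     // wlsFilter then refines the left map, using the right map for confidence.
 }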
Example #26
 /// <summary>
 /// Add a new image to the list of images to process.
 /// </summary>
 /// <param name="img">The image</param>
 public void AddImage(IInputArray img)
 {
     using (InputArray iaImg = img.GetInputArray())
         XImgprocInvoke.cveSelectiveSearchSegmentationAddImage(_ptr, iaImg);
 }
Example #27
 /// <summary>
 /// Initialize the class with the 'Selective search quality' parameters
 /// </summary>
 /// <param name="baseK">The k parameter for the first graph segmentation</param>
 /// <param name="incK">The increment of the k parameter for all graph segmentations</param>
 /// <param name="sigma">The sigma parameter for the graph segmentation</param>
 public void SwitchToSelectiveSearchQuality(int baseK = 150, int incK = 150, float sigma = 0.8f)
 {
     XImgprocInvoke.cveSelectiveSearchSegmentationSwitchToSelectiveSearchQuality(_ptr, baseK, incK, sigma);
 }
Example #28
 /// <summary>
 /// Initialize the class with the 'Single strategy' parameters
 /// </summary>
 /// <param name="k">The k parameter for the graph segmentation</param>
 /// <param name="sigma">The sigma parameter for the graph segmentation</param>
 public void SwitchToSingleStrategy(int k, float sigma)
 {
     XImgprocInvoke.cveSelectiveSearchSegmentationSwitchToSingleStrategy(_ptr, k, sigma);
 }
Example #29
 /// <summary>
 /// Set an image used by the Switch* functions to initialize the class.
 /// </summary>
 /// <param name="image">The image</param>
 public void SetBaseImage(IInputArray image)
 {
     using (InputArray iaImage = image.GetInputArray())
         XImgprocInvoke.cveSelectiveSearchSegmentationSetBaseImage(_ptr, iaImage);
 }
Example #30
 /// <summary>
 /// Selective search segmentation algorithm
 /// </summary>
 public SelectiveSearchSegmentation()
 {
     _ptr = XImgprocInvoke.cveSelectiveSearchSegmentationCreate();
 }
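The selective search pieces shown above (the constructor, SetBaseImage, SwitchToSelectiveSearchQuality and Process) combine into a simple proposal generator. A minimal sketch with an illustrative file name, drawing only the first 100 proposals (Process returns the most relevant rects first):

 using System.Drawing;
 using System.Linq;
 using Emgu.CV;
 using Emgu.CV.Structure;
 using Emgu.CV.XImgproc;

 using (Mat image = CvInvoke.Imread("scene.png"))
 using (SelectiveSearchSegmentation segmentation = new SelectiveSearchSegmentation())
 {
     segmentation.SetBaseImage(image);
     segmentation.SwitchToSelectiveSearchQuality();        // default baseK, incK and sigma
     Rectangle[] proposals = segmentation.Process();

     foreach (Rectangle rect in proposals.Take(100))
     {
         CvInvoke.Rectangle(image, rect, new MCvScalar(0, 255, 0));
     }
     CvInvoke.Imwrite("scene_proposals.png", image);
 }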