/// <summary>
/// Finds the positions of internal corners of the chessboard.
/// </summary>
/// <param name="image">Source chessboard view. It must be an 8-bit grayscale or color image.</param>
/// <param name="patternSize">Number of inner corners per a chessboard row and column
/// ( patternSize = Size(points_per_row,points_per_colum) = Size(columns, rows) ).</param>
/// <param name="corners">Output array of detected corners.</param>
/// <param name="flags">Various operation flags that can be zero or a combination of the ChessboardFlag values</param>
/// <returns>The function returns true if all of the corners are found and they are placed in a certain order
/// (row by row, left to right in every row). Otherwise, if the function fails to find all the corners
/// or reorder them, it returns false.</returns>
public static bool FindChessboardCorners(
    InputArray image, Size patternSize, out Point2f[] corners,
    ChessboardFlag flags = ChessboardFlag.AdaptiveThresh | ChessboardFlag.NormalizeImage)
{
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfDisposed();

    using (var cornersVec = new VectorOfPoint2f())
    {
        // Native call fills the vector; non-zero return means the full grid was found.
        int found = NativeMethods.calib3d_findChessboardCorners_InputArray(
            image.CvPtr, patternSize, cornersVec.CvPtr, (int)flags);
        corners = cornersVec.ToArray();
        return found != 0;
    }
}
/// <summary>
/// Finds subpixel-accurate positions of the chessboard corners.
/// </summary>
/// <param name="img">Input image.</param>
/// <param name="corners">Initial corner coordinates; refined in place on return.</param>
/// <param name="regionSize">Size of the search region around each corner.</param>
/// <returns>True if the native refinement reported success; otherwise false.</returns>
public static bool Find4QuadCornerSubpix(InputArray img, [In, Out] Point2f[] corners, Size regionSize)
{
    if (img == null)
        throw new ArgumentNullException("img");
    if (corners == null)
        throw new ArgumentNullException("corners");
    img.ThrowIfDisposed();

    using (var cornersVec = new VectorOfPoint2f(corners))
    {
        int result = NativeMethods.calib3d_find4QuadCornerSubpix_InputArray(
            img.CvPtr, cornersVec.CvPtr, regionSize);
        // Copy the refined coordinates back into the caller's array.
        Point2f[] refined = cornersVec.ToArray();
        Array.Copy(refined, corners, corners.Length);
        return result != 0;
    }
}
/// <summary>
/// Finds centers in the grid of circles.
/// </summary>
/// <param name="image">grid view of input circles; it must be an 8-bit grayscale or color image.</param>
/// <param name="patternSize">number of circles per row and column
/// ( patternSize = Size(points_per_row, points_per_colum) ).</param>
/// <param name="centers">output array of detected centers.</param>
/// <param name="flags">various operation flags that can be one of the FindCirclesGridFlag values</param>
/// <param name="blobDetector">feature detector that finds blobs like dark circles on light background.</param>
/// <returns>True if the whole grid was found; otherwise false.</returns>
public static bool FindCirclesGrid(
    InputArray image,
    Size patternSize,
    out Point2f[] centers,
    FindCirclesGridFlag flags = FindCirclesGridFlag.SymmetricGrid,
    FeatureDetector blobDetector = null)
{
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfDisposed();

    using (var centersVec = new VectorOfPoint2f())
    {
        // blobDetector may be null; ToPtr maps null to IntPtr.Zero for the native layer.
        int found = NativeMethods.calib3d_findCirclesGrid_InputArray(
            image.CvPtr, patternSize, centersVec.CvPtr, (int)flags, ToPtr(blobDetector));
        centers = centersVec.ToArray();
        return found != 0;
    }
}
/// <summary>
/// finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima
/// </summary>
/// <param name="src">Input 8-bit or floating-point 32-bit, single-channel image.</param>
/// <param name="maxCorners">Maximum number of corners to return. If there are more corners than are found,
/// the strongest of them is returned.</param>
/// <param name="qualityLevel">Parameter characterizing the minimal accepted quality of image corners.
/// The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
/// or the Harris function response (see cornerHarris() ). The corners with the quality measure less than
/// the product are rejected. For example, if the best corner has the quality measure = 1500, and the qualityLevel=0.01,
/// then all the corners with the quality measure less than 15 are rejected.</param>
/// <param name="minDistance">Minimum possible Euclidean distance between the returned corners.</param>
/// <param name="mask">Optional region of interest. If the image is not empty
/// (it needs to have the type CV_8UC1 and the same size as image ), it specifies the region
/// in which the corners are detected.</param>
/// <param name="blockSize">Size of an average block for computing a derivative covariation matrix over each pixel neighborhood.</param>
/// <param name="useHarrisDetector">Parameter indicating whether to use a Harris detector</param>
/// <param name="k">Free parameter of the Harris detector.</param>
/// <returns>Output vector of detected corners.</returns>
public static Point2f[] GoodFeaturesToTrack(InputArray src, int maxCorners, double qualityLevel, double minDistance,
    InputArray mask, int blockSize, bool useHarrisDetector, double k)
{
    if (src == null)
        throw new ArgumentNullException("src");
    src.ThrowIfDisposed();

    using (var vector = new VectorOfPoint2f())
    {
        IntPtr maskPtr = ToPtr(mask);
        // BUG FIX: the Harris flag was inverted (useHarrisDetector ? 0 : 1), which selected
        // the Harris detector exactly when the caller asked for the min-eigenvalue measure
        // and vice versa. OpenCV's goodFeaturesToTrack expects a truthy flag to enable Harris.
        NativeMethods.imgproc_goodFeaturesToTrack(src.CvPtr, vector.CvPtr, maxCorners, qualityLevel,
            minDistance, maskPtr, blockSize, useHarrisDetector ? 1 : 0, k);
        return vector.ToArray();
    }
}
/// <summary>
/// Retrieves the Voronoi facets (and their centers) for the given vertex indices,
/// or for all vertices when <paramref name="idx"/> is null.
/// </summary>
/// <param name="idx">Vertex indices to query, or null for every vertex.</param>
/// <param name="facetList">Output: one polygon (vertex array) per facet.</param>
/// <param name="facetCenters">Output: the center point of each facet.</param>
public void GetVoronoiFacetList(IEnumerable<int> idx, out Point2f[][] facetList, out Point2f[] facetCenters)
{
    if (disposed)
        throw new ObjectDisposedException("Subdiv2D", "");

    IntPtr facetsPtr, centersPtr;
    if (idx == null)
    {
        // Null index set means "all vertices" at the native layer.
        NativeMethods.imgproc_Subdiv2D_getVoronoiFacetList(
            ptr, IntPtr.Zero, 0, out facetsPtr, out centersPtr);
    }
    else
    {
        int[] indices = EnumerableEx.ToArray(idx);
        NativeMethods.imgproc_Subdiv2D_getVoronoiFacetList(
            ptr, indices, indices.Length, out facetsPtr, out centersPtr);
    }

    // Wrap the returned native vectors so they are released after conversion.
    using (var facetsVec = new VectorOfVectorPoint2f(facetsPtr))
    {
        facetList = facetsVec.ToArray();
    }
    using (var centersVec = new VectorOfPoint2f(centersPtr))
    {
        facetCenters = centersVec.ToArray();
    }
}
/// <summary>
/// Finds the intersection of two convex polygons.
/// </summary>
/// <param name="p1">First convex polygon.</param>
/// <param name="p2">Second convex polygon.</param>
/// <param name="p12">Output: vertices of the intersection region.</param>
/// <param name="handleNested">Whether to handle the case of one polygon fully inside the other.</param>
/// <returns>The value returned by the native routine (the intersection measure).</returns>
public static float IntersectConvexConvex(IEnumerable<Point2f> p1, IEnumerable<Point2f> p2,
    out Point2f[] p12, bool handleNested = true)
{
    if (p1 == null)
        throw new ArgumentNullException("p1");
    if (p2 == null)
        throw new ArgumentNullException("p2");

    Point2f[] firstPoly = EnumerableEx.ToArray(p1);
    Point2f[] secondPoly = EnumerableEx.ToArray(p2);
    IntPtr intersectionPtr;
    float result = NativeMethods.imgproc_intersectConvexConvex_Point2f(
        firstPoly, firstPoly.Length, secondPoly, secondPoly.Length,
        out intersectionPtr, handleNested ? 1 : 0);

    // Take ownership of the native result vector and convert it to a managed array.
    using (var intersectionVec = new VectorOfPoint2f(intersectionPtr))
    {
        p12 = intersectionVec.ToArray();
    }
    return result;
}
/// <summary>
/// adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="inputCorners">Initial coordinates of the input corners and refined coordinates provided for output.</param>
/// <param name="winSize">Half of the side length of the search window.</param>
/// <param name="zeroZone">Half of the size of the dead region in the middle of the search zone
/// over which the summation in the formula below is not done. It is used sometimes to avoid possible singularities
/// of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such a size.</param>
/// <param name="criteria">Criteria for termination of the iterative process of corner refinement.
/// That is, the process of corner position refinement stops either after criteria.maxCount iterations
/// or when the corner position moves by less than criteria.epsilon on some iteration.</param>
/// <returns>The refined corner coordinates.</returns>
public static Point2f[] CornerSubPix(InputArray image, IEnumerable<Point2f> inputCorners,
    Size winSize, Size zeroZone, CvTermCriteria criteria)
{
    if (image == null)
        throw new ArgumentNullException("image");
    if (inputCorners == null)
        throw new ArgumentNullException("inputCorners");
    image.ThrowIfDisposed();

    // CONSISTENCY: use EnumerableEx.ToArray like every other method in this file
    // (the block previously called Util.ToArray for the same conversion).
    var inputCornersSrc = EnumerableEx.ToArray(inputCorners);
    // Defensive copy so the caller's array is never touched by the native refinement.
    var inputCornersCopy = new Point2f[inputCornersSrc.Length];
    Array.Copy(inputCornersSrc, inputCornersCopy, inputCornersSrc.Length);

    using (var vector = new VectorOfPoint2f(inputCornersCopy))
    {
        NativeMethods.imgproc_cornerSubPix(image.CvPtr, vector.CvPtr, winSize, zeroZone, criteria);
        return vector.ToArray();
    }
}
/// <summary>
/// Computes convex hull for a set of 2D points.
/// </summary>
/// <param name="points">The input 2D point set, represented by CV_32SC2 or CV_32FC2 matrix</param>
/// <param name="clockwise">If true, the output convex hull will be oriented clockwise,
/// otherwise it will be oriented counter-clockwise. Here, the usual screen coordinate
/// system is assumed - the origin is at the top-left corner, x axis is oriented to the right,
/// and y axis is oriented downwards.</param>
/// <returns>The output convex hull. It is a vector of points that form
/// the hull (must have the same type as the input points).</returns>
public static Point2f[] ConvexHull(IEnumerable<Point2f> points, bool clockwise = false)
{
    if (points == null)
        throw new ArgumentNullException("points");

    Point2f[] pointArray = EnumerableEx.ToArray(points);
    IntPtr hullPtr;
    NativeMethods.imgproc_convexHull_Point2f_ReturnsPoints(
        pointArray, pointArray.Length, out hullPtr, clockwise ? 1 : 0);

    // The native side allocates the result vector; wrap it so it is freed after conversion.
    using (var hullVec = new VectorOfPoint2f(hullPtr))
    {
        return hullVec.ToArray();
    }
}
/// <summary>
/// Approximates contour or a curve using Douglas-Peucker algorithm
/// </summary>
/// <param name="curve">The polygon or curve to approximate.</param>
/// <param name="epsilon">Specifies the approximation accuracy.
/// This is the maximum distance between the original curve and its approximation.</param>
/// <param name="closed">If true, the approximated curve is closed
/// (i.e. its first and last vertices are connected), otherwise it’s not</param>
/// <returns>The result of the approximation;
/// The type should match the type of the input curve</returns>
public static Point2f[] ApproxPolyDP(IEnumerable<Point2f> curve, double epsilon, bool closed)
{
    if (curve == null)
        throw new ArgumentNullException("curve");

    Point2f[] curvePoints = EnumerableEx.ToArray(curve);
    IntPtr resultPtr;
    NativeMethods.imgproc_approxPolyDP_Point2f(
        curvePoints, curvePoints.Length, out resultPtr, epsilon, closed ? 1 : 0);

    // Wrap the native result vector so its memory is released after conversion.
    using (var resultVec = new VectorOfPoint2f(resultPtr))
    {
        return resultVec.ToArray();
    }
}
/// <summary>
/// Computes sparse optical flow using the multi-scale Lucas-Kanade algorithm.
/// </summary>
/// <param name="prevImg">First input image.</param>
/// <param name="nextImg">Second input image of the same size and type as prevImg.</param>
/// <param name="prevPts">Points in the first image for which the flow is computed.</param>
/// <param name="nextPts">Output: computed positions of the points in the second image.</param>
/// <param name="status">Output: per-point flag, set when the flow for that point was found.</param>
/// <param name="err">Output: per-point error measure.</param>
/// <param name="winSize">Search window size at each pyramid level (defaults to 21x21).</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="criteria">Termination criteria of the iterative search (defaults to 30 iterations / eps 0.01).</param>
/// <param name="flags">Operation flags.</param>
/// <param name="minEigThreshold">Minimal eigenvalue threshold for filtering out bad points.</param>
public static void CalcOpticalFlowPyrLK(
    InputArray prevImg, InputArray nextImg,
    Point2f[] prevPts, ref Point2f[] nextPts,
    out byte[] status, out float[] err,
    Size? winSize = null,
    int maxLevel = 3,
    TermCriteria? criteria = null,
    OpticalFlowFlags flags = OpticalFlowFlags.None,
    double minEigThreshold = 1e-4)
{
    if (prevImg == null)
        throw new ArgumentNullException("prevImg");
    if (nextImg == null)
        throw new ArgumentNullException("nextImg");
    if (prevPts == null)
        throw new ArgumentNullException("prevPts");
    if (nextPts == null)
        throw new ArgumentNullException("nextPts");
    prevImg.ThrowIfDisposed();
    nextImg.ThrowIfDisposed();

    Size effectiveWinSize = winSize.GetValueOrDefault(new Size(21, 21));
    TermCriteria effectiveCriteria = criteria.GetValueOrDefault(TermCriteria.Both(30, 0.01));

    // NOTE(review): the initial contents of nextPts are never forwarded to the native call
    // (a fresh empty vector is passed), so OpticalFlowFlags.UseInitialFlow presumably has
    // no effect through this overload — confirm against the native wrapper.
    using (var nextPtsVec = new VectorOfPoint2f())
    using (var statusVec = new VectorOfByte())
    using (var errVec = new VectorOfFloat())
    {
        NativeMethods.video_calcOpticalFlowPyrLK_vector(
            prevImg.CvPtr, nextImg.CvPtr, prevPts, prevPts.Length,
            nextPtsVec.CvPtr, statusVec.CvPtr, errVec.CvPtr,
            effectiveWinSize, maxLevel, effectiveCriteria, (int)flags, minEigThreshold);
        nextPts = nextPtsVec.ToArray();
        status = statusVec.ToArray();
        err = errVec.ToArray();
    }
}
/// <summary>
/// Computes the recall/precision curve for the given matches and correctness mask.
/// </summary>
/// <param name="matches1to2">Matches from the first image to the second.</param>
/// <param name="correctMatches1to2Mask">Per-match correctness mask, parallel to matches1to2.</param>
/// <returns>recallPrecisionCurve</returns>
public static Point2f[] ComputeRecallPrecisionCurve(
    DMatch[][] matches1to2, byte[][] correctMatches1to2Mask)
{
    if (matches1to2 == null)
        throw new ArgumentNullException(nameof(matches1to2));
    if (correctMatches1to2Mask == null)
        throw new ArgumentNullException(nameof(correctMatches1to2Mask));

    // Pin the jagged arrays so the native layer can read them directly.
    using (var matchesAddr = new ArrayAddress2<DMatch>(matches1to2))
    using (var maskAddr = new ArrayAddress2<byte>(correctMatches1to2Mask))
    using (var curveVec = new VectorOfPoint2f())
    {
        NativeMethods.features2d_computeRecallPrecisionCurve(
            matchesAddr.Pointer, matchesAddr.Dim1Length, matchesAddr.Dim2Lengths,
            maskAddr.Pointer, maskAddr.Dim1Length, maskAddr.Dim2Lengths,
            curveVec.CvPtr);
        return curveVec.ToArray();
    }
}
/// <summary>
/// Finds out if there is any intersection between two rotated rectangles.
/// If there is then the vertices of the interesecting region are returned as well.
/// Below are some examples of intersection configurations.
/// The hatched pattern indicates the intersecting region and the red
/// vertices are returned by the function.
/// </summary>
/// <param name="rect1">First rectangle</param>
/// <param name="rect2">Second rectangle</param>
/// <param name="intersectingRegion">
/// The output array of the verticies of the intersecting region.
/// It returns at most 8 vertices.</param>
/// <returns>The kind of intersection between the two rectangles.</returns>
public static RectanglesIntersectTypes RotatedRectangleIntersection(
    RotatedRect rect1, RotatedRect rect2, out Point2f[] intersectingRegion)
{
    using (var regionVec = new VectorOfPoint2f())
    {
        int intersectType = NativeMethods.imgproc_rotatedRectangleIntersection_OutputArray(
            rect1, rect2, regionVec.CvPtr);
        intersectingRegion = regionVec.ToArray();
        return (RectanglesIntersectTypes)intersectType;
    }
}