/// <summary>
/// Finds an object center, size, and orientation.
/// </summary>
/// <param name="probImage">Back projection of the object histogram.</param>
/// <param name="window">Initial search window.</param>
/// <param name="criteria">Stop criteria for the underlying MeanShift().</param>
/// <returns>Rotated rectangle that bounds the tracked object.</returns>
public static RotatedRect CamShift(
    InputArray probImage, ref Rect window, TermCriteria criteria)
{
    if (probImage == null)
        throw new ArgumentNullException(nameof(probImage));
    probImage.ThrowIfDisposed();

    RotatedRect result = NativeMethods.video_CamShift(
        probImage.CvPtr, ref window, criteria);
    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(probImage);
    return result;
}
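// Illustrative usage sketch (not part of the wrapper): one tracking step with CamShift.
// Assumes `using OpenCvSharp;` and that the static lives on the usual Cv2 facade;
// the CriteriaTypes member names are taken from typical OpenCvSharp builds.
public static RotatedRect TrackOneStep(Mat backProjection, ref Rect window)
{
    // Stop after 10 iterations, or once the window moves by less than 1 pixel.
    var criteria = new TermCriteria(CriteriaTypes.Count | CriteriaTypes.Eps, 10, 1.0);
    // `window` is updated in place with the new object location.
    return Cv2.CamShift(backProjection, ref window, criteria);
}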
/// <summary>
/// Constructor that performs SVD of the given matrix.
/// </summary>
/// <param name="src">Decomposed matrix.</param>
/// <param name="flags">Operation flags.</param>
public SVD(InputArray src, Flags flags = 0)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    src.ThrowIfDisposed();
    ptr = NativeMethods.core_SVD_new(src.CvPtr, (int)flags);
}
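// Illustrative sketch (not part of the wrapper): decompose a matrix and read back the
// singular values. The W/U/Vt properties, SVD.Flags.FullUV, and MatExpr.ToMat() are
// assumptions taken from the usual OpenCvSharp surface.
public static Mat SingularValues()
{
    using var a = Mat.Eye(3, 3, MatType.CV_64FC1).ToMat();
    using var svd = new SVD(a, SVD.Flags.FullUV);
    // svd.U and svd.Vt hold the left/right singular vectors.
    return svd.W.Clone();  // column vector of singular values
}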
/// <summary>
/// Calculates all of the moments
/// up to the third order of a polygon or rasterized shape.
/// </summary>
/// <param name="array">A raster image (single-channel, 8-bit or floating-point
/// 2D array) or an array (1xN or Nx1) of 2D points (Point or Point2f).</param>
/// <param name="binaryImage">If it is true, then all the non-zero image pixels are treated as 1s.</param>
public Moments(InputArray array, bool binaryImage = false)
{
    if (array == null)
        throw new ArgumentNullException(nameof(array));
    array.ThrowIfDisposed();
    InitializeFromInputArray(array, binaryImage);
}
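// Illustrative sketch (not part of the wrapper): compute the centroid of a binary
// shape from its raw moments. Assumes `using OpenCvSharp;`.
public static Point2d ShapeCentroid(Mat binaryShape)
{
    var m = new Moments(binaryShape, binaryImage: true);
    // Centroid = (m10/m00, m01/m00); m00 is the area of the shape.
    return new Point2d(m.M10 / m.M00, m.M01 / m.M00);
}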
/// <summary>
/// Computes moments of the rasterized shape or a vector of points.
/// </summary>
/// <param name="array">A raster image or an array of 2D points.</param>
/// <param name="binaryImage">If it is true, then all the non-zero image pixels are treated as 1s.</param>
public Moments(InputArray array, bool binaryImage = false)
{
    if (array == null)
        throw new ArgumentNullException(nameof(array));
    array.ThrowIfDisposed();
    WCvMoments m = NativeMethods.imgproc_moments(array.CvPtr, binaryImage ? 1 : 0);
    Initialize(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03);
}
/// <summary>
/// Detect ChArUco Diamond markers.
/// </summary>
/// <param name="image">input image necessary for corner subpixel refinement.</param>
/// <param name="markerCorners">list of detected marker corners from the detectMarkers function.</param>
/// <param name="markerIds">list of marker ids in markerCorners.</param>
/// <param name="squareMarkerLengthRate">rate between square and marker length: squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.</param>
/// <param name="diamondCorners">output list of detected diamond corners (4 corners per diamond). The order is the same as in markerCorners: top left, top right, bottom right and bottom left. Same format as the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f>>).</param>
/// <param name="diamondIds">ids of the diamonds in diamondCorners. The id of each diamond is in fact of type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the diamond.</param>
/// <param name="cameraMatrix">Optional camera calibration matrix.</param>
/// <param name="distCoeffs">Optional camera distortion coefficients.</param>
public static void DetectCharucoDiamond(
    InputArray image, Point2f[][] markerCorners, IEnumerable<int> markerIds,
    float squareMarkerLengthRate, out Point2f[][] diamondCorners, out Vec4i[] diamondIds,
    InputArray? cameraMatrix = null, InputArray? distCoeffs = null)
{
    if (image == null)
    {
        throw new ArgumentNullException(nameof(image));
    }
    if (markerCorners == null)
    {
        throw new ArgumentNullException(nameof(markerCorners));
    }
    if (markerIds == null)
    {
        throw new ArgumentNullException(nameof(markerIds));
    }
    // cameraMatrix and distCoeffs must be supplied together or not at all.
    if (cameraMatrix == null && distCoeffs != null)
    {
        throw new ArgumentNullException(nameof(cameraMatrix));
    }
    if (cameraMatrix != null && distCoeffs == null)
    {
        throw new ArgumentNullException(nameof(distCoeffs));
    }
    image.ThrowIfDisposed();
    cameraMatrix?.ThrowIfDisposed();
    distCoeffs?.ThrowIfDisposed();

    using var markerCornersAddress = new ArrayAddress2<Point2f>(markerCorners);
    using var markerIdsVec = new VectorOfInt32(markerIds);
    using var diamondCornersVec = new VectorOfVectorPoint2f();
    using var diamondIdsVec = new VectorOfVec4i();
    NativeMethods.HandleException(
        NativeMethods.aruco_detectCharucoDiamond(
            image.CvPtr,
            markerCornersAddress.GetPointer(), markerCornersAddress.GetDim1Length(), markerCornersAddress.GetDim2Lengths(),
            markerIdsVec.CvPtr, squareMarkerLengthRate,
            diamondCornersVec.CvPtr, diamondIdsVec.CvPtr,
            cameraMatrix?.CvPtr ?? IntPtr.Zero, distCoeffs?.CvPtr ?? IntPtr.Zero));

    diamondCorners = diamondCornersVec.ToArray();
    diamondIds = diamondIdsVec.ToArray();

    GC.KeepAlive(image);
    GC.KeepAlive(cameraMatrix);
    GC.KeepAlive(distCoeffs);
}
/// <summary> /// /// </summary> /// <param name="data"></param> /// <param name="mean"></param> /// <param name="flags"></param> /// <param name="retainedVariance"></param> public PCA(InputArray data, InputArray mean, Flags flags, double retainedVariance) { if (data == null) throw new ArgumentNullException(nameof(data)); if (mean == null) throw new ArgumentNullException(nameof(mean)); data.ThrowIfDisposed(); mean.ThrowIfDisposed(); ptr = NativeMethods.core_PCA_new3(data.CvPtr, mean.CvPtr, (int)flags, retainedVariance); }
/// <summary> /// /// </summary> /// <param name="data"></param> /// <param name="mean"></param> /// <param name="flags"></param> /// <param name="maxComponents"></param> public PCA(InputArray data, InputArray mean, Flags flags, int maxComponents = 0) { if (data == null) throw new ArgumentNullException(nameof(data)); if (mean == null) throw new ArgumentNullException(nameof(mean)); data.ThrowIfDisposed(); mean.ThrowIfDisposed(); ptr = NativeMethods.core_PCA_new2(data.CvPtr, mean.CvPtr, (int)flags, maxComponents); }
/// <summary>
/// Finds an object on a back projection image.
/// </summary>
/// <param name="probImage">Back projection of the object histogram.</param>
/// <param name="window">Initial search window.</param>
/// <param name="criteria">Stop criteria for the iterative search algorithm.</param>
/// <returns>Number of iterations MeanShift took to converge.</returns>
public static int MeanShift(
    InputArray probImage, ref Rect window, TermCriteria criteria)
{
    if (probImage == null)
        throw new ArgumentNullException(nameof(probImage));
    probImage.ThrowIfDisposed();

    int result = NativeMethods.video_meanShift(
        probImage.CvPtr, ref window, criteria);
    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(probImage);
    return result;
}
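// Illustrative sketch (not part of the wrapper): MeanShift is called exactly like
// CamShift above, but it only translates the window; it never rotates or resizes it.
public static int MeanShiftStep(Mat backProjection, ref Rect window)
{
    var criteria = new TermCriteria(CriteriaTypes.Count | CriteriaTypes.Eps, 10, 1.0);
    return Cv2.MeanShift(backProjection, ref window, criteria);
}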
/// <summary> /// /// </summary> /// <param name="src"></param> /// <param name="dst"></param> /// <param name="colormap"></param> public static void ApplyColorMap(InputArray src, OutputArray dst, ColorMapMode colormap) { if (src == null) throw new ArgumentNullException("src"); if (dst == null) throw new ArgumentNullException("dst"); src.ThrowIfDisposed(); dst.ThrowIfNotReady(); NativeMethods.contrib_applyColorMap(src.CvPtr, dst.CvPtr, (int)colormap); dst.Fix(); }
/// <summary>
/// Detects corners using the FAST algorithm by E. Rosten (extended variant, FASTX).
/// </summary>
/// <param name="image">Grayscale image where keypoints (corners) are detected.</param>
/// <param name="keypoints">Keypoints detected on the image.</param>
/// <param name="threshold">Threshold on difference between intensity of the central pixel and pixels of a circle around this pixel.</param>
/// <param name="nonmaxSupression">If true, non-maximum suppression is applied to detected corners (keypoints).</param>
/// <param name="type">One of the three neighborhoods as defined in the paper.</param>
public static void FASTX(InputArray image, out KeyPoint[] keypoints, int threshold, bool nonmaxSupression, int type)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfDisposed();

    using (var kp = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_FASTX(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, type);
        keypoints = kp.ToArray();
    }
}
/// <summary>
/// Forms a border around the image
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same type as src and
/// the size Size(src.cols+left+right, src.rows+top+bottom)</param>
/// <param name="top">Specifies how many pixels to extrapolate above the source image rectangle</param>
/// <param name="bottom">Specifies how many pixels to extrapolate below the source image rectangle</param>
/// <param name="left">Specifies how many pixels to extrapolate to the left of the source image rectangle</param>
/// <param name="right">Specifies how many pixels to extrapolate to the right of the source image rectangle</param>
/// <param name="borderType">The border type</param>
/// <param name="value">The border value if borderType == Constant</param>
public static void CopyMakeBorder(InputArray src, OutputArray dst,
    int top, int bottom, int left, int right, BorderType borderType, Scalar? value = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    Scalar value0 = value.GetValueOrDefault(new Scalar());
    NativeMethods.imgproc_copyMakeBorder(src.CvPtr, dst.CvPtr, top, bottom, left, right, (int)borderType, value0);
    dst.Fix();
}
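// Illustrative sketch (not part of the wrapper): pad an image by 10 pixels on every
// side with a solid black border.
public static Mat PadWithBlackBorder(Mat src)
{
    var dst = new Mat();
    Cv2.CopyMakeBorder(src, dst, 10, 10, 10, 10, BorderType.Constant, Scalar.Black);
    return dst;
}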
/// <summary>
/// Performs image denoising using the Non-local Means Denoising algorithm
/// with several computational optimizations. The noise is expected to be Gaussian white noise.
/// </summary>
/// <param name="src">Input 8-bit 1-channel, 2-channel or 3-channel image.</param>
/// <param name="dst">Output image with the same size and type as src.</param>
/// <param name="h">
/// Parameter regulating filter strength. A big h value perfectly removes noise but also removes image details;
/// a smaller h value preserves details but also preserves some noise.</param>
/// <param name="templateWindowSize">
/// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value: 7 pixels.</param>
/// <param name="searchWindowSize">
/// Size in pixels of the window that is used to compute the weighted average for a given pixel.
/// Should be odd. Affects performance linearly: a greater searchWindowSize means greater denoising time. Recommended value: 21 pixels.</param>
public static void FastNlMeansDenoising(InputArray src, OutputArray dst, float h = 3,
    int templateWindowSize = 7, int searchWindowSize = 21)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();
    NativeMethods.photo_fastNlMeansDenoising(src.CvPtr, dst.CvPtr, h, templateWindowSize, searchWindowSize);
    dst.Fix();
}
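// Illustrative sketch (not part of the wrapper): denoise a grayscale image with the
// recommended defaults; raise `h` for stronger (but more detail-destroying) smoothing.
public static Mat Denoise(Mat noisyGray)
{
    var clean = new Mat();
    Cv2.FastNlMeansDenoising(noisyGray, clean, h: 3, templateWindowSize: 7, searchWindowSize: 21);
    return clean;
}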
/// <summary>
/// Detects corners using the FAST algorithm
/// </summary>
/// <param name="image">grayscale image where keypoints (corners) are detected.</param>
/// <param name="threshold">threshold on difference between intensity of the central pixel
/// and pixels of a circle around this pixel.</param>
/// <param name="nonmaxSupression">if true, non-maximum suppression is applied to
/// detected corners (keypoints).</param>
/// <param name="type">one of the three neighborhoods as defined in the paper</param>
/// <returns>keypoints detected on the image.</returns>
public static KeyPoint[] FAST(InputArray image, int threshold, bool nonmaxSupression, FASTType type)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfDisposed();

    using (var kp = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_FAST2(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, (int)type);
        GC.KeepAlive(image);
        return kp.ToArray();
    }
}
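// Illustrative sketch (not part of the wrapper): detect FAST corners on a grayscale
// image. The Type_9_16 member name and the Cv2 facade are assumptions from typical
// OpenCvSharp builds.
public static KeyPoint[] DetectFastCorners(Mat gray)
{
    // Threshold 20 with non-max suppression keeps the strongest corner per cluster.
    return Cv2.FAST(gray, 20, true, FASTType.Type_9_16);
}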
/// <summary>
/// Converts a rotation vector to a rotation matrix or vice versa using the Rodrigues transformation
/// </summary>
/// <param name="src">Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).</param>
/// <param name="dst">Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.</param>
/// <param name="jacobian">Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial derivatives of the output array components with respect to the input array components.</param>
public static void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.calib3d_Rodrigues(src.CvPtr, dst.CvPtr, ToPtr(jacobian));
    dst.Fix();
    if (jacobian != null)
        jacobian.Fix();
}
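// Illustrative sketch (not part of the wrapper): convert a 3x1 rotation vector
// (e.g. the rvec produced by a pose-estimation call) to a 3x3 rotation matrix.
public static Mat RotationVectorToMatrix(Mat rvec)
{
    var rmat = new Mat();
    Cv2.Rodrigues(rvec, rmat);
    return rmat;  // 3x3 rotation matrix
}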
/// <summary>
/// Updates motion history image using the current silhouette
/// </summary>
/// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param>
/// <param name="mhi">Motion history image that is updated by the function (single-channel, 32-bit floating-point).</param>
/// <param name="timestamp">Current time in milliseconds or other units.</param>
/// <param name="duration">Maximal duration of the motion track in the same units as timestamp.</param>
public static void UpdateMotionHistory(
    InputArray silhouette, InputOutputArray mhi,
    double timestamp, double duration)
{
    if (silhouette == null)
        throw new ArgumentNullException(nameof(silhouette));
    if (mhi == null)
        throw new ArgumentNullException(nameof(mhi));
    silhouette.ThrowIfDisposed();
    mhi.ThrowIfNotReady();

    NativeMethods.video_updateMotionHistory(
        silhouette.CvPtr, mhi.CvPtr, timestamp, duration);
    mhi.Fix();
}
/// <summary>
/// Detects corners using the AGAST algorithm
/// </summary>
/// <param name="image">grayscale image where keypoints (corners) are detected.</param>
/// <param name="threshold">threshold on difference between intensity of the central pixel
/// and pixels of a circle around this pixel.</param>
/// <param name="nonmaxSuppression">if true, non-maximum suppression is applied to
/// detected corners (keypoints).</param>
/// <param name="type">one of the four neighborhoods as defined in the paper</param>
/// <returns>keypoints detected on the image.</returns>
public static KeyPoint[] AGAST(InputArray image, int threshold, bool nonmaxSuppression, AGASTType type)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfDisposed();

    using (var vector = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_AGAST(image.CvPtr, vector.CvPtr, threshold,
            nonmaxSuppression ? 1 : 0, (int)type);
        GC.KeepAlive(image);
        return vector.ToArray();
    }
}
/// <summary>
/// The update operator that takes the next video frame and returns the current foreground mask as an 8-bit binary image.
/// </summary>
/// <param name="image">Next video frame.</param>
/// <param name="fgmask">Output foreground mask as an 8-bit binary image.</param>
/// <param name="learningRate">The rate at which the background model is learnt, between 0 and 1.
/// A negative value makes the algorithm use an automatically chosen rate; 0 means the model is not updated at all;
/// 1 means the model is completely reinitialized from the last frame.</param>
public virtual void Apply(InputArray image, OutputArray fgmask, double learningRate = -1)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (fgmask == null)
        throw new ArgumentNullException(nameof(fgmask));
    image.ThrowIfDisposed();
    fgmask.ThrowIfNotReady();

    NativeMethods.video_BackgroundSubtractor_apply(ptr, image.CvPtr, fgmask.CvPtr, learningRate);
    fgmask.Fix();
    GC.KeepAlive(image);
}
/// <summary>
/// Restores the damaged image areas using one of the available inpainting algorithms.
/// </summary>
/// <param name="src">Input 8-bit 1-channel or 3-channel image.</param>
/// <param name="inpaintMask">Inpainting mask, an 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.</param>
/// <param name="dst">Output image with the same size and type as src.</param>
/// <param name="inpaintRadius">Radius of a circular neighborhood of each point inpainted that is considered by the algorithm.</param>
/// <param name="flags">Inpainting method (Navier-Stokes based or Telea's).</param>
public static void Inpaint(InputArray src, InputArray inpaintMask,
    OutputArray dst, double inpaintRadius, InpaintMethod flags)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (inpaintMask == null)
        throw new ArgumentNullException(nameof(inpaintMask));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    inpaintMask.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.photo_inpaint(src.CvPtr, inpaintMask.CvPtr, dst.CvPtr, inpaintRadius, (int)flags);
    dst.Fix();
}
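// Illustrative sketch (not part of the wrapper): fill in masked-out scratches.
// Non-zero mask pixels mark the damaged area to be reconstructed.
public static Mat RemoveScratches(Mat damaged, Mat scratchMask)
{
    var restored = new Mat();
    Cv2.Inpaint(damaged, scratchMask, restored, 3, InpaintMethod.Telea);
    return restored;
}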
/// <summary>
/// Computes the global orientation of the selected motion history image part
/// </summary>
/// <param name="orientation">Motion gradient orientation image calculated by the function CalcMotionGradient().</param>
/// <param name="mask">Mask image. It may be a conjunction of a valid gradient mask, also calculated by CalcMotionGradient(),
/// and the mask of a region whose direction needs to be calculated.</param>
/// <param name="mhi">Motion history image calculated by UpdateMotionHistory().</param>
/// <param name="timestamp">Timestamp passed to UpdateMotionHistory().</param>
/// <param name="duration">Maximum duration of a motion track in milliseconds, passed to UpdateMotionHistory().</param>
/// <returns>Global motion orientation angle, in degrees, between 0 and 360.</returns>
public static double CalcGlobalOrientation(
    InputArray orientation, InputArray mask, InputArray mhi,
    double timestamp, double duration)
{
    if (orientation == null)
        throw new ArgumentNullException(nameof(orientation));
    if (mask == null)
        throw new ArgumentNullException(nameof(mask));
    if (mhi == null)
        throw new ArgumentNullException(nameof(mhi));
    orientation.ThrowIfDisposed();
    mask.ThrowIfDisposed();
    mhi.ThrowIfDisposed();

    return NativeMethods.optflow_motempl_calcGlobalOrientation(
        orientation.CvPtr, mask.CvPtr, mhi.CvPtr, timestamp, duration);
}
/// <summary>
/// Computes the shape distance between two shapes defined by their contours.
/// </summary>
/// <param name="contour1">Contour defining the first shape.</param>
/// <param name="contour2">Contour defining the second shape.</param>
/// <returns>The computed shape distance.</returns>
public virtual float ComputeDistance(InputArray contour1, InputArray contour2)
{
    if (ptr == IntPtr.Zero)
        throw new ObjectDisposedException(GetType().Name);
    if (contour1 == null)
        throw new ArgumentNullException(nameof(contour1));
    if (contour2 == null)
        throw new ArgumentNullException(nameof(contour2));
    contour1.ThrowIfDisposed();
    contour2.ThrowIfDisposed();

    float ret = NativeMethods.shape_ShapeDistanceExtractor_computeDistance(
        ptr, contour1.CvPtr, contour2.CvPtr);
    GC.KeepAlive(contour1);
    GC.KeepAlive(contour2);
    return ret;
}
/// <summary>
/// Computes the motion gradient orientation image from the motion history image
/// </summary>
/// <param name="mhi">Motion history single-channel floating-point image.</param>
/// <param name="mask">Output mask image that has the type CV_8UC1 and the same size as mhi.
/// Its non-zero elements mark pixels where the motion gradient data is correct.</param>
/// <param name="orientation">Output motion gradient orientation image that has the same type and the same size as mhi.
/// Each pixel of the image is a motion orientation, from 0 to 360 degrees.</param>
/// <param name="delta1">Minimal (or maximal) allowed difference between mhi values within a pixel neighborhood.</param>
/// <param name="delta2">Maximal (or minimal) allowed difference between mhi values within a pixel neighborhood.
/// That is, the function finds the minimum ( m(x,y) ) and maximum ( M(x,y) ) mhi values over a 3x3 neighborhood of each pixel
/// and marks the motion orientation at (x, y) as valid only if:
/// min(delta1, delta2) <= M(x,y)-m(x,y) <= max(delta1, delta2).</param>
/// <param name="apertureSize">Aperture size of the Sobel operator.</param>
public static void CalcMotionGradient(
    InputArray mhi, OutputArray mask, OutputArray orientation,
    double delta1, double delta2, int apertureSize = 3)
{
    if (mhi == null)
        throw new ArgumentNullException(nameof(mhi));
    if (mask == null)
        throw new ArgumentNullException(nameof(mask));
    if (orientation == null)
        throw new ArgumentNullException(nameof(orientation));
    mhi.ThrowIfDisposed();
    mask.ThrowIfNotReady();
    orientation.ThrowIfNotReady();

    NativeMethods.video_calcMotionGradient(
        mhi.CvPtr, mask.CvPtr, orientation.CvPtr, delta1, delta2, apertureSize);
    mask.Fix();
    orientation.Fix();
}
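// Illustrative pipeline sketch (not part of the wrapper) combining the three motion
// template calls above: update the history, take its gradient, then estimate the
// global direction of all recent motion. The delta values are example choices.
public static double GlobalMotionDirection(Mat silhouette, Mat mhi, double timestamp)
{
    const double duration = 1.0;  // keep one second of motion history
    Cv2.UpdateMotionHistory(silhouette, mhi, timestamp, duration);

    using var mask = new Mat();
    using var orientation = new Mat();
    Cv2.CalcMotionGradient(mhi, mask, orientation, delta1: 0.5, delta2: 0.05);

    // Angle in degrees (0..360) of the dominant recent motion.
    return Cv2.CalcGlobalOrientation(orientation, mask, mhi, timestamp, duration);
}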
/// <summary>
/// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
/// </summary>
/// <param name="img">8-bit input image.</param>
/// <param name="pyramid">output pyramid.</param>
/// <param name="winSize">window size of optical flow algorithm.
/// Must be not less than the winSize argument of calcOpticalFlowPyrLK().
/// It is needed to calculate required padding for pyramid levels.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="withDerivatives">set to precompute gradients for every pyramid level.
/// If the pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will
/// calculate them internally.</param>
/// <param name="pyrBorder">the border mode for pyramid layers.</param>
/// <param name="derivBorder">the border mode for gradients.</param>
/// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible.
/// You can pass false to force data copying.</param>
/// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
public static int BuildOpticalFlowPyramid(
    InputArray img, OutputArray pyramid, Size winSize, int maxLevel,
    bool withDerivatives = true,
    BorderTypes pyrBorder = BorderTypes.Reflect101,
    BorderTypes derivBorder = BorderTypes.Constant,
    bool tryReuseInputImage = true)
{
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    if (pyramid == null)
        throw new ArgumentNullException(nameof(pyramid));
    img.ThrowIfDisposed();
    pyramid.ThrowIfNotReady();

    int result = NativeMethods.video_buildOpticalFlowPyramid1(
        img.CvPtr, pyramid.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0,
        (int)pyrBorder, (int)derivBorder, tryReuseInputImage ? 1 : 0);
    pyramid.Fix();
    return result;
}
/// <summary>
/// Returns array containing proposal boxes.
/// </summary>
/// <param name="edgeMap">edge image.</param>
/// <param name="orientationMap">orientation map.</param>
/// <param name="boxes">proposal boxes.</param>
public virtual void GetBoundingBoxes(InputArray edgeMap, InputArray orientationMap, out Rect[] boxes)
{
    ThrowIfDisposed();
    if (edgeMap == null)
    {
        throw new ArgumentNullException(nameof(edgeMap));
    }
    if (orientationMap == null)
    {
        throw new ArgumentNullException(nameof(orientationMap));
    }
    edgeMap.ThrowIfDisposed();
    orientationMap.ThrowIfDisposed();

    using (var boxesVec = new VectorOfRect())
    {
        NativeMethods.ximgproc_EdgeBoxes_getBoundingBoxes(ptr, edgeMap.CvPtr, orientationMap.CvPtr, boxesVec.CvPtr);
        boxes = boxesVec.ToArray();
    }
    GC.KeepAlive(this);
    GC.KeepAlive(edgeMap);
    GC.KeepAlive(orientationMap);
}
/// <summary>
/// Apply high-dimensional filtering using adaptive manifolds.
/// </summary>
/// <param name="src">filtering image with any number of channels.</param>
/// <param name="dst">output image.</param>
/// <param name="joint">optional joint (also called guide) image with any number of channels.</param>
public virtual void Filter(InputArray src, OutputArray dst, InputArray? joint = null)
{
    ThrowIfDisposed();
    if (src == null)
    {
        throw new ArgumentNullException(nameof(src));
    }
    if (dst == null)
    {
        throw new ArgumentNullException(nameof(dst));
    }
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();
    joint?.ThrowIfDisposed();

    NativeMethods.HandleException(
        NativeMethods.ximgproc_AdaptiveManifoldFilter_filter(
            ptr, src.CvPtr, dst.CvPtr, joint?.CvPtr ?? IntPtr.Zero));
    GC.KeepAlive(this);
    GC.KeepAlive(src);
    dst.Fix();
    GC.KeepAlive(joint);
}
/// <summary>
/// Trains the statistical model
/// </summary>
/// <param name="samples">training samples</param>
/// <param name="layout">layout of the training samples (a SampleTypes value: RowSample or ColSample)</param>
/// <param name="responses">vector of responses associated with the training samples.</param>
/// <returns>true if the model was trained successfully</returns>
public virtual bool Train(InputArray samples, SampleTypes layout, InputArray responses)
{
    if (ptr == IntPtr.Zero)
    {
        throw new ObjectDisposedException(GetType().Name);
    }
    if (samples == null)
    {
        throw new ArgumentNullException(nameof(samples));
    }
    if (responses == null)
    {
        throw new ArgumentNullException(nameof(responses));
    }
    samples.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    int ret = NativeMethods.ml_StatModel_train2(ptr, samples.CvPtr, (int)layout, responses.CvPtr);
    GC.KeepAlive(this);
    GC.KeepAlive(samples);
    GC.KeepAlive(responses);
    return ret != 0;
}
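// Illustrative sketch (not part of the wrapper): train an SVM where each training
// sample is one row of a CV_32F matrix and the responses are a CV_32S column vector.
// SVM.Create() and SampleTypes.RowSample are assumptions from the usual
// OpenCvSharp.ML surface.
public static SVM TrainSvm(Mat samples32F, Mat responses32S)
{
    var svm = SVM.Create();
    svm.Train(samples32F, SampleTypes.RowSample, responses32S);
    return svm;
}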
/// <summary>
/// Predicts the label and confidence for a given sample.
/// </summary>
/// <param name="src">Sample image to get a prediction from.</param>
/// <param name="label">The predicted label for the given image.</param>
/// <param name="confidence">Associated confidence (distance) for the predicted label.</param>
public virtual void Predict(InputArray src, out int label, out double confidence)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    src.ThrowIfDisposed();
    NativeMethods.contrib_FaceRecognizer_predict2(ptr, src.CvPtr, out label, out confidence);
}
/// <summary>
/// Gets a prediction from a FaceRecognizer.
/// </summary>
/// <param name="src">Sample image to get a prediction from.</param>
/// <returns>The predicted label for the given image.</returns>
public virtual int Predict(InputArray src)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    src.ThrowIfDisposed();
    return NativeMethods.contrib_FaceRecognizer_predict1(ptr, src.CvPtr);
}
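// Illustrative sketch (not part of the wrapper): query a trained recognizer and keep
// the confidence so weak matches can be rejected with a caller-chosen cutoff. The
// FaceRecognizer type name is assumed from the usual OpenCvSharp contrib surface.
public static int PredictWithThreshold(FaceRecognizer model, Mat face, double maxDistance)
{
    model.Predict(face, out int label, out double confidence);
    // Lower confidence means a closer match for the Eigen/Fisher/LBPH recognizers.
    return confidence <= maxDistance ? label : -1;
}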
/// <summary>
/// Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels.
/// </summary>
/// <param name="size">The size of the image, where lines1 and lines2 were found.</param>
/// <param name="lines1">The first group of lines that needs to be drawn. It is visualized in blue color.</param>
/// <param name="lines2">The second group of lines. It is visualized in red color.</param>
/// <param name="image">Optional image, where the lines will be drawn.
/// The image should be color (3-channel) in order for lines1 and lines2 to be drawn
/// in the above mentioned colors.</param>
/// <returns>The number of mismatching pixels.</returns>
public virtual int CompareSegments(
    Size size, InputArray lines1, InputArray lines2, InputOutputArray image = null)
{
    if (lines1 == null)
        throw new ArgumentNullException(nameof(lines1));
    if (lines2 == null)
        throw new ArgumentNullException(nameof(lines2));
    lines1.ThrowIfDisposed();
    lines2.ThrowIfDisposed();
    image?.ThrowIfNotReady();

    var ret = NativeMethods.imgproc_LineSegmentDetector_compareSegments(
        ptr, size, lines1.CvPtr, lines2.CvPtr, Cv2.ToPtr(image));
    GC.KeepAlive(lines1);
    GC.KeepAlive(lines2);
    image?.Fix();
    return ret;
}
/// <summary>
/// Draws the line segments on a given image.
/// </summary>
/// <param name="image">The image, where the lines will be drawn.
/// Should be bigger or equal to the image, where the lines were found.</param>
/// <param name="lines">A vector of the lines that need to be drawn.</param>
public virtual void DrawSegments(InputOutputArray image, InputArray lines)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (lines == null)
        throw new ArgumentNullException(nameof(lines));
    image.ThrowIfNotReady();
    lines.ThrowIfDisposed();

    NativeMethods.imgproc_LineSegmentDetector_drawSegments(ptr, image.CvPtr, lines.CvPtr);
    image.Fix();
    GC.KeepAlive(lines);
}
/// <summary>
/// Finds lines in the input image.
/// This is the output of the default parameters of the algorithm on the above shown image.
/// </summary>
/// <param name="image">A grayscale (CV_8UC1) input image.</param>
/// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending points of a line,
/// where Vec4i/Vec4f is (x1, y1, x2, y2); point 1 is the start, point 2 the end.
/// Returned lines are strictly oriented depending on the gradient.</param>
/// <param name="width">Vector of widths of the regions, where the lines are found, i.e. the width of each line.</param>
/// <param name="prec">Vector of precisions with which the lines are found.</param>
/// <param name="nfa">Vector containing the number of false alarms in the line region,
/// with precision of 10%. The bigger the value, the better the detection (the scale is logarithmic).</param>
public virtual void Detect(InputArray image, out Vec4f[] lines,
    out double[] width, out double[] prec, out double[] nfa)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfDisposed();

    using (var linesVec = new VectorOfVec4f())
    using (var widthVec = new VectorOfDouble())
    using (var precVec = new VectorOfDouble())
    using (var nfaVec = new VectorOfDouble())
    {
        NativeMethods.imgproc_LineSegmentDetector_detect_vector(ptr, image.CvPtr,
            linesVec.CvPtr, widthVec.CvPtr, precVec.CvPtr, nfaVec.CvPtr);
        lines = linesVec.ToArray();
        width = widthVec.ToArray();
        prec = precVec.ToArray();
        nfa = nfaVec.ToArray();
    }
    GC.KeepAlive(image);
}
/// <summary>
/// Finds lines in the input image.
/// This is the output of the default parameters of the algorithm on the above shown image.
/// </summary>
/// <param name="image">A grayscale (CV_8UC1) input image.</param>
/// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending points of a line,
/// where Vec4i/Vec4f is (x1, y1, x2, y2); point 1 is the start, point 2 the end.
/// Returned lines are strictly oriented depending on the gradient.</param>
/// <param name="width">Vector of widths of the regions, where the lines are found, i.e. the width of each line.</param>
/// <param name="prec">Vector of precisions with which the lines are found.</param>
/// <param name="nfa">Vector containing the number of false alarms in the line region,
/// with precision of 10%. The bigger the value, the better the detection (the scale is logarithmic).</param>
public virtual void Detect(InputArray image, OutputArray lines,
    OutputArray width = null, OutputArray prec = null, OutputArray nfa = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (lines == null)
        throw new ArgumentNullException(nameof(lines));
    image.ThrowIfDisposed();
    lines.ThrowIfNotReady();
    width?.ThrowIfNotReady();
    prec?.ThrowIfNotReady();
    nfa?.ThrowIfNotReady();

    NativeMethods.imgproc_LineSegmentDetector_detect_OutputArray(ptr, image.CvPtr,
        lines.CvPtr, Cv2.ToPtr(width), Cv2.ToPtr(prec), Cv2.ToPtr(nfa));
    GC.KeepAlive(image);
    lines.Fix();
    width?.Fix();
    prec?.Fix();
    nfa?.Fix();
}
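// Illustrative sketch (not part of the wrapper): detect segments on a grayscale
// image and draw them back onto a color copy. The LineSegmentDetector.Create factory
// and the ColorConversionCodes member are assumptions from typical OpenCvSharp builds.
public static Mat DetectAndDrawSegments(Mat gray)
{
    using var lsd = LineSegmentDetector.Create();
    using var lines = new Mat();
    lsd.Detect(gray, lines);

    var canvas = new Mat();
    Cv2.CvtColor(gray, canvas, ColorConversionCodes.GRAY2BGR);
    lsd.DrawSegments(canvas, lines);
    return canvas;
}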
/// <summary>
/// Inserts a single channel into dst (coi is a 0-based index).
/// </summary>
/// <param name="src">The input single-channel array to insert</param>
/// <param name="dst">The output multi-channel array</param>
/// <param name="coi">The 0-based index of the destination channel</param>
public static void InsertChannel(InputArray src, InputOutputArray dst, int coi)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.core_insertChannel(src.CvPtr, dst.CvPtr, coi);
    GC.KeepAlive(src);
    dst.Fix();
}
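// Illustrative sketch (not part of the wrapper): overwrite the red channel of a BGR
// image with a single-channel mask (coi 2 is the red channel in BGR ordering).
public static void ReplaceRedChannel(Mat bgr, Mat singleChannel)
{
    Cv2.InsertChannel(singleChannel, bgr, coi: 2);
}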
/// <summary>
/// Transforms a 2D matrix to a 1D row or column vector by taking the sum, minimum, maximum or mean value over all the rows
/// </summary>
/// <param name="src">The source 2D matrix</param>
/// <param name="dst">The destination vector.
/// Its size and type is defined by the dim and dtype parameters</param>
/// <param name="dim">The dimension index along which the matrix is reduced.
/// 0 means that the matrix is reduced to a single row and 1 means that the matrix is reduced to a single column</param>
/// <param name="rtype">The reduction operation to apply: sum, average, maximum or minimum</param>
/// <param name="dtype">When it is negative, the destination vector will have
/// the same type as the source matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels())</param>
public static void Reduce(InputArray src, OutputArray dst, ReduceDimension dim, ReduceTypes rtype, int dtype)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.core_reduce(src.CvPtr, dst.CvPtr, (int)dim, (int)rtype, dtype);
    dst.Fix();
    GC.KeepAlive(src);
}
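// Illustrative sketch (not part of the wrapper): sum each column of a float matrix
// into a single row, keeping the source type (dtype: -1).
public static Mat ColumnSums(Mat src32F)
{
    var sums = new Mat();
    Cv2.Reduce(src32F, sums, ReduceDimension.Row, ReduceTypes.Sum, dtype: -1);
    return sums;  // a 1 x src.Cols row vector
}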
/// <summary>
/// Finds the global minimum and maximum array elements and returns their values and their locations
/// </summary>
/// <param name="src">The source single-channel array</param>
/// <param name="minVal">Pointer to returned minimum value</param>
/// <param name="maxVal">Pointer to returned maximum value</param>
/// <param name="minIdx">Index of the returned minimum element</param>
/// <param name="maxIdx">Index of the returned maximum element</param>
/// <param name="mask">The optional mask used to select a sub-array</param>
public static void MinMaxIdx(InputArray src, out double minVal, out double maxVal,
    out int minIdx, out int maxIdx, InputArray mask = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    src.ThrowIfDisposed();
    NativeMethods.core_minMaxIdx(src.CvPtr, out minVal, out maxVal, out minIdx, out maxIdx, ToPtr(mask));
    GC.KeepAlive(src);
}
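// Illustrative sketch (not part of the wrapper): find the value range of a
// single-channel array; pass a mask to restrict the search region.
public static (double Min, double Max) ValueRange(Mat singleChannel)
{
    Cv2.MinMaxIdx(singleChannel, out var minVal, out var maxVal, out _, out _);
    return (minVal, maxVal);
}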