/// <summary> /// Computes a dense optical flow using Gunnar Farneback's algorithm. /// </summary> /// <param name="prev">first 8-bit single-channel input image.</param> /// <param name="next">second input image of the same size and the same type as prev.</param> /// <param name="flow">computed flow image that has the same size as prev and type CV_32FC2.</param> /// <param name="pyrScale">parameter specifying the image scale (<1) to build pyramids for each image; /// pyrScale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.</param> /// <param name="levels">number of pyramid layers including the initial image; /// levels=1 means that no extra layers are created and only the original images are used.</param> /// <param name="winsize">averaging window size; larger values increase the algorithm robustness to /// image noise and give more chances for fast motion detection, but yield a more blurred motion field.</param> /// <param name="iterations">number of iterations the algorithm does at each pyramid level.</param> /// <param name="polyN">size of the pixel neighborhood used to find polynomial expansion in each pixel; /// larger values mean that the image will be approximated with smoother surfaces, /// yielding a more robust algorithm and a more blurred motion field; typically polyN = 5 or 7.</param> /// <param name="polySigma">standard deviation of the Gaussian that is used to smooth derivatives used as /// a basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1, /// for polyN=7, a good value would be polySigma=1.5.</param> /// <param name="flags">operation flags that can be a combination of OPTFLOW_USE_INITIAL_FLOW and/or OPTFLOW_FARNEBACK_GAUSSIAN</param> public static void CalcOpticalFlowFarneback(InputArray prev, InputArray next, InputOutputArray flow, double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma, OpticalFlowFlags flags) { if (prev == null) { throw new ArgumentNullException(nameof(prev)); } if (next == null) { throw new ArgumentNullException(nameof(next)); } if (flow == null) { throw new ArgumentNullException(nameof(flow)); } prev.ThrowIfDisposed(); next.ThrowIfDisposed(); flow.ThrowIfNotReady(); NativeMethods.HandleException( NativeMethods.video_calcOpticalFlowFarneback( prev.CvPtr, next.CvPtr, flow.CvPtr, pyrScale, levels, winsize, iterations, polyN, polySigma, (int)flags)); GC.KeepAlive(prev); GC.KeepAlive(next); flow.Fix(); }
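A minimal usage sketch for the method above, not taken from the library source: the two frames are blank stand-ins, the Cv2 facade is assumed as the hosting class, and the parameter values simply mirror commonly used Farneback settings.

Mat prev = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));  // first 8-bit grayscale frame (stand-in)
Mat next = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));  // second frame, same size and type
Mat flow = new Mat();                                          // receives a CV_32FC2 flow field

Cv2.CalcOpticalFlowFarneback(prev, next, flow,
    pyrScale: 0.5, levels: 3, winsize: 15, iterations: 3,
    polyN: 5, polySigma: 1.2, flags: OpticalFlowFlags.None);

// flow.Get<Point2f>(y, x) now holds the displacement of pixel (x, y) between the two frames.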
/// <summary> /// Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels. /// </summary> /// <param name="size">The size of the image, where lines1 and lines2 were found.</param> /// <param name="lines1">The first group of lines that needs to be drawn. It is visualized in blue color.</param> /// <param name="lines2">The second group of lines. They are visualized in red color.</param> /// <param name="image">Optional image, where the lines will be drawn. /// The image should be color (3-channel) in order for lines1 and lines2 to be drawn /// in the above mentioned colors.</param> /// <returns>The number of non-overlapping (mismatching) pixels.</returns> public virtual int CompareSegments( Size size, InputArray lines1, InputArray lines2, InputOutputArray image = null) { if (lines1 == null) { throw new ArgumentNullException(nameof(lines1)); } if (lines2 == null) { throw new ArgumentNullException(nameof(lines2)); } lines1.ThrowIfDisposed(); lines2.ThrowIfDisposed(); if (image != null) { image.ThrowIfNotReady(); } var ret = NativeMethods.imgproc_LineSegmentDetector_compareSegments( ptr, size, lines1.CvPtr, lines2.CvPtr, Cv2.ToPtr(image)); GC.KeepAlive(lines1); GC.KeepAlive(lines2); if (image != null) { image.Fix(); } return ret; }
/// <summary> /// /// </summary> /// <param name="frame0"></param> /// <param name="frame1"></param> /// <param name="flow"></param> public override void Calc( InputArray frame0, InputArray frame1, InputOutputArray flow) { if (disposed) { throw new ObjectDisposedException("DenseOpticalFlowImpl"); } if (frame0 == null) { throw new ArgumentNullException(nameof(frame0)); } if (frame1 == null) { throw new ArgumentNullException(nameof(frame1)); } if (flow == null) { throw new ArgumentNullException(nameof(flow)); } frame0.ThrowIfDisposed(); frame1.ThrowIfDisposed(); flow.ThrowIfNotReady(); NativeMethods.video_DenseOpticalFlow_calc( ptr, frame0.CvPtr, frame1.CvPtr, flow.CvPtr); flow.Fix(); }
/// <summary> /// /// </summary> /// <param name="frame0"></param> /// <param name="frame1"></param> /// <param name="flow"></param> public override void Calc( InputArray frame0, InputArray frame1, InputOutputArray flow) { ThrowIfDisposed(); if (frame0 == null) { throw new ArgumentNullException(nameof(frame0)); } if (frame1 == null) { throw new ArgumentNullException(nameof(frame1)); } if (flow == null) { throw new ArgumentNullException(nameof(flow)); } frame0.ThrowIfDisposed(); frame1.ThrowIfDisposed(); flow.ThrowIfNotReady(); NativeMethods.video_DenseOpticalFlow_calc( ptr, frame0.CvPtr, frame1.CvPtr, flow.CvPtr); GC.KeepAlive(this); GC.KeepAlive(frame0); GC.KeepAlive(frame1); GC.KeepAlive(flow); flow.Fix(); }
/// <summary> /// computes sparse optical flow using multi-scale Lucas-Kanade algorithm /// </summary> /// <param name="prevImg"></param> /// <param name="nextImg"></param> /// <param name="prevPts"></param> /// <param name="nextPts"></param> /// <param name="status"></param> /// <param name="err"></param> /// <param name="winSize"></param> /// <param name="maxLevel"></param> /// <param name="criteria"></param> /// <param name="flags"></param> /// <param name="minEigThreshold"></param> public static void CalcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg, InputArray prevPts, InputOutputArray nextPts, OutputArray status, OutputArray err, Size?winSize = null, int maxLevel = 3, TermCriteria?criteria = null, OpticalFlowFlags flags = OpticalFlowFlags.None, double minEigThreshold = 1e-4) { if (prevImg == null) { throw new ArgumentNullException(nameof(prevImg)); } if (nextImg == null) { throw new ArgumentNullException(nameof(nextImg)); } if (prevPts == null) { throw new ArgumentNullException(nameof(prevPts)); } if (nextPts == null) { throw new ArgumentNullException(nameof(nextPts)); } if (status == null) { throw new ArgumentNullException(nameof(status)); } if (err == null) { throw new ArgumentNullException(nameof(err)); } prevImg.ThrowIfDisposed(); nextImg.ThrowIfDisposed(); prevPts.ThrowIfDisposed(); nextPts.ThrowIfNotReady(); status.ThrowIfNotReady(); err.ThrowIfNotReady(); var winSize0 = winSize.GetValueOrDefault(new Size(21, 21)); var criteria0 = criteria.GetValueOrDefault( TermCriteria.Both(30, 0.01)); NativeMethods.HandleException( NativeMethods.video_calcOpticalFlowPyrLK_InputArray( prevImg.CvPtr, nextImg.CvPtr, prevPts.CvPtr, nextPts.CvPtr, status.CvPtr, err.CvPtr, winSize0, maxLevel, criteria0, (int)flags, minEigThreshold)); GC.KeepAlive(prevImg); GC.KeepAlive(nextImg); GC.KeepAlive(prevPts); nextPts.Fix(); status.Fix(); err.Fix(); }
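A hedged usage sketch for the overload above, not from the library: the frames and seed points are synthetic, Cv2 is assumed as the hosting class, and Mat.Set is assumed for filling the point matrix. In practice prevPts would come from a feature detector such as GoodFeaturesToTrack.

Mat prevImg = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));  // stand-in frames
Mat nextImg = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));

Mat prevPts = new Mat(2, 1, MatType.CV_32FC2);                    // two seed points to track
prevPts.Set(0, 0, new Point2f(30, 40));
prevPts.Set(1, 0, new Point2f(100, 80));

Mat nextPts = new Mat();   // tracked positions (output)
Mat status = new Mat();    // 1 where the flow was found, 0 otherwise
Mat err = new Mat();       // per-point tracking error

Cv2.CalcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err,
    winSize: new Size(21, 21), maxLevel: 3);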
/// <summary> /// Updates motion history image using the current silhouette /// </summary> /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param> /// <param name="mhi">Motion history image that is updated by the function (single-channel, 32-bit floating-point).</param> /// <param name="timestamp">Current time in milliseconds or other units.</param> /// <param name="duration">Maximal duration of the motion track in the same units as timestamp .</param> public static void UpdateMotionHistory( InputArray silhouette, InputOutputArray mhi, double timestamp, double duration) { if (silhouette == null) throw new ArgumentNullException("silhouette"); if (mhi == null) throw new ArgumentNullException("mhi"); silhouette.ThrowIfDisposed(); mhi.ThrowIfNotReady(); NativeMethods.optflow_motempl_updateMotionHistory( silhouette.CvPtr, mhi.CvPtr, timestamp, duration); mhi.Fix(); }
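A minimal sketch of how the method above is typically driven, not from the library: the silhouette would normally come from thresholded frame differencing, and the call is shown unqualified because the hosting class depends on where this wrapper is exposed in a given build.

Mat silhouette = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));  // non-zero where motion occurred
Mat mhi = new Mat(240, 320, MatType.CV_32FC1, Scalar.All(0));        // motion history image

double timestamp = 1.0;   // current time, e.g. seconds since capture started
double duration = 0.5;    // forget motion older than 0.5 in the same units

UpdateMotionHistory(silhouette, mhi, timestamp, duration);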
/// <summary> /// Draws the line segments on a given image. /// </summary> /// <param name="image">The image, where the lines will be drawn. /// Should be bigger or equal to the image, where the lines were found.</param> /// <param name="lines">A vector of the lines that need to be drawn.</param> public virtual void DrawSegments(InputOutputArray image, InputArray lines) { if (image == null) throw new ArgumentNullException("image"); if (lines == null) throw new ArgumentNullException("lines"); image.ThrowIfNotReady(); lines.ThrowIfDisposed(); NativeMethods.imgproc_LineSegmentDetector_drawSegments(ptr, image.CvPtr, lines.CvPtr); image.Fix(); GC.KeepAlive(lines); }
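A hedged end-to-end sketch for DrawSegments, not from the library: it assumes this build exposes the usual Cv2.CreateLineSegmentDetector factory, a Detect overload that writes into an OutputArray, and Cv2.CvtColor with the GRAY2BGR conversion code for producing a drawable colour copy.

Mat gray = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));   // stand-in input image
var lsd = Cv2.CreateLineSegmentDetector();                      // factory name assumed for this build

Mat lines = new Mat();                                          // one Vec4f (x1, y1, x2, y2) per segment
lsd.Detect(gray, lines);

Mat canvas = new Mat();
Cv2.CvtColor(gray, canvas, ColorConversionCodes.GRAY2BGR);      // give DrawSegments a 3-channel image
lsd.DrawSegments(canvas, lines);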
/// <summary> /// Updates motion history image using the current silhouette /// </summary> /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param> /// <param name="mhi">Motion history image that is updated by the function (single-channel, 32-bit floating-point).</param> /// <param name="timestamp">Current time in milliseconds or other units.</param> /// <param name="duration">Maximal duration of the motion track in the same units as timestamp .</param> public static void UpdateMotionHistory( InputArray silhouette, InputOutputArray mhi, double timestamp, double duration) { if (silhouette == null) { throw new ArgumentNullException(nameof(silhouette)); } if (mhi == null) { throw new ArgumentNullException(nameof(mhi)); } silhouette.ThrowIfDisposed(); mhi.ThrowIfNotReady(); NativeMethods.optflow_motempl_updateMotionHistory( silhouette.CvPtr, mhi.CvPtr, timestamp, duration); mhi.Fix(); }
/// <summary> /// /// </summary> /// <param name="mat"></param> /// <param name="distType"></param> /// <param name="a"></param> /// <param name="b"></param> /// <param name="saturateRange"></param> public void Fill(InputOutputArray mat, DistributionType distType, InputArray a, InputArray b, bool saturateRange = false) { if (mat == null) { throw new ArgumentNullException("mat"); } if (a == null) { throw new ArgumentNullException("a"); } if (b == null) { throw new ArgumentNullException("b"); } mat.ThrowIfNotReady(); a.ThrowIfDisposed(); b.ThrowIfDisposed(); NativeMethods.core_RNG_fill(State, mat.CvPtr, (int)distType, a.CvPtr, b.CvPtr, saturateRange ? 1 : 0); mat.Fix(); }
/// <summary> /// 1つ,または複数のポリゴンで区切られた領域を塗りつぶします. /// </summary> /// <param name="img">画像</param> /// <param name="pts">ポリゴンの配列.各要素は,点の配列で表現されます.</param> /// <param name="color">ポリゴンの色.</param> /// <param name="lineType">ポリゴンの枠線の種類,</param> /// <param name="shift">ポリゴンの頂点座標において,小数点以下の桁を表すビット数.</param> /// <param name="offset"></param> #else /// <summary> /// Fills the area bounded by one or more polygons /// </summary> /// <param name="img">Image</param> /// <param name="pts">Array of polygons, each represented as an array of points</param> /// <param name="color">Polygon color</param> /// <param name="lineType">Type of the polygon boundaries</param> /// <param name="shift">The number of fractional bits in the vertex coordinates</param> /// <param name="offset"></param> #endif public static void FillPoly( InputOutputArray img, InputArray pts, Scalar color, LineTypes lineType = LineTypes.Link8, int shift = 0, Point? offset = null) { if (img == null) throw new ArgumentNullException(nameof(img)); if (pts == null) throw new ArgumentNullException(nameof(pts)); img.ThrowIfDisposed(); pts.ThrowIfDisposed(); Point offset0 = offset.GetValueOrDefault(new Point()); NativeMethods.imgproc_fillPoly_InputOutputArray( img.CvPtr, pts.CvPtr, color, (int)lineType, shift, offset0); GC.KeepAlive(pts); img.Fix(); }
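A small sketch, not from the library, that fills a single triangle; it uses the companion overload taking point collections, which is assumed to exist alongside the InputArray-based overload above.

Mat img = new Mat(240, 320, MatType.CV_8UC3, Scalar.All(0));
var triangle = new[]
{
    new[] { new Point(50, 50), new Point(200, 60), new Point(120, 180) }
};
Cv2.FillPoly(img, triangle, new Scalar(0, 255, 0));   // filled triangle in green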
/// <summary> /// extends the symmetrical matrix from the lower half or from the upper half /// </summary> /// <param name="mtx"> Input-output floating-point square matrix</param> /// <param name="lowerToUpper">If true, the lower half is copied to the upper half, /// otherwise the upper half is copied to the lower half</param> public static void CompleteSymm(InputOutputArray mtx, bool lowerToUpper = false) { if (mtx == null) throw new ArgumentNullException("mtx"); mtx.ThrowIfNotReady(); NativeMethods.core_completeSymm(mtx.CvPtr, lowerToUpper ? 1 : 0); mtx.Fix(); }
/// <summary> /// 枠だけの楕円,もしくは塗りつぶされた楕円を描画する /// </summary> /// <param name="img">楕円が描かれる画像.</param> /// <param name="box">描画したい楕円を囲む矩形領域.</param> /// <param name="color">楕円の色.</param> /// <param name="thickness">楕円境界線の幅.[既定値は1]</param> /// <param name="lineType">楕円境界線の種類.[既定値はLineType.Link8]</param> #else /// <summary> /// Draws simple or thick elliptic arc or fills ellipse sector /// </summary> /// <param name="img">Image. </param> /// <param name="box">The enclosing box of the ellipse drawn </param> /// <param name="color">Ellipse color. </param> /// <param name="thickness">Thickness of the ellipse boundary. [By default this is 1]</param> /// <param name="lineType">Type of the ellipse boundary. [By default this is LineType.Link8]</param> #endif public static void Ellipse(InputOutputArray img, RotatedRect box, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8) { if (img == null) throw new ArgumentNullException(nameof(img)); img.ThrowIfDisposed(); NativeMethods.imgproc_ellipse2(img.CvPtr, box, color, thickness, (int)lineType); img.Fix(); }
/// <summary> /// Draws an arrow segment pointing from the first point to the second one. /// The function arrowedLine draws an arrow between pt1 and pt2 points in the image. /// See also cv::line. /// </summary> /// <param name="img">Image.</param> /// <param name="pt1">The point the arrow starts from.</param> /// <param name="pt2">The point the arrow points to.</param> /// <param name="color">Line color.</param> /// <param name="thickness">Line thickness.</param> /// <param name="lineType">Type of the line, see cv::LineTypes</param> /// <param name="shift">Number of fractional bits in the point coordinates.</param> /// <param name="tipLength">The length of the arrow tip in relation to the arrow length</param> public static void ArrowedLine( InputOutputArray img, Point pt1, Point pt2, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, int shift = 0, double tipLength = 0.1) { if (img == null) throw new ArgumentNullException(nameof(img)); img.ThrowIfNotReady(); NativeMethods.imgproc_arrowedLine( img.CvPtr, pt1, pt2, color, thickness, (int)lineType, shift, tipLength); img.Fix(); }
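A one-call sketch, not from the library, assuming the Cv2 facade: it draws a red arrow on a blank canvas and leaves lineType and shift at their defaults.

Mat img = new Mat(240, 320, MatType.CV_8UC3, Scalar.All(0));
Cv2.ArrowedLine(img, new Point(160, 120), new Point(300, 20),
    new Scalar(0, 0, 255), thickness: 2, tipLength: 0.2);   // BGR red, slightly larger tip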
/// <summary> /// 輪郭線,または内側が塗りつぶされた輪郭を描きます. /// </summary> /// <param name="image">出力画像</param> /// <param name="contours"> 入力される全輪郭.各輪郭は,点のベクトルとして格納されています.</param> /// <param name="contourIdx">描かれる輪郭を示します.これが負値の場合,すべての輪郭が描画されます.</param> /// <param name="color">輪郭の色.</param> /// <param name="thickness">輪郭線の太さ.これが負値の場合(例えば thickness=CV_FILLED ),輪郭の内側が塗りつぶされます.</param> /// <param name="lineType">線の連結性</param> /// <param name="hierarchy">階層に関するオプションの情報.これは,特定の輪郭だけを描画したい場合にのみ必要になります.</param> /// <param name="maxLevel">描画される輪郭の最大レベル.0ならば,指定された輪郭のみが描画されます. /// 1ならば,指定された輪郭と,それに入れ子になったすべての輪郭が描画されます.2ならば,指定された輪郭と, /// それに入れ子になったすべての輪郭,さらにそれに入れ子になったすべての輪郭が描画されます.このパラメータは, /// hierarchy が有効な場合のみ考慮されます.</param> /// <param name="offset">輪郭をシフトするオプションパラメータ.指定された offset = (dx,dy) だけ,すべての描画輪郭がシフトされます.</param> #else /// <summary> /// draws contours in the image /// </summary> /// <param name="image">Destination image.</param> /// <param name="contours">All the input contours. Each contour is stored as a point vector.</param> /// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param> /// <param name="color">Color of the contours.</param> /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ), /// the contour interiors are drawn.</param> /// <param name="lineType">Line connectivity. </param> /// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param> /// <param name="maxLevel">Maximal level for drawn contours. If it is 0, only the specified contour is drawn. /// If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, /// all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account /// when there is hierarchy available.</param> /// <param name="offset">Optional contour shift parameter. Shift all the drawn contours by the specified offset = (dx, dy)</param> #endif public static void DrawContours( InputOutputArray image, IEnumerable<Mat> contours, int contourIdx, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, Mat hierarchy = null, int maxLevel = Int32.MaxValue, Point? offset = null) { if (image == null) throw new ArgumentNullException(nameof(image)); if (contours == null) throw new ArgumentNullException(nameof(contours)); image.ThrowIfNotReady(); Point offset0 = offset.GetValueOrDefault(new Point()); IntPtr[] contoursPtr = EnumerableEx.SelectPtrs(contours); NativeMethods.imgproc_drawContours_InputArray(image.CvPtr, contoursPtr, contoursPtr.Length, contourIdx, color, thickness, (int)lineType, ToPtr(hierarchy), maxLevel, offset0); image.Fix(); }
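A usage sketch, not from the library, pairing this overload with the FindContoursAsMat helper that appears later in this listing; both statics are assumed to live on Cv2, and the binary input is a synthetic blob.

Mat bin = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0));
Cv2.Rectangle(bin, new Rect(60, 60, 120, 80), Scalar.All(255), thickness: -1);  // one white blob

MatOfPoint[] contours = Cv2.FindContoursAsMat(bin, RetrievalModes.External,
    ContourApproximationModes.ApproxSimple);

Mat vis = new Mat(bin.Size(), MatType.CV_8UC3, Scalar.All(0));
Cv2.DrawContours(vis, contours, contourIdx: -1, color: new Scalar(0, 255, 0), thickness: 2);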
/// <summary> /// inserts a single channel to dst (coi is 0-based index) /// </summary> /// <param name="src"></param> /// <param name="dst"></param> /// <param name="coi"></param> public static void InsertChannel(InputArray src, InputOutputArray dst, int coi) { if (src == null) throw new ArgumentNullException("src"); if (dst == null) throw new ArgumentNullException("dst"); src.ThrowIfDisposed(); dst.ThrowIfNotReady(); NativeMethods.core_insertChannel(src.CvPtr, dst.CvPtr, coi); GC.KeepAlive(src); dst.Fix(); }
/// <summary> /// fills array with uniformly-distributed random numbers from the range [low, high) /// </summary> /// <param name="dst">The output array of random numbers. /// The array must be pre-allocated and have 1 to 4 channels</param> /// <param name="low">The inclusive lower boundary of the generated random numbers</param> /// <param name="high">The exclusive upper boundary of the generated random numbers</param> public static void Randu(InputOutputArray dst, InputArray low, InputArray high) { if (dst == null) throw new ArgumentNullException("dst"); if (low == null) throw new ArgumentNullException("low"); if (high == null) throw new ArgumentNullException("high"); dst.ThrowIfNotReady(); low.ThrowIfDisposed(); high.ThrowIfDisposed(); NativeMethods.core_randu_InputArray(dst.CvPtr, low.CvPtr, high.CvPtr); GC.KeepAlive(low); GC.KeepAlive(high); dst.Fix(); }
/// <summary> /// /// </summary> /// <param name="data"></param> /// <param name="mean"></param> /// <param name="eigenvectors"></param> /// <param name="retainedVariance"></param> public static void PCAComputeVar(InputArray data, InputOutputArray mean, OutputArray eigenvectors, double retainedVariance) { if (data == null) throw new ArgumentNullException("data"); if (mean == null) throw new ArgumentNullException("mean"); if (eigenvectors == null) throw new ArgumentNullException("eigenvectors"); data.ThrowIfDisposed(); mean.ThrowIfNotReady(); eigenvectors.ThrowIfNotReady(); NativeMethods.core_PCAComputeVar(data.CvPtr, mean.CvPtr, eigenvectors.CvPtr, retainedVariance); GC.KeepAlive(data); mean.Fix(); eigenvectors.Fix(); }
/// <summary> /// Computes a dense optical flow using Gunnar Farneback's algorithm. /// </summary> /// <param name="prev">first 8-bit single-channel input image.</param> /// <param name="next">second input image of the same size and the same type as prev.</param> /// <param name="flow">computed flow image that has the same size as prev and type CV_32FC2.</param> /// <param name="pyrScale">parameter specifying the image scale (<1) to build pyramids for each image; /// pyrScale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.</param> /// <param name="levels">number of pyramid layers including the initial image; /// levels=1 means that no extra layers are created and only the original images are used.</param> /// <param name="winsize">averaging window size; larger values increase the algorithm robustness to /// image noise and give more chances for fast motion detection, but yield a more blurred motion field.</param> /// <param name="iterations">number of iterations the algorithm does at each pyramid level.</param> /// <param name="polyN">size of the pixel neighborhood used to find polynomial expansion in each pixel; /// larger values mean that the image will be approximated with smoother surfaces, /// yielding a more robust algorithm and a more blurred motion field; typically polyN = 5 or 7.</param> /// <param name="polySigma">standard deviation of the Gaussian that is used to smooth derivatives used as /// a basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1, /// for polyN=7, a good value would be polySigma=1.5.</param> /// <param name="flags">operation flags that can be a combination of OPTFLOW_USE_INITIAL_FLOW and/or OPTFLOW_FARNEBACK_GAUSSIAN</param> public static void CalcOpticalFlowFarneback(InputArray prev, InputArray next, InputOutputArray flow, double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma, OpticalFlowFlags flags) { if (prev == null) throw new ArgumentNullException("prev"); if (next == null) throw new ArgumentNullException("next"); if (flow == null) throw new ArgumentNullException("flow"); prev.ThrowIfDisposed(); next.ThrowIfDisposed(); flow.ThrowIfNotReady(); NativeMethods.video_calcOpticalFlowFarneback(prev.CvPtr, next.CvPtr, flow.CvPtr, pyrScale, levels, winsize, iterations, polyN, polySigma, (int)flags); flow.Fix(); }
/// <summary> /// 円を描画する /// </summary> /// <param name="img">画像</param> /// <param name="center">円の中心</param> /// <param name="radius">円の半径</param> /// <param name="color">円の色</param> /// <param name="thickness">線の幅.負の値を指定した場合は塗りつぶされる.[既定値は1]</param> /// <param name="lineType">線の種類. [既定値はLineType.Link8]</param> /// <param name="shift">中心座標と半径の小数点以下の桁を表すビット数. [既定値は0]</param> #else /// <summary> /// Draws a circle /// </summary> /// <param name="img">Image where the circle is drawn. </param> /// <param name="center">Center of the circle. </param> /// <param name="radius">Radius of the circle. </param> /// <param name="color">Circle color. </param> /// <param name="thickness">Thickness of the circle outline if positive, otherwise indicates that a filled circle has to be drawn. [By default this is 1]</param> /// <param name="lineType">Type of the circle boundary. [By default this is LineType.Link8]</param> /// <param name="shift">Number of fractional bits in the center coordinates and radius value. [By default this is 0]</param> #endif public static void Circle(InputOutputArray img, Point center, int radius, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, int shift = 0) { if (img == null) throw new ArgumentNullException(nameof(img)); img.ThrowIfDisposed(); NativeMethods.imgproc_circle(img.CvPtr, center, radius, color, thickness, (int)lineType, shift); img.Fix(); }
/// <summary> /// computes sparse optical flow using multi-scale Lucas-Kanade algorithm /// </summary> /// <param name="prevImg"></param> /// <param name="nextImg"></param> /// <param name="prevPts"></param> /// <param name="nextPts"></param> /// <param name="status"></param> /// <param name="err"></param> /// <param name="winSize"></param> /// <param name="maxLevel"></param> /// <param name="criteria"></param> /// <param name="flags"></param> /// <param name="minEigThreshold"></param> public static void CalcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg, InputArray prevPts, InputOutputArray nextPts, OutputArray status, OutputArray err, Size? winSize = null, int maxLevel = 3, TermCriteria? criteria = null, OpticalFlowFlags flags = OpticalFlowFlags.None, double minEigThreshold = 1e-4) { if (prevImg == null) throw new ArgumentNullException("prevImg"); if (nextImg == null) throw new ArgumentNullException("nextImg"); if (prevPts == null) throw new ArgumentNullException("prevPts"); if (nextPts == null) throw new ArgumentNullException("nextPts"); if (status == null) throw new ArgumentNullException("status"); if (err == null) throw new ArgumentNullException("err"); prevImg.ThrowIfDisposed(); nextImg.ThrowIfDisposed(); prevPts.ThrowIfDisposed(); nextPts.ThrowIfNotReady(); status.ThrowIfNotReady(); err.ThrowIfNotReady(); Size winSize0 = winSize.GetValueOrDefault(new Size(21, 21)); TermCriteria criteria0 = criteria.GetValueOrDefault( TermCriteria.Both(30, 0.01)); NativeMethods.video_calcOpticalFlowPyrLK_InputArray( prevImg.CvPtr, nextImg.CvPtr, prevPts.CvPtr, nextPts.CvPtr, status.CvPtr, err.CvPtr, winSize0,maxLevel, criteria0, (int)flags, minEigThreshold); nextPts.Fix(); status.Fix(); err.Fix(); }
/// <summary> /// draws one or more polygonal curves /// </summary> /// <param name="img"></param> /// <param name="pts"></param> /// <param name="isClosed"></param> /// <param name="color"></param> /// <param name="thickness"></param> /// <param name="lineType"></param> /// <param name="shift"></param> public static void Polylines( InputOutputArray img, InputArray pts, bool isClosed, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, int shift = 0) { if (img == null) throw new ArgumentNullException(nameof(img)); if (pts == null) throw new ArgumentNullException(nameof(pts)); img.ThrowIfDisposed(); pts.ThrowIfDisposed(); NativeMethods.imgproc_polylines_InputOutputArray( img.CvPtr, pts.CvPtr, isClosed ? 1 : 0, color, thickness, (int)lineType, shift); img.Fix(); GC.KeepAlive(pts); }
/// <summary> /// renders text string in the image /// </summary> /// <param name="img"></param> /// <param name="text"></param> /// <param name="org"></param> /// <param name="fontFace"></param> /// <param name="fontScale"></param> /// <param name="color"></param> /// <param name="thickness"></param> /// <param name="lineType"></param> /// <param name="bottomLeftOrigin"></param> public static void PutText(InputOutputArray img, string text, Point org, HersheyFonts fontFace, double fontScale, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, bool bottomLeftOrigin = false) { if (img == null) throw new ArgumentNullException(nameof(img)); if (String.IsNullOrEmpty(text)) throw new ArgumentNullException(nameof(text)); img.ThrowIfDisposed(); NativeMethods.core_putText(img.CvPtr, text, org, (int)fontFace, fontScale, color, thickness, (int)lineType, bottomLeftOrigin ? 1 : 0); img.Fix(); }
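A minimal sketch, not from the library, assuming the Cv2 facade and the HersheySimplex member of HersheyFonts: it renders a white label near the top-left corner of a blank image.

Mat img = new Mat(120, 320, MatType.CV_8UC3, Scalar.All(0));
Cv2.PutText(img, "hello", new Point(10, 40), HersheyFonts.HersheySimplex,
    fontScale: 1.0, color: Scalar.All(255), thickness: 2);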
/// <summary> /// initializes scaled identity matrix /// </summary> /// <param name="mtx">The matrix to initialize (not necessarily square)</param> /// <param name="s">The value to assign to the diagonal elements</param> public static void SetIdentity(InputOutputArray mtx, Scalar? s = null) { if (mtx == null) throw new ArgumentNullException("mtx"); mtx.ThrowIfNotReady(); Scalar s0 = s.GetValueOrDefault(new Scalar(1)); NativeMethods.core_setIdentity(mtx.CvPtr, s0); mtx.Fix(); }
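A two-line sketch, not from the library, assuming the Cv2 facade: it builds a scaled identity matrix.

Mat m = new Mat(3, 3, MatType.CV_32FC1);
Cv2.SetIdentity(m, new Scalar(2.0));   // diagonal elements become 2, everything else 0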
/// <summary> /// fills array with normally-distributed random numbers with the specified mean and the standard deviation /// </summary> /// <param name="dst">The output array of random numbers. /// The array must be pre-allocated and have 1 to 4 channels</param> /// <param name="mean">The mean value (expectation) of the generated random numbers</param> /// <param name="stddev">The standard deviation of the generated random numbers</param> public static void Randn(InputOutputArray dst, InputArray mean, InputArray stddev) { if (dst == null) throw new ArgumentNullException("dst"); if (mean == null) throw new ArgumentNullException("mean"); if (stddev == null) throw new ArgumentNullException("stddev"); dst.ThrowIfNotReady(); mean.ThrowIfDisposed(); stddev.ThrowIfDisposed(); NativeMethods.core_randn_InputArray(dst.CvPtr, mean.CvPtr, stddev.CvPtr); GC.KeepAlive(mean); GC.KeepAlive(stddev); dst.Fix(); }
/// <summary> /// computes covariation matrix of a set of samples /// </summary> /// <param name="samples"></param> /// <param name="covar"></param> /// <param name="mean"></param> /// <param name="flags"></param> /// <param name="ctype"></param> public static void CalcCovarMatrix(InputArray samples, OutputArray covar, InputOutputArray mean, CovarFlags flags, MatType ctype) { if (samples == null) throw new ArgumentNullException("samples"); if (covar == null) throw new ArgumentNullException("covar"); if (mean == null) throw new ArgumentNullException("mean"); samples.ThrowIfDisposed(); covar.ThrowIfNotReady(); mean.ThrowIfNotReady(); NativeMethods.core_calcCovarMatrix_InputArray(samples.CvPtr, covar.CvPtr, mean.CvPtr, (int)flags, ctype); GC.KeepAlive(samples); covar.Fix(); mean.Fix(); }
/// <summary> /// shuffles the input array elements /// </summary> /// <param name="dst">The input/output numerical 1D array</param> /// <param name="iterFactor">The scale factor that determines the number of random swap operations.</param> /// <param name="rng">The optional random number generator used for shuffling. /// If it is null, theRNG() is used instead.</param> public static void RandShuffle(InputOutputArray dst, double iterFactor, RNG rng = null) { if (dst == null) throw new ArgumentNullException("dst"); dst.ThrowIfNotReady(); if (rng == null) { NativeMethods.core_randShuffle(dst.CvPtr, iterFactor, IntPtr.Zero); } else { ulong state = rng.State; NativeMethods.core_randShuffle(dst.CvPtr, iterFactor, ref state); rng.State = state; } dst.Fix(); }
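A small sketch, not from the library, assuming the Cv2 facade and Mat.Set for element writes: it shuffles a 1xN index row in place, letting the function fall back to theRNG() because no RNG is supplied.

Mat indices = new Mat(1, 10, MatType.CV_32SC1);
for (int i = 0; i < 10; i++)
    indices.Set(0, i, i);                    // 0, 1, ..., 9
Cv2.RandShuffle(indices, iterFactor: 1.0);   // in-place random permutation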
/// <summary> /// clusters the input data using k-Means algorithm /// </summary> /// <param name="data"></param> /// <param name="k"></param> /// <param name="bestLabels"></param> /// <param name="criteria"></param> /// <param name="attempts"></param> /// <param name="flags"></param> /// <param name="centers"></param> /// <returns></returns> public static double Kmeans(InputArray data, int k, InputOutputArray bestLabels, TermCriteria criteria, int attempts, KMeansFlags flags, OutputArray centers = null) { if (data == null) throw new ArgumentNullException("data"); if (bestLabels == null) throw new ArgumentNullException("bestLabels"); data.ThrowIfDisposed(); bestLabels.ThrowIfDisposed(); double ret = NativeMethods.core_kmeans(data.CvPtr, k, bestLabels.CvPtr, criteria, attempts, (int)flags, ToPtr(centers)); bestLabels.Fix(); if(centers != null) centers.Fix(); GC.KeepAlive(data); return ret; }
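A hedged sketch, not from the library, assuming the Cv2 facade and the PpCenters member of KMeansFlags: it clusters random 2-D samples into two groups and reads back the compactness score.

Mat samples = new Mat(100, 2, MatType.CV_32FC1);             // one 2-D sample per row
Cv2.Randu(samples, new Scalar(0), new Scalar(100));          // random points in [0, 100)

Mat labels = new Mat();                                      // per-sample cluster index (output)
Mat centers = new Mat();                                     // cluster centres (output)

double compactness = Cv2.Kmeans(samples, 2, labels,
    TermCriteria.Both(10, 1.0), attempts: 3,
    flags: KMeansFlags.PpCenters, centers: centers);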
/// <summary> /// 輪郭線,または内側が塗りつぶされた輪郭を描きます. /// </summary> /// <param name="image">出力画像</param> /// <param name="contours"> 入力される全輪郭.各輪郭は,点のベクトルとして格納されています.</param> /// <param name="contourIdx">描かれる輪郭を示します.これが負値の場合,すべての輪郭が描画されます.</param> /// <param name="color">輪郭の色.</param> /// <param name="thickness">輪郭線の太さ.これが負値の場合(例えば thickness=CV_FILLED ),輪郭の内側が塗りつぶされます.</param> /// <param name="lineType">線の連結性</param> /// <param name="hierarchy">階層に関するオプションの情報.これは,特定の輪郭だけを描画したい場合にのみ必要になります.</param> /// <param name="maxLevel">描画される輪郭の最大レベル.0ならば,指定された輪郭のみが描画されます. /// 1ならば,指定された輪郭と,それに入れ子になったすべての輪郭が描画されます.2ならば,指定された輪郭と, /// それに入れ子になったすべての輪郭,さらにそれに入れ子になったすべての輪郭が描画されます.このパラメータは, /// hierarchy が有効な場合のみ考慮されます.</param> /// <param name="offset">輪郭をシフトするオプションパラメータ.指定された offset = (dx,dy) だけ,すべての描画輪郭がシフトされます.</param> #else /// <summary> /// draws contours in the image /// </summary> /// <param name="image">Destination image.</param> /// <param name="contours">All the input contours. Each contour is stored as a point vector.</param> /// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param> /// <param name="color">Color of the contours.</param> /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ), /// the contour interiors are drawn.</param> /// <param name="lineType">Line connectivity. </param> /// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param> /// <param name="maxLevel">Maximal level for drawn contours. If it is 0, only the specified contour is drawn. /// If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, /// all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account /// when there is hierarchy available.</param> /// <param name="offset">Optional contour shift parameter. Shift all the drawn contours by the specified offset = (dx, dy)</param> #endif public static void DrawContours( InputOutputArray image, IEnumerable<IEnumerable<Point>> contours, int contourIdx, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, IEnumerable<HierarchyIndex> hierarchy = null, int maxLevel = Int32.MaxValue, Point? offset = null) { if (image == null) throw new ArgumentNullException(nameof(image)); if (contours == null) throw new ArgumentNullException(nameof(contours)); image.ThrowIfNotReady(); Point offset0 = offset.GetValueOrDefault(new Point()); Point[][] contoursArray = EnumerableEx.SelectToArray(contours, EnumerableEx.ToArray); int[] contourSize2 = EnumerableEx.SelectToArray(contoursArray, pts => pts.Length); using (var contoursPtr = new ArrayAddress2<Point>(contoursArray)) { if (hierarchy == null) { NativeMethods.imgproc_drawContours_vector(image.CvPtr, contoursPtr.Pointer, contoursArray.Length, contourSize2, contourIdx, color, thickness, (int)lineType, IntPtr.Zero, 0, maxLevel, offset0); } else { Vec4i[] hiearchyVecs = EnumerableEx.SelectToArray(hierarchy, hi => hi.ToVec4i()); NativeMethods.imgproc_drawContours_vector(image.CvPtr, contoursPtr.Pointer, contoursArray.Length, contourSize2, contourIdx, color, thickness, (int)lineType, hiearchyVecs, hiearchyVecs.Length, maxLevel, offset0); } } image.Fix(); }
/// <summary> /// fills array with uniformly-distributed random numbers from the range [low, high) /// </summary> /// <param name="dst">The output array of random numbers. /// The array must be pre-allocated and have 1 to 4 channels</param> /// <param name="low">The inclusive lower boundary of the generated random numbers</param> /// <param name="high">The exclusive upper boundary of the generated random numbers</param> public static void Randu(InputOutputArray dst, Scalar low, Scalar high) { if (dst == null) throw new ArgumentNullException("dst"); dst.ThrowIfNotReady(); NativeMethods.core_randu_Scalar(dst.CvPtr, low, high); GC.KeepAlive(low); GC.KeepAlive(high); dst.Fix(); }
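A one-call sketch, not from the library, assuming the Cv2 facade: it fills a pre-allocated 3-channel byte image with uniform noise covering the full 8-bit range.

Mat noise = new Mat(100, 100, MatType.CV_8UC3);
Cv2.Randu(noise, new Scalar(0, 0, 0), new Scalar(256, 256, 256));   // upper bound is exclusive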
/// <summary> /// Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels. /// </summary> /// <param name="size">The size of the image, where lines1 and lines2 were found.</param> /// <param name="lines1">The first group of lines that needs to be drawn. It is visualized in blue color.</param> /// <param name="lines2">The second group of lines. They are visualized in red color.</param> /// <param name="image">Optional image, where the lines will be drawn. /// The image should be color (3-channel) in order for lines1 and lines2 to be drawn /// in the above mentioned colors.</param> /// <returns>The number of non-overlapping (mismatching) pixels.</returns> public virtual int CompareSegments( Size size, InputArray lines1, InputArray lines2, InputOutputArray image = null) { if (lines1 == null) throw new ArgumentNullException("lines1"); if (lines2 == null) throw new ArgumentNullException("lines2"); lines1.ThrowIfDisposed(); lines2.ThrowIfDisposed(); if (image != null) image.ThrowIfNotReady(); var ret = NativeMethods.imgproc_LineSegmentDetector_compareSegments( ptr, size, lines1.CvPtr, lines2.CvPtr, Cv2.ToPtr(image)); GC.KeepAlive(lines1); GC.KeepAlive(lines2); if (image != null) image.Fix(); return ret; }
/// <summary> /// fills array with normally-distributed random numbers with the specified mean and the standard deviation /// </summary> /// <param name="dst">The output array of random numbers. /// The array must be pre-allocated and have 1 to 4 channels</param> /// <param name="mean">The mean value (expectation) of the generated random numbers</param> /// <param name="stddev">The standard deviation of the generated random numbers</param> public static void Randn(InputOutputArray dst, Scalar mean, Scalar stddev) { if (dst == null) throw new ArgumentNullException("dst"); dst.ThrowIfNotReady(); NativeMethods.core_randn_Scalar(dst.CvPtr, mean, stddev); dst.Fix(); }
/// <summary> /// 枠のみ,もしくは塗りつぶされた矩形を描画する /// </summary> /// <param name="img">画像</param> /// <param name="rect">矩形</param> /// <param name="color">線の色(RGB),もしくは輝度(グレースケール画像).</param> /// <param name="thickness">矩形を描く線の太さ.負の値を指定した場合は塗りつぶされる. [既定値は1]</param> /// <param name="lineType">線の種類. [既定値はLineType.Link8]</param> /// <param name="shift">座標の小数点以下の桁を表すビット数. [既定値は0]</param> #else /// <summary> /// Draws simple, thick or filled rectangle /// </summary> /// <param name="img">Image. </param> /// <param name="rect">Rectangle.</param> /// <param name="color">Line color (RGB) or brightness (grayscale image). </param> /// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle. [By default this is 1]</param> /// <param name="lineType">Type of the line, see cvLine description. [By default this is LineType.Link8]</param> /// <param name="shift">Number of fractional bits in the point coordinates. [By default this is 0]</param> #endif public static void Rectangle( InputOutputArray img, Rect rect, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, int shift = 0) { if (img == null) throw new ArgumentNullException(nameof(img)); NativeMethods.imgproc_rectangle_InputOutputArray(img.CvPtr, rect.TopLeft, rect.BottomRight, color, thickness, (int)lineType, shift); img.Fix(); }
/// <summary> /// scales and shifts array elements so that either the specified norm (alpha) /// or the minimum (alpha) and maximum (beta) array values get the specified values /// </summary> /// <param name="src">The source array</param> /// <param name="dst">The destination array; will have the same size as src</param> /// <param name="alpha">The norm value to normalize to or the lower range boundary /// in the case of range normalization</param> /// <param name="beta">The upper range boundary in the case of range normalization; /// not used for norm normalization</param> /// <param name="normType">The normalization type</param> /// <param name="dtype">When the parameter is negative, /// the destination array will have the same type as src, /// otherwise it will have the same number of channels as src and the depth =CV_MAT_DEPTH(rtype)</param> /// <param name="mask">The optional operation mask</param> public static void Normalize( InputArray src, InputOutputArray dst, double alpha=1, double beta=0, NormTypes normType=NormTypes.L2, int dtype=-1, InputArray mask=null) { if (src == null) throw new ArgumentNullException("src"); if (dst == null) throw new ArgumentNullException("dst"); src.ThrowIfDisposed(); dst.ThrowIfNotReady(); NativeMethods.core_normalize(src.CvPtr, dst.CvPtr, alpha, beta, (int)normType, dtype, ToPtr(mask)); GC.KeepAlive(src); dst.Fix(); }
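A short sketch, not from the library, assuming the Cv2 facade: it stretches an 8-bit image whose values sit in [50, 150) to the full [0, 255] range with min-max normalization.

Mat src = new Mat(100, 100, MatType.CV_8UC1);
Cv2.Randu(src, new Scalar(50), new Scalar(150));    // values confined to [50, 150)

Mat dst = new Mat();
Cv2.Normalize(src, dst, alpha: 0, beta: 255, normType: NormTypes.MinMax);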
/// <summary> /// 2値画像中の輪郭を検出します. /// </summary> /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます. /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param> /// <param name="mode">輪郭抽出モード</param> /// <param name="method">輪郭の近似手法</param> /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param> /// <return>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</return> #else /// <summary> /// Finds contours in a binary image. /// </summary> /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. /// Zero pixels remain 0’s, so the image is treated as binary. /// The function modifies the image while extracting the contours.</param> /// <param name="mode">Contour retrieval mode</param> /// <param name="method">Contour approximation method</param> /// <param name="offset"> Optional offset by which every contour point is shifted. /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param> /// <returns>Detected contours. Each contour is stored as a vector of points.</returns> #endif public static MatOfPoint[] FindContoursAsMat(InputOutputArray image, RetrievalModes mode, ContourApproximationModes method, Point? offset = null) { if (image == null) throw new ArgumentNullException(nameof(image)); image.ThrowIfNotReady(); Point offset0 = offset.GetValueOrDefault(new Point()); IntPtr contoursPtr; NativeMethods.imgproc_findContours2_OutputArray(image.CvPtr, out contoursPtr, (int)mode, (int)method, offset0); image.Fix(); using (var contoursVec = new VectorOfMat(contoursPtr)) { return contoursVec.ToArray<MatOfPoint>(); } }
/// <summary> /// /// </summary> /// <param name="mat"></param> /// <param name="distType"></param> /// <param name="a"></param> /// <param name="b"></param> /// <param name="saturateRange"></param> public void Fill(InputOutputArray mat, DistributionType distType, InputArray a, InputArray b, bool saturateRange = false) { if (mat == null) throw new ArgumentNullException(nameof(mat)); if (a == null) throw new ArgumentNullException(nameof(a)); if (b == null) throw new ArgumentNullException(nameof(b)); mat.ThrowIfNotReady(); a.ThrowIfDisposed(); b.ThrowIfDisposed(); NativeMethods.core_RNG_fill(ref state, mat.CvPtr, (int) distType, a.CvPtr, b.CvPtr, saturateRange ? 1 : 0); mat.Fix(); }
/// <summary> /// 枠だけの楕円,楕円弧,もしくは塗りつぶされた扇形の楕円を描画する /// </summary> /// <param name="img">楕円が描画される画像</param> /// <param name="center">楕円の中心</param> /// <param name="axes">楕円の軸の長さ</param> /// <param name="angle">回転角度</param> /// <param name="startAngle">楕円弧の開始角度</param> /// <param name="endAngle">楕円弧の終了角度</param> /// <param name="color">楕円の色</param> /// <param name="thickness">楕円弧の線の幅 [既定値は1]</param> /// <param name="lineType">楕円弧の線の種類 [既定値はLineType.Link8]</param> /// <param name="shift">中心座標と軸の長さの小数点以下の桁を表すビット数 [既定値は0]</param> #else /// <summary> /// Draws simple or thick elliptic arc or fills ellipse sector /// </summary> /// <param name="img">Image. </param> /// <param name="center">Center of the ellipse. </param> /// <param name="axes">Length of the ellipse axes. </param> /// <param name="angle">Rotation angle. </param> /// <param name="startAngle">Starting angle of the elliptic arc. </param> /// <param name="endAngle">Ending angle of the elliptic arc. </param> /// <param name="color">Ellipse color. </param> /// <param name="thickness">Thickness of the ellipse arc. [By default this is 1]</param> /// <param name="lineType">Type of the ellipse boundary. [By default this is LineType.Link8]</param> /// <param name="shift">Number of fractional bits in the center coordinates and axes' values. [By default this is 0]</param> #endif public static void Ellipse( InputOutputArray img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, LineTypes lineType = LineTypes.Link8, int shift = 0) { if (img == null) throw new ArgumentNullException(nameof(img)); img.ThrowIfNotReady(); NativeMethods.imgproc_ellipse1(img.CvPtr, center, axes, angle, startAngle, endAngle, color, thickness, (int)lineType, shift); img.Fix(); }
/// <summary> /// Draws the line segments on a given image. /// </summary> /// <param name="image">The image, where the lines will be drawn. /// Should be bigger or equal to the image, where the lines were found.</param> /// <param name="lines">A vector of the lines that need to be drawn.</param> public virtual void DrawSegments(InputOutputArray image, InputArray lines) { if (image == null) throw new ArgumentNullException(nameof(image)); if (lines == null) throw new ArgumentNullException(nameof(lines)); image.ThrowIfNotReady(); lines.ThrowIfDisposed(); NativeMethods.imgproc_LineSegmentDetector_drawSegments(ptr, image.CvPtr, lines.CvPtr); image.Fix(); GC.KeepAlive(lines); }
/// <summary> /// /// </summary> /// <param name="frame0"></param> /// <param name="frame1"></param> /// <param name="flow"></param> public override void Calc( InputArray frame0, InputArray frame1, InputOutputArray flow) { if (disposed) throw new ObjectDisposedException("DenseOpticalFlowImpl"); if (frame0 == null) throw new ArgumentNullException(nameof(frame0)); if (frame1 == null) throw new ArgumentNullException(nameof(frame1)); if (flow == null) throw new ArgumentNullException(nameof(flow)); frame0.ThrowIfDisposed(); frame1.ThrowIfDisposed(); flow.ThrowIfNotReady(); NativeMethods.video_DenseOpticalFlow_calc( ptr, frame0.CvPtr, frame1.CvPtr, flow.CvPtr); flow.Fix(); }