/// <summary>
/// Computes a dense optical flow using the Gunnar Farneback's algorithm.
/// </summary>
/// <param name="prev">first 8-bit single-channel input image.</param>
/// <param name="next">second input image of the same size and the same type as prev.</param>
/// <param name="flow">computed flow image that has the same size as prev and type CV_32FC2.</param>
/// <param name="pyrScale">parameter, specifying the image scale (&lt;1) to build pyramids for each image;
/// pyrScale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.</param>
/// <param name="levels">number of pyramid layers including the initial image;
/// levels=1 means that no extra layers are created and only the original images are used.</param>
/// <param name="winsize">averaging window size; larger values increase the algorithm robustness to
/// image noise and give more chances for fast motion detection, but yield more blurred motion field.</param>
/// <param name="iterations">number of iterations the algorithm does at each pyramid level.</param>
/// <param name="polyN">size of the pixel neighborhood used to find polynomial expansion in each pixel;
/// larger values mean that the image will be approximated with smoother surfaces,
/// yielding more robust algorithm and more blurred motion field, typically poly_n =5 or 7.</param>
/// <param name="polySigma">standard deviation of the Gaussian that is used to smooth derivatives used as
/// a basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1,
/// for polyN=7, a good value would be polySigma=1.5.</param>
/// <param name="flags">operation flags that can be a combination of OPTFLOW_USE_INITIAL_FLOW and/or OPTFLOW_FARNEBACK_GAUSSIAN</param>
public static void CalcOpticalFlowFarneback(InputArray prev, InputArray next, InputOutputArray flow,
    double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma,
    OpticalFlowFlags flags)
{
    // Validate managed wrappers before touching their native pointers.
    if (prev is null)
        throw new ArgumentNullException(nameof(prev));
    if (next is null)
        throw new ArgumentNullException(nameof(next));
    if (flow is null)
        throw new ArgumentNullException(nameof(flow));
    prev.ThrowIfDisposed();
    next.ThrowIfDisposed();
    flow.ThrowIfNotReady();

    // Invoke the native implementation and translate any native-side error into a managed exception.
    var status = NativeMethods.video_calcOpticalFlowFarneback(
        prev.CvPtr, next.CvPtr, flow.CvPtr,
        pyrScale, levels, winsize, iterations, polyN, polySigma, (int)flags);
    NativeMethods.HandleException(status);

    // Keep the inputs alive for the duration of the native call; publish results to flow.
    GC.KeepAlive(prev);
    GC.KeepAlive(next);
    flow.Fix();
}
/// <summary>
/// Computes a dense optical flow from frame0 to frame1, storing the result in flow.
/// </summary>
/// <param name="frame0">First input frame.</param>
/// <param name="frame1">Second input frame of the same size and type as frame0 — TODO confirm against native contract.</param>
/// <param name="flow">Computed flow image.</param>
/// <exception cref="ObjectDisposedException">Thrown if this instance has been disposed.</exception>
/// <exception cref="ArgumentNullException">Thrown if any argument is null.</exception>
public override void Calc(
    InputArray frame0, InputArray frame1, InputOutputArray flow)
{
    if (disposed)
    {
        throw new ObjectDisposedException("DenseOpticalFlowImpl");
    }
    // nameof instead of string literals keeps the messages refactor-safe.
    if (frame0 == null)
    {
        throw new ArgumentNullException(nameof(frame0));
    }
    if (frame1 == null)
    {
        throw new ArgumentNullException(nameof(frame1));
    }
    if (flow == null)
    {
        throw new ArgumentNullException(nameof(flow));
    }
    frame0.ThrowIfDisposed();
    frame1.ThrowIfDisposed();
    flow.ThrowIfNotReady();
    NativeMethods.video_DenseOpticalFlow_calc(
        ptr, frame0.CvPtr, frame1.CvPtr, flow.CvPtr);
    // Prevent the GC from finalizing the wrappers while the native call still uses their pointers.
    GC.KeepAlive(frame0);
    GC.KeepAlive(frame1);
    flow.Fix();
}
/// <summary>
/// Computes a dense optical flow from frame0 to frame1, storing the result in flow.
/// </summary>
/// <param name="frame0">First input frame.</param>
/// <param name="frame1">Second input frame.</param>
/// <param name="flow">Computed flow image.</param>
public override void Calc(
    InputArray frame0, InputArray frame1, InputOutputArray flow)
{
    ThrowIfDisposed();

    // Guard clauses: reject null or unusable arguments up front.
    if (frame0 is null)
        throw new ArgumentNullException(nameof(frame0));
    if (frame1 is null)
        throw new ArgumentNullException(nameof(frame1));
    if (flow is null)
        throw new ArgumentNullException(nameof(flow));
    frame0.ThrowIfDisposed();
    frame1.ThrowIfDisposed();
    flow.ThrowIfNotReady();

    NativeMethods.video_DenseOpticalFlow_calc(
        ptr, frame0.CvPtr, frame1.CvPtr, flow.CvPtr);
    flow.Fix();
}
/// <summary>
/// computes sparse optical flow using multi-scale Lucas-Kanade algorithm
/// </summary>
/// <param name="prevImg">First 8-bit input image or pyramid.</param>
/// <param name="nextImg">Second input image or pyramid of the same size and type as prevImg.</param>
/// <param name="prevPts">Points for which the flow needs to be found.</param>
/// <param name="nextPts">Computed new positions of the input features.</param>
/// <param name="status">Output status vector.</param>
/// <param name="err">Output error vector.</param>
/// <param name="winSize">Search window size at each pyramid level; defaults to 21x21.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="criteria">Termination criteria of the iterative search; defaults to 30 iterations / eps 0.01.</param>
/// <param name="flags">Operation flags.</param>
/// <param name="minEigThreshold">Minimum eigenvalue threshold for filtering out bad features.</param>
public static void CalcOpticalFlowPyrLK(
    InputArray prevImg, InputArray nextImg,
    InputArray prevPts, InputOutputArray nextPts,
    OutputArray status, OutputArray err,
    Size? winSize = null,
    int maxLevel = 3,
    TermCriteria? criteria = null,
    OpticalFlowFlags flags = OpticalFlowFlags.None,
    double minEigThreshold = 1e-4)
{
    // Argument validation: nulls first, then disposal/readiness state.
    if (prevImg is null)
        throw new ArgumentNullException(nameof(prevImg));
    if (nextImg is null)
        throw new ArgumentNullException(nameof(nextImg));
    if (prevPts is null)
        throw new ArgumentNullException(nameof(prevPts));
    if (nextPts is null)
        throw new ArgumentNullException(nameof(nextPts));
    if (status is null)
        throw new ArgumentNullException(nameof(status));
    if (err is null)
        throw new ArgumentNullException(nameof(err));
    prevImg.ThrowIfDisposed();
    nextImg.ThrowIfDisposed();
    prevPts.ThrowIfDisposed();
    nextPts.ThrowIfNotReady();
    status.ThrowIfNotReady();
    err.ThrowIfNotReady();

    // Apply documented defaults for the optional value-type parameters.
    var effectiveWinSize = winSize ?? new Size(21, 21);
    var effectiveCriteria = criteria ?? TermCriteria.Both(30, 0.01);

    NativeMethods.HandleException(
        NativeMethods.video_calcOpticalFlowPyrLK_InputArray(
            prevImg.CvPtr, nextImg.CvPtr, prevPts.CvPtr, nextPts.CvPtr,
            status.CvPtr, err.CvPtr, effectiveWinSize, maxLevel,
            effectiveCriteria, (int)flags, minEigThreshold));

    GC.KeepAlive(prevImg);
    GC.KeepAlive(nextImg);
    GC.KeepAlive(prevPts);
    nextPts.Fix();
    status.Fix();
    err.Fix();
}
/// <summary>
/// Draws the line segments on a given image.
/// </summary>
/// <param name="image">The image, where the lines will be drawn.
/// Should be bigger or equal to the image, where the lines were found.</param>
/// <param name="lines">A vector of the lines that needed to be drawn.</param>
/// <exception cref="ArgumentNullException">Thrown if image or lines is null.</exception>
public virtual void DrawSegments(InputOutputArray image, InputArray lines)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (lines == null)
        throw new ArgumentNullException(nameof(lines));
    image.ThrowIfNotReady();
    lines.ThrowIfDisposed();

    NativeMethods.imgproc_LineSegmentDetector_drawSegments(ptr, image.CvPtr, lines.CvPtr);

    image.Fix();
    GC.KeepAlive(lines);
}
/// <summary>
/// Updates motion history image using the current silhouette
/// </summary>
/// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param>
/// <param name="mhi">Motion history image that is updated by the function (single-channel, 32-bit floating-point).</param>
/// <param name="timestamp">Current time in milliseconds or other units.</param>
/// <param name="duration">Maximal duration of the motion track in the same units as timestamp .</param>
/// <exception cref="ArgumentNullException">Thrown if silhouette or mhi is null.</exception>
public static void UpdateMotionHistory(
    InputArray silhouette, InputOutputArray mhi,
    double timestamp, double duration)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (silhouette == null)
        throw new ArgumentNullException(nameof(silhouette));
    if (mhi == null)
        throw new ArgumentNullException(nameof(mhi));
    silhouette.ThrowIfDisposed();
    mhi.ThrowIfNotReady();

    NativeMethods.optflow_motempl_updateMotionHistory(
        silhouette.CvPtr, mhi.CvPtr, timestamp, duration);

    // Keep the input wrapper alive while its raw pointer is in use by native code.
    GC.KeepAlive(silhouette);
    mhi.Fix();
}
/// <summary>
/// Updates motion history image using the current silhouette
/// </summary>
/// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param>
/// <param name="mhi">Motion history image that is updated by the function (single-channel, 32-bit floating-point).</param>
/// <param name="timestamp">Current time in milliseconds or other units.</param>
/// <param name="duration">Maximal duration of the motion track in the same units as timestamp .</param>
/// <exception cref="ArgumentNullException">Thrown if silhouette or mhi is null.</exception>
public static void UpdateMotionHistory(
    InputArray silhouette, InputOutputArray mhi,
    double timestamp, double duration)
{
    // Bug fix: the original passed the literal string "nameof(silhouette)" as the parameter name;
    // nameof must be a real expression, not quoted text, to report the correct parameter.
    if (silhouette == null)
    {
        throw new ArgumentNullException(nameof(silhouette));
    }
    if (mhi == null)
    {
        throw new ArgumentNullException(nameof(mhi));
    }
    silhouette.ThrowIfDisposed();
    mhi.ThrowIfNotReady();
    NativeMethods.optflow_motempl_updateMotionHistory(
        silhouette.CvPtr, mhi.CvPtr, timestamp, duration);
    // Keep the input wrapper alive while its raw pointer is in use by native code.
    GC.KeepAlive(silhouette);
    mhi.Fix();
}
/// <summary>
/// Fills the matrix with random numbers drawn from the specified distribution.
/// </summary>
/// <param name="mat">The matrix to fill with random values.</param>
/// <param name="distType">The distribution type (e.g. uniform or normal).</param>
/// <param name="a">First distribution parameter array (semantics depend on distType — presumably
/// lower bound / mean; confirm against the native cv::RNG::fill contract).</param>
/// <param name="b">Second distribution parameter array (presumably upper bound / stddev; confirm likewise).</param>
/// <param name="saturateRange">Whether to saturate the generated values to the value range of the output type.</param>
/// <exception cref="ArgumentNullException">Thrown if mat, a, or b is null.</exception>
public void Fill(InputOutputArray mat, DistributionType distType, InputArray a, InputArray b,
    bool saturateRange = false)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (mat == null)
    {
        throw new ArgumentNullException(nameof(mat));
    }
    if (a == null)
    {
        throw new ArgumentNullException(nameof(a));
    }
    if (b == null)
    {
        throw new ArgumentNullException(nameof(b));
    }
    mat.ThrowIfNotReady();
    a.ThrowIfDisposed();
    b.ThrowIfDisposed();
    NativeMethods.core_RNG_fill(State, mat.CvPtr, (int)distType, a.CvPtr, b.CvPtr, saturateRange ? 1 : 0);
    // Keep the parameter wrappers alive while their raw pointers are in use by native code.
    GC.KeepAlive(a);
    GC.KeepAlive(b);
    mat.Fix();
}
/// <summary>
/// Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels.
/// </summary>
/// <param name="size">The size of the image, where lines1 and lines2 were found.</param>
/// <param name="lines1">The first group of lines that needs to be drawn. It is visualized in blue color.</param>
/// <param name="lines2">The second group of lines. They visualized in red color.</param>
/// <param name="image">Optional image, where the lines will be drawn.
/// The image should be color(3-channel) in order for lines1 and lines2 to be drawn
/// in the above mentioned colors.</param>
/// <returns>The count of non-overlapping (mismatching) pixels.</returns>
/// <exception cref="ArgumentNullException">Thrown if lines1 or lines2 is null.</exception>
public virtual int CompareSegments(
    Size size, InputArray lines1, InputArray lines2, InputOutputArray image = null)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (lines1 == null)
        throw new ArgumentNullException(nameof(lines1));
    if (lines2 == null)
        throw new ArgumentNullException(nameof(lines2));
    lines1.ThrowIfDisposed();
    lines2.ThrowIfDisposed();
    // image is optional; only validate it when supplied.
    if (image != null)
        image.ThrowIfNotReady();

    var ret = NativeMethods.imgproc_LineSegmentDetector_compareSegments(
        ptr, size, lines1.CvPtr, lines2.CvPtr, Cv2.ToPtr(image));

    GC.KeepAlive(lines1);
    GC.KeepAlive(lines2);
    if (image != null)
        image.Fix();
    return ret;
}
/// <summary>
/// extends the symmetrical matrix from the lower half or from the upper half
/// </summary>
/// <param name="mtx"> Input-output floating-point square matrix</param>
/// <param name="lowerToUpper">If true, the lower half is copied to the upper half,
/// otherwise the upper half is copied to the lower half</param>
/// <exception cref="ArgumentNullException">Thrown if mtx is null.</exception>
public static void CompleteSymm(InputOutputArray mtx, bool lowerToUpper = false)
{
    // nameof instead of a string literal keeps the message refactor-safe.
    if (mtx == null)
        throw new ArgumentNullException(nameof(mtx));
    mtx.ThrowIfNotReady();
    NativeMethods.core_completeSymm(mtx.CvPtr, lowerToUpper ? 1 : 0);
    mtx.Fix();
}
/// <summary>
/// Computes a Hanning window coefficients in two dimensions.
/// </summary>
/// <param name="dst">Destination array to place Hann coefficients in</param>
/// <param name="winSize">The window size specifications</param>
/// <param name="type">Created array type</param>
public static void CreateHanningWindow(InputOutputArray dst, Size winSize, MatType type)
{
    // Reject a null or unusable destination before calling into native code.
    if (dst is null)
        throw new ArgumentNullException(nameof(dst));
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_createHanningWindow(dst.CvPtr, winSize, type);

    dst.Fix();
}
/// <summary>
/// Draws a arrow segment pointing from the first point to the second one.
/// The function arrowedLine draws an arrow between pt1 and pt2 points in the image.
/// See also cv::line.
/// </summary>
/// <param name="img">Image.</param>
/// <param name="pt1">The point the arrow starts from.</param>
/// <param name="pt2">The point the arrow points to.</param>
/// <param name="color">Line color.</param>
/// <param name="thickness">Line thickness.</param>
/// <param name="lineType">Type of the line, see cv::LineTypes</param>
/// <param name="shift">Number of fractional bits in the point coordinates.</param>
/// <param name="tipLength">The length of the arrow tip in relation to the arrow length</param>
public static void ArrowedLine(
    InputOutputArray img,
    Point pt1, Point pt2,
    Scalar color,
    int thickness = 1,
    LineTypes lineType = LineTypes.Link8,
    int shift = 0,
    double tipLength = 0.1)
{
    // The target image must exist and be in a writable state.
    if (img is null)
        throw new ArgumentNullException(nameof(img));
    img.ThrowIfNotReady();

    NativeMethods.imgproc_arrowedLine(
        img.CvPtr, pt1, pt2, color,
        thickness, (int)lineType, shift, tipLength);

    img.Fix();
}
/// <summary>
/// fills array with normally-distributed random numbers with the specified mean and the standard deviation
/// </summary>
/// <param name="dst">The output array of random numbers.
/// The array must be pre-allocated and have 1 to 4 channels</param>
/// <param name="mean">The mean value (expectation) of the generated random numbers</param>
/// <param name="stddev">The standard deviation of the generated random numbers</param>
/// <exception cref="ArgumentNullException">Thrown if dst is null.</exception>
public static void Randn(InputOutputArray dst, Scalar mean, Scalar stddev)
{
    // nameof instead of a string literal keeps the message refactor-safe.
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    dst.ThrowIfNotReady();
    NativeMethods.core_randn_Scalar(dst.CvPtr, mean, stddev);
    dst.Fix();
}
/// <summary>
/// scales and shifts array elements so that either the specified norm (alpha)
/// or the minimum (alpha) and maximum (beta) array values get the specified values
/// </summary>
/// <param name="src">The source array</param>
/// <param name="dst">The destination array; will have the same size as src</param>
/// <param name="alpha">The norm value to normalize to or the lower range boundary
/// in the case of range normalization</param>
/// <param name="beta">The upper range boundary in the case of range normalization;
/// not used for norm normalization</param>
/// <param name="normType">The normalization type</param>
/// <param name="dtype">When the parameter is negative,
/// the destination array will have the same type as src,
/// otherwise it will have the same number of channels as src and the depth =CV_MAT_DEPTH(rtype)</param>
/// <param name="mask">The optional operation mask</param>
/// <exception cref="ArgumentNullException">Thrown if src or dst is null.</exception>
public static void Normalize(
    InputArray src, InputOutputArray dst, double alpha = 1, double beta = 0,
    NormTypes normType = NormTypes.L2, int dtype = -1, InputArray mask = null)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.core_normalize(src.CvPtr, dst.CvPtr, alpha, beta,
        (int)normType, dtype, ToPtr(mask));

    GC.KeepAlive(src);
    // mask may be null; GC.KeepAlive(null) is a no-op, so this is safe either way.
    GC.KeepAlive(mask);
    dst.Fix();
}
/// <summary>
/// Performs PCA on the data, retaining as many principal components as needed
/// to keep the specified fraction of variance (delegates to the native PCACompute overload).
/// </summary>
/// <param name="data">Input samples to analyze.</param>
/// <param name="mean">Optional pre-computed mean; also receives the computed mean.</param>
/// <param name="eigenvectors">Output eigenvectors of the covariation matrix.</param>
/// <param name="retainedVariance">Fraction of variance that the retained components should cover.</param>
/// <exception cref="ArgumentNullException">Thrown if data, mean, or eigenvectors is null.</exception>
public static void PCAComputeVar(InputArray data, InputOutputArray mean,
    OutputArray eigenvectors, double retainedVariance)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (data == null)
        throw new ArgumentNullException(nameof(data));
    if (mean == null)
        throw new ArgumentNullException(nameof(mean));
    if (eigenvectors == null)
        throw new ArgumentNullException(nameof(eigenvectors));
    data.ThrowIfDisposed();
    mean.ThrowIfNotReady();
    eigenvectors.ThrowIfNotReady();

    NativeMethods.core_PCAComputeVar(data.CvPtr, mean.CvPtr, eigenvectors.CvPtr, retainedVariance);

    GC.KeepAlive(data);
    mean.Fix();
    eigenvectors.Fix();
}
/// <summary>
/// fills array with uniformly-distributed random numbers from the range [low, high)
/// </summary>
/// <param name="dst">The output array of random numbers.
/// The array must be pre-allocated and have 1 to 4 channels</param>
/// <param name="low">The inclusive lower boundary of the generated random numbers</param>
/// <param name="high">The exclusive upper boundary of the generated random numbers</param>
/// <exception cref="ArgumentNullException">Thrown if dst is null.</exception>
public static void Randu(InputOutputArray dst, Scalar low, Scalar high)
{
    // nameof instead of a string literal keeps the message refactor-safe.
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    dst.ThrowIfNotReady();
    NativeMethods.core_randu_Scalar(dst.CvPtr, low, high);
    GC.KeepAlive(low);
    GC.KeepAlive(high);
    dst.Fix();
}
/// <summary>
/// Computes a dense optical flow using the Gunnar Farneback's algorithm.
/// </summary>
/// <param name="prev">first 8-bit single-channel input image.</param>
/// <param name="next">second input image of the same size and the same type as prev.</param>
/// <param name="flow">computed flow image that has the same size as prev and type CV_32FC2.</param>
/// <param name="pyrScale">parameter, specifying the image scale (&lt;1) to build pyramids for each image;
/// pyrScale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.</param>
/// <param name="levels">number of pyramid layers including the initial image;
/// levels=1 means that no extra layers are created and only the original images are used.</param>
/// <param name="winsize">averaging window size; larger values increase the algorithm robustness to
/// image noise and give more chances for fast motion detection, but yield more blurred motion field.</param>
/// <param name="iterations">number of iterations the algorithm does at each pyramid level.</param>
/// <param name="polyN">size of the pixel neighborhood used to find polynomial expansion in each pixel;
/// larger values mean that the image will be approximated with smoother surfaces,
/// yielding more robust algorithm and more blurred motion field, typically poly_n =5 or 7.</param>
/// <param name="polySigma">standard deviation of the Gaussian that is used to smooth derivatives used as
/// a basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1,
/// for polyN=7, a good value would be polySigma=1.5.</param>
/// <param name="flags">operation flags that can be a combination of OPTFLOW_USE_INITIAL_FLOW and/or OPTFLOW_FARNEBACK_GAUSSIAN</param>
/// <exception cref="ArgumentNullException">Thrown if prev, next, or flow is null.</exception>
public static void CalcOpticalFlowFarneback(InputArray prev, InputArray next, InputOutputArray flow,
    double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma,
    OpticalFlowFlags flags)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (prev == null)
        throw new ArgumentNullException(nameof(prev));
    if (next == null)
        throw new ArgumentNullException(nameof(next));
    if (flow == null)
        throw new ArgumentNullException(nameof(flow));
    prev.ThrowIfDisposed();
    next.ThrowIfDisposed();
    flow.ThrowIfNotReady();
    NativeMethods.video_calcOpticalFlowFarneback(prev.CvPtr, next.CvPtr, flow.CvPtr,
        pyrScale, levels, winsize, iterations, polyN, polySigma, (int)flags);
    // Keep the input wrappers alive while their raw pointers are in use by native code.
    GC.KeepAlive(prev);
    GC.KeepAlive(next);
    flow.Fix();
}
/// <summary>
/// 2値画像中の輪郭を検出します.
/// </summary>
/// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
/// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
/// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
/// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
/// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の
/// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
/// それに対応する hierarchy[i] の要素は,負の値になります.</param>
/// <param name="mode">輪郭抽出モード</param>
/// <param name="method">輪郭の近似手法</param>
/// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
/// <summary>
/// Finds contours in a binary image.
/// </summary>
/// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s.
/// Zero pixels remain 0’s, so the image is treated as binary.
/// The function modifies the image while extracting the contours.</param>
/// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
/// <param name="hierarchy">Optional output vector, containing information about the image topology.
/// It has as many elements as the number of contours. For each i-th contour contours[i],
/// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next
/// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively.
/// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
/// <param name="mode">Contour retrieval mode</param>
/// <param name="method">Contour approximation method</param>
/// <param name="offset"> Optional offset by which every contour point is shifted.
/// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
public static void FindContours(InputOutputArray image, out Point[][] contours,
    out HierarchyIndex[] hierarchy, RetrievalModes mode,
    ContourApproximationModes method, Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfNotReady();
    // Default offset is (0, 0) — no shifting of the contour points.
    Point offset0 = offset.GetValueOrDefault(new Point());
    IntPtr contoursPtr, hierarchyPtr;
    NativeMethods.imgproc_findContours1_vector(image.CvPtr, out contoursPtr,
        out hierarchyPtr, (int)mode, (int)method, offset0);
    // Wrap the native result vectors so they are disposed after copying into managed arrays.
    using (var contoursVec = new VectorOfVectorPoint(contoursPtr))
    using (var hierarchyVec = new VectorOfVec4i(hierarchyPtr))
    {
        contours = contoursVec.ToArray();
        Vec4i[] hierarchyOrg = hierarchyVec.ToArray();
        // Convert each raw Vec4i hierarchy entry into the managed HierarchyIndex form.
        hierarchy = EnumerableEx.SelectToArray(hierarchyOrg, HierarchyIndex.FromVec4i);
    }
    // The native side may rewrite image; propagate those changes back to the managed wrapper.
    image.Fix();
}
/// <summary>
/// 2値画像中の輪郭を検出します.
/// </summary>
/// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
/// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
/// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
/// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
/// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の
/// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
/// それに対応する hierarchy[i] の要素は,負の値になります.</param>
/// <param name="mode">輪郭抽出モード</param>
/// <param name="method">輪郭の近似手法</param>
/// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
/// <summary>
/// Finds contours in a binary image.
/// </summary>
/// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s.
/// Zero pixels remain 0’s, so the image is treated as binary.
/// The function modifies the image while extracting the contours.</param>
/// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
/// <param name="hierarchy">Optional output vector, containing information about the image topology.
/// It has as many elements as the number of contours. For each i-th contour contours[i],
/// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next
/// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively.
/// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
/// <param name="mode">Contour retrieval mode</param>
/// <param name="method">Contour approximation method</param>
/// <param name="offset"> Optional offset by which every contour point is shifted.
/// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
public static void FindContours(InputOutputArray image, out Mat[] contours,
    OutputArray hierarchy, RetrievalModes mode,
    ContourApproximationModes method, Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (hierarchy == null)
        throw new ArgumentNullException(nameof(hierarchy));
    image.ThrowIfNotReady();
    hierarchy.ThrowIfNotReady();
    // Default offset is (0, 0) — no shifting of the contour points.
    Point offset0 = offset.GetValueOrDefault(new Point());
    IntPtr contoursPtr;
    NativeMethods.imgproc_findContours1_OutputArray(image.CvPtr, out contoursPtr,
        hierarchy.CvPtr, (int)mode, (int)method, offset0);
    // Wrap the native result vector so it is disposed after copying into the managed array.
    using (var contoursVec = new VectorOfMat(contoursPtr))
    {
        contours = contoursVec.ToArray();
    }
    // Both image and hierarchy may have been written by the native side; propagate changes back.
    image.Fix();
    hierarchy.Fix();
}
/// <summary>
/// computes sparse optical flow using multi-scale Lucas-Kanade algorithm
/// </summary>
/// <param name="prevImg">First 8-bit input image or pyramid.</param>
/// <param name="nextImg">Second input image or pyramid of the same size and type as prevImg.</param>
/// <param name="prevPts">Points for which the flow needs to be found.</param>
/// <param name="nextPts">Computed new positions of the input features.</param>
/// <param name="status">Output status vector.</param>
/// <param name="err">Output error vector.</param>
/// <param name="winSize">Search window size at each pyramid level; defaults to 21x21.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="criteria">Termination criteria of the iterative search; defaults to 30 iterations / eps 0.01.</param>
/// <param name="flags">Operation flags.</param>
/// <param name="minEigThreshold">Minimum eigenvalue threshold for filtering out bad features.</param>
/// <exception cref="ArgumentNullException">Thrown if any array argument is null.</exception>
public static void CalcOpticalFlowPyrLK(
    InputArray prevImg, InputArray nextImg,
    InputArray prevPts, InputOutputArray nextPts,
    OutputArray status, OutputArray err,
    Size? winSize = null,
    int maxLevel = 3,
    TermCriteria? criteria = null,
    OpticalFlowFlags flags = OpticalFlowFlags.None,
    double minEigThreshold = 1e-4)
{
    // nameof instead of string literals keeps the messages refactor-safe.
    if (prevImg == null)
        throw new ArgumentNullException(nameof(prevImg));
    if (nextImg == null)
        throw new ArgumentNullException(nameof(nextImg));
    if (prevPts == null)
        throw new ArgumentNullException(nameof(prevPts));
    if (nextPts == null)
        throw new ArgumentNullException(nameof(nextPts));
    if (status == null)
        throw new ArgumentNullException(nameof(status));
    if (err == null)
        throw new ArgumentNullException(nameof(err));
    prevImg.ThrowIfDisposed();
    nextImg.ThrowIfDisposed();
    prevPts.ThrowIfDisposed();
    nextPts.ThrowIfNotReady();
    status.ThrowIfNotReady();
    err.ThrowIfNotReady();

    // Apply the documented defaults for the optional value-type parameters.
    Size winSize0 = winSize.GetValueOrDefault(new Size(21, 21));
    TermCriteria criteria0 = criteria.GetValueOrDefault(
        TermCriteria.Both(30, 0.01));

    NativeMethods.video_calcOpticalFlowPyrLK_InputArray(
        prevImg.CvPtr, nextImg.CvPtr, prevPts.CvPtr, nextPts.CvPtr,
        status.CvPtr, err.CvPtr, winSize0, maxLevel,
        criteria0, (int)flags, minEigThreshold);

    // Keep the input wrappers alive while their raw pointers are in use by native code.
    GC.KeepAlive(prevImg);
    GC.KeepAlive(nextImg);
    GC.KeepAlive(prevPts);
    nextPts.Fix();
    status.Fix();
    err.Fix();
}
/// <summary>
/// 輪郭線,または内側が塗りつぶされた輪郭を描きます.
/// </summary>
/// <param name="image">出力画像</param>
/// <param name="contours"> 入力される全輪郭.各輪郭は,点のベクトルとして格納されています.</param>
/// <param name="contourIdx">描かれる輪郭を示します.これが負値の場合,すべての輪郭が描画されます.</param>
/// <param name="color">輪郭の色.</param>
/// <param name="thickness">輪郭線の太さ.これが負値の場合(例えば thickness=CV_FILLED ),輪郭の内側が塗りつぶされます.</param>
/// <param name="lineType">線の連結性</param>
/// <param name="hierarchy">階層に関するオプションの情報.これは,特定の輪郭だけを描画したい場合にのみ必要になります.</param>
/// <param name="maxLevel">描画される輪郭の最大レベル.0ならば,指定された輪郭のみが描画されます.
/// 1ならば,指定された輪郭と,それに入れ子になったすべての輪郭が描画されます.2ならば,指定された輪郭と,
/// それに入れ子になったすべての輪郭,さらにそれに入れ子になったすべての輪郭が描画されます.このパラメータは,
/// hierarchy が有効な場合のみ考慮されます.</param>
/// <param name="offset">輪郭をシフトするオプションパラメータ.指定された offset = (dx,dy) だけ,すべての描画輪郭がシフトされます.</param>
#else
/// <summary>
/// draws contours in the image
/// </summary>
/// <param name="image">Destination image.</param>
/// <param name="contours">All the input contours. Each contour is stored as a point vector.</param>
/// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param>
/// <param name="color">Color of the contours.</param>
/// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ),
/// the contour interiors are drawn.</param>
/// <param name="lineType">Line connectivity. </param>
/// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param>
/// <param name="maxLevel">Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
/// If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours,
/// all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account
/// when there is hierarchy available.</param>
/// <param name="offset">Optional contour shift parameter. Shift all the drawn contours by the specified offset = (dx, dy)</param>
#endif
public static void DrawContours(
    InputOutputArray image,
    IEnumerable<Mat> contours,
    int contourIdx,
    Scalar color,
    int thickness = 1,
    LineTypes lineType = LineTypes.Link8,
    Mat hierarchy = null,
    int maxLevel = Int32.MaxValue,
    Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (contours == null)
        throw new ArgumentNullException(nameof(contours));
    image.ThrowIfNotReady();
    // Default offset is (0, 0) — contours are drawn where they were found.
    Point offset0 = offset.GetValueOrDefault(new Point());
    // Flatten the managed Mat contours into a raw pointer array for the native call.
    IntPtr[] contoursPtr = EnumerableEx.SelectPtrs(contours);
    NativeMethods.imgproc_drawContours_InputArray(image.CvPtr, contoursPtr, contoursPtr.Length,
        contourIdx, color, thickness, (int)lineType, ToPtr(hierarchy), maxLevel, offset0);
    // Propagate the native side's modifications back to the managed wrapper.
    image.Fix();
}
/// <summary>
/// 輪郭線,または内側が塗りつぶされた輪郭を描きます.
/// </summary>
/// <param name="image">出力画像</param>
/// <param name="contours"> 入力される全輪郭.各輪郭は,点のベクトルとして格納されています.</param>
/// <param name="contourIdx">描かれる輪郭を示します.これが負値の場合,すべての輪郭が描画されます.</param>
/// <param name="color">輪郭の色.</param>
/// <param name="thickness">輪郭線の太さ.これが負値の場合(例えば thickness=CV_FILLED ),輪郭の内側が塗りつぶされます.</param>
/// <param name="lineType">線の連結性</param>
/// <param name="hierarchy">階層に関するオプションの情報.これは,特定の輪郭だけを描画したい場合にのみ必要になります.</param>
/// <param name="maxLevel">描画される輪郭の最大レベル.0ならば,指定された輪郭のみが描画されます.
/// 1ならば,指定された輪郭と,それに入れ子になったすべての輪郭が描画されます.2ならば,指定された輪郭と,
/// それに入れ子になったすべての輪郭,さらにそれに入れ子になったすべての輪郭が描画されます.このパラメータは,
/// hierarchy が有効な場合のみ考慮されます.</param>
/// <param name="offset">輪郭をシフトするオプションパラメータ.指定された offset = (dx,dy) だけ,すべての描画輪郭がシフトされます.</param>
#else
/// <summary>
/// draws contours in the image
/// </summary>
/// <param name="image">Destination image.</param>
/// <param name="contours">All the input contours. Each contour is stored as a point vector.</param>
/// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param>
/// <param name="color">Color of the contours.</param>
/// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ),
/// the contour interiors are drawn.</param>
/// <param name="lineType">Line connectivity. </param>
/// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param>
/// <param name="maxLevel">Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
/// If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours,
/// all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account
/// when there is hierarchy available.</param>
/// <param name="offset">Optional contour shift parameter. Shift all the drawn contours by the specified offset = (dx, dy)</param>
#endif
public static void DrawContours(
    InputOutputArray image,
    IEnumerable<IEnumerable<Point>> contours,
    int contourIdx,
    Scalar color,
    int thickness = 1,
    LineTypes lineType = LineTypes.Link8,
    IEnumerable<HierarchyIndex> hierarchy = null,
    int maxLevel = Int32.MaxValue,
    Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (contours == null)
        throw new ArgumentNullException(nameof(contours));
    image.ThrowIfNotReady();
    // Default offset is (0, 0) — contours are drawn where they were found.
    Point offset0 = offset.GetValueOrDefault(new Point());
    // Materialize the point sequences into jagged arrays plus per-contour lengths for the native call.
    Point[][] contoursArray = EnumerableEx.SelectToArray(contours, EnumerableEx.ToArray);
    int[] contourSize2 = EnumerableEx.SelectToArray(contoursArray, pts => pts.Length);
    // ArrayAddress2 pins the jagged array so its address stays valid during the native call.
    using (var contoursPtr = new ArrayAddress2<Point>(contoursArray))
    {
        if (hierarchy == null)
        {
            // No hierarchy supplied: pass a null pointer and zero length to the native side.
            NativeMethods.imgproc_drawContours_vector(image.CvPtr, contoursPtr.Pointer, contoursArray.Length, contourSize2,
                contourIdx, color, thickness, (int)lineType, IntPtr.Zero, 0, maxLevel, offset0);
        }
        else
        {
            // Convert the managed hierarchy entries to raw Vec4i values for the native side.
            Vec4i[] hiearchyVecs = EnumerableEx.SelectToArray(hierarchy, hi => hi.ToVec4i());
            NativeMethods.imgproc_drawContours_vector(image.CvPtr, contoursPtr.Pointer, contoursArray.Length, contourSize2,
                contourIdx, color, thickness, (int)lineType, hiearchyVecs, hiearchyVecs.Length, maxLevel, offset0);
        }
    }
    // Propagate the native side's modifications back to the managed wrapper.
    image.Fix();
}
/// <summary>
/// 枠だけの楕円,楕円弧,もしくは塗りつぶされた扇形の楕円を描画する
/// </summary>
/// <param name="img">楕円が描画される画像</param>
/// <param name="center">楕円の中心</param>
/// <param name="axes">楕円の軸の長さ</param>
/// <param name="angle">回転角度</param>
/// <param name="startAngle">楕円弧の開始角度</param>
/// <param name="endAngle">楕円弧の終了角度</param>
/// <param name="color">楕円の色</param>
/// <param name="thickness">楕円弧の線の幅 [既定値は1]</param>
/// <param name="lineType">楕円弧の線の種類 [既定値はLineType.Link8]</param>
/// <param name="shift">中心座標と軸の長さの小数点以下の桁を表すビット数 [既定値は0]</param>
#else
/// <summary>
/// Draws simple or thick elliptic arc or fills ellipse sector
/// </summary>
/// <param name="img">Image. </param>
/// <param name="center">Center of the ellipse. </param>
/// <param name="axes">Length of the ellipse axes. </param>
/// <param name="angle">Rotation angle. </param>
/// <param name="startAngle">Starting angle of the elliptic arc. </param>
/// <param name="endAngle">Ending angle of the elliptic arc. </param>
/// <param name="color">Ellipse color. </param>
/// <param name="thickness">Thickness of the ellipse arc. [By default this is 1]</param>
/// <param name="lineType">Type of the ellipse boundary. [By default this is LineType.Link8]</param>
/// <param name="shift">Number of fractional bits in the center coordinates and axes' values. [By default this is 0]</param>
#endif
public static void Ellipse(
    InputOutputArray img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color,
    int thickness = 1, LineTypes lineType = LineTypes.Link8, int shift = 0)
{
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    img.ThrowIfNotReady();
    // Delegate drawing to the native cv::ellipse wrapper.
    NativeMethods.imgproc_ellipse1(img.CvPtr, center, axes, angle, startAngle, endAngle, color, thickness, (int)lineType, shift);
    // Propagate the native side's modifications back to the managed wrapper.
    img.Fix();
}
/// <summary>
/// initializes scaled identity matrix
/// </summary>
/// <param name="mtx">The matrix to initialize (not necessarily square)</param>
/// <param name="s">The value to assign to the diagonal elements</param>
public static void SetIdentity(InputOutputArray mtx, Scalar? s = null)
{
    if (mtx == null)
        throw new ArgumentNullException(nameof(mtx)); // nameof: refactor-safe, consistent with sibling methods
    mtx.ThrowIfNotReady();

    // Diagonal defaults to 1 when no scale value is supplied.
    Scalar s0 = s.GetValueOrDefault(new Scalar(1));
    NativeMethods.core_setIdentity(mtx.CvPtr, s0);
    mtx.Fix();
}
/// <summary>
/// Fills a connected component with the given color.
/// </summary>
/// <param name="image">Input/output 1- or 3-channel, 8-bit, or floating-point image.
/// It is modified by the function unless the FLOODFILL_MASK_ONLY flag is set in the
/// second variant of the function. See the details below.</param>
/// <param name="mask">(For the second function only) Operation mask that should be a single-channel 8-bit image,
/// 2 pixels wider and 2 pixels taller. The function uses and updates the mask, so you take responsibility of
/// initializing the mask content. Flood-filling cannot go across non-zero pixels in the mask. For example,
/// an edge detector output can be used as a mask to stop filling at edges. It is possible to use the same mask
/// in multiple calls to the function to make sure the filled area does not overlap.</param>
/// <param name="seedPoint">Starting point.</param>
/// <param name="newVal">New value of the repainted domain pixels.</param>
/// <param name="rect">Optional output parameter set by the function to the
/// minimum bounding rectangle of the repainted domain.</param>
/// <param name="loDiff">Maximal lower brightness/color difference between the currently
/// observed pixel and one of its neighbors belonging to the component, or a seed pixel
/// being added to the component.</param>
/// <param name="upDiff">Maximal upper brightness/color difference between the currently
/// observed pixel and one of its neighbors belonging to the component, or a seed pixel
/// being added to the component.</param>
/// <param name="flags">Operation flags. Lower bits contain a connectivity value,
/// 4 (default) or 8, used within the function. Connectivity determines which
/// neighbors of a pixel are considered. </param>
/// <returns></returns>
public static int FloodFill(
    InputOutputArray image,
    InputOutputArray mask,
    Point seedPoint,
    Scalar newVal,
    out Rect rect,
    Scalar? loDiff = null,
    Scalar? upDiff = null,
    FloodFillFlags flags = FloodFillFlags.Link4)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (mask == null)
        throw new ArgumentNullException(nameof(mask));
    image.ThrowIfNotReady();
    mask.ThrowIfNotReady();

    // Absent tolerances collapse to all-zero scalars (exact-match fill).
    Scalar lower = loDiff.GetValueOrDefault(new Scalar());
    Scalar upper = upDiff.GetValueOrDefault(new Scalar());

    int result = NativeMethods.imgproc_floodFill(
        image.CvPtr, mask.CvPtr, seedPoint, newVal, out rect,
        lower, upper, (int)flags);

    image.Fix();
    mask.Fix();
    return result;
}
/// <summary>
/// computes covariation matrix of a set of samples
/// </summary>
/// <param name="samples">The input samples, stored as rows or columns of a single matrix.</param>
/// <param name="covar">The output covariance matrix.</param>
/// <param name="mean">The input or output (depending on the flags) mean vector of the input samples.</param>
/// <param name="flags">Operation flags (CovarFlags combination).</param>
/// <param name="ctype">Type of the output covariance matrix.</param>
public static void CalcCovarMatrix(InputArray samples, OutputArray covar,
    InputOutputArray mean, CovarFlags flags, MatType ctype)
{
    if (samples == null)
        throw new ArgumentNullException(nameof(samples)); // nameof: consistent with sibling methods
    if (covar == null)
        throw new ArgumentNullException(nameof(covar));
    if (mean == null)
        throw new ArgumentNullException(nameof(mean));
    samples.ThrowIfDisposed();
    covar.ThrowIfNotReady();
    mean.ThrowIfNotReady();

    NativeMethods.core_calcCovarMatrix_InputArray(
        samples.CvPtr, covar.CvPtr, mean.CvPtr, (int)flags, ctype);

    // Keep the managed wrapper alive until the native call has returned.
    GC.KeepAlive(samples);
    covar.Fix();
    mean.Fix();
}
/// <summary>
/// Segments the image using GrabCut algorithm
/// </summary>
/// <param name="img">Input 8-bit 3-channel image.</param>
/// <param name="mask">Input/output 8-bit single-channel mask.
/// The mask is initialized by the function when mode is set to GC_INIT_WITH_RECT.
/// Its elements may have Cv2.GC_BGD / Cv2.GC_FGD / Cv2.GC_PR_BGD / Cv2.GC_PR_FGD</param>
/// <param name="rect">ROI containing a segmented object. The pixels outside of the ROI are
/// marked as "obvious background". The parameter is only used when mode==GC_INIT_WITH_RECT.</param>
/// <param name="bgdModel">Temporary array for the background model. Do not modify it while you are processing the same image.</param>
/// <param name="fgdModel">Temporary arrays for the foreground model. Do not modify it while you are processing the same image.</param>
/// <param name="iterCount">Number of iterations the algorithm should make before returning the result.
/// Note that the result can be refined with further calls with mode==GC_INIT_WITH_MASK or mode==GC_EVAL .</param>
/// <param name="mode">Operation mode that could be one of GrabCutFlag value.</param>
public static void GrabCut(
    InputArray img,
    InputOutputArray mask,
    Rect rect,
    InputOutputArray bgdModel,
    InputOutputArray fgdModel,
    int iterCount,
    GrabCutModes mode)
{
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    if (mask == null)
        throw new ArgumentNullException(nameof(mask));
    if (bgdModel == null)
        throw new ArgumentNullException(nameof(bgdModel));
    if (fgdModel == null)
        throw new ArgumentNullException(nameof(fgdModel));
    img.ThrowIfDisposed();
    mask.ThrowIfNotReady();
    bgdModel.ThrowIfNotReady();
    fgdModel.ThrowIfNotReady();

    NativeMethods.imgproc_grabCut(
        img.CvPtr, mask.CvPtr, rect,
        bgdModel.CvPtr, fgdModel.CvPtr, iterCount, (int)mode);

    // Keep the read-only input alive until the native call has returned;
    // the three in/out arrays are synchronized back via Fix().
    GC.KeepAlive(img);
    mask.Fix();
    bgdModel.Fix();
    fgdModel.Fix();
}
/// <summary>
/// fills array with uniformly-distributed random numbers from the range [low, high)
/// </summary>
/// <param name="dst">The output array of random numbers.
/// The array must be pre-allocated and have 1 to 4 channels</param>
/// <param name="low">The inclusive lower boundary of the generated random numbers</param>
/// <param name="high">The exclusive upper boundary of the generated random numbers</param>
public static void Randu(InputOutputArray dst, InputArray low, InputArray high)
{
    if (dst == null)
        throw new ArgumentNullException(nameof(dst)); // nameof: consistent with sibling methods
    if (low == null)
        throw new ArgumentNullException(nameof(low));
    if (high == null)
        throw new ArgumentNullException(nameof(high));
    dst.ThrowIfNotReady();
    low.ThrowIfDisposed();
    high.ThrowIfDisposed();

    NativeMethods.core_randu_InputArray(dst.CvPtr, low.CvPtr, high.CvPtr);

    // Keep the boundary arrays alive until the native call has returned.
    GC.KeepAlive(low);
    GC.KeepAlive(high);
    dst.Fix();
}
/// <summary>
/// Calculates a dense optical flow between two input frames.
/// </summary>
/// <param name="frame0">first input image.</param>
/// <param name="frame1">second input image of the same size and the same type as frame0.
/// (NOTE(review): size/type constraint mirrors the other optical-flow wrappers — confirm against the native API.)</param>
/// <param name="flow">computed flow image.</param>
public override void Calc(
    InputArray frame0, InputArray frame1, InputOutputArray flow)
{
    if (disposed)
        throw new ObjectDisposedException("DenseOpticalFlowImpl");
    if (frame0 == null)
        throw new ArgumentNullException(nameof(frame0));
    if (frame1 == null)
        throw new ArgumentNullException(nameof(frame1));
    if (flow == null)
        throw new ArgumentNullException(nameof(flow));
    frame0.ThrowIfDisposed();
    frame1.ThrowIfDisposed();
    flow.ThrowIfNotReady();

    NativeMethods.video_DenseOpticalFlow_calc(
        ptr, frame0.CvPtr, frame1.CvPtr, flow.CvPtr);

    // Keep the managed wrappers alive until the native call has finished,
    // matching how CalcOpticalFlowFarneback treats its input arrays.
    GC.KeepAlive(frame0);
    GC.KeepAlive(frame1);
    flow.Fix();
}
/// <summary>
/// fills array with normally-distributed random numbers with the specified mean and the standard deviation
/// </summary>
/// <param name="dst">The output array of random numbers.
/// The array must be pre-allocated and have 1 to 4 channels</param>
/// <param name="mean">The mean value (expectation) of the generated random numbers</param>
/// <param name="stddev">The standard deviation of the generated random numbers</param>
public static void Randn(InputOutputArray dst, InputArray mean, InputArray stddev)
{
    if (dst == null)
        throw new ArgumentNullException(nameof(dst)); // nameof: consistent with sibling methods
    if (mean == null)
        throw new ArgumentNullException(nameof(mean));
    if (stddev == null)
        throw new ArgumentNullException(nameof(stddev));
    dst.ThrowIfNotReady();
    mean.ThrowIfDisposed();
    stddev.ThrowIfDisposed();

    NativeMethods.core_randn_InputArray(dst.CvPtr, mean.CvPtr, stddev.CvPtr);

    // Keep the distribution-parameter arrays alive until the native call has returned.
    GC.KeepAlive(mean);
    GC.KeepAlive(stddev);
    dst.Fix();
}
/// <summary>
/// Fills the array with random numbers drawn from the specified distribution.
/// </summary>
/// <param name="mat">The output array to fill; must be pre-allocated.</param>
/// <param name="distType">The distribution type to sample from.</param>
/// <param name="a">First distribution parameter array
/// (presumably lower bound for uniform / mean for normal, per cv::RNG::fill — confirm against native docs).</param>
/// <param name="b">Second distribution parameter array
/// (presumably upper bound for uniform / standard deviation for normal — confirm against native docs).</param>
/// <param name="saturateRange">Pre-saturation flag passed to the native routine. [By default this is false]</param>
public void Fill(InputOutputArray mat, DistributionType distType, InputArray a, InputArray b,
    bool saturateRange = false)
{
    if (mat == null)
        throw new ArgumentNullException(nameof(mat));
    if (a == null)
        throw new ArgumentNullException(nameof(a));
    if (b == null)
        throw new ArgumentNullException(nameof(b));
    mat.ThrowIfNotReady();
    a.ThrowIfDisposed();
    b.ThrowIfDisposed();

    NativeMethods.core_RNG_fill(ref state, mat.CvPtr, (int)distType, a.CvPtr, b.CvPtr,
        saturateRange ? 1 : 0);

    // Keep the parameter arrays alive until the native call has returned,
    // matching the Randu/Randn wrappers.
    GC.KeepAlive(a);
    GC.KeepAlive(b);
    mat.Fix();
}
/// <summary>
/// shuffles the input array elements
/// </summary>
/// <param name="dst">The input/output numerical 1D array</param>
/// <param name="iterFactor">The scale factor that determines the number of random swap operations.</param>
/// <param name="rng">The optional random number generator used for shuffling.
/// If it is null, theRng() is used instead.</param>
public static void RandShuffle(InputOutputArray dst, double iterFactor, RNG rng = null)
{
    if (dst == null)
        throw new ArgumentNullException(nameof(dst)); // nameof: consistent with sibling methods
    dst.ThrowIfNotReady();

    if (rng == null)
    {
        // Null RNG pointer tells the native side to use the default generator.
        NativeMethods.core_randShuffle(dst.CvPtr, iterFactor, IntPtr.Zero);
    }
    else
    {
        // The native call advances the generator state, so write it back afterwards.
        ulong state = rng.State;
        NativeMethods.core_randShuffle(dst.CvPtr, iterFactor, ref state);
        rng.State = state;
    }
    dst.Fix();
}
/// <summary>
/// Performs a marker-based image segmentation using the watershed algorithm.
/// </summary>
/// <param name="image">Input 8-bit 3-channel image.</param>
/// <param name="markers">Input/output 32-bit single-channel image (map) of markers.
/// It should have the same size as image.</param>
public static void Watershed(InputArray image, InputOutputArray markers)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (markers == null)
        throw new ArgumentNullException(nameof(markers));
    image.ThrowIfDisposed();
    markers.ThrowIfNotReady();

    NativeMethods.imgproc_watershed(image.CvPtr, markers.CvPtr);

    // Keep the input wrapper alive until the native call returns;
    // the marker map is synchronized back via Fix().
    GC.KeepAlive(image);
    markers.Fix();
}
/// <summary>
/// inserts a single channel to dst (coi is 0-based index)
/// </summary>
/// <param name="src">The single-channel source array.</param>
/// <param name="dst">The destination array the channel is written into.</param>
/// <param name="coi">0-based index of the destination channel.</param>
public static void InsertChannel(InputArray src, InputOutputArray dst, int coi)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src)); // nameof: consistent with sibling methods
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.core_insertChannel(src.CvPtr, dst.CvPtr, coi);

    // Keep the source wrapper alive until the native call has returned.
    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// converts NaN's to the given number
/// </summary>
/// <param name="a">The input/output array whose NaN elements are replaced.</param>
/// <param name="val">The value written in place of each NaN. [By default this is 0]</param>
public static void PatchNaNs(InputOutputArray a, double val = 0)
{
    if (a == null)
        throw new ArgumentNullException(nameof(a)); // nameof: consistent with sibling methods
    a.ThrowIfNotReady();

    NativeMethods.core_patchNaNs(a.CvPtr, val);
    GC.KeepAlive(a);
    // NOTE(review): unlike the other InputOutputArray wrappers in this file,
    // this method does not call a.Fix() — confirm whether that omission is intentional.
}
/// <summary>
/// Draws the line segments on a given image.
/// </summary>
/// <param name="image">The image, where the liens will be drawn.
/// Should be bigger or equal to the image, where the lines were found.</param>
/// <param name="lines">A vector of the lines that needed to be drawn.</param>
public virtual void DrawSegments(InputOutputArray image, InputArray lines)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (lines == null)
        throw new ArgumentNullException(nameof(lines));
    image.ThrowIfNotReady();
    lines.ThrowIfDisposed();

    NativeMethods.imgproc_LineSegmentDetector_drawSegments(ptr, image.CvPtr, lines.CvPtr);

    // Synchronize the drawn-on image back, then keep the input vector alive
    // until the native call has returned.
    image.Fix();
    GC.KeepAlive(lines);
}
/// <summary>
/// 2値画像中の輪郭を検出します.
/// </summary>
/// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
/// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
/// <param name="mode">輪郭抽出モード</param>
/// <param name="method">輪郭の近似手法</param>
/// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
/// <return>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</return>
#else
/// <summary>
/// Finds contours in a binary image.
/// </summary>
/// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s.
/// Zero pixels remain 0’s, so the image is treated as binary.
/// The function modifies the image while extracting the contours.</param>
/// <param name="mode">Contour retrieval mode</param>
/// <param name="method">Contour approximation method</param>
/// <param name="offset"> Optional offset by which every contour point is shifted.
/// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
/// <returns>Detected contours. Each contour is stored as a vector of points.</returns>
#endif
public static MatOfPoint[] FindContoursAsMat(InputOutputArray image,
    RetrievalModes mode, ContourApproximationModes method, Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfNotReady();

    Point offsetValue = offset.GetValueOrDefault(new Point());

    // The native side allocates a vector<Mat>; we take ownership of the pointer
    // and wrap it so it is released deterministically.
    IntPtr nativeContours;
    NativeMethods.imgproc_findContours2_OutputArray(
        image.CvPtr, out nativeContours, (int)mode, (int)method, offsetValue);
    image.Fix();

    using (var contoursVec = new VectorOfMat(nativeContours))
    {
        return contoursVec.ToArray<MatOfPoint>();
    }
}