/// <summary>
/// Applies a generic geometrical transformation to an image.
/// </summary>
/// <param name="src">Source image.</param>
/// <param name="dst">Destination image with the size the same as xmap and the type the same as src.</param>
/// <param name="map1">X values. Only CV_32FC1 type is supported.</param>
/// <param name="map2">Y values. Only CV_32FC1 type is supported.</param>
/// <param name="interpolation">Interpolation method (see resize). INTER_NEAREST, INTER_LINEAR and
/// INTER_CUBIC are supported for now.</param>
/// <param name="borderMode">Pixel extrapolation method (see borderInterpolate). BORDER_REFLECT101,
/// BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT and BORDER_WRAP are supported for now.</param>
/// <param name="borderValue">Value used in case of a constant border. By default, it is 0.</param>
/// <param name="stream">Stream for the asynchronous version.</param>
public static void remap(
    InputArray src, OutputArray dst, InputArray map1, InputArray map2,
    InterpolationFlags interpolation = InterpolationFlags.Linear,
    BorderTypes borderMode = BorderTypes.Constant,
    Scalar? borderValue = null,
    Stream stream = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    if (map1 == null)
        throw new ArgumentNullException(nameof(map1));
    if (map2 == null)
        throw new ArgumentNullException(nameof(map2));

    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();
    map1.ThrowIfDisposed();
    map2.ThrowIfDisposed();

    // A null borderValue means an all-zero (black) constant border.
    var border = borderValue ?? Scalar.All(0);
    // A null stream falls back to the default (synchronous) CUDA stream.
    var streamPtr = stream?.CvPtr ?? Stream.Null.CvPtr;

    NativeMethods.cuda_warping_remap(
        src.CvPtr, dst.CvPtr, map1.CvPtr, map2.CvPtr,
        (int)interpolation, (int)borderMode, border, streamPtr);

    // Keep the managed wrappers alive until the native call has completed.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    GC.KeepAlive(map1);
    GC.KeepAlive(map2);
    dst.Fix();
}
/// <summary>
/// Reads the morphology parameters from the test configuration and applies the
/// selected morphological operation (dilate, erode, or a generic morphologyEx)
/// to <c>src</c>, storing the result in <c>dst</c>.
/// </summary>
/// <returns>Always true (failures propagate as exceptions).</returns>
public override bool run()
{
    method = (MorphTypes)getValue("method");
    shape = (MorphShapes)getValue("shape");
    ksize = (Size)getValue("ksize");
    anchor = (Point)getValue("anchor");
    iterations = (int)getValue("iterations");
    borderType = (BorderTypes)getValue("borderType");

    // Dispose the structuring element once the operation has produced dst
    // (the original leaked this Mat).
    using (Mat element = Cv2.GetStructuringElement(shape, ksize))
    {
        switch (method)
        {
            case MorphTypes.Dilate:
                dst = src.Dilate(element, anchor, iterations, borderType, null);
                break;
            case MorphTypes.Erode:
                dst = src.Erode(element, anchor, iterations, borderType, null);
                break;
            default:
                dst = src.MorphologyEx(method, element, anchor, iterations, borderType, null);
                break;
        }
    }
    // Original wrapped this in try { ... } catch (Exception ex) { throw ex; },
    // which only destroyed the stack trace; the no-op catch has been removed.
    TestName = method.ToString();
    return true;
}
/// ------------------------------------------------------------------------------------
/// <summary>
/// Constructor 5: builds the drawing rectangle from discrete coordinates and
/// delegates to the rectangle-based constructor, which draws immediately.
/// </summary>
/// <param name="g">Target graphics surface.</param>
/// <param name="x">Left edge of the border rectangle.</param>
/// <param name="y">Top edge of the border rectangle.</param>
/// <param name="nWidth">Width of the border rectangle.</param>
/// <param name="nHeight">Height of the border rectangle.</param>
/// <param name="brdrType">Type of border to draw.</param>
/// ------------------------------------------------------------------------------------
public BorderDrawing(Graphics g, int x, int y, int nWidth, int nHeight, BorderTypes brdrType)
    : this(g, new System.Drawing.Rectangle(x, y, nWidth, nHeight), brdrType)
{
}
/// ------------------------------------------------------------------------------------
/// <summary>
/// Constructor 4: stores the graphics surface, rectangle, and border type,
/// then draws the border immediately.
/// </summary>
/// <param name="g">Target graphics surface.</param>
/// <param name="rect">Rectangle the border is drawn around.</param>
/// <param name="brdrType">Type of border to draw.</param>
/// ------------------------------------------------------------------------------------
public BorderDrawing(Graphics g, System.Drawing.Rectangle rect, BorderTypes brdrType)
{
    m_brdrType = brdrType;
    m_rect = rect;
    m_graphics = g;
    Draw();
}
/// <summary>
/// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
/// </summary>
/// <param name="img">8-bit input image.</param>
/// <param name="pyramid">output pyramid.</param>
/// <param name="winSize">window size of optical flow algorithm.
/// Must be not less than winSize argument of calcOpticalFlowPyrLK().
/// It is needed to calculate required padding for pyramid levels.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="withDerivatives">set to precompute gradients for the every pyramid level.
/// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will
/// calculate them internally.</param>
/// <param name="pyrBorder">the border mode for pyramid layers.</param>
/// <param name="derivBorder">the border mode for gradients.</param>
/// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible.
/// You can pass false to force data copying.</param>
/// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
public static int BuildOpticalFlowPyramid(
    InputArray img, OutputArray pyramid, Size winSize, int maxLevel,
    bool withDerivatives = true,
    BorderTypes pyrBorder = BorderTypes.Reflect101,
    BorderTypes derivBorder = BorderTypes.Constant,
    bool tryReuseInputImage = true)
{
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    if (pyramid == null)
        throw new ArgumentNullException(nameof(pyramid));

    img.ThrowIfDisposed();
    pyramid.ThrowIfNotReady();

    // Bools are marshalled as 0/1 ints across the native boundary.
    NativeMethods.HandleException(
        NativeMethods.video_buildOpticalFlowPyramid1(
            img.CvPtr, pyramid.CvPtr, winSize, maxLevel,
            withDerivatives ? 1 : 0, (int)pyrBorder, (int)derivBorder,
            tryReuseInputImage ? 1 : 0, out var levels));

    pyramid.Fix();
    GC.KeepAlive(img);
    return levels;
}
/// <summary>
/// Applies an affine transformation to an image.
/// </summary>
/// <param name="src">Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
/// supported.</param>
/// <param name="dst">Destination image with the same type as src . The size is dsize .</param>
/// <param name="m">*2x3* transformation matrix.</param>
/// <param name="dsize">Size of the destination image.</param>
/// <param name="flags">Combination of interpolation methods (see resize) and the optional flag
/// WARP_INVERSE_MAP specifying that M is an inverse transformation(dst=\>src ). Only
/// INTER_NEAREST, INTER_LINEAR, and INTER_CUBIC interpolation methods are supported.</param>
/// <param name="borderMode">pixel extrapolation method; when borderMode=BORDER_TRANSPARENT,
/// it means that the pixels in the destination image corresponding to the "outliers"
/// in the source image are not modified by the function.</param>
/// <param name="borderValue">value used in case of a constant border; by default, it is 0.</param>
/// <param name="stream">Stream for the asynchronous version.</param>
public static void warpAffine(
    InputArray src, OutputArray dst, InputArray m, Size dsize,
    InterpolationFlags flags = InterpolationFlags.Linear,
    BorderTypes borderMode = BorderTypes.Constant,
    Scalar? borderValue = null,
    Stream stream = null)
{
    if (src == null)
    {
        throw new ArgumentNullException(nameof(src));
    }
    if (dst == null)
    {
        throw new ArgumentNullException(nameof(dst));
    }
    if (m == null)
    {
        throw new ArgumentNullException(nameof(m));
    }
    src.ThrowIfDisposed();
    // FIX: dst is an OutputArray and must be validated with ThrowIfNotReady(),
    // as every other wrapper in this file does (the original called ThrowIfDisposed()).
    dst.ThrowIfNotReady();
    m.ThrowIfDisposed();

    // A null borderValue means an all-zero (black) constant border.
    Scalar borderValue0 = borderValue.GetValueOrDefault(Scalar.All(0));
    NativeMethods.cuda_warping_warpAffine(src.CvPtr, dst.CvPtr, m.CvPtr, dsize, (int)flags,
        (int)borderMode, borderValue0, stream?.CvPtr ?? Stream.Null.CvPtr);

    // Keep the managed wrappers alive until the native call has completed.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    GC.KeepAlive(m);
    dst.Fix();
}
/// <summary>
/// Applies a generic geometrical transformation, using a black (all-zero)
/// constant border value.
/// </summary>
public static void Remap(Mat src, Mat dst, Mat map1, Mat map2, InterpolationFlags interpolation,
    BorderTypes borderType = BorderTypes.Constant)
{
    // Delegate to the full overload with the default border color.
    Remap(src, dst, map1, map2, interpolation, borderType, new Scalar(0, 0, 0));
}
/// <summary>
/// Demo handler: loads an image and repeatedly pads it with a 5% border,
/// cycling the border mode via keyboard (r/w/c/d) until ESC is pressed.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    // NOTE(review): hard-coded absolute path — consider a file dialog or relative resource path.
    using (Mat srcMat = new Mat("F:\\Microsoft Visual Studio\\project\\yoloaforge\\yoloaforge\\a.jpg", ImreadModes.AnyColor | ImreadModes.AnyDepth))
    using (Mat dstMat = new Mat())
    {
        #region 边缘处理四个类型
        // Border width in each direction: 5% of the source image dimensions.
        int top = (int)(0.05 * srcMat.Rows);
        int botton = (int)(0.05 * srcMat.Rows);
        int left = (int)(0.05 * srcMat.Cols);
        int right = (int)(0.05 * srcMat.Cols);
        // Seeded RNG so the constant-border colors are reproducible.
        RNG r = new RNG(12345);
        BorderTypes borderType = BorderTypes.Default;
        // FIX: create the display window once; the original constructed a new
        // Window("dst") on every loop iteration, leaking window handles.
        Window w = new Window("dst", WindowMode.Normal);
        int ch;
        while (true)
        {
            ch = Cv2.WaitKey(500);
            if ((char)ch == 27) // ESC exits the loop
            {
                break;
            }
            else if ((char)ch == 'r')
            {
                borderType = BorderTypes.Replicate; // replicate the known edge pixels
            }
            else if ((char)ch == 'w')
            {
                borderType = BorderTypes.Wrap; // wrap around with pixels from the opposite edge
            }
            else if ((char)ch == 'c')
            {
                borderType = BorderTypes.Constant; // fill the border with a fixed color
            }
            else if ((char)ch == 'd')
            {
                borderType = BorderTypes.Default; // default border handling
            }
            // Random fill color; only used when borderType == Constant.
            Scalar color = new Scalar(r.Uniform(0, 255), r.Uniform(0, 255), r.Uniform(0, 255));
            Cv2.CopyMakeBorder(srcMat, dstMat, top, botton, left, right, borderType, color);
            Cv2.ImShow("dst", dstMat);
        }
        #endregion
    }
}
/// ------------------------------------------------------------------------------------
/// <summary>
/// Re-targets this border drawing at a new graphics surface, rectangle, and
/// border type, then draws it.
/// </summary>
/// <param name="g">Target graphics surface.</param>
/// <param name="rect">Rectangle the border is drawn around.</param>
/// <param name="brdrType">Type of border to draw.</param>
/// ------------------------------------------------------------------------------------
public void Draw(Graphics g, System.Drawing.Rectangle rect, BorderTypes brdrType)
{
    // Guard against use after disposal before touching any state.
    CheckDisposed();
    m_brdrType = brdrType;
    m_rect = rect;
    m_graphics = g;
    Draw();
}
/// <summary>
/// Applies a generic geometrical transformation to an image via the native
/// au_cv_imgproc_remap binding.
/// </summary>
public static void Remap(Mat src, Mat dst, Mat map1, Mat map2, InterpolationFlags interpolation,
    BorderTypes borderType, Scalar borderValue)
{
    // The native side reports failures through this exception holder.
    var nativeError = new Exception();
    au_cv_imgproc_remap(
        src.CppPtr, dst.CppPtr, map1.CppPtr, map2.CppPtr,
        (int)interpolation, (int)borderType, borderValue.CppPtr,
        nativeError.CppPtr);
    nativeError.Check();
}
/// <summary>
/// Creates a morphology layer. A structuring-element kernel is built only when
/// a shape and both kernel dimensions are supplied.
/// </summary>
/// <param name="op">Morphological operation to apply.</param>
/// <param name="iterations">Number of times the operation is applied.</param>
/// <param name="borderTypes">Pixel extrapolation method.</param>
/// <param name="morphShapes">Optional shape of the structuring element.</param>
/// <param name="width">Optional kernel width.</param>
/// <param name="height">Optional kernel height.</param>
public MorphologyLayer(MorphTypes op, int iterations, BorderTypes borderTypes, MorphShapes? morphShapes = null, int? width = null, int? height = null)
{
    MorphTypes = op;
    Iterations = iterations;
    BorderTypes = borderTypes;
    if (morphShapes.HasValue && width.HasValue && height.HasValue)
    {
        Kernel = Cv2.GetStructuringElement(morphShapes.Value, new Size(width.Value, height.Value));
    }
}
/// <summary>
/// Forms a border around the image
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same type as src and
/// the size Size(src.cols+left+right, src.rows+top+bottom)</param>
/// <param name="top">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
/// <param name="bottom">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
/// <param name="left">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
/// <param name="right">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
/// <param name="borderType">The border type</param>
/// <param name="value">The border value if borderType == Constant</param>
public static void CopyMakeBorder(InputArray src, OutputArray dst, int top, int bottom,
    int left, int right, BorderTypes borderType, Scalar? value = null)
{
    // Use nameof instead of string literals so renames stay refactor-safe.
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    // A null value means a default (all-zero) scalar border.
    Scalar value0 = value.GetValueOrDefault(new Scalar());
    NativeMethods.imgproc_copyMakeBorder(src.CvPtr, dst.CvPtr, top, bottom, left, right,
        (int)borderType, value0);
    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// Crops the quadrilateral defined by the four (adjusted) corner points out of
/// the canvas and stretches it into a rectangle of the requested final size via
/// a perspective warp.
/// </summary>
/// <param name="isPictureSmall">Passed through to solve_squere_line.</param>
/// <param name="canvas">Source bitmap.</param>
/// <param name="reSizeRate">Passed through to solve_squere_line.</param>
/// <param name="px0">Top-left corner (input; adjusted locally).</param>
/// <param name="px1">Bottom-left corner (input; adjusted locally).</param>
/// <param name="px2">Bottom-right corner (input; adjusted locally).</param>
/// <param name="px3">Top-right corner (input; adjusted locally).</param>
/// <param name="rate">Receives the rate computed by solve_squere_line.</param>
/// <param name="FinalPictureWidth">Width of the output image.</param>
/// <param name="FinalPictureHeight">Height of the output image.</param>
/// <returns>The warped bitmap.</returns>
public Bitmap main(bool isPictureSmall, Bitmap canvas, float reSizeRate,
    System.Drawing.Point px0, System.Drawing.Point px1, System.Drawing.Point px2, System.Drawing.Point px3,
    ref double rate, int FinalPictureWidth, int FinalPictureHeight)
{
    Image img = canvas;
    imgSizeX = FinalPictureWidth;
    imgSizeY = FinalPictureHeight;

    int[] x = new int[4];
    int[] y = new int[4];
    rate = solve_squere_line(isPictureSmall, reSizeRate, px0, px1, px2, px3, ref x, ref y);
    px0.X = x[0]; px0.Y = y[0];
    px1.X = x[1]; px1.Y = y[1];
    px2.X = x[2]; px2.Y = y[2];
    px3.X = x[3]; px3.Y = y[3];

    // The quadrilateral to cut out and stretch to the full output rectangle.
    System.Drawing.Point[] p2pt = { px0, px1, px2, px3 };

    Mat src_img = BitmapConverter.ToMat((Bitmap)img);
    // FIX: the original aliased dst_img to src_img; cv::warpPerspective does not
    // support in-place operation, and reallocating the shared Mat as the output
    // can free the source data mid-call. Use a separate destination Mat.
    Mat dst_img = new Mat();

    // Corresponding corner points before and after the transform.
    Point2f[] src_pt = new Point2f[4];
    src_pt[0] = new Point2f(px0.X, px0.Y);
    src_pt[1] = new Point2f(px1.X, px1.Y);
    src_pt[2] = new Point2f(px2.X, px2.Y);
    src_pt[3] = new Point2f(px3.X, px3.Y);
    Point2f[] dst_pt = new Point2f[4];
    dst_pt[0] = new Point2f(0, 0);                // top-left
    dst_pt[1] = new Point2f(0, imgSizeY);         // bottom-left
    dst_pt[2] = new Point2f(imgSizeX, imgSizeY);  // bottom-right
    dst_pt[3] = new Point2f(imgSizeX, 0);         // top-right

    Mat map_matrix = Cv2.GetPerspectiveTransform(src_pt, dst_pt);
    // Warp the image with the computed perspective transform matrix.
    OpenCvSharp.Size mysize = new OpenCvSharp.Size(imgSizeX, imgSizeY);
    InterpolationFlags OIFLiner = InterpolationFlags.Linear;
    BorderTypes OBTDefault = BorderTypes.Default;
    Cv2.WarpPerspective(src_img, dst_img, map_matrix, mysize, OIFLiner, OBTDefault);
    //dst_img.SaveImage("trapezoidal.jpg"); // for debugging
    return dst_img.ToBitmap();
}
/// <summary>
/// Gets a value indicating if the grid cell has a border
/// with the specified border type.
/// </summary>
/// <param name="borderType">Border flag(s) to test for; None asks whether the cell has no borders at all.</param>
/// <returns>True when the cell's border flags satisfy the query.</returns>
public bool HasBorderOfType(BorderTypes borderType)
{
    // Asking for None means "has no borders at all"; any other value is a
    // bitwise membership test against this cell's flags.
    if (borderType == BorderTypes.None)
    {
        return this.borderType == BorderTypes.None;
    }
    return (this.borderType & borderType) != BorderTypes.None;
}
/// <summary>
/// Refreshes the border information by examining the four orthogonal
/// neighbors: a side becomes a border when the neighbor exists, has a
/// different owner, and does not already claim the opposing border itself.
/// </summary>
public void RefreshBorders()
{
    this.IsBorder = false;
    this.borderType = BorderTypes.None;

    GameRoot root = GameRoot.Instance;
    // The four stanzas of the original were identical except for the offset
    // and border sides, so they are factored into one helper.
    UpdateBorderForNeighbor(root, 0, -1, BorderTypes.Bottom, BorderTypes.Top);
    UpdateBorderForNeighbor(root, -1, 0, BorderTypes.Right, BorderTypes.Left);
    UpdateBorderForNeighbor(root, 1, 0, BorderTypes.Left, BorderTypes.Right);
    UpdateBorderForNeighbor(root, 0, 1, BorderTypes.Top, BorderTypes.Bottom);
}

/// <summary>
/// Marks <paramref name="mySide"/> as a border when the neighbor at the given
/// offset exists, is owned by someone else, and does not already have the
/// opposing <paramref name="neighborSide"/> border.
/// </summary>
private void UpdateBorderForNeighbor(GameRoot root, int dx, int dy, BorderTypes neighborSide, BorderTypes mySide)
{
    GridCell cell = root.Grid.GetCell(new Point(this.Coordinates.X + dx, this.Coordinates.Y + dy));
    if (cell != null && cell.Owner != this.Owner && !cell.HasBorderOfType(neighborSide))
    {
        this.IsBorder = true;
        this.borderType |= mySide;
    }
}
/// <summary>
/// Demo: converts the global source image to grayscale, applies a box blur
/// with the given kernel size and border mode, and displays each stage.
/// </summary>
/// <param name="ksize">Kernel size (used for both width and height).</param>
/// <param name="borderType">Pixel extrapolation method.</param>
public static void Blur(double ksize = 3, BorderTypes borderType = BorderTypes.Replicate)
{
    Glb.DrawMatAndHist0(Glb.matSrc);

    var gray = Glb.matSrc.CvtColor(ColorConversionCodes.BGR2GRAY);
    Glb.DrawMatAndHist1(gray);

    var blurred = gray.Blur(new Size(ksize, ksize), borderType: borderType);
    Glb.DrawMatAndHist2(blurred);

    // Release the intermediate Mats once they have been displayed.
    gray.Dispose();
    blurred.Dispose();
}
/// <summary>
/// Reads the smoothing parameters from the test configuration and applies the
/// selected filter (box blur, bilateral, Gaussian, or median) to <c>src</c>,
/// storing the result in <c>dst</c>.
/// </summary>
/// <returns>Always true (failures propagate as exceptions).</returns>
public override bool run()
{
    // Original wrapped this in try { ... } catch (Exception ex) { throw ex; },
    // which only destroyed the stack trace; the no-op catch has been removed.
    switch ((Method)getValue("method"))
    {
        case Method.Blur:
            ksize = (Size)getValue("ksize");
            anchor = (Point)getValue("anchor");
            borderType = (BorderTypes)getValue("borderType");
            dst = src.Blur(ksize, anchor, borderType);
            break;
        case Method.Bilateral:
            d = (int)getValue("d");
            sigmacolor = (double)getValue("sigmacolor");
            sigmaspace = (double)getValue("sigmaspace");
            borderType = (BorderTypes)getValue("borderType");
            dst = src.BilateralFilter(d, sigmacolor, sigmaspace, borderType);
            break;
        case Method.Gaussian:
            ksize = (Size)getValue("ksize");
            sigmax = (double)getValue("sigmax");
            sigmay = (double)getValue("sigmay");
            borderType = (BorderTypes)getValue("borderType");
            dst = src.GaussianBlur(ksize, sigmax, sigmay, borderType);
            break;
        case Method.Median:
            m_ksize = (int)getValue("ksize");
            dst = src.MedianBlur(m_ksize);
            break;
        default:
            // Unknown method: leave dst unchanged (same as the original).
            break;
    }
    return true;
}
/// <summary>
/// Performs bilateral filtering of passed image
/// </summary>
/// <param name="src">Source image. Supports only (channels != 2 &amp;&amp; depth() != CV_8S &amp;&amp; depth() != CV_32S
/// &amp;&amp; depth() != CV_64F).</param>
/// <param name="dst">Destination image.</param>
/// <param name="kernel_size">Kernel window size.</param>
/// <param name="sigma_color">Filter sigma in the color space.</param>
/// <param name="sigma_spatial">Filter sigma in the coordinate space.</param>
/// <param name="borderMode">Border type. See borderInterpolate for details. BORDER_REFLECT101,
/// BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT and BORDER_WRAP are supported for now.</param>
/// <param name="stream">Stream for the asynchronous version.</param>
public static void bilateralFilter(InputArray src, OutputArray dst, int kernel_size,
    float sigma_color, float sigma_spatial,
    BorderTypes borderMode = BorderTypes.Default, Stream stream = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));

    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    // A null stream falls back to the default (synchronous) CUDA stream.
    NativeMethods.cuda_imgproc_bilateralFilter(
        src.CvPtr, dst.CvPtr, kernel_size, sigma_color, sigma_spatial,
        (int)borderMode, stream?.CvPtr ?? Stream.Null.CvPtr);

    // Keep the managed wrappers alive until the native call has completed.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    dst.Fix();
}
/// <summary>
/// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
/// </summary>
/// <param name="img">8-bit input image.</param>
/// <param name="pyramid">output pyramid.</param>
/// <param name="winSize">window size of optical flow algorithm.
/// Must be not less than winSize argument of calcOpticalFlowPyrLK().
/// It is needed to calculate required padding for pyramid levels.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="withDerivatives">set to precompute gradients for the every pyramid level.
/// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will
/// calculate them internally.</param>
/// <param name="pyrBorder">the border mode for pyramid layers.</param>
/// <param name="derivBorder">the border mode for gradients.</param>
/// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible.
/// You can pass false to force data copying.</param>
/// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
public static int BuildOpticalFlowPyramid(
    InputArray img, OutputArray pyramid, Size winSize, int maxLevel,
    bool withDerivatives = true,
    BorderTypes pyrBorder = BorderTypes.Reflect101,
    BorderTypes derivBorder = BorderTypes.Constant,
    bool tryReuseInputImage = true)
{
    // Use nameof instead of string literals so renames stay refactor-safe.
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    if (pyramid == null)
        throw new ArgumentNullException(nameof(pyramid));
    img.ThrowIfDisposed();
    pyramid.ThrowIfNotReady();

    int result = NativeMethods.video_buildOpticalFlowPyramid1(
        img.CvPtr, pyramid.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0,
        (int)pyrBorder, (int)derivBorder, tryReuseInputImage ? 1 : 0);
    pyramid.Fix();
    // Keep the managed wrapper alive until the native call has completed
    // (the sibling overload already does this; the original omitted it here).
    GC.KeepAlive(img);
    return result;
}
/// <summary>
/// Create pointer to the Ridge detection filter.
/// </summary>
/// <param name="ddepth">Specifies output image depth. Default is CV_32FC1</param>
/// <param name="dx">Order of derivative x, default is 1</param>
/// <param name="dy">Order of derivative y, default is 1</param>
/// <param name="ksize">Sobel kernel size , default is 3</param>
/// <param name="outDtype">Converted format for output, default is CV_8UC1</param>
/// <param name="scale">Optional scale value for derivative values, default is 1</param>
/// <param name="delta">Optional bias added to output, default is 0</param>
/// <param name="borderType">Pixel extrapolation method, default is BORDER_DEFAULT</param>
/// <returns></returns>
public static RidgeDetectionFilter Create(
    MatType? ddepth = null,
    int dx = 1,
    int dy = 1,
    int ksize = 3,
    MatType? outDtype = null,
    double scale = 1,
    double delta = 0,
    BorderTypes borderType = BorderTypes.Default)
{
    // Null depth arguments fall back to the documented defaults.
    var depth = ddepth ?? MatType.CV_32FC1;
    var outputDepth = outDtype ?? MatType.CV_8UC1;

    NativeMethods.HandleException(
        NativeMethods.ximgproc_RidgeDetectionFilter_create(
            depth, dx, dy, ksize, outputDepth, scale, delta, (int)borderType, out var ptr));

    return new RidgeDetectionFilter(ptr);
}
/// <summary>
/// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
/// </summary>
/// <param name="img">8-bit input image.</param>
/// <param name="pyramid">output pyramid.</param>
/// <param name="winSize">window size of optical flow algorithm.
/// Must be not less than winSize argument of calcOpticalFlowPyrLK().
/// It is needed to calculate required padding for pyramid levels.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="withDerivatives">set to precompute gradients for the every pyramid level.
/// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will
/// calculate them internally.</param>
/// <param name="pyrBorder">the border mode for pyramid layers.</param>
/// <param name="derivBorder">the border mode for gradients.</param>
/// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible.
/// You can pass false to force data copying.</param>
/// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
public static int BuildOpticalFlowPyramid(
    InputArray img, out Mat[] pyramid, Size winSize, int maxLevel,
    bool withDerivatives = true,
    BorderTypes pyrBorder = BorderTypes.Reflect101,
    BorderTypes derivBorder = BorderTypes.Constant,
    bool tryReuseInputImage = true)
{
    if (img == null)
    {
        throw new ArgumentNullException(nameof(img));
    }
    img.ThrowIfDisposed();

    using (var pyramidVec = new VectorOfMat())
    {
        int result = NativeMethods.video_buildOpticalFlowPyramid2(
            img.CvPtr, pyramidVec.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0,
            (int)pyrBorder, (int)derivBorder, tryReuseInputImage ? 1 : 0);
        // Keep the managed wrapper alive until the native call has completed
        // (the sibling overload already does this; the original omitted it here).
        GC.KeepAlive(img);
        // Copy the native vector into a managed array before the vector is disposed.
        pyramid = pyramidVec.ToArray();
        return result;
    }
}
// P/Invoke binding for the native sqrBoxFilter wrapper; 'normalize' is
// marshalled as an int (0/1) for C interop, and failures are reported
// through the returned ExceptionStatus.
public static extern ExceptionStatus imgproc_sqrBoxFilter(IntPtr src, IntPtr dst, int ddepth, Size ksize, Point anchor, int normalize, BorderTypes borderType);
/// <summary>
/// Blurs an image using a Gaussian filter.
/// </summary>
/// <param name="src">input image; the image can have any number of channels, which are processed independently,
/// but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.</param>
/// <param name="dst">output image of the same size and type as src.</param>
/// <param name="ksize">Gaussian kernel size. ksize.width and ksize.height can differ but they both must be positive and odd.
/// Or, they can be zero’s and then they are computed from sigma* .</param>
/// <param name="sigmaX">Gaussian kernel standard deviation in X direction.</param>
/// <param name="sigmaY">Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be equal to sigmaX,
/// if both sigmas are zeros, they are computed from ksize.width and ksize.height,
/// respectively (see getGaussianKernel() for details); to fully control the result
/// regardless of possible future modifications of all this semantics, it is recommended to specify all of ksize, sigmaX, and sigmaY.</param>
/// <param name="borderType">pixel extrapolation method</param>
public static void GaussianBlur(InputArray src, OutputArray dst, Size ksize,
    double sigmaX, double sigmaY = 0, BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));

    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_GaussianBlur(
        src.CvPtr, dst.CvPtr, ksize, sigmaX, sigmaY, (int)borderType);

    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// 指定の構造要素を用いて画像の収縮を行います.
/// </summary>
/// <param name="element">収縮に用いられる構造要素. element=new Mat() の場合, 3x3 の矩形の構造要素が用いられます</param>
/// <param name="anchor">構造要素内のアンカー位置.デフォルト値の (-1, -1) は,アンカーが構造要素の中心にあることを意味します</param>
/// <param name="iterations">収縮が行われる回数. [既定値は1]</param>
/// <param name="borderType">ピクセル外挿手法.[既定値はBorderTypes.Constant]</param>
/// <param name="borderValue">定数境界モードで利用されるピクセル値.デフォルト値は特別な意味を持ちます.[既定値はCvCpp.MorphologyDefaultBorderValue()]</param>
/// <returns>src と同じサイズ,同じ型の出力画像</returns>
#else
/// <summary>
/// Erodes an image by using a specific structuring element.
/// </summary>
/// <param name="element">The structuring element used for dilation. If element=new Mat(), a 3x3 rectangular structuring element is used</param>
/// <param name="anchor">Position of the anchor within the element. The default value (-1, -1) means that the anchor is at the element center</param>
/// <param name="iterations">The number of times erosion is applied</param>
/// <param name="borderType">The pixel extrapolation method</param>
/// <param name="borderValue">The border value in case of a constant border. The default value has a special meaning. [By default this is CvCpp.MorphologyDefaultBorderValue()]</param>
/// <returns>The destination image. It will have the same size and the same type as src</returns>
#endif
public Mat Erode(InputArray element, Point? anchor = null, int iterations = 1,
    BorderTypes borderType = BorderTypes.Constant, Scalar? borderValue = null)
{
    // Allocate a fresh destination; Cv2.Erode fills it with the eroded result
    // of this Mat, leaving this instance unmodified.
    var dst = new Mat();
    Cv2.Erode(this, dst, element, anchor, iterations, borderType, borderValue);
    return dst;
}
// P/Invoke binding for the native GaussianBlur wrapper; failures are reported
// through the returned ExceptionStatus.
public static extern ExceptionStatus imgproc_GaussianBlur(IntPtr src, IntPtr dst, Size ksize, double sigmaX, double sigmaY, BorderTypes borderType);
/// <summary>
/// Applies a generic geometrical transformation to an image via the native
/// au_cv_imgproc_remap2 binding (no explicit border value).
/// </summary>
public static void Remap(Core.Mat src, Core.Mat dst, Core.Mat map1, Core.Mat map2,
    InterpolationFlags interpolation, BorderTypes borderType)
{
    // The native side reports failures through this exception holder.
    var nativeError = new Core.Exception();
    au_cv_imgproc_remap2(
        src.cppPtr, dst.cppPtr, map1.cppPtr, map2.cppPtr,
        (int)interpolation, (int)borderType,
        nativeError.cppPtr);
    nativeError.Check();
}
/// <summary>
/// Calculates the first x- or y- image derivative using Scharr operator
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same size and the same number of channels as src</param>
/// <param name="ddepth">The destination image depth</param>
/// <param name="xorder">Order of the derivative x</param>
/// <param name="yorder">Order of the derivative y</param>
/// <param name="scale">The optional scale factor for the computed derivative values (by default, no scaling is applied)</param>
/// <param name="delta">The optional delta value, added to the results prior to storing them in dst</param>
/// <param name="borderType">The pixel extrapolation method</param>
public static void Scharr(
    InputArray src, OutputArray dst, MatType ddepth, int xorder, int yorder,
    double scale = 1, double delta = 0, BorderTypes borderType = BorderTypes.Default)
{
    // Use nameof instead of string literals so renames stay refactor-safe.
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_Scharr(src.CvPtr, dst.CvPtr, ddepth, xorder, yorder,
        scale, delta, (int)borderType);
    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// Creates implementation for the minimum eigen value of a 2x2 derivative covariation matrix (the
/// cornerness criteria).
/// </summary>
/// <param name="srcType">Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.</param>
/// <param name="blockSize">Neighborhood size.</param>
/// <param name="ksize">Aperture parameter for the Sobel operator.</param>
/// <param name="borderType">Pixel extrapolation method. Only BORDER_REFLECT101 and BORDER_REPLICATE are
/// supported for now.</param>
/// <returns>A CornernessCriteria wrapping the native implementation.</returns>
public static CornernessCriteria createMinEigenValCorner(int srcType, int blockSize, int ksize,
    BorderTypes borderType = BorderTypes.Reflect101)
{
    IntPtr nativeHandle = NativeMethods.cuda_imgproc_createMinEigenValCorner(
        srcType, blockSize, ksize, (int)borderType);
    return new CornernessCriteria(nativeHandle);
}
/*
/// <summary>
/// Applies the adaptive bilateral filter to an image.
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same size and the same type as src</param>
/// <param name="ksize">The kernel size. This is the neighborhood where the local variance will be calculated,
/// and where pixels will contribute (in a weighted manner).</param>
/// <param name="sigmaSpace">Filter sigma in the coordinate space.
/// Larger value of the parameter means that farther pixels will influence each other
/// (as long as their colors are close enough; see sigmaColor). Then d>0, it specifies the neighborhood
/// size regardless of sigmaSpace, otherwise d is proportional to sigmaSpace.</param>
/// <param name="maxSigmaColor">Maximum allowed sigma color (will clamp the value calculated in the
/// ksize neighborhood. Larger value of the parameter means that more dissimilar pixels will
/// influence each other (as long as their colors are close enough; see sigmaColor).
/// Then d>0, it specifies the neighborhood size regardless of sigmaSpace, otherwise d is proportional to sigmaSpace.</param>
/// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
/// <param name="borderType">Pixel extrapolation method.</param>
public static void AdaptiveBilateralFilter(InputArray src, OutputArray dst, Size ksize,
    double sigmaSpace, double maxSigmaColor = 20.0, Point? anchor = null, BorderType borderType = BorderType.Default)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (dst == null)
        throw new ArgumentNullException("dst");
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();
    Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
    NativeMethods.imgproc_adaptiveBilateralFilter(src.CvPtr, dst.CvPtr, ksize,
        sigmaSpace, maxSigmaColor, anchor0, (int)borderType);
    dst.Fix();
}
*/
#endregion

#region BoxFilter
/// <summary>
/// Smoothes image using box filter
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same size and the same type as src</param>
/// <param name="ddepth"></param>
/// <param name="ksize">The smoothing kernel size</param>
/// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
/// <param name="normalize">Indicates, whether the kernel is normalized by its area or not</param>
/// <param name="borderType">The border mode used to extrapolate pixels outside of the image</param>
public static void BoxFilter(
    InputArray src, OutputArray dst, MatType ddepth,
    Size ksize, Point? anchor = null, bool normalize = true,
    BorderTypes borderType = BorderTypes.Default)
{
    // Use nameof instead of string literals so renames stay refactor-safe.
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    // A null anchor means the kernel center.
    Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
    NativeMethods.imgproc_boxFilter(src.CvPtr, dst.CvPtr, ddepth, ksize, anchor0,
        normalize ? 1 : 0, (int)borderType);
    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// Creates a balloon border of the given type, drawn with a solid black brush.
/// </summary>
/// <param name="type">The border type to render.</param>
public BalloonBorder(BorderTypes type)
{
    Type = type;
    line.Brush = new SolidBrush(Color.Black);
}
/// <summary>
/// Upsamples an image and then blurs it.
/// </summary>
/// <param name="src">input image.</param>
/// <param name="dst">output image. It has the specified size and the same type as src.</param>
/// <param name="dstSize">size of the output image; by default, it is computed as Size(src.cols*2, (src.rows*2)</param>
/// <param name="borderType">The border mode used to extrapolate pixels outside of the image.</param>
public static void PyrUp(InputArray src, OutputArray dst, Size? dstSize = null,
    BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));

    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    // An empty size tells the native side to use the default (2x) upscaling.
    Size outSize = dstSize.GetValueOrDefault(new Size());
    NativeMethods.imgproc_pyrUp(src.CvPtr, dst.CvPtr, outSize, (int)borderType);

    // Keep the managed wrapper alive until the native call has completed.
    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// Applies a generic geometrical transformation to an image.
/// </summary>
/// <param name="src">Source image.</param>
/// <param name="dst">Destination image. It has the same size as map1 and the same type as src</param>
/// <param name="map1">The first map of either (x,y) points or just x values having the type CV_16SC2, CV_32FC1, or CV_32FC2.</param>
/// <param name="map2">The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively.</param>
/// <param name="interpolation">Interpolation method. The method INTER_AREA is not supported by this function.</param>
/// <param name="borderMode">Pixel extrapolation method. When borderMode=BORDER_TRANSPARENT,
/// it means that the pixels in the destination image that corresponds to the "outliers" in
/// the source image are not modified by the function.</param>
/// <param name="borderValue">Value used in case of a constant border. By default, it is 0.</param>
public static void Remap(
    InputArray src, OutputArray dst, InputArray map1, InputArray map2,
    InterpolationFlags interpolation = InterpolationFlags.Linear,
    BorderTypes borderMode = BorderTypes.Constant, Scalar? borderValue = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    if (map1 == null)
        throw new ArgumentNullException(nameof(map1));
    if (map2 == null)
        throw new ArgumentNullException(nameof(map2));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();
    map1.ThrowIfDisposed();
    map2.ThrowIfDisposed();

    Scalar borderValue0 = borderValue.GetValueOrDefault(Scalar.All(0));
    NativeMethods.imgproc_remap(src.CvPtr, dst.CvPtr, map1.CvPtr, map2.CvPtr,
        (int)interpolation, (int)borderMode, borderValue0);

    // BUGFIX: map1, map2 and dst also hand their raw pointers to native code,
    // so they must be kept alive across the call as well - previously only
    // src was, allowing the GC to finalize the maps mid-call.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    GC.KeepAlive(map1);
    GC.KeepAlive(map2);
    dst.Fix();
}
/// <summary>
/// computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix.
/// </summary>
/// <param name="src">Input image</param>
/// <param name="dst">Output 6-channel image holding eigenvalues and eigenvectors per pixel</param>
/// <param name="blockSize">Neighborhood (block) size</param>
/// <param name="ksize">Aperture size for the derivative filter</param>
/// <param name="borderType">Pixel extrapolation method</param>
public static void CornerEigenValsAndVecs(
    InputArray src, OutputArray dst, int blockSize, int ksize,
    BorderTypes borderType = BorderTypes.Default)
{
    // Consistency fix: use nameof() instead of "src"/"dst" string literals,
    // matching the other wrappers in this file.
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_cornerEigenValsAndVecs(src.CvPtr, dst.CvPtr, blockSize, ksize, (int)borderType);

    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// Upsamples this image and then blurs it.
/// </summary>
/// <param name="dstSize">size of the output image; by default, it is computed as Size(src.cols*2, (src.rows*2)</param>
/// <param name="borderType">pixel extrapolation method.</param>
/// <returns>Newly allocated output image.</returns>
public Mat PyrUp(Size? dstSize = null, BorderTypes borderType = BorderTypes.Default)
{
    var result = new Mat();
    Cv2.PyrUp(this, result, dstSize, borderType);
    return result;
}
/// <summary>
/// Calculates the Laplacian of this image.
/// </summary>
/// <param name="ddepth">The desired depth of the destination image</param>
/// <param name="ksize">The aperture size used to compute the second-derivative filters</param>
/// <param name="scale">The optional scale factor for the computed Laplacian values (by default, no scaling is applied</param>
/// <param name="delta">The optional delta value, added to the results prior to storing them in dst</param>
/// <param name="borderType">The pixel extrapolation method</param>
/// <returns>Destination image; will have the same size and the same number of channels as src</returns>
public Mat Laplacian(MatType ddepth, int ksize = 1, double scale = 1, double delta = 0, BorderTypes borderType = BorderTypes.Default)
{
    var result = new Mat();
    Cv2.Laplacian(this, result, ddepth, ksize, scale, delta, borderType);
    return result;
}
/// <summary>
/// Applies the bilateral filter to an image.
/// </summary>
/// <param name="src">The source 8-bit or floating-point, 1-channel or 3-channel image</param>
/// <param name="dst">The destination image; will have the same size and the same type as src</param>
/// <param name="d">The diameter of each pixel neighborhood, that is used during filtering.
/// If it is non-positive, it's computed from sigmaSpace</param>
/// <param name="sigmaColor">Filter sigma in the color space.
/// Larger value of the parameter means that farther colors within the pixel neighborhood
/// will be mixed together, resulting in larger areas of semi-equal color</param>
/// <param name="sigmaSpace">Filter sigma in the coordinate space.
/// Larger value of the parameter means that farther pixels will influence each other
/// (as long as their colors are close enough; see sigmaColor). Then d>0 , it specifies
/// the neighborhood size regardless of sigmaSpace, otherwise d is proportional to sigmaSpace</param>
/// <param name="borderType">Pixel extrapolation method</param>
public static void BilateralFilter(
    InputArray src, OutputArray dst,
    int d, double sigmaColor, double sigmaSpace,
    BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_bilateralFilter(
        src.CvPtr, dst.CvPtr, d, sigmaColor, sigmaSpace, (int)borderType);

    GC.KeepAlive(src);
    dst.Fix();
}
// P/Invoke binding for the native blur wrapper: smoothes the image behind
// 'src' into 'dst' with a box filter of size 'kSize' anchored at 'anchor',
// using 'borderType' to extrapolate pixels outside the image.
// NOTE(review): the [DllImport] attribute is declared outside this chunk - confirm.
internal static extern void imgproc_blur(IntPtr src, IntPtr dst, Size kSize, Point anchor, BorderTypes borderType);
/// <summary>
/// Smoothes an image using the normalized box filter.
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same size and the same type as src</param>
/// <param name="ksize">The smoothing kernel size</param>
/// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
/// <param name="borderType">The border mode used to extrapolate pixels outside of the image</param>
public static void Blur(
    InputArray src, OutputArray dst, Size ksize,
    Point? anchor = null, BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    var anchorPoint = anchor ?? new Point(-1, -1);
    NativeMethods.imgproc_blur(src.CvPtr, dst.CvPtr, ksize, anchorPoint, (int)borderType);

    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// Applies separable linear filter to an image
/// </summary>
/// <param name="src">The source image</param>
/// <param name="dst">The destination image; will have the same size and the same number of channels as src</param>
/// <param name="ddepth">The destination image depth</param>
/// <param name="kernelX">The coefficients for filtering each row</param>
/// <param name="kernelY">The coefficients for filtering each column</param>
/// <param name="anchor">The anchor position within the kernel; The default value (-1, 1) means that the anchor is at the kernel center</param>
/// <param name="delta">The value added to the filtered results before storing them</param>
/// <param name="borderType">The pixel extrapolation method</param>
public static void SepFilter2D(
    InputArray src, OutputArray dst, MatType ddepth,
    InputArray kernelX, InputArray kernelY,
    Point? anchor = null, double delta = 0,
    BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    if (kernelX == null)
        throw new ArgumentNullException(nameof(kernelX));
    if (kernelY == null)
        throw new ArgumentNullException(nameof(kernelY));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();
    kernelX.ThrowIfDisposed();
    kernelY.ThrowIfDisposed();

    Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
    NativeMethods.imgproc_sepFilter2D(src.CvPtr, dst.CvPtr, ddepth,
        kernelX.CvPtr, kernelY.CvPtr, anchor0, delta, (int)borderType);

    // BUGFIX: kernelX, kernelY and dst also hand their raw pointers to native
    // code, so they must be kept alive across the call - previously only src
    // was, allowing the GC to finalize the kernels mid-call.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    GC.KeepAlive(kernelX);
    GC.KeepAlive(kernelY);
    dst.Fix();
}
/// <summary>
/// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
/// </summary>
/// <param name="img">8-bit input image.</param>
/// <param name="pyramid">output pyramid.</param>
/// <param name="winSize">window size of optical flow algorithm.
/// Must be not less than winSize argument of calcOpticalFlowPyrLK().
/// It is needed to calculate required padding for pyramid levels.</param>
/// <param name="maxLevel">0-based maximal pyramid level number.</param>
/// <param name="withDerivatives">set to precompute gradients for the every pyramid level.
/// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will
/// calculate them internally.</param>
/// <param name="pyrBorder">the border mode for pyramid layers.</param>
/// <param name="derivBorder">the border mode for gradients.</param>
/// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible.
/// You can pass false to force data copying.</param>
/// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
public static int BuildOpticalFlowPyramid(
    InputArray img, out Mat[] pyramid,
    Size winSize, int maxLevel, bool withDerivatives = true,
    BorderTypes pyrBorder = BorderTypes.Reflect101,
    BorderTypes derivBorder = BorderTypes.Constant,
    bool tryReuseInputImage = true)
{
    // Consistency fix: nameof() instead of the "img" string literal.
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    img.ThrowIfDisposed();

    using (var pyramidVec = new VectorOfMat())
    {
        int result = NativeMethods.video_buildOpticalFlowPyramid2(
            img.CvPtr, pyramidVec.CvPtr, winSize, maxLevel,
            withDerivatives ? 1 : 0, (int)pyrBorder, (int)derivBorder,
            tryReuseInputImage ? 1 : 0);
        // Keep img alive until the native call that consumed its CvPtr returns.
        GC.KeepAlive(img);
        pyramid = pyramidVec.ToArray();
        return result;
    }
}
/// <summary>
/// Calculates the Laplacian of an image.
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image; will have the same size and the same number of channels as src</param>
/// <param name="ddepth">The desired depth of the destination image</param>
/// <param name="ksize">The aperture size used to compute the second-derivative filters</param>
/// <param name="scale">The optional scale factor for the computed Laplacian values (by default, no scaling is applied</param>
/// <param name="delta">The optional delta value, added to the results prior to storing them in dst</param>
/// <param name="borderType">The pixel extrapolation method</param>
public static void Laplacian(
    InputArray src, OutputArray dst, MatType ddepth,
    int ksize = 1, double scale = 1, double delta = 0,
    BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_Laplacian(
        src.CvPtr, dst.CvPtr, ddepth, ksize, scale, delta, (int)borderType);

    GC.KeepAlive(src);
    dst.Fix();
}
/// <summary>
/// 高度なモルフォロジー変換を行います.
/// </summary>
/// <param name="op">モルフォロジー演算の種類</param>
/// <param name="element">構造要素</param>
/// <param name="anchor">構造要素内のアンカー位置.デフォルト値の (-1, -1) は,アンカーが構造要素の中心にあることを意味します.</param>
/// <param name="iterations">収縮と膨張が適用される回数. [既定値は1]</param>
/// <param name="borderType">ピクセル外挿手法. [既定値はBorderTypes.Constant]</param>
/// <param name="borderValue">定数境界モードで利用されるピクセル値.デフォルト値は特別な意味を持ちます. [既定値は CvCpp.MorphologyDefaultBorderValue()]</param>
/// <returns>src と同じサイズ,同じ型の出力画像</returns>
#else
/// <summary>
/// Performs advanced morphological transformations
/// </summary>
/// <param name="op">Type of morphological operation</param>
/// <param name="element">Structuring element</param>
/// <param name="anchor">Position of the anchor within the element. The default value (-1, -1) means that the anchor is at the element center</param>
/// <param name="iterations">Number of times erosion and dilation are applied. [By default this is 1]</param>
/// <param name="borderType">The pixel extrapolation method. [By default this is BorderTypes.Constant]</param>
/// <param name="borderValue">The border value in case of a constant border. The default value has a special meaning. [By default this is CvCpp.MorphologyDefaultBorderValue()]</param>
/// <returns>Destination image. It will have the same size and the same type as src</returns>
#endif
public Mat MorphologyEx(MorphTypes op, InputArray element, Point? anchor = null, int iterations = 1, BorderTypes borderType = BorderTypes.Constant, Scalar? borderValue = null)
{
    // Allocates a fresh destination Mat and delegates to the static
    // Cv2.MorphologyEx overload; the Japanese docs above belong to the
    // localized #if branch whose opener is outside this chunk.
    var dst = new Mat();
    Cv2.MorphologyEx(this, dst, op, element, anchor, iterations, borderType, borderValue);
    return dst;
}
/// <summary>
/// Applies a generic geometrical transformation to this image.
/// </summary>
/// <param name="map1">The first map of either (x,y) points or just x values having the type CV_16SC2, CV_32FC1, or CV_32FC2.</param>
/// <param name="map2">The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively.</param>
/// <param name="interpolation">Interpolation method. The method INTER_AREA is not supported by this function.</param>
/// <param name="borderMode">Pixel extrapolation method. When borderMode=BORDER_TRANSPARENT,
/// it means that the pixels in the destination image that corresponds to the "outliers" in
/// the source image are not modified by the function.</param>
/// <param name="borderValue">Value used in case of a constant border. By default, it is 0.</param>
/// <returns>Destination image. It has the same size as map1 and the same type as src</returns>
public Mat Remap(InputArray map1, InputArray map2, InterpolationFlags interpolation = InterpolationFlags.Linear, BorderTypes borderMode = BorderTypes.Constant, Scalar? borderValue = null)
{
    var result = new Mat();
    Cv2.Remap(this, result, map1, map2, interpolation, borderMode, borderValue);
    return result;
}
/// <summary>
/// Creates an obstacle associated with the given border.
/// </summary>
/// <param name="border">Border this obstacle belongs to.</param>
public Obstacle(BorderTypes border) { this.border = border; }
/// <summary>
/// 高度なモルフォロジー変換を行います.
/// </summary>
/// <param name="src">入力画像</param>
/// <param name="dst">src と同じサイズ,同じ型の出力画像</param>
/// <param name="op">モルフォロジー演算の種類</param>
/// <param name="element">構造要素</param>
/// <param name="anchor">構造要素内のアンカー位置.デフォルト値の (-1, -1) は,アンカーが構造要素の中心にあることを意味します.</param>
/// <param name="iterations">収縮と膨張が適用される回数. [既定値は1]</param>
/// <param name="borderType">ピクセル外挿手法. [既定値はBorderType.Constant]</param>
/// <param name="borderValue">定数境界モードで利用されるピクセル値.デフォルト値は特別な意味を持ちます. [既定値は CvCpp.MorphologyDefaultBorderValue()]</param>
#else
/// <summary>
/// Performs advanced morphological transformations
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image. It will have the same size and the same type as src</param>
/// <param name="op">Type of morphological operation</param>
/// <param name="element">Structuring element</param>
/// <param name="anchor">Position of the anchor within the element. The default value (-1, -1) means that the anchor is at the element center</param>
/// <param name="iterations">Number of times erosion and dilation are applied. [By default this is 1]</param>
/// <param name="borderType">The pixel extrapolation method. [By default this is BorderType.Constant]</param>
/// <param name="borderValue">The border value in case of a constant border. The default value has a special meaning. [By default this is CvCpp.MorphologyDefaultBorderValue()]</param>
#endif
public static void MorphologyEx(
    InputArray src, OutputArray dst, MorphTypes op, InputArray element,
    Point? anchor = null, int iterations = 1,
    BorderTypes borderType = BorderTypes.Constant, Scalar? borderValue = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
    Scalar borderValue0 = borderValue.GetValueOrDefault(MorphologyDefaultBorderValue());
    IntPtr elementPtr = ToPtr(element);
    NativeMethods.imgproc_morphologyEx(src.CvPtr, dst.CvPtr, (int)op, elementPtr,
        anchor0, iterations, (int)borderType, borderValue0);

    // BUGFIX: only element's raw pointer crosses into native code, so element
    // (and dst) must be kept alive for the duration of the call - previously
    // only src was, allowing the GC to finalize element mid-call.
    // GC.KeepAlive(null) is a harmless no-op when element is omitted.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    GC.KeepAlive(element);
    dst.Fix();
}
// P/Invoke binding for the native bilateral-filter wrapper: filters the image
// behind 'src' into 'dst' with neighborhood diameter 'd' and the given color /
// coordinate sigmas; returns the native-side exception status.
// NOTE(review): the [DllImport] attribute is declared outside this chunk - confirm.
public static extern ExceptionStatus imgproc_bilateralFilter(IntPtr src, IntPtr dst, int d, double sigmaColor, double sigmaSpace, BorderTypes borderType);
/// <summary>
/// 画像の透視変換を行います.
/// </summary>
/// <param name="src">入力画像</param>
/// <param name="dst">サイズが dsize で src と同じタイプの出力画像</param>
/// <param name="m">3x3 の変換行列</param>
/// <param name="dsize">出力画像のサイズ</param>
/// <param name="flags">補間手法</param>
/// <param name="borderMode">ピクセル外挿手法.
/// borderMode=BORDER_TRANSPARENT の場合,入力画像中の「はずれ値」に対応する
/// 出力画像中のピクセルが,この関数では変更されないことを意味します</param>
/// <param name="borderValue">定数境界モードで利用されるピクセル値.</param>
#else
/// <summary>
/// Applies a perspective transformation to an image.
/// </summary>
/// <param name="src">input image.</param>
/// <param name="dst">output image that has the size dsize and the same type as src.</param>
/// <param name="m">3x3 transformation matrix.</param>
/// <param name="dsize">size of the output image.</param>
/// <param name="flags">combination of interpolation methods (INTER_LINEAR or INTER_NEAREST)
/// and the optional flag WARP_INVERSE_MAP, that sets M as the inverse transformation (dst -> src).</param>
/// <param name="borderMode">pixel extrapolation method (BORDER_CONSTANT or BORDER_REPLICATE).</param>
/// <param name="borderValue">value used in case of a constant border; by default, it equals 0.</param>
#endif
public static void WarpPerspective(
    InputArray src, OutputArray dst, InputArray m, Size dsize,
    InterpolationFlags flags = InterpolationFlags.Linear,
    BorderTypes borderMode = BorderTypes.Constant,
    Scalar? borderValue = null)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    if (m == null)
        throw new ArgumentNullException(nameof(m));
    src.ThrowIfDisposed();
    // BUGFIX: dst is an OutputArray that is Fix()ed below, so it must be
    // validated with ThrowIfNotReady() like every sibling wrapper, not
    // ThrowIfDisposed().
    dst.ThrowIfNotReady();
    m.ThrowIfDisposed();

    Scalar borderValue0 = borderValue.GetValueOrDefault(Scalar.All(0));
    NativeMethods.imgproc_warpPerspective_MisInputArray(
        src.CvPtr, dst.CvPtr, m.CvPtr, dsize, (int)flags, (int)borderMode, borderValue0);

    // BUGFIX: m and dst also hand their raw pointers to native code and must
    // be kept alive across the call - previously only src was.
    GC.KeepAlive(src);
    GC.KeepAlive(dst);
    GC.KeepAlive(m);
    dst.Fix();
}
/// <summary>
/// computes another complex cornerness criteria at each pixel
/// </summary>
/// <param name="ksize">Aperture size</param>
/// <param name="borderType">Pixel extrapolation method</param>
/// <returns>Newly allocated destination image.</returns>
public Mat PreCornerDetect(int ksize, BorderTypes borderType = BorderTypes.Default)
{
    var result = new Mat();
    Cv2.PreCornerDetect(this, result, ksize, borderType);
    return result;
}
/// <summary>
/// Demonstrates Mat.Laplacian: converts the shared source image to grayscale,
/// applies the Laplacian operator, and draws source / grayscale / result.
/// </summary>
public static void Laplacian(int ksize = 1, double scale = 1, double delta = 0, BorderTypes borderType = BorderTypes.Replicate)
{
    Glb.DrawMatAndHist0(Glb.matSrc);

    var gray = Glb.matSrc.CvtColor(ColorConversionCodes.BGR2GRAY);
    Glb.DrawMatAndHist1(gray);

    var laplacian = gray.Laplacian(gray.Type(), ksize, scale, delta, borderType);
    Glb.DrawMatAndHist2(laplacian);

    // Release the intermediate images explicitly.
    gray.Dispose();
    laplacian.Dispose();
}
/// <summary>
/// Applies a perspective transformation to this image.
/// </summary>
/// <param name="m">3x3 transformation matrix.</param>
/// <param name="dsize">size of the output image.</param>
/// <param name="flags">combination of interpolation methods (INTER_LINEAR or INTER_NEAREST)
/// and the optional flag WARP_INVERSE_MAP, that sets M as the inverse transformation (dst -> src).</param>
/// <param name="borderMode">pixel extrapolation method (BORDER_CONSTANT or BORDER_REPLICATE).</param>
/// <param name="borderValue">value used in case of a constant border; by default, it equals 0.</param>
/// <returns>output image that has the size dsize and the same type as src.</returns>
public Mat WarpPerspective(Mat m, Size dsize, InterpolationFlags flags = InterpolationFlags.Linear, BorderTypes borderMode = BorderTypes.Constant, Scalar? borderValue = null)
{
    var result = new Mat();
    Cv2.WarpPerspective(this, result, m, dsize, flags, borderMode, borderValue);
    return result;
}
/// <summary>
/// computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix.
/// </summary>
/// <param name="blockSize">Neighborhood (block) size</param>
/// <param name="ksize">Aperture size for the derivative filter</param>
/// <returns>Newly allocated 6-channel destination image.</returns>
public Mat CornerEigenValsAndVecs(int blockSize, int ksize, BorderTypes borderType = BorderTypes.Default)
{
    var result = new Mat();
    Cv2.CornerEigenValsAndVecs(this, result, blockSize, ksize, borderType);
    return result;
}
/// <summary>
/// Sets the border on the Excel cell - does not support piece-meal borders, even though NPOI does
/// </summary>
/// <param name="row">0-based row number</param>
/// <param name="column">0-based column number</param>
/// <param name="borderType">Type of border</param>
public void SetBorder(int row, int column, BorderTypes borderType)
{
    this.initializeCell(row, column);
    _currentCell.Style.BorderType = borderType;

    // All four edges are toggled together; individual edges are not supported.
    if (borderType != BorderTypes.None)
    {
        _currentCell.Style.BorderTop = true;
        _currentCell.Style.BorderBottom = true;
        _currentCell.Style.BorderLeft = true;
        _currentCell.Style.BorderRight = true;
    }

    _currentCell.WriteCellValue();
}
// 6. Gaussian smoothing (input image is expected to be grayscale).
// BUGFIX: sigmaX was passed for both sigma arguments, so the sigmaY parameter
// was silently ignored; the vertical sigma is now forwarded correctly.
public static Mat GaussianMethod(Mat inMat, Mat outMat, int a, int b, double sigmaX, double sigmaY, BorderTypes borderType)
{
    Cv2.GaussianBlur(inMat, outMat, new Size(a, b), sigmaX, sigmaY, borderType);
    return outMat;
}
// 2. Color image enhancement based on the Laplacian operator.
//    Parameters: input image, output image, destination depth, aperture size
//    of the second-derivative filter, scale factor, delta added before storing
//    the result, and the border extrapolation mode.
public static Mat LaplacianMethod(Mat inMat, Mat outMat, int ddept, int ksize, double scale, double delta, BorderTypes borderType)
{
    Cv2.Laplacian(inMat, outMat, ddept, ksize, scale, delta, borderType);
    return outMat;
}
/// <summary>
/// computes another complex cornerness criteria at each pixel
/// </summary>
/// <param name="src">Input image</param>
/// <param name="dst">Output image</param>
/// <param name="ksize">Aperture size</param>
/// <param name="borderType">Pixel extrapolation method</param>
public static void PreCornerDetect(
    InputArray src, OutputArray dst, int ksize,
    BorderTypes borderType = BorderTypes.Default)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfNotReady();

    NativeMethods.imgproc_preCornerDetect(src.CvPtr, dst.CvPtr, ksize, (int)borderType);

    GC.KeepAlive(src);
    dst.Fix();
}