Code Example #1
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
        /// <return>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</return>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
        /// <returns>Detected contours. Each contour is stored as a vector of points.</returns>
#endif
        public static MatOfPoint[] FindContoursAsMat(InputOutputArray image, 
            ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours2_OutputArray(image.CvPtr, out contoursPtr, (int)mode, (int)method, offset0);
            image.Fix();

            using (var contoursVec = new VectorOfMat(contoursPtr))
            {
                return contoursVec.ToArray<MatOfPoint>();
            }
        }
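
A minimal usage sketch (not from the library itself): it assumes the OpenCvSharp 2.x-style API this listing comes from (using OpenCvSharp; using OpenCvSharp.CPlusPlus; using System;) and that enum members such as ContourRetrieval.External and ContourChain.ApproxSimple exist in that build. The binary input is synthesized so the snippet stays self-contained.

        // Build a simple binary test image: black background, one white square.
        using (var bin = new Mat(200, 200, MatType.CV_8UC1, Scalar.All(0)))
        {
            Cv2.Rectangle(bin, new Rect(50, 50, 100, 100), Scalar.All(255), -1);

            // FindContoursAsMat modifies its input, so pass a throwaway copy
            // if the original image is still needed afterwards.
            MatOfPoint[] contours = Cv2.FindContoursAsMat(
                bin, ContourRetrieval.External, ContourChain.ApproxSimple);

            foreach (MatOfPoint contour in contours)
                Console.WriteLine("contour with {0} points", contour.Rows);
        }
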
Code Example #2
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
        /// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
        /// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の 
        /// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
        /// それに対応する hierarchy[i] の要素は,負の値になります.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
        /// <param name="hierarchy">Optional output vector, containing information about the image topology. 
        /// It has as many elements as the number of contours. For each i-th contour contours[i], 
        /// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next 
        /// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. 
        /// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
        public static void FindContours(InputOutputArray image, out Point[][] contours,
            out HierarchyIndex[] hierarchy, ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr, hierarchyPtr;
            NativeMethods.imgproc_findContours1_vector(image.CvPtr, out contoursPtr, out hierarchyPtr, (int)mode, (int)method, offset0);

            using (var contoursVec = new VectorOfVectorPoint(contoursPtr))
            using (var hierarchyVec = new VectorOfVec4i(hierarchyPtr))
            {
                contours = contoursVec.ToArray();
                Vec4i[] hierarchyOrg = hierarchyVec.ToArray();
                hierarchy = EnumerableEx.SelectToArray(hierarchyOrg, HierarchyIndex.FromVec4i);
            }
            image.Fix();
        }
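
A usage sketch for this overload, under the same assumptions as the sketch after Code Example #1 (bin is a binary 8-bit single-channel Mat like the one built there); the hierarchy array is used to keep only top-level contours.

        Point[][] contours;
        HierarchyIndex[] hierarchy;
        Cv2.FindContours(bin, out contours, out hierarchy,
            ContourRetrieval.Tree, ContourChain.ApproxSimple);

        for (int i = 0; i < contours.Length; i++)
        {
            // Parent is negative for contours with no enclosing contour.
            if (hierarchy[i].Parent < 0)
                Console.WriteLine("top-level contour {0}: {1} points", i, contours[i].Length);
        }
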
Code Example #3
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
        /// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
        /// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の 
        /// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
        /// それに対応する hierarchy[i] の要素は,負の値になります.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
        /// <param name="hierarchy">Optional output vector, containing information about the image topology. 
        /// It has as many elements as the number of contours. For each i-th contour contours[i], 
        /// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next 
        /// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. 
        /// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
        public static void FindContours(InputOutputArray image, out Mat[] contours,
            OutputArray hierarchy, ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (hierarchy == null)
                throw new ArgumentNullException("hierarchy");
            image.ThrowIfNotReady();
            hierarchy.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours1_OutputArray(image.CvPtr, out contoursPtr, hierarchy.CvPtr, (int)mode, (int)method, offset0);

            using (var contoursVec = new VectorOfMat(contoursPtr))
            {
                contours = contoursVec.ToArray();
            }
            image.Fix();
            hierarchy.Fix();
        }
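
A usage sketch for the Mat-based overload, same assumptions as above; the hierarchy is received through the implicit Mat-to-OutputArray conversion.

        Mat[] matContours;
        using (var hierarchy = new Mat())
        {
            Cv2.FindContours(bin, out matContours, hierarchy,
                ContourRetrieval.CComp, ContourChain.ApproxSimple);

            // hierarchy now holds one Vec4i entry per contour; each element of
            // matContours is an Nx1 Mat of points.
            Console.WriteLine("{0} contours, {1} hierarchy entries",
                matContours.Length, hierarchy.Total());
        }
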
Code Example #4
File: Cv2_gpu.cs Project: 0sv/opencvsharp
        /// <summary>
        /// Dilates an image by using a specific structuring element.
        /// </summary>
        /// <param name="src">Source image. Only CV_8UC1 and CV_8UC4 types are supported.</param>
        /// <param name="dst">Destination image with the same size and type as src.</param>
        /// <param name="kernel">Structuring element used for dilation. If kernel=Mat(), 
        /// a 3x3 rectangular structuring element is used.</param>
        /// <param name="anchor">Position of an anchor within the element. 
        /// The default value (-1, -1) means that the anchor is at the element center.</param>
        /// <param name="iterations">Number of times dilation is applied.</param>
        public static void Dilate(
            GpuMat src, GpuMat dst, Mat kernel, Point? anchor = null, int iterations = 1)
        {
            ThrowIfGpuNotAvailable();
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (kernel == null)
                throw new ArgumentNullException("kernel");
            src.ThrowIfDisposed();
            dst.ThrowIfDisposed();
            kernel.ThrowIfDisposed();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.gpu_dilate1(src.CvPtr, dst.CvPtr, kernel.CvPtr, anchor0, iterations);
        }
Code Example #5
 /// <summary>
 /// Smoothes image using normalized box filter
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same size and the same type as src</param>
 /// <param name="ksize">The smoothing kernel size</param>
 /// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
 /// <param name="borderType">The border mode used to extrapolate pixels outside of the image</param>
 public static void Blur(InputArray src, OutputArray dst, Size ksize, 
     Point? anchor = null, BorderType borderType = BorderType.Default)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
     NativeMethods.imgproc_blur(src.CvPtr, dst.CvPtr, ksize, anchor0, (int)borderType);
     dst.Fix();
 }
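
A minimal sketch of a Blur call, assuming using OpenCvSharp (and OpenCvSharp.CPlusPlus for 2.x builds); the source image is synthesized rather than loaded from disk.

        using (var src = new Mat(240, 320, MatType.CV_8UC3, Scalar.All(128)))
        using (var dst = new Mat())
        {
            Cv2.Circle(src, new Point(160, 120), 50, new Scalar(0, 0, 255), -1);

            // 5x5 normalized box filter; the anchor defaults to the kernel center.
            Cv2.Blur(src, dst, new Size(5, 5));
        }
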
Code Example #6
 /// <summary>
 /// Applies separable linear filter to an image
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same size and the same number of channels as src</param>
 /// <param name="ddepth">The destination image depth</param>
 /// <param name="kernelX">The coefficients for filtering each row</param>
 /// <param name="kernelY">The coefficients for filtering each column</param>
 /// <param name="anchor">The anchor position within the kernel. The default value (-1, -1) means that the anchor is at the kernel center</param>
 /// <param name="delta">The value added to the filtered results before storing them</param>
 /// <param name="borderType">The pixel extrapolation method</param>
 public static void SepFilter2D(InputArray src, OutputArray dst, MatType ddepth, InputArray kernelX, InputArray kernelY,
     Point? anchor = null, double delta = 0, BorderType borderType = BorderType.Default)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     if (kernelX == null)
         throw new ArgumentNullException("kernelX");
     if (kernelY == null)
         throw new ArgumentNullException("kernelY");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     kernelX.ThrowIfDisposed();
     kernelY.ThrowIfDisposed();
     Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
     NativeMethods.imgproc_sepFilter2D(src.CvPtr, dst.CvPtr, ddepth, 
         kernelX.CvPtr, kernelY.CvPtr, anchor0, delta, (int)borderType);
     dst.Fix();
 }
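
A sketch for SepFilter2D, under the same assumptions as above: the single 5x1 kernel below, applied as both the row and the column kernel, gives a separable 5x5 box filter, which keeps the example free of any extra kernel-building helpers.

        // One 5x1 kernel of 1/5 coefficients, used along both rows and columns.
        using (var kernel1D = new Mat(5, 1, MatType.CV_32FC1, new Scalar(1.0 / 5)))
        using (var src = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(64)))
        using (var dst = new Mat())
        {
            Cv2.SepFilter2D(src, dst, MatType.CV_8U, kernel1D, kernel1D);
        }
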
Code Example #7
        /// <summary>
        /// set template to search
        /// </summary>
        /// <param name="templ"></param>
        /// <param name="templCenter"></param>
        public void SetTemplate(InputArray templ, Point? templCenter = null)
        {
            if (ptr == IntPtr.Zero)
                throw new ObjectDisposedException(GetType().Name);
            if (templ == null)
                throw new ArgumentNullException(nameof(templ));
            templ.ThrowIfDisposed();
            var templCenterValue = templCenter.GetValueOrDefault(new Point(-1, -1));

            NativeMethods.imgproc_GeneralizedHough_setTemplate1(ptr, templ.CvPtr, templCenterValue);

            GC.KeepAlive(templ);
        }
Code Example #8
File: Cv2_imgproc.cs Project: shimat/opencvsharp
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
        /// <return>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</return>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
        /// <returns>Detected contours. Each contour is stored as a vector of points.</returns>
#endif
        public static Point[][] FindContoursAsArray(InputOutputArray image,
            RetrievalModes mode, ContourApproximationModes method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfNotReady();

            Point offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours2_vector(image.CvPtr, out contoursPtr, (int)mode, (int)method, offset0);
            image.Fix();

            using (var contoursVec = new VectorOfVectorPoint(contoursPtr))
            {
                return contoursVec.ToArray();
            }
        }
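
A usage sketch against the newer enum names used in this listing (RetrievalModes, ContourApproximationModes); assumptions otherwise as in the earlier sketches.

        using (var bin = new Mat(200, 200, MatType.CV_8UC1, Scalar.All(0)))
        {
            Cv2.Circle(bin, new Point(100, 100), 60, Scalar.All(255), -1);

            Point[][] contours = Cv2.FindContoursAsArray(
                bin, RetrievalModes.External, ContourApproximationModes.ApproxSimple);

            Console.WriteLine("{0} contour(s) found", contours.Length);
        }
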
Code Example #9
File: Cv2_imgproc.cs Project: shimat/opencvsharp
        /// <summary>
        /// Convolves an image with the kernel
        /// </summary>
        /// <param name="src">The source image</param>
        /// <param name="dst">The destination image. It will have the same size and the same number of channels as src</param>
        /// <param name="ddepth">The desired depth of the destination image. If it is negative, it will be the same as src.depth()</param>
        /// <param name="kernel">Convolution kernel (or rather a correlation kernel), 
        /// a single-channel floating point matrix. If you want to apply different kernels to 
        /// different channels, split the image into separate color planes using split() and process them individually</param>
        /// <param name="anchor">The anchor of the kernel that indicates the relative position of 
        /// a filtered point within the kernel. The anchor should lie within the kernel. 
        /// The special default value (-1,-1) means that the anchor is at the kernel center</param>
        /// <param name="delta">The optional value added to the filtered pixels before storing them in dst</param>
        /// <param name="borderType">The pixel extrapolation method</param>
        public static void Filter2D(
            InputArray src, OutputArray dst, MatType ddepth,
            InputArray kernel, Point? anchor = null, double delta = 0, 
            BorderTypes borderType = BorderTypes.Default)
        {
            if (src == null)
                throw new ArgumentNullException(nameof(src));
            if (dst == null)
                throw new ArgumentNullException(nameof(dst));
            if (kernel == null)
                throw new ArgumentNullException(nameof(kernel));
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();
            kernel.ThrowIfDisposed();
            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.imgproc_filter2D(src.CvPtr, dst.CvPtr, ddepth, kernel.CvPtr, 
                anchor0, delta, (int)borderType);
            GC.KeepAlive(src);
            dst.Fix();
        }
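
A sketch of a Filter2D call with a hand-built 3x3 mean kernel; the kernel is filled through the Scalar-initializing Mat constructor so no helper functions are assumed.

        // 3x3 mean kernel: every coefficient is 1/9.
        using (var kernel = new Mat(3, 3, MatType.CV_32FC1, new Scalar(1.0 / 9)))
        using (var src = new Mat(240, 320, MatType.CV_8UC3, Scalar.All(200)))
        using (var dst = new Mat())
        {
            Cv2.Filter2D(src, dst, MatType.CV_8U, kernel);
        }
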
Code Example #10
File: Cv2_gpu.cs Project: 0sv/opencvsharp
        /// <summary>
        /// Applies a separable 2D linear filter to an image.
        /// </summary>
        /// <param name="src">Source image. 
        /// CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 source types are supported.</param>
        /// <param name="dst">Destination image with the same size and number of channels as src.</param>
        /// <param name="ddepth">Destination image depth. CV_8U, CV_16S, CV_32S, and CV_32F are supported.</param>
        /// <param name="kernelX">Horizontal filter coefficients.</param>
        /// <param name="kernelY">Vertical filter coefficients.</param>
        /// <param name="buf"></param>
        /// <param name="anchor">Anchor position within the kernel. 
        /// The default value (-1, -1) means that the anchor is at the kernel center.</param>
        /// <param name="rowBorderType">Pixel extrapolation method in the vertical direction.</param>
        /// <param name="columnBorderType">Pixel extrapolation method in the horizontal direction.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public static void SepFilter2D(
            GpuMat src, GpuMat dst, int ddepth, Mat kernelX, Mat kernelY, GpuMat buf,
            Point? anchor = null, BorderType rowBorderType = BorderType.Default, 
            BorderType columnBorderType = BorderType.Auto, Stream stream = null)
        {
            ThrowIfGpuNotAvailable();
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (kernelX == null)
                throw new ArgumentNullException("kernelX");
            if (kernelY == null)
                throw new ArgumentNullException("kernelY");
            if (buf == null)
                throw new ArgumentNullException("buf");
            src.ThrowIfDisposed();
            dst.ThrowIfDisposed();
            kernelX.ThrowIfDisposed();
            kernelY.ThrowIfDisposed();
            buf.ThrowIfDisposed();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.gpu_sepFilter2D2(
                src.CvPtr, dst.CvPtr, ddepth, kernelX.CvPtr, kernelY.CvPtr, buf.CvPtr, 
                anchor0, (int)rowBorderType, (int)columnBorderType, Cv2.ToPtr(stream));
        }
Code Example #11
File: Cv2_imgproc.cs Project: shimat/opencvsharp
 /*
 /// <summary>
 /// Applies the adaptive bilateral filter to an image.
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same size and the same type as src</param>
 /// <param name="ksize">The kernel size. This is the neighborhood where the local variance will be calculated, 
 /// and where pixels will contribute (in a weighted manner).</param>
 /// <param name="sigmaSpace">Filter sigma in the coordinate space. 
 /// Larger value of the parameter means that farther pixels will influence each other 
 /// (as long as their colors are close enough; see sigmaColor). When d>0, it specifies the neighborhood 
 /// size regardless of sigmaSpace; otherwise d is proportional to sigmaSpace.</param>
 /// <param name="maxSigmaColor">Maximum allowed sigma color (will clamp the value calculated in the 
 /// ksize neighborhood). Larger value of the parameter means that more dissimilar pixels will 
 /// influence each other (as long as their colors are close enough; see sigmaColor). 
 /// When d>0, it specifies the neighborhood size regardless of sigmaSpace; otherwise d is proportional to sigmaSpace.</param>
 /// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
 /// <param name="borderType">Pixel extrapolation method.</param>
 public static void AdaptiveBilateralFilter(InputArray src, OutputArray dst, Size ksize,
     double sigmaSpace, double maxSigmaColor = 20.0, Point? anchor = null, BorderType borderType = BorderType.Default)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
     NativeMethods.imgproc_adaptiveBilateralFilter(src.CvPtr, dst.CvPtr, ksize, 
         sigmaSpace, maxSigmaColor, anchor0, (int)borderType);
     dst.Fix();
 }
 */
 #endregion
 #region BoxFilter
 /// <summary>
 /// Smoothes image using box filter
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same size and the same type as src</param>
 /// <param name="ddepth"></param>
 /// <param name="ksize">The smoothing kernel size</param>
 /// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
 /// <param name="normalize">Indicates whether the kernel is normalized by its area or not</param>
 /// <param name="borderType">The border mode used to extrapolate pixels outside of the image</param>
 public static void BoxFilter(
     InputArray src, OutputArray dst, MatType ddepth, 
     Size ksize, Point? anchor = null, bool normalize = true,
     BorderTypes borderType = BorderTypes.Default)
 {
     if (src == null)
         throw new ArgumentNullException(nameof(src));
     if (dst == null)
         throw new ArgumentNullException(nameof(dst));
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
     NativeMethods.imgproc_boxFilter(src.CvPtr, dst.CvPtr, ddepth, ksize, anchor0, normalize ? 1 : 0, (int)borderType);
     GC.KeepAlive(src);
     dst.Fix();
 }
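
A sketch of the BoxFilter wrapper above; with normalize left at its default of true the call is equivalent to Blur with the same kernel size.

        using (var src = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(100)))
        using (var dst = new Mat())
        {
            // Normalized 3x3 box filter, output depth same as the source.
            Cv2.BoxFilter(src, dst, MatType.CV_8U, new Size(3, 3));
        }
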
Code Example #12
File: Cv2_gpu.cs Project: 0sv/opencvsharp
        /// <summary>
        /// Applies the non-separable 2D linear filter to an image.
        /// </summary>
        /// <param name="src">Source image. CV_8U, CV_16U, and CV_32F images with one or four channels are supported.</param>
        /// <param name="dst">Destination image. The size and the number of channels is the same as src.</param>
        /// <param name="ddepth">Desired depth of the destination image. If it is negative, it is the same as src.depth(). 
        /// It supports only the same depth as the source image depth.</param>
        /// <param name="kernel">2D array of filter coefficients.</param>
        /// <param name="anchor">Anchor of the kernel that indicates the relative position of 
        /// a filtered point within the kernel. The anchor resides within the kernel. 
        /// The special default value (-1,-1) means that the anchor is at the kernel center.</param>
        /// <param name="borderType">Pixel extrapolation method.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public static void Filter2D(
            GpuMat src, GpuMat dst, int ddepth, Mat kernel, Point? anchor,
            BorderType borderType = BorderType.Default, Stream stream = null)
        {
            ThrowIfGpuNotAvailable();
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (kernel == null)
                throw new ArgumentNullException("kernel");
            src.ThrowIfDisposed();
            dst.ThrowIfDisposed();
            kernel.ThrowIfDisposed();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.gpu_filter2D(src.CvPtr, dst.CvPtr, ddepth, kernel.CvPtr,
                anchor0, (int)borderType, Cv2.ToPtr(stream));
        }
Code Example #13
File: Cv2_gpu.cs Project: 0sv/opencvsharp
        /// <summary>
        /// Applies an advanced morphological operation to an image.
        /// </summary>
        /// <param name="src">Source image. CV_8UC1 and CV_8UC4 source types are supported.</param>
        /// <param name="dst">Destination image with the same size and type as src.</param>
        /// <param name="op">Type of morphological operation</param>
        /// <param name="kernel">Structuring element.</param>
        /// <param name="buf1"></param>
        /// <param name="buf2"></param>
        /// <param name="anchor">Position of an anchor within the element. 
        /// The default value Point(-1, -1) means that the anchor is at the element center.</param>
        /// <param name="iterations">Number of times erosion and dilation are applied.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public static void MorphologyEx(
            GpuMat src, GpuMat dst, MorphologyOperation op, Mat kernel, GpuMat buf1, GpuMat buf2,
            Point? anchor = null, int iterations = 1, Stream stream = null)
        {
            ThrowIfGpuNotAvailable();
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (kernel == null)
                throw new ArgumentNullException("kernel");
            if (buf1 == null)
                throw new ArgumentNullException("buf1");
            if (buf2 == null)
                throw new ArgumentNullException("buf2");
            src.ThrowIfDisposed();
            dst.ThrowIfDisposed();
            kernel.ThrowIfDisposed();
            buf1.ThrowIfDisposed();
            buf2.ThrowIfDisposed();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.gpu_morphologyEx2(src.CvPtr, dst.CvPtr, (int)op, kernel.CvPtr,
                buf1.CvPtr, buf2.CvPtr, anchor0, iterations, Cv2.ToPtr(stream));
        }
Code Example #14
File: Cv2_gpu.cs Project: 0sv/opencvsharp
        /// <summary>
        /// Dilates an image by using a specific structuring element.
        /// </summary>
        /// <param name="src">Source image. Only CV_8UC1 and CV_8UC4 types are supported.</param>
        /// <param name="dst">Destination image with the same size and type as src.</param>
        /// <param name="kernel">Structuring element used for dilation. If kernel=Mat(), 
        /// a 3x3 rectangular structuring element is used.</param>
        /// <param name="buf"></param>
        /// <param name="anchor">Position of an anchor within the element. 
        /// The default value (-1, -1) means that the anchor is at the element center.</param>
        /// <param name="iterations">Number of times dilation is applied.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public static void Dilate(
            GpuMat src, GpuMat dst, Mat kernel, GpuMat buf,
            Point? anchor, int iterations = 1, Stream stream = null)
        {
            ThrowIfGpuNotAvailable();
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (kernel == null)
                throw new ArgumentNullException("kernel");
            if (buf == null)
                throw new ArgumentNullException("buf");
            src.ThrowIfDisposed();
            dst.ThrowIfDisposed();
            kernel.ThrowIfDisposed();
            buf.ThrowIfDisposed();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.gpu_dilate2(src.CvPtr, dst.CvPtr, kernel.CvPtr,
                buf.CvPtr, anchor0, iterations, Cv2.ToPtr(stream));
        }
Code Example #15
#if LANG_JP
        /// <summary>
        /// 輪郭線,または内側が塗りつぶされた輪郭を描きます.
        /// </summary>
        /// <param name="image">出力画像</param>
        /// <param name="contours"> 入力される全輪郭.各輪郭は,点のベクトルとして格納されています.</param>
        /// <param name="contourIdx">描かれる輪郭を示します.これが負値の場合,すべての輪郭が描画されます.</param>
        /// <param name="color">輪郭の色.</param>
        /// <param name="thickness">輪郭線の太さ.これが負値の場合(例えば thickness=CV_FILLED ),輪郭の内側が塗りつぶされます.</param>
        /// <param name="lineType">線の連結性</param>
        /// <param name="hierarchy">階層に関するオプションの情報.これは,特定の輪郭だけを描画したい場合にのみ必要になります.</param>
        /// <param name="maxLevel">描画される輪郭の最大レベル.0ならば,指定された輪郭のみが描画されます.
        /// 1ならば,指定された輪郭と,それに入れ子になったすべての輪郭が描画されます.2ならば,指定された輪郭と,
        /// それに入れ子になったすべての輪郭,さらにそれに入れ子になったすべての輪郭が描画されます.このパラメータは, 
        /// hierarchy が有効な場合のみ考慮されます.</param>
        /// <param name="offset">輪郭をシフトするオプションパラメータ.指定された offset = (dx,dy) だけ,すべての描画輪郭がシフトされます.</param>
#else
        /// <summary>
        /// draws contours in the image
        /// </summary>
        /// <param name="image">Destination image.</param>
        /// <param name="contours">All the input contours. Each contour is stored as a point vector.</param>
        /// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param>
        /// <param name="color">Color of the contours.</param>
        /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ), 
        /// the contour interiors are drawn.</param>
        /// <param name="lineType">Line connectivity. </param>
        /// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param>
        /// <param name="maxLevel">Maximal level for drawn contours. If it is 0, only the specified contour is drawn. 
        /// If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, 
        /// all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account 
        /// when there is hierarchy available.</param>
        /// <param name="offset">Optional contour shift parameter. Shift all the drawn contours by the specified offset = (dx, dy)</param>
#endif
        public static void DrawContours(
            InputOutputArray image,
            IEnumerable<IEnumerable<Point>> contours,
            int contourIdx,
            Scalar color,
            int thickness = 1,
            LineType lineType = LineType.Link8,
            IEnumerable<HierarchyIndex> hierarchy = null,
            int maxLevel = Int32.MaxValue,
            Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (contours == null)
                throw new ArgumentNullException("contours");
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            Point[][] contoursArray = EnumerableEx.SelectToArray(contours, EnumerableEx.ToArray);
            int[] contourSize2 = EnumerableEx.SelectToArray(contoursArray, pts => pts.Length);
            using (var contoursPtr = new ArrayAddress2<Point>(contoursArray))
            {
                if (hierarchy == null)
                {
                    NativeMethods.imgproc_drawContours_vector(image.CvPtr, contoursPtr.Pointer, contoursArray.Length, contourSize2,
                        contourIdx, color, thickness, (int)lineType, IntPtr.Zero, 0, maxLevel, offset0);
                }
                else
                {
                    Vec4i[] hiearchyVecs = EnumerableEx.SelectToArray(hierarchy, hi => hi.ToVec4i());
                    NativeMethods.imgproc_drawContours_vector(image.CvPtr, contoursPtr.Pointer, contoursArray.Length, contourSize2,
                        contourIdx, color, thickness, (int)lineType, hiearchyVecs, hiearchyVecs.Length, maxLevel, offset0);
                }
            }

            image.Fix();
        }
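
A sketch that feeds the output of the FindContours overload from Code Example #2 straight into this DrawContours overload (old-style enum names, as in the listing; bin as in the earlier sketches).

        Point[][] contours;
        HierarchyIndex[] hierarchy;
        Cv2.FindContours(bin, out contours, out hierarchy,
            ContourRetrieval.Tree, ContourChain.ApproxSimple);

        using (var canvas = new Mat(bin.Rows, bin.Cols, MatType.CV_8UC3, Scalar.All(0)))
        {
            // contourIdx = -1 draws every contour; maxLevel = 1 also draws
            // the contours nested directly inside them.
            Cv2.DrawContours(canvas, contours, -1, new Scalar(0, 255, 0),
                thickness: 2, lineType: LineType.Link8, hierarchy: hierarchy, maxLevel: 1);
        }
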
Code Example #16
File: Cv2_imgproc.cs Project: shimat/opencvsharp
#if LANG_JP
        /// <summary>
        /// 1つ,または複数のポリゴンで区切られた領域を塗りつぶします.
        /// </summary>
        /// <param name="img">画像</param>
        /// <param name="pts">ポリゴンの配列.各要素は,点の配列で表現されます.</param>
        /// <param name="color">ポリゴンの色.</param>
        /// <param name="lineType">ポリゴンの枠線の種類,</param>
        /// <param name="shift">ポリゴンの頂点座標において,小数点以下の桁を表すビット数.</param>
        /// <param name="offset"></param>
#else
        /// <summary>
        /// Fills the area bounded by one or more polygons
        /// </summary>
        /// <param name="img">Image</param>
        /// <param name="pts">Array of polygons, each represented as an array of points</param>
        /// <param name="color">Polygon color</param>
        /// <param name="lineType">Type of the polygon boundaries</param>
        /// <param name="shift">The number of fractional bits in the vertex coordinates</param>
        /// <param name="offset"></param>
#endif
        public static void FillPoly(
            InputOutputArray img, InputArray pts, Scalar color,
            LineTypes lineType = LineTypes.Link8, int shift = 0, Point? offset = null)
        {
            if (img == null)
                throw new ArgumentNullException(nameof(img));
            if (pts == null) 
                throw new ArgumentNullException(nameof(pts));
            img.ThrowIfDisposed();
            pts.ThrowIfDisposed();
            Point offset0 = offset.GetValueOrDefault(new Point());

            NativeMethods.imgproc_fillPoly_InputOutputArray(
                img.CvPtr, pts.CvPtr, color, (int)lineType, shift, offset0);

            GC.KeepAlive(pts);
            img.Fix();
        }
Code Example #17
#if LANG_JP
        /// <summary>
        /// 輪郭線,または内側が塗りつぶされた輪郭を描きます.
        /// </summary>
        /// <param name="image">出力画像</param>
        /// <param name="contours"> 入力される全輪郭.各輪郭は,点のベクトルとして格納されています.</param>
        /// <param name="contourIdx">描かれる輪郭を示します.これが負値の場合,すべての輪郭が描画されます.</param>
        /// <param name="color">輪郭の色.</param>
        /// <param name="thickness">輪郭線の太さ.これが負値の場合(例えば thickness=CV_FILLED ),輪郭の内側が塗りつぶされます.</param>
        /// <param name="lineType">線の連結性</param>
        /// <param name="hierarchy">階層に関するオプションの情報.これは,特定の輪郭だけを描画したい場合にのみ必要になります.</param>
        /// <param name="maxLevel">描画される輪郭の最大レベル.0ならば,指定された輪郭のみが描画されます.
        /// 1ならば,指定された輪郭と,それに入れ子になったすべての輪郭が描画されます.2ならば,指定された輪郭と,
        /// それに入れ子になったすべての輪郭,さらにそれに入れ子になったすべての輪郭が描画されます.このパラメータは, 
        /// hierarchy が有効な場合のみ考慮されます.</param>
        /// <param name="offset">輪郭をシフトするオプションパラメータ.指定された offset = (dx,dy) だけ,すべての描画輪郭がシフトされます.</param>
#else
        /// <summary>
        /// draws contours in the image
        /// </summary>
        /// <param name="image">Destination image.</param>
        /// <param name="contours">All the input contours. Each contour is stored as a point vector.</param>
        /// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param>
        /// <param name="color">Color of the contours.</param>
        /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ), 
        /// the contour interiors are drawn.</param>
        /// <param name="lineType">Line connectivity. </param>
        /// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param>
        /// <param name="maxLevel">Maximal level for drawn contours. If it is 0, only the specified contour is drawn. 
        /// If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, 
        /// all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account 
        /// when there is hierarchy available.</param>
        /// <param name="offset">Optional contour shift parameter. Shift all the drawn contours by the specified offset = (dx, dy)</param>
#endif
        public static void DrawContours(
            InputOutputArray image,
            IEnumerable<Mat> contours,
            int contourIdx,
            Scalar color,
            int thickness = 1,
            LineType lineType = LineType.Link8,
            Mat hierarchy = null,
            int maxLevel = Int32.MaxValue,
            Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (contours == null)
                throw new ArgumentNullException("contours");
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr[] contoursPtr = EnumerableEx.SelectPtrs(contours);
            NativeMethods.imgproc_drawContours_InputArray(image.CvPtr, contoursPtr, contoursPtr.Length,
                        contourIdx, color, thickness, (int)lineType, ToPtr(hierarchy), maxLevel, offset0);
            image.Fix();
        }
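
A sketch pairing this overload with the Mat-based FindContours from Code Example #3, under the same assumptions.

        Mat[] matContours;
        using (var hierarchy = new Mat())
        using (var canvas = new Mat(bin.Rows, bin.Cols, MatType.CV_8UC3, Scalar.All(0)))
        {
            Cv2.FindContours(bin, out matContours, hierarchy,
                ContourRetrieval.CComp, ContourChain.ApproxSimple);

            // Draw every contour in red, forwarding the hierarchy Mat as-is.
            Cv2.DrawContours(canvas, matContours, -1, new Scalar(0, 0, 255),
                thickness: 1, lineType: LineType.Link8, hierarchy: hierarchy);
        }
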
Code Example #18
File: Cv2_core.cs Project: kaorun55/opencvsharp
#if LANG_JP
        /// <summary>
        /// 1つ,または複数のポリゴンで区切られた領域を塗りつぶします.
        /// </summary>
        /// <param name="img">画像</param>
        /// <param name="pts">ポリゴンの配列.各要素は,点の配列で表現されます.</param>
        /// <param name="color">ポリゴンの色.</param>
        /// <param name="lineType">ポリゴンの枠線の種類,</param>
        /// <param name="shift">ポリゴンの頂点座標において,小数点以下の桁を表すビット数.</param>
        /// <param name="offset"></param>
#else
        /// <summary>
        /// Fills the area bounded by one or more polygons
        /// </summary>
        /// <param name="img">Image</param>
        /// <param name="pts">Array of polygons, each represented as an array of points</param>
        /// <param name="color">Polygon color</param>
        /// <param name="lineType">Type of the polygon boundaries</param>
        /// <param name="shift">The number of fractional bits in the vertex coordinates</param>
        /// <param name="offset"></param>
#endif
        public static void FillPoly(Mat img, IEnumerable<IEnumerable<Point>> pts, Scalar color, 
            LineType lineType = LineType.Link8, int shift = 0, Point? offset = null)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();
            Point offset0 = offset.GetValueOrDefault(new Point());

            List<Point[]> ptsList = new List<Point[]>();
            List<int> nptsList = new List<int>();
            foreach (IEnumerable<Point> pts1 in pts)
            {
                Point[] pts1Arr = Util.ToArray(pts1);
                ptsList.Add(pts1Arr);
                nptsList.Add(pts1Arr.Length);
            }
            Point[][] ptsArr = ptsList.ToArray();
            int[] npts = nptsList.ToArray();
            int ncontours = ptsArr.Length;
            using (ArrayAddress2<Point> ptsPtr = new ArrayAddress2<Point>(ptsArr))
            {
                NativeMethods.core_fillPoly(img.CvPtr, ptsPtr.Pointer, npts, ncontours, color, (int)lineType, shift, offset0);
            }
        }
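
A sketch for this FillPoly overload; a Point[][] satisfies the IEnumerable&lt;IEnumerable&lt;Point&gt;&gt; parameter directly.

        using (var img = new Mat(300, 300, MatType.CV_8UC3, Scalar.All(0)))
        {
            Point[] triangle =
            {
                new Point(150, 50), new Point(50, 250), new Point(250, 250)
            };

            // Fill one polygon in white; more polygons can be passed by adding
            // further point arrays to the outer array.
            Cv2.FillPoly(img, new[] { triangle }, Scalar.All(255));
        }
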
Code Example #19
#if LANG_JP
        /// <summary>
        /// 指定の構造要素を用いて画像の膨張を行います.
        /// </summary>
        /// <param name="src">入力画像</param>
        /// <param name="dst">src と同じサイズ,同じ型の出力画像</param>
        /// <param name="element">膨張に用いられる構造要素. element=new Mat()  の場合, 3x3 の矩形構造要素が用いられます</param>
        /// <param name="anchor">構造要素内のアンカー位置.デフォルト値の (-1, -1) は,アンカーが構造要素の中心にあることを意味します</param>
        /// <param name="iterations">膨張が行われる回数. [既定値は1]</param>
        /// <param name="borderType">ピクセル外挿手法.[既定値はBorderType.Constant]</param>
        /// <param name="borderValue">定数境界モードで利用されるピクセル値.デフォルト値は特別な意味を持ちます.[既定値はCvCpp.MorphologyDefaultBorderValue()]</param>
#else
        /// <summary>
        /// Dilates an image by using a specific structuring element.
        /// </summary>
        /// <param name="src">The source image</param>
        /// <param name="dst">The destination image. It will have the same size and the same type as src</param>
        /// <param name="element">The structuring element used for dilation. If element=new Mat() , a 3x3 rectangular structuring element is used</param>
        /// <param name="anchor">Position of the anchor within the element. The default value (-1, -1) means that the anchor is at the element center</param>
        /// <param name="iterations">The number of times dilation is applied. [By default this is 1]</param>
        /// <param name="borderType">The pixel extrapolation method. [By default this is BorderType.Constant]</param>
        /// <param name="borderValue">The border value in case of a constant border. The default value has a special meaning. [By default this is CvCpp.MorphologyDefaultBorderValue()]</param>
#endif
        public static void Dilate(InputArray src, OutputArray dst, InputArray element,
            Point? anchor = null, int iterations = 1, BorderType borderType = BorderType.Constant, Scalar? borderValue = null)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            Scalar borderValue0 = borderValue.GetValueOrDefault(MorphologyDefaultBorderValue());
            IntPtr elementPtr = ToPtr(element);
            NativeMethods.imgproc_dilate(src.CvPtr, dst.CvPtr, elementPtr, anchor0, iterations, (int)borderType, borderValue0);
            dst.Fix();
        }
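
A sketch of the Dilate wrapper above; per its parameter description, passing new Mat() as the structuring element selects the default 3x3 rectangle.

        using (var src = new Mat(200, 200, MatType.CV_8UC1, Scalar.All(0)))
        using (var dst = new Mat())
        using (var element = new Mat())   // empty Mat => default 3x3 rectangular element
        {
            Cv2.Circle(src, new Point(100, 100), 30, Scalar.All(255), -1);

            // Two dilation passes with the default element and border handling.
            Cv2.Dilate(src, dst, element, iterations: 2);
        }
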
Code Example #20
 /// <summary>
 /// Applies the adaptive bilateral filter to an image.
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same size and the same type as src</param>
 /// <param name="ksize">The kernel size. This is the neighborhood where the local variance will be calculated, 
 /// and where pixels will contribute (in a weighted manner).</param>
 /// <param name="sigmaSpace">Filter sigma in the coordinate space. 
 /// Larger value of the parameter means that farther pixels will influence each other 
 /// (as long as their colors are close enough; see sigmaColor). When d>0, it specifies the neighborhood 
 /// size regardless of sigmaSpace; otherwise d is proportional to sigmaSpace.</param>
 /// <param name="maxSigmaColor">Maximum allowed sigma color (will clamp the value calculated in the 
 /// ksize neighborhood). Larger value of the parameter means that more dissimilar pixels will 
 /// influence each other (as long as their colors are close enough; see sigmaColor). 
 /// When d>0, it specifies the neighborhood size regardless of sigmaSpace; otherwise d is proportional to sigmaSpace.</param>
 /// <param name="anchor">The anchor point. The default value Point(-1,-1) means that the anchor is at the kernel center</param>
 /// <param name="borderType">Pixel extrapolation method.</param>
 public static void AdaptiveBilateralFilter(InputArray src, OutputArray dst, Size ksize,
     double sigmaSpace, double maxSigmaColor = 20.0, Point? anchor = null, BorderType borderType = BorderType.Default)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
     NativeMethods.imgproc_adaptiveBilateralFilter(src.CvPtr, dst.CvPtr, ksize, 
         sigmaSpace, maxSigmaColor, anchor0, (int)borderType);
     dst.Fix();
 }
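
A sketch for completeness only: adaptiveBilateralFilter existed only in some OpenCV 2.4.x builds (the copy in Code Example #11 is commented out for that reason), so the call below assumes a wrapper compiled against such a build.

        using (var src = new Mat(240, 320, MatType.CV_8UC3, Scalar.All(150)))
        using (var dst = new Mat())
        {
            // ksize must be odd; sigmaSpace controls how far neighbouring pixels
            // may influence each other.
            Cv2.AdaptiveBilateralFilter(src, dst, new Size(5, 5), 10.0);
        }
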
Code Example #21
        /// <summary>
        /// set template to search
        /// </summary>
        /// <param name="edges"></param>
        /// <param name="dx"></param>
        /// <param name="dy"></param>
        /// <param name="templCenter"></param>
        public virtual void SetTemplate(InputArray edges, InputArray dx, InputArray dy, Point? templCenter = null)
        {
            if (ptr == IntPtr.Zero)
                throw new ObjectDisposedException(GetType().Name);
            if (edges == null)
                throw new ArgumentNullException(nameof(edges));
            if (dx == null) 
                throw new ArgumentNullException(nameof(dx));
            if (dy == null)
                throw new ArgumentNullException(nameof(dy));
            edges.ThrowIfDisposed();
            dx.ThrowIfDisposed();
            dy.ThrowIfDisposed();
            var templCenterValue = templCenter.GetValueOrDefault(new Point(-1, -1));

            NativeMethods.imgproc_GeneralizedHough_setTemplate2(
                ptr, edges.CvPtr, dx.CvPtr, dy.CvPtr, templCenterValue);

            GC.KeepAlive(edges);
            GC.KeepAlive(dx);
            GC.KeepAlive(dy);
        }
Code Example #22
File: Cv2_gpu.cs Project: 0sv/opencvsharp
        /// <summary>
        /// Smooths the image using the normalized box filter.
        /// </summary>
        /// <param name="src">Input image. CV_8UC1 and CV_8UC4 source types are supported.</param>
        /// <param name="dst">Output image. The size and type are the same as src.</param>
        /// <param name="ddepth">Output image depth. If -1, the output image has the same depth as the input one. 
        /// The only values allowed here are CV_8U and -1.</param>
        /// <param name="ksize">Kernel size.</param>
        /// <param name="anchor">Anchor point. The default value Point(-1, -1) means that 
        /// the anchor is at the kernel center.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public static void BoxFilter(
            GpuMat src, GpuMat dst, int ddepth, Size ksize, Point? anchor,
            Stream stream = null)
        {
            ThrowIfGpuNotAvailable();
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            src.ThrowIfDisposed();
            dst.ThrowIfDisposed();

            Point anchor0 = anchor.GetValueOrDefault(new Point(-1, -1));
            NativeMethods.gpu_boxFilter(src.CvPtr, dst.CvPtr, ddepth, ksize, anchor0, Cv2.ToPtr(stream));
        }