Example #1
        /// <summary>
        /// computes a background image
        /// </summary>
        /// <param name="backgroundImage">The computed background image.</param>
        public virtual void GetBackgroundImage(OutputArray backgroundImage)
        {
            if (backgroundImage == null)
                throw new ArgumentNullException("backgroundImage");
            backgroundImage.ThrowIfNotReady();

            NativeMethods.video_BackgroundSubtractor_getBackgroundImage(ptr, backgroundImage.CvPtr);

            backgroundImage.Fix();
        }
Example #2
        /// <summary>
        /// Process next frame from input and return output result.
        /// </summary>
        /// <param name="frame">Output result</param>
        public virtual void NextFrame(OutputArray frame)
        {
            if (firstCall)
            {
                InitImpl(frameSource);
                firstCall = false;
            }

            ProcessImpl(frameSource, frame);
        }
Example #3
 /// <summary>
 /// Smoothes an image using the median filter.
 /// </summary>
 /// <param name="src">The source 1-, 3- or 4-channel image.</param>
 /// <param name="dst">The destination array of the same size and type as src.</param>
 /// <param name="ksize">The aperture linear size; must be odd and greater than 1 (e.g. 3, 5, 7).</param>
 public static void MedianBlur(InputArray src, OutputArray dst, int ksize)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.imgproc_medianBlur(src.CvPtr, dst.CvPtr, ksize);
     dst.Fix();
 }
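A minimal usage sketch (file names are illustrative; Mat converts implicitly to InputArray/OutputArray in OpenCvSharp):

 // Hypothetical usage: median-filter a color image with a 5x5 aperture.
 using (var src = Cv2.ImRead("input.png"))
 using (var dst = new Mat())
 {
     Cv2.MedianBlur(src, dst, 5); // ksize must be odd and greater than 1
     Cv2.ImWrite("blurred.png", dst);
 }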
Example #4
 /// <summary>
 /// Applies a GNU Octave/MATLAB equivalent colormap to a given image.
 /// </summary>
 /// <param name="src">The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.</param>
 /// <param name="dst">The result is the colormapped source image.</param>
 /// <param name="colormap">The colormap to apply.</param>
 public static void ApplyColorMap(InputArray src, OutputArray dst, ColorMapMode colormap)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.contrib_applyColorMap(src.CvPtr, dst.CvPtr, (int)colormap);
     dst.Fix();
 }
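A sketch of a typical call, mapping intensities to the Jet colormap (file names are illustrative, and the exact ImreadModes spelling depends on the OpenCvSharp version):

 // Hypothetical usage: false-color a grayscale image with the Jet colormap.
 using (var gray = Cv2.ImRead("depth.png", ImreadModes.Grayscale))
 using (var colored = new Mat())
 {
     Cv2.ApplyColorMap(gray, colored, ColorMapMode.Jet);
     Cv2.ImWrite("depth_jet.png", colored);
 }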
Example #5
 /// <summary>
 /// Performs image denoising using the Non-local Means Denoising algorithm 
 /// with several computational optimizations. Noise is expected to be Gaussian white noise.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel, 2-channel or 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src .</param>
 /// <param name="h">
 /// Parameter regulating filter strength. A big h value perfectly removes noise but also removes image details; 
 /// a smaller h value preserves details but also preserves some noise.</param>
 /// <param name="templateWindowSize">
 /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels</param>
 /// <param name="searchWindowSize">
 /// Size in pixels of the window that is used to compute weighted average for given pixel. 
 /// Should be odd. Affects performance linearly: greater searchWindowSize - greater denoising time. Recommended value 21 pixels.</param>
 public static void FastNlMeansDenoising(InputArray src, OutputArray dst, float h = 3,
     int templateWindowSize = 7, int searchWindowSize = 21)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.photo_fastNlMeansDenoising(src.CvPtr, dst.CvPtr, h, templateWindowSize, searchWindowSize);
     dst.Fix();
 }
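A usage sketch with the recommended window sizes left at their defaults (file names and the ImreadModes spelling are assumptions):

 // Hypothetical usage: denoise a grayscale image; h = 10 trades a little detail for less noise.
 using (var noisy = Cv2.ImRead("noisy.png", ImreadModes.Grayscale))
 using (var clean = new Mat())
 {
     Cv2.FastNlMeansDenoising(noisy, clean, 10);
     Cv2.ImWrite("clean.png", clean);
 }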
Example #6
 /// <summary>
 /// Forms a border around the image
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same type as src and 
 /// the size Size(src.cols+left+right, src.rows+top+bottom)</param>
 /// <param name="top">Specifies how many pixels in each direction from the source image rectangle to extrapolate.</param>
 /// <param name="bottom">Specifies how many pixels in each direction from the source image rectangle to extrapolate.</param>
 /// <param name="left">Specifies how many pixels in each direction from the source image rectangle to extrapolate.</param>
 /// <param name="right">Specifies how many pixels in each direction from the source image rectangle to extrapolate.</param>
 /// <param name="borderType">The border type</param>
 /// <param name="value">The border value if borderType == Constant</param>
 public static void CopyMakeBorder(InputArray src, OutputArray dst, int top, int bottom, int left, int right, BorderType borderType, Scalar? value = null)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Scalar value0 = value.GetValueOrDefault(new Scalar());
     NativeMethods.imgproc_copyMakeBorder(src.CvPtr, dst.CvPtr, top, bottom, left, right, (int)borderType, value0);
     dst.Fix();
 }
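For example, padding an image with a 10-pixel constant black border might look like this (file name is illustrative):

 // Hypothetical usage: add a constant black border on all four sides.
 using (var src = Cv2.ImRead("input.png"))
 using (var padded = new Mat())
 {
     Cv2.CopyMakeBorder(src, padded, 10, 10, 10, 10, BorderType.Constant, new Scalar(0, 0, 0));
 }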
Example #7
 /// <summary>
 /// converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
 /// </summary>
 /// <param name="src">Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).</param>
 /// <param name="dst">Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.</param>
 /// <param name="jacobian">Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial derivatives of the output array components with respect to the input array components.</param>
 public static void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian = null)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.calib3d_Rodrigues(src.CvPtr, dst.CvPtr, ToPtr(jacobian));
     dst.Fix();
     if (jacobian != null)
         jacobian.Fix();
 }
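A short sketch converting a rotation vector to its 3x3 rotation matrix (the vector values are illustrative):

 // Hypothetical usage: Rodrigues conversion of an axis-angle vector.
 using (var rvec = new Mat(3, 1, MatType.CV_64FC1, new double[] { 0.1, 0.2, 0.3 }))
 using (var rmat = new Mat())
 {
     Cv2.Rodrigues(rvec, rmat); // rmat is now a 3x3 CV_64FC1 rotation matrix
 }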
Example #8
 /// <summary>
 /// The update operator: takes the next video frame and returns the current foreground mask as an 8-bit binary image.
 /// </summary>
 /// <param name="image">Next video frame.</param>
 /// <param name="fgmask">The output foreground mask as an 8-bit binary image.</param>
 /// <param name="learningRate">The value between 0 and 1 that indicates how fast the background model is learnt. 
 /// A negative value makes the algorithm use an automatically chosen learning rate.</param>
 public virtual void Apply(InputArray image, OutputArray fgmask, double learningRate = -1)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     if (fgmask == null)
         throw new ArgumentNullException("fgmask");
     image.ThrowIfDisposed();
     fgmask.ThrowIfNotReady();
     
     NativeMethods.video_BackgroundSubtractor_apply(ptr, image.CvPtr, fgmask.CvPtr, learningRate);
     
     fgmask.Fix();
     GC.KeepAlive(image);
 }
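A sketch tying this together with GetBackgroundImage from Example #1 (the video path is illustrative, and the MOG2 factory method is an assumption about which concrete subtractor is used):

 // Hypothetical usage: foreground masks over a video, then the learned background.
 using (var capture = new VideoCapture("video.avi"))
 using (var subtractor = BackgroundSubtractorMOG2.Create())
 using (var frame = new Mat())
 using (var fgmask = new Mat())
 using (var background = new Mat())
 {
     while (capture.Read(frame) && !frame.Empty())
         subtractor.Apply(frame, fgmask); // default learningRate = -1 (automatic)
     subtractor.GetBackgroundImage(background);
 }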
Example #9
 /// <summary>
 /// restores the damaged image areas using one of the available inpainting algorithms
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image.</param>
 /// <param name="inpaintMask">The inpainting mask: an 8-bit 1-channel image whose non-zero pixels indicate the area to be inpainted.</param>
 /// <param name="dst">The output image of the same size and type as src.</param>
 /// <param name="inpaintRadius">Radius of the circular neighborhood of each point inpainted that is considered by the algorithm.</param>
 /// <param name="flags">The inpainting method.</param>
 public static void Inpaint(InputArray src, InputArray inpaintMask,
     OutputArray dst, double inpaintRadius, InpaintMethod flags)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (inpaintMask == null)
         throw new ArgumentNullException("inpaintMask");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     inpaintMask.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.photo_inpaint(src.CvPtr, inpaintMask.CvPtr, dst.CvPtr, inpaintRadius, (int)flags);
     dst.Fix();
 }
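A usage sketch, assuming a damaged photo and an 8-bit mask whose non-zero pixels mark the defects (file names are illustrative):

 // Hypothetical usage: repair masked regions with the Telea algorithm.
 using (var damaged = Cv2.ImRead("damaged.png"))
 using (var mask = Cv2.ImRead("mask.png", ImreadModes.Grayscale))
 using (var restored = new Mat())
 {
     Cv2.Inpaint(damaged, mask, restored, 3, InpaintMethod.Telea);
 }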
Example #10
        /// <summary>
        /// Returns filter coefficients for computing spatial image derivatives.
        /// </summary>
        /// <param name="kx">Output matrix of row filter coefficients. It has the type ktype.</param>
        /// <param name="ky">Output matrix of column filter coefficients. It has the type ktype.</param>
        /// <param name="dx">Derivative order with respect to x.</param>
        /// <param name="dy">Derivative order with respect to y.</param>
        /// <param name="ksize">Aperture size. It can be CV_SCHARR, 1, 3, 5, or 7.</param>
        /// <param name="normalize">Flag indicating whether to normalize (scale down) the filter coefficients or not.
        /// Theoretically, the coefficients should have the denominator \f$=2^{ksize*2-dx-dy-2}\f$. 
        /// If you are going to filter floating-point images, you are likely to use the normalized kernels.
        /// But if you compute derivatives of an 8-bit image, store the results in a 16-bit image, 
        /// and wish to preserve all the fractional bits, you may want to set normalize = false.</param>
        /// <param name="ktype">Type of filter coefficients. It can be CV_32F or CV_64F.</param>
        public static void GetDerivKernels(
            OutputArray kx, OutputArray ky, int dx, int dy, int ksize,
            bool normalize = false, MatType? ktype = null)
        {
            if (kx == null)
                throw new ArgumentNullException(nameof(kx));
            if (ky == null)
                throw new ArgumentNullException(nameof(ky));
            kx.ThrowIfNotReady();
            ky.ThrowIfNotReady();

            var ktype0 = ktype.GetValueOrDefault(MatType.CV_32F);
            NativeMethods.imgproc_getDerivKernels(
                kx.CvPtr, ky.CvPtr, dx, dy, ksize, normalize ? 1 : 0, ktype0);

            kx.Fix();
            ky.Fix();
        }
Example #11
        /// <summary>
        /// Computes the motion gradient orientation image from the motion history image
        /// </summary>
        /// <param name="mhi">Motion history single-channel floating-point image.</param>
        /// <param name="mask">Output mask image that has the type CV_8UC1 and the same size as mhi. 
        /// Its non-zero elements mark pixels where the motion gradient data is correct.</param>
        /// <param name="orientation">Output motion gradient orientation image that has the same type and the same size as mhi. 
        /// Each pixel of the image is a motion orientation, from 0 to 360 degrees.</param>
        /// <param name="delta1">Minimal (or maximal) allowed difference between mhi values within a pixel neighborhood.</param>
        /// <param name="delta2">Maximal (or minimal) allowed difference between mhi values within a pixel neighborhood. 
        /// That is, the function finds the minimum ( m(x,y) ) and maximum ( M(x,y) ) mhi values over 3x3 neighborhood of each pixel 
        /// and marks the motion orientation at (x, y) as valid only if: 
        /// min(delta1, delta2) &lt;= M(x,y)-m(x,y) &lt;= max(delta1, delta2).</param>
        /// <param name="apertureSize">Aperture size of the Sobel operator.</param>
        public static void CalcMotionGradient(
            InputArray mhi, OutputArray mask, OutputArray orientation,
            double delta1, double delta2, int apertureSize = 3)
        {
            if (mhi == null)
                throw new ArgumentNullException("mhi");
            if (mask == null)
                throw new ArgumentNullException("mask");
            if (orientation == null)
                throw new ArgumentNullException("orientation");
            mhi.ThrowIfDisposed();
            mask.ThrowIfNotReady();
            orientation.ThrowIfNotReady();

            NativeMethods.video_calcMotionGradient(
                mhi.CvPtr, mask.CvPtr, orientation.CvPtr, delta1, delta2, apertureSize);

            mask.Fix();
            orientation.Fix();
        }
Example #12
        /// <summary>
        /// Recovers inverse camera response.
        /// </summary>
        /// <param name="src">vector of input images</param>
        /// <param name="dst">256x1 matrix with inverse camera response function</param>
        /// <param name="times">vector of exposure time values for each image</param>
        public virtual void Process(IEnumerable<Mat> src, OutputArray dst, IEnumerable<float> times)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (times == null)
                throw new ArgumentNullException("times");
            dst.ThrowIfNotReady();
            
            IntPtr[] srcArray = EnumerableEx.SelectPtrs(src);
            float[] timesArray = EnumerableEx.ToArray(times);
            if (srcArray.Length != timesArray.Length)
                throw new OpenCvSharpException("src.Count() != times.Count()");

            NativeMethods.photo_CalibrateCRF_process(ptr, srcArray, srcArray.Length, dst.CvPtr, timesArray);

            dst.Fix();
            GC.KeepAlive(src);
        }
Example #13
        /// <summary>
        /// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
        /// </summary>
        /// <param name="img">8-bit input image.</param>
        /// <param name="pyramid">output pyramid.</param>
        /// <param name="winSize">window size of optical flow algorithm. 
        /// Must be not less than winSize argument of calcOpticalFlowPyrLK(). 
        /// It is needed to calculate required padding for pyramid levels.</param>
        /// <param name="maxLevel">0-based maximal pyramid level number.</param>
        /// <param name="withDerivatives">set to precompute gradients for every pyramid level. 
        /// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will 
        /// calculate them internally.</param>
        /// <param name="pyrBorder">the border mode for pyramid layers.</param>
        /// <param name="derivBorder">the border mode for gradients.</param>
        /// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible. 
        /// You can pass false to force data copying.</param>
        /// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
        public static int BuildOpticalFlowPyramid(
            InputArray img, OutputArray pyramid,
            Size winSize, int maxLevel,
            bool withDerivatives = true,
            BorderTypes pyrBorder = BorderTypes.Reflect101,
            BorderTypes derivBorder = BorderTypes.Constant,
            bool tryReuseInputImage = true)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            if (pyramid == null)
                throw new ArgumentNullException("pyramid");
            img.ThrowIfDisposed();
            pyramid.ThrowIfNotReady();

            int result = NativeMethods.video_buildOpticalFlowPyramid1(
                img.CvPtr, pyramid.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0, 
                (int)pyrBorder, (int)derivBorder, tryReuseInputImage ? 1 : 0);
            pyramid.Fix();
            return result;
        }
Example #14
        /// <summary>
        /// Performs per-element division of two arrays or a scalar by an array.
        /// </summary>
        /// <param name="scale">Scale factor</param>
        /// <param name="src2">The first source array</param>
        /// <param name="dst">The destination array; will have the same size and same type as src2</param>
        /// <param name="dtype">Optional depth of the destination array; when negative, dst will have the same depth as src2.</param>
        public static void Divide(double scale, InputArray src2, OutputArray dst, int dtype = -1)
        {
            if (src2 == null)
                throw new ArgumentNullException("src2");
            if (dst == null)
                throw new ArgumentNullException("dst");
            src2.ThrowIfDisposed();
            dst.ThrowIfNotReady();
            NativeMethods.core_divide(scale, src2.CvPtr, dst.CvPtr, dtype);
            GC.KeepAlive(src2);
            dst.Fix();
        }
Example #15
 /// <summary>
 /// computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
 /// </summary>
 /// <param name="src1">The first source array.</param>
 /// <param name="alpha">Weight for the first array elements.</param>
 /// <param name="src2">The second source array of the same size and type as src1.</param>
 /// <param name="beta">Weight for the second array elements.</param>
 /// <param name="gamma">Scalar added to each sum.</param>
 /// <param name="dst">The destination array; will have the same size and type as src1.</param>
 /// <param name="dtype">Optional depth of the destination array; when negative, dst will have the same depth as the inputs.</param>
 public static void AddWeighted(InputArray src1, double alpha, InputArray src2,
     double beta, double gamma, OutputArray dst, int dtype = -1)
 {
     if (src1 == null)
         throw new ArgumentNullException("src1");
     if (src2 == null)
         throw new ArgumentNullException("src2");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src1.ThrowIfDisposed();
     src2.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_addWeighted(src1.CvPtr, alpha, src2.CvPtr, beta, gamma, dst.CvPtr, dtype);
     GC.KeepAlive(src1);
     GC.KeepAlive(src2);
     dst.Fix();
 }
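For instance, a 70/30 cross-fade of two same-sized images (file names are illustrative):

 // Hypothetical usage: blended = 0.7*a + 0.3*b + 0.
 using (var a = Cv2.ImRead("a.png"))
 using (var b = Cv2.ImRead("b.png"))
 using (var blended = new Mat())
 {
     Cv2.AddWeighted(a, 0.7, b, 0.3, 0, blended);
 }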
Example #16
 /// <summary>
 /// Performs inverse 1D or 2D Discrete Cosine Transformation
 /// </summary>
 /// <param name="src">The source floating-point array</param>
 /// <param name="dst">The destination array; will have the same size and same type as src</param>
 /// <param name="flags">Transformation flags, a combination of DctFlags values</param>
 public static void Idct(InputArray src, OutputArray dst, DctFlags flags = DctFlags.None)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_idct(src.CvPtr, dst.CvPtr, (int)flags);
     GC.KeepAlive(src); 
     dst.Fix();
 }
Example #17
 /// <summary>
 /// clusters the input data using k-Means algorithm
 /// </summary>
 /// <param name="data">Data for clustering: a floating-point matrix with one sample per row.</param>
 /// <param name="k">Number of clusters to split the set by.</param>
 /// <param name="bestLabels">Input/output integer array that stores the cluster index for every sample.</param>
 /// <param name="criteria">The algorithm termination criteria: the maximum number of iterations and/or the desired accuracy.</param>
 /// <param name="attempts">Number of times the algorithm is executed using different initial labellings.</param>
 /// <param name="flags">Flag that can take KMeansFlags values.</param>
 /// <param name="centers">Output matrix of the cluster centers, one row per cluster.</param>
 /// <returns>The compactness measure of the best (most compact) clustering.</returns>
 public static double Kmeans(InputArray data, int k, InputOutputArray bestLabels,
     TermCriteria criteria, int attempts, KMeansFlags flags, OutputArray centers = null)
 {
     if (data == null)
         throw new ArgumentNullException("data");
     if (bestLabels == null)
         throw new ArgumentNullException("bestLabels");
     data.ThrowIfDisposed();
     bestLabels.ThrowIfDisposed();
     double ret = NativeMethods.core_kmeans(data.CvPtr, k, bestLabels.CvPtr, criteria, attempts, (int)flags, ToPtr(centers));
     bestLabels.Fix();
     if(centers != null)
         centers.Fix();
     GC.KeepAlive(data); 
     return ret;
 }
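A sketch clustering 100 random 2-D points into 3 groups (enum and struct spellings such as CriteriaTypes vary across OpenCvSharp versions, so treat the names here as assumptions):

 // Hypothetical usage: k-means with 3 attempts and k-means++ seeding.
 using (var data = new Mat(100, 2, MatType.CV_32FC1))
 using (var labels = new Mat())
 using (var centers = new Mat())
 {
     Cv2.Randu(data, new Scalar(0), new Scalar(255));
     var criteria = new TermCriteria(CriteriaTypes.Eps | CriteriaTypes.MaxIter, 10, 1.0);
     double compactness = Cv2.Kmeans(data, 3, labels, criteria, 3, KMeansFlags.PpCenters, centers);
 }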
Example #18
 /// <summary>
 /// Draw a GridBoard.
 /// </summary>
 /// <param name="outSize">size of the output image in pixels.</param>
 /// <param name="img">output image with the board. The size of this image will be outSize and the board will be on the center, keeping the board proportions.</param>
 /// <param name="marginSize">minimum margins (in pixels) of the board in the output image</param>
 /// <param name="borderBits">width of the marker borders.</param>
 public void Draw(Size outSize, IOutputArray img, int marginSize = 0, int borderBits = 1)
 {
     using (OutputArray oaImg = img.GetOutputArray())
         ArucoInvoke.cveArucoGridBoardDraw(_ptr, ref outSize, oaImg, marginSize, borderBits);
 }
Example #19
 /// <summary>
 /// Applies X Deriche filter to an image.
 /// </summary>
 /// <param name="op">Source 8-bit or 16-bit, 1-channel or 3-channel image.</param>
 /// <param name="dst">Result CV_32FC image with the same number of channels as op.</param>
 /// <param name="alphaDerive">see paper</param>
 /// <param name="alphaMean">see paper</param>
 /// <remarks>For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&amp;rep=rep1&amp;type=pdf </remarks>
 public static void GradientDericheX(IInputArray op, IOutputArray dst, double alphaDerive, double alphaMean)
 {
     using (InputArray iaOp = op.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveGradientDericheX(iaOp, oaDst, alphaDerive, alphaMean);
 }
Example #20
 /// <summary>
 /// Updates the background model
 /// </summary>
 /// <param name="frame">Next video frame.</param>
 /// <param name="forgroundMask">The output foreground mask as an 8-bit binary image.</param>
 /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray frame, IOutputArray forgroundMask, double learningRate = -1, Stream stream = null)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaForgroundMask = forgroundMask.GetOutputArray())
             CudaInvoke.cudaBackgroundSubtractorGMGApply(_ptr, iaFrame, oaForgroundMask, learningRate, stream);
 }
Example #21
 /// <summary>
 /// naive nearest neighbor finder
 /// </summary>
 /// <param name="src1">The first input samples, stored one per row.</param>
 /// <param name="src2">The second input samples of the same size and type as src1.</param>
 /// <param name="dist">Output matrix of distances between src1 and src2 rows.</param>
 /// <param name="dtype">Type of the output distance matrix.</param>
 /// <param name="nidx">Output matrix of nearest-neighbor indices (filled when k > 0).</param>
 /// <param name="normType">The norm used to measure distance.</param>
 /// <param name="k">Number of nearest neighbors to find per src1 row; 0 computes the full pairwise distance matrix.</param>
 /// <param name="mask">Optional operation mask.</param>
 /// <param name="update">Flag used together with crosscheck when results are accumulated iteratively.</param>
 /// <param name="crosscheck">If true, only mutual nearest neighbors are reported.</param>
 public static void BatchDistance(InputArray src1, InputArray src2,
                                  OutputArray dist, int dtype, OutputArray nidx,
                                  NormTypes normType = NormTypes.L2,
                                  int k = 0, InputArray mask = null,
                                  int update = 0, bool crosscheck = false)
 {
     if (src1 == null)
         throw new ArgumentNullException("src1");
     if (src2 == null)
         throw new ArgumentNullException("src2");
     if (dist == null)
         throw new ArgumentNullException("dist");
     if (nidx == null)
         throw new ArgumentNullException("nidx");
     src1.ThrowIfDisposed();
     src2.ThrowIfDisposed();
     dist.ThrowIfNotReady();
     nidx.ThrowIfNotReady();
     NativeMethods.core_batchDistance(src1.CvPtr, src2.CvPtr, dist.CvPtr, dtype, nidx.CvPtr,
         (int)normType, k, ToPtr(mask), update, crosscheck ? 1 : 0);
     GC.KeepAlive(src1);
     GC.KeepAlive(src2);
     dist.Fix();
     nidx.Fix();
 }
Example #22
 /// <summary>
 /// Draw a canonical marker image.
 /// </summary>
 /// <param name="dict">dictionary of markers indicating the type of markers</param>
 /// <param name="id">identifier of the marker that will be returned. It has to be a valid id in the specified dictionary.</param>
 /// <param name="sidePixels">size of the image in pixels</param>
 /// <param name="img">output image with the marker</param>
 /// <param name="borderBits">width of the marker border.</param>
 public static void DrawMarker(Dictionary dict, int id, int sidePixels, IOutputArray img, int borderBits = 1)
 {
     using (OutputArray oaImg = img.GetOutputArray())
         cveArucoDrawMarker(dict, id, sidePixels, oaImg, borderBits);
 }
Example #23
 /// <summary>
 /// Performs marker detection in the input image. Only markers included in the specific dictionary are searched. For each detected marker, it returns the 2D position of its corner in the image and its corresponding identifier. Note that this function does not perform pose estimation.
 /// </summary>
 /// <param name="image">input image</param>
 /// <param name="dict">indicates the type of markers that will be searched</param>
 /// <param name="corners">Vector of detected marker corners. For each marker, its four corners are provided (e.g. VectorOfVectorOfPointF). For N detected markers, the dimensions of this array are Nx4. The order of the corners is clockwise.</param>
 /// <param name="ids">Vector of identifiers of the detected markers. The identifier is of type int (e.g. VectorOfInt). For N detected markers, the size of ids is also N. The identifiers have the same order as the markers in the imgPoints array.</param>
 /// <param name="parameters">marker detection parameters</param>
 /// <param name="rejectedImgPoints">Contains the imgPoints of those squares whose inner code does not have a correct codification. Useful for debugging purposes.</param>
 public static void DetectMarkers(
     IInputArray image, Dictionary dict, IOutputArrayOfArrays corners,
     IOutputArray ids, DetectorParameters parameters,
     IOutputArrayOfArrays rejectedImgPoints = null
     )
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCorners = corners.GetOutputArray())
             using (OutputArray oaIds = ids.GetOutputArray())
                 using (OutputArray oaRejectedImgPoints = rejectedImgPoints != null ? rejectedImgPoints.GetOutputArray() : OutputArray.GetEmpty())
                 {
                     cveArucoDetectMarkers(iaImage, dict, oaCorners, oaIds, ref parameters, oaRejectedImgPoints);
                 }
 }
Example #24
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="stdDeviationsIntrinsics">Output vector of standard deviations estimated for intrinsic parameters. Order of deviations values: (fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,τx,τy). If one of the parameters is not estimated, its deviation equals zero.</param>
 /// <param name="stdDeviationsExtrinsics">Output vector of standard deviations estimated for extrinsic parameters. Order of deviations values: (R1,T1,…,RM,TM) where M is number of pattern views, Ri,Ti are concatenated 1x3 vectors.</param>
 /// <param name="perViewErrors">Output vector of average re-projection errors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     IOutputArray stdDeviationsIntrinsics,
     IOutputArray stdDeviationsExtrinsics,
     IOutputArray perViewErrors,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
         using (InputArray iaCharucoIds = charucoIds.GetInputArray())
             using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
                 using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
                     using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
                         using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
                             using (OutputArray oaStdDeviationsIntrinsics = stdDeviationsIntrinsics == null ? OutputArray.GetEmpty() : stdDeviationsIntrinsics.GetOutputArray())
                                 using (OutputArray oaStdDeviationsExtrinsics = stdDeviationsExtrinsics == null ? OutputArray.GetEmpty() : stdDeviationsExtrinsics.GetOutputArray())
                                     using (OutputArray oaPerViewErrors = perViewErrors == null ? OutputArray.GetEmpty() : perViewErrors.GetOutputArray())
                                     {
                                         return(cveArucoCalibrateCameraCharuco(
                                                    iaCharucoCorners, iaCharucoIds, board.BoardPtr, ref imageSize,
                                                    ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs,
                                                    oaStdDeviationsIntrinsics, oaStdDeviationsExtrinsics, oaPerViewErrors,
                                                    flags, ref criteria));
                                     }
 }
Example #25
 // ReSharper disable once RedundantOverriddenMember
 /// <inheritdoc />
 /// <param name="inputArr">Input image for which to compute the hash value; type should be CV_8UC4, CV_8UC3 or CV_8UC1.</param>
 /// <param name="outputArr">Hash value of input; it will contain 16 hexadecimal digits. The return type is CV_8U.</param>
 public override void Compute(InputArray inputArr, OutputArray outputArr)
 {
     base.Compute(inputArr, outputArr);
 }
Example #26
 /// <summary>
 /// Finds the edges on the input <paramref name="src"/> and marks them in the output image edges using the Canny algorithm.
 /// </summary>
 /// <param name="src">Input image</param>
 /// <param name="edges">Image to store the edges found by the function</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray src, IOutputArray edges, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaEdges = edges.GetOutputArray())
             CudaInvoke.cudaCannyEdgeDetectorDetect(_ptr, iaSrc, oaEdges, stream);
 }
Example #27
 /// <summary>
 /// transforms array of numbers using a lookup table: dst(i)=lut(src(i))
 /// </summary>
 /// <param name="src">Source array of 8-bit elements</param>
 /// <param name="lut">Look-up table of 256 elements. 
 /// In the case of multi-channel source array, the table should either have 
 /// a single channel (in this case the same table is used for all channels)
 ///  or the same number of channels as in the source array</param>
 /// <param name="dst">Destination array; 
 /// will have the same size and the same number of channels as src, 
 /// and the same depth as lut</param>
 /// <param name="interpolation">Not used; this implementation ignores it.</param>
 public static void LUT(InputArray src, InputArray lut, OutputArray dst, int interpolation = 0)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (lut == null)
         throw new ArgumentNullException("lut");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     lut.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_LUT(src.CvPtr, lut.CvPtr, dst.CvPtr);
     GC.KeepAlive(src);
     GC.KeepAlive(lut);
     dst.Fix();
 }
Example #28
        //1ms FL = 40 binary threshold

        public bool Test()
        {
            var rootDir      = @"P:\Projects\N3 Imaging\Images\06272018_16natural_4_syn_binning\SWUV lamp phos";
            var files        = Directory.GetFiles(rootDir, "*.bmp", SearchOption.TopDirectoryOnly);
            Mat combinedMask = null;
            Mat whiteLight   = Cv2.ImRead(@"P:\Projects\N3 Imaging\Images\06272018_16natural_4_syn_binning\whitelight.bmp");

            foreach (var file in files)
            {
                var fl = Cv2.ImRead(file);
                Mat mask;

                var res = PlMask(fl, 20, out mask);
                if (combinedMask == null)
                {
                    combinedMask = Mat.Zeros(mask.Size(), mask.Type());
                }

                Cv2.Add(mask, combinedMask, combinedMask);
            }

            Mat element = Cv2.GetStructuringElement(MorphShapes.Ellipse,
                                                    new OpenCvSharp.Size(9, 9),
                                                    new OpenCvSharp.Point(2, 2));

            Cv2.Dilate(combinedMask, combinedMask, element);

            //find contours on this mask
            Mat[] contours;
            var   hierarchy = new List <Point>();

            Cv2.FindContours(combinedMask, out contours, OutputArray.Create(hierarchy), RetrievalModes.External,
                             ContourApproximationModes.ApproxSimple);

            //remove small size contours
            List <Mat> bigContours = new List <Mat>();

            foreach (var contour in contours)
            {
                if (Cv2.ContourArea(contour) > 400)
                {
                    bigContours.Add(contour);
                }
            }

            combinedMask = Mat.Zeros(combinedMask.Size(), MatType.CV_8UC1);
            Cv2.DrawContours(combinedMask, bigContours, -1, Scalar.White);

            //get centers of contours
            for (int i = 0; i < bigContours.Count; i++)
            {
                var c = bigContours[i];
                var m = c.Moments(true);
                var x = m.M10 / m.M00;
                var y = m.M01 / m.M00;
                Cv2.DrawContours(whiteLight, bigContours, i, new Scalar(255, 0, 0), 4);
                Cv2.PutText(whiteLight, "F", new Point(x, y), HersheyFonts.HersheySimplex, 1, new Scalar(0, 0, 255), 4);
            }

            Cv2.ImShow("mask", combinedMask);
            Cv2.ImShow("whiteLight", whiteLight);
            Cv2.WaitKey();
            Cv2.DestroyAllWindows();

            return(true);
        }
Example #29
 /// <summary>
 /// returns the list of locations of non-zero pixels
 /// </summary>
 /// <param name="src">Single-channel source array.</param>
 /// <param name="idx">The output array of locations of non-zero pixels.</param>
 public static void FindNonZero(InputArray src, OutputArray idx)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (idx == null)
         throw new ArgumentNullException("idx");
     src.ThrowIfDisposed();
     idx.ThrowIfNotReady();
     NativeMethods.core_findNonZero(src.CvPtr, idx.CvPtr);
     GC.KeepAlive(src);
     idx.Fix();
 }
Example #30
 /// <summary>
 /// Apply the cuda filter
 /// </summary>
 /// <param name="image">The source CudaImage where the filter will be applied to</param>
 /// <param name="dst">The destination CudaImage</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray image, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaFilterApply(_ptr, iaImage, oaDst, stream);
 }
Example #31
 /// <summary>
 /// Cluster the descriptors and return the cluster centers
 /// </summary>
 /// <param name="cluster">The cluster centers</param>
 public void Cluster(IOutputArray cluster)
 {
     using (OutputArray oaCluster = cluster.GetOutputArray())
         Features2DInvoke.cveBOWKMeansTrainerCluster(_ptr, oaCluster);
 }
Example #32
 /// <summary>
 /// Finds the neighbors and predicts responses for input vectors.
 /// </summary>
 /// <param name="samples">Input samples stored by rows. It is a single-precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <param name="k">Number of used nearest neighbors. Should be greater than 1.</param>
 /// <param name="results">Vector with results of prediction (regression or classification) for each input sample. It is a single-precision floating-point vector with &lt;number_of_samples&gt; elements.</param>
 /// <param name="neighborResponses">Optional output values for corresponding neighbors. It is a single- precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <param name="dist">Optional output distances from the input vectors to the corresponding neighbors. It is a single-precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <returns>If only a single input vector is passed, the predicted value is returned by the method.</returns>
 public float FindNearest(
     IInputArray samples,
     int k,
     IOutputArray results,
     IOutputArray neighborResponses = null,
     IOutputArray dist = null)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (OutputArray oaResults = results.GetOutputArray())
             using (OutputArray oaNeighborResponses = neighborResponses == null ? OutputArray.GetEmpty() : neighborResponses.GetOutputArray())
                 using (OutputArray oaDist = dist == null ? OutputArray.GetEmpty() : dist.GetOutputArray())
                 {
                     return(MlInvoke.cveKNearestFindNearest(
                                _ptr,
                                iaSamples,
                                k,
                                oaResults,
                                oaNeighborResponses,
                                oaDist));
                 }
 }
Example #33
 /// <summary>
 /// Applies a binary blob thinning operation, to achieve a skeletization of the input image.
 /// The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.
 /// </summary>
 /// <param name="src">Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.</param>
 /// <param name="dst">Destination image of the same size and the same type as src. The function can work in-place.</param>
 /// <param name="thinningType">Value that defines which thinning algorithm should be used.</param>
 public static void Thinning(IInputArray src, IOutputArray dst, ThinningTypes thinningType)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveThinning(iaSrc, oaDst, thinningType);
 }
Example #34
 /// <summary>
 /// Returns the segmentation labeling of the image. Each label represents a superpixel, and each pixel is assigned to one superpixel label.
 /// </summary>
 /// <param name="labels">A CV_32SC1 integer array containing the labels of the superpixel segmentation. The labels are in the range [0, NumberOfSuperpixels].</param>
 public void GetLabels(IOutputArray labels)
 {
     using (OutputArray oaLabels = labels.GetOutputArray())
         XImgprocInvoke.cveSuperpixelSLICGetLabels(_ptr, oaLabels);
 }
Example #35
        /// <summary>
        /// performs back substitution for the previously computed SVD
        /// </summary>
        /// <param name="w">Singular values.</param>
        /// <param name="u">Left singular vectors.</param>
        /// <param name="vt">Transposed matrix of right singular vectors.</param>
        /// <param name="rhs">Right-hand side matrix of the linear system to solve.</param>
        /// <param name="dst">Found solution of the system.</param>
// ReSharper disable once InconsistentNaming
        public static void SVBackSubst(InputArray w, InputArray u, InputArray vt,
            InputArray rhs, OutputArray dst)
        {
            if (w == null)
                throw new ArgumentNullException("w");
            if (u == null)
                throw new ArgumentNullException("u");
            if (vt == null)
                throw new ArgumentNullException("vt");
            if (rhs == null)
                throw new ArgumentNullException("rhs");
            if (dst == null)
                throw new ArgumentNullException("dst");
            w.ThrowIfDisposed();
            u.ThrowIfDisposed();
            vt.ThrowIfDisposed();
            rhs.ThrowIfDisposed();
            dst.ThrowIfNotReady();
            NativeMethods.core_SVBackSubst(w.CvPtr, u.CvPtr, vt.CvPtr, rhs.CvPtr, dst.CvPtr);
            dst.Fix();
        }
Example #36
 /// <summary>
 /// Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
 /// </summary>
 /// <param name="source">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray source, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaSource = source.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaCLAHEApply(_ptr, iaSource, oaDst, stream);
 }
Example #37
 /// <summary>
 /// computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication
 /// </summary>
 /// <param name="a">The first source spectrum.</param>
 /// <param name="b">The second source spectrum of the same size and type as a.</param>
 /// <param name="c">The destination array of the same size and type as the inputs.</param>
 /// <param name="flags">Operation flags; DftFlags.Rows indicates that each row is an independent 1D spectrum.</param>
 /// <param name="conjB">If true, the second input array is conjugated before the multiplication.</param>
 public static void MulSpectrums(
     InputArray a, InputArray b, OutputArray c,
     DftFlags flags, bool conjB = false)
 {
     if (a == null)
         throw new ArgumentNullException("a");
     if (b == null)
         throw new ArgumentNullException("b");
     if (c == null)
         throw new ArgumentNullException("c");
     a.ThrowIfDisposed();
     b.ThrowIfDisposed();
     c.ThrowIfNotReady();
     NativeMethods.core_mulSpectrums(a.CvPtr, b.CvPtr, c.CvPtr, (int)flags, conjB ? 1 : 0);
     GC.KeepAlive(a);
     GC.KeepAlive(b); 
     c.Fix();
 }
Example #38
 /// <summary>
 /// Compute the panoramic images given the images
 /// </summary>
 /// <param name="images">The input images. This can be, for example, a VectorOfMat</param>
 /// <param name="pano">The panoramic image</param>
 /// <returns>The stitching status</returns>
 public Status Stitch(IInputArray images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.cveStitcherStitch(_ptr, iaImages, oaPano));
 }
Example #39
        /// <summary>
        /// Calculates the per-element scaled product of two arrays
        /// </summary>
        /// <param name="src1">The first source array</param>
        /// <param name="src2">The second source array of the same size and the same type as src1</param>
        /// <param name="dst">The destination array; will have the same size and the same type as src1</param>
        /// <param name="scale">The optional scale factor. [By default this is 1]</param>
        /// <param name="dtype">Optional depth of the destination array; when negative, dst will have the same depth as the inputs.</param>
        public static void Multiply(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1)
        {
            if (src1 == null)
                throw new ArgumentNullException("src1");
            if (src2 == null)
                throw new ArgumentNullException("src2");
            if (dst == null)
                throw new ArgumentNullException("dst");
            src1.ThrowIfDisposed();
            src2.ThrowIfDisposed();
            dst.ThrowIfNotReady();
            NativeMethods.core_multiply(src1.CvPtr, src2.CvPtr, dst.CvPtr, scale, dtype);
            GC.KeepAlive(src1);
            GC.KeepAlive(src2);
            dst.Fix();
        }
Example #40
 /// <summary>
 /// Applies Paillou filter to an image.
 /// </summary>
 /// <param name="op">Source 8-bit or 16-bit, 1-channel or 3-channel image.</param>
 /// <param name="dst">Result CV_32F image with the same number of channels as op.</param>
 /// <param name="alpha">see paper</param>
 /// <param name="omega">see paper</param>
 /// <remarks>For more details about this implementation, please see: Philippe Paillou. Detecting step edges in noisy sar images: a new linear operator. IEEE transactions on geoscience and remote sensing, 35(1):191–196, 1997.</remarks>
 public static void GradientPaillouX(IInputArray op, IOutputArray dst, double alpha, double omega)
 {
     using (InputArray iaOp = op.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveGradientPaillouX(iaOp, oaDst, alpha, omega);
 }
Example #41
 /// <summary>
 /// adds scaled array to another one (dst = alpha*src1 + src2)
 /// </summary>
 /// <param name="src1">The first source array.</param>
 /// <param name="alpha">Scale factor for the first array.</param>
 /// <param name="src2">The second source array of the same size and type as src1.</param>
 /// <param name="dst">The destination array of the same size and type as src1.</param>
 public static void ScaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst)
 {
     if (src1 == null)
         throw new ArgumentNullException("src1");
     if (src2 == null)
         throw new ArgumentNullException("src2");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src1.ThrowIfDisposed();
     src2.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_scaleAdd(src1.CvPtr, alpha, src2.CvPtr, dst.CvPtr);
     GC.KeepAlive(src1);
     GC.KeepAlive(src2);
     dst.Fix();
 }
Example #42
 public int ComposePanorama(IInputArrayOfArrays images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.cveStitcherComposePanorama2(_ptr, iaImages, oaPano));
 }
Example #43
        /// <summary>
        /// Scales, computes absolute values and converts the result to 8-bit.
        /// </summary>
        /// <param name="src">The source array</param>
        /// <param name="dst">The destination array</param>
        /// <param name="alpha">The optional scale factor. [By default this is 1]</param>
        /// <param name="beta">The optional delta added to the scaled values. [By default this is 0]</param>
        public static void ConvertScaleAbs(InputArray src, OutputArray dst, double alpha = 1, double beta = 0)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();
            NativeMethods.core_convertScaleAbs(src.CvPtr, dst.CvPtr, alpha, beta);
            GC.KeepAlive(src);
            dst.Fix();
        }
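A common use is visualizing a signed 16-bit Sobel derivative as an 8-bit image (file name is illustrative):

 // Hypothetical usage: |dI/dx| scaled into the displayable 8-bit range.
 using (var gray = Cv2.ImRead("input.png", ImreadModes.Grayscale))
 using (var grad = new Mat())
 using (var grad8U = new Mat())
 {
     Cv2.Sobel(gray, grad, MatType.CV_16S, 1, 0);
     Cv2.ConvertScaleAbs(grad, grad8U, 0.5); // halve magnitudes while converting
 }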
Example #44
 /// <summary>
 /// extracts a single channel from src (coi is 0-based index)
 /// </summary>
 /// <param name="src">The source array.</param>
 /// <param name="dst">The destination array; same size and depth as src, single channel.</param>
 /// <param name="coi">0-based index of the channel to extract.</param>
 public static void ExtractChannel(InputArray src, OutputArray dst, int coi)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_extractChannel(src.CvPtr, dst.CvPtr, coi);
     GC.KeepAlive(src);
     dst.Fix();
 }
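For example, pulling the green channel (index 1) out of a BGR image (file name is illustrative):

 // Hypothetical usage: single-channel view of the green component.
 using (var bgr = Cv2.ImRead("input.png"))
 using (var green = new Mat())
 {
     Cv2.ExtractChannel(bgr, green, 1);
 }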
Example #45
 /// <summary>
 /// transforms array of numbers using a lookup table: dst(i)=lut(src(i))
 /// </summary>
 /// <param name="src">Source array of 8-bit elements</param>
 /// <param name="lut">Look-up table of 256 elements. 
 /// In the case of multi-channel source array, the table should either have 
 /// a single channel (in this case the same table is used for all channels) 
 /// or the same number of channels as in the source array</param>
 /// <param name="dst">Destination array; 
 /// will have the same size and the same number of channels as src, 
 /// and the same depth as lut</param>
 /// <param name="interpolation">Not used; this implementation ignores it.</param>
 public static void LUT(InputArray src, byte[] lut, OutputArray dst, int interpolation = 0)
 {
     if (lut == null)
         throw new ArgumentNullException("lut");
     if (lut.Length != 256)
         throw new ArgumentException("lut.Length != 256");
     using (Mat lutMat = new Mat(256, 1, MatType.CV_8UC1, lut))
     {
         LUT(src, lutMat, dst, interpolation);
     }
 }
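A sketch building an inversion table for this overload (file names are illustrative):

 // Hypothetical usage: invert an 8-bit image through a 256-entry lookup table.
 var invert = new byte[256];
 for (int i = 0; i < 256; i++)
     invert[i] = (byte)(255 - i);
 using (var src = Cv2.ImRead("input.png"))
 using (var dst = new Mat())
 {
     Cv2.LUT(src, invert, dst);
 }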
Example #46
 /// <summary>
 /// Computes a binary map of a given saliency map
 /// </summary>
 /// <param name="saliencyMap">the saliency map obtained through one of the specialized algorithms</param>
 /// <param name="binaryMap">the binary map</param>
 /// <param name="saliency">The StaticSaliency object</param>
 /// <returns>True if the binary map is successfully computed</returns>
 public static bool ComputeBinaryMap(this IStaticSaliency saliency, IInputArray saliencyMap, IOutputArray binaryMap)
 {
     using (InputArray iaSaliencyMap = saliencyMap.GetInputArray())
         using (OutputArray oaBinaryMap = binaryMap.GetOutputArray())
             return(cveStaticSaliencyComputeBinaryMap(saliency.StaticSaliencyPtr, iaSaliencyMap, oaBinaryMap));
 }
Example #47
        /// <summary>
        /// computes mean value and standard deviation of all or selected array elements
        /// </summary>
        /// <param name="src">The source array; it should have 1 to 4 channels 
        /// (so that the results can be stored in Scalar's)</param>
        /// <param name="mean">The output parameter: computed mean value</param>
        /// <param name="stddev">The output parameter: computed standard deviation</param>
        /// <param name="mask">The optional operation mask</param>
        public static void MeanStdDev(
            InputArray src, OutputArray mean, OutputArray stddev, InputArray mask = null)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (mean == null)
                throw new ArgumentNullException("mean");
            if (stddev == null)
                throw new ArgumentNullException("stddev");
            src.ThrowIfDisposed();
            mean.ThrowIfNotReady();
            stddev.ThrowIfNotReady();

            NativeMethods.core_meanStdDev_OutputArray(src.CvPtr, mean.CvPtr, stddev.CvPtr, ToPtr(mask));

            mean.Fix();
            stddev.Fix();
            GC.KeepAlive(src);
            GC.KeepAlive(mask);
        }
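A usage sketch printing the first-channel statistics (file name is illustrative; the outputs come back as one double per channel):

 // Hypothetical usage: per-channel mean and standard deviation.
 using (var src = Cv2.ImRead("input.png"))
 using (var mean = new Mat())
 using (var stddev = new Mat())
 {
     Cv2.MeanStdDev(src, mean, stddev);
     Console.WriteLine($"mean={mean.At<double>(0)}, stddev={stddev.At<double>(0)}");
 }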
Example #48
 /// <summary>
 /// Performs anisotropic diffusion on an image.
 /// </summary>
 /// <param name="src">Grayscale Source image.</param>
 /// <param name="dst">Destination image of the same size and the same number of channels as src .</param>
 /// <param name="alpha">The amount of time to step forward by on each iteration (normally, it's between 0 and 1).</param>
 /// <param name="K">sensitivity to the edges</param>
 /// <param name="niters">The number of iterations</param>
 public static void AnisotropicDiffusion(IInputArray src, IOutputArray dst, float alpha, float K, int niters)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveAnisotropicDiffusion(iaSrc, oaDst, alpha, K, niters);
 }
Example #49
 /// <summary>
 /// transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows
 /// </summary>
 /// <param name="src">The source 2D matrix</param>
 /// <param name="dst">The destination vector. 
 /// Its size and type is defined by dim and dtype parameters</param>
 /// <param name="dim">The dimension index along which the matrix is reduced. 
 /// 0 means that the matrix is reduced to a single row and 1 means that the matrix is reduced to a single column</param>
 /// <param name="rtype">The reduction operation: sum, average, maximum or minimum.</param>
 /// <param name="dtype">When it is negative, the destination vector will have 
 /// the same type as the source matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels())</param>
 public static void Reduce(InputArray src, OutputArray dst, ReduceDimension dim, ReduceTypes rtype, int dtype)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_reduce(src.CvPtr, dst.CvPtr, (int)dim, (int)rtype, dtype);
     dst.Fix();
     GC.KeepAlive(src);
 }
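A sketch computing column-wise sums of a grayscale image, widened to 32-bit integers to avoid overflow (file name is illustrative):

 // Hypothetical usage: reduce each column to its sum, producing a 1 x src.Cols row.
 using (var src = Cv2.ImRead("input.png", ImreadModes.Grayscale))
 using (var colSums = new Mat())
 {
     Cv2.Reduce(src, colSums, ReduceDimension.Row, ReduceTypes.Sum, MatType.CV_32SC1);
 }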
Example #50
 /// <summary>
 /// Get the reliability map computed from the wrapped phase map.
 /// </summary>
 /// <param name="reliabilityMap">Image where the reliability map is stored.</param>
 public void GetInverseReliabilityMap(IOutputArray reliabilityMap)
 {
     using (OutputArray oaReliabilityMap = reliabilityMap.GetOutputArray())
         PhaseUnwrappingInvoke.cveHistogramPhaseUnwrappingGetInverseReliabilityMap(_ptr, oaReliabilityMap);
 }
Example #51
 /// <summary>
 /// Estimate the Gaussian mixture parameters from a samples set. This variation starts with Expectation step. You need to provide initial means of mixture components. Optionally you can pass initial weights and covariance matrices of mixture components.
 /// </summary>
 /// <param name="samples">Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing.</param>
 /// <param name="means0">Initial means of mixture components. It is a one-channel matrix of nclusters x dims size. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing.</param>
 /// <param name="covs0">The vector of initial covariance matrices of mixture components. Each of covariance matrices is a one-channel matrix of dims x dims size. If the matrices do not have CV_64F type they will be converted to the inner matrices of such type for the further computing.</param>
 /// <param name="weights0">Initial weights of mixture components. It should be a one-channel floating-point matrix with 1 x nclusters or nclusters x 1 size.</param>
 /// <param name="loglikelihoods">The optional output matrix that contains a likelihood logarithm value for each sample. It has nsamples x 1 size and CV_64FC1 type.</param>
 /// <param name="labels">The optional output "class label" (indices of the most probable mixture component for each sample). It has nsamples x 1 size and CV_32SC1 type.</param>
 /// <param name="probs">The optional output matrix that contains posterior probabilities of each Gaussian mixture component given the each sample. It has nsamples x nclusters size and CV_64FC1 type.</param>
 public void trainE(
     IInputArray samples,
     IInputArray means0,
     IInputArray covs0           = null,
     IInputArray weights0        = null,
     IOutputArray loglikelihoods = null,
     IOutputArray labels         = null,
     IOutputArray probs          = null)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaMeans0 = means0.GetInputArray())
             using (InputArray iaCovs0 = covs0 == null ? InputArray.GetEmpty() : covs0.GetInputArray())
                 using (InputArray iaWeights = weights0 == null ? InputArray.GetEmpty() : weights0.GetInputArray())
                     using (OutputArray oaLogLikelihood = loglikelihoods == null ? OutputArray.GetEmpty() : loglikelihoods.GetOutputArray())
                         using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                             using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                             {
                                 MlInvoke.CvEMTrainE(_ptr, iaSamples, iaMeans0, iaCovs0, iaWeights, oaLogLikelihood, oaLabels,
                                                     oaProbs, ref _statModel, ref _algorithm);
                             }
 }
Example #52
 /// <summary>
 /// Returns the mask of the superpixel segmentation stored in SuperpixelSLIC object.
 /// </summary>
 /// <param name="image">CV_8U1 image mask where -1 indicates that the pixel is a superpixel border, and 0 otherwise.</param>
 /// <param name="thickLine">If false, the border is only one pixel wide, otherwise all pixels at the border are masked.</param>
 public void GetLabelContourMask(IOutputArray image, bool thickLine = true)
 {
     using (OutputArray oaImage = image.GetOutputArray())
         XImgprocInvoke.cveSuperpixelSLICGetLabelContourMask(_ptr, oaImage, thickLine);
 }
Example #53
        /// <summary>
        /// Finds lines in the input image.
        /// </summary>
        /// <param name="image">A grayscale (CV_8UC1) input image. </param>
        /// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. 
        /// Where Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient.</param>
        /// <param name="width">Vector of widths of the regions, where the lines are found. E.g. Width of line.</param>
        /// <param name="prec">Vector of precisions with which the lines are found.</param>
        /// <param name="nfa">Vector containing number of false alarms in the line region, 
        /// with precision of 10%. The bigger the value, logarithmically better the detection.</param>
        public virtual void Detect(InputArray image, OutputArray lines,
            OutputArray width = null, OutputArray prec = null, OutputArray nfa = null)
        {
            if (image == null) 
                throw new ArgumentNullException(nameof(image));
            if (lines == null)
                throw new ArgumentNullException(nameof(lines));
            image.ThrowIfDisposed();
            lines.ThrowIfNotReady();
            width?.ThrowIfNotReady();
            prec?.ThrowIfNotReady();
            nfa?.ThrowIfNotReady();

            NativeMethods.imgproc_LineSegmentDetector_detect_OutputArray(ptr, image.CvPtr, lines.CvPtr,
                Cv2.ToPtr(width), Cv2.ToPtr(prec), Cv2.ToPtr(nfa));

            GC.KeepAlive(image);
            lines.Fix();
            width?.Fix();
            prec?.Fix();
            nfa?.Fix();
        }
Example #54
 /// <summary>
 /// Computes the estimated covariance matrix of an image using the sliding window formulation.
 /// </summary>
 /// <param name="src">The source image. Input image must be of a complex type.</param>
 /// <param name="dst">The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).</param>
 /// <param name="windowRows">The number of rows in the window.</param>
 /// <param name="windowCols">The number of cols in the window. The window size parameters control the accuracy of the estimation. The sliding window moves over the entire image from the top-left corner to the bottom right corner. Each location of the window represents a sample. If the window is the size of the image, then this gives the exact covariance matrix. For all other cases, the sizes of the window will impact the number of samples and the number of elements in the estimated covariance matrix.</param>
 public static void CovarianceEstimation(IInputArray src, IOutputArray dst, int windowRows, int windowCols)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveCovarianceEstimation(iaSrc, oaDst, windowRows, windowCols);
 }