Example 1
        /// <summary>
        /// Estimates Gaussian mixture parameters from the sample set
        /// </summary>
        /// <param name="samples"></param>
        /// <param name="logLikelihoods"></param>
        /// <param name="labels"></param>
        /// <param name="probs"></param>
        /// <returns></returns>
        public virtual bool TrainEM(
            InputArray samples,
            OutputArray logLikelihoods = null,
            OutputArray labels         = null,
            OutputArray probs          = null)
        {
            ThrowIfDisposed();
            if (samples == null)
            {
                throw new ArgumentNullException(nameof(samples));
            }
            samples.ThrowIfDisposed();

            logLikelihoods?.ThrowIfNotReady();
            labels?.ThrowIfNotReady();
            probs?.ThrowIfNotReady();

            int ret = NativeMethods.ml_EM_trainEM(
                ptr,
                samples.CvPtr,
                Cv2.ToPtr(logLikelihoods),
                Cv2.ToPtr(labels),
                Cv2.ToPtr(probs));

            logLikelihoods?.Fix();
            labels?.Fix();
            probs?.Fix();
            GC.KeepAlive(this);
            GC.KeepAlive(samples);
            GC.KeepAlive(logLikelihoods);
            GC.KeepAlive(labels);
            GC.KeepAlive(probs);
            return ret != 0;
        }
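A minimal usage sketch for TrainEM (not part of the original source), assuming the OpenCvSharp.ML.EM class with its static Create() factory and ClustersNumber property; the data and cluster count are illustrative only.

    // Sketch: fit a 2-component Gaussian mixture to random 2-D points.
    using System;
    using OpenCvSharp;
    using OpenCvSharp.ML;

    using var samples = new Mat(100, 2, MatType.CV_32FC1);
    Cv2.Randn(samples, new Scalar(0), new Scalar(1));    // toy data

    using var em = EM.Create();                           // assumed factory
    em.ClustersNumber = 2;

    using var logLikelihoods = new Mat();
    using var labels = new Mat();
    bool ok = em.TrainEM(samples, logLikelihoods, labels);
    Console.WriteLine($"trained: {ok}, label of sample 0: {labels.Get<int>(0)}");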
Example 2
        /// <summary>
        /// Finds lines in the input image.
        /// </summary>
        /// <param name="image">A grayscale (CV_8UC1) input image. </param>
        /// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line.
        /// Where Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient.</param>
        /// <param name="width">Vector of widths of the regions, where the lines are found. E.g. Width of line.</param>
        /// <param name="prec">Vector of precisions with which the lines are found.</param>
        /// <param name="nfa">Vector containing number of false alarms in the line region,
        /// with precision of 10%. The bigger the value, logarithmically better the detection.</param>
        public virtual void Detect(InputArray image, OutputArray lines,
                                   OutputArray width = null, OutputArray prec = null, OutputArray nfa = null)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (lines == null)
            {
                throw new ArgumentNullException(nameof(lines));
            }
            image.ThrowIfDisposed();
            lines.ThrowIfNotReady();
            width?.ThrowIfNotReady();
            prec?.ThrowIfNotReady();
            nfa?.ThrowIfNotReady();

            NativeMethods.imgproc_LineSegmentDetector_detect_OutputArray(ptr, image.CvPtr, lines.CvPtr,
                                                                         Cv2.ToPtr(width), Cv2.ToPtr(prec), Cv2.ToPtr(nfa));

            GC.KeepAlive(image);
            lines.Fix();
            width?.Fix();
            prec?.Fix();
            nfa?.Fix();
        }
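A hedged usage sketch (not from the original source), assuming the detector comes from Cv2.CreateLineSegmentDetector() and that the OutputArray overload above fills an N x 1 Mat of 4-element line endpoints.

    using OpenCvSharp;

    using var gray = Cv2.ImRead("input.png", ImreadModes.Grayscale);
    using var lsd = Cv2.CreateLineSegmentDetector();      // assumed factory

    using var lines = new Mat();
    lsd.Detect(gray, lines);                               // each element: (x1, y1, x2, y2)

    using var vis = gray.CvtColor(ColorConversionCodes.GRAY2BGR);
    lsd.DrawSegments(vis, lines);                          // overlay detected segments
    Cv2.ImWrite("lines.png", vis);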
Example 3
        /// <summary>
        /// Finds the template on the image.
        /// </summary>
        /// <param name="image">Input image (grayscale, CV_8UC1).</param>
        /// <param name="positions">Found positions.</param>
        /// <param name="votes">Optional output vote values.</param>
        public virtual void Detect(
            InputArray image, OutputArray positions, OutputArray? votes = null)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (positions == null)
            {
                throw new ArgumentNullException(nameof(positions));
            }
            image.ThrowIfDisposed();
            positions.ThrowIfNotReady();
            votes?.ThrowIfNotReady();

            NativeMethods.HandleException(
                NativeMethods.imgproc_GeneralizedHough_detect1(
                    ptr, image.CvPtr, positions.CvPtr, Cv2.ToPtr(votes)));

            GC.KeepAlive(this);
            GC.KeepAlive(image);
            GC.KeepAlive(positions);
            GC.KeepAlive(votes);
            positions.Fix();
            votes?.Fix();
        }
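Example 4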
        /// <summary>
        /// Computes dense optical flow between two frames.
        /// </summary>
        /// <param name="frame0">First input frame.</param>
        /// <param name="frame1">Second input frame.</param>
        /// <param name="flow1">Computed flow (or its first component).</param>
        /// <param name="flow2">Optional output for the second flow component.</param>
        public virtual void Calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2 = null)
        {
            if (frame0 == null)
            {
                throw new ArgumentNullException(nameof(frame0));
            }
            if (frame1 == null)
            {
                throw new ArgumentNullException(nameof(frame1));
            }
            if (flow1 == null)
            {
                throw new ArgumentNullException(nameof(flow1));
            }
            frame0.ThrowIfDisposed();
            frame1.ThrowIfDisposed();
            flow1.ThrowIfNotReady();
            flow2?.ThrowIfNotReady();

            NativeMethods.superres_DenseOpticalFlowExt_calc(
                ptr, frame0.CvPtr, frame1.CvPtr, flow1.CvPtr, Cv2.ToPtr(flow2));
            GC.KeepAlive(this);
            GC.KeepAlive(frame0);
            GC.KeepAlive(frame1);
            GC.KeepAlive(flow1);
            GC.KeepAlive(flow2);
            flow1.Fix();
            flow2?.Fix();
        }
Example 5
        /// <summary>
        /// Estimate the Gaussian mixture parameters from a samples set.
        /// </summary>
        /// <param name="samples">Samples from which the Gaussian mixture model will be estimated. It should be a
        /// one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        /// it will be converted to an inner matrix of that type for further computation.</param>
        /// <param name="means0">Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of
        /// \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be
        /// converted to an inner matrix of that type for further computation.</param>
        /// <param name="covs0">The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of
        /// covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices
        /// do not have CV_64F type they will be converted to inner matrices of that type for further computation.</param>
        /// <param name="weights0">Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel
        /// floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size.</param>
        /// <param name="logLikelihoods">The optional output matrix that contains a likelihood logarithm value for
        /// each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.</param>
        /// <param name="labels">The optional output "class label" for each sample:
        /// \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        /// mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.</param>
        /// <param name="probs">The optional output matrix that contains posterior probabilities of each Gaussian
        /// mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 type.</param>
        public virtual bool TrainE(
            InputArray samples,
            InputArray means0,
            InputArray? covs0           = null,
            InputArray? weights0        = null,
            OutputArray? logLikelihoods = null,
            OutputArray? labels         = null,
            OutputArray? probs          = null)
        {
            ThrowIfDisposed();
            if (samples == null)
            {
                throw new ArgumentNullException(nameof(samples));
            }
            if (means0 == null)
            {
                throw new ArgumentNullException(nameof(means0));
            }
            samples.ThrowIfDisposed();
            means0.ThrowIfDisposed();

            logLikelihoods?.ThrowIfNotReady();
            covs0?.ThrowIfDisposed();
            weights0?.ThrowIfDisposed();
            labels?.ThrowIfNotReady();
            probs?.ThrowIfNotReady();

            NativeMethods.HandleException(
                NativeMethods.ml_EM_trainE(
                    ptr,
                    samples.CvPtr,
                    means0.CvPtr,
                    Cv2.ToPtr(covs0),
                    Cv2.ToPtr(weights0),
                    Cv2.ToPtr(logLikelihoods),
                    Cv2.ToPtr(labels),
                    Cv2.ToPtr(probs),
                    out var ret));

            logLikelihoods?.Fix();
            labels?.Fix();
            probs?.Fix();
            GC.KeepAlive(this);
            GC.KeepAlive(samples);
            GC.KeepAlive(means0);
            GC.KeepAlive(covs0);
            GC.KeepAlive(weights0);
            GC.KeepAlive(logLikelihoods);
            GC.KeepAlive(labels);
            GC.KeepAlive(probs);
            return ret != 0;
        }
Example 6
#if LANG_JP
        /// <summary>
        /// サンプルに対する応答を予測する
        /// </summary>
        /// <param name="sample"></param>
        /// <param name="probs"></param>
#else
        /// <summary>
        /// Predicts the response for sample
        /// </summary>
        /// <param name="sample"></param>
        /// <param name="probs"></param>
#endif
        public virtual Vec2d Predict2(InputArray sample, OutputArray probs = null)
        {
            ThrowIfDisposed();
            if (sample == null)
            {
                throw new ArgumentNullException(nameof(sample));
            }
            sample.ThrowIfDisposed();
            probs?.ThrowIfNotReady();

            Vec2d ret = NativeMethods.ml_EM_predict2(ptr, sample.CvPtr, Cv2.ToPtr(probs));

            probs?.Fix();
            GC.KeepAlive(sample);
            return ret;
        }
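A short standalone sketch of Predict2 (not part of the original source), assuming the same OpenCvSharp.ML.EM API as in Example 1; the returned Vec2d holds the likelihood logarithm and the index of the most probable mixture component.

    using System;
    using OpenCvSharp;
    using OpenCvSharp.ML;

    using var samples = new Mat(100, 2, MatType.CV_32FC1);
    Cv2.Randn(samples, new Scalar(0), new Scalar(1));

    using var em = EM.Create();                            // assumed factory
    em.ClustersNumber = 2;
    em.TrainEM(samples);

    using var sample = new Mat(1, 2, MatType.CV_64FC1);    // one sample per row
    sample.Set(0, 0, 0.1);
    sample.Set(0, 1, -0.3);

    using var probs = new Mat();
    Vec2d res = em.Predict2(sample, probs);                // (log-likelihood, component index)
    Console.WriteLine($"most probable component: {(int)res.Item1}");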
Example 7
 /// <summary>
 /// Perform image denoising using Non-local Means Denoising algorithm
 /// with several computational optimizations. The noise is expected to be Gaussian white noise.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel, 2-channel or 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src .</param>
 /// <param name="h">
 /// Parameter regulating filter strength. A big h value perfectly removes noise but also removes image details;
 /// a smaller h value preserves details but also preserves some noise.</param>
 /// <param name="templateWindowSize">
 /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels</param>
 /// <param name="searchWindowSize">
 /// Size in pixels of the window that is used to compute the weighted average for a given pixel.
 /// Should be odd. Affects performance linearly: the greater the searchWindowSize, the greater the denoising time. Recommended value 21 pixels.</param>
 public static void FastNlMeansDenoising(InputArray src, OutputArray dst, float h = 3,
                                         int templateWindowSize = 7, int searchWindowSize = 21)
 {
     if (src == null)
     {
         throw new ArgumentNullException("nameof(src)");
     }
     if (dst == null)
     {
         throw new ArgumentNullException("nameof(dst)");
     }
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.photo_fastNlMeansDenoising(src.CvPtr, dst.CvPtr, h, templateWindowSize, searchWindowSize);
     dst.Fix();
 }
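A minimal call through the public wrapper (not part of the original source); parameter values are the recommended defaults from the comments above, and file names are illustrative.

    using OpenCvSharp;

    using var noisy = Cv2.ImRead("noisy.png", ImreadModes.Grayscale);
    using var denoised = new Mat();
    Cv2.FastNlMeansDenoising(noisy, denoised, h: 3, templateWindowSize: 7, searchWindowSize: 21);
    Cv2.ImWrite("denoised.png", denoised);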
Example 8
        public Status ComposePanorama(OutputArray pano)
        {
            if (pano == null)
            {
                throw new ArgumentNullException(nameof(pano));
            }
            pano.ThrowIfNotReady();

            int status = NativeMethods.stitching_Stitcher_composePanorama1(
                ptr, pano.CvPtr);

            pano.Fix();
            GC.KeepAlive(this);
            GC.KeepAlive(pano);
            return (Status)status;
        }
Example 9
        /// <summary>
        /// Grabs the next frame from the frame source.
        /// </summary>
        /// <param name="frame">Output frame.</param>
        public virtual void NextFrame(OutputArray frame)
        {
            ThrowIfDisposed();
            if (frame == null)
            {
                throw new ArgumentNullException(nameof(frame));
            }
            frame.ThrowIfNotReady();

            NativeMethods.HandleException(
                NativeMethods.superres_FrameSource_nextFrame(ptr, frame.CvPtr));

            frame.Fix();
            GC.KeepAlive(this);
            GC.KeepAlive(frame);
        }
Example 10
 /// <summary>
 /// reconstructs the original vector from the projection
 /// </summary>
 /// <param name="vec"></param>
 /// <param name="result"></param>
 public void BackProject(InputArray vec, OutputArray result)
 {
     ThrowIfDisposed();
     if (vec == null)
     {
         throw new ArgumentNullException(nameof(vec));
     }
     if (result == null)
     {
         throw new ArgumentNullException(nameof(result));
     }
     vec.ThrowIfDisposed();
     result.ThrowIfNotReady();
     NativeMethods.core_PCA_backProject2(ptr, vec.CvPtr, result.CvPtr);
     result.Fix();
 }
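A hedged round-trip sketch (Project then BackProject, not from the original source), assuming the OpenCvSharp PCA constructor that takes the data matrix, an empty mean, a layout flag, and the number of retained components.

    using OpenCvSharp;

    // Toy data: 50 samples of dimension 10, one sample per row.
    using var data = new Mat(50, 10, MatType.CV_32FC1);
    Cv2.Randn(data, new Scalar(0), new Scalar(1));

    using var pca = new PCA(data, new Mat(), PCA.Flags.DataAsRow, 3);   // keep 3 components

    using var projected = new Mat();
    pca.Project(data, projected);                 // 50 x 3 coefficients

    using var reconstructed = new Mat();
    pca.BackProject(projected, reconstructed);    // 50 x 10 approximation of data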
Example 11
 /// <summary>
 /// computes singular values of a matrix
 /// </summary>
 /// <param name="src"></param>
 /// <param name="w"></param>
 /// <param name="flags"></param>
 public static void Compute(InputArray src, OutputArray w, Flags flags = 0)
 {
     if (src == null)
     {
         throw new ArgumentNullException(nameof(src));
     }
     if (w == null)
     {
         throw new ArgumentNullException(nameof(w));
     }
     src.ThrowIfDisposed();
     w.ThrowIfNotReady();
     NativeMethods.core_SVD_static_compute2(src.CvPtr, w.CvPtr, (int)flags);
     w.Fix();
     GC.KeepAlive(src);
     GC.KeepAlive(w);
 }
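A short sketch (not from the original source) that computes only the singular values of a small random matrix through the static wrapper above.

    using System;
    using OpenCvSharp;

    using var m = new Mat(3, 3, MatType.CV_64FC1);
    Cv2.Randu(m, new Scalar(-1), new Scalar(1));

    using var w = new Mat();
    SVD.Compute(m, w);                            // w: 3 x 1 singular values, descending
    for (int i = 0; i < w.Rows; i++)
        Console.WriteLine(w.Get<double>(i));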
Example 12
 /// <summary>
 /// finds dst = arg min_{|dst|=1} |src*dst|
 /// </summary>
 /// <param name="src"></param>
 /// <param name="dst"></param>
 public static void SolveZ(InputArray src, OutputArray dst)
 {
     if (src == null)
     {
         throw new ArgumentNullException(nameof(src));
     }
     if (dst == null)
     {
         throw new ArgumentNullException(nameof(dst));
     }
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.core_SVD_static_solveZ(src.CvPtr, dst.CvPtr);
     dst.Fix();
     GC.KeepAlive(src);
     GC.KeepAlive(dst);
 }
Example 13
 /// <summary>
 /// Copies this array to the destination array, applying the given mask.
 /// </summary>
 /// <param name="arr">Destination array.</param>
 /// <param name="mask">Operation mask; non-zero elements indicate which elements to copy.</param>
 public void CopyTo(OutputArray arr, InputArray mask)
 {
     if (arr == null)
     {
         throw new ArgumentNullException(nameof(arr));
     }
     if (mask == null)
     {
         throw new ArgumentNullException(nameof(mask));
     }
     arr.ThrowIfNotReady();
     mask.ThrowIfDisposed();
     ThrowIfDisposed();
     NativeMethods.core_InputArray_copyTo2(ptr, arr.CvPtr, mask.CvPtr);
     arr.Fix();
     GC.KeepAlive(mask);
 }
Example 14
 /// <summary>
 /// Gets the images that correspond to each shape.
 /// These images are used in the calculation of the Image Appearance cost.
 /// </summary>
 /// <param name="image1">Image corresponding to the shape defined by contours1.</param>
 /// <param name="image2">Image corresponding to the shape defined by contours2.</param>
 public void GetImages(OutputArray image1, OutputArray image2)
 {
     ThrowIfDisposed();
     if (image1 == null)
     {
         throw new ArgumentNullException(nameof(image1));
     }
     if (image2 == null)
     {
         throw new ArgumentNullException(nameof(image2));
     }
     image1.ThrowIfNotReady();
     image2.ThrowIfNotReady();
     NativeMethods.shape_ShapeContextDistanceExtractor_getImages(ptr, image1.CvPtr, image2.CvPtr);
     image1.Fix();
     image2.Fix();
 }
Example 15
        /// <summary>
        /// Computes the estimated covariance matrix of an image using the sliding window formulation.
        /// </summary>
        /// <remarks>
        /// The window size parameters control the accuracy of the estimation.
        /// The sliding window moves over the entire image from the top-left corner
        /// to the bottom-right corner. Each location of the window represents a sample.
        /// If the window is the size of the image, then this gives the exact covariance matrix.
        /// For all other cases, the sizes of the window will impact the number of samples
        /// and the number of elements in the estimated covariance matrix.
        /// </remarks>
        /// <param name="src">The source image. Input image must be of a complex type.</param>
        /// <param name="dst">The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).</param>
        /// <param name="windowRows">The number of rows in the window.</param>
        /// <param name="windowCols">The number of cols in the window.</param>
        public static void CovarianceEstimation(InputArray src, OutputArray dst, int windowRows, int windowCols)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            NativeMethods.ximgproc_covarianceEstimation(src.CvPtr, dst.CvPtr, windowRows, windowCols);

            GC.KeepAlive(src);
            dst.Fix();
        }
Example 16
        /// <summary>
        /// The update operator. Takes the next video frame and returns the current foreground mask as an 8-bit binary image.
        /// </summary>
        /// <param name="image"></param>
        /// <param name="fgmask"></param>
        /// <param name="learningRate"></param>
        public virtual void Run(InputArray image, OutputArray fgmask, double learningRate = -1)
        {
            if (image == null)
            {
                throw new ArgumentNullException("image");
            }
            if (fgmask == null)
            {
                throw new ArgumentNullException("fgmask");
            }
            image.ThrowIfDisposed();
            fgmask.ThrowIfNotReady();

            NativeMethods.video_BackgroundSubtractor_apply(ptr, image.CvPtr, fgmask.CvPtr, learningRate);

            fgmask.Fix();
            GC.KeepAlive(image);
        }
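A hedged loop sketch (not from the original source), assuming a concrete subtractor from Cv2.CreateBackgroundSubtractorMOG2(); the Run wrapper above is used here, but newer OpenCvSharp versions expose the same operation as Apply.

    using OpenCvSharp;

    using var capture = new VideoCapture("traffic.avi");   // illustrative file name
    using var subtractor = Cv2.CreateBackgroundSubtractorMOG2();
    using var frame = new Mat();
    using var fgmask = new Mat();

    while (capture.Read(frame) && !frame.Empty())
    {
        subtractor.Run(frame, fgmask);            // Apply(frame, fgmask) in newer versions
        Cv2.ImShow("foreground", fgmask);
        if (Cv2.WaitKey(30) == 27) break;         // Esc to quit
    }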
Example 17
 /// <summary>
 /// reconstructs the original vector from the projection
 /// </summary>
 /// <param name="vec"></param>
 /// <param name="result"></param>
 public void BackProject(InputArray vec, OutputArray result)
 {
     if (disposed)
     {
         throw new ObjectDisposedException("PCA");
     }
     if (vec == null)
     {
         throw new ArgumentNullException("vec");
     }
     if (result == null)
     {
         throw new ArgumentNullException("result");
     }
     vec.ThrowIfDisposed();
     result.ThrowIfNotReady();
     NativeMethods.core_PCA_backProject(ptr, vec.CvPtr, result.CvPtr);
     result.Fix();
 }
Example 18
        /// <summary>
        /// Modification of fastNlMeansDenoisingMulti function for colored images sequences
        /// </summary>
        /// <param name="srcImgs">Input 8-bit 3-channel images sequence. All images should have the same type and size.</param>
        /// <param name="dst">Output image with the same size and type as srcImgs images.</param>
        /// <param name="imgToDenoiseIndex">Target image to denoise index in srcImgs sequence</param>
        /// <param name="temporalWindowSize">Number of surrounding images to use for target image denoising. Should be odd.
        /// Images from imgToDenoiseIndex - temporalWindowSize / 2 to imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs
        /// will be used to denoise srcImgs[imgToDenoiseIndex] image.</param>
        /// <param name="h">Parameter regulating filter strength for luminance component. Bigger h value perfectly removes noise
        /// but also removes image details, smaller h value preserves details but also preserves some noise.</param>
        /// <param name="hColor"> The same as h but for color components.</param>
        /// <param name="templateWindowSize">Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels</param>
        /// <param name="searchWindowSize">Size in pixels of the window that is used to compute weighted average for given pixel.
        /// Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time. Recommended value 21 pixels</param>
        public static void FastNlMeansDenoisingColoredMulti(IEnumerable <InputArray> srcImgs, OutputArray dst,
                                                            int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3,
                                                            int templateWindowSize = 7, int searchWindowSize = 21)
        {
            if (srcImgs == null)
            {
                throw new ArgumentNullException("nameof(srcImgs)");
            }
            if (dst == null)
            {
                throw new ArgumentNullException("nameof(dst)");
            }
            dst.ThrowIfNotReady();
            IntPtr[] srcImgPtrs = EnumerableEx.SelectPtrs(srcImgs);

            NativeMethods.photo_fastNlMeansDenoisingColoredMulti(srcImgPtrs, srcImgPtrs.Length, dst.CvPtr, imgToDenoiseIndex,
                                                                 temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize);
            dst.Fix();
        }
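A hedged sketch (not from the original source) denoising the middle frame of a short color sequence; it assumes an overload that accepts a sequence of Mat (the wrapper above declares IEnumerable&lt;InputArray&gt;), and the frame files are illustrative.

    using System.Collections.Generic;
    using OpenCvSharp;

    // Five consecutive 8-bit 3-channel frames; the middle one (index 2) is denoised.
    var frames = new List<Mat>();
    for (int i = 0; i < 5; i++)
        frames.Add(Cv2.ImRead($"frame_{i}.png", ImreadModes.Color));

    using var denoised = new Mat();
    Cv2.FastNlMeansDenoisingColoredMulti(
        frames, denoised,
        imgToDenoiseIndex: 2, temporalWindowSize: 3,
        h: 3, hColor: 3, templateWindowSize: 7, searchWindowSize: 21);

    foreach (var f in frames) f.Dispose();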
Example 19
        /// <summary>
        /// Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
        /// </summary>
        /// <param name="src">Source 8-bit single-channel image.</param>
        /// <param name="dst">Destination image of the same size and type as src.</param>
        public void Apply(InputArray src, OutputArray dst)
        {
            ThrowIfDisposed();
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            NativeMethods.imgproc_CLAHE_apply(ptr, src.CvPtr, dst.CvPtr);

            dst.Fix();
            GC.KeepAlive(src);
        }
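A usage sketch (not from the original source), assuming construction via Cv2.CreateCLAHE; the clip limit and tile grid values are illustrative.

    using OpenCvSharp;

    using var gray = Cv2.ImRead("dark.png", ImreadModes.Grayscale);
    using var clahe = Cv2.CreateCLAHE(2.0, new Size(8, 8));   // assumed factory

    using var equalized = new Mat();
    clahe.Apply(gray, equalized);
    Cv2.ImWrite("equalized.png", equalized);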
Example 20
        public Status ComposePanorama(InputArray images, OutputArray pano)
        {
            if (images == null)
            {
                throw new ArgumentNullException(nameof(images));
            }
            if (pano == null)
            {
                throw new ArgumentNullException(nameof(pano));
            }
            images.ThrowIfDisposed();
            pano.ThrowIfNotReady();

            int status = NativeMethods.stitching_Stitcher_composePanorama2_InputArray(
                ptr, images.CvPtr, pano.CvPtr);

            pano.Fix();
            return (Status)status;
        }
Example 21
        public Status ComposePanorama(IEnumerable <Mat> images, OutputArray pano)
        {
            if (images == null)
            {
                throw new ArgumentNullException(nameof(images));
            }
            if (pano == null)
            {
                throw new ArgumentNullException(nameof(pano));
            }
            pano.ThrowIfNotReady();

            IntPtr[] imagesPtrs = EnumerableEx.SelectPtrs(images);
            int      status     = NativeMethods.stitching_Stitcher_composePanorama2_MatArray(
                ptr, imagesPtrs, imagesPtrs.Length, pano.CvPtr);

            pano.Fix();
            return (Status)status;
        }
Example 22
        /// <summary>
        /// Finds the template on the image.
        /// </summary>
        /// <param name="edges"></param>
        /// <param name="dx"></param>
        /// <param name="dy"></param>
        /// <param name="positions"></param>
        /// <param name="votes"></param>
        public virtual void Detect(
            InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray? votes = null)
        {
            if (edges == null)
            {
                throw new ArgumentNullException(nameof(edges));
            }
            if (dx == null)
            {
                throw new ArgumentNullException(nameof(dx));
            }
            if (dy == null)
            {
                throw new ArgumentNullException(nameof(dy));
            }
            if (positions == null)
            {
                throw new ArgumentNullException(nameof(positions));
            }
            edges.ThrowIfDisposed();
            dx.ThrowIfDisposed();
            dy.ThrowIfDisposed();
            positions.ThrowIfNotReady();
            votes?.ThrowIfNotReady();

            NativeMethods.imgproc_GeneralizedHough_detect2(
                ptr, edges.CvPtr, dx.CvPtr, dy.CvPtr, positions.CvPtr, Cv2.ToPtr(votes));
            GC.KeepAlive(this);
            GC.KeepAlive(edges);
            GC.KeepAlive(dx);
            GC.KeepAlive(dy);
            GC.KeepAlive(positions);
            GC.KeepAlive(votes);
            positions.Fix();
            votes?.Fix();
        }
Example 23
        /// <summary>
        /// Tonemaps the image.
        /// </summary>
        /// <param name="src">CV_32FC3 Mat (float 32 bits 3 channels)</param>
        /// <param name="dst">CV_32FC3 Mat with values in [0, 1] range</param>
        public virtual void Process(InputArray src, OutputArray dst)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            NativeMethods.HandleException(
                NativeMethods.photo_Tonemap_process(ptr, src.CvPtr, dst.CvPtr));

            GC.KeepAlive(src);
            dst.Fix();
            GC.KeepAlive(this);
        }
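A hedged sketch (not from the original source), assuming an HDR image loaded as CV_32FC3 and a tonemapper obtained through the Tonemap.Create factory; the factory, gamma value, and file names are assumptions.

    using OpenCvSharp;

    using var hdr = Cv2.ImRead("scene.hdr", ImreadModes.Unchanged);   // .hdr loads as CV_32FC3
    using var tonemap = Tonemap.Create(2.2f);                          // assumed factory, gamma = 2.2

    using var ldr = new Mat();
    tonemap.Process(hdr, ldr);                    // output values in [0, 1]

    using var ldr8 = new Mat();
    ldr.ConvertTo(ldr8, MatType.CV_8UC3, 255);    // scale to 8-bit for saving
    Cv2.ImWrite("tonemapped.png", ldr8);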
Example 24
        /// <summary>
        /// Recovers inverse camera response.
        /// </summary>
        /// <param name="src">vector of input images</param>
        /// <param name="dst">256x1 matrix with inverse camera response function</param>
        /// <param name="times">vector of exposure time values for each image</param>
        public virtual void Process(IEnumerable<Mat> src, OutputArray dst, IEnumerable<float> times)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (times == null)
                throw new ArgumentNullException("times");
            dst.ThrowIfNotReady();
            
            IntPtr[] srcArray = EnumerableEx.SelectPtrs(src);
            float[] timesArray = EnumerableEx.ToArray(times);
            if (srcArray.Length != timesArray.Length)
                throw new OpenCvSharpException("src.Count() != times.Count()");

            NativeMethods.photo_CalibrateCRF_process(ptr, srcArray, srcArray.Length, dst.CvPtr, timesArray);

            dst.Fix();
            GC.KeepAlive(src);
        }
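A hedged sketch (not from the original source) that recovers a camera response curve from a bracketed exposure set; the concrete calibrator (CalibrateDebevec.Create), file names, and exposure times are assumptions.

    using System.Collections.Generic;
    using OpenCvSharp;

    var images = new List<Mat>
    {
        Cv2.ImRead("exp_0.png"), Cv2.ImRead("exp_1.png"), Cv2.ImRead("exp_2.png"),
    };
    var times = new[] { 1 / 30f, 1 / 8f, 1 / 2f };    // exposure time per image, in seconds

    using var calibrate = CalibrateDebevec.Create();  // assumed factory
    using var response = new Mat();                   // 256 x 1 inverse camera response
    calibrate.Process(images, response, times);

    foreach (var img in images) img.Dispose();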
Example 25
 /// <summary>
 /// Gets the images that correspond to each shape.
 /// These images are used in the calculation of the Image Appearance cost.
 /// </summary>
 /// <param name="image1">Image corresponding to the shape defined by contours1.</param>
 /// <param name="image2">Image corresponding to the shape defined by contours2.</param>
 public void GetImages(OutputArray image1, OutputArray image2)
 {
     if (disposed)
     {
         throw new ObjectDisposedException(GetType().Name);
     }
     if (image1 == null)
     {
         throw new ArgumentNullException("image1");
     }
     if (image2 == null)
     {
         throw new ArgumentNullException("image2");
     }
     image1.ThrowIfNotReady();
     image2.ThrowIfNotReady();
     NativeMethods.shape_ShapeContextDistanceExtractor_getImages(ptr, image1.CvPtr, image2.CvPtr);
     image1.Fix();
     image2.Fix();
 }
Example 26
        /// <summary>
        /// Applies a binary blob thinning operation to achieve a skeletonization of the input image.
        /// The function transforms a binary blob image into a skeletonized form using the technique of Zhang-Suen.
        /// </summary>
        /// <param name="src">Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.</param>
        /// <param name="dst">Destination image of the same size and the same type as src. The function can work in-place.</param>
        /// <param name="thinningType">Value that defines which thinning algorithm should be used. </param>
        public static void Thinning(
            InputArray src, OutputArray dst,
            ThinningTypes thinningType = ThinningTypes.ZHANGSUEN)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            NativeMethods.ximgproc_thinning(src.CvPtr, dst.CvPtr, (int)thinningType);

            GC.KeepAlive(src);
            dst.Fix();
        }
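A hedged sketch (not from the original source), assuming the ximgproc wrapper is exposed as CvXImgProc in the OpenCvSharp.XImgProc namespace (the class name is an assumption); the input must be binary with foreground pixels at 255.

    using OpenCvSharp;
    using OpenCvSharp.XImgProc;

    using var gray = Cv2.ImRead("blobs.png", ImreadModes.Grayscale);
    using var binary = new Mat();
    Cv2.Threshold(gray, binary, 127, 255, ThresholdTypes.Binary);   // blobs as 255

    using var skeleton = new Mat();
    CvXImgProc.Thinning(binary, skeleton, ThinningTypes.ZHANGSUEN);
    Cv2.ImWrite("skeleton.png", skeleton);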
Example 27
        /// <summary>
        /// Applies Niblack thresholding to input image.
        /// </summary>
        /// <remarks><![CDATA[
        /// The function transforms a grayscale image to a binary image according to the formulae:
        /// -   **THRESH_BINARY**
        /// \f[dst(x, y) =  \fork{\texttt{maxValue }
        /// }{if \(src(x, y) > T(x, y)\)}{0}{otherwise}\f]
        /// -   **THRESH_BINARY_INV**
        /// \f[dst(x, y) =  \fork{0}{if \(src(x, y) > T(x, y)\)}{\texttt{maxValue}}{otherwise}\f]
        /// where \f$T(x, y)\f$ is a threshold calculated individually for each pixel.
        /// The threshold value \f$T(x, y)\f$ is the mean minus \f$ delta \f$ times standard deviation
        /// of \f$\texttt{blockSize} \times\texttt{blockSize}\f$ neighborhood of \f$(x, y)\f$.
        /// The function can't process the image in-place.
        /// ]]></remarks>
        /// <param name="src">Source 8-bit single-channel image.</param>
        /// <param name="dst">Destination image of the same size and the same type as src.</param>
        /// <param name="maxValue">Non-zero value assigned to the pixels for which the condition is satisfied,
        /// used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.</param>
        /// <param name="type">Thresholding type, see cv::ThresholdTypes.</param>
        /// <param name="blockSize">Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.</param>
        /// <param name="delta">Constant multiplied with the standard deviation and subtracted from the mean.
        /// Normally, it is taken to be a real number between 0 and 1.</param>
        public static void NiblackThreshold(
            InputArray src, OutputArray dst,
            double maxValue, ThresholdTypes type, int blockSize, double delta)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            NativeMethods.ximgproc_niBlackThreshold(src.CvPtr, dst.CvPtr, maxValue, (int)type, blockSize, delta);

            GC.KeepAlive(src);
            dst.Fix();
        }
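A hedged sketch (not from the original source), again assuming the CvXImgProc wrapper class; the blockSize and delta values are illustrative and follow the remarks above (T(x, y) = local mean - delta * local standard deviation).

    using OpenCvSharp;
    using OpenCvSharp.XImgProc;

    using var gray = Cv2.ImRead("scan.png", ImreadModes.Grayscale);
    using var binary = new Mat();

    // Per-pixel threshold computed over a 25 x 25 neighborhood.
    CvXImgProc.NiblackThreshold(gray, binary, maxValue: 255,
        type: ThresholdTypes.Binary, blockSize: 25, delta: 0.5);
    Cv2.ImWrite("binarized.png", binary);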
Example 28
        /// <summary>
        /// Try to stitch the given images.
        /// </summary>
        /// <param name="images">Input images.</param>
        /// <param name="pano">Final pano.</param>
        /// <returns>Status code.</returns>
        public Status Stitch(InputArray images, OutputArray pano)
        {
            if (images == null)
            {
                throw new ArgumentNullException(nameof(images));
            }
            if (pano == null)
            {
                throw new ArgumentNullException(nameof(pano));
            }
            images.ThrowIfDisposed();
            pano.ThrowIfNotReady();

            Status status = (Status)NativeMethods.stitching_Stitcher_stitch1_InputArray(
                ptr, images.CvPtr, pano.CvPtr);

            pano.Fix();

            return status;
        }
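A hedged end-to-end sketch (not from the original source), assuming the Stitcher.Create factory and an overload of Stitch that accepts a collection of Mats; file names are illustrative.

    using System;
    using System.Collections.Generic;
    using OpenCvSharp;

    var inputs = new List<Mat>
    {
        Cv2.ImRead("left.jpg"), Cv2.ImRead("center.jpg"), Cv2.ImRead("right.jpg"),
    };

    using var stitcher = Stitcher.Create();           // assumed factory
    using var pano = new Mat();
    var status = stitcher.Stitch(inputs, pano);

    if (status == Stitcher.Status.OK)
        Cv2.ImWrite("pano.jpg", pano);
    else
        Console.WriteLine($"stitching failed: {status}");

    foreach (var img in inputs) img.Dispose();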
Example 29
 /// <summary>
 /// Projects vector(s) to the principal component subspace.
 /// </summary>
 /// <param name="vec">input vector(s); must have the same dimensionality and the
 /// same layout as the input data used at PCA phase, that is, if DATA_AS_ROW is
 /// specified, then `vec.cols==data.cols` (vector dimensionality) and `vec.rows`
 /// is the number of vectors to project, and the same is true for the PCA::DATA_AS_COL case.</param>
 /// <param name="result">output vectors; in case of PCA::DATA_AS_COL, the
 /// output matrix has as many columns as the number of input vectors, this
 /// means that `result.cols==vec.cols` and the number of rows match the
 /// number of principal components (for example, `maxComponents` parameter
 /// passed to the constructor).</param>
 public void Project(InputArray vec, OutputArray result)
 {
     ThrowIfDisposed();
     if (vec == null)
     {
         throw new ArgumentNullException(nameof(vec));
     }
     if (result == null)
     {
         throw new ArgumentNullException(nameof(result));
     }
     vec.ThrowIfDisposed();
     result.ThrowIfNotReady();
     NativeMethods.HandleException(
         NativeMethods.core_PCA_project2(ptr, vec.CvPtr, result.CvPtr));
     result.Fix();
     GC.KeepAlive(this);
     GC.KeepAlive(vec);
     GC.KeepAlive(result);
 }
Example 30
        /// <summary>
        /// Modification of fastNlMeansDenoising function for colored images
        /// </summary>
        /// <param name="src">Input 8-bit 3-channel image.</param>
        /// <param name="dst">Output image with the same size and type as src.</param>
        /// <param name="h">Parameter regulating filter strength for luminance component.
        /// Bigger h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise</param>
        /// <param name="hColor">The same as h but for color components. For most images value equals 10 will be enought
        /// to remove colored noise and do not distort colors</param>
        /// <param name="templateWindowSize">
        /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels</param>
        /// <param name="searchWindowSize">
        /// Size in pixels of the window that is used to compute weighted average for given pixel. Should be odd.
        /// Affect performance linearly: greater searchWindowsSize - greater denoising time. Recommended value 21 pixels</param>
        public static void FastNlMeansDenoisingColored(InputArray src, OutputArray dst,
                                                       float h = 3, float hColor = 3,
                                                       int templateWindowSize = 7, int searchWindowSize = 21)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            NativeMethods.HandleException(
                NativeMethods.photo_fastNlMeansDenoisingColored(src.CvPtr, dst.CvPtr, h, hColor, templateWindowSize, searchWindowSize));

            dst.Fix();
            GC.KeepAlive(src);
        }
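A minimal call of the color variant (not from the original source), using the values recommended in the parameter descriptions above; file names are illustrative.

    using OpenCvSharp;

    using var noisy = Cv2.ImRead("noisy_color.png", ImreadModes.Color);
    using var denoised = new Mat();
    Cv2.FastNlMeansDenoisingColored(noisy, denoised, h: 3, hColor: 10,
        templateWindowSize: 7, searchWindowSize: 21);
    Cv2.ImWrite("denoised_color.png", denoised);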