Example #1
        /// <summary>
        /// Finds an object center, size, and orientation.
        /// </summary>
        /// <param name="probImage">Back projection of the object histogram. </param>
        /// <param name="window">Initial search window.</param>
        /// <param name="criteria">Stop criteria for the underlying MeanShift() .</param>
        /// <returns></returns>
        public static RotatedRect CamShift(
            InputArray probImage, ref Rect window, TermCriteria criteria)
        {
            if (probImage == null)
                throw new ArgumentNullException(nameof(probImage));
            probImage.ThrowIfDisposed();

            RotatedRect result = NativeMethods.video_CamShift(
                probImage.CvPtr, ref window, criteria);
            return result;
        }
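In a typical call site, CamShift is paired with a back projection computed for each frame. A minimal tracking sketch (hypothetical names: frame is the current BGR frame, hueHist a hue histogram of the target computed up front with Cv2.CalcHist, and window the target's box from the previous frame):

    using (var hsv = new Mat())
    using (var backProj = new Mat())
    {
        Cv2.CvtColor(frame, hsv, ColorConversionCodes.BGR2HSV);
        // back-project the target's hue histogram onto the current frame
        Cv2.CalcBackProject(new[] { hsv }, new[] { 0 }, hueHist, backProj,
                            new[] { new Rangef(0, 180) });
        // window is updated in place; the returned box also carries the orientation
        RotatedRect box = Cv2.CamShift(backProj, ref window, TermCriteria.Both(10, 1));
        Cv2.Ellipse(frame, box, Scalar.Red, 2);
    }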
Example #2
 /// <summary>
 /// The constructor that performs SVD.
 /// </summary>
 /// <param name="src"></param>
 /// <param name="flags"></param>
 public SVD(InputArray src, Flags flags = 0)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     src.ThrowIfDisposed();
     ptr = NativeMethods.core_SVD_new(src.CvPtr, (int)flags);
 }
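A minimal usage sketch, assuming the wrapper exposes the decomposition results as the U, W and Vt properties:

    using (var a = Mat.Eye(3, 3, MatType.CV_64FC1).ToMat())
    using (var svd = new SVD(a))
    {
        Mat w = svd.W;   // singular values, in descending order
        Mat u = svd.U;   // left singular vectors
        Mat vt = svd.Vt; // transposed matrix of right singular vectors
    }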
Example #3
 /// <summary>
 /// Calculates all of the moments 
 /// up to the third order of a polygon or rasterized shape.
 /// </summary>
 /// <param name="array">A raster image (single-channel, 8-bit or floating-point 
 /// 2D array) or an array ( 1xN or Nx1 ) of 2D points ( Point or Point2f )</param>
 /// <param name="binaryImage">If it is true, then all the non-zero image pixels are treated as 1’s</param>
 public Moments(InputArray array, bool binaryImage = false)
 {
     if (array == null)
         throw new ArgumentNullException(nameof(array));
     array.ThrowIfDisposed();
     InitializeFromInputArray(array, binaryImage);
 }
Example #4
 /// <summary>
 /// Computes moments of the rasterized shape or a vector of points.
 /// </summary>
 /// <param name="array">A raster image or an array of 2D points.</param>
 /// <param name="binaryImage">If true, all non-zero image pixels are treated as 1's.</param>
 public Moments(InputArray array, bool binaryImage = false)
 {
     if(array == null)
         throw new ArgumentNullException("array");
     array.ThrowIfDisposed();
     WCvMoments m = NativeMethods.imgproc_moments(array.CvPtr, binaryImage ? 1 : 0);
     Initialize(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03);
 }
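The raw moments are usually combined into derived quantities; for instance, the centroid of a shape is (m10/m00, m01/m00). A small sketch, with binary standing in for a single-channel mask:

    var m = new Moments(binary, binaryImage: true);
    var centroid = new Point2d(m.M10 / m.M00, m.M01 / m.M00); // shape centroid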
Example #5
 /// <summary>
 /// The constructor that performs PCA of the supplied dataset.
 /// </summary>
 /// <param name="data">Input samples stored as matrix rows or matrix columns.</param>
 /// <param name="mean">Optional mean value; if the matrix is empty, the mean is computed from the data.</param>
 /// <param name="flags">Operation flags (e.g. whether input samples are stored as rows or columns).</param>
 /// <param name="maxComponents">Maximum number of components that PCA should retain; by default, all components are retained.</param>
 public PCA(InputArray data, InputArray mean, Flags flags, int maxComponents = 0)
 {
     if (data == null)
         throw new ArgumentNullException(nameof(data));
     if (mean == null)
         throw new ArgumentNullException(nameof(mean));
     data.ThrowIfDisposed();
     mean.ThrowIfDisposed();
     ptr = NativeMethods.core_PCA_new2(data.CvPtr, mean.CvPtr, (int)flags, maxComponents);
 }
Example #6
 /// <summary>
 /// The constructor that performs PCA, retaining the specified percentage of variance.
 /// </summary>
 /// <param name="data">Input samples stored as matrix rows or matrix columns.</param>
 /// <param name="mean">Optional mean value; if the matrix is empty, the mean is computed from the data.</param>
 /// <param name="flags">Operation flags (e.g. whether input samples are stored as rows or columns).</param>
 /// <param name="retainedVariance">Percentage of variance that PCA should retain; the number of components is chosen accordingly.</param>
 public PCA(InputArray data, InputArray mean, Flags flags, double retainedVariance)
 {
     if (data == null)
         throw new ArgumentNullException(nameof(data));
     if (mean == null)
         throw new ArgumentNullException(nameof(mean));
     data.ThrowIfDisposed();
     mean.ThrowIfDisposed();
     ptr = NativeMethods.core_PCA_new3(data.CvPtr, mean.CvPtr, (int)flags, retainedVariance);
 }
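A minimal sketch of how these constructors are typically used (assuming samples holds one observation per row; the property and method names follow OpenCvSharp's PCA wrapper):

    using (var pca = new PCA(samples, new Mat(), PCA.Flags.DataAsRow, 5))
    using (var projected = pca.Project(samples))           // coordinates in the PCA subspace
    using (var reconstructed = pca.BackProject(projected)) // back to the original space
    {
        Mat basis = pca.Eigenvectors; // one principal axis per row
    }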
Example #7
 /// <summary>
 /// Smoothes an image using the median filter.
 /// </summary>
 /// <param name="src">The source 1-, 3-, or 4-channel image.</param>
 /// <param name="dst">The destination array of the same size and type as src.</param>
 /// <param name="ksize">Aperture linear size; must be odd and greater than 1 (3, 5, 7, ...).</param>
 public static void MedianBlur(InputArray src, OutputArray dst, int ksize)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.imgproc_medianBlur(src.CvPtr, dst.CvPtr, ksize);
     dst.Fix();
 }
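Usage is a one-liner; the sketch below assumes an image file on disk:

    using (var src = Cv2.ImRead("input.png"))
    using (var dst = new Mat())
    {
        Cv2.MedianBlur(src, dst, 5); // 5x5 median filter, strong against salt-and-pepper noise
        Cv2.ImWrite("denoised.png", dst);
    }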
Example #8
        /// <summary>
        /// Finds an object on a back projection image.
        /// </summary>
        /// <param name="probImage">Back projection of the object histogram.</param>
        /// <param name="window">Initial search window.</param>
        /// <param name="criteria">Stop criteria for the iterative search algorithm.</param>
        /// <returns>Number of iterations the mean-shift procedure took to converge.</returns>
        public static int MeanShift(
            InputArray probImage, ref Rect window, TermCriteria criteria)
        {
            if (probImage == null)
                throw new ArgumentNullException("probImage");
            probImage.ThrowIfDisposed();

            int result = NativeMethods.video_meanShift(
                probImage.CvPtr, ref window, criteria);
            return result;
        }
Example #9
 /// <summary>
 /// Detects corners using the FAST algorithm by E. Rosten.
 /// </summary>
 /// <param name="image">Grayscale image where keypoints (corners) are detected.</param>
 /// <param name="keypoints">Keypoints detected on the image.</param>
 /// <param name="threshold">Threshold on difference between intensity of the central pixel and pixels of a circle around this pixel.</param>
 /// <param name="nonmaxSupression">If true, non-maximum suppression is applied to detected corners (keypoints).</param>
 /// <param name="type">One of the three neighborhoods as defined in the paper.</param>
 public static void FASTX(InputArray image, out KeyPoint[] keypoints, int threshold, bool nonmaxSupression, int type)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     image.ThrowIfDisposed();
     using (var kp = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_FASTX(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, type);
         keypoints = kp.ToArray();
     }
 }
Example #10
        /// <summary>
        /// Constructs a nearest neighbor search index for a given dataset.
        /// </summary>
        /// <param name="features">Matrix of type CV_32F containing the features (points) to index. The size of the matrix is num_features x feature_dimensionality.</param>
        /// <param name="params">Structure containing the index parameters. The type of index that will be constructed depends on the type of this parameter.</param>
        /// <param name="distType">The distance metric to use for the index.</param>
        public Index(InputArray features, IndexParams @params, FlannDistance distType = FlannDistance.L2)
        {
            if (features == null)
                throw new ArgumentNullException("features");
            if (@params == null)
                throw new ArgumentNullException("params");

            ptr = NativeMethods.flann_Index_new(features.CvPtr, @params.CvPtr, (int)distType);
            if (ptr == IntPtr.Zero)
                throw new OpenCvSharpException("Failed to create Index");
        }
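A minimal sketch: index random 2-D points with a KD-tree and query the three nearest neighbours of one point. The exact KnnSearch overload shown here is an assumption about the wrapper's API:

    using (var features = new Mat(1000, 2, MatType.CV_32FC1))
    {
        Cv2.Randu(features, Scalar.All(0), Scalar.All(100));
        using (var index = new Index(features, new KDTreeIndexParams(4)))
        {
            // assumed overload: query point, nearest-neighbour ids and distances out
            index.KnnSearch(new float[] { 50f, 50f }, out int[] indices, out float[] dists,
                            3, new SearchParams());
        }
    }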
Example #11
 /// <summary>
 /// Applies a GNU Octave/MATLAB equivalent colormap to a given image.
 /// </summary>
 /// <param name="src">The source image.</param>
 /// <param name="dst">The destination (colormapped) image.</param>
 /// <param name="colormap">The colormap to apply.</param>
 public static void ApplyColorMap(InputArray src, OutputArray dst, ColorMapMode colormap)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.contrib_applyColorMap(src.CvPtr, dst.CvPtr, (int)colormap);
     dst.Fix();
 }
Example #12
 /// <summary>
 /// Forms a border around the image
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; will have the same type as src and 
 /// the size Size(src.cols+left+right, src.rows+top+bottom)</param>
 /// <param name="top">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
 /// <param name="bottom">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
 /// <param name="left">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
 /// <param name="right">Specify how much pixels in each direction from the source image rectangle one needs to extrapolate</param>
 /// <param name="borderType">The border type</param>
 /// <param name="value">The border value if borderType == Constant</param>
 public static void CopyMakeBorder(InputArray src, OutputArray dst, int top, int bottom, int left, int right, BorderType borderType, Scalar? value = null)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Scalar value0 = value.GetValueOrDefault(new Scalar());
     NativeMethods.imgproc_copyMakeBorder(src.CvPtr, dst.CvPtr, top, bottom, left, right, (int)borderType, value0);
     dst.Fix();
 }
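For example, padding an image by 10 pixels on every side with a black constant border (src and dst are hypothetical, already-allocated Mats; enum spellings vary slightly across OpenCvSharp versions):

    Cv2.CopyMakeBorder(src, dst, 10, 10, 10, 10, BorderType.Constant, new Scalar(0, 0, 0));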
Example #13
 /// <summary>
 /// Performs image denoising using the Non-local Means Denoising algorithm 
 /// with several computational optimizations. The noise is expected to be Gaussian white noise.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel, 2-channel or 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src .</param>
 /// <param name="h">
 /// Parameter regulating filter strength. Big h value perfectly removes noise but also removes image details, 
 /// smaller h value preserves details but also preserves some noise</param>
 /// <param name="templateWindowSize">
 /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels</param>
 /// <param name="searchWindowSize">
 /// Size in pixels of the window that is used to compute weighted average for given pixel. 
 /// Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time. Recommended value 21 pixels</param>
 public static void FastNlMeansDenoising(InputArray src, OutputArray dst, float h = 3,
     int templateWindowSize = 7, int searchWindowSize = 21)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.photo_fastNlMeansDenoising(src.CvPtr, dst.CvPtr, h, templateWindowSize, searchWindowSize);
     dst.Fix();
 }
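A minimal usage sketch with the recommended defaults (for colour input, the FastNlMeansDenoisingColored variant is the usual companion):

    using (var noisy = Cv2.ImRead("noisy.png", ImreadModes.Grayscale))
    using (var clean = new Mat())
    {
        Cv2.FastNlMeansDenoising(noisy, clean, h: 3);
    }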
Example #14
        /// <summary>
        /// Detects corners using the FAST algorithm
        /// </summary>
        /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
        /// <param name="threshold">threshold on difference between intensity of the central pixel 
        /// and pixels of a circle around this pixel.</param>
        /// <param name="nonmaxSupression">if true, non-maximum suppression is applied to 
        /// detected corners (keypoints).</param>
        /// <param name="type">one of the three neighborhoods as defined in the paper</param>
        /// <returns>keypoints detected on the image.</returns>
        public static KeyPoint[] FAST(InputArray image, int threshold, bool nonmaxSupression, FASTType type)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfDisposed();

            using (var kp = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_FAST2(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, (int)type);
                GC.KeepAlive(image);
                return kp.ToArray();
            }
        }
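A minimal sketch: detect corners on a grayscale image and render them (the FASTType member name is an assumption about the enum's spelling):

    using (var gray = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
    using (var vis = new Mat())
    {
        KeyPoint[] corners = Cv2.FAST(gray, 20, true, FASTType.Type_9_16);
        Cv2.DrawKeypoints(gray, corners, vis, Scalar.Red); // overlay the detections
    }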
Example #15
 /// <summary>
 /// converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
 /// </summary>
 /// <param name="src">Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).</param>
 /// <param name="dst">Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.</param>
 /// <param name="jacobian">Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial derivatives of the output array components with respect to the input array components.</param>
 public static void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian = null)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.calib3d_Rodrigues(src.CvPtr, dst.CvPtr, ToPtr(jacobian));
     dst.Fix();
     if (jacobian != null)
         jacobian.Fix();
 }
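A minimal sketch: turn an axis-angle rotation vector (here 90 degrees about the Z axis) into its 3x3 rotation matrix:

    using (var rvec = new Mat(3, 1, MatType.CV_64FC1))
    using (var rmat = new Mat())
    {
        rvec.Set(0, 0, 0.0);
        rvec.Set(1, 0, 0.0);
        rvec.Set(2, 0, Math.PI / 2); // the rotation angle is the vector's norm
        Cv2.Rodrigues(rvec, rmat);   // rmat now holds the 3x3 rotation matrix
    }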
Example #16
 /// <summary>
 /// Detects corners using the AGAST algorithm
 /// </summary>
 /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
 /// <param name="threshold">threshold on difference between intensity of the central pixel 
 /// and pixels of a circle around this pixel.</param>
 /// <param name="nonmaxSuppression">if true, non-maximum suppression is applied to 
 /// detected corners (keypoints).</param>
 /// <param name="type">one of the four neighborhoods as defined in the paper</param>
 /// <returns>keypoints detected on the image.</returns>
 public static KeyPoint[] AGAST(InputArray image, int threshold, bool nonmaxSuppression, AGASTType type)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     image.ThrowIfDisposed();
     
     using (var vector = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_AGAST(image.CvPtr, vector.CvPtr, threshold, nonmaxSuppression ? 1 : 0,
             (int) type);
         GC.KeepAlive(image);
         return vector.ToArray();
     }
 }
Example #17
 /// <summary>
 /// Updates motion history image using the current silhouette
 /// </summary>
 /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param>
 /// <param name="mhi">Motion history image that is updated by the function (single-channel, 32-bit floating-point).</param>
 /// <param name="timestamp">Current time in milliseconds or other units.</param>
 /// <param name="duration">Maximal duration of the motion track in the same units as timestamp .</param>
 public static void UpdateMotionHistory(
     InputArray silhouette, InputOutputArray mhi,
     double timestamp, double duration)
 {
     if (silhouette == null)
         throw new ArgumentNullException("silhouette");
     if (mhi == null)
         throw new ArgumentNullException("mhi");
     silhouette.ThrowIfDisposed();
     mhi.ThrowIfNotReady();
     NativeMethods.video_updateMotionHistory(
         silhouette.CvPtr, mhi.CvPtr, timestamp, duration);
     mhi.Fix();
 }
Example #18
 /// <summary>
 /// the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
 /// </summary>
 /// <param name="image"></param>
 /// <param name="fgmask"></param>
 /// <param name="learningRate"></param>
 public virtual void Apply(InputArray image, OutputArray fgmask, double learningRate = -1)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     if (fgmask == null)
         throw new ArgumentNullException("fgmask");
     image.ThrowIfDisposed();
     fgmask.ThrowIfNotReady();
     
     NativeMethods.video_BackgroundSubtractor_apply(ptr, image.CvPtr, fgmask.CvPtr, learningRate);
     
     fgmask.Fix();
     GC.KeepAlive(image);
 }
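The subtractor is meant to be fed consecutive frames; a minimal loop sketch (assuming capture is an opened VideoCapture; a learningRate of -1 lets the implementation pick one automatically):

    using (var mog2 = BackgroundSubtractorMOG2.Create())
    using (var frame = new Mat())
    using (var fgMask = new Mat())
    {
        while (capture.Read(frame) && !frame.Empty())
        {
            mog2.Apply(frame, fgMask);        // fgMask is an 8-bit binary foreground mask
            Cv2.ImShow("foreground", fgMask);
            if (Cv2.WaitKey(30) == 27) break; // Esc quits
        }
    }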
Example #19
 /// <summary>
 /// Restores the damaged image areas using one of the available inpainting algorithms.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel or 3-channel image.</param>
 /// <param name="inpaintMask">Inpainting mask, an 8-bit 1-channel image; non-zero pixels indicate the area that needs to be inpainted.</param>
 /// <param name="dst">Output image with the same size and type as src.</param>
 /// <param name="inpaintRadius">Radius of a circular neighborhood of each point inpainted that is considered by the algorithm.</param>
 /// <param name="flags">The inpainting method (Navier-Stokes based or Telea).</param>
 public static void Inpaint(InputArray src, InputArray inpaintMask,
     OutputArray dst, double inpaintRadius, InpaintMethod flags)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (inpaintMask == null)
         throw new ArgumentNullException("inpaintMask");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     inpaintMask.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     NativeMethods.photo_inpaint(src.CvPtr, inpaintMask.CvPtr, dst.CvPtr, inpaintRadius, (int)flags);
     dst.Fix();
 }
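A minimal usage sketch (hypothetical names: damaged is the input image, mask an 8-bit single-channel image whose non-zero pixels mark the region to restore):

    using (var restored = new Mat())
    {
        Cv2.Inpaint(damaged, mask, restored, 3, InpaintMethod.Telea);
    }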
Example #20
        /// <summary>
        /// Computes the global orientation of the selected motion history image part
        /// </summary>
        /// <param name="orientation">Motion gradient orientation image calculated by the function CalcMotionGradient() .</param>
        /// <param name="mask">Mask image. It may be a conjunction of a valid gradient mask, also calculated by CalcMotionGradient() ,
        /// and the mask of a region whose direction needs to be calculated.</param>
        /// <param name="mhi">Motion history image calculated by UpdateMotionHistory() .</param>
        /// <param name="timestamp">Timestamp passed to UpdateMotionHistory() .</param>
        /// <param name="duration">Maximum duration of a motion track in milliseconds, passed to UpdateMotionHistory() .</param>
        /// <returns></returns>
        public static double CalcGlobalOrientation(
            InputArray orientation, InputArray mask, InputArray mhi,
            double timestamp, double duration)
        {
            if (orientation == null)
                throw new ArgumentNullException("orientation");
            if (mask == null)
                throw new ArgumentNullException("mask");
            if (mhi == null)
                throw new ArgumentNullException("mhi");
            orientation.ThrowIfDisposed();
            mask.ThrowIfDisposed();
            mhi.ThrowIfDisposed();

            return NativeMethods.video_calcGlobalOrientation(
                orientation.CvPtr, mask.CvPtr, mhi.CvPtr, timestamp, duration);
        }
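These motion-template functions form a small pipeline: UpdateMotionHistory() accumulates silhouettes, CalcMotionGradient() derives per-pixel orientations, and this function reduces them to one global angle. A per-frame sketch (assuming silhouette, mhi, mask and orientation are preallocated Mats, and that the wrappers live on the same static class as the listings above):

    double now = Cv2.GetTickCount() / Cv2.GetTickFrequency();   // timestamp in seconds
    Cv2.UpdateMotionHistory(silhouette, mhi, now, 0.5);         // 0.5 s history
    Cv2.CalcMotionGradient(mhi, mask, orientation, 0.25, 0.05);
    double angle = Cv2.CalcGlobalOrientation(orientation, mask, mhi, now, 0.5);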
Example #21
        /// <summary>
        /// Computes the global orientation of the selected motion history image part
        /// </summary>
        /// <param name="orientation">Motion gradient orientation image calculated by the function CalcMotionGradient() .</param>
        /// <param name="mask">Mask image. It may be a conjunction of a valid gradient mask, also calculated by CalcMotionGradient() ,
        /// and the mask of a region whose direction needs to be calculated.</param>
        /// <param name="mhi">Motion history image calculated by UpdateMotionHistory() .</param>
        /// <param name="timestamp">Timestamp passed to UpdateMotionHistory() .</param>
        /// <param name="duration">Maximum duration of a motion track in milliseconds, passed to UpdateMotionHistory() .</param>
        /// <returns></returns>
        public static double CalcGlobalOrientation(
            InputArray orientation, InputArray mask, InputArray mhi,
            double timestamp, double duration)
        {
            if (orientation == null)
                throw new ArgumentNullException(nameof(orientation));
            if (mask == null)
                throw new ArgumentNullException(nameof(mask));
            if (mhi == null)
                throw new ArgumentNullException(nameof(mhi));
            orientation.ThrowIfDisposed();
            mask.ThrowIfDisposed();
            mhi.ThrowIfDisposed();

            return NativeMethods.optflow_motempl_calcGlobalOrientation(
                orientation.CvPtr, mask.CvPtr, mhi.CvPtr, timestamp, duration);
        }
Example #22
        /// <summary>
        /// Compute the shape distance between two shapes defined by their contours.
        /// </summary>
        /// <param name="contour1">Contour defining the first shape.</param>
        /// <param name="contour2">Contour defining the second shape.</param>
        /// <returns>The shape distance between the two shapes.</returns>
        public virtual float ComputeDistance(InputArray contour1, InputArray contour2)
        {
            if (ptr == IntPtr.Zero)
                throw new ObjectDisposedException(GetType().Name);
            if (contour1 == null)
                throw new ArgumentNullException(nameof(contour1));
            if (contour2 == null)
                throw new ArgumentNullException(nameof(contour2));
            contour1.ThrowIfDisposed();
            contour2.ThrowIfDisposed();

            float ret = NativeMethods.shape_ShapeDistanceExtractor_computeDistance(
                ptr, contour1.CvPtr, contour2.CvPtr);

            GC.KeepAlive(contour1);
            GC.KeepAlive(contour2);

            return ret;
        }
Example #23
        /// <summary>
        /// Computes the motion gradient orientation image from the motion history image
        /// </summary>
        /// <param name="mhi">Motion history single-channel floating-point image.</param>
        /// <param name="mask">Output mask image that has the type CV_8UC1 and the same size as mhi. 
        /// Its non-zero elements mark pixels where the motion gradient data is correct.</param>
        /// <param name="orientation">Output motion gradient orientation image that has the same type and the same size as mhi. 
        /// Each pixel of the image is a motion orientation, from 0 to 360 degrees.</param>
        /// <param name="delta1">Minimal (or maximal) allowed difference between mhi values within a pixel neighborhood.</param>
        /// <param name="delta2">Maximal (or minimal) allowed difference between mhi values within a pixel neighborhood. 
        /// That is, the function finds the minimum ( m(x,y) ) and maximum ( M(x,y) ) mhi values over 3x3 neighborhood of each pixel 
        /// and marks the motion orientation at (x, y) as valid only if: 
        /// min(delta1, delta2) &lt;= M(x,y)-m(x,y) &lt;= max(delta1, delta2).</param>
        /// <param name="apertureSize"></param>
        public static void CalcMotionGradient(
            InputArray mhi, OutputArray mask, OutputArray orientation,
            double delta1, double delta2, int apertureSize = 3)
        {
            if (mhi == null)
                throw new ArgumentNullException("mhi");
            if (mask == null)
                throw new ArgumentNullException("mask");
            if (orientation == null)
                throw new ArgumentNullException("orientation");
            mhi.ThrowIfDisposed();
            mask.ThrowIfNotReady();
            orientation.ThrowIfNotReady();

            NativeMethods.video_calcMotionGradient(
                mhi.CvPtr, mask.CvPtr, orientation.CvPtr, delta1, delta2, apertureSize);

            mask.Fix();
            orientation.Fix();
        }
Example #24
        /// <summary>
        /// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
        /// </summary>
        /// <param name="img">8-bit input image.</param>
        /// <param name="pyramid">output pyramid.</param>
        /// <param name="winSize">window size of optical flow algorithm. 
        /// Must be not less than winSize argument of calcOpticalFlowPyrLK(). 
        /// It is needed to calculate required padding for pyramid levels.</param>
        /// <param name="maxLevel">0-based maximal pyramid level number.</param>
        /// <param name="withDerivatives">set to precompute gradients for the every pyramid level. 
        /// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will 
        /// calculate them internally.</param>
        /// <param name="pyrBorder">the border mode for pyramid layers.</param>
        /// <param name="derivBorder">the border mode for gradients.</param>
        /// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible. 
        /// You can pass false to force data copying.</param>
        /// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
        public static int BuildOpticalFlowPyramid(
            InputArray img, OutputArray pyramid,
            Size winSize, int maxLevel,
            bool withDerivatives = true,
            BorderTypes pyrBorder = BorderTypes.Reflect101,
            BorderTypes derivBorder = BorderTypes.Constant,
            bool tryReuseInputImage = true)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            if (pyramid == null)
                throw new ArgumentNullException("pyramid");
            img.ThrowIfDisposed();
            pyramid.ThrowIfNotReady();

            int result = NativeMethods.video_buildOpticalFlowPyramid1(
                img.CvPtr, pyramid.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0, 
                (int)pyrBorder, (int)derivBorder, tryReuseInputImage ? 1 : 0);
            pyramid.Fix();
            return result;
        }
Example #25
        /// <summary>
        /// Constructs 4-dimensional blob (so-called batch) from image or array of images.
        /// </summary>
        /// <param name="image">2-dimensional multi-channel or 3-dimensional single-channel image (or array of images)</param>

        public Blob(IInputArray image)
        {
            using (InputArray iaImage = image.GetInputArray())
                _ptr = DnnInvoke.cveDnnBlobCreateFromInputArray(iaImage);
        }
Example #26
 /// <summary>
 /// Create the look up table
 /// </summary>
 /// <param name="lookUpTable">It should be either 1 or 3 channel matrix of 1x256</param>
 public CudaLookUpTable(IInputArray lookUpTable)
 {
     using (InputArray iaLookupTable = lookUpTable.GetInputArray())
         _ptr = CudaInvoke.cudaLookUpTableCreate(iaLookupTable);
 }
Example #27
        /// <summary>
        /// Isolates the red areas of a BGR image by thresholding in HSV space.
        /// </summary>
        /// <param name="source">The source BGR image.</param>
        public static Mat IsolateRed(Mat source)
        {
            Mat brightHSV = source.CvtColor(ColorConversionCodes.BGR2HSV);
            // Red hue wraps around 0/180 in OpenCV's HSV space, so two ranges are combined
            Mat redMask   = brightHSV.InRange(InputArray.Create(new int[] { 0, 250, 200 }), InputArray.Create(new int[] { 5, 256, 256 }))
                            + brightHSV.InRange(InputArray.Create(new int[] { 175, 250, 200 }), InputArray.Create(new int[] { 180, 256, 256 }));
            Mat redAreas = new Mat();

            source.CopyTo(redAreas, redMask);
            Mat red = redAreas.Split()[2]; // keep only the red (third) channel

            return(red);
        }
Example #28
 /// <summary>
 /// Set an image used by the switch* functions to initialize the class.
 /// </summary>
 /// <param name="image">The image</param>
 public void SetBaseImage(IInputArray image)
 {
     using (InputArray iaImage = image.GetInputArray())
         XImgprocInvoke.cveSelectiveSearchSegmentationSetBaseImage(_ptr, iaImage);
 }
Example #29
 // ReSharper disable once RedundantOverriddenMember
 /// <inheritdoc />
 /// <summary>
 /// Computes the color moment hash of the input; the algorithm comes from the paper "Perceptual Hashing for Color Images Using Invariant Moments".
 /// </summary>
 /// <param name="inputArr">Input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.</param>
 /// <param name="outputArr">42 hash values with type CV_64F (double).</param>
 public override void Compute(InputArray inputArr, OutputArray outputArr)
 {
     base.Compute(inputArr, outputArr);
 }
Example #30
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            Stopwatch watch = Stopwatch.StartNew();

            #region Pre-processing
            //Convert the image to grayscale and filter out the noise
            CvInvoke.CvtColor(imageIn, _gray, ColorConversion.Bgr2Gray);
            //Remove noise
            CvInvoke.GaussianBlur(_gray, _gray, new Size(3, 3), 1);
            double cannyThreshold        = 180.0;
            double cannyThresholdLinking = 120.0;
            CvInvoke.Canny(_gray, _cannyEdges, cannyThreshold, cannyThresholdLinking);
            #endregion

            #region circle detection
            double    circleAccumulatorThreshold = 120;
            CircleF[] circles = CvInvoke.HoughCircles(_gray, HoughModes.Gradient, 2.0, 20.0, cannyThreshold,
                                                      circleAccumulatorThreshold, 5);
            #endregion

            #region Edge detection
            LineSegment2D[] lines = CvInvoke.HoughLinesP(
                _cannyEdges,
                1,              //Distance resolution in pixel-related units
                Math.PI / 45.0, //Angle resolution measured in radians.
                20,             //threshold
                30,             //min Line width
                10);            //gap between lines
            #endregion

            #region Find triangles and rectangles
            List <Triangle2DF> triangleList = new List <Triangle2DF>();
            List <RotatedRect> boxList      = new List <RotatedRect>(); //a box is a rotated rectangle
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(_cannyEdges, contours, null, RetrType.List,
                                      ChainApproxMethod.ChainApproxSimple);
                int count = contours.Size;
                for (int i = 0; i < count; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                        using (VectorOfPoint approxContour = new VectorOfPoint())
                        {
                            CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05,
                                                  true);
                            if (CvInvoke.ContourArea(approxContour, false) > 250
                                )                            //only consider contours with area greater than 250
                            {
                                if (approxContour.Size == 3) //The contour has 3 vertices, it is a triangle
                                {
                                    Point[] pts = approxContour.ToArray();
                                    triangleList.Add(new Triangle2DF(
                                                         pts[0],
                                                         pts[1],
                                                         pts[2]
                                                         ));
                                }
                                else if (approxContour.Size == 4) //The contour has 4 vertices.
                                {
                                    #region determine if all the angles in the contour are within [80, 100] degree

                                    bool            isRectangle = true;
                                    Point[]         pts         = approxContour.ToArray();
                                    LineSegment2D[] edges       = PointCollection.PolyLine(pts, true);

                                    for (int j = 0; j < edges.Length; j++)
                                    {
                                        double angle = Math.Abs(
                                            edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                                        if (angle < 80 || angle > 100)
                                        {
                                            isRectangle = false;
                                            break;
                                        }
                                    }

                                    #endregion

                                    if (isRectangle)
                                    {
                                        boxList.Add(CvInvoke.MinAreaRect(approxContour));
                                    }
                                }
                            }
                        }
                }
            }
            #endregion

            watch.Stop();

            using (Mat triangleRectangleImage = new Mat(_gray.Size, DepthType.Cv8U, 3)) //image to draw triangles and rectangles on
                using (Mat circleImage = new Mat(_gray.Size, DepthType.Cv8U, 3))        //image to draw circles on
                    using (Mat lineImage = new Mat(_gray.Size, DepthType.Cv8U, 3))      //image to draw lines on
                    {
                        #region draw triangles and rectangles

                        triangleRectangleImage.SetTo(new MCvScalar(0));
                        foreach (Triangle2DF triangle in triangleList)
                        {
                            CvInvoke.Polylines(triangleRectangleImage,
                                               Array.ConvertAll(triangle.GetVertices(), Point.Round),
                                               true, new Bgr(Color.DarkBlue).MCvScalar, 2);
                        }

                        foreach (RotatedRect box in boxList)
                        {
                            CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(box.GetVertices(), Point.Round),
                                               true,
                                               new Bgr(Color.DarkOrange).MCvScalar, 2);
                        }

                        //Drawing a light gray frame around the image
                        CvInvoke.Rectangle(triangleRectangleImage,
                                           new Rectangle(Point.Empty,
                                                         new Size(triangleRectangleImage.Width - 1, triangleRectangleImage.Height - 1)),
                                           new MCvScalar(120, 120, 120));
                        //Draw the labels
                        CvInvoke.PutText(triangleRectangleImage, "Triangles and Rectangles", new Point(20, 20),
                                         FontFace.HersheyDuplex, 0.5, new MCvScalar(120, 120, 120));

                        #endregion

                        #region draw circles

                        circleImage.SetTo(new MCvScalar(0));
                        foreach (CircleF circle in circles)
                        {
                            CvInvoke.Circle(circleImage, Point.Round(circle.Center), (int)circle.Radius,
                                            new Bgr(Color.Brown).MCvScalar, 2);
                        }

                        //Drawing a light gray frame around the image
                        CvInvoke.Rectangle(circleImage,
                                           new Rectangle(Point.Empty, new Size(circleImage.Width - 1, circleImage.Height - 1)),
                                           new MCvScalar(120, 120, 120));
                        //Draw the labels
                        CvInvoke.PutText(circleImage, "Circles", new Point(20, 20), FontFace.HersheyDuplex, 0.5,
                                         new MCvScalar(120, 120, 120));

                        #endregion

                        #region draw lines

                        lineImage.SetTo(new MCvScalar(0));
                        foreach (LineSegment2D line in lines)
                        {
                            CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(Color.Green).MCvScalar, 2);
                        }
                        //Drawing a light gray frame around the image
                        CvInvoke.Rectangle(lineImage,
                                           new Rectangle(Point.Empty, new Size(lineImage.Width - 1, lineImage.Height - 1)),
                                           new MCvScalar(120, 120, 120));
                        //Draw the labels
                        CvInvoke.PutText(lineImage, "Lines", new Point(20, 20), FontFace.HersheyDuplex, 0.5,
                                         new MCvScalar(120, 120, 120));

                        #endregion


                        using (InputArray iaImageIn = imageIn.GetInputArray())
                            using (Mat imageInMat = iaImageIn.GetMat())
                                CvInvoke.VConcat(new Mat[] { imageInMat, triangleRectangleImage, circleImage, lineImage }, imageOut);
                    }
            return(String.Format("Detected in {0} milliseconds.", watch.ElapsedMilliseconds));
        }
Example #31
 /// <summary>
 /// Interpolate position of ChArUco board corners
 /// </summary>
 /// <param name="markerCorners">vector of already detected markers corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4.The order of the corners should be clockwise.</param>
 /// <param name="markerIds">list of identifiers for each marker in corners</param>
 /// <param name="image">input image necesary for corner refinement. Note that markers are not detected and should be sent in corners and ids parameters.</param>
 /// <param name="board">layout of ChArUco board.</param>
 /// <param name="charucoCorners">interpolated chessboard corners</param>
 /// <param name="charucoIds">interpolated chessboard corners identifiers</param>
 /// <param name="cameraMatrix">optional 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">optional vector of distortion coefficients, (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements </param>
 /// <param name="minMarkers">number of adjacent markers that must be detected to return a charuco corner</param>
 /// <returns>The number of interpolated corners.</returns>
 public static int InterpolateCornersCharuco(
     IInputArrayOfArrays markerCorners,
     IInputArray markerIds,
     IInputArray image,
     CharucoBoard board,
     IOutputArray charucoCorners,
     IOutputArray charucoIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs   = null,
     int minMarkers           = 2)
 {
     using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
         using (InputArray iaMarkerIds = markerIds.GetInputArray())
             using (InputArray iaImage = image.GetInputArray())
                 using (OutputArray oaCharucoCorners = charucoCorners.GetOutputArray())
                     using (OutputArray oaCharucoIds = charucoIds.GetOutputArray())
                         using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                             using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                             {
                                 return(cveArucoInterpolateCornersCharuco(
                                            iaMarkerCorners, iaMarkerIds, iaImage, board,
                                            oaCharucoCorners, oaCharucoIds,
                                            iaCameraMatrix, iaDistCoeffs,
                                            minMarkers));
                             }
 }
Example #32
 /// <summary>
 /// Create a new instance of Rapid tracker
 /// </summary>
 /// <param name="pts3d">The 3D points of the mesh</param>
 /// <param name="tris">Triangle face connectivity</param>
 public Rapid(IInputArray pts3d, IInputArray tris)
 {
     using (InputArray iaPts3d = pts3d.GetInputArray())
         using (InputArray iaTris = tris.GetInputArray())
             _ptr = RapidInvoke.cveRapidCreate(iaPts3d, iaTris, ref _trackerPtr, ref _algorithmPtr, ref _sharedPtr);
 }
Example #33
 /// <summary>
 /// Set the image for optical character recognition
 /// </summary>
 /// <param name="image">The image where detection took place</param>
 public void SetImage(IInputArray image)
 {
     using (InputArray iaImage = image.GetInputArray())
         OcrInvoke.TessBaseAPISetImage(_ptr, iaImage);
 }
Example #34
 /// <summary>
 /// Gets a prediction from a FaceRecognizer.
 /// </summary>
 /// <param name="src"></param>
 /// <returns></returns>
 public virtual int Predict(InputArray src)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     src.ThrowIfDisposed();
     return NativeMethods.contrib_FaceRecognizer_predict1(ptr, src.CvPtr);
 }
Example #35
        public static void Detect(
            IInputArray image, String faceFileName, String eyeFileName,
            List <Rectangle> faces, List <Rectangle> eyes)/*,
                                                           * out long detectionTime)*/
        {
            // Stopwatch watch;

            using (InputArray iaImage = image.GetInputArray())
            {
#if !(__IOS__ || NETFX_CORE)
                if (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda)
                {
                    using (CudaCascadeClassifier face = new CudaCascadeClassifier(faceFileName))
                        using (CudaCascadeClassifier eye = new CudaCascadeClassifier(eyeFileName))
                        {
                            face.ScaleFactor   = 1.1;
                            face.MinNeighbors  = 10;
                            face.MinObjectSize = Size.Empty;
                            eye.ScaleFactor    = 1.1;
                            eye.MinNeighbors   = 10;
                            eye.MinObjectSize  = Size.Empty;
                            // watch = Stopwatch.StartNew();
                            using (CudaImage <Bgr, Byte> gpuImage = new CudaImage <Bgr, byte>(image))
                                using (CudaImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                                    using (GpuMat region = new GpuMat())
                                    {
                                        face.DetectMultiScale(gpuGray, region);
                                        Rectangle[] faceRegion = face.Convert(region);
                                        faces.AddRange(faceRegion);
                                        foreach (Rectangle f in faceRegion)
                                        {
                                            using (CudaImage <Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                                            {
                                                //For some reason a clone is required.
                                                //Might be a bug of CudaCascadeClassifier in opencv
                                                using (CudaImage <Gray, Byte> clone = faceImg.Clone(null))
                                                    using (GpuMat eyeRegionMat = new GpuMat())
                                                    {
                                                        eye.DetectMultiScale(clone, eyeRegionMat);
                                                        Rectangle[] eyeRegion = eye.Convert(eyeRegionMat);
                                                        foreach (Rectangle e in eyeRegion)
                                                        {
                                                            Rectangle eyeRect = e;
                                                            eyeRect.Offset(f.X, f.Y);
                                                            eyes.Add(eyeRect);
                                                        }
                                                    }
                                            }
                                        }
                                    }
                            // watch.Stop();
                        }
                }
                else
#endif
                {
                    //Read the HaarCascade objects
                    using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                        using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                        {
                            //watch = Stopwatch.StartNew();

                            using (UMat ugray = new UMat())
                            {
                                CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                                //normalizes brightness and increases contrast of the image
                                CvInvoke.EqualizeHist(ugray, ugray);

                                //Detect the faces  from the gray scale image and store the locations as rectangle
                                //The first dimensional is the channel
                                //The second dimension is the index of the rectangle in the specific channel
                                Rectangle[] facesDetected = face.DetectMultiScale(
                                    ugray,
                                    1.1,
                                    10,
                                    new Size(20, 20));

                                faces.AddRange(facesDetected);

                                foreach (Rectangle f in facesDetected)
                                {
                                    //Get the region of interest on the faces
                                    using (UMat faceRegion = new UMat(ugray, f))
                                    {
                                        Rectangle[] eyesDetected = eye.DetectMultiScale(
                                            faceRegion,
                                            1.1,
                                            10,
                                            new Size(20, 20));

                                        foreach (Rectangle e in eyesDetected)
                                        {
                                            Rectangle eyeRect = e;
                                            eyeRect.Offset(f.X, f.Y);
                                            eyes.Add(eyeRect);
                                        }
                                    }
                                }
                            }
                            //watch.Stop();
                        }
                }
                //detectionTime = watch.ElapsedMilliseconds;
            }
        }
Example #36
        /// <summary>
        /// Draws the line segments on a given image.
        /// </summary>
        /// <param name="image">The image where the lines will be drawn. 
        /// Should be bigger than or equal to the image where the lines were found.</param>
        /// <param name="lines">A vector of the lines that need to be drawn.</param>
        public virtual void DrawSegments(InputOutputArray image, InputArray lines)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            if (lines == null)
                throw new ArgumentNullException(nameof(lines));
            image.ThrowIfNotReady();
            lines.ThrowIfDisposed();

            NativeMethods.imgproc_LineSegmentDetector_drawSegments(ptr, image.CvPtr, lines.CvPtr);

            image.Fix();
            GC.KeepAlive(lines);
        }
Example #37
        /// <summary>
        /// Finds lines in the input image.
        /// </summary>
        /// <param name="image">A grayscale (CV_8UC1) input image.</param>
        /// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line, 
        /// where Vec4i/Vec4f is (x1, y1, x2, y2): point 1 is the start and point 2 is the end. Returned lines are strictly oriented depending on the gradient.</param>
        /// <param name="width">Vector of widths of the regions where the lines are found, e.g. the width of a line.</param>
        /// <param name="prec">Vector of precisions with which the lines are found.</param>
        /// <param name="nfa">Vector containing the number of false alarms in the line region, 
        /// with precision of 10%. The bigger the value, the logarithmically better the detection.</param>
        public virtual void Detect(InputArray image, OutputArray lines,
            OutputArray width = null, OutputArray prec = null, OutputArray nfa = null)
        {
            if (image == null) 
                throw new ArgumentNullException(nameof(image));
            if (lines == null)
                throw new ArgumentNullException(nameof(lines));
            image.ThrowIfDisposed();
            lines.ThrowIfNotReady();
            width?.ThrowIfNotReady();
            prec?.ThrowIfNotReady();
            nfa?.ThrowIfNotReady();

            NativeMethods.imgproc_LineSegmentDetector_detect_OutputArray(ptr, image.CvPtr, lines.CvPtr,
                Cv2.ToPtr(width), Cv2.ToPtr(prec), Cv2.ToPtr(nfa));

            GC.KeepAlive(image);
            lines.Fix();
            width?.Fix();
            prec?.Fix();
            nfa?.Fix();
        }
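A minimal end-to-end sketch tying Detect() and DrawSegments() together:

    using (var gray = Cv2.ImRead("building.png", ImreadModes.Grayscale))
    using (var lines = new Mat())
    using (var lsd = LineSegmentDetector.Create())
    {
        lsd.Detect(gray, lines);
        using (var vis = gray.CvtColor(ColorConversionCodes.GRAY2BGR))
        {
            lsd.DrawSegments(vis, lines); // overlay the detected segments
            Cv2.ImWrite("segments.png", vis);
        }
    }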
Example #38
 /// <summary>
 /// Write an attribute inside the root group.
 /// </summary>
 /// <param name="value">Attribute value.</param>
 /// <param name="atLabel">Attribute name.</param>
 public void AtWrite(IInputArray value, String atLabel)
 {
     using (InputArray iaValue = value.GetInputArray())
         using (CvString csAtLabel = new CvString(atLabel))
             HdfInvoke.cveHDF5AtWriteArray(_ptr, iaValue, csAtLabel);
 }
Example #39
 /// <summary>
 /// The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image.
 /// </summary>
 /// <param name="image">Image to segment</param>
 /// <param name="algorithm">Chooses the algorithm variant to use</param>
 /// <param name="regionSize">Chooses an average superpixel size measured in pixels</param>
 /// <param name="ruler">Chooses the enforcement of the superpixel smoothness factor</param>
 public SupperpixelSLIC(IInputArray image, Algorithm algorithm, int regionSize, float ruler)
 {
     using (InputArray iaImage = image.GetInputArray())
         _ptr = XImgprocInvoke.cveSuperpixelSLICCreate(iaImage, algorithm, regionSize, ruler, ref _sharedPtr);
 }
Example #40
 /// <summary>
 /// Transform the image using the lookup table
 /// </summary>
 /// <param name="image">The image to be transformed</param>
 /// <param name="dst">The transformation result</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Transform(IInputArray image, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaLookUpTableTransform(_ptr, iaImage, oaDst, stream);
 }
Example #41
        public static void All(
            IInputArray image, String faceFileName, String eyeFileName, string mouthFileName, string noseFileName,
            List <Rectangle> faces, List <Rectangle> eyes, List <Rectangle> mouthes, List <Rectangle> noses)/*,
                                                                                                             * out long detectionTime)*/
        {
            // Stopwatch watch;

            using (InputArray iaImage = image.GetInputArray())
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                    using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                        using (CascadeClassifier mouth = new CascadeClassifier(mouthFileName))
                            using (CascadeClassifier nose = new CascadeClassifier(noseFileName))
                            {
                                //watch = Stopwatch.StartNew();

                                using (UMat ugray = new UMat())
                                {
                                    CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                                    //normalizes brightness and increases contrast of the image
                                    CvInvoke.EqualizeHist(ugray, ugray);

                                    //Detect the faces  from the gray scale image and store the locations as rectangle
                                    //The first dimensional is the channel
                                    //The second dimension is the index of the rectangle in the specific channel
                                    Rectangle[] facesDetected = face.DetectMultiScale(
                                        ugray,
                                        1.1,
                                        10,
                                        new Size(20, 20));

                                    faces.AddRange(facesDetected);

                                    foreach (Rectangle f in facesDetected)
                                    {
                                        //Get the region of interest on the faces
                                        using (UMat faceRegion = new UMat(ugray, f))
                                        {
                                            Rectangle[] eyesDetected = eye.DetectMultiScale(
                                                faceRegion,
                                                1.1,
                                                10,
                                                new Size(20, 20));

                                            foreach (Rectangle e in eyesDetected)
                                            {
                                                Rectangle eyeRect = e;
                                                eyeRect.Offset(f.X, f.Y);
                                                eyes.Add(eyeRect);
                                            }

                                            Rectangle[] nosesDetected = nose.DetectMultiScale(
                                                faceRegion,
                                                1.1,
                                                10,
                                                new Size(20, 20));

                                            foreach (Rectangle n in nosesDetected)
                                            {
                                                Rectangle noseRect = n;
                                                noseRect.Offset(f.X, f.Y);
                                                noses.Add(noseRect);
                                            }

                                            Rectangle[] mouthesDetected = mouth.DetectMultiScale(
                                                faceRegion,
                                                1.1,
                                                10,
                                                new Size(20, 20));

                                            foreach (Rectangle n in mouthesDetected)
                                            {
                                                Rectangle mouthRect = n;
                                                mouthRect.Offset(f.X, f.Y);
                                                mouthes.Add(mouthRect);
                                            }
                                        }
                                    }
                                }
                                //watch.Stop();
                            }
            }
            //detectionTime = watch.ElapsedMilliseconds;
        }
Example #42
 /// <summary>
 /// Updates a FaceRecognizer with given data and associated labels.
 /// </summary>
 /// <param name="images">The training images, that means the faces you want to learn. The data has to be given as a VectorOfMat.</param>
 /// <param name="labels">The labels corresponding to the images</param>
 public void Update(IInputArray images, IInputArray labels)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (InputArray iaLabels = labels.GetInputArray())
             ContribInvoke.CvFaceRecognizerUpdate(_ptr, iaImages, iaLabels);
 }
Example #43
 /// <summary>
 /// Create a new instance of OLS tracker
 /// </summary>
 /// <param name="pts3d">The 3D points of the mesh</param>
 /// <param name="tris">Triangle face connectivity</param>
 /// <param name="histBins">Number of histogram bins</param>
 /// <param name="sobelThresh">Sobel threshold</param>
 public OLSTracker(IInputArray pts3d, IInputArray tris, int histBins = 8, Byte sobelThresh = (byte)10)
 {
     using (InputArray iaPts3d = pts3d.GetInputArray())
         using (InputArray iaTris = tris.GetInputArray())
             _ptr = RapidInvoke.cveOLSTrackerCreate(iaPts3d, iaTris, histBins, sobelThresh, ref _trackerPtr, ref _algorithmPtr, ref _sharedPtr);
 }
Example #44
 /// <summary>
 /// Train the face recognizer with the specific images and labels
 /// </summary>
 /// <param name="images">The images used in the training. This can be a VectorOfMat</param>
 /// <param name="labels">The labels of the images. This can be a VectorOfInt</param>
 public void Train(IInputArray images, IInputArray labels)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (InputArray iaLabels = labels.GetInputArray())
             ContribInvoke.CvFaceRecognizerTrain(_ptr, iaImages, iaLabels);
 }
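A hedged usage sketch tying Train together with Update from Exemple #42, assuming an Emgu LBPHFaceRecognizer (in OpenCV, only the LBPH model supports incremental updates) and equal-sized 8-bit grayscale face Mats named faceA, faceB and anotherFaceA:

 using (LBPHFaceRecognizer recognizer = new LBPHFaceRecognizer())
 {
     using (VectorOfMat faces = new VectorOfMat())
     using (VectorOfInt labels = new VectorOfInt(new[] { 0, 1 }))
     {
         faces.Push(faceA);               // label 0
         faces.Push(faceB);               // label 1
         recognizer.Train(faces, labels); // initial model
     }

     // later: add another sample for label 0 without retraining from scratch
     using (VectorOfMat more = new VectorOfMat())
     using (VectorOfInt moreLabels = new VectorOfInt(new[] { 0 }))
     {
         more.Push(anotherFaceA);
         recognizer.Update(more, moreLabels);
     }
 }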
Exemple #45
 /// <summary>
 /// The function initializes a SuperpixelLSC object for the input image.
 /// </summary>
 /// <param name="image">Image to segment</param>
 /// <param name="regionSize">Chooses an average superpixel size measured in pixels</param>
 /// <param name="ratio">Chooses the enforcement of superpixel compactness factor of superpixel</param>
 public SuperpixelLSC(IInputArray image, int regionSize, float ratio)
 {
     using (InputArray iaImage = image.GetInputArray())
         _ptr = XImgprocInvoke.cveSuperpixelLSCCreate(iaImage, regionSize, ratio);
 }
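A short usage sketch (hedged: assumes Emgu's XImgproc wrapper exposes Iterate and GetLabelContourMask; the file name is illustrative):

 using (Mat img = CvInvoke.Imread("input.png", ImreadModes.Color))
 using (SuperpixelLSC lsc = new SuperpixelLSC(img, regionSize: 20, ratio: 0.075f))
 using (Mat contours = new Mat())
 {
     lsc.Iterate(10);                   // refine the superpixel assignment
     lsc.GetLabelContourMask(contours); // 8-bit mask of superpixel borders
     img.SetTo(new MCvScalar(0, 0, 255), contours); // paint the borders red
 }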
Exemple #46
        void Detect()
        {
            Datas.Clear();
            IImage image = _frame;

            long             detectionTime;
            List <Rectangle> faces = new List <Rectangle>();
            List <Rectangle> eyes  = new List <Rectangle>();
            var RootPath           = System.IO.Path.GetDirectoryName(Application.ExecutablePath);

            DetectFace.Detect(
                image,
                System.IO.Path.Combine(RootPath, "haarcascade_frontalface_default.xml"),
                System.IO.Path.Combine(RootPath, "haarcascade_eye.xml"),
                faces, eyes,
                out detectionTime);

            foreach (Rectangle face in faces)
            {
                CvInvoke.Rectangle(image, face, new Bgr(Color.Red).MCvScalar, 2);
                //crop face
                Image <Bgr, Byte> myImage = new Image <Bgr, Byte>(image.Bitmap);
                myImage.ROI = face;
                var croppedFace = myImage.Copy();
                //croppedFace = croppedFace.Resize(227, 227, Inter.Linear);
                //Image<Bgr, byte> croppedFace2 = null;
                //CvInvoke.CvtColor(croppedFace, croppedFace, Emgu.CV.CvEnum.ColorConversion.Rgb2Gray);
                //var croppedFace2 = croppedFace.Convert<Gray, Byte>();
                int AgeIndex = -1, GenderIndex = -1;
                (AgeIndex, AgeStr)       = detector.clsImg(croppedFace, Resources.deploy_age, Resources.age_net, Resources.age);          //@"C:\experiment\Training\ujicoba\deploy_age.prototxt", @"C:\experiment\Training\ujicoba\age_net.caffemodel", @"C:\experiment\Training\ujicoba\age.txt");
                (GenderIndex, GenderStr) = detector.clsImg(croppedFace, Resources.deploy_gender, Resources.gender_net, Resources.gender); //@"C:\experiment\Training\ujicoba\deploy_gender.prototxt", @"C:\experiment\Training\ujicoba\gender_net.caffemodel", @"C:\experiment\Training\ujicoba\gender.txt");
                CvInvoke.Rectangle(image, face, new Bgr(Color.LightYellow).MCvScalar, 2);
                Datas.Add(new DetectionResult()
                {
                    FaceRect = face, AgeIndex = AgeIndex, AgeDesc = AgeStr, GenderDesc = GenderStr, GenderIndex = GenderIndex
                });
            }
            foreach (Rectangle eye in eyes)
            {
                CvInvoke.Rectangle(image, eye, new Bgr(Color.Blue).MCvScalar, 2);
            }

            /*
             * //display the image
             * using (InputArray iaImage = image.GetInputArray())
             *  ImageViewer.Show(image, String.Format(
             *     "Completed face and eye detection using {0} in {1} milliseconds",
             *     (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda) ? "CUDA" :
             *     (iaImage.IsUMat && CvInvoke.UseOpenCL) ? "OpenCL"
             *     : "CPU",
             *     detectionTime));
             */
            using (InputArray iaImage = image.GetInputArray())
            {
                Invoke(new Action(() =>
                {
                    DetectedFacePic.Image = image.Bitmap;
                    StatusLbl.Text        = $"Age : {AgeStr}, Gender : {GenderStr} => " + String.Format(
                        "Completed face and eye detection using {0} in {1} milliseconds",
                        (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda) ? "CUDA" :
                        (iaImage.IsUMat && CvInvoke.UseOpenCL) ? "OpenCL"
                           : "CPU",
                        detectionTime);
                }));
            }
        }
Exemple #47
 /// <summary>
 /// Refine not detected markers based on the already detected and the board layout.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="board">Layout of markers in the board.</param>
 /// <param name="detectedCorners">Vector of already detected marker corners.</param>
 /// <param name="detectedIds">Vector of already detected marker identifiers.</param>
 /// <param name="rejectedCorners">Vector of rejected candidates during the marker detection process</param>
 /// <param name="cameraMatrix">Optional input 3x3 floating-point camera matrix </param>
 /// <param name="distCoeffs">Optional vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="minRepDistance">Minimum distance between the corners of the rejected candidate and the reprojected marker in order to consider it as a correspondence. (default 10)</param>
 /// <param name="errorCorrectionRate">Rate of allowed erroneous bits respect to the error correction capability of the used dictionary. -1 ignores the error correction step. (default 3)</param>
 /// <param name="checkAllOrders">Consider the four posible corner orders in the rejectedCorners array. If it set to false, only the provided corner order is considered (default true).</param>
 /// <param name="recoveredIdxs">Optional array to returns the indexes of the recovered candidates in the original rejectedCorners array.</param>
 /// <param name="parameters">marker detection parameters</param>
 public static void RefineDetectedMarkers(
     IInputArray image, IBoard board, IInputOutputArray detectedCorners,
     IInputOutputArray detectedIds, IInputOutputArray rejectedCorners,
     IInputArray cameraMatrix, IInputArray distCoeffs,
     float minRepDistance, float errorCorrectionRate,
     bool checkAllOrders,
     IOutputArray recoveredIdxs, DetectorParameters parameters)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputOutputArray ioaDetectedCorners = detectedCorners.GetInputOutputArray())
             using (InputOutputArray ioaDetectedIds = detectedIds.GetInputOutputArray())
                 using (InputOutputArray ioaRejectedCorners = rejectedCorners.GetInputOutputArray())
                     using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                         using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                              using (OutputArray oaRecoveredIdxs = recoveredIdxs == null ? OutputArray.GetEmpty() : recoveredIdxs.GetOutputArray())
                              {
                                  cveArucoRefineDetectedMarkers(iaImage, board.BoardPtr, ioaDetectedCorners, ioaDetectedIds, ioaRejectedCorners,
                                                                iaCameraMatrix, iaDistCoeffs, minRepDistance, errorCorrectionRate, checkAllOrders, oaRecoveredIdxs, ref parameters);
                             }
 }
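A hedged end-to-end sketch: detect markers on a GridBoard first, then hand the rejected candidates to RefineDetectedMarkers. The dictionary, the board geometry and the input Mat named image are illustrative assumptions:

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (GridBoard board = new GridBoard(4, 4, 0.04f, 0.01f, dict))
 using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
 using (VectorOfInt ids = new VectorOfInt())
 using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
 {
     DetectorParameters p = DetectorParameters.GetDefault();
     ArucoInvoke.DetectMarkers(image, dict, corners, ids, p, rejected);
     // second pass: try to recover candidates rejected during detection
     ArucoInvoke.RefineDetectedMarkers(image, board, corners, ids, rejected,
                                       null, null, 10f, 3f, true, null, p);
 }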
Exemple #48
        public static void FindTriQuadrant()
        {
            Bitmap image     = (Bitmap)Image.FromFile("res3/supercruisetarget.png");
            Mat    source    = BitmapConverter.ToMat(image);
            Mat    sourceHSV = source.CvtColor(ColorConversionCodes.BGR2HSV);

            /* Paint.Net uses HSV [0..360], [0..100], [0..100].
             * OpenCV uses H: 0 - 180, S: 0 - 255, V: 0 - 255
             * Paint.NET colors:
             * 50   94  100     bright yellow
             * 27   93  90      orange
             * 24   91  74      brown
             * 16   73  25      almost background (low V)
             * suggested range [20..55], [80..100], [50..100] (paint.net)
             * suggested range [10..27], [200..255], [128..255] (OpenCV)
             * */
            Mat mask = sourceHSV.InRange(InputArray.Create(new int[] { 10, 200, 128 }), InputArray.Create(new int[] { 27, 255, 255 }));
            Mat sourceHSVFiltered = new Mat();

            sourceHSV.CopyTo(sourceHSVFiltered, mask);
            Window w3         = new Window("yellowfilter", sourceHSVFiltered.CvtColor(ColorConversionCodes.HSV2BGR));
            Mat    sourceGrey = sourceHSVFiltered.Split()[2]; // Value channel is pretty good as a greyscale conversion
            Window w4         = new Window("yellowFilterValue", sourceGrey);

            CircleSegment[] circles2 = sourceGrey.HoughCircles(
                HoughMethods.Gradient,
                dp: 1f,       /* resolution scaling factor?  full resolution seems to work better */
                minDist: 100, /* if we find more than one then we go to the second analysis, the crosshair is probably blue as well*/
                param1: 100,  /* default was fine after experimentation */
                param2: 13,   /* required quality factor. 9 finds too many, 14 finds too few */
                minRadius: 40,
                maxRadius: 47);
            foreach (CircleSegment circle in circles2)
            {
                var quarterCircle = new OpenCvSharp.Point2f(circle.Radius, circle.Radius);
                source.Rectangle(circle.Center - quarterCircle, circle.Center + quarterCircle, new Scalar(0, 255, 0));
            }


            Mat    templatepointer = new Mat("res3/squaretarget.png", ImreadModes.GrayScale);
            Mat    matches         = sourceGrey.MatchTemplate(templatepointer, TemplateMatchModes.CCoeffNormed);
            Window w6 = new Window("pointer", matches);

            OpenCvSharp.Point minloc, maxloc;
            matches.MinMaxLoc(out minloc, out maxloc);

            source.Rectangle(maxloc, maxloc + new OpenCvSharp.Point(templatepointer.Size().Width, templatepointer.Size().Height), new Scalar(255, 255, 0));

            Window w5 = new Window("result", source);
        }
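The Paint.NET-to-OpenCV scale conversion spelled out in the comment block above is mechanical; a small helper (hypothetical, not part of the original code) makes the derivation explicit:

        // H: 0-360 -> 0-180; S and V: 0-100 -> 0-255
        static int[] PaintNetHsvToOpenCv(double h, double s, double v)
        {
            return new[]
            {
                (int)Math.Round(h / 2.0),
                (int)Math.Round(s * 255.0 / 100.0),
                (int)Math.Round(v * 255.0 / 100.0),
            };
        }
        // e.g. (20, 80, 50) -> { 10, 204, 128 }, close to the lower bound used above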
Exemple #49
 /// <summary>
 /// Detect ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input image necessary for corner subpixel.</param>
 /// <param name="markerCorners">list of detected marker corners from detectMarkers function.</param>
 /// <param name="markerIds">list of marker ids in markerCorners.</param>
 /// <param name="squareMarkerLengthRate">rate between square and marker length: squareMarkerLengthRate = squareLength / markerLength.The real units are not necessary.</param>
 /// <param name="diamondCorners">output list of detected diamond corners (4 corners per diamond). The order is the same than in marker corners: top left, top right, bottom right and bottom left. Similar format than the corners returned by detectMarkers(e.g VectorOfVectorOfPointF ).</param>
 /// <param name="diamondIds">ids of the diamonds in diamondCorners. The id of each diamond is in fact of type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the diamond.</param>
 /// <param name="cameraMatrix">Optional camera calibration matrix.</param>
 /// <param name="distCoeffs">Optional camera distortion coefficients.</param>
 public static void DetectCharucoDiamond(
     IInputArray image,
     IInputArray markerCorners,
     IInputArray markerIds,
     float squareMarkerLengthRate,
     IOutputArray diamondCorners,
     IOutputArray diamondIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs   = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
             using (InputArray iaMarkerIds = markerIds.GetInputArray())
                 using (OutputArray oaDiamondCorners = diamondCorners.GetOutputArray())
                     using (OutputArray oaDiamondIds = diamondIds.GetOutputArray())
                         using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                             using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                             {
                                 cveArucoDetectCharucoDiamond(iaImage, iaMarkerCorners, iaMarkerIds, squareMarkerLengthRate, oaDiamondCorners, oaDiamondIds, iaCameraMatrix, iaDistCoeffs);
                             }
 }
Exemple #50
        /// <summary>
        /// Filter the given image to select certain yellow hues (returned as grayscale)
        /// </summary>
        public static Mat IsolateYellow(Mat source)
        {
            Mat sourceHSV         = source.CvtColor(ColorConversionCodes.BGR2HSV);
            Mat mask              = sourceHSV.InRange(InputArray.Create(new int[] { 10, 200, 128 }), InputArray.Create(new int[] { 30, 255, 255 }));
            Mat sourceHSVFiltered = new Mat();

            sourceHSV.CopyTo(sourceHSVFiltered, mask);
            Mat valueChannel = sourceHSVFiltered.Split()[2];

            return(valueChannel);
        }
Exemple #51
 /// <summary>
 /// Add a new image in the list of images to process.
 /// </summary>
 /// <param name="img">	The image</param>
 public void AddImage(IInputArray img)
 {
     using (InputArray iaImg = img.GetInputArray())
         XImgprocInvoke.cveSelectiveSearchSegmentationAddImage(_ptr, iaImg);
 }
Exemple #52
        public static void MatchCorona()
        {
            Bitmap screen      = new Bitmap("Screenshot_0028.bmp");
            Bitmap cropped     = CompassSensor.Crop(screen, screen.Width * 1 / 3, screen.Height * 1 / 3, screen.Width * 2 / 3, screen.Height * 2 / 3);
            Mat    screenwhole = BitmapConverter.ToMat(cropped);

            // erase the vivid areas, otherwise the blur subtraction turns yellow near red to green
            Mat brightHSV     = screenwhole.CvtColor(ColorConversionCodes.BGR2HSV);
            Mat darkAreasMask = brightHSV.InRange(InputArray.Create(new int[] { 0, 0, 0 }), InputArray.Create(new int[] { 180, 255, 180 }));
            Mat darkAreas     = new Mat();

            screenwhole.CopyTo(darkAreas, darkAreasMask);

            Mat    screenblur = darkAreas - darkAreas.Blur(new OpenCvSharp.Size(10, 10));
            Window w3         = new Window(screenblur);

            //screenblur.SaveImage("sharplines.png");
            Mat sourceHSV = screenblur.CvtColor(ColorConversionCodes.BGR2HSV);

            /* Paint.Net uses HSV [0..360], [0..100], [0..100].
             * OpenCV uses H: 0 - 180, S: 0 - 255, V: 0 - 255
             * Paint.NET colors:
             * 73   100 18     brightest part of green edge
             * 72   98  9      very dark green
             * suggested range [70..180], [80..100], [8..100] (paint.net)
             * suggested range [35..90], [204..255], [20..255] (OpenCV)
             * */
            Mat mask = sourceHSV.InRange(InputArray.Create(new int[] { 35, 204, 20 }), InputArray.Create(new int[] { 90, 255, 255 }));
            Mat sourceHSVFiltered = new Mat();

            sourceHSV.CopyTo(sourceHSVFiltered, mask);
            Window w5         = new Window("yellowfilter", sourceHSVFiltered.CvtColor(ColorConversionCodes.HSV2BGR));
            Mat    sourceGrey = sourceHSVFiltered.Split()[2].InRange(32, 256); // Value channel is pretty good as a greyscale conversion
            Window w6         = new Window("yellowFilterValue", sourceGrey);

            LineSegmentPoint[] result = sourceGrey.HoughLinesP(1, Math.PI / 180, 5, 10, 2);
            List <Point2d>     points = new List <Point2d>();

            foreach (var line in result)
            {
                points.Add(line.P1);
                points.Add(line.P2);
                darkAreas.Line(line.P1, line.P2, new Scalar(255, 0, 255));
            }
            CircleSegment c = CruiseSensor.ComputeCircle(points);

            darkAreas.Circle(c.Center, (int)c.Radius, new Scalar(255, 255, 0));
            Window w9 = new Window("final", darkAreas);
        }
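The CruiseSensor.ComputeCircle helper called above is not shown in this listing. A minimal least-squares (Kåsa) circle fit over the collected line endpoints might look like the following sketch (hypothetical FitCircle name; assumes OpenCvSharp and System.Collections.Generic):

        static CircleSegment FitCircle(IList<Point2d> pts)
        {
            // Kåsa fit: solve x^2 + y^2 = a*x + b*y + c in the least-squares sense,
            // then recover center = (a/2, b/2) and radius = sqrt(c + a^2/4 + b^2/4).
            using (Mat A = new Mat(pts.Count, 3, MatType.CV_64FC1))
            using (Mat b = new Mat(pts.Count, 1, MatType.CV_64FC1))
            using (Mat sol = new Mat())
            {
                for (int i = 0; i < pts.Count; i++)
                {
                    A.Set<double>(i, 0, pts[i].X);
                    A.Set<double>(i, 1, pts[i].Y);
                    A.Set<double>(i, 2, 1.0);
                    b.Set<double>(i, 0, pts[i].X * pts[i].X + pts[i].Y * pts[i].Y);
                }
                Cv2.Solve(A, b, sol, DecompTypes.SVD); // SVD handles the tall (overdetermined) system
                double ca = sol.Get<double>(0), cb = sol.Get<double>(1), cc = sol.Get<double>(2);
                var center = new Point2f((float)(ca / 2), (float)(cb / 2));
                return new CircleSegment(center, (float)Math.Sqrt(cc + ca * ca / 4 + cb * cb / 4));
            }
        }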
Exemple #53
 /// <summary>
 /// These functions try to compose the given images (or images stored internally from the other function calls) into the final pano, under the assumption that the image transformations were estimated beforehand.
 /// </summary>
 /// <param name="images">Input images</param>
 /// <param name="pano">Final pano.</param>
 /// <returns>Status code.</returns>
 public Stitcher.Status ComposePanorama(IInputArrayOfArrays images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.cveStitcherComposePanorama2(_ptr, iaImages, oaPano));
 }
Exemple #54
        public static void MatchImpact()
        {
            Bitmap screen = new Bitmap("ImpactTest.png");
            //Bitmap cropped = CompassSensor.Crop(screen, screen.Width - 400, 0, screen.Width - 100, 300);
            Mat screenwhole = BitmapConverter.ToMat(screen);

            Mat brightHSV = screenwhole.CvtColor(ColorConversionCodes.BGR2HSV);
            Mat redMask   = brightHSV.InRange(InputArray.Create(new int[] { 0, 250, 200 }), InputArray.Create(new int[] { 5, 256, 256 }))
                            + brightHSV.InRange(InputArray.Create(new int[] { 175, 250, 200 }), InputArray.Create(new int[] { 180, 256, 256 }));
            Mat darkAreas = new Mat();

            screenwhole.CopyTo(darkAreas, redMask);
            Mat red = darkAreas.Split()[2];

            red.SaveImage("impacttemplateraw.png");
            Mat template = new Mat("res3/impacttemplate.png", ImreadModes.GrayScale);
            Mat result   = new Mat(red.Size(), red.Type());

            Cv2.MatchTemplate(red, template, result, TemplateMatchModes.CCoeffNormed);
            Window w2 = new Window(red);
            Window w3 = new Window(result);

            Cv2.Threshold(result, result, 0.4, 1.0, ThresholdTypes.Tozero);
            Window w4 = new Window(result);
            Window w1 = new Window(screenwhole);
        }
Exemple #55
        /// <summary>
        /// Finds lines in the input image, using the default parameters of the algorithm.
        /// </summary>
        /// <param name="image">A grayscale (CV_8UC1) input image. </param>
        /// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. 
        /// Where Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient.</param>
        /// <param name="width">Vector of widths of the regions, where the lines are found. E.g. Width of line.</param>
        /// <param name="prec">Vector of precisions with which the lines are found.</param>
        /// <param name="nfa">Vector containing number of false alarms in the line region, 
        /// with precision of 10%. The bigger the value, logarithmically better the detection.</param>
        public virtual void Detect(InputArray image, out Vec4f[] lines,
            out double[] width, out double[] prec, out double[] nfa)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfDisposed();

            using (var linesVec = new VectorOfVec4f())
            using (var widthVec = new VectorOfDouble())
            using (var precVec = new VectorOfDouble())
            using (var nfaVec = new VectorOfDouble())
            {
                NativeMethods.imgproc_LineSegmentDetector_detect_vector(ptr, image.CvPtr,
                    linesVec.CvPtr, widthVec.CvPtr, precVec.CvPtr, nfaVec.CvPtr);

                lines = linesVec.ToArray();
                width = widthVec.ToArray();
                prec = precVec.ToArray();
                nfa = nfaVec.ToArray();
            }

            GC.KeepAlive(image);
        }
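A hedged usage sketch for the detector above, going through OpenCvSharp's public factory (the file name is illustrative):

        using (Mat grey = Cv2.ImRead("lines.png", ImreadModes.GrayScale))
        using (LineSegmentDetector lsd = Cv2.CreateLineSegmentDetector())
        using (Mat vis = grey.CvtColor(ColorConversionCodes.GRAY2BGR))
        {
            lsd.Detect(grey, out Vec4f[] lines, out double[] width, out double[] prec, out double[] nfa);
            lsd.DrawSegments(vis, InputArray.Create(lines)); // overlay the detected segments
            Cv2.ImShow("lsd", vis);
            Cv2.WaitKey();
        }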
Exemple #56
 /// <summary>
 /// Compute the panoramic images given the images
 /// </summary>
 /// <param name="images">The input images. This can be, for example, a VectorOfMat</param>
 /// <param name="pano">The panoramic image</param>
 /// <returns>The stitching status</returns>
 public Status Stitch(IInputArray images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.cveStitcherStitch(_ptr, iaImages, oaPano));
 }
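A hedged sketch of driving Stitch (and, implicitly, ComposePanorama from Exemple #53) with a VectorOfMat; the Stitcher constructor differs between Emgu versions, so the panorama-mode form and the file names below are assumptions:

 using (Stitcher stitcher = new Stitcher(Stitcher.Mode.Panorama))
 using (VectorOfMat images = new VectorOfMat())
 using (Mat pano = new Mat())
 {
     images.Push(CvInvoke.Imread("left.jpg"));
     images.Push(CvInvoke.Imread("right.jpg"));
     Stitcher.Status status = stitcher.Stitch(images, pano);
     if (status == Stitcher.Status.Ok)
         CvInvoke.Imwrite("pano.jpg", pano);
 }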
Exemple #57
        /// <summary>
        /// Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels.
        /// </summary>
        /// <param name="size">The size of the image, where lines1 and lines2 were found.</param>
        /// <param name="lines1">The first group of lines that needs to be drawn. It is visualized in blue.</param>
        /// <param name="lines2">The second group of lines. They are visualized in red.</param>
        /// <param name="image">Optional image, where the lines will be drawn. 
        /// The image should be color (3-channel) in order for lines1 and lines2 to be drawn 
        /// in the above mentioned colors.</param>
        /// <returns></returns>
        public virtual int CompareSegments(
            Size size, InputArray lines1, InputArray lines2, InputOutputArray image = null)
        {
            if (lines1 == null) 
                throw new ArgumentNullException(nameof(lines1));
            if (lines2 == null)
                throw new ArgumentNullException(nameof(lines2));
            lines1.ThrowIfDisposed();
            lines2.ThrowIfDisposed();
            image?.ThrowIfNotReady();

            var ret = NativeMethods.imgproc_LineSegmentDetector_compareSegments(
                ptr, size, lines1.CvPtr, lines2.CvPtr, Cv2.ToPtr(image));

            GC.KeepAlive(lines1);
            GC.KeepAlive(lines2);
            image?.Fix();

            return ret;
        }
Exemple #58
 /// <summary>
 /// Compute the descriptors on the image from the given keypoint locations.
 /// </summary>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keyPoints">The keypoints where the descriptor computation is perfromed</param>
 /// <param name="descriptors">The descriptors from the given keypoints</param>
 public void Compute(IInputArray image, VectorOfKeyPoint keyPoints, IOutputArray descriptors)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDescriptors = descriptors.GetOutputArray())
             Features2DInvoke.CvFeature2DCompute(_feature2D, iaImage, keyPoints.Ptr, oaDescriptors);
 }
Exemple #59
 /// <summary>
 /// Predicts the label and confidence for a given sample.
 /// </summary>
 /// <param name="src"></param>
 /// <param name="label"></param>
 /// <param name="confidence"></param>
 public virtual void Predict(InputArray src, out int label, out double confidence)
 {
     if (src == null)
          throw new ArgumentNullException(nameof(src));
     src.ThrowIfDisposed();
     NativeMethods.contrib_FaceRecognizer_predict2(ptr, src.CvPtr, out label, out confidence);
 }
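A hedged train-then-predict sketch around this method, assuming OpenCvSharp's face module (LBPHFaceRecognizer.Create) and illustrative file names:

 var images = new List<Mat>
 {
     new Mat("personA_0.png", ImreadModes.GrayScale),
     new Mat("personB_0.png", ImreadModes.GrayScale),
 };
 var labels = new List<int> { 0, 1 };

 using (var model = LBPHFaceRecognizer.Create())
 using (var probe = new Mat("query.png", ImreadModes.GrayScale))
 {
     model.Train(images, labels);
     model.Predict(probe, out int label, out double confidence);
     Console.WriteLine($"label={label}, confidence={confidence}");
 }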
Exemple #60
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 public void DetectRaw(IInputArray image, VectorOfKeyPoint keypoints, IInputArray mask = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             Features2DInvoke.CvFeature2DDetect(_feature2D, iaImage, keypoints.Ptr, iaMask);
 }
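The typical flow pairs DetectRaw with Compute from Exemple #58. A minimal sketch, using ORBDetector as an illustrative Feature2D (the class name and file name are assumptions; newer Emgu releases may expose it under a different name):

 using (ORBDetector orb = new ORBDetector(500))
 using (Mat img = CvInvoke.Imread("scene.png", ImreadModes.Grayscale))
 using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
 using (Mat descriptors = new Mat())
 {
     orb.DetectRaw(img, keypoints);            // locate keypoints
     orb.Compute(img, keypoints, descriptors); // describe them
 }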