Пример #1
1
 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    // Interpolation and warp options are combined into a single flags bit field.
    int flags = (int)interpMethod | (int)warpMethod;
    using (InputArray inputArr = src.GetInputArray())
    {
       using (OutputArray outputArr = dst.GetOutputArray())
       {
          using (InputArray matrixArr = mapMatrix.GetInputArray())
          {
             cveWarpAffine(inputArr, outputArr, matrixArr, ref dsize, flags, borderMode, ref borderValue);
          }
       }
    }
 }
Пример #2
1
        /// <summary>
        /// Finds the contour with the largest area in <paramref name="cannyEdges"/> and
        /// draws it onto <paramref name="result"/> (red, thickness 3).
        /// </summary>
        /// <param name="cannyEdges">Edge image (e.g. Canny output) to extract contours from.</param>
        /// <param name="result">Image the largest contour is drawn onto.</param>
        /// <returns>A copy of the largest contour's points, or an empty vector when no contour exists.</returns>
        public static VectorOfPoint FindLargestContour(IInputOutputArray cannyEdges, IInputOutputArray result)
        {
            // Fixes vs. original: removed the unused `hirarchy` and `color` locals,
            // removed the redundant DrawContours call inside the scan loop, and
            // guarded against the empty-contour case (which previously threw on indexing).
            int largestContourIndex = 0;
            double largestArea = 0;

            using (Mat hierachy = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(cannyEdges, contours, hierachy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);

                // No contours detected: return an empty vector instead of indexing out of range.
                if (contours.Size == 0)
                    return new VectorOfPoint();

                for (int i = 0; i < contours.Size; i++)
                {
                    double area = CvInvoke.ContourArea(contours[i], false); // area of the i-th contour
                    if (area > largestArea)
                    {
                        largestArea = area;
                        largestContourIndex = i; // remember the index of the largest contour
                    }
                }

                // Draw the winner exactly once, after the scan is complete.
                CvInvoke.DrawContours(result, contours, largestContourIndex, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierachy);
                return new VectorOfPoint(contours[largestContourIndex].ToArray());
            }
        }
        /// <summary>
        /// Draws a crosshair centered on (x, y) — a small circle plus a horizontal and a
        /// vertical line spanning the whole frame — onto the webcam image.
        /// </summary>
        /// <param name="x">X coordinate of the crosshair center (laser position).</param>
        /// <param name="y">Y coordinate of the crosshair center (laser position).</param>
        /// <param name="color">Color used for the circle and lines.</param>
        /// <param name="src">Image the crosshair is drawn onto.</param>
        public void DrawCrosshair( int x, int y, MCvScalar color, Image<Bgr, Byte> src )
        {
            // BUG FIX: every assignment to the point locals was commented out, so the
            // locals were used unassigned (compile error CS0165).  Restored the
            // laser-tracking variant; image dimensions come from the image itself.
            Point point_laser = new Point( x, y );
            Point point_left = new Point( 0, y );
            Point point_top = new Point( x, 0 );
            Point point_right = new Point( src.Width, y );
            Point point_bottom = new Point( x, src.Height );

            // Draw a crosshair centered on the laser pointer on the webcam feed
            CvInvoke.cvCircle( src, point_laser, 5, color, 1,
                               Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0 );
            CvInvoke.cvLine( src, point_left, point_right, color, 1,
                             Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0 );
            CvInvoke.cvLine( src, point_top, point_bottom, color, 1,
                             Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0 );
        }
Пример #4
0
 /// <summary>
 /// Packs hue, saturation and value into an MCvScalar (V0 = hue, V1 = sat, V2 = value).
 /// </summary>
 /// <param name="hue">Hue component, stored in V0.</param>
 /// <param name="sat">Saturation component, stored in V1.</param>
 /// <param name="value">Value (brightness) component, stored in V2.</param>
 /// <returns>The populated scalar; V3 keeps its default of 0.</returns>
 private static MCvScalar GetScalar(double hue, double sat, double value)
 {
     // Use the three-argument constructor instead of piecewise field assignment.
     return new MCvScalar(hue, sat, value);
 }
Пример #5
0
 /// <summary>
 /// Copies the threshold values from <paramref name="s"/> into this instance.
 /// A null argument is silently ignored.
 /// </summary>
 /// <param name="s">Settings to copy from; may be null.</param>
 public void Absorb(ThresholdSettings s)
 {
     if (s == null)
         return;

     this.LowThreshold = s.LowThreshold;
     this.HighThreshold = s.HighThreshold;
 }
Пример #6
0
      /// <summary>
      /// Transforms source image using the specified matrix
      /// </summary>
      /// <param name="src">Source image</param>
      /// <param name="dst">Destination image</param>
      /// <param name="mapMatrix">2x3 transformation matrix</param>
      /// <param name="flags"> flags </param>
      /// <param name="fillval">A value used to fill outliers</param>
#if ANDROID
      public static void cvWarpAffine(
          IntPtr src,
          IntPtr dst,
          IntPtr mapMatrix,
          int flags,
          MCvScalar fillval)
      {
         // BUG FIX: MCvScalar exposes PascalCase fields (V0..V3) — the lowercase
         // v0..v3 names used previously do not exist on the struct.
         cvWarpAffine(src, dst, mapMatrix, flags, fillval.V0, fillval.V1, fillval.V2, fillval.V3);
      }
Пример #7
0
 /// <summary>
 /// Create a Laplacian filter.
 /// </summary>
 /// <param name="srcDepth">The depth of the source image.</param>
 /// <param name="srcChannels">The number of channels of the source image.</param>
 /// <param name="dstDepth">The depth of the destination image.</param>
 /// <param name="dstChannels">The number of channels of the destination image.</param>
 /// <param name="ksize">Either 1 or 3</param>
 /// <param name="scale">Optional scale. Use 1.0 for default</param>
 /// <param name="borderType">The border type.</param>
 /// <param name="borderValue">The border value.</param>
 public CudaLaplacianFilter(
    DepthType srcDepth, int srcChannels,
    DepthType dstDepth, int dstChannels,
    int ksize = 1, double scale = 1.0, 
    CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
 {
    // Depth and channel count are folded into a single OpenCV type code.
    _ptr = CudaInvoke.cudaCreateLaplacianFilter(
       CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels), 
       ksize, scale, borderType, ref borderValue);
 }
        /// <summary>
        /// Overlays four measurement text lines — frame size, laser position,
        /// pixels-from-center (PFC) and distance — near the bottom-left of the frame.
        /// </summary>
        /// <param name="x">Laser x position in pixels; non-positive means "not found".</param>
        /// <param name="y">Laser y position in pixels; non-positive means "not found".</param>
        /// <param name="dist">Measured distance in cm (rendered with one decimal).</param>
        /// <param name="pfc">Pixels-from-center reading.</param>
        /// <param name="color">Text color.</param>
        /// <param name="src">Image the text is drawn onto.</param>
        public void DrawMeasurements( int x, int y, double dist, double pfc,
                                      MCvScalar color, Image<Bgr, Byte> src )
        {
            MCvFont font = new MCvFont( Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5 );

            // Frame size readout.
            string text_size = "Size (pix): "
                             + Convert.ToString( CvInvoke.cvGetSize( src ).Width )
                             + ", "
                             + Convert.ToString( CvInvoke.cvGetSize( src ).Height );

            string text_posn;
            string text_pfc;
            string text_dist;

            bool laserFound = ( x > 0 ) && ( y > 0 );
            if ( laserFound )
            {
                // Laser point was found: report the live readings.
                text_posn = "Position (pix):  " + Convert.ToString( x ) + ", " + Convert.ToString( y );
                text_pfc  = "PFC (pix): " + Convert.ToString( pfc );
                text_dist = "Distance (cm): " + String.Format( "{0:F1}", dist );
            }
            else
            {
                // Laser point was not found: fill with NULL readings.
                text_posn = "Position (pix):  NULL, NULL";
                text_pfc  = "PFC (pix): NULL";
                text_dist = "Distance (cm): NULL";
            }

            // Stack the four lines along the left edge of the frame.
            CvInvoke.cvPutText( src, text_size, new Point( 10, 400 ), ref font, color );
            CvInvoke.cvPutText( src, text_posn, new Point( 10, 420 ), ref font, color );
            CvInvoke.cvPutText( src, text_pfc,  new Point( 10, 440 ), ref font, color );
            CvInvoke.cvPutText( src, text_dist, new Point( 10, 460 ), ref font, color );
        }
Пример #9
0
 /// <summary>
 /// Create a Gpu LinearFilter
 /// </summary>
 /// <param name="srcDepth">The depth of the source image.</param>
 /// <param name="srcChannels">The number of channels of the source image.</param>
 /// <param name="dstDepth">The depth of the destination image.</param>
 /// <param name="dstChannels">The number of channels of the destination image.</param>
 /// <param name="kernel">Convolution kernel, single-channel floating point matrix (e.g. Emgu.CV.Matrix). If you want to apply different kernels to different channels, split the gpu image into separate color planes and process them individually</param>
 /// <param name="anchor">The anchor of the kernel that indicates the relative position of a filtered point within the kernel. The anchor should lie within the kernel. The special default value (-1,-1) means that it is at the kernel center</param>
 /// <param name="borderType">Border type. Use REFLECT101 as default.</param>
 /// <param name="borderValue">The border value</param>
 public CudaLinearFilter(
    DepthType srcDepth, int srcChannels,
    DepthType dstDepth, int dstChannels,
    IInputArray kernel,
    System.Drawing.Point anchor,
    CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
 {
    // Depth and channel count are folded into a single OpenCV type code.
    using (InputArray iaKernel = kernel.GetInputArray())
       _ptr = CudaInvoke.cudaCreateLinearFilter(
          CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels),
          iaKernel, ref anchor, borderType, ref borderValue);
 }
Пример #10
0
 /// <summary>
 /// Draws a line segment connecting <paramref name="start"/> and <paramref name="end"/>.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="start">First endpoint of the line.</param>
 /// <param name="end">Second endpoint of the line.</param>
 /// <param name="color">Line color.</param>
 /// <param name="thickness">Line thickness.</param>
 /// <param name="lineType">Type of the line.</param>
 /// <param name="shift">Number of fractional bits in the point coordinates.</param>
 public static void DrawLine(IInputOutputArray image,
     Point start,
     Point end,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray ioImage = image.GetInputOutputArray())
         cveLine(ioImage, ref start, ref end, ref color, thickness, lineType, shift);
 }
Пример #11
0
 /// <summary>
 /// Draws a circle outline (or a filled circle) on the image.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="center">Center of the circle.</param>
 /// <param name="radius">Radius of the circle.</param>
 /// <param name="color">Circle color.</param>
 /// <param name="thickness">Outline thickness; negative values fill the circle.</param>
 /// <param name="lineType">Type of the boundary line.</param>
 /// <param name="shift">Number of fractional bits in the coordinates.</param>
 public static void DrawCircle(IInputOutputArray image,
     Point center,
     int radius,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray ioImage = image.GetInputOutputArray())
         cveCircle(ioImage, ref center, radius, ref color, thickness, lineType, shift);
 }
Пример #12
0
        /// <summary>
        /// Draws the full ellipse described by a rotated rectangle.
        /// </summary>
        /// <param name="image">The image to draw on.</param>
        /// <param name="box">Rotated rectangle bounding the ellipse.</param>
        /// <param name="color">Ellipse color.</param>
        /// <param name="thickness">Outline thickness; negative values fill the ellipse.</param>
        /// <param name="lineType">Type of the boundary line.</param>
        /// <param name="shift">Number of fractional bits in the coordinates.</param>
        public static void DrawEllipse(IInputOutputArray image,
            RotatedRect box,
            MCvScalar color,
            int thickness = 1,
            LineType lineType = LineType.EightConnected,
            int shift = 0)
        {
            // NOTE(review): the half-axes deliberately cross over (width taken from
            // Size.Height, height from Size.Width) — preserved exactly as the original;
            // confirm against the Emgu/OpenCV axis convention before "fixing".
            Size halfAxes = new Size(
                (int)Math.Round(box.Size.Height * 0.5F),
                (int)Math.Round(box.Size.Width * 0.5F));

            DrawEllipse(image, Point.Round(box.Center), halfAxes, box.Angle, 0.0D, 360.0D, color, thickness, lineType, shift);
        }
Пример #13
0
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observerdImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 public static void DrawMatches(
    IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
    IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches,
    IInputOutputArray result,
    MCvScalar matchColor, MCvScalar singlePointColor,
    IInputArray mask = null,
    KeypointDrawType flags = KeypointDrawType.Default)
 {
    using (InputArray iaModelImage = modelImage.GetInputArray())
    using (InputArray iaObserverdImage = observerdImage.GetInputArray())
    using (InputOutputArray ioaResult = result.GetInputOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    // Note: the native call takes the observed image/keypoints first, then the model's.
    CvInvoke.drawMatchedFeatures(iaObserverdImage, observedKeyPoints, iaModelImage,
       modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, iaMask , flags);
 }
Пример #14
0
 /// <summary>
 /// Draws an elliptic arc (a full ellipse when the angles span 360 degrees).
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="center">Center of the ellipse.</param>
 /// <param name="axes">Half-lengths of the ellipse main axes.</param>
 /// <param name="angle">Rotation angle of the ellipse in degrees.</param>
 /// <param name="startAngle">Start angle of the arc in degrees.</param>
 /// <param name="endAngle">End angle of the arc in degrees.</param>
 /// <param name="color">Ellipse color.</param>
 /// <param name="thickness">Outline thickness; negative values fill the shape.</param>
 /// <param name="lineType">Type of the boundary line.</param>
 /// <param name="shift">Number of fractional bits in the coordinates.</param>
 public static void DrawEllipse(IInputOutputArray image,
     Point center,
     Size axes,
     double angle,
     double startAngle,
     double endAngle,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray ioImage = image.GetInputOutputArray())
         cveEllipse(ioImage, ref center, ref axes, angle, startAngle, endAngle, ref color, thickness, lineType, shift);
 }
Пример #15
0
        /// <summary>
        /// Detects faces in <paramref name="image"/> and keeps those in which at least one
        /// eye is also detected.  At most one face is added per call: the loop exits after
        /// the first face that contains eyes.
        /// </summary>
        /// <param name="image">Input BGR image.</param>
        /// <param name="cascadeClassifierFace">Cascade used for face detection.</param>
        /// <param name="cascadeClassifierEye">Cascade used for eye detection inside each face.</param>
        /// <returns>List of scored faces (possibly empty).</returns>
        public List<FaceScored> FindFaces(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image, CascadeClassifier cascadeClassifierFace, CascadeClassifier cascadeClassifierEye)
        {
            // Fixes vs. original: arrays use .Length instead of LINQ .Count(), and the
            // mangled indentation/braces are normalized.  Control flow is unchanged.
            List<FaceScored> currentFaces = new List<FaceScored>();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
            {
                gray._EqualizeHist();
                Size minFaceSize = new Size(minSizeFace, minSizeFace);
                Size maxFaceSize = new Size(maxSizeFace, maxSizeFace);
                Size minEyeSize = new Size(minSizeEye, minSizeEye);
                Size maxEyeSize = new Size(maxSizeEye, maxSizeEye);
                Rectangle[] facesDetected = cascadeClassifierFace.DetectMultiScale(gray, scaleFace, neighborsFace, minFaceSize, maxFaceSize);

                foreach (Rectangle f in facesDetected)
                {
                    // NOTE(review): `break` (not `continue`) abandons the whole scan at the
                    // first small face; detections are not size-ordered, so this may skip
                    // valid larger faces — confirm the intent before changing.
                    if (f.Width < 35)
                        break;

                    // Restrict the eye search and the statistics to the face region.
                    gray.ROI = f;

                    Rectangle[] eyesDetected = cascadeClassifierEye.DetectMultiScale(gray, scaleEye, neighborsEye, minEyeSize, maxEyeSize);
                    if (eyesDetected.Length > 0)
                    {
                        FaceScored faceModel = new FaceScored();
                        faceModel.FaceImage = gray.Bitmap;
                        faceModel.FaceImageFullColr = image.GetSubRect(f).Bitmap;
                        faceModel.Height = faceModel.FaceImage.Height;
                        faceModel.Width = faceModel.FaceImage.Width;
                        faceModel.EyesCount = eyesDetected.Length;

                        // Standard deviation of the ROI gray levels serves as the score.
                        Gray avgf = new Gray();
                        MCvScalar avstd = new MCvScalar();
                        gray.AvgSdv(out avgf, out avstd);
                        faceModel.StdDev = avstd.V0;

                        currentFaces.Add(faceModel);
                        if (currentFaces.Count % 5 == 0)
                            Console.WriteLine("FaceDetect Add every 5 faceModel" + faceModel.Width);
                        break;
                    }
                    gray.ROI = Rectangle.Empty;
                }
            }
            return currentFaces;
        }
Пример #16
0
 /// <summary>
 /// Draws contour outlines in the image, or fills the areas bounded by the contours.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="contours">All the input contours.</param>
 /// <param name="contourIdx">Index of the contour to draw; negative draws all contours.</param>
 /// <param name="color">Contour color.</param>
 /// <param name="thickness">Outline thickness; negative values fill the contour interiors.</param>
 /// <param name="lineType">Type of the contour line.</param>
 /// <param name="hierarchy">Optional hierarchy information; null for none.</param>
 /// <param name="maxLevel">Maximal level for drawn contours (used with hierarchy).</param>
 /// <param name="offset">Offset added to every contour point.</param>
 public static void DrawContours(IInputOutputArray image,
     IInputArray contours,
     int contourIdx,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     IInputArray hierarchy = null,
     int maxLevel = int.MaxValue,
     Point offset = default(Point))
 {
     using (InputOutputArray ioImage = image.GetInputOutputArray())
     using (InputArray iaContours = contours.GetInputArray())
     using (InputArray iaHierarchy = (hierarchy == null) ? EmptyArray<InputArray>.Value : hierarchy.GetInputArray())
     {
         cveDrawContours(ioImage, iaContours, contourIdx, ref color, thickness, lineType, iaHierarchy, maxLevel, ref offset);
     }
 }
Пример #17
0
    /// <summary>
    /// Converts <paramref name="src"/> to HSV in place, thresholds it into the configured
    /// HSV range, erodes, runs Canny, extracts the contours and renders them into a
    /// single-channel image which is handed to the m_onFrame callback.
    /// </summary>
    /// <param name="src">Input BGR frame; converted to HSV in place.</param>
    public void ApplyFilter(Mat src)
    {
        CvInvoke.CvtColor(src, src, ColorConversion.Bgr2Hsv);

        // FIX: the intermediate native buffers (threshold, element, hierarchy, contours,
        // the ScalarArrays and the per-contour VectorOfPoint) were never disposed,
        // leaking native memory on every frame.  `draw` is intentionally NOT disposed
        // here because it is handed to the frame callback.
        using (Mat threshold = new Mat(src.Height, src.Width, src.Depth, src.NumberOfChannels))
        using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
        using (Mat hierarchy = new Mat())
        {
            MCvScalar min = new MCvScalar(m_hmin, m_smin, m_vmin);
            MCvScalar max = new MCvScalar(m_hmax, m_smax, m_vmax);

            using (ScalarArray saMin = new ScalarArray(min))
            using (ScalarArray saMax = new ScalarArray(max))
            {
                CvInvoke.InRange(src, saMin, saMax, threshold);
            }

            using (Mat element = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), Point.Empty))
            {
                CvInvoke.Erode(threshold, threshold, element, Point.Empty, 1, BorderType.Constant, new MCvScalar(1.0f));
            }
            CvInvoke.Canny(threshold, threshold, 100, 255);

            CvInvoke.FindContours(threshold, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple, Point.Empty);

            Mat draw = new Mat(src.Height, src.Width, src.Depth, 1);
            draw.SetTo(new MCvScalar(0.0));

            //Debug.Log("CONTOURS");

            Point[][] contoursArray = contours.ToArrayOfArray();
            for (int i = 0; i < contoursArray.Length; i++)
            {
                CvInvoke.DrawContours(draw, contours, i, new MCvScalar(255.0), 1, LineType.EightConnected, null, int.MaxValue, Point.Empty);

                // Area is computed only for the (commented-out) debug trace.
                using (VectorOfPoint vp = new VectorOfPoint(contoursArray[i]))
                {
                    double a = CvInvoke.ContourArea(vp);
                    //Debug.Log("Contour: " + a);
                }
            }

            //Emgu.CV.UI.ImageViewer.Show(draw, "test");

            if (m_onFrame != null) m_onFrame.Invoke(draw);
        }
    }
Пример #18
0
 /// <summary>
 /// Native binding that creates a CUDA Laplacian filter and returns an opaque pointer to it.
 /// NOTE(review): the [DllImport] attribute is not visible in this chunk — presumably declared just above.
 /// </summary>
 internal static extern IntPtr cudaCreateLaplacianFilter(int srcType, int dstType, int ksize, double scale, CvEnum.BorderType borderMode, ref MCvScalar borderValue);
Пример #19
0
 ///<summary>
 ///Set the element of the Array to <paramref name="value"/>
 ///</summary>
 ///<param name="value"> The value to be set for each element of the Array </param>
 public void SetValue(MCvScalar value)
 {
     // IntPtr.Zero means "no mask": every element of the array receives the value.
     CvInvoke.cvSet(_ptr, value, IntPtr.Zero);
 }
Пример #20
0
 /// <summary>
 /// Native binding that creates a WArrow widget between two 3D points; the created
 /// widget pointers are written to <paramref name="widget3d"/> and <paramref name="widget"/>.
 /// NOTE(review): the [DllImport] attribute is not visible in this chunk.
 /// </summary>
 internal static extern IntPtr cveWArrowCreate(ref MCvPoint3D64f pt1, ref MCvPoint3D64f pt2, double thickness, ref MCvScalar color, ref IntPtr widget3d, ref IntPtr widget);
Пример #21
0
 /// <summary>
 /// Constructs a WCloud.
 /// </summary>
 /// <param name="cloud">Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.</param>
 /// <param name="color">A single Color for the whole cloud.</param>
 public WCloud(IInputArray cloud, MCvScalar color)
 {
     using (InputArray cloudArray = cloud.GetInputArray())
     {
         CvInvoke.cveWCloudCreateWithColor(cloudArray, ref color, ref _widget3dPtr, ref _widgetPtr);
     }
 }
Пример #22
0
 /// <summary>
 /// Native binding that creates a single-color WCloud widget from a point set; the created
 /// widget pointers are written to <paramref name="widget3d"/> and <paramref name="widget"/>.
 /// NOTE(review): the [DllImport] attribute is not visible in this chunk.
 /// </summary>
 internal static extern IntPtr cveWCloudCreateWithColor(IntPtr cloud, ref MCvScalar color, ref IntPtr widget3d, ref IntPtr widget);
Пример #23
0
        /// <summary>
        /// Renders a 640x480 heat map: the frame is divided into steps x steps chunks and
        /// each chunk's hue encodes the corresponding value in <paramref name="map"/>,
        /// scaled linearly between the map minimum (hue 0) and maximum (hue 120).
        /// </summary>
        /// <param name="steps">Chunks per axis; assumed to evenly divide 640 and 480 — TODO confirm.</param>
        /// <param name="map">One value per chunk in row-major order; must hold at least steps*steps entries.</param>
        /// <param name="img_map">Not read by this implementation; kept for interface compatibility.</param>
        /// <returns>The heat map converted to BGR.</returns>
        public Image <Bgr, Byte> GenerateMap(int steps, List <double> map, Image <Bgr, Byte> img_map)
        {
            // Fixes vs. original: removed the unused locals (origin, S, D and the
            // never-read `level`), and replaced the magic min sentinel (1000000000)
            // with double.MaxValue so the scan is correct for arbitrarily large values.
            Image <Hsv, Byte> img_heated = new Image <Hsv, Byte>(640, 480);
            Hsv    pixel;
            int    index;
            double new_hue;
            double max = 0, min = double.MaxValue;

            // Find the value range of the map.
            for (int n = 0; n < map.Count; n++)
            {
                if (map[n] > max)
                {
                    max = map[n];
                }
                if (map[n] < min)
                {
                    min = map[n];
                }
            }

            // Set each pixel's saturation and brightness to the max.
            for (int n = 0; n < 480 * 640; n++)
            {
                pixel           = img_heated[n / 640, n % 640];
                pixel.Satuation = 255;   // (sic) "Satuation" is Emgu's property spelling
                pixel.Value     = 255;
                img_heated[n / 640, n % 640] = pixel;
            }

            // Color one chunk of the image at a time.
            for (int y = 0; y < 480; y += 480 / steps)
            {
                for (int x = 0; x < 640; x += 640 / steps)
                {
                    // Hue represents the chunk's value scaled into [0, 120].
                    index = (y / (480 / steps)) * (steps) + (x / (640 / steps));
                    new_hue = (double)120 * (map[index] - min) / (max - min);

                    // Pixel by pixel within the chunk.
                    for (int yy = y; yy < y + 480 / steps; yy++)
                    {
                        for (int xx = x; xx < x + 640 / steps; xx++)
                        {
                            pixel              = img_heated[yy, xx];
                            pixel.Hue          = new_hue;
                            img_heated[yy, xx] = pixel;
                        }
                    }
                }
            }

            return img_heated.Convert <Bgr, Byte>();
        }
Пример #24
0
 /// <summary>
 /// Native binding that dilates the source image with the given structuring element.
 /// NOTE(review): the [DllImport] attribute is not visible in this chunk.
 /// </summary>
 private static extern void cveDilate(IntPtr src, IntPtr dst, IntPtr kernel, ref Point anchor, int iterations, CvEnum.BorderType borderType, ref MCvScalar borderValue);
Пример #25
0
 /// <summary>
 /// Native binding that draws facial landmark points onto the image.
 /// NOTE(review): the [DllImport] attribute is not visible in this chunk.
 /// </summary>
 internal extern static void cveDrawFacemarks(IntPtr image, IntPtr points, ref MCvScalar color);
Пример #26
0
        /// <summary>
        /// Creates a 4-dimensional blob from a single image. Optionally resizes and crops the
        /// image from center, subtracts mean values, scales values by scalefactor, and swaps
        /// the blue and red channels.
        /// </summary>
        /// <param name="image">Input image (with 1- or 3-channels).</param>
        /// <param name="scaleFactor">Multiplier for image values.</param>
        /// <param name="size">Spatial size for output image</param>
        /// <param name="mean">Scalar with mean values which are subtracted from channels. Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.</param>
        /// <param name="swapRB">Flag which indicates that swap first and last channels in 3-channel image is necessary.</param>
        /// <param name="crop">Flag which indicates whether image will be cropped after resize or not</param>
        /// <returns>4-dimensional Mat with NCHW dimensions order.</returns>
        public static Mat BlobFromImage(Mat image, double scaleFactor = 1.0, Size size = new Size(), MCvScalar mean = new MCvScalar(), bool swapRB = true, bool crop = true)
        {
            // The native call fills the freshly allocated blob in place.
            Mat blob = new Mat();
            cveDnnBlobFromImage(image, scaleFactor, ref size, ref mean, swapRB, crop, blob);
            return blob;
        }
Пример #27
0
 /// <summary>
 /// Sets all or some of the array elements to the specified value.
 /// </summary>
 /// <param name="value">Assigned scalar value.</param>
 /// <param name="mask">Operation mask of the same size as the umat.</param>
 public void SetTo(MCvScalar value, IInputArray mask = null)
 {
     // Wrap the scalar so the array-based overload can be reused.
     using (ScalarArray scalarArray = new ScalarArray(value))
     {
         SetTo(scalarArray, mask);
     }
 }
Пример #28
0
 /// <summary>
 /// Inplace fills the Array with normally distributed random numbers, using a seed
 /// drawn from this instance's random generator.
 /// </summary>
 /// <param name="mean">The mean value of the random numbers.</param>
 /// <param name="std">The standard deviation of the random numbers.</param>
 public void SetRandNormal(MCvScalar mean, MCvScalar std)
 {
     UInt64 seed = (UInt64)_randomGenerator.Next();
     SetRandNormal(seed, mean, std);
 }
Пример #29
0
 /// <summary>
 /// Native binding that creates a WCylinder widget along the axis defined by two 3D points;
 /// the created widget pointers are written to <paramref name="widget3d"/> and <paramref name="widget"/>.
 /// NOTE(review): the [DllImport] attribute is not visible in this chunk.
 /// </summary>
 internal static extern IntPtr cveWCylinderCreate(ref MCvPoint3D64f axisPoint1, ref MCvPoint3D64f axisPoint2, double radius, int numsides, ref MCvScalar color, ref IntPtr widget3d, ref IntPtr widget);
Пример #30
0
 /// <summary>
 /// Constructs a WCylinder.
 /// </summary>
 /// <param name="axisPoint1">A point1 on the axis of the cylinder.</param>
 /// <param name="axisPoint2">A point2 on the axis of the cylinder.</param>
 /// <param name="radius">Radius of the cylinder.</param>
 /// <param name="numsides">Resolution of the cylinder.</param>
 /// <param name="color">Color of the cylinder.</param>
 // NOTE(review): axisPoint1 is taken by ref while axisPoint2 is by value — asymmetric,
 // but changing either would break existing callers; confirm against the native signature.
 public WCylinder(ref MCvPoint3D64f axisPoint1, MCvPoint3D64f axisPoint2, double radius, int numsides, MCvScalar color)
 {
     _ptr = CvInvoke.cveWCylinderCreate(ref axisPoint1, ref axisPoint2, radius, numsides, ref color, ref _widget3dPtr, ref _widgetPtr);
 }
Пример #31
0
        /// <summary>
        /// Loads the image at <paramref name="fileName"/>, masks it to the given Lab color
        /// range, cleans the mask with erode/dilate, finds the largest contour and derives
        /// its center, area, bounding rectangle, length/width and aspect ratio.  The
        /// annotated result is saved as "&lt;fileName&gt;_after.bmp".  When no contour is
        /// found the returned analysis is left unpopulated.
        /// </summary>
        /// <param name="fileName">Path of the image file to analyse.</param>
        /// <param name="lLow">Lower bound of the L channel.</param>
        /// <param name="lHigh">Upper bound of the L channel.</param>
        /// <param name="aLow">Lower bound of the a channel.</param>
        /// <param name="aHigh">Upper bound of the a channel.</param>
        /// <param name="bLow">Lower bound of the b channel.</param>
        /// <param name="bHigh">Upper bound of the b channel.</param>
        /// <returns>The populated ImageAnalysis.</returns>
        // NOTE(review): several Mat instances (tempMat, temp, result, hierachy) are never
        // disposed — consider using-blocks in a future change.
        public ImageAnalysis analyse(string fileName, int lLow, int lHigh, int aLow, int aHigh, int bLow, int bHigh)
        {
            ImageAnalysis analysis = new ImageAnalysis();

            Mat tempMat = new Mat(fileName);

            // pixel mask, erode x2, dilate x2
            //CvInvoke.GaussianBlur(mat, mat, new Size(5, 5), 1.5, 1.5);
            //CvInvoke.GaussianBlur(mat, mat, new Size(5, 5), 1.5, 1.5);
            //GetColorPixelMask(tempMat, tempMat, maskHueUpper, maskHueLower, maskSatUpper, maskSatLower, maskLumUpper, maskLumLower);
            GetLabColorPixelMask(tempMat, tempMat, lLow, lHigh, aLow, aHigh, bLow, bHigh);
            //tempMat.Save(fileName + "temp.jpg");

            CvInvoke.Erode(tempMat, tempMat, null, new Point(-1, -1), 2, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            Mat temp = new Mat();

            CvInvoke.Dilate(tempMat, temp, null, new Point(-1, -1), 2, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            tempMat = temp;

            //find largest contour
            // NOTE(review): the depth literal `0` presumably maps to 8-bit — confirm against DepthType.
            Mat           result = new Mat(540, 720, 0, 1);
            int           largest_contour_index = 0;
            double        largest_area          = 0;
            VectorOfPoint largestContour;

            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
            Mat hierachy = new Mat();

            CvInvoke.FindContours(tempMat, contours, hierachy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);

            if (contours.Size > 0)
            {
                for (int i = 0; i < contours.Size; i++)
                {
                    MCvScalar color = new MCvScalar(0, 0, 255);

                    double a = CvInvoke.ContourArea(contours[i], false);  //  Find the area of contour
                    if (a > largest_area)
                    {
                        largest_area          = a;
                        largest_contour_index = i;                //Store the index of largest contour
                    }

                    //CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(255, 0, 0));
                }


                //draw largest contour
                CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(255, 255, 255), 1, LineType.EightConnected, hierachy);
                largestContour = new VectorOfPoint(contours[largest_contour_index].ToArray());


                Image <Bgr, Byte> tempImg = result.ToImage <Bgr, Byte>();

                //Find center point (via image moments of the largest contour)
                MCvMoments    m      = CvInvoke.Moments(largestContour, true);
                MCvPoint2D64f center = m.GravityCenter;
                //textBox1.AppendText("Center point: " + Math.Round(center.X, 3) + "px, " + Math.Round(center.Y, 3) + "px\n");
                tempImg.Draw(new Cross2DF(new PointF((float)center.X, (float)center.Y), 3, 3), new Bgr(0, 0, 255), 2);

                //Find Area
                double area = CvInvoke.ContourArea(largestContour);
                //textBox1.AppendText("Area: " + area + "px,     " + convertSqPxToSqMm(area) + "sq mm\n");

                //Find Bounding Rectangle (minimum-area rotated rectangle)
                RotatedRect rect    = CvInvoke.MinAreaRect(largestContour);
                float       width0  = rect.Size.Width;
                float       height0 = rect.Size.Height;

                // Length is the longer side, width the shorter, regardless of orientation.
                float length = (height0 >= width0 ? height0 : width0);
                float width  = (height0 < width0 ? height0 : width0);

                tempImg.Draw(rect, new Bgr(255, 0, 0), 2);
                //textBox1.AppendText("Width: " + width + "px  Length: " + length + "px\n");
                //textBox1.AppendText("Width: " + convertPxToMm(width) + "mm  Length: " + convertPxToMm(length) + "mm\n");

                double ratio = Math.Round((length / width), 3);
                //textBox1.AppendText("Ratio (width:length): 1:" + ratio + "\n");

                //save and display
                tempImg.Save(fileName + "_after.bmp");
                tempMat = tempImg.Mat;

                analysis.Contours            = contours;
                analysis.LargestContourIndex = largest_contour_index;
                analysis.LargestContour      = largestContour;
                analysis.Center      = center;
                analysis.Area        = area;
                analysis.BoundingBox = rect;
                analysis.Length      = length;
                analysis.Width       = width;
                analysis.Ratio       = ratio;
                analysis.Result      = tempImg.ToBitmap();
            }


            return(analysis);
        }
Пример #32
0
 /// <summary>
 /// Constructs a WArrow.
 /// </summary>
 /// <param name="pt1">Start point of the arrow.</param>
 /// <param name="pt2">End point of the arrow.</param>
 /// <param name="thickness">Thickness of the arrow. Thickness of arrow head is also adjusted accordingly.</param>
 /// <param name="color">Color of the arrow.</param>
 public WArrow(MCvPoint3D64f pt1, MCvPoint3D64f pt2, double thickness, MCvScalar color)
 {
     // The native call writes the widget pointers into the two instance fields.
     _ptr = CvInvoke.cveWArrowCreate(ref pt1, ref pt2, thickness, ref color, ref _widget3dPtr, ref _widgetPtr);
 }
Пример #33
0
 /// <summary>
 /// Set mean value for frame.
 /// </summary>
 /// <param name="mean">Scalar with mean values which are subtracted from channels.</param>
 public void SetInputMean(MCvScalar mean)
 {
     // `ref` lets the native layer access the struct directly; presumably read-only
     // on the native side — confirm against the native signature.
     DnnInvoke.cveModelSetInputMean(_model, ref mean);
 }
Пример #34
0
 ///<summary>
 ///Set the element of the Array to <paramref name="value"/>, using the specific <paramref name="mask"/>
 ///</summary>
 ///<param name="value">The value to be set</param>
 ///<param name="mask">The mask for the operation; null applies the value everywhere</param>
 public void SetValue(MCvScalar value, CvArray <Byte> mask)
 {
     // Resolve the native mask pointer up front; IntPtr.Zero means "no mask".
     IntPtr maskPtr;
     if (mask == null)
         maskPtr = IntPtr.Zero;
     else
         maskPtr = mask.Ptr;

     CvInvoke.cvSet(_ptr, value, maskPtr);
 }
Пример #35
0
        /// <summary>
        /// Creates a 4-dimensional blob from a series of images. Optionally resizes and crops
        /// the images from center, subtracts mean values, scales values by scalefactor, and
        /// swaps the blue and red channels.
        /// </summary>
        /// <param name="images">Input images (all with 1- or 3-channels).</param>
        /// <param name="scaleFactor">Multiplier for images values.</param>
        /// <param name="size">Spatial size for output image</param>
        /// <param name="mean">Scalar with mean values which are subtracted from channels. Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.</param>
        /// <param name="swapRB">Flag which indicates that swap first and last channels in 3-channel image is necessary.</param>
        /// <param name="crop">Flag which indicates whether image will be cropped after resize or not</param>
        /// <returns>Input image is resized so one side after resize is equal to corresponding dimension in size and another one is equal or larger. Then, crop from the center is performed.</returns>
        public static Mat BlobFromImages(Mat[] images, double scaleFactor = 1.0, Size size = new Size(), MCvScalar mean = new MCvScalar(), bool swapRB = true, bool crop = true)
        {
            // The native call fills the freshly allocated blob in place.
            Mat blob = new Mat();
            using (VectorOfMat imageVector = new VectorOfMat(images))
                cveDnnBlobFromImages(imageVector, scaleFactor, ref size, ref mean, swapRB, crop, blob);
            return blob;
        }
Пример #36
0
 /// <summary>
 /// Utility to draw the detected facial landmark points.
 /// </summary>
 /// <param name="image">The input image to be processed.</param>
 /// <param name="points">Contains the data of points which will be drawn.</param>
 /// <param name="color">The color of points in BGR format</param>
 public static void DrawFacemarks(IInputOutputArray image, IInputArray points, MCvScalar color)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
     using (InputArray iaPoints = points.GetInputArray())
     {
         cveDrawFacemarks(ioaImage, iaPoints, ref color);
     }
 }
Пример #37
0
 // P/Invoke binding for the native remap wrapper; presumably maps each destination
 // pixel through the map1/map2 lookup tables — confirm against the native cveRemap export.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 private static extern void cveRemap(IntPtr src, IntPtr dst, IntPtr map1, IntPtr map2, CvEnum.Inter interpolation, CvEnum.BorderType borderMode, ref MCvScalar borderValue);
Пример #38
0
 /// <summary>
 /// Initializes this array as a scaled identity matrix, with <paramref name="value"/> on the diagonal.
 /// </summary>
 /// <param name="value">The value on the diagonal</param>
 public void SetIdentity(MCvScalar value)
 {
     // Delegates to the CvInvoke helper, passing this array as the target.
     CvInvoke.SetIdentity(this, value);
 }
Пример #39
0
 // P/Invoke binding: presumably sets the per-channel mean value the native DNN model
 // subtracts from its input — confirm against the native cveModelSetInputMean export.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 internal static extern void cveModelSetInputMean(
     IntPtr model,
     ref MCvScalar mean);
Пример #40
0
 /// <summary>
 /// Inplace fills Array with uniformly distributed random numbers
 /// </summary>
 /// <param name="seed">Seed for the random number generator</param>
 /// <param name="floorValue">the inclusive lower boundary of random numbers range</param>
 /// <param name="ceilingValue">the exclusive upper boundary of random numbers range</param>
 public void SetRandUniform(UInt64 seed, MCvScalar floorValue, MCvScalar ceilingValue)
 {
     // Delegates to the legacy cvRandArr with the uniform distribution type.
     CvInvoke.cvRandArr(ref seed, Ptr, CvEnum.RandType.Uni, floorValue, ceilingValue);
 }
Пример #41
0
 /// <summary>
 /// Inplace fills Array with normally distributed random numbers
 /// </summary>
 /// <param name="seed">Seed for the random number generator</param>
 /// <param name="mean">the mean value of random numbers</param>
 /// <param name="std"> the standard deviation of random numbers</param>
 public void SetRandNormal(UInt64 seed, MCvScalar mean, MCvScalar std)
 {
     // Delegates to the legacy cvRandArr with the normal (Gaussian) distribution type.
     CvInvoke.cvRandArr(ref seed, Ptr, CvEnum.RandType.Normal, mean, std);
 }
Пример #42
0
 // P/Invoke binding for the native arrowed-line drawing routine
 // (line from pt1 to pt2; tipLength is the arrow-tip length relative to the line).
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 private static extern void cveArrowedLine(IntPtr img, ref Point pt1, ref Point pt2, ref MCvScalar color,
    int thickness, CvEnum.LineType lineType, int shift, double tipLength);
        /// <summary>
        /// Detect vehicles (and their license plates) in the given image: runs the
        /// vehicle/plate detection model, recognizes each vehicle's color and type,
        /// OCRs each plate, then attaches every plate to the vehicle whose region contains it.
        /// </summary>
        /// <param name="image">The image</param>
        /// <returns>The detected vehicles.</returns>
        public Vehicle[] Detect(IInputArray image)
        {
            // Minimum confidence for a detection to be accepted as a vehicle / license plate.
            float vehicleConfidenceThreshold      = 0.5f;
            float licensePlateConfidenceThreshold = 0.5f;


            // Blob parameters for the OCR input: no scaling, no mean subtraction.
            double    scale   = 1.0;
            MCvScalar meanVal = new MCvScalar();

            List <Vehicle>      vehicles = new List <Vehicle>();
            List <LicensePlate> plates   = new List <LicensePlate>();

            using (InputArray iaImage = image.GetInputArray())
                using (Mat iaImageMat = iaImage.GetMat())
                    // NOTE(review): class id 1 appears to denote "vehicle" and 2 "license plate";
                    // confirm against the detection model's label map.
                    foreach (DetectedObject vehicleOrPlate in _vehicleLicensePlateDetectionModel.Detect(image, 0.0f, 0.0f))
                    {
                        Rectangle region = vehicleOrPlate.Region;

                        if (vehicleOrPlate.ClassId == 1 && vehicleOrPlate.Confident > vehicleConfidenceThreshold)
                        {
                            //this is a vehicle
                            Vehicle v = new Vehicle();
                            v.Region = region;

                            #region find out the type and color of the vehicle

                            // Crop the vehicle region and run the attribute recognizer,
                            // which fills vm with two outputs: color scores and type scores.
                            using (Mat vehicle = new Mat(iaImageMat, region))
                                using (VectorOfMat vm = new VectorOfMat(2))
                                {
                                    _vehicleAttrRecognizerModel.Predict(vehicle, vm);
                                    //_vehicleAttrRecognizer.Forward(vm, new string[] { "color", "type" });
                                    using (Mat vehicleColorMat = vm[0])
                                        using (Mat vehicleTypeMat = vm[1])
                                        {
                                            // Argmax over each score vector picks the best color / type label.
                                            float[] vehicleColorData = vehicleColorMat.GetData(false) as float[];
                                            float   maxProbColor     = vehicleColorData.Max();
                                            int     maxIdxColor      = Array.IndexOf(vehicleColorData, maxProbColor);
                                            v.Color = _colorName[maxIdxColor];
                                            float[] vehicleTypeData = vehicleTypeMat.GetData(false) as float[];
                                            float   maxProbType     = vehicleTypeData.Max();
                                            int     maxIdxType      = Array.IndexOf(vehicleTypeData, maxProbType);
                                            v.Type = _vehicleType[maxIdxType];
                                        }
                                }
                            #endregion

                            vehicles.Add(v);
                        }
                        else if (vehicleOrPlate.ClassId == 2 && vehicleOrPlate.Confident > licensePlateConfidenceThreshold)
                        {
                            //this is a license plate
                            LicensePlate p = new LicensePlate();
                            p.Region = region;

                            #region OCR on license plate
                            using (Mat plate = new Mat(iaImageMat, region))
                            {
                                // The OCR network expects a 94x24, 32-bit float input blob.
                                using (Mat inputBlob = DnnInvoke.BlobFromImage(
                                           plate,
                                           scale,
                                           new Size(94, 24),
                                           meanVal,
                                           false,
                                           false,
                                           DepthType.Cv32F))
                                {
                                    _ocr.SetInput(inputBlob, "data");
                                    using (Mat output = _ocr.Forward("decode"))
                                    {
                                        // Each output value is a character index into _plateText;
                                        // negative values are skipped (blank positions).
                                        float[]       plateValue = output.GetData(false) as float[];
                                        StringBuilder licensePlateStringBuilder = new StringBuilder();
                                        foreach (int j in plateValue)
                                        {
                                            if (j >= 0)
                                            {
                                                licensePlateStringBuilder.Append(_plateText[j]);
                                            }
                                        }

                                        p.Text = licensePlateStringBuilder.ToString();
                                    }
                                }
                            }
                            #endregion

                            plates.Add(p);
                        }
                    }

            // Associate each recognized plate with the first vehicle whose region contains it.
            foreach (LicensePlate p in plates)
            {
                foreach (Vehicle v in vehicles)
                {
                    if (v.ContainsPlate(p))
                    {
                        v.LicensePlate = p;
                        break;
                    }
                }
            }

            return(vehicles.ToArray());
        }
Пример #44
0
 /// <summary>
 /// Draws one or more polygonal curves on the image.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pts">Array of pointers to polylines</param>
 /// <param name="isClosed">
 /// Indicates whether the polylines must be drawn closed.
 /// If !=0, the function draws the line from the last vertex of every contour to the first vertex.
 /// </param>
 /// <param name="color">Polyline color</param>
 /// <param name="thickness">Thickness of the polyline edges</param>
 /// <param name="lineType">Type of the line segments, see cvLine description</param>
 /// <param name="shift">Number of fractional bits in the vertex coordinates</param>
 public static void Polylines(IInputOutputArray img, IInputArray pts, bool isClosed, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
    using (InputOutputArray ioaImage = img.GetInputOutputArray())
    {
       using (InputArray iaPoints = pts.GetInputArray())
       {
          cvePolylines(ioaImage, iaPoints, isClosed, ref color, thickness, lineType, shift);
       }
    }
 }
Пример #45
0
 /// <summary>
 /// Draws a rectangle specified by a Rectangle structure.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="rect">The rectangle to be drawn</param>
 /// <param name="color">Line color</param>
 /// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle.</param>
 /// <param name="lineType">Type of the line</param>
 /// <param name="shift">Number of fractional bits in the point coordinates</param>
 public static void Rectangle(IInputOutputArray img, Rectangle rect, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
    using (InputOutputArray ioaImage = img.GetInputOutputArray())
    {
       cveRectangle(ioaImage, ref rect, ref color, thickness, lineType, shift);
    }
 }
Пример #46
0
 /// <summary>
 /// Copies scalar value to every selected element of the destination GpuMat:
 /// GpuMat(I)=value if mask(I)!=0
 /// </summary>
 /// <param name="value">Fill value</param>
 /// <param name="mask">Operation mask, 8-bit single channel GpuMat; specifies elements of destination array to be changed. Can be null if not used.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void SetTo(MCvScalar value, GpuMat <Byte> mask, Stream stream)
 {
     // Delegates to the native GpuMatSetTo on this GpuMat's unmanaged pointer.
     GpuInvoke.GpuMatSetTo(_ptr, value, mask, stream);
 }
Пример #47
0
 /// <summary>
 /// Inplace fills Array with uniformly distributed random numbers, using a seed
 /// drawn from the internal random generator.
 /// </summary>
 /// <param name="floorValue">The inclusive lower boundary of the random numbers range</param>
 /// <param name="ceilingValue">The exclusive upper boundary of the random numbers range</param>
 public void SetRandUniform(MCvScalar floorValue, MCvScalar ceilingValue)
 {
     SetRandUniform((UInt64)_randomGenerator.Next(), floorValue, ceilingValue);
 }
Пример #48
0
 /// <summary>
 /// Draws one or more polygonal curves defined by an array of points.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pts">Array points</param>
 /// <param name="isClosed">
 /// Indicates whether the polylines must be drawn closed.
 /// If !=0, the function draws the line from the last vertex of every contour to the first vertex.
 /// </param>
 /// <param name="color">Polyline color</param>
 /// <param name="thickness">Thickness of the polyline edges</param>
 /// <param name="lineType">Type of the line segments, see cvLine description</param>
 /// <param name="shift">Number of fractional bits in the vertex coordinates</param>
 public static void Polylines(IInputOutputArray img, Point[] pts, bool isClosed, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
    // Wrap the managed point array in a native vector and forward to the IInputArray overload.
    using (VectorOfPoint pointVector = new VectorOfPoint(pts))
    {
       Polylines(img, pointVector, isClosed, color, thickness, lineType, shift);
    }
 }
Пример #49
0
 /// <summary>
 /// Constructs default planar circle centred at origin with plane normal along z-axis.
 /// </summary>
 /// <param name="radius">Radius of the circle.</param>
 /// <param name="thickness">Thickness of the circle.</param>
 /// <param name="color">Color of the circle.</param>
 public WCircle(double radius, double thickness, MCvScalar color)
 {
     // The native constructor also returns the base Widget3D/Widget pointers for this object.
     _ptr = CvInvoke.cveWCircleCreateAtOrigin(radius, thickness, ref color, ref _widget3dPtr, ref _widgetPtr);
 }
Пример #50
0
 // P/Invoke binding for the native polylines drawing routine; parameter semantics
 // match the public Polylines wrapper.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 private static extern void cvePolylines(
    IntPtr img, IntPtr pts,
    [MarshalAs(CvInvoke.BoolMarshalType)]
    bool isClosed,
    ref MCvScalar color,
    int thickness, CvEnum.LineType lineType, int shift);
Пример #51
0
 /// <summary>
 /// Constructs repositioned planar circle.
 /// </summary>
 /// <param name="radius">Radius of the circle.</param>
 /// <param name="center">Center of the circle.</param>
 /// <param name="normal">Normal of the plane in which the circle lies.</param>
 /// <param name="thickness">Thickness of the circle.</param>
 /// <param name="color">Color of the circle.</param>
 public WCircle(double radius, MCvPoint3D64f center, MCvPoint3D64f normal, double thickness, MCvScalar color)
 {
     // The native constructor also returns the base Widget3D/Widget pointers for this object.
     _ptr = CvInvoke.cveWCircleCreate(radius, ref center, ref normal, thickness, ref color, ref _widget3dPtr, ref _widgetPtr);
 }
Пример #52
0
 // P/Invoke binding for the native rectangle drawing routine; parameter semantics
 // match the public Rectangle wrapper.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 private static extern void cveRectangle(IntPtr img, ref Rectangle rect, ref MCvScalar color, int thickness, CvEnum.LineType lineType, int shift);
Пример #53
0
 // P/Invoke binding backing the WCircle(radius, thickness, color) constructor;
 // also returns the base Widget3D/Widget pointers through the ref parameters.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 internal static extern IntPtr cveWCircleCreateAtOrigin(double radius, double thickness, ref MCvScalar color, ref IntPtr widget3d, ref IntPtr widget);
Пример #54
0
 /// <summary>
 /// Dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken.
 /// The function supports the in-place mode. Dilation can be applied several (iterations) times. In case of color image each channel is processed independently.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="element">Structuring element used for dilation. If it is null, a 3x3 rectangular structuring element is used</param>
 /// <param name="iterations">Number of times dilation is applied</param>
 /// <param name="borderType">Pixel extrapolation method</param>
 /// <param name="borderValue">Border value in case of a constant border</param>
 /// <param name="anchor">Position of the anchor within the element; default value (-1, -1) means that the anchor is at the element center.</param>
 public static void Dilate(IInputArray src, IOutputArray dst, IInputArray element, Point anchor, int iterations, CvEnum.BorderType borderType, MCvScalar borderValue)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaElement = element == null ? InputArray.GetEmpty() : element.GetInputArray())
       cveDilate(iaSrc, oaDst, iaElement, ref anchor, iterations, borderType, ref borderValue);
 }
Пример #55
0
 // P/Invoke binding backing the WCircle(radius, center, normal, thickness, color) constructor;
 // also returns the base Widget3D/Widget pointers through the ref parameters.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 internal static extern IntPtr cveWCircleCreate(double radius, ref MCvPoint3D64f center, ref MCvPoint3D64f normal, double thickness, ref MCvScalar color, ref IntPtr widget3d, ref IntPtr widget);
Пример #56
0
 // P/Invoke binding backing the CudaBoxMinFilter constructor; srcType is the packed
 // depth/channel code produced by CvInvoke.MakeType.
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 internal static extern IntPtr cudaCreateBoxMinFilter(int srcType, ref Size ksize, ref Point anchor, CvEnum.BorderType borderMode, ref MCvScalar borderValue);
Пример #57
0
        /// <summary>
        /// Runs the YOLO detector on the given image and returns the detections whose best
        /// class score exceeds the confidence threshold.
        /// </summary>
        /// <param name="image">The image to run detection on.</param>
        /// <param name="confThreshold">Minimum class score for a detection to be kept.</param>
        /// <returns>The detected objects.</returns>
        /// <exception cref="Exception">Thrown when the network's output layer type is not "Region".</exception>
        public DetectedObject[] Detect(Mat image, double confThreshold = 0.5)
        {
            // No mean subtraction; intensity scaling is applied via SetInput's scale factor below.
            MCvScalar meanVal = new MCvScalar();

            Size imageSize = image.Size;

            // Build a 416x416, 8-bit input blob with R and B channels swapped, no cropping.
            DnnInvoke.BlobFromImage(
                image,
                _inputBlob,
                1.0,
                new Size(416, 416),
                meanVal,
                true,
                false,
                DepthType.Cv8U);
            // 0.00392 ~= 1/255: rescales the 8-bit blob to the [0, 1] range.
            _yoloDetector.SetInput(_inputBlob, "", 0.00392);
            int[]  outLayers    = _yoloDetector.UnconnectedOutLayers;
            String outLayerType = _yoloDetector.GetLayer(outLayers[0]).Type;

            String[] outLayerNames = _yoloDetector.UnconnectedOutLayersNames;

            using (VectorOfMat outs = new VectorOfMat())
            {
                List <DetectedObject> detectedObjects = new List <DetectedObject>();
                _yoloDetector.Forward(outs, outLayerNames);

                if (outLayerType.Equals("Region"))
                {
                    int size = outs.Size;

                    for (int i = 0; i < size; i++)
                    {
                        // Network produces output blob with a shape NxC where N is a number of
                        // detected objects and C is the number of classes + 5: the first 5
                        // numbers are [center_x, center_y, width, height, objectness]; the
                        // class scores start at column 5 (see the sub-matrix range below).
                        using (Mat m = outs[i])
                        {
                            int rows = m.Rows;
                            int cols = m.Cols;
                            float[,] data = m.GetData(true) as float[, ];
                            for (int j = 0; j < rows; j++)
                            {
                                // View onto row j's class-score columns only.
                                using (Mat subM = new Mat(m, new Emgu.CV.Structure.Range(j, j + 1), new Emgu.CV.Structure.Range(5, cols)))
                                {
                                    double minVal = 0, maxVal = 0;
                                    Point  minLoc = new Point();
                                    Point  maxLoc = new Point();
                                    // maxLoc.X is the column index of the best score, i.e. the class id.
                                    CvInvoke.MinMaxLoc(subM, ref minVal, ref maxVal, ref minLoc, ref maxLoc);
                                    if (maxVal > confThreshold)
                                    {
                                        // Box coordinates are fractions of the image size; convert to
                                        // pixels and turn center/size into a top-left based rectangle.
                                        int       centerX = (int)(data[j, 0] * imageSize.Width);
                                        int       centerY = (int)(data[j, 1] * imageSize.Height);
                                        int       width   = (int)(data[j, 2] * imageSize.Width);
                                        int       height  = (int)(data[j, 3] * imageSize.Height);
                                        int       left    = centerX - width / 2;
                                        int       top     = centerY - height / 2;
                                        Rectangle rect    = new Rectangle(left, top, width, height);

                                        DetectedObject obj = new DetectedObject();
                                        obj.ClassId   = maxLoc.X;
                                        obj.Confident = maxVal;
                                        obj.Region    = rect;
                                        obj.Label     = _labels[obj.ClassId];
                                        detectedObjects.Add(obj);
                                    }
                                }
                            }
                        }
                    }

                    return(detectedObjects.ToArray());
                }
                else
                {
                    throw new Exception(String.Format("Unknown output layer type: {0}", outLayerType));
                }
            }
        }
Пример #58
0
 // P/Invoke binding for the native affine-warp routine; parameter semantics match
 // the public WarpAffine wrapper (flags combines the interpolation and warp methods).
 // NOTE(review): the [DllImport] attribute is not visible in this snippet.
 private static extern void cveWarpAffine(
    IntPtr src,
    IntPtr dst,
    IntPtr mapMatrix,
    ref Size dsize,
    int flags,
    CvEnum.BorderType borderMode,
    ref MCvScalar fillval);
Пример #59
0
 /// <summary>
 /// Create a BoxMin filter.
 /// </summary>
 /// <param name="ksize">Size of the kernel</param>
 /// <param name="anchor">The center of the kernel. Use (-1, -1) for the default kernel center.</param>
 /// <param name="borderType">The border type.</param>
 /// <param name="borderValue">The border value.</param>
 /// <param name="srcDepth">The depth of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 public CudaBoxMinFilter(DepthType srcDepth, int srcChannels, Size ksize, Point anchor, CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
 {
    // MakeType packs the depth and channel count into the single type code the native API expects.
    _ptr = CudaInvoke.cudaCreateBoxMinFilter(CvInvoke.MakeType(srcDepth, srcChannels), ref ksize, ref anchor, borderType, ref borderValue);
 }
        /// <summary>
        /// Binarizes the image in <paramref name="fileName"/>, saves the binary image next to
        /// <paramref name="output"/> (with a "_binary" suffix before the extension), draws the
        /// detected contours onto a color copy, annotates it with the contour count, saves it
        /// to <paramref name="output"/>, and returns the number of contours found.
        /// </summary>
        /// <param name="fileName">Path of the source image to process.</param>
        /// <param name="output">Path where the annotated result image is written.</param>
        /// <returns>The number of contours detected in the binarized image.</returns>
        public int apply(string fileName, string output)
        {
            int counter = 0;

            Emgu.CV.Image<Bgr, Byte> imgS = new Emgu.CV.Image<Bgr, Byte>(fileName);

            Emgu.CV.Image<Gray, Byte> img = new Emgu.CV.Image<Gray, Byte>(fileName);

            // Binarize: any pixel above 1 becomes 255.
            int thresh = 1;
            int max_thresh = 255;
            img = img.ThresholdBinary(new Gray(thresh), new Gray(max_thresh));

            // BUG FIX: the original output.Replace(".", "_binary.") replaced EVERY dot in the
            // path (e.g. "./out/img.png" -> "_binary./out/img_binary.png"). Insert the suffix
            // before the file extension only.
            string binaryOutput = System.IO.Path.ChangeExtension(output, null)
                                  + "_binary" + System.IO.Path.GetExtension(output);
            img.Save(binaryOutput);

            Contour<Point> contur = img.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP);
            Emgu.CV.CvInvoke.cvDrawContours(imgS, contur, new MCvScalar(0, 0, 255), new MCvScalar(0, 0, 255), 1, 1, LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));

            contur = img.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP);

            // BUG FIX: the original loop advanced only while HNext != null and bumped the
            // counter an extra time on the first iteration, so a single contour was reported
            // as 0. Walk the HNext sibling chain and count each contour exactly once
            // (identical result for 0 or >= 2 contours).
            for (Contour<Point> c = contur; c != null; c = c.HNext)
            {
                counter++;
            }

            MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 0.8f, 0.8f);
            MCvScalar color = new MCvScalar(255, 255, 255);

            CvInvoke.cvPutText(imgS, "counter:" + counter, new Point(10, 20), ref font, color);

            imgS.Save(output);

            return counter;
        }