MemStorage is a managed wrapper around OpenCV's CvMemStorage: a growable memory pool from which dynamic structures such as Seq and Contour are allocated. Disposing the storage releases every sequence allocated from it.
Inheritance: Emgu.Util.UnmanagedObject
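The examples below all follow the same lifetime pattern: allocate a MemStorage, let FindContours or a Seq allocate from it, and dispose the storage once the sequences are no longer needed. A minimal sketch of that pattern (Emgu CV 2.x API; binaryImage is an assumed Image<Gray, Byte>):

        private static void PrintContourAreas(Image<Gray, Byte> binaryImage)
        {
            using (MemStorage storage = new MemStorage()) //storage owns every contour found below
            {
                for (Contour<Point> contours = binaryImage.FindContours(
                        Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                        Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                        storage);
                    contours != null;
                    contours = contours.HNext)
                {
                    Console.WriteLine(contours.Area);
                }
            } //the storage is freed here; none of the contours may be used afterwards
        }
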
		private void FindRectangles(Image<Gray, Byte> blackAndWhiteImage)
		{
			m_FoundRectangles.Clear();

			using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
			{
				for (Contour<Point> contours = blackAndWhiteImage.FindContours(
					Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
					Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
					storage);
					contours != null;
					contours = contours.HNext)
				{
					Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
					//Debug.WriteLine(currentContour.Area);

					if (currentContour.Area > 250) //only consider contours with area greater than 250
					{
						if (currentContour.Total == 4) //The contour has 4 vertices.
						{
							if (IsRectangle(currentContour))
							{
								m_FoundRectangles.Add(currentContour.GetMinAreaRect());
							}
						}
					}
				}
			}
		}
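
IsRectangle is not shown in this example. A plausible sketch (not the original author's code) is to require that every pair of adjacent edges of the approximated quadrilateral meets at roughly a right angle, the same test used by the FilterContours example further down:

		private static bool IsRectangle(Contour<Point> contour)
		{
			Point[] pts = contour.ToArray();
			LineSegment2D[] edges = PointCollection.PolyLine(pts, true);

			for (int i = 0; i < edges.Length; i++)
			{
				//adjacent edges must meet at roughly 90 degrees
				double angle = Math.Abs(edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
				if (angle < 80 || angle > 100)
					return false;
			}
			return true;
		}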
Example #2
        public void ContourCoordinates()
        {
            Image<Bgr, Byte> img = this.ShowCamera();
            Image<Gray, Byte> g_img = this.FilterImage(img);
            Image<Gray, Byte> r_img = new Image<Gray, Byte>(g_img.Size);
            this.h = g_img.Height; //store the image dimensions
            this.w = g_img.Width;

            using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
            {
                for (Contour<Point> contours = g_img.FindContours(
                    Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
                    Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP,
                    storage);
                    contours != null;
                    contours = contours.HNext)
                {
                    Contour<Point> contour = contours.ApproxPoly(contours.Perimeter * 0.0005, storage);

                    Point[] pts = contour.ToArray();
                    LineSegment2D[] edges = PointCollection.PolyLine(pts, false);

                    CvInvoke.cvDrawContours(r_img, contour, new MCvScalar(200), new MCvScalar(0, 200, 0), 5, -1, Emgu.CV.CvEnum.LINE_TYPE.FOUR_CONNECTED, new Point(0, 0));
                    for (int k = 0; k < pts.Length; k++)
                    {
                        //r_img.Draw(new CircleF(pts[k], 2), new Gray(255), 1);
                        this.showimg = r_img;
                        //this.Coord2d.Add(pts[k]);
                        List<Point> p = new List<Point>();
                        p.Add(pts[k]);
                        matrix.Add(p);
                    }
                }
            }
        }
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog Openfile = new OpenFileDialog();
            if (Openfile.ShowDialog() == DialogResult.OK)
            {
                Image<Bgr, byte> My_Image = new Image<Bgr, byte>(Openfile.FileName);
                Image<Gray, byte> gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> eh_gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> smooth_gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> ed_gray_image = new Image<Gray, byte>(gray_image.Size);
                Image<Bgr, byte> final_image = new Image<Bgr, byte>(Openfile.FileName);
                List<MCvBox2D> detectedLicensePlateRegionList = new List<MCvBox2D>();

                CvInvoke.cvEqualizeHist(gray_image, eh_gray_image);
                CvInvoke.cvSmooth(eh_gray_image, smooth_gray_image, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
                //CvInvoke.cvAdaptiveThreshold(smooth_gray_image, bi_gray_image, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 71, 1);
                CvInvoke.cvCanny(smooth_gray_image, ed_gray_image, 100, 50, 3);

                using (MemStorage stor = new MemStorage()) //dispose the storage once the contours have been consumed
                {
                    Contour<Point> contours = ed_gray_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
                    DetectPlate(contours, detectedLicensePlateRegionList);
                }

                for (int i = 0; i < detectedLicensePlateRegionList.Count; i++)
                {
                    final_image.Draw(detectedLicensePlateRegionList[i], new Bgr(Color.Red), 2);
                }
                imageBox1.Image = My_Image;
                imageBox2.Image = gray_image;
                imageBox3.Image = eh_gray_image;
                imageBox4.Image = smooth_gray_image;
                imageBox5.Image = ed_gray_image;
                imageBox6.Image = final_image;
            }
        }
Example #4
        /// <summary>
        /// Detect license plate from the given image
        /// </summary>
        /// <param name="img">The image to search license plate from</param>
        /// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
        /// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
        /// <param name="detectedLicensePlateRegionList">A list where the regions of license plate (defined by an MCvBox2D) are stored</param>
        /// <returns>The list of words for each license plate</returns>
        public List<List<Word>> DetectLicensePlate(
            Image<Bgr, byte> img,
            List<Image<Gray, Byte>> licensePlateImagesList,
            List<Image<Gray, Byte>> filteredLicensePlateImagesList,
            List<MCvBox2D> detectedLicensePlateRegionList)
        {
            List<List<Word>> licenses = new List<List<Word>>();

            // Convert image to gray
            using (Image<Gray, byte> gray = img.Convert<Gray, Byte>())

            // Create Canny image
            using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))

            //Create MemStorage
            using (MemStorage stor = new MemStorage())
            {
                //Run the Canny edge detector on the gray image
                CvInvoke.cvCanny(gray, canny, 130, 70, 3);

                //Find all contours in the edge image
                Contour<Point> contours = canny.FindContours(
                     Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                     Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
                     stor);

                //Examine each contour for a plate-shaped region
                FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
            }

            return licenses;
        }
Example #5
        public static IEnumerable<Contour<Point>> GetContours(Image<Gray, byte> canny)
        {
            using (var storage = new MemStorage())
            {
                var contours = canny.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_TREE, storage);

                // Note: the MemStorage lives only as long as this iterator, so callers
                // must consume or copy the yielded contours before enumeration ends.
                while (contours != null)
                {
                    yield return contours;
                    contours = contours.HNext;
                }
            }
        }
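
Because GetContours is a lazy iterator, the MemStorage it opens is disposed only when enumeration completes, and every yielded Contour points into that storage. A hedged usage sketch (cannyImage is an assumed Image<Gray, byte>; requires using System.Linq): materialize whatever you need before the enumerator goes away.

        //copy the data out while the iterator's storage is still alive
        List<Rectangle> boundingBoxes = GetContours(cannyImage)
            .Select(c => c.BoundingRectangle)
            .ToList();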
        private static Image<Gray, Byte> FilterPlate(Image<Gray, Byte> plate)
        {
            Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));

            Image<Gray, Byte> plateMask = new Image<Gray, byte>(plate.Size);
            Image<Gray, Byte> plateCanny = plate.Canny(new Gray(100), new Gray(50));
            using (MemStorage stor = new MemStorage())
            {
                plateMask.SetValue(255.0);
                for (
                   Contour<Point> contours = plateCanny.FindContours(
                      Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                      Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                      stor);
                   contours != null; contours = contours.HNext)
                {
                    Rectangle rect = contours.BoundingRectangle;
                    if (rect.Height > (plate.Height >> 1))
                    {
                        rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                        rect.Intersect(plate.ROI);

                        plateMask.Draw(rect, new Gray(0.0), -1);
                    }
                }

                thresh.SetValue(0, plateMask);
            }

            thresh._Erode(1);
            thresh._Dilate(1);

            return thresh;
        }
Example #7
        public HandDetector(string hsvFnm, int width, int height)
        {
            Size scale = new Size(width/IMG_SCALE, height/IMG_SCALE);
            scaleImg = new Image<Bgr, Byte>(scale);
            hsvImg = new Image<Hsv, Byte>(scale);
            imgThreshed = new Image<Gray, Byte>(scale);

            // storage for contour, hull, and defect calculations by OpenCV
            contourStorage = new MemStorage();
            approxStorage = new MemStorage();
            hullStorage = new MemStorage();
            defectsStorage = new MemStorage();

            msgFont = new Font("SansSerif", 18, FontStyle.Bold, GraphicsUnit.Pixel);

            cogPt = new Point();
            fingerTips = new List<Point>();
            namedFingers = new List<FingerNameClass.FingerName>();

            tipPts = new Point[MAX_POINTS];   // coords of the finger tips
            foldPts = new Point[MAX_POINTS];  // coords of the skin folds between fingers
            depths = new float[MAX_POINTS];   // distances from tips to folds

            hueLower = 0;
            hueUpper = 20;
            satLower = 50;
            satUpper = 255;
            briLower = 0;
            briUpper = 255;
        }
Example #8
 /// <summary>
 /// Create a new HOGDescriptor
 /// </summary>
 public HOGDescriptor()
 {
    _ptr = CvHOGDescriptorCreateDefault();
    _rectStorage = new MemStorage();
    _rectSeq = new Seq<Rectangle>(_rectStorage);
    _vector = new VectorOfFloat();
 }
Example #9
 /// <summary>
 /// Creates a child memory storage that is similar to a simple memory storage except for the differences in the memory allocation/deallocation mechanism. When a child storage needs a new block to add to the block list, it tries to get this block from the parent. The first unoccupied parent block available is taken and excluded from the parent block list. If no blocks are available, the parent either allocates a block or borrows one from its own parent, if any. In other words, a chain, or a more complex structure, of memory storages where every storage is a child/parent of another is possible. When a child storage is released or even cleared, it returns all blocks to the parent. In all other respects, the child storage behaves the same as a simple storage.
 /// </summary>
 /// <returns>Child MemStorage</returns>
 public MemStorage CreateChildMemStorage()
 {
      IntPtr childStoragePtr = CvInvoke.cvCreateChildMemStorage(_ptr);
      MemStorage childStorage = new MemStorage(childStoragePtr);
      //_childStorageList.Add(childStorage);
      return childStorage;
 }
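
A sketch of the pattern this enables (assumed usage, not from the source): put scratch allocations in a child storage, so that disposing the child hands its blocks back to the parent for reuse.

  using (MemStorage parent = new MemStorage())
  using (MemStorage scratch = parent.CreateChildMemStorage())
  {
      Seq<PointF> temp = new Seq<PointF>(scratch); //temporary sequence allocated from the child
      temp.Push(new PointF(1f, 2f));
  } //disposing 'scratch' returns its blocks to 'parent'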
Example #10
 public SignDetector(Image<Bgr, Byte> stopSignModel)
 {
     _detector2 = new SURFDetector(500, false);
     using (Image<Gray, Byte> redMask = GetColorPixelMask(stopSignModel))
     {
         try
         {
             _tracker2 = new Features2DTracker<float>(_detector2.DetectFeatures(redMask, null));
         }
             catch { } //if feature detection fails, _tracker2 simply stays null
     }
     _octagonStorage2 = new MemStorage();
     _octagon2 = new Contour<Point>(_octagonStorage2);
     _octagon2.PushMulti(new Point[] { 
         //hexagon
         new Point(1, 0),
         new Point(2, 0),
         new Point(3, 1),
         new Point(2, 2),
         new Point(1, 2),
         new Point(0, 1)},
         //octagon
     //new Point(1, 0),
     //new Point(2, 0),
     //new Point(3, 1),
     //new Point(3, 2),
     //new Point(2, 3),
     //new Point(1, 3),
     //new Point(0, 2),
     //new Point(0, 1)},
        Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Example #11
        /// <summary>
        /// Create a new HOGDescriptor using the specific parameters
        /// </summary>
        public HOGDescriptor(
            Size winSize,
            Size blockSize,
            Size blockStride,
            Size cellSize,
            int nbins,
            int derivAperture,
            double winSigma,
            double L2HysThreshold,
            bool gammaCorrection)
        {
            _ptr = CvHOGDescriptorCreate(
                ref winSize,
                ref blockSize,
                ref blockStride,
                ref cellSize,
                nbins,
                derivAperture,
                winSigma,
                0, //histogramNormType: 0 = L2Hys
                L2HysThreshold,
                gammaCorrection);

            _rectStorage = new MemStorage();
            _rectSeq = new Seq<Rectangle>(_rectStorage);
        }
Example #12
 //public Bitmap _grayImg;
 //public Bitmap _canndyImg;
 public ArrowSignDetector()
 {
     _stor = new MemStorage();
     _defaultStor = new MemStorage();
     _tempStor = new MemStorage();
     //   _defaultContour = FindDefault();
 }
Example #13
        public Form1()
        {
            InitializeComponent();

            grabber = new Emgu.CV.Capture("C:/Users/L33549.CITI/Desktop/a.avi");
            grabber.QueryFrame();
            frameWidth = grabber.Width;
            frameHeight = grabber.Height;
            //detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);
            hsv_min = new Hsv(0, 45, 0);
            hsv_max = new Hsv(20, 255, 255);
            YCrCb_min = new Ycc(0, 129, 40);
            YCrCb_max = new Ycc(255, 185, 135);
            box = new MCvBox2D();
            ellip = new Ellipse();

            contourStorage = new MemStorage();
            approxStorage = new MemStorage();
            hullStorage = new MemStorage();
            defectsStorage = new MemStorage();

            tipPts = new Point[MAX_POINTS];   // coords of the finger tips
            foldPts = new Point[MAX_POINTS];  // coords of the skin folds between fingers
            depths = new float[MAX_POINTS];   // distances from tips to folds
            cogPt = new Point();
            fingerTips = new List<Point>();
            face = new CascadeClassifier("C:/Users/L33549.CITI/Desktop/AbuseAnalysis/HandGestureRecognition/HandGestureRecognition/HandGestureRecognition/haar/Original/haarcascade_hand.xml");

            Application.Idle += new EventHandler(FrameGrabber);

            /*foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null != this.sensor)
            {
                // Turn on the color stream to receive color frames
                this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

                // Allocate space to put the pixels we'll receive
                this.colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];

                // This is the bitmap we'll display on-screen
                this.colorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Set the image we display to point to the bitmap where we'll put the image data
                //this.Image.Source = this.colorBitmap;

                // Add an event handler to be called whenever there is new color frame data
                this.sensor.ColorFrameReady += this.SensorColorFrameReady;

                // Start the sensor!
                this.sensor.Start();
            }*/
        }
Example #14
      private void ProcessFrame(object sender, EventArgs e)
      {
         using (MemStorage storage = new MemStorage()) //create storage for motion components
         {
            Image<Bgr, Byte> image = _capture.QuerySmallFrame().PyrUp(); //reduce noise from the image
            capturedImageBox.Image = image;

            //update the motion history
            _motionHistory.Update(image.Convert<Gray, Byte>());

            #region get a copy of the motion mask and enhance its color
            Image<Gray, Byte> motionMask = _motionHistory.Mask;
            double[] minValues, maxValues;
            System.Drawing.Point[] minLoc, maxLoc;
            motionMask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
            motionMask._Mul(255.0 / maxValues[0]);
            #endregion

            //create the motion image 
            Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
            //display the motion pixels in blue (first channel)
            motionImage[0] = motionMask;

            //Threshold to define a motion area, reduce the value to detect smaller motion
            double minArea = 100;

            storage.Clear(); //clear the storage
            Seq<MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);

            //iterate through each of the motion components
            foreach (MCvConnectedComp comp in motionComponents)
            {
               //reject components whose area is too small
               if (comp.area < minArea) continue;

               // find the angle and motion pixel count of the specific area
               double angle, motionPixelCount;
               _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);

               //reject areas that contain too little motion
               if (motionPixelCount < comp.area * 0.05) continue;

               //Draw each individual motion in red
               DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));
            }

            // find and draw the overall motion angle
            double overallAngle, overallMotionPixelCount;
            _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);
            DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));

            //Display the amount of motions found on the current image
            UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));

            //Display the image of the motion
            motionImageBox.Image = motionImage;
         }
      }
Example #15
 /// <summary>
 /// Detect the Lepetit keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract Lepetit keypoints</param>
 /// <param name="maxCount">The maximum number of keypoints to be extracted</param>
 /// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
 /// <returns>The array of Lepetit keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, int maxCount, bool scaleCoords)
 {
      using (MemStorage stor = new MemStorage())
      {
          Seq<MKeyPoint> seq = new Seq<MKeyPoint>(stor);
          CvLDetectorDetectKeyPoints(ref this, image, seq.Ptr, maxCount, scaleCoords);
          return seq.ToArray();
      }
 }
Example #16
 /// <summary>
 /// Return the default people detector
 /// </summary>
 /// <returns>the default people detector</returns>
 public static float[] GetDefaultPeopleDetector()
 {
      using (MemStorage stor = new MemStorage())
      {
          Seq<float> desc = new Seq<float>(stor);
          CvHOGDescriptorPeopleDetectorCreate(desc);
          return desc.ToArray();
      }
 }
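
Typical use of the returned vector, following Emgu's pedestrian-detection sample (frame is an assumed Image<Bgr, Byte>):

  using (HOGDescriptor hog = new HOGDescriptor())
  {
      hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
      Rectangle[] people = hog.DetectMultiScale(frame);
      foreach (Rectangle person in people)
          frame.Draw(person, new Bgr(Color.Red), 2);
  }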
Example #17
 private void getContours()
 {
      using (MemStorage store = new MemStorage())
      {
          for (Contour<Point> contours1 = this.img.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, store); contours1 != null; contours1 = contours1.HNext)
          {
              Rectangle r = CvInvoke.cvBoundingRect(contours1, 1);
              this.rects.Add(r);
          }
      }
 }
Example #18
 /// <summary>
 /// Hough Line Transform, as in OpenCV (EmguCV does not wrap this function as it should)
 /// </summary>
 /// <param name="img">Binary image</param>
 /// <param name="type">type of hough transform</param>
 /// <param name="threshold">how many votes is needed to accept line</param>
 /// <returns>Lines in theta/rho format</returns>
 public static PointF[] HoughLineTransform(Image<Gray, byte> img, Emgu.CV.CvEnum.HOUGH_TYPE type, int threshold)
 {
     using (MemStorage stor = new MemStorage())
     {
          IntPtr linePtr = CvInvoke.cvHoughLines2(img, stor.Ptr, type, 5, Math.PI / 180 * 15, threshold, 0, 0); //rho step: 5 px, theta step: 15 degrees
          Seq<PointF> seq = new Seq<PointF>(linePtr, stor);
          return seq.ToArray();
     }
 }
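
The returned PointF values encode (rho, theta) line parameters. A sketch of turning one result into a drawable segment (standard Hough geometry; binaryImg is an assumed Image<Gray, byte>):

  PointF[] lines = HoughLineTransform(binaryImg, Emgu.CV.CvEnum.HOUGH_TYPE.CV_HOUGH_STANDARD, 60);
  foreach (PointF line in lines)
  {
      double rho = line.X, theta = line.Y;
      double cosT = Math.Cos(theta), sinT = Math.Sin(theta);
      PointF foot = new PointF((float)(rho * cosT), (float)(rho * sinT)); //closest point to the origin
      Point p1 = new Point((int)(foot.X - 1000 * sinT), (int)(foot.Y + 1000 * cosT));
      Point p2 = new Point((int)(foot.X + 1000 * sinT), (int)(foot.Y - 1000 * cosT));
      binaryImg.Draw(new LineSegment2D(p1, p2), new Gray(255), 1); //extend 1000 px both ways
  }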
Example #19
 /// <summary>
 /// Detect planar object from the specific image
 /// </summary>
 /// <param name="image">The image where the planar object will be detected</param>
 /// <param name="h">The homography matrix which will be updated</param>
 /// <returns>The four corners of the detected region</returns>
 public PointF[] Detect(Image<Gray, Byte> image, HomographyMatrix h)
 {
      using (MemStorage stor = new MemStorage())
      {
          Seq<PointF> corners = new Seq<PointF>(stor);
          CvPlanarObjectDetectorDetect(_ptr, image, h, corners);
          return corners.ToArray();
      }
 }
Example #20
 /// <summary>
 /// Get the model points stored in this detector
 /// </summary>
 /// <returns>The model points stored in this detector</returns>
 public MKeyPoint[] GetModelPoints()
 {
      using (MemStorage stor = new MemStorage())
      {
          Seq<MKeyPoint> modelPoints = new Seq<MKeyPoint>(stor);
          CvPlanarObjectDetectorGetModelPoints(_ptr, modelPoints);
          return modelPoints.ToArray();
      }
 }
Example #21
 /// <summary>
 /// Detect STAR key points from the image
 /// </summary>
 /// <param name="image">The image to extract key points from</param>
 /// <returns>The STAR key points of the image</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image)
 {
      using (MemStorage stor = new MemStorage())
      {
          Seq<MKeyPoint> seq = new Seq<MKeyPoint>(stor);
          CvStarDetectorDetectKeyPoints(ref this, image, seq.Ptr);
          return seq.ToArray();
      }
 }
        //:::::::::::: Method to calculate the convex hull ::::::::::::

        public List<object> HandConvexHull(Image<Gray, Byte> frame, Rectangle Roi)
        {
            List<object> ListReturn; //populated by GetFingers below
            Image<Gray, Byte> BinaryImage;
            //PointF centerPalm; 

            BinaryImage = frame.Copy(Roi); 
            BinaryImage = binaryThresholdNiBlack(BinaryImage);
            

            using (MemStorage storage = new MemStorage())
            {
                Double result1 = 0;
                Double result2 = 0;
                
                Contour<System.Drawing.Point> contours = BinaryImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
                Contour<System.Drawing.Point> biggestContour = null;

                while (contours != null)
                {
                    result1 = contours.Area;
                    if (result1 > result2)
                    {
                        result2 = result1;
                        biggestContour = contours;
                    }
                    contours = contours.HNext;
                } 

                if (biggestContour != null)
                {
                    Contour<System.Drawing.Point> concurrentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
                    biggestContour = concurrentContour; 

                    Hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_COUNTER_CLOCKWISE);
                    defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_COUNTER_CLOCKWISE);
                    defectsArray = defects.ToArray(); 

                    box = biggestContour.GetMinAreaRect();
                    points = box.GetVertices(); 

                    contourArea=result2;
                    convexHullArea = Hull.Area; 
                }

                //BinaryImage.Draw(Hull, new Gray(155), 3);
            }
            
            ListReturn = GetFingers(BinaryImage); 

            //ListReturn.Add(centerPalm);
            ListReturn.Add(contourArea); 
            ListReturn.Add(convexHullArea);  

            return ListReturn;
        }//end HandConvexHull  
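
GetFingers is not shown here. A hedged sketch of one common follow-up on the defectsArray computed above: treat each sufficiently deep convexity defect as a valley between two extended fingers (the depth threshold and the cap are assumptions):

        private static int CountFingers(MCvConvexityDefect[] defectsArray, float minDepth)
        {
            int valleys = 0;
            foreach (MCvConvexityDefect defect in defectsArray)
            {
                //a deep defect is a valley between two extended fingers
                if (defect.Depth > minDepth)
                    valleys++;
            }
            int fingers = valleys + 1; //n valleys separate roughly n + 1 fingers
            return fingers > 5 ? 5 : fingers;
        }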
Example #23
        /// <summary>
        /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles. 
        /// The function scans the image several times at different scales. Each time it considers overlapping regions in the image. 
        /// It may also apply some heuristics to reduce the number of analyzed regions, such as Canny pruning. 
        /// After it has collected the candidate rectangles (regions that passed the classifier cascade), it groups them and returns a sequence of average rectangles for each large enough group. 
        /// </summary>
        /// <param name="image">The image where the objects are to be detected from</param>
        /// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%</param>
        /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure</param>
        /// <param name="minSize">Minimum window size. Use Size.Empty for default, where it is set to the size of samples the classifier has been trained on (~20x20 for face detection)</param>
        /// <param name="maxSize">Maxumum window size. Use Size.Empty for default, where the parameter will be ignored.</param>
        /// <returns>The objects detected, one array per channel</returns>
        public Rectangle[] DetectMultiScale(Image<Gray, Byte> image, double scaleFactor, int minNeighbors, Size minSize, Size maxSize)
        {
            using (MemStorage stor = new MemStorage())
            {
                Seq<Rectangle> rectangles = new Seq<Rectangle>(stor);

                CvInvoke.CvCascadeClassifierDetectMultiScale(_ptr, image, rectangles, scaleFactor, minNeighbors, 0, minSize, maxSize);
                return rectangles.ToArray();
            }
        }
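
A usage sketch (the cascade file name is a placeholder; gray and frame are assumed Image<Gray, Byte> and Image<Bgr, Byte> locals):

        CascadeClassifier faceCascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
        Rectangle[] faces = faceCascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
        foreach (Rectangle face in faces)
            frame.Draw(face, new Bgr(Color.Blue), 2);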
Example #24
        public DetectorResult Process(Image<Bgr, byte> rawFrame, Image<Gray, byte> grayFrame)
        {
            Image<Bgr, byte> contourImage = null;
            if (rawFrame != null)
            {
                List<Point[]> polygon = new List<Point[]>();      // to draw the perimeter

                Image<Gray, byte> gray = rawFrame.Convert<Gray, byte>();            // convert source to gray
                Image<Gray, byte> thresh = gray.PyrDown().PyrUp();                  // down/up-sampling suppresses noise so edges come out cleaner

                using (Image<Gray, Byte> mask = new Image<Gray, byte>(thresh.Size))
                using (Image<Gray, byte> cannyImg = thresh.Canny(new Gray(10), new Gray(50)))
                using (Image<Gray, byte> dilateImg = cannyImg.Dilate(1))

                using (MemStorage stor = new MemStorage())
                {
                    mask.SetValue(255.0);
                    for (
                       Contour<Point> contours = dilateImg.FindContours(
                          Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                          Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                          stor);
                       contours != null; contours = contours.HNext)
                    {
                        Rectangle rect = contours.BoundingRectangle;
                        int area = rect.Height * rect.Width;
                        if (area > 30000)
                        {
                            rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                            rect.Intersect(gray.ROI);
                            mask.Draw(rect, new Gray(0.0), -1);

                            polygon.Add(contours.ToArray());
                        }
                    }

                    thresh.SetValue(0, mask);
                }

                contourImage = new Image<Bgr, byte>(gray.Size); //blank canvas to draw the contour polylines on

                foreach (Point[] points in polygon)
                    contourImage.DrawPolyline(points, true, new Bgr(Color.Red), 2);
            }
            var result = new DetectorResult()
                             {
                                 Confidence = 100,
                                 GrayImage = grayFrame,
                                 ProcessedImage = contourImage,
                                 RawImage = rawFrame
                             };
            return result;
        }
 /// <summary>
 /// Find rectangular regions in the given image that are likely to contain objects and corresponding confidence levels
 /// </summary>
 /// <param name="image">The image to detect objects in</param>
 /// <param name="overlapThreshold">Threshold for the non-maximum suppression algorithm, Use default value of 0.5</param>
 /// <returns>Array of detected objects</returns>
 public MCvObjectDetection[] Detect(Image<Bgr, Byte> image, float overlapThreshold)
 {
      using (MemStorage stor = new MemStorage())
      {
          IntPtr seqPtr = CvInvoke.cvLatentSvmDetectObjects(image, Ptr, stor, overlapThreshold, -1);
          if (seqPtr == IntPtr.Zero)
              return new MCvObjectDetection[0];
          Seq<MCvObjectDetection> seq = new Seq<MCvObjectDetection>(seqPtr, stor);
          return seq.ToArray();
      }
 }
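
A usage sketch (detector is an assumed LatentSvmDetector; the score threshold is arbitrary):

  MCvObjectDetection[] detections = detector.Detect(image, 0.5f);
  foreach (MCvObjectDetection detection in detections)
  {
      if (detection.score > -0.5f) //keep only reasonably confident detections
          image.Draw(detection.Rect, new Bgr(Color.Yellow), 2);
  }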
Example #26
        public List<SignResult> DetectStopSign(Image<Bgr, byte> image, out Image<Gray, byte> filteredImage)
        {
            filteredImage = GetFilteredImage(image);
            List<SignResult> results = new List<SignResult>();

            using (MemStorage storage = new MemStorage())
            {
                Contour<Point> contours = filteredImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage);
                results = FindSign(image, contours);
            }

            return results;
        }
        public static List<MCvBox2D> FilterContours(Image<Gray, Byte> cannyEdges, double minArea, double approxPoly)
        {
            List<MCvBox2D> filteredContours = new List<MCvBox2D>();

            using (MemStorage storage = new MemStorage())
            {
                for (Contour<System.Drawing.Point> contours = cannyEdges.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext)
                {
                    Contour<System.Drawing.Point> currentContour = contours.ApproxPoly(approxPoly, storage);
                    if (currentContour.Total == 4 && currentContour.Convex && currentContour.Area > minArea)
                    {
                        if (currentContour.BoundingRectangle.Width > currentContour.BoundingRectangle.Height)
                        {
                            bool isRectangle = true;
                            System.Drawing.Point[] pts = currentContour.ToArray();
                            LineSegment2D[] edges = Emgu.CV.PointCollection.PolyLine(pts, true);

                            //a rectangle needs all four corner angles close to 90 degrees
                            for (int i = 0; i < edges.Length; i++)
                            {
                                double angle = Math.Abs(edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));

                                if (angle < 85 || angle > 95)
                                {
                                    isRectangle = false;
                                    break;
                                }
                            }
                            if (isRectangle)
                            {
                                filteredContours.Add(currentContour.GetMinAreaRect());
                            }
                        }
                    }
                }
            }

            if (filteredContours.Count > 0)
            {
                //filter out boxes with similar centers close by
                filteredContours = FilterSimilar(filteredContours);

                //sort highest to top
                filteredContours = SortAscending(filteredContours);
            }

            return filteredContours;
        }
Example #28
    public Marker(SerializationInfo info, StreamingContext context)
    {
      _marker = (Image<Bgr, byte>)info.GetValue("marker", typeof(Image<Bgr, byte>));
      _marker_size = 0;
      _marker_length = (double)info.GetValue("markerLength", typeof(double));
      _max_error_normed = (double)info.GetValue("maxError", typeof(double));
      _binary_threshold = (int)info.GetValue("binaryThreshold", typeof(int));

      _sync = new object();
      _warp_matrix = new Matrix<double>(3, 3);
      _contour_storage = new MemStorage();
      UpdateMarker();
      this.ObjectPoints = UpdateObjectPoints();
    }
Example #29
 /// <summary>
 /// Default constructor
 /// </summary>
 public Marker() {
   _marker = null;
   _binary_marker = null;
   _warped = null;
   _tmp = null;
   _marker_size = 0;
   _marker_length = 50;
   _max_error_normed = 0.4;
   _binary_threshold = 60;
   _warp_matrix = new Matrix<double>(3, 3);
   _contour_storage = new MemStorage();
   _sync = new object();
   this.ObjectPoints = UpdateObjectPoints();
 }
 /// <summary>
 /// Start a Delaunay triangulation in the specified region of interest.
 /// </summary>
 /// <param name="roi">The region of interest of the triangulation</param>
 public PlanarSubdivision(ref Rectangle roi)
 {
     _storage = new MemStorage();
     _ptr     = CvInvoke.cvCreateSubdivDelaunay2D(roi, _storage);
     _roi     = roi;
 }
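
A sketch of driving the subdivision after construction (the point values are made up; the API follows Emgu's PlanarSubdivision example):

  Rectangle roi = new Rectangle(0, 0, 400, 400);
  using (PlanarSubdivision subdivision = new PlanarSubdivision(ref roi))
  {
      subdivision.Insert(new PointF(100f, 150f));
      subdivision.Insert(new PointF(250f, 90f));
      subdivision.Insert(new PointF(310f, 280f));
      Triangle2DF[] triangles = subdivision.GetDelaunayTriangles(); //triangulation of the inserted points
  }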
Example #31
 /// <summary>
 /// Create a contour using the specific <paramref name="storage"/>
 /// </summary>
 /// <param name="storage">The storage to be used</param>
 public Contour(MemStorage storage)
     : this((int)CvEnum.SeqType.Polygon, storage)
 {
 }
Example #32
 /// <summary>
 /// Get the minimum area rectangle for this point sequence
 /// </summary>
 /// <param name="stor">The temporary storage to use</param>
 /// <returns>The minimum area rectangle</returns>
 public MCvBox2D GetMinAreaRect(MemStorage stor)
 {
     return(CvInvoke.cvMinAreaRect2(Ptr, stor == null ? IntPtr.Zero : stor.Ptr));
 }
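
A usage sketch (contour and image are assumed locals):

  using (MemStorage stor = new MemStorage())
  {
      MCvBox2D box = contour.GetMinAreaRect(stor);
      image.Draw(box, new Bgr(Color.Red), 2); //draw the rotated rectangle on an Image<Bgr, Byte>
  }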
Example #33
 /// <summary>
 /// Approximates a curve and returns the approximation result
 /// </summary>
 /// <param name="accuracy">The desired approximation accuracy</param>
 /// <param name="storage"> The storage used by the resulting sequence. If null, the storage of this sequence is used.</param>
 /// <returns>The approximated contour</returns>
 public Seq <T> ApproxPoly(double accuracy, MemStorage storage)
 {
     return(ApproxPoly(accuracy, 0, storage));
 }
Example #34
        /// <summary>
        /// Get the convex hull of this point sequence
        /// </summary>
        /// <param name="orientation">The orientation of the convex hull</param>
        /// <param name="stor">The storage for the resulting sequence</param>
        /// <returns>The resulting convex hull</returns>
        public Seq <T> GetConvexHull(CvEnum.ORIENTATION orientation, MemStorage stor)
        {
            IntPtr hull = CvInvoke.cvConvexHull2(Ptr, stor, orientation, 1);

            return(new Seq <T>(hull, stor));
        }
Example #35
 /// <summary>
 /// Create a sequence from the unmanaged pointer and the storage used by the pointer
 /// </summary>
 /// <param name="seq">The unmanaged sequence</param>
 /// <param name="storage">The memory storage this sequence utilize</param>
 public Seq(IntPtr seq, MemStorage storage)
 {
     _ptr  = seq;
     _stor = storage;
 }
Example #36
 /// <summary>
 /// Create an empty Oct-Tree
 /// </summary>
 public Octree()
 {
     _ptr      = CvOctreeCreate();
     _storage  = new MemStorage();
     _pointSeq = new Seq <MCvPoint3D32f>(_storage);
 }
Example #37
 /// <summary>
 /// Create a contour using the specific <paramref name="storage"/>
 /// </summary>
 /// <param name="storage">The storage to be used</param>
 public Contour(MemStorage storage)
     : this((int)CvEnum.SEQ_TYPE.CV_SEQ_POLYGON, storage)
 {
 }
Example #38
 /// <summary>
 /// Creates a sequence that represents the specified slice of the input sequence. The new sequence either shares the elements with the original sequence or has its own copy of the elements. So if one needs to process a part of a sequence but the processing function does not have a slice parameter, the required sub-sequence can be extracted using this function.
 /// </summary>
 /// <param name="slice">The part of the sequence to extract</param>
 /// <param name="storage">The destination storage to keep the new sequence header and the copied data if any. If it is NULL, the function uses the storage containing the input sequence.</param>
 /// <param name="copy_data">The flag that indicates whether to copy the elements of the extracted slice </param>
 /// <returns>A sequence that represents the specified slice of the input sequence</returns>
 public Seq <T> Slice(MCvSlice slice, MemStorage storage, bool copy_data)
 {
     return(new Seq <T>(CvInvoke.cvSeqSlice(Ptr, slice, storage.Ptr, copy_data), storage));
 }
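
A sketch (assumed values) that copies the first three elements of a sequence into a new one:

  using (MemStorage stor = new MemStorage())
  {
      Seq<int> seq = new Seq<int>(stor);
      seq.PushMulti(new int[] { 1, 2, 3, 4, 5 }, Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
      Seq<int> firstThree = seq.Slice(new MCvSlice(0, 3), stor, true); //copy_data = true
  }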
Example #39
 /// <summary>
 /// Create a contour of the specific kind, type and flag
 /// </summary>
 /// <param name="kind">The kind of the sequence</param>
 /// <param name="eltype">The type of the sequence</param>
 /// <param name="flag">The flag of the sequence</param>
 /// <param name="stor">The storage</param>
 public Contour(CvEnum.SEQ_ELTYPE eltype, CvEnum.SEQ_KIND kind, CvEnum.SEQ_FLAG flag, MemStorage stor)
     : this(((int)kind | (int)eltype | (int)flag), stor)
 {
 }
Example #40
 /// <summary>
 /// Create a contour from the specific IntPtr and storage
 /// </summary>
 /// <param name="ptr">The unmanaged pointer to the sequence</param>
 /// <param name="storage">The storage used by this contour</param>
 public Contour(IntPtr ptr, MemStorage storage)
     : base(ptr, storage)
 {
 }
Example #41
 /// <summary>
 /// Approximates a curve and returns the approximation result.
 /// </summary>
 /// <param name="accuracy">The desired approximation accuracy</param>
 /// <param name="storage">The storage the resulting sequence uses</param>
 /// <returns>The approximated contour</returns>
 public new Contour <T> ApproxPoly(double accuracy, MemStorage storage)
 {
     return(ApproxPoly(accuracy, 0, storage));
 }
Example #42
 /// <summary>
 /// Create a contour of the specific kind, type and flag
 /// </summary>
 /// <param name="kind">The kind of the sequence</param>
 /// <param name="eltype">The type of the sequence</param>
 /// <param name="flag">The flag of the sequence</param>
 /// <param name="stor">The storage</param>
 public Seq(CvEnum.SeqEltype eltype, CvEnum.SeqKind kind, CvEnum.SeqFlag flag, MemStorage stor)
     : this(((int)kind | (int)eltype | (int)flag), stor)
 {
 }
Example #43
 /// <summary>
 /// Create a sequence using the specific <paramref name="storage"/>
 /// </summary>
 /// <param name="storage">the storage</param>
 public Seq(MemStorage storage)
     : this(0, storage)
 {
 }
Example #44
        public static Image<Gray, byte> Filter(Image<Bgra, byte> original)
        {
            var gray = original.Convert<Gray, byte>();
            var binary = new Image<Gray, byte>(new OtsuThreshold().Apply(gray.Bitmap));
            var canny = new Image<Gray, byte>(new CannyEdgeDetector().Apply(gray.Bitmap));
            var list = new List<Rectangle>();
            using (MemStorage stor = new MemStorage())
            {
                for (
                    Contour<Point> contours = canny.FindContours(
                        CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                        RETR_TYPE.CV_RETR_EXTERNAL,
                        stor);
                    contours != null;
                    contours = contours.HNext)
                {
                    Rectangle rect = contours.BoundingRectangle;
                    list.Add(rect);
                }
            }
            //optional: fill in tiny contours before the morphological clean-up below
            //list.Where(rect => rect.Height * rect.Width < 100)
            //    .ToList().ForEach(rect => binary.Draw(rect, new Gray(1.0), -1));

            binary._Erode(1);
            binary._Dilate(1);
            return binary;
        }