public Form1()
 {
     InitializeComponent();

     // Skin-colour segmentation thresholds in HSV space.
     hsv_min = new Hsv(0, 45, 0);
     hsv_max = new Hsv(20, 255, 255);

     // Skin-colour segmentation thresholds in YCrCb space.
     YCrCb_min = new Ycc(0, 131, 80);
     YCrCb_max = new Ycc(255, 185, 135);

     // Moment structure reused by the frame-processing code.
     mv = new MCvMoments();
 }
 public Form1()
 {
     InitializeComponent();
     hsv_min = new Hsv(0, 45, 0);
     hsv_max = new Hsv(20, 255, 255);
     YCrCb_min = new Ycc(0, 131, 80);
     YCrCb_max = new Ycc(255, 185, 135);
     mv = new MCvMoments();
     pt.X = loaded.Width / 2;
     pt.Y = loaded.Height / 2;
 }
Example #3 (score: 0)
 private static extern void cveHuMoments(ref MCvMoments moments, IntPtr huMoments);
Example #4 (score: 0)
 /// <summary>
 /// Calculates the seven Hu invariants from the supplied moment structure.
 /// </summary>
 /// <param name="moments">The moment state structure to derive the invariants from.</param>
 /// <param name="hu">Output array that receives the seven Hu moments.</param>
 public static void HuMoments(MCvMoments moments, IOutputArray hu)
 {
    using (OutputArray oaHu = hu.GetOutputArray())
    {
       cveHuMoments(ref moments, oaHu);
    }
 }
Example #5 (score: 0)
 /// <summary>
 /// Native interop: retrieves the normalized central moment of the given orders
 /// from a previously computed moment state structure.
 /// </summary>
 /// <param name="moments">The moment state structure.</param>
 /// <param name="xOrder">X order of the retrieved moment.</param>
 /// <param name="yOrder">Y order of the retrieved moment.</param>
 public static extern double cvGetNormalizedCentralMoment(
     ref MCvMoments moments,
     int xOrder,
     int yOrder);
Example #6 (score: 0)
 /// <summary>
 /// Native interop: retrieves the spatial moment of the given orders
 /// from a previously computed moment state structure.
 /// </summary>
 /// <param name="moments">The moment state structure.</param>
 /// <param name="xOrder">X order of the retrieved moment.</param>
 /// <param name="yOrder">Y order of the retrieved moment.</param>
 public static extern double cvGetSpatialMoment(
     ref MCvMoments moments,
     int xOrder,
     int yOrder);
Example #7 (score: 0)
 /// <summary>
 /// Native interop: computes spatial and central moments of an image or polygon.
 /// </summary>
 /// <param name="arr">Pointer to the image or point sequence.</param>
 /// <param name="binaryImage">If true, all non-zero pixels are treated as 1s (images only).</param>
 /// <param name="moments">Receives the computed moment state structure.</param>
 private static extern void cveMoments(
    IntPtr arr,
    [MarshalAs(CvInvoke.BoolMarshalType)]
    bool binaryImage,
    ref MCvMoments moments);
Example #8 (score: 0)
 /// <summary>
 /// Calculates spatial and central moments up to the third order and returns them.
 /// The moments can then be used to derive the gravity center of the shape, its area,
 /// main axes and various shape characteristics including the 7 Hu invariants.
 /// </summary>
 /// <param name="arr">Image (1-channel or 3-channel with COI set) or polygon (CvSeq of points or a vector of points)</param>
 /// <param name="binaryImage">(For images only) If the flag is true, all the zero pixel values are treated as zeroes, all the others are treated as 1s</param>
 /// <returns>The moment</returns>
 public static MCvMoments Moments(IInputArray arr, bool binaryImage = false)
 {
    MCvMoments result = new MCvMoments();
    using (InputArray iaArr = arr.GetInputArray())
    {
       cveMoments(iaArr, binaryImage, ref result);
    }
    return result;
 }
        /// <summary>
        /// Extracts the centre of gravity and the axis angle from the given contour and
        /// draws the contour's bounding rectangle onto the image. Updates the cogPt and
        /// contourAxisAngle fields.
        /// </summary>
        /// <param name="bigContour">Contour to analyse (typically the hand outline).</param>
        /// <param name="scale">Scale factor applied to the computed centre coordinates.</param>
        /// <param name="im">Image the bounding rectangle is drawn on.</param>
        private void extractContourInfo(Contour<Point> bigContour, int scale, Image<Bgr, Byte> im)
        {
            MCvMoments moments = new MCvMoments();
            CvInvoke.cvMoments(bigContour, ref moments, 1);

            // Centre of gravity = (m10 / m00, m01 / m00).
            double m00 = CvInvoke.cvGetSpatialMoment(ref moments, 0, 0);
            double m10 = CvInvoke.cvGetSpatialMoment(ref moments, 1, 0);
            double m01 = CvInvoke.cvGetSpatialMoment(ref moments, 0, 1);

            if (m00 != 0)
            { // calculate center
                int xCenter = (int)Math.Round(m10 / m00) * scale;
                int yCenter = (int)Math.Round(m01 / m00) * scale;
                cogPt = new Point(xCenter, yCenter);

                // NOTE(review): rect.Height is used as the Size width and rect.Width as the
                // Size height — looks swapped; confirm this is intentional.
                Size s = new Size(bigContour.MCvContour.rect.Height, bigContour.MCvContour.rect.Width);
                Rectangle rect = new Rectangle(cogPt, s);
                im.Draw(rect, new Bgr(Color.Red), 2);
            }

            // Contour orientation from the second-order central moments.
            double m11 = CvInvoke.cvGetCentralMoment(ref moments, 1, 1);
            double m20 = CvInvoke.cvGetCentralMoment(ref moments, 2, 0);
            double m02 = CvInvoke.cvGetCentralMoment(ref moments, 0, 2);
            contourAxisAngle = calculateTilt(m11, m20, m02); // deal with hand contour pointing downwards

            // Uses fingertip information generated on the last update of the hand,
            // so it may be out-of-date.
            // FIX: enumerate fingerTips once instead of calling Count()/ElementAt(i) in a
            // loop, which re-enumerates the sequence on every iteration (O(n^2) for
            // non-indexed sources).
            int fingerCount = 0;
            int yTotal = 0;
            foreach (var tip in fingerTips)
            {
                yTotal += tip.Y;
                fingerCount++;
            }
            if (fingerCount > 0)
            {
                int avgYFinger = yTotal / fingerCount;
                if (avgYFinger > cogPt.Y)
                { // fingers below COG -> hand is pointing downwards, flip the angle
                    contourAxisAngle += 180;
                }
            }
        }
Example #10 (score: 0)
 public static extern void cvMoments(IntPtr arr, ref MCvMoments moments, int binary);
Example #11 (score: 0)
 public static extern void cvGetHuMoments(ref MCvMoments moments, ref MCvHuMoments huMoments);
Example #12 (score: 0)
        ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
        ////////////////////////////////////////////////////////////////PUPIL(INNER IRIS BOUNDARY) DETECTION///////////////////////////////////////////////
        ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
        // NOTE(review): the original header claimed "return true if pupil is detected /
        // false if further hough circles need to be found", but the method returns void —
        // that state is communicated via the IsContourDetectionSatisfactory flag instead.
        /// <summary>
        /// Detects the pupil (inner iris boundary) by contour analysis of MaskedImage.
        /// On success, stores the pupil centre in PupilCenter and draws the contour into
        /// ContourDetectedPupilImageColor and FilledContourForSegmentation. When the detected
        /// contour also covers the eyelash region, clears IsContourDetectionSatisfactory and
        /// falls back to a Hough-circle search on a contrast-enhanced copy of the image.
        /// </summary>
        private void PerformContourDetection()
        {
            // Detected Contours will store all the contours detected in our image, Find contours will find all the contours
            // CV_RETR_TREE retrieves all of the contours and reconstructs a full hierarchy of nested contours
            // CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal segments and leaves only their end points.
            Contour<Point> detectedContours = MaskedImage.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_TREE);

            //Image moments help you to calculate some features like center of the object, area of the object etc---> here the object is the contour detected
            MCvMoments moments = new MCvMoments();

            //Walk the contour list (via HNext) until a suitable pupil contour is found
            while (detectedContours != null)
            {
                //Get the Moments of the Detected Contour
                moments = detectedContours.GetMoments();

                //get the area of the detected contour--> GetCentralMoment(0, 0) has the area
                double AreaOfDetectedContour = moments.GetCentralMoment(IrisConstants.Zero, IrisConstants.Zero);

                if (detectedContours.Total > 1)
                {
                    //((area > IrisConstants.MaxPupilArea) && (area < IrisConstants.MinPupilEyelashAreaCombined)) :
                    // to check if whole of eyelash is detected as a contour
                    //its area is greater than the pupil, but less than the pupil+eyelash area
                    //(area < IrisConstants.MinPupilArea) :
                    //to check for very small detected contours
                    if (((AreaOfDetectedContour > IrisConstants.MaxPupilArea) && (AreaOfDetectedContour < IrisConstants.MinPupilEyelashAreaCombined)) || (AreaOfDetectedContour < IrisConstants.MinPupilArea))
                    {
                        //discard the contour and process the next
                        detectedContours = detectedContours.HNext;
                        continue;
                    }

                }

                if ((AreaOfDetectedContour > IrisConstants.MinPupilArea))
                {

                    double Pupilarea = AreaOfDetectedContour;

                    //Get the Center of the Pupil --> centroid = (m10 / m00, m01 / m00)
                    double x = moments.GetSpatialMoment(IrisConstants.One, IrisConstants.Zero) / AreaOfDetectedContour;
                    double y = moments.GetSpatialMoment(IrisConstants.Zero, IrisConstants.One) / AreaOfDetectedContour;

                    //Store it in PupilCenter
                    PupilCenter.X = (int)x;
                    PupilCenter.Y = (int)y;

                    //Store the contour detected image in ContourDetectedPupilImage
                    ContourDetectedPupilImage = InputImage.Clone();

                    //Filled one will have the pupil coloured black
                    FilledContourForSegmentation = InputImage.Clone();

                    //--------------------------------------------------------------------
                    //Create a color image and store the grayscale contour image and convert to color, then draw colored contour on this
                    //--------------------------------------------------------------------

                    CvInvoke.cvCvtColor(ContourDetectedPupilImage, ContourDetectedPupilImageColor, COLOR_CONVERSION.GRAY2BGR);

                    //Draw the contour over the pupil
                    // ContourDetectedPupilImage.Draw(detectedContours, new Gray(255), IrisConstants.Zero);

                    //Fill the center of the pupil black--> -1 indicates fill
                    FilledContourForSegmentation.Draw(detectedContours, new Gray(IrisConstants.Zero), -1);

                    //DRAW the Colored circle in red
                    ContourDetectedPupilImageColor.Draw(detectedContours, new Bgr(0, 0, 255), 2);

                    //If the eyebrow/eyelash is also included in the contour, fall back to
                    //a Hough-circle search for the pupil boundary

                    if (AreaOfDetectedContour > IrisConstants.MinPupilEyelashAreaCombined)
                    {
                        //Draw the contour white
                        ContourDetectedPupilImageColor.Draw(detectedContours, new Bgr(255, 255, 255), 2);

                        //make the flag false
                        IsContourDetectionSatisfactory = false;

                        //Clone the image to apply hough transform
                        ApproximatedPupilImage = ContourDetectedPupilImage.Clone();

                        //Create image to store the approximated pupil
                        Image<Gray, Byte> ApproximatedPupilImageWithContrast = ApproximatedPupilImage.Clone();

                        //Contrast the image for histogram
                        ApproximatedPupilImageWithContrast._EqualizeHist();

                        //Perform Hough Trasform
                        PerformHoughTransform(ApproximatedPupilImageWithContrast,
                            IrisConstants.HoughCircleThreshold, IrisConstants.MinPupilHoughCircleAccumulator, IrisConstants.MaxPupilHoughCircleAccumulator,
                            IrisConstants.PupilHoughCircleResolution, IrisConstants.MinPupilHoughCircleDistance,
                            IrisConstants.MinPupilHoughCircleRadius, IrisConstants.MaxPupilHoughCircleRadius, HoughTransformFlag.Pupil);
                    }
                    //A suitable pupil contour was handled — stop scanning
                    break;
                }
                detectedContours = detectedContours.HNext;
            }
        }
        /// <summary>
        /// Finds the largest skin contour, approximates it, and computes its convex hull,
        /// minimum-area box, centre of gravity (cogPt) and axis angle (contourAxisAngle);
        /// draws the results onto currentFrame and fills hull/filteredHull/defects fields.
        /// </summary>
        /// <param name="skin">Binary skin-segmented image to search for contours.</param>
        private void ExtractContourAndHull(Image<Gray, byte> skin)
        {
            using (MemStorage storage = new MemStorage())
            {
                Contour<Point> contours = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);

                // Walk the contour list and keep the one with the largest area.
                Contour<Point> biggestContour = null;
                Double Result1 = 0;
                Double Result2 = 0;
                while (contours != null)
                {
                    Result1 = contours.Area;
                    if (Result1 > Result2)
                    {
                        Result2 = Result1;
                        biggestContour = contours;
                    }
                    contours = contours.HNext;
                }

                if (biggestContour != null)
                {
                    // Simplify the contour before further processing.
                    Contour<Point> currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
                    currentFrame.Draw(currentContour, new Bgr(Color.LimeGreen), 2);
                    biggestContour = currentContour;

                    hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                    box = biggestContour.GetMinAreaRect();
                    PointF[] points = box.GetVertices();

                    // Compute binary moments once.
                    // FIX: the previous extra GetMoments() call was immediately overwritten
                    // by cvMoments and has been removed.
                    CvInvoke.cvMoments(biggestContour, ref mv, 1);
                    double m00 = CvInvoke.cvGetSpatialMoment(ref mv, 0, 0);
                    double m10 = CvInvoke.cvGetSpatialMoment(ref mv, 1, 0);
                    double m01 = CvInvoke.cvGetSpatialMoment(ref mv, 0, 1);
                    if (m00 != 0)
                    { // centre of gravity, scaled (scale = 2)
                        int xCenter = (int)Math.Round(m10 / m00) * 2;
                        int yCenter = (int)Math.Round(m01 / m00) * 2;
                        cogPt.X = xCenter;
                        cogPt.Y = yCenter;
                    }

                    // Contour orientation from the second-order central moments.
                    double m11 = CvInvoke.cvGetCentralMoment(ref mv, 1, 1);
                    double m20 = CvInvoke.cvGetCentralMoment(ref mv, 2, 0);
                    double m02 = CvInvoke.cvGetCentralMoment(ref mv, 0, 2);
                    contourAxisAngle = calculateTilt(m11, m20, m02);

                    Point[] ps = new Point[points.Length];
                    for (int i = 0; i < points.Length; i++)
                        ps[i] = new Point((int)points[i].X, (int)points[i].Y);

                    currentFrame.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 2);
                    currentFrame.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 3), new Bgr(200, 125, 75), 2);

                    // Keep only hull points far enough from their successor.
                    // BUG FIX: the neighbour index must wrap around, otherwise hull[i + 1]
                    // reads past the end of the sequence on the last iteration.
                    filteredHull = new Seq<Point>(storage);
                    for (int i = 0; i < hull.Total; i++)
                    {
                        Point next = hull[(i + 1) % hull.Total];
                        if (Math.Sqrt(Math.Pow(hull[i].X - next.X, 2) + Math.Pow(hull[i].Y - next.Y, 2)) > box.size.Width / 10)
                        {
                            filteredHull.Push(hull[i]);
                        }
                    }

                    defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                    defectArray = defects.ToArray();
                }
            }
        }
        /// <summary>
        /// Locates the laser pointer in a thresholded grayscale image via image moments:
        /// the centroid (m10 / m00, m01 / m00) of the bright blob.
        /// </summary>
        /// <param name="src">Binary (thresholded) grayscale image containing the laser blob.</param>
        /// <returns>The laser pointer position, or Point.Empty when the image has no bright pixels.</returns>
        public Point FindLaser( Image<Gray, Byte> src )
        {
            MCvMoments moments = new MCvMoments();  // Moment object used for laser pointer location

            // Compute moments treating the image as binary (non-zero pixels count as 1).
            CvInvoke.cvMoments( src, ref moments, 1 );

            // First-order spatial moments and the zeroth-order moment (the blob area).
            double moment10 = CvInvoke.cvGetSpatialMoment( ref moments, 1, 0 );
            double moment01 = CvInvoke.cvGetSpatialMoment( ref moments, 0, 1 );
            double area = CvInvoke.cvGetCentralMoment( ref moments, 0, 0 );

            // BUG FIX: guard against an empty image — the original divided by zero and
            // returned a garbage position from the NaN/Infinity-to-int casts.
            if (area == 0)
                return Point.Empty;

            // Centroid of the blob = laser pointer position.
            Point laser = new Point();
            laser.X = (int)( moment10 / area );
            laser.Y = (int)( moment01 / area );
            return laser;
        }
        /// <summary>
        /// Finds the largest skin contour, draws its convex hull and bounding box on
        /// currentFrame, computes convexity defects, and moves the mouse cursor to the
        /// contour's centre of gravity.
        /// </summary>
        /// <param name="skin">Binary skin-segmented image to search for contours.</param>
        private void ExtractContourAndHull(Image<Gray, byte> skin)
        {
            using (MemStorage storage = new MemStorage())
            {
                Contour<Point> contours = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);

                // Walk the contour list and keep the one with the largest area.
                Contour<Point> biggestContour = null;
                Double Result1 = 0;
                Double Result2 = 0;
                while (contours != null)
                {
                    Result1 = contours.Area;
                    if (Result1 > Result2)
                    {
                        Result2 = Result1;
                        biggestContour = contours;
                    }
                    contours = contours.HNext;
                }

                // BUG FIX: in the original, the moment/cursor code sat AFTER the null check
                // and crashed in cvMoments whenever no contour was found (the try/catch only
                // covered GetMoments). Everything is now guarded by this early return.
                if (biggestContour == null)
                    return;

                // Simplify the contour before further processing.
                Contour<Point> currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
                biggestContour = currentContour;

                hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                box = biggestContour.GetMinAreaRect();
                PointF[] points = box.GetVertices();
                handRect = box.MinAreaRect();
                currentFrame.Draw(handRect, new Bgr(200, 0, 0), 1);

                Point[] ps = new Point[points.Length];
                for (int i = 0; i < points.Length; i++)
                    ps[i] = new Point((int)points[i].X, (int)points[i].Y);

                currentFrame.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 2);
                currentFrame.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 3), new Bgr(200, 125, 75), 2);

                // Keep only hull points far enough from their successor.
                // BUG FIX: wrap the neighbour index so the last iteration does not read
                // past the end of the hull sequence.
                filteredHull = new Seq<Point>(storage);
                for (int i = 0; i < hull.Total; i++)
                {
                    Point next = hull[(i + 1) % hull.Total];
                    if (Math.Sqrt(Math.Pow(hull[i].X - next.X, 2) + Math.Pow(hull[i].Y - next.Y, 2)) > box.size.Width / 10)
                    {
                        filteredHull.Push(hull[i]);
                    }
                }

                defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                defectArray = defects.ToArray();

                // Centre of gravity of the contour, used to position the mouse cursor.
                // (The redundant GetMoments() call, whose result was immediately
                // overwritten by cvMoments, has been removed.)
                MCvMoments moment = new MCvMoments();
                int fingerNum = 0;
                CvInvoke.cvMoments(biggestContour, ref moment, 0);

                double m_00 = CvInvoke.cvGetSpatialMoment(ref moment, 0, 0);
                double m_10 = CvInvoke.cvGetSpatialMoment(ref moment, 1, 0);
                double m_01 = CvInvoke.cvGetSpatialMoment(ref moment, 0, 1);

                // BUG FIX: guard against a zero area before dividing (Convert.ToInt32 on
                // NaN/Infinity throws OverflowException).
                if (m_00 != 0)
                {
                    int current_X = Convert.ToInt32(m_10 / m_00) / 10;      // X location of centre of contour
                    int current_Y = Convert.ToInt32(m_01 / m_00) / 10;      // Y location of centre of contour

                    if (fingerNum == 1 || fingerNum == 0 || blue == 6)
                    {
                        Cursor.Position = new Point(current_X * 10, current_Y * 10);
                    }
                    //Leave the cursor where it was and Do mouse click, if finger count >= 3
                }
            }
        }
Example #16 (score: 0)
    /// <summary>
    /// Per-frame tracking pipeline: detect the face, (re)initialise the hue histogram
    /// from the face region when not yet tracking, back-project the histogram, locate
    /// the two hand centres with k-means, and measure the moments of each hand window.
    /// The elapsed time of each stage is recorded in the t_* fields.
    /// </summary>
    /// <param name="frame">Current BGR camera frame.</param>
    public void ProcessFrame(Image<Bgr, Byte> frame)
    {
        // Stage 1: face detection (timed).
        sw.Reset();
        sw.Start();
        MCvAvgComp[] faces = FaceDetect(frame);
        sw.Stop();
        t_facedetect = sw.ElapsedMilliseconds;

        // Stage 2: extract the hue channel and a saturation/value mask from the frame.
        sw.Reset();
        sw.Start();
        Image<Hsv, Byte> hsv = frame.Convert<Hsv, Byte>();
        Image<Gray, Byte> hue = new Image<Gray, byte>(frame.Width, frame.Height);
        Image<Gray, Byte> mask = new Image<Gray, byte>(frame.Width, frame.Height);
        Emgu.CV.CvInvoke.cvInRangeS(hsv, new MCvScalar(0, 30, 30, 0), new MCvScalar(180, 256, 256, 0), mask);
        Emgu.CV.CvInvoke.cvSplit(hsv, hue, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero);

        // Not tracking yet: seed the hue histogram from a small patch in the middle
        // of the detected face; bail out for this frame if no face was found.
        if (isTracked == false)
        {
            if (faces.Length != 0)
            {
                var ff = faces[0];
                // Inner quarter of the face rectangle — avoids hair/background pixels.
                Rectangle smallFaceROI = new Rectangle(ff.rect.X + ff.rect.Width / 8, ff.rect.Y + ff.rect.Height / 8, ff.rect.Width / 4, ff.rect.Height / 4);
                _hist = GetHist(hue, smallFaceROI, mask);
                isTracked=true;
                th_check = true;
                center = new Point[] { new Point(0, 0), new Point(0, 0) };
            }
            else
            {
                // No face and no histogram yet — nothing can be tracked this frame.
                have_face=false;
                have_left=false;
                have_right=false;
                return;
            }
        }
        sw.Stop();
        t_hue = sw.ElapsedMilliseconds;

        // Remember the most recent face rectangle; when detection fails this frame,
        // keep using the previous one.
        if (faces.Length != 0)
        {
            face_rect = faces[0].rect;
            face = face_rect;
            have_face = true;
        }
        else
        {
            face = face_rect;
            have_face = false;
        }

        // Stage 3: histogram back-projection, thresholded to suppress weak responses.
        sw.Reset();
        sw.Start();
        backproject = GetBackproject(hue, _hist, mask, face_rect).ThresholdToZero(new Gray(backproj_threshold));
        sw.Stop();
        t_backproject = sw.ElapsedMilliseconds;

        // Stage 4: locate both hand centres via k-means on the back-projection, then refine.
        sw.Reset();
        sw.Start();

        if (isTracked)
        {
            center = kmeans(center, backproject, face_rect, kmeans_scale);
            center = refine_center(center, backproject);
        }
        sw.Stop();
        t_kmeans = sw.ElapsedMilliseconds;

        // Stage 5: moments of a fixed-size window centred on each hand
        // (center[0] = right hand, center[1] = left hand, per the field names below).
        sw.Reset();
        sw.Start();
        right = new Rectangle(center[0].X - hand_size / 2, center[0].Y - hand_size / 2, hand_size, hand_size);
        left = new Rectangle(center[1].X - hand_size / 2, center[1].Y - hand_size / 2, hand_size, hand_size);
        backproject.ROI = left;
        left_mom=backproject.GetMoments(false);
        backproject.ROI = right;
        right_mom = backproject.GetMoments(false);
        Emgu.CV.CvInvoke.cvResetImageROI(backproject);

        sw.Stop();
        t_hand = sw.ElapsedMilliseconds;

        ProcessInput();
    }