/// <summary>
/// Initializes the window, opens the default camera, configures the skin-colour
/// thresholds, and starts a BackgroundWorker loop that grabs frames continuously.
/// </summary>
public HandGeastureWindows()
{
    // BUG FIX: InitializeComponent() was called twice in the original;
    // a second call re-creates/re-wires all designer controls.
    InitializeComponent();

    //grabber = new Emgu.CV.Capture(@".\..\..\..\M2U00253.MPG");
    grabber = new Emgu.CV.Capture();
    grabber.QueryFrame(); // prime the capture so Width/Height report real values
    frameWidth = grabber.Width;
    frameHeight = grabber.Height;

    detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);

    // Skin-colour thresholds in HSV and YCrCb colour spaces.
    hsv_min = new Hsv(0, 45, 0);
    hsv_max = new Hsv(20, 255, 255);
    YCrCb_min = new Ycc(0, 131, 80);
    YCrCb_max = new Ycc(255, 185, 135);

    box = new MCvBox2D();
    ellip = new Emgu.CV.Structure.Ellipse();

    //Application.Idle += new EventHandler(FrameGrabber);
    worker = new BackgroundWorker();
    worker.DoWork += FrameGrabber;
    // BUG FIX: subscribe the completion handler BEFORE starting the worker so
    // the restart loop cannot miss the first completion.
    worker.RunWorkerCompleted += (object sender, RunWorkerCompletedEventArgs e) =>
    {
        // BUG FIX: the original restarted unconditionally, which turns any
        // exception in FrameGrabber into a tight, silent error loop.
        if (e.Error == null)
        {
            worker.RunWorkerAsync();
        }
    };
    worker.RunWorkerAsync();
}
/// <summary>
/// Fit an ellipse to the points collection using OpenCV's least-squares fit.
/// </summary>
/// <param name="points">The points to be fitted</param>
/// <returns>An ellipse</returns>
public Ellipse EllipseLeastSquareFitting(AForge.Point[] points)
{
    IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
    IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
    GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
    try
    {
        // Wrap the pinned managed array in an OpenCV sequence header (no copy).
        CvInvoke.cvMakeSeqHeaderForArray(
            CvInvoke.CV_MAKETYPE((int)Emgu.CV.CvEnum.MAT_DEPTH.CV_32F, 2),
            StructSize.MCvSeq,
            StructSize.PointF,
            handle.AddrOfPinnedObject(),
            points.Length,
            seq,
            block);
        Ellipse e = new Ellipse(CvInvoke.cvFitEllipse2(seq));
        //this version of Emgu Ellipse fitting has a bug and should be modified as below
        //and even after this modification, Box is smaller in a wrong angle
        Ellipse ModifiedEllipse = new Emgu.CV.Structure.Ellipse(
            e.MCvBox2D.center,
            new SizeF(e.MCvBox2D.size.Width, e.MCvBox2D.size.Height),
            e.MCvBox2D.angle - 90);
        return ModifiedEllipse;
    }
    finally
    {
        // BUG FIX: the original released these only on the success path; an
        // exception from cvFitEllipse2 leaked both native buffers and the pin.
        handle.Free();
        Marshal.FreeHGlobal(seq);
        Marshal.FreeHGlobal(block);
    }
}
/// <summary>
/// Initializes the form: opens a test video, configures skin-colour thresholds,
/// allocates the contour/hull working storage, loads the hand Haar cascade, and
/// hooks frame processing to Application.Idle. (Kinect init is disabled below.)
/// </summary>
public Form1()
{
    InitializeComponent();
    // NOTE(review): machine-specific absolute path — breaks anywhere else;
    // consider making this configurable.
    grabber = new Emgu.CV.Capture("C:/Users/L33549.CITI/Desktop/a.avi");
    grabber.QueryFrame(); // prime the capture so Width/Height report real values
    frameWidth = grabber.Width;
    frameHeight = grabber.Height;
    //detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);

    // Skin-colour thresholds in HSV and YCrCb colour spaces.
    hsv_min = new Hsv(0, 45, 0);
    hsv_max = new Hsv(20, 255, 255);
    YCrCb_min = new Ycc(0, 129, 40);
    YCrCb_max = new Ycc(255, 185, 135);

    box = new MCvBox2D();
    ellip = new Ellipse();

    // Working storage for the contour/approximation/hull/defect computations.
    contourStorage = new MemStorage();
    approxStorage = new MemStorage();
    hullStorage = new MemStorage();
    defectsStorage = new MemStorage();

    tipPts = new Point[MAX_POINTS];   // coords of the finger tips
    foldPts = new Point[MAX_POINTS];  // coords of the skin folds between fingers
    depths = new float[MAX_POINTS];   // distances from tips to folds
    cogPt = new Point();
    fingerTips = new List<Point>();

    // NOTE(review): another machine-specific absolute path — TODO make configurable.
    face = new CascadeClassifier("C:/Users/L33549.CITI/Desktop/AbuseAnalysis/HandGestureRecognition/HandGestureRecognition/HandGestureRecognition/haar/Original/haarcascade_hand.xml");

    Application.Idle += new EventHandler(FrameGrabber);

    /* Kinect initialization — disabled, kept for reference:
    foreach (var potentialSensor in KinectSensor.KinectSensors)
    {
        if (potentialSensor.Status == KinectStatus.Connected)
        {
            this.sensor = potentialSensor;
            break;
        }
    }
    if (null != this.sensor)
    {
        // Turn on the color stream to receive color frames
        this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
        // Allocate space to put the pixels we'll receive
        this.colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];
        // This is the bitmap we'll display on-screen
        this.colorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
        // Set the image we display to point to the bitmap where we'll put the image data
        //this.Image.Source = this.colorBitmap;
        // Add an event handler to be called whenever there is new color frame data
        this.sensor.ColorFrameReady += this.SensorColorFrameReady;
        // Start the sensor!
        this.sensor.Start();
    }*/
}
/// <summary>
/// Processes the source image cell by cell using tile images loaded from the
/// database directory, blending each cell through a feathered elliptical mask,
/// then crops to whole cells and saves the result as "dump.jpg".
/// </summary>
/// <param name="database">Directory containing candidate .jpg tile images.</param>
/// <param name="source">Path of the image to process.</param>
public void Work(string database, string source)
{
    // BUG FIX: the Image<,> constructor throws on a bad path instead of
    // returning null, so the original "m_image == null" guard could never
    // fire; catch the failure to honour the intended log-and-return.
    try
    {
        m_image = new Image<Bgr, byte>(source);
    }
    catch (Exception ex)
    {
        Log("invalid source " + source + " (" + ex.Message + ")");
        return;
    }

    Log("Scanning files " + database);
    string[] files = System.IO.Directory.GetFiles(database, "*.jpg");
    foreach (var current in files)
    {
        Load(current);
    }
    Log("Scanning done");

    m_rows = m_image.Height / m_sizey;
    m_cols = m_image.Width / m_sizex;
    m_used = 0;

    // Build a feathered elliptical blending mask slightly smaller than a cell.
    m_mask = new Image<Gray, Byte>(m_sizex, m_sizey);
    // BUG FIX: "m_sizex / 2" was an integer division that dropped the
    // half-pixel for odd cell sizes; divide in floating point instead.
    float halfx = m_sizex / 2f;
    float halfy = m_sizey / 2f;
    Ellipse elipse = new Ellipse(new PointF(halfx, halfy), new SizeF((float)m_sizex * 0.95f, (float)m_sizey * 0.95f), 90.0f);
    m_mask.Draw(elipse, new Gray(255), -1);
    m_mask._SmoothGaussian(15);
    m_mask = m_mask * 0.90;
    //ImageViewer.Show(m_mask, "Mask");
    //m_mask = new Image<Gray, Byte>(m_sizex, m_sizey);
    //float sx = (int)(m_sizex * 0.9f);
    //float sy = (int)(m_sizey * 0.9f);
    //Rectangle rect = new Rectangle((int)(m_sizex - sx) / 2, (int)(m_sizey - sy) / 2, (int)sx, (int)sy);
    //m_mask.Draw(rect, new Gray(255), -1);
    //m_mask._SmoothGaussian(15);
    //m_mask = m_mask * 0.75;
    //ImageViewer.Show(m_mask, "Mask");
    m_mask._Not();

    m_image._SmoothGaussian(51);
    Log("Row: " + m_rows + " Cols: " + m_cols + " Size[" + m_sizex + "," + m_sizey + "]");
    for (int r = 0; r != m_rows; ++r)
    {
        for (int c = 0; c != m_cols; ++c)
            ProcessCell(r, c);
        Log("R: " + r);
    }

    // Crop to the whole number of processed cells before saving.
    m_image.ROI = new Rectangle(0, 0, m_cols * m_sizex, m_rows * m_sizey);
    m_image.Save("dump.jpg");
}
/// <summary>
/// Initializes the form: opens the sample video, sets up the adaptive skin
/// detector with its HSV/YCrCb thresholds, and hooks frame processing onto
/// the application's idle loop.
/// </summary>
public Form1()
{
    InitializeComponent();

    // Open the sample clip shipped next to the project output.
    grabber = new Emgu.CV.Capture(@".\..\..\..\M2U00253.MPG");
    grabber.QueryFrame();
    frameWidth = grabber.Width;
    frameHeight = grabber.Height;

    detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);

    // Skin-colour bounds in HSV and YCrCb colour spaces.
    hsv_min = new Hsv(0, 45, 0);
    hsv_max = new Hsv(20, 255, 255);
    YCrCb_min = new Ycc(0, 131, 80);
    YCrCb_max = new Ycc(255, 185, 135);

    box = new MCvBox2D();
    ellip = new Ellipse();

    // Method-group subscription; equivalent to new EventHandler(FrameGrabber).
    Application.Idle += FrameGrabber;
}
/// <summary>
/// Generates a random point cloud around a model ellipse, fits an ellipse to
/// it with least squares, and renders both for visual inspection.
/// </summary>
public void TestEllipseFitting()
{
    // Build a random cloud scattered around the model ellipse.
    System.Random rng = new Random(); // kept from the original (unused by the generator)
    int total = 100;
    Ellipse model = new Ellipse(new PointF(200, 200), new SizeF(150, 60), 30);
    PointF[] cloud = PointCollection.GeneratePointCloud(model, total);

    // Time the least-squares fit.
    Stopwatch timer = Stopwatch.StartNew();
    Ellipse fitted = PointCollection.EllipseLeastSquareFitting(cloud);
    timer.Stop();

    // Render the cloud (green) and the fitted ellipse (red).
    Image<Bgr, byte> canvas = new Image<Bgr, byte>(400, 400, new Bgr(Color.White));
    foreach (PointF pt in cloud)
        canvas.Draw(new CircleF(pt, 2), new Bgr(Color.Green), 1);
    canvas.Draw(fitted, new Bgr(Color.Red), 2);

    //ImageViewer.Show(canvas, String.Format("Time used: {0} milliseconds", timer.ElapsedMilliseconds));
}
/* /// <summary>
/// Re-project pixels on a 1-channel disparity map to array of 3D points.
/// </summary>
/// <param name="disparity">Disparity map</param>
/// <param name="Q">The re-projection 4x4 matrix, can be arbitrary, e.g. the one, computed by cvStereoRectify</param>
/// <returns>The reprojected 3D points</returns>
public static MCvPoint3D32f[] ReprojectImageTo3D(Image<Gray, Byte> disparity, Matrix<double> Q)
{
   Size size = disparity.Size;
   MCvPoint3D32f[] points3D = new MCvPoint3D32f[size.Width * size.Height];
   GCHandle handle = GCHandle.Alloc(points3D, GCHandleType.Pinned);
   using (Matrix<float> pts = new Matrix<float>(size.Height, size.Width, 3, handle.AddrOfPinnedObject(), 0))
      CvInvoke.ReprojectImageTo3D(disparity, pts, Q, false, CvEnum.DepthType.Cv32F);
   handle.Free();
   return points3D;
}*/

/// <summary>
/// Generate a random point cloud around the ellipse.
/// </summary>
/// <param name="e">The region where the point cloud will be generated. The axes of e corresponds to std of the random point cloud.</param>
/// <param name="numberOfPoints">The number of points to be generated</param>
/// <returns>A random point cloud around the ellipse</returns>
public static PointF[] GeneratePointCloud(Ellipse e, int numberOfPoints)
{
    PointF[] cloud = new PointF[numberOfPoints];
    GCHandle handle = GCHandle.Alloc(cloud, GCHandleType.Pinned);
    try
    {
        // View the pinned PointF[] as an Nx2 float matrix so the random fill
        // and rotation write straight into the managed array.
        using (Matrix<float> points = new Matrix<float>(numberOfPoints, 2, handle.AddrOfPinnedObject()))
        using (Matrix<float> xValues = points.GetCol(0))
        using (Matrix<float> yValues = points.GetCol(1))
        using (RotationMatrix2D rotation = new RotationMatrix2D(e.RotatedRect.Center, e.RotatedRect.Angle, 1.0))
        {
            // BUG FIX (dead code removed): the original converted the rotation
            // matrix into a temporary Mat that was never used afterwards.
            xValues.SetRandNormal(new MCvScalar(e.RotatedRect.Center.X), new MCvScalar(e.RotatedRect.Size.Width / 2.0f));
            yValues.SetRandNormal(new MCvScalar(e.RotatedRect.Center.Y), new MCvScalar(e.RotatedRect.Size.Height / 2.0f));
            rotation.RotatePoints(points);
        }
    }
    finally
    {
        // BUG FIX: the pin was leaked if any of the matrix operations threw.
        handle.Free();
    }
    return cloud;
}
/* /// <summary>
/// A comparator which compares only the X value of the point
/// </summary>
private class XValueOfPointComparator : IComparer<PointF>
{
   public int Compare(PointF p1, PointF p2)
   {
      return p1.X.CompareTo(p2.X);
   }
}

/// <summary>
/// Perform a first degree interpolation to lookup the y coordinate given the x coordinate
/// </summary>
/// <param name="points">The collection of points. Must be sorted by the x value.</param>
/// <param name="index">the x coordinate</param>
/// <returns>the y coordinate as the result of the first degree interpolation</returns>
public static float FirstDegreeInterpolate(PointF[] points, float index)
{
   XValueOfPointComparator comparator = new XValueOfPointComparator();
   int idx = Array.BinarySearch<PointF>(points, new PointF(index, 0.0f), comparator);
   if (idx >= 0) // an exact index is matched
      return points[idx].Y;
   // the index fall into a range, in this case we do interpolation
   idx = -idx;
   if (idx == 1)
      // the specific index is smaller than all indexes
      idx = 0;
   else if (idx == points.Length + 1)
      // the specific index is larger than all indexes
      idx = points.Length - 2;
   else
      idx -= 2;
   LineSegment2DF line = new LineSegment2DF(points[idx], points[idx + 1]);
   return line.YByX(index);
}

/// <summary>
/// Perform a first degree interpolation to lookup the y coordinates given the x coordinates
/// </summary>
/// <param name="points">The collection of points, Must be sorted by x value</param>
/// <param name="indexes">the x coordinates</param>
/// <returns>The y coordinates as the result of the first degree interpolation</returns>
public static float[] FirstDegreeInterpolate(PointF[] points, float[] indexes)
{
   return Array.ConvertAll<float, float>(
      indexes,
      delegate(float d) { return FirstDegreeInterpolate(points, d); });
}*/

/* /// <summary>
/// Fit a line to the points collection
/// </summary>
/// <param name="points">The points to be fitted</param>
/// <param name="type">The type of the fitting</param>
/// <param name="normalizedDirection">The normalized direction of the fitted line</param>
/// <param name="aPointOnLine">A point on the fitted line</param>
public static void Line2DFitting(PointF[] points, CvEnum.DistType type, out PointF normalizedDirection, out PointF aPointOnLine)
{
   float[] data = new float[6];
   IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
   IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
   GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
   CvInvoke.cvMakeSeqHeaderForArray(
      CvInvoke.MakeType(CvEnum.DepthType.Cv32F, 2),
      StructSize.MCvSeq,
      StructSize.PointF,
      handle.AddrOfPinnedObject(),
      points.Length,
      seq,
      block);
   CvInvoke.cvFitLine(seq, type, 0.0, 0.01, 0.01, data);
   handle.Free();
   Marshal.FreeHGlobal(seq);
   Marshal.FreeHGlobal(block);
   normalizedDirection = new PointF(data[0], data[1]);
   aPointOnLine = new PointF(data[2], data[3]);
}*/

/// <summary>
/// Fit an ellipse to the points collection
/// </summary>
/// <param name="points">The points to be fitted</param>
/// <returns>An ellipse</returns>
public static Ellipse EllipseLeastSquareFitting(PointF[] points)
{
    using (VectorOfPointF vp = new VectorOfPointF(points))
    {
        Ellipse fitted = new Ellipse(CvInvoke.FitEllipse(vp));

        // FitEllipse reports a clockwise rotation, but MCvBox is defined with
        // a counter-clockwise rotation: flip the sign and normalise to [0, 360).
        RotatedRect rect = fitted.RotatedRect;
        rect.Angle = -rect.Angle;
        if (rect.Angle < 0)
            rect.Angle += 360;
        fitted.RotatedRect = rect;

        return fitted;
    }
}
/// <summary>
/// Processes the video frame selected by SliderValue: segments the mouse blob
/// from the background, locates head/tail/spine features, measures the body
/// region, and publishes several visualisation images (Image1, Image3, Image4,
/// Image5, Image6).
/// </summary>
private void UpdateFrameNumber()
{
    Video.SetFrame(SliderValue);
    // Grab the frame, binarize it, and diff against the binary background to
    // isolate the moving foreground (the mouse).
    using (Image<Bgr, Byte> orig = Video.GetFrameImage())
    using (Image<Gray, Byte> origGray = orig.Convert<Gray, Byte>())
    using (Image<Gray, Byte> binary = origGray.ThresholdBinary(new Gray(ThresholdValue), new Gray(255)))
    using (Image<Gray, Byte> subbed = BinaryBackground.AbsDiff(binary))
    {
        // Detect blobs and keep the largest — assumed to be the mouse.
        // NOTE(review): if no blobs are found, mouseBlob stays null and the
        // dereferences below will throw — confirm upstream guarantees a blob.
        CvBlobs blobs = new CvBlobs();
        BlobDetector.Detect(subbed, blobs);
        CvBlob mouseBlob = null;
        double maxArea = -1;
        foreach (var blob in blobs.Values)
        {
            if (blob.Area > maxArea)
            {
                mouseBlob = blob;
                maxArea = blob.Area;
            }
        }
        //double gapDistance = GetBestGapDistance(rbsk);
        double gapDistance = 50;
        RBSK.Settings.GapDistance = gapDistance;
        //PointF[] headPoints = ProcessFrame(orig, RBSK);
        PointF center = mouseBlob.Centroid;
        //LineSegment2DF[] targetPoints = null;
        Point[] mouseContour = mouseBlob.GetContour();
        orig.DrawPolyline(mouseContour, true, new Bgr(Color.Cyan));
        Image1 = ImageService.ToBitmapSource(orig);

        // Head points: use pre-computed values when available, otherwise run
        // the rule-based head detector over the contour.
        PointF[] result;
        if (HeadPoints != null)
        {
            result = HeadPoints[SliderValue].HeadPoints;
        }
        else
        {
            double prob = 0;
            RBSK headRbsk = MouseService.GetStandardMouseRules();
            headRbsk.Settings.GapDistance = 65;
            headRbsk.Settings.BinaryThreshold = 20;
            List<List<PointF>> allKeyPoints = headRbsk.FindKeyPoints(mouseContour, headRbsk.Settings.NumberOfSlides, false);
            result = headRbsk.FindPointsFromRules(allKeyPoints[0], binary, ref prob);
        }
        if (result != null)
        {
            // Overlay the detected head points on a copy of the frame.
            using (Image<Bgr, Byte> test = orig.Clone())
            {
                foreach (var point in result)
                {
                    test.Draw(new CircleF(point, 3), new Bgr(Color.Red), 3);
                }
                Image1 = ImageService.ToBitmapSource(test);
            }
        }
        else
        {
            // No head detected — nothing further can be measured on this frame.
            return;
        }
        RotatedRect rotatedRect = CvInvoke.MinAreaRect(mouseContour.Select(x => new PointF(x.X, x.Y)).ToArray());
        //Console.WriteLine("Size: " + rotatedRect.Size);
        ISkeleton skel = ModelResolver.Resolve<ISkeleton>();
        Image<Gray, Byte> tempBinary = binary.Clone();
        System.Drawing.Rectangle rect = mouseBlob.BoundingBox;
        Image<Gray, Byte> binaryRoi = tempBinary.GetSubRect(rect);
        using (Image<Bgr, Byte> displayImage = subbed.Convert<Bgr, Byte>())
        using (Image<Gray, Byte> skelImage = skel.GetSkeleton(binaryRoi))
        using (Image<Bgr, Byte> drawImage = orig.Clone())
        using (Image<Bgr, Byte> tempImage2 = new Image<Bgr, byte>(drawImage.Size))
        {
            //-----------------------------------------
            // Cache a copy of the skeleton image on the instance, releasing
            // the previous frame's copy first.
            if (SkelImage != null)
            {
                SkelImage.Dispose();
            }
            SkelImage = skelImage.Clone();
            //--------------------------------------------
            tempImage2.SetValue(new Bgr(Color.Black));
            ISpineFinding spineFinder = ModelResolver.Resolve<ISpineFinding>();
            spineFinder.NumberOfCycles = 3;
            spineFinder.NumberOfIterations = 1;
            spineFinder.SkeletonImage = skelImage;
            //spineFinder.RotatedRectangle = rotatedRect;
            Image5 = ImageService.ToBitmapSource(skelImage);

            // Tail point: scan the contour for the sharpest corner, comparing
            // each point against neighbours "delta" steps away on either side
            // (indices wrap around the closed contour).
            const int delta = 20;
            double smallestAngle = double.MaxValue;
            Point tailPoint = Point.Empty;
            for (int i = 0; i < mouseContour.Length; i++)
            {
                int leftDelta = i - delta;
                int rightDelta = i + delta;
                if (leftDelta < 0)
                {
                    leftDelta += mouseContour.Length;
                }
                if (rightDelta >= mouseContour.Length)
                {
                    rightDelta -= mouseContour.Length;
                }
                Point testPoint = mouseContour[i];
                Point leftPoint = mouseContour[leftDelta];
                Point rightPoint = mouseContour[rightDelta];
                Vector v1 = new Vector(leftPoint.X - testPoint.X, leftPoint.Y - testPoint.Y);
                Vector v2 = new Vector(rightPoint.X - testPoint.X, rightPoint.Y - testPoint.Y);
                double angle = Math.Abs(Vector.AngleBetween(v1, v2));
                // Accept only sufficiently sharp (but non-degenerate) corners.
                if (angle < 30 && angle > 9)
                {
                    if (angle < smallestAngle)
                    {
                        smallestAngle = angle;
                        tailPoint = testPoint;
                    }
                }
            }

            // Generate the spine between head and tail (ROI-local coordinates).
            PointF headCornerCorrect = new PointF(result[2].X - rect.X, result[2].Y - rect.Y);
            PointF tailCornerCorrect = new PointF(tailPoint.X - rect.X, tailPoint.Y - rect.Y);
            PointF[] spine = spineFinder.GenerateSpine(headCornerCorrect, tailCornerCorrect);
            Point topCorner = mouseBlob.BoundingBox.Location;
            PointF[] spineCornerCorrected = new PointF[spine.Length];
            for (int i = 0; i < spine.Length; i++)
            {
                spineCornerCorrected[i] = new PointF(spine[i].X + topCorner.X, spine[i].Y + topCorner.Y);
            }

            ITailFinding tailFinding = ModelResolver.Resolve<ITailFinding>();
            double rotatedWidth = rotatedRect.Size.Width < rotatedRect.Size.Height ? rotatedRect.Size.Width : rotatedRect.Size.Height;
            List<Point> bodyPoints;
            if (result != null)
            {
                // Orient the spine so it runs from the head end.
                double firstDist = result[2].DistanceSquared(spineCornerCorrected.First());
                double lastDist = result[2].DistanceSquared(spineCornerCorrected.Last());
                if (firstDist < lastDist)
                {
                    spineCornerCorrected = spineCornerCorrected.Reverse().ToArray();
                }
            }
            double waistLength;
            double pelvicArea1, pelvicArea2;
            tailFinding.FindTail(mouseContour, spineCornerCorrected, displayImage, rotatedWidth, mouseBlob.Centroid, out bodyPoints, out waistLength, out pelvicArea1, out pelvicArea2);
            Console.WriteLine(smallestAngle);
            if (!tailPoint.IsEmpty)
            {
                drawImage.Draw(new CircleF(tailPoint, 4), new Bgr(Color.Red), 3);
            }

            // Body measurements: polygon area/volume plus signed displacement
            // of the body centroid from the spine.
            if (bodyPoints != null && bodyPoints.Count > 0)
            {
                Point[] bPoints = bodyPoints.ToArray();
                double volume = MathExtension.PolygonArea(bPoints);
                Emgu.CV.Structure.Ellipse fittedEllipse = PointCollection.EllipseLeastSquareFitting(bPoints.Select(x => x.ToPointF()).ToArray());
                //CvInvoke.Ellipse(drawImage, fittedEllipse.RotatedRect, new MCvScalar(0, 0, 255), 2);
                Console.WriteLine("Volume: " + volume + " - " + (fittedEllipse.RotatedRect.Size.Width * fittedEllipse.RotatedRect.Size.Height) + ", Waist Length: " + waistLength);
                //Alter this to something better
                // NOTE(review): "|| true" forces this branch — the area check is
                // currently disabled.
                if (MathExtension.PolygonArea(bPoints) > (rotatedRect.Size.Height * rotatedRect.Size.Width) / 6 || true)
                {
                    //tempImage2.FillConvexPoly(bPoints, new Bgr(Color.White));
                    tempImage2.DrawPolyline(bPoints, true, new Bgr(Color.White));
                    PointF centroid = MathExtension.FindCentroid(bPoints);
                    System.Drawing.Rectangle minRect;
                    // Flood-fill mask must be 2px larger than the filled image.
                    Image<Gray, Byte> temp2 = new Image<Gray, byte>(tempImage2.Width + 2, tempImage2.Height + 2);
                    CvInvoke.FloodFill(tempImage2, temp2, centroid.ToPoint(), new MCvScalar(255, 255, 255), out minRect, new MCvScalar(5, 5, 5), new MCvScalar(5, 5, 5));
                    using (Image<Gray, Byte> nonZeroImage = tempImage2.Convert<Gray, Byte>())
                    {
                        int[] volume2 = nonZeroImage.CountNonzero();
                        Console.WriteLine("Volume2: " + volume2[0]);
                        //int tester = 9;
                        //using (Image<Gray, Byte> t1 = nonZeroImage.Erode(tester))
                        //using (Image<Gray, Byte> t2 = t1.Dilate(tester))
                        //using (Image<Gray, Byte> t3 = t2.Erode(tester))
                        //using (Image<Gray, Byte> t4 = t3.Dilate(tester))
                        //using (Image<Gray, Byte> t5 = t4.Erode(tester))
                        //using (Image<Gray, Byte> t6 = t5.Dilate(tester))
                        //using (Image<Gray, Byte> t7 = t6.Erode(tester))
                        //{
                        //    Image6 = ImageService.ToBitmapSource(t7);
                        //}
                    }
                    tempImage2.Draw(new CircleF(centroid, 2), new Bgr(Color.Blue), 2);

                    // Signed distance from the body centroid to the nearest
                    // spine segment (negative when below the spine).
                    double distanceToSpine = double.MaxValue;
                    PointF p11 = PointF.Empty, p22 = PointF.Empty;
                    for (int i = 1; i < spineCornerCorrected.Length; i++)
                    {
                        PointF point1 = spineCornerCorrected[i - 1];
                        PointF point2 = spineCornerCorrected[i];
                        double cDist = MathExtension.MinDistanceFromLineToPoint(point1, point2, centroid);
                        if (cDist < distanceToSpine)
                        {
                            p11 = point1;
                            p22 = point2;
                            distanceToSpine = cDist;
                        }
                    }
                    PointSideVector psv = MathExtension.FindSide(p11, p22, centroid);
                    if (psv == PointSideVector.Below)
                    {
                        distanceToSpine *= -1;
                    }
                    Console.WriteLine(distanceToSpine + ",");
                }
            }

            // Draw the spine, shifted back to full-image coordinates.
            for (int i = 1; i < spine.Length; i++)
            {
                PointF point1 = spine[i - 1];
                PointF point2 = spine[i];
                point1.X += topCorner.X;
                point1.Y += topCorner.Y;
                point2.X += topCorner.X;
                point2.Y += topCorner.Y;
                LineSegment2D line = new LineSegment2D(new Point((int)point1.X, (int)point1.Y), new Point((int)point2.X, (int)point2.Y));
                drawImage.Draw(line, new Bgr(Color.Aqua), 2);
                tempImage2.Draw(line, new Bgr(Color.Cyan), 2);
            }
            drawImage.Draw(new CircleF(mouseBlob.Centroid, 2), new Bgr(Color.Blue), 2);
            Image3 = ImageService.ToBitmapSource(drawImage);
            Image6 = ImageService.ToBitmapSource(tempImage2);
            double rotatedRectArea = rotatedRect.Size.Width * rotatedRect.Size.Height;
            if (rotatedRectArea < 75000)
            {
                //Console.WriteLine(rotatedRectArea);
                //return;
            }
            else
            {
                //Console.WriteLine(rotatedRectArea);
            }

            // Split the bounding box in half along its long axis and decide
            // which half is the head end by comparing foreground pixel counts.
            double height = rotatedRect.Size.Height;
            double width = rotatedRect.Size.Width;
            //double angle = rotatedRect.Angle;
            bool heightLong = height > width;
            double halfLength;
            PointF[] vertices = rotatedRect.GetVertices();
            if (heightLong)
            {
                halfLength = height;
            }
            else
            {
                halfLength = width;
            }
            halfLength /= 2;
            PointF[] sidePoints1 = new PointF[4], midPoints = new PointF[2];
            PointF p1 = vertices[0], p2 = vertices[1], p3 = vertices[2], p4 = vertices[3];
            double d1 = p1.DistanceSquared(p2);
            double d2 = p2.DistanceSquared(p3);
            if (d1 < d2)
            {
                //p1 and p2, p3 and p4 are side points
                sidePoints1[0] = p1;
                sidePoints1[1] = p2;
                sidePoints1[2] = p4;
                sidePoints1[3] = p3;
                midPoints[0] = p1.MidPoint(p4);
                midPoints[1] = p2.MidPoint(p3);
            }
            else
            {
                //p2 and p3, p1 and p4 are side points
                sidePoints1[0] = p1;
                sidePoints1[1] = p4;
                sidePoints1[2] = p2;
                sidePoints1[3] = p3;
                midPoints[0] = p1.MidPoint(p2);
                midPoints[1] = p3.MidPoint(p4);
            }
            PointF intersection1 = PointF.Empty;
            PointF intersection2 = PointF.Empty;
            using (Image<Gray, Byte> halfTest1 = origGray.CopyBlank())
            using (Image<Gray, Byte> halfTest2 = origGray.CopyBlank())
            {
                Point[] rect1 = new Point[] { new Point((int)sidePoints1[0].X, (int)sidePoints1[0].Y), new Point((int)midPoints[0].X, (int)midPoints[0].Y), new Point((int)midPoints[1].X, (int)midPoints[1].Y), new Point((int)sidePoints1[1].X, (int)sidePoints1[1].Y) };
                Point[] rect2 = new Point[] { new Point((int)sidePoints1[2].X, (int)sidePoints1[2].Y), new Point((int)midPoints[0].X, (int)midPoints[0].Y), new Point((int)midPoints[1].X, (int)midPoints[1].Y), new Point((int)sidePoints1[3].X, (int)sidePoints1[3].Y) };
                if (MathExtension.PolygonContainsPoint(rect1, center))
                {
                    //Rect 1 is head, look for line in r2
                }
                else if (MathExtension.PolygonContainsPoint(rect2, center))
                {
                    //Rect 2 is head, look for line in r1
                }
                else
                {
                    //Something has gone wrong
                }
                halfTest1.FillConvexPoly(rect1, new Gray(255));
                halfTest2.FillConvexPoly(rect2, new Gray(255));
                //Image5 = ImageService.ToBitmapSource(halfTest1);
                //Image6 = ImageService.ToBitmapSource(halfTest2);
                //binary.Copy(holder1, halfTest1);
                //binary.Copy(holder2, halfTest2);
                int count1, count2;
                //using (Image<Gray, Byte> binaryInverse = subbed.Not())
                using (Image<Gray, Byte> holder1 = subbed.Copy(halfTest1))
                using (Image<Gray, Byte> holder2 = subbed.Copy(halfTest2))
                {
                    //Image4 = ImageService.ToBitmapSource(subbed);
                    //Image5 = ImageService.ToBitmapSource(holder1);
                    //Image6 = ImageService.ToBitmapSource(holder2);
                    count1 = holder1.CountNonzero()[0];
                    count2 = holder2.CountNonzero()[0];
                }
                PointF qr1 = PointF.Empty, qr2 = PointF.Empty, qr3 = PointF.Empty, qr4 = PointF.Empty;
                if (count1 > count2)
                {
                    //holder 1 is head, holder 2 is rear
                    qr1 = sidePoints1[2];
                    qr2 = sidePoints1[2].MidPoint(midPoints[0]);
                    qr3 = sidePoints1[3].MidPoint(midPoints[1]);
                    qr4 = sidePoints1[3];
                }
                else if (count1 < count2)
                {
                    //holder 2 is head, holder 1 is year
                    qr1 = sidePoints1[0];
                    qr2 = sidePoints1[0].MidPoint(midPoints[0]);
                    qr3 = sidePoints1[1].MidPoint(midPoints[1]);
                    qr4 = sidePoints1[1];
                }
                //fat line is qr2, qr3
                PointF centerPoint = qr2.MidPoint(qr3);
                PointF i1 = qr2;
                PointF i2 = qr3;
                intersection1 = MathExtension.PolygonLineIntersectionPoint(centerPoint, i1, mouseContour);
                intersection2 = MathExtension.PolygonLineIntersectionPoint(centerPoint, i2, mouseContour);
            }
            double deltaX = halfLength * Math.Cos(rotatedRect.Angle * MathExtension.Deg2Rad);
            double deltaY = halfLength * Math.Sin(rotatedRect.Angle * MathExtension.Deg2Rad);
            const double scaleFactor = 0.25;
            PointF newPoint = new PointF((float)(center.X - (deltaX * scaleFactor)), (float)(center.Y - (deltaY * scaleFactor)));
            PointF intersectionPoint1 = PointF.Empty;
            PointF intersectionPoint2 = PointF.Empty;
            Point[] temp = null;
            PointF[] headPoints = RBSKService.RBSKParallel(binary, MouseService.GetStandardMouseRules(), ref temp);
            if (headPoints != null)
            {
                // Cast a line perpendicular to the nose direction and find
                // where it crosses the contour on each side.
                PointF tip = headPoints[2];
                //targetPoints = new LineSegment2DF[3];
                Point centerInt = new Point((int)newPoint.X, (int)newPoint.Y);
                //targetPoints[0] = new LineSegment2DF(centerInt, new PointF(tip.X, tip.Y));
                Vector forwardVec = new Vector(tip.X - newPoint.X, tip.Y - newPoint.Y);
                Vector rotatedVec = new Vector(-forwardVec.Y, forwardVec.X);
                PointF i1 = new PointF((float)(newPoint.X + (rotatedVec.X * 1)), (float)(newPoint.Y + (rotatedVec.Y * 1)));
                PointF i2 = new PointF((float)(newPoint.X - (rotatedVec.X * 1)), (float)(newPoint.Y - (rotatedVec.Y * 1)));
                //targetPoints[1] = new LineSegment2DF(centerInt, i1);
                //targetPoints[2] = new LineSegment2DF(centerInt, i2);
                intersectionPoint1 = MathExtension.PolygonLineIntersectionPoint(newPoint, i1, mouseContour);
                intersectionPoint2 = MathExtension.PolygonLineIntersectionPoint(newPoint, i2, mouseContour);
            }
            //displayImage.Draw(mouseBlob.BoundingBox, new Bgr(Color.Red), 2);
            displayImage.Draw(new CircleF(mouseBlob.Centroid, 3), new Bgr(Color.Blue), 2);
            displayImage.Draw(rotatedRect, new Bgr(Color.Yellow), 3);
            //displayImage.Draw(mouseContour, new Bgr(Color.Aqua), 2);
            //displayImage.FillConvexPoly(new Point[] { new Point((int)sidePoints1[0].X, (int)sidePoints1[0].Y), new Point((int)midPoints[0].X, (int)midPoints[0].Y), new Point((int)midPoints[1].X, (int)midPoints[1].Y), new Point((int)sidePoints1[1].X, (int)sidePoints1[1].Y) }, new Bgr(Color.Blue));
            //if (targetPoints != null)
            //{
            //    displayImage.Draw(targetPoints[0], new Bgr(Color.Green), 2);
            //    displayImage.Draw(targetPoints[1], new Bgr(Color.Green), 2);
            //    displayImage.Draw(targetPoints[2], new Bgr(Color.Green), 2);
            //}
            //if (!intersection1.IsEmpty && !intersection2.IsEmpty)
            //{
            //    LineSegment2DF lineSegment = new LineSegment2DF(intersection1, intersection2);
            //    displayImage.Draw(lineSegment, new Bgr(Color.MediumPurple), 4);
            //    //Console.WriteLine(lineSegment.Length);
            //}
            //displayImage.Draw(new CircleF(newPoint, 4), new Bgr(Color.MediumPurple), 3);
            //Console.WriteLine(rotatedRect.Angle);
            Image4 = ImageService.ToBitmapSource(displayImage);
        }
    }
}
/// <summary>
/// Generates a random point cloud around a model ellipse, computes its minimum
/// enclosing circle, and renders the points for visual inspection.
/// </summary>
public void TestMinEnclosingCircle()
{
    // Build a random cloud scattered around the model ellipse.
    System.Random rng = new Random(); // kept from the original (unused by the generator)
    int total = 100;
    Ellipse model = new Ellipse(new PointF(200, 200), new SizeF(90, 60), -60);
    PointF[] cloud = PointCollection.GeneratePointCloud(model, total);

    // Time the minimum-enclosing-circle computation.
    Stopwatch timer = Stopwatch.StartNew();
    CircleF circle = CvInvoke.MinEnclosingCircle(cloud);
    timer.Stop();

    // Render the points in green on a white canvas.
    Mat canvas = new Mat(400, 400, DepthType.Cv8U, 3);
    canvas.SetTo(new MCvScalar(255, 255, 255));
    foreach (PointF pt in cloud)
        CvInvoke.Circle(canvas, Point.Round(pt), 2, new MCvScalar(0, 255, 0), 1);

    //Emgu.CV.UI.ImageViewer.Show(canvas, String.Format("Time used: {0} milliseconds", timer.ElapsedMilliseconds));
}
/// <summary>
/// Generates a random point cloud around a model ellipse, computes its minimum
/// area rectangle, and renders both for visual inspection.
/// </summary>
public void TestMinAreaRect()
{
    // Build a random cloud scattered around the model ellipse.
    System.Random rng = new Random(); // kept from the original (unused by the generator)
    int total = 100;
    Ellipse model = new Ellipse(new PointF(200, 200), new SizeF(90, 60), -60);
    PointF[] cloud = PointCollection.GeneratePointCloud(model, total);

    // Time the minimum-area-rectangle computation.
    Stopwatch timer = Stopwatch.StartNew();
    RotatedRect bounding = CvInvoke.MinAreaRect(cloud);
    timer.Stop();

    // Render the box (red) and the points (green) on a white canvas.
    Mat canvas = new Mat(400, 400, DepthType.Cv8U, 3);
    canvas.SetTo(new MCvScalar(255, 255, 255));
#if NETFX_CORE
    Point[] corners = Extensions.ConvertAll(bounding.GetVertices(), Point.Round);
#else
    Point[] corners = Array.ConvertAll(bounding.GetVertices(), Point.Round);
#endif
    CvInvoke.Polylines(canvas, corners, true, new MCvScalar(0, 0, 255), 1);
    foreach (PointF pt in cloud)
        CvInvoke.Circle(canvas, Point.Round(pt), 2, new MCvScalar(0, 255, 0), 1);

    //Emgu.CV.UI.ImageViewer.Show(canvas, String.Format("Time used: {0} milliseconds", timer.ElapsedMilliseconds));
}
/// <summary>
/// Generates a random point cloud around a model ellipse, fits an ellipse with
/// least squares, and renders the points plus the fitted ellipse.
/// </summary>
public void TestEllipseFitting()
{
    // Build a random cloud scattered around the model ellipse.
    System.Random rng = new Random(); // kept from the original (unused by the generator)
    int total = 100;
    Ellipse model = new Ellipse(new PointF(200, 200), new SizeF(150, 60), 90);
    PointF[] pts = PointCollection.GeneratePointCloud(model, total);

    // Time the least-squares fit.
    Stopwatch timer = Stopwatch.StartNew();
    Ellipse fitted = PointCollection.EllipseLeastSquareFitting(pts);
    timer.Stop();

    // Render the points (green) and the fitted ellipse (red).
    Mat canvas = new Mat(400, 400, DepthType.Cv8U, 3);
    canvas.SetTo(new MCvScalar(255, 255, 255));
    foreach (PointF pt in pts)
        CvInvoke.Circle(canvas, Point.Round(pt), 2, new MCvScalar(0, 255, 0), 1);
    RotatedRect rect = fitted.RotatedRect;
    rect.Angle += 90; //the detected ellipse was off by 90 degree
    CvInvoke.Ellipse(canvas, rect, new MCvScalar(0, 0, 255), 2);

    //Emgu.CV.UI.ImageViewer.Show(canvas, String.Format("Time used: {0} milliseconds", timer.ElapsedMilliseconds));
}
/// <summary>
/// Finds dark elliptical blobs in the image: thresholds and inverts it, fits
/// an ellipse to each large-enough contour, rates how well the contour matches
/// the fitted ellipse, then draws and numbers the accepted ellipses in place.
/// </summary>
/// <param name="image">The BGR image that is analysed and annotated in place.</param>
public void ProcessImage(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image)
{
    // Threshold and invert so the (dark) targets become white foreground.
    Emgu.CV.Image<Gray, byte> gray = image.Convert<Gray, byte>();
    gray._ThresholdBinary(new Gray(_threshold), new Gray(255.0));
    gray._Not();
    //Emgu.CV.Contour<System.Drawing.Point> c = gray.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_CODE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    Emgu.CV.Contour<System.Drawing.Point> c = gray.FindContours();
    List<Ellipse> ellipses = new List<Ellipse>();
    while (c != null)
    {
        // Skip tiny contours that cannot support a stable ellipse fit.
        if (c.Count() >= _min_contour_count)
        {
            System.Drawing.PointF[] mypoints = Array.ConvertAll(
                c.ToArray<System.Drawing.Point>(),
                value => new System.Drawing.PointF(value.X, value.Y));
            Ellipse e = Emgu.CV.PointCollection.EllipseLeastSquareFitting(mypoints);
            // Halve the box dimensions: the rating below treats them as semi-axes.
            MCvBox2D box = e.MCvBox2D;
            box.size.Height *= 0.5f;
            box.size.Width *= 0.5f;
            Ellipse final_ellipse = new Ellipse(box);
            // Build the ellipse-to-image transform and invert it so each
            // contour point can be mapped into the ellipse's local frame.
            // NOTE(review): MCvBox2D.angle is in degrees while Math.Cos/Sin
            // expect radians — confirm whether a Math.PI / 180 conversion is
            // missing here.
            Matrix m = Matrix.Identity(3, 3);
            m[0, 0] = Math.Cos(final_ellipse.MCvBox2D.angle);
            m[0, 1] = -Math.Sin(final_ellipse.MCvBox2D.angle);
            m[0, 2] = final_ellipse.MCvBox2D.center.X;
            m[1, 0] = Math.Sin(final_ellipse.MCvBox2D.angle);
            m[1, 1] = Math.Cos(final_ellipse.MCvBox2D.angle);
            m[1, 2] = final_ellipse.MCvBox2D.center.Y;
            Matrix inv = m.Inverse();
            double rating = 0.0;
            double a = final_ellipse.MCvBox2D.size.Width;
            double b = final_ellipse.MCvBox2D.size.Height;
            if (a < b)
            {
                // BUG FIX: the original wrote "a = tmp" twice, so the swap never
                // happened and b kept the larger value; swap correctly so a is
                // the semi-major axis.
                double tmp = a;
                a = b;
                b = tmp;
            }
            // Sum each point's deviation from the canonical ellipse equation
            // (x/a)^2 + (y/b)^2 = 1; a smaller total means a better fit.
            foreach (System.Drawing.PointF p in mypoints)
            {
                Vector x = new Vector(new double[] { p.X, p.Y, 1 });
                Matrix r = inv.Multiply(x.ToColumnMatrix());
                rating += Math.Abs((Math.Pow(r[0, 0] / a, 2) + Math.Pow(r[1, 0] / b, 2)) - 1);
            }
            Console.WriteLine(rating);
            if (rating < 50)
            {
                ellipses.Add(final_ellipse);
            }
        }
        c = c.HNext;
    }
    // Order the accepted ellipses by squared distance of their centres from
    // the image origin.
    ellipses.Sort(
        (a, b) =>
        {
            double dista = a.MCvBox2D.center.X * a.MCvBox2D.center.X + a.MCvBox2D.center.Y * a.MCvBox2D.center.Y;
            double distb = b.MCvBox2D.center.X * b.MCvBox2D.center.X + b.MCvBox2D.center.Y * b.MCvBox2D.center.Y;
            return dista.CompareTo(distb);
        });
    // Draw and number each ellipse on the original image.
    Bgr bgr = new Bgr(0, 255, 0);
    MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 0.8, 0.8);
    int count = 1;
    foreach (Ellipse e in ellipses)
    {
        image.Draw(e, bgr, 2);
        image.Draw(count.ToString(), ref f, new System.Drawing.Point((int)e.MCvBox2D.center.X, (int)e.MCvBox2D.center.Y), bgr);
        count++;
    }
}
/// <summary>
/// Window load handler: records the hull-display dimensions, initializes the
/// Kinect runtime (colour + smoothed skeletal tracking), configures the skin
/// detector thresholds, and loads the hand Haar cascade.
/// </summary>
private void MainWindow_Loaded(object sender, RoutedEventArgs e)
{
    // Size of the convex-hull display area, captured for later drawing.
    heightHand = (int)imageConvexHull.Height;
    widhtHand = (int)imageConvexHull.Width;

    nui.Initialize(RuntimeOptions.UseColor | RuntimeOptions.UseSkeletalTracking);
    nui.VideoFrameReady += Nui_VideoFrameReady;
    nui.VideoStream.Open(ImageStreamType.Video, 2, ImageResolution.Resolution1280x1024, ImageType.Color);
    #region SmoothTransform
    nui.SkeletonEngine.TransformSmooth = true;
    var parameters = new TransformSmoothParameters
    {
        Smoothing = 0.75f,
        Correction = 0.0f,
        Prediction = 0.0f,
        JitterRadius = 0.05f,
        MaxDeviationRadius = 0.04f
    };
    nui.SkeletonEngine.SmoothParameters = parameters;
    #endregion
    nui.SkeletonFrameReady += Nui_skeleton_SkeletonFrameReady;
    #region HandRecognitionInit
    detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);
    // Skin-colour thresholds in HSV and YCrCb colour spaces.
    hsv_min = new Hsv(10, 45, 50);
    hsv_max = new Hsv(20, 255, 255);
    YCrCb_min = new Ycc(0, 131, 80);
    YCrCb_max = new Ycc(255, 185, 135);
    box = new MCvBox2D();
    ellip = new Emgu.CV.Structure.Ellipse();
    #endregion
    // BUG FIX: the original "new HaarCascade(...) ?? null" was a no-op — the
    // coalesce can never change a constructor result.
    // NOTE(review): a missing XML file most likely makes HaarCascade throw
    // rather than return null; consider a File.Exists check or try/catch.
    haarCascade = new HaarCascade(rootXML + xmlName);
    if (haarCascade == null)
        Console.WriteLine("Haar cascade is null.");
}
/// <summary>
/// Fit an ellipse to the points collection using OpenCV's least-squares fit.
/// </summary>
/// <param name="points">The points to be fitted. At least 5 points are required.</param>
/// <returns>The fitted ellipse, with its angle converted to counter-clockwise rotation in [0, 360).</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="points"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when fewer than 5 points are supplied.</exception>
public static Ellipse EllipseLeastSquareFitting(PointF[] points)
{
    if (points == null)
        throw new ArgumentNullException("points");
    // cvFitEllipse2 requires a minimum of 5 points; fail fast with a managed
    // exception rather than crashing inside native OpenCV.
    if (points.Length < 5)
        throw new ArgumentException("At least 5 points are required to fit an ellipse.", "points");

    IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
    IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
    GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
    try
    {
        // Wrap the pinned managed array in a CvSeq header so the native fitter
        // can read it without a copy.
        CvInvoke.cvMakeSeqHeaderForArray(
            CvInvoke.CV_MAKETYPE((int)CvEnum.MAT_DEPTH.CV_32F, 2),
            StructSize.MCvSeq,
            StructSize.PointF,
            handle.AddrOfPinnedObject(),
            points.Length,
            seq,
            block);

        Ellipse e = new Ellipse(CvInvoke.cvFitEllipse2(seq));

        // The angle returned by cvFitEllipse2 has the wrong sign: it is a
        // clockwise rotation, while MCvBox2D is defined with a counter-clockwise
        // rotation. Negate it and normalise into [0, 360).
        MCvBox2D b = e.MCvBox2D;
        b.angle = -b.angle;
        if (b.angle < 0)
            b.angle += 360;
        e.MCvBox2D = b;

        return e;
    }
    finally
    {
        // BUG FIX: the pin and the two unmanaged headers leaked if the native
        // calls threw. Release them unconditionally.
        handle.Free();
        Marshal.FreeHGlobal(seq);
        Marshal.FreeHGlobal(block);
    }
}
/// <summary>
/// Generate a random point cloud around the ellipse.
/// </summary>
/// <param name="e">The region where the point cloud will be generated. The axes of <paramref name="e"/> correspond to the standard deviations of the random point cloud.</param>
/// <param name="numberOfPoints">The number of points to be generated.</param>
/// <returns>A random point cloud around the ellipse.</returns>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="numberOfPoints"/> is negative.</exception>
public static PointF[] GeneratePointCloud(Ellipse e, int numberOfPoints)
{
    if (numberOfPoints < 0)
        throw new ArgumentOutOfRangeException("numberOfPoints");

    PointF[] cloud = new PointF[numberOfPoints];
    GCHandle handle = GCHandle.Alloc(cloud, GCHandleType.Pinned);
    try
    {
        // View the pinned PointF[] as an Nx2 float matrix so the native RNG can
        // fill it in place, then rotate the cloud into the ellipse's orientation.
        using (Matrix<float> points = new Matrix<float>(numberOfPoints, 2, handle.AddrOfPinnedObject()))
        using (Matrix<float> xValues = points.GetCol(0))
        using (Matrix<float> yValues = points.GetCol(1))
        using (RotationMatrix2D<float> rotation = new RotationMatrix2D<float>(e.MCvBox2D.center, e.MCvBox2D.angle, 1.0))
        {
            // Half the box size is used as the standard deviation on each axis.
            xValues.SetRandNormal(new MCvScalar(e.MCvBox2D.center.X), new MCvScalar(e.MCvBox2D.size.Width / 2.0f));
            yValues.SetRandNormal(new MCvScalar(e.MCvBox2D.center.Y), new MCvScalar(e.MCvBox2D.size.Height / 2.0f));
            rotation.RotatePoints(points);
        }
    }
    finally
    {
        // BUG FIX: the pin leaked if any native call threw; free it unconditionally.
        handle.Free();
    }
    return cloud;
}
/// <summary>
/// Detects ball-sized circular blobs in the Kinect depth frame and returns
/// their 3D positions. Also updates <c>DetectorOverlay</c> with a debug
/// visualisation (accepted balls drawn green with a red centre cross) and
/// <c>debugOut</c> with the Canny edge image.
/// </summary>
/// <param name="kinect">Kinect interface supplying the depth frame and unprojection helpers.</param>
/// <returns>The list of detected balls (position plus physical radius in meters).</returns>
public List<Ball3D> Detect(KinectInterface kinect)
{
    float threshDepth = 8.9f;        // reject anything farther than ~8.9 m
    float expectedRadius = 0.0251f;  // expected physical ball radius (meters)
    float radThres = 0.0025f;        // tolerance around the expected radius
    var balls = new List<Ball3D>();
    int w = KinectInterface.w;
    int h = KinectInterface.h;
    int sw = w / 2;                  // PyrDown halves each dimension
    int sh = h / 2;
    // Depth bytes are the raw millimetre values shifted right by 4; convert the
    // metric threshold to that scale.
    byte depthByte = (byte)((int)threshDepth * 1000 >> 4);

    var dsmall = kinect.FullDepth.PyrDown();
    var depthMask = dsmall.CopyBlank();
    CvInvoke.cvThreshold(dsmall.Ptr, depthMask.Ptr, depthByte, 255, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY_INV);
    var depthMaskBlock = depthMask.Erode(1).Dilate(1); // open: suppress speckle noise
    var depthMaskOverlay = depthMaskBlock.Convert<Bgr, Byte>();
    var edges = depthMaskBlock.Canny(new Gray(180), new Gray(120));
    debugOut = edges.Convert<Bgr, Byte>();

    // BUG FIX: the contour storage was only disposed on the success path; wrap
    // it in 'using' so the native memory is released even if detection throws.
    using (MemStorage storage = new MemStorage())
    {
        for (Contour<System.Drawing.Point> contours = edges.FindContours(); contours != null; contours = contours.HNext)
        {
            int bbxcent = contours.BoundingRectangle.X + contours.BoundingRectangle.Width / 2;
            int bbycent = contours.BoundingRectangle.Y + contours.BoundingRectangle.Height / 2;
            byte bbcentVal = depthMaskBlock.Data[bbycent, bbxcent, 0];
            int minDim = Math.Min(contours.BoundingRectangle.Width, contours.BoundingRectangle.Height);

            // Contour is filled in (mask is set at its centre) and larger than a
            // few pixels — otherwise skip it outright.
            if (bbcentVal == 255 && minDim > 5)
            {
                MCvBox2D box = contours.GetMinAreaRect(storage);
                // Scale the half-resolution coordinates back up to full resolution.
                float xc = (box.center.X * (w / sw));
                float yc = (box.center.Y * (h / sh));
                float rMin = (w / sw) * (float)(Math.Min(box.size.Width, box.size.Height) / 2);
                double dAvg = avgDepth(kinect.depthMM, (int)xc, (int)yc, (int)(rMin * 2 / 3));

                // Unproject the centre and a point one radius to its right to
                // measure the blob's physical size in meters.
                var projectedPosV3 = kinect.UnprojectDepth((float)dAvg, xc, yc);
                var projectedBound = kinect.UnprojectDepth((float)dAvg, xc + rMin, yc);
                float actualRadius = (projectedPosV3 - projectedBound).Length();

                if (actualRadius < expectedRadius + radThres && actualRadius > expectedRadius - radThres)
                {
                    // Accept only near-elliptical contours: the enclosed area must
                    // be at least 90% of the min-area box's inscribed ellipse area.
                    if (contours.Area >= box.size.Width * box.size.Height * Math.PI / 4 * 0.9)
                    {
                        Emgu.CV.Structure.Ellipse ellipse = new Emgu.CV.Structure.Ellipse(
                            box.center,
                            new System.Drawing.SizeF(box.size.Height, box.size.Width),
                            box.angle);
                        depthMaskOverlay.Draw(ellipse, new Bgr(0, 255, 0), 3);
                        depthMaskOverlay.Draw(new Cross2DF(box.center, 10, 10), new Bgr(0, 0, 255), 1);

                        Ball3D ball = new Ball3D()
                        {
                            Position = projectedPosV3.ToLinV(),
                            Radius = actualRadius
                        };
                        balls.Add(ball);
                    }
                }
            }
        }
    }

    DetectorOverlay = depthMaskOverlay;
    return balls;
}