/// <summary>
/// Tracks the current feature set into the newly captured frame using the
/// pyramidal Lucas-Kanade sparse optical flow algorithm, recomputes the hull
/// centroid of the feature cloud, and draws the tracked features and flow
/// vectors onto the output image.
/// </summary>
private void ComputeSparseOpticalFlow()
{
    // Pyramidal Lucas-Kanade: track ActualFeature[0] from grayFrame into
    // nextGrayFrame, producing NextFeature plus per-point status/error.
    OpticalFlow.PyrLK(
        grayFrame,
        nextGrayFrame,
        ActualFeature[0],
        new System.Drawing.Size(10, 10),
        3,
        new MCvTermCriteria(20, 0.03d),
        out NextFeature,
        out Status,
        out TrackError);

    // Convex hull of the current feature cloud and its centroid.
    // NOTE(review): the hull is built from ActualFeature[0], not NextFeature —
    // confirm ActualFeature is refreshed from NextFeature elsewhere per frame.
    using (MemStorage storage = new MemStorage())
        nextHull = PointCollection.ConvexHull(ActualFeature[0], storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE).ToArray();

    nextCentroid = FindCentroid(nextHull);

    // Visualize every tracked feature together with its optical flow vector.
    for (int featureIndex = 0; featureIndex < ActualFeature[0].Length; featureIndex++)
    {
        DrawTrackedFeatures(featureIndex);
        DrawFlowVectors(featureIndex);
    }
}
/// <summary>
/// Detects a face in the first captured frame with a Haar cascade classifier,
/// builds a scaled tracking rectangle centered on the detected face, and
/// extracts the initial set of good features (refined to sub-pixel accuracy)
/// that subsequent frames will track. Also stores the convex hull of those
/// features and its centroid as the motion reference.
/// </summary>
private void InitializeFaceTracking()
{
    _faces = new HaarCascade("haarcascade_frontalface_alt_tree.xml");
    frame = _capture.QueryFrame();

    // We convert it to grayscale; detection and feature extraction both
    // operate on single-channel images.
    grayFrame = frame.Convert<Gray, Byte>();

    // Detect faces with the Haar cascade; we only proceed when exactly one
    // face was found, and we work only on that face area.
    faceDetected = grayFrame.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));

    if (faceDetected[0].Length == 1)
    {
        // Hoisted: the original repeated faceDetected[0][0].rect everywhere
        // and also assigned a full-face trackingArea that was dead code
        // (overwritten below before any use).
        Rectangle face = faceDetected[0][0].rect;

        // Shrink (factor < 1) or enlarge the feature-search area relative to
        // the detected face, keeping it centered on the face rectangle.
        // (face.X + (face.Width - w) / 2 is the simplified, equivalent form of
        // the original ((X + w) - (X + Width)) / 2 offset arithmetic.)
        float scalingAreaFactor = 0.6f;
        int trackingAreaWidth = (int)(face.Width * scalingAreaFactor);
        int trackingAreaHeight = (int)(face.Height * scalingAreaFactor);
        int leftXTrackingCoord = face.X + (face.Width - trackingAreaWidth) / 2;
        int leftYTrackingCoord = face.Y + (face.Height - trackingAreaHeight) / 2;
        trackingArea = new Rectangle(leftXTrackingCoord, leftYTrackingCoord, trackingAreaWidth, trackingAreaHeight);
        // NOTE(review): with scalingAreaFactor > 1 the tracking area can fall
        // outside the frame bounds — confirm clamping if the factor changes.

        // Copy the tracking area out of the frame via a temporary ROI, then
        // restore the full ROI. The grayscale image comes straight from
        // Convert — the original pre-allocated a gray image here and leaked it
        // by immediately overwriting the reference.
        faceImage = new Image<Bgr, Byte>(trackingArea.Width, trackingArea.Height);
        frame.ROI = trackingArea;
        frame.Copy(faceImage, null);
        frame.ROI = Rectangle.Empty;
        faceGrayImage = faceImage.Convert<Gray, Byte>();

        // Detect good features that will be tracked in following frames and
        // refine them to sub-pixel accuracy.
        ActualFeature = faceGrayImage.GoodFeaturesToTrack(400, 0.5d, 5d, 5);
        faceGrayImage.FindCornerSubPix(ActualFeature, new Size(5, 5), new Size(-1, -1), new MCvTermCriteria(25, 1.5d));

        // Features were computed in ROI-local coordinates; shift them back to
        // full-frame coordinates.
        for (int i = 0; i < ActualFeature[0].Length; i++)
        {
            ActualFeature[0][i].X += trackingArea.X;
            ActualFeature[0][i].Y += trackingArea.Y;
        }

        // Convex hull of the initial features and its centroid, used as the
        // reference for later motion estimation.
        using (MemStorage storage = new MemStorage())
            hull = PointCollection.ConvexHull(ActualFeature[0], storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE).ToArray();

        referenceCentroid = FindCentroid(hull);
    }
}
// Code adapted and improved from: http://blog.csharphelper.com/2010/01/04/find-a-polygons-centroid-in-c.aspx
// Math: centroid of a polygon, http://en.wikipedia.org/wiki/Centroid
/// <summary>
/// Computes the centroid of the convex hull of the given feature points using
/// the standard polygon-centroid formula Cx = 1/(6A) * sum((x_i + x_i+1) *
/// (x_i * y_i+1 - x_i+1 * y_i)), where A is the SIGNED polygon area.
/// </summary>
/// <param name="Feature">Feature points whose hull centroid is wanted.</param>
/// <returns>Centroid of the convex hull of <paramref name="Feature"/>.</returns>
private PointF FindCentroid(PointF[] Feature)
{
    PointF[] Hull;

    // Computing convex hull of the feature cloud.
    using (MemStorage storage = new MemStorage())
        Hull = PointCollection.ConvexHull(Feature, storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE).ToArray();

    // Close the polygon by repeating the first vertex at the end.
    int num_points = Hull.Length;
    PointF[] pts = new PointF[num_points + 1];
    Hull.CopyTo(pts, 0);
    pts[num_points] = Hull[0];

    // Accumulate the centroid sums over every edge of the closed polygon.
    float X = 0;
    float Y = 0;
    for (int i = 0; i < num_points; i++)
    {
        float second_factor = pts[i].X * pts[i + 1].Y - pts[i + 1].X * pts[i].Y;
        X += (pts[i].X + pts[i + 1].X) * second_factor;
        Y += (pts[i].Y + pts[i + 1].Y) * second_factor;
    }

    float signedArea = SignedPolygonArea(Hull);
    if (signedArea == 0)
    {
        // Degenerate hull (empty, single point, or collinear points): the
        // polygon formula would divide by zero, so fall back to the mean of
        // the hull vertices.
        float meanX = 0;
        float meanY = 0;
        for (int i = 0; i < num_points; i++)
        {
            meanX += Hull[i].X;
            meanY += Hull[i].Y;
        }
        return num_points > 0 ? new PointF(meanX / num_points, meanY / num_points) : PointF.Empty;
    }

    // Divide by 6 times the SIGNED area. This is correct for both vertex
    // orientations and replaces the original abs(area) + "negate both if
    // X < 0" heuristic, which would silently corrupt the result for any
    // polygon whose true centroid has a negative X coordinate.
    X /= (6 * signedArea);
    Y /= (6 * signedArea);

    return new PointF(X, Y);
}