/// <summary>
/// Detect the keypoints from the image
/// </summary>
/// <param name="detector">The keypoint detector</param>
/// <param name="image">The image to extract keypoints from</param>
/// <param name="mask">The optional mask.</param>
/// <returns>An array of key points</returns>
public static MKeyPoint[] Detect(this IFeatureDetector detector, IInputArray image, IInputArray mask = null)
{
   using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
   {
      detector.DetectRaw(image, keypoints, mask);
      return keypoints.ToArray();
   }
}
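/// <summary>
/// A minimal usage sketch of the Detect extension above; not part of the original file.
/// SURFDetector and its (hessianThresh, extended) constructor are assumptions from this
/// era of Emgu CV (later builds move it to Emgu.CV.XFeatures2D.SURF); any IFeatureDetector
/// can be substituted. The hessian threshold of 500 is an arbitrary illustrative value.
/// </summary>
public static void DetectUsageSketch()
{
   using (Image<Gray, Byte> image = EmguAssert.LoadImage<Gray, byte>("box.png"))
   using (SURFDetector surf = new SURFDetector(500, false))
   {
      //Keypoints are detected over the whole image since no mask is supplied
      MKeyPoint[] keypoints = surf.Detect(image, null);
      EmguAssert.WriteLine(String.Format("Detected {0} keypoints", keypoints.Length));
   }
}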
public static bool TestFeature2DTracker(IFeatureDetector keyPointDetector, IDescriptorExtractor descriptorGenerator)
{
   //for (int k = 0; k < 1; k++)
   {
      Feature2D feature2D = null;
      if (keyPointDetector == descriptorGenerator)
      {
         feature2D = keyPointDetector as Feature2D;
      }

      Image<Gray, Byte> modelImage = EmguAssert.LoadImage<Gray, byte>("box.png");
      //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
      //modelImage = modelImage.Resize(400, 400, true);
      //modelImage._EqualizeHist();

      #region extract features from the object image
      Stopwatch stopwatch = Stopwatch.StartNew();
      VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
      Mat modelDescriptors = new Mat();
      if (feature2D != null)
      {
         //A single Feature2D can detect keypoints and compute descriptors in one pass
         feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
      }
      else
      {
         //Otherwise detect first, then compute descriptors for the detected keypoints
         keyPointDetector.DetectRaw(modelImage, modelKeypoints);
         descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
      }
      stopwatch.Stop();
      EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
      #endregion

      //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
      Image<Gray, Byte> observedImage = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
      //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
      //image = image.Resize(400, 400, true);
      //observedImage._EqualizeHist();

      #region extract features from the observed image
      stopwatch.Reset();
      stopwatch.Start();
      VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
      using (Mat observedDescriptors = new Mat())
      {
         if (feature2D != null)
         {
            feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
         }
         else
         {
            keyPointDetector.DetectRaw(observedImage, observedKeypoints);
            descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
         }
         stopwatch.Stop();
         EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
         #endregion

         //Merge the object image and the observed image into one big image for display
         Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

         Rectangle rect = modelImage.ROI;
         PointF[] pts = new PointF[]
         {
            new PointF(rect.Left, rect.Bottom),
            new PointF(rect.Right, rect.Bottom),
            new PointF(rect.Right, rect.Top),
            new PointF(rect.Left, rect.Top)
         };

         HomographyMatrix homography = null;

         stopwatch.Reset();
         stopwatch.Start();

         int k = 2;
         //Binary (8U) descriptors such as ORB are compared with Hamming distance;
         //float descriptors such as SURF use L2
         DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
         //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
         //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
         using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
         using (BruteForceMatcher matcher = new BruteForceMatcher(dt))
         {
            matcher.Add(modelDescriptors);
            matcher.KnnMatch(observedDescriptors, matches, k, null);

            Matrix<byte> mask = new Matrix<byte>(matches.Size, 1);
            mask.SetValue(255);
            //Ratio test: reject matches whose best and second-best distances are too close
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);
            if (nonZeroCount >= 4)
            {
               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
               if (nonZeroCount >= 4)
               {
                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
               }
            }
         }
         stopwatch.Stop();
         EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

         bool success = false;
         if (homography != null)
         {
            //Project the model image corners into the observed image
            PointF[] points = pts.Clone() as PointF[];
            homography.ProjectPoints(points);

            //Offset the projected corners into the lower (observed) half of the merged image
            for (int i = 0; i < points.Length; i++)
            {
               points[i].Y += modelImage.Height;
            }
            res.DrawPolyline(
#if NETFX_CORE
               Extensions.
#else
               Array.
#endif
               ConvertAll<PointF, Point>(points, Point.Round),
               true, new Gray(255.0), 5);
            success = true;
         }
         //Emgu.CV.UI.ImageViewer.Show(res);
         return success;
      }

      /*
       * stopwatch.Reset(); stopwatch.Start();
       * //set the initial region to be the whole image
       * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
       * {
       *    priorMask.SetValue(1.0);
       *    homography = tracker.CamShiftTrack(
       *       observedFeatures,
       *       (RectangleF)observedImage.ROI,
       *       priorMask);
       * }
       * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
       *
       * if (homography != null) //set the initial tracking window to be the whole image
       * {
       *    PointF[] points = pts.Clone() as PointF[];
       *    homography.ProjectPoints(points);
       *
       *    for (int i = 0; i < points.Length; i++)
       *       points[i].Y += modelImage.Height;
       *    res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
       *    return true;
       * }
       * else
       * {
       *    return false;
       * }*/
   }
}
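/// <summary>
/// A hedged usage sketch for TestFeature2DTracker above; not part of the original file.
/// ORBDetector and its (numberOfFeatures) constructor are assumptions from this era of
/// Emgu CV, and 500 features is an arbitrary illustrative value. Passing the same instance
/// as both detector and descriptor generator exercises the single DetectAndCompute path,
/// and ORB's 8U binary descriptors select the Hamming distance branch of the matcher.
/// </summary>
public static void TestFeature2DTrackerWithOrbSketch()
{
   using (ORBDetector orb = new ORBDetector(500))
   {
      bool matched = TestFeature2DTracker(orb, orb);
      EmguAssert.WriteLine(String.Format("Model found in observed image: {0}", matched));
   }
}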