public void RansacLineConstructorTest2()
{
    Accord.Math.Random.Generator.Seed = 0;

    Bitmap image = Accord.Imaging.Image.Clone(Resources.noise_line);

    //Accord.Controls.ImageBox.Show(image);

    var detector = new SusanCornersDetector();

    List<IntPoint> cloud = detector.ProcessImage(image);
    Assert.AreEqual(211, cloud.Count);

    Bitmap marks = new PointsMarker(cloud, Color.Pink).Apply(image);
    //Accord.Controls.ImageBox.Show(marks);

    RansacLine ransac = new RansacLine(5, 1e-10);
    Line line = ransac.Estimate(cloud);

    Assert.AreEqual(0.501134932f, line.Intercept, 1e-5);
    Assert.AreEqual(-0.865369201f, line.Slope, 1e-5);

    //var result = new LineMarker(line).Apply(image);
    //Accord.Controls.ImageBox.Show(result);
}
public void PointsMarkerTest1()
{
    IEnumerable<FastRetinaKeypoint> points = new FastRetinaKeypoint[]
    {
        new FastRetinaKeypoint(1, 2),
        new FastRetinaKeypoint(3, 4),
    };

    var marker = new PointsMarker(points);

    double[,] m = Matrix.Zeros(5, 5);
    Bitmap bmp = m.ToBitmap();

    marker.ApplyInPlace(bmp);

    double[,] actual = bmp.ToMatrix(0);

    // Each keypoint is drawn as a 3-pixel-wide square (the marker's default width),
    // clipped at the image borders.
    double[,] expected =
    {
        { 0, 0, 0, 0, 0 },
        { 1, 1, 1, 0, 0 },
        { 1, 1, 1, 0, 0 },
        { 1, 1, 1, 1, 1 },
        { 0, 0, 1, 1, 1 },
    };

    Assert.AreEqual(expected, actual);
}
public Bitmap DrawPoints(Color color)
{
    Bitmap bmp = new Bitmap(n, n, PixelFormat.Format24bppRgb);
    PointsMarker pm = new PointsMarker(Points, color);
    return pm.Apply(bmp);
}
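// A hypothetical, self-contained context for the DrawPoints helper above. The names
// n and Points match the helper, but the containing class is not shown in the original
// snippet, so the class name and the literal values below are assumptions for
// illustration only.
class PointCloudSketch
{
    private readonly int n = 256;   // assumed square canvas size

    public List<IntPoint> Points { get; } = new List<IntPoint>
    {
        new IntPoint(10, 20),
        new IntPoint(100, 150)
    };

    public Bitmap DrawPoints(Color color)
    {
        Bitmap bmp = new Bitmap(n, n, PixelFormat.Format24bppRgb);
        PointsMarker pm = new PointsMarker(Points, color);
        return pm.Apply(bmp);
    }
}

// Usage: render the stored points in red and save the result.
// Bitmap rendered = new PointCloudSketch().DrawPoints(Color.Red);
// rendered.Save("points.png");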
public void blobcounter_test()
{
    string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "Resources");

    #region doc_process
    // Load an example image containing blobs (such as the sample from the Blob Detection sample applications)
    // https://github.com/accord-net/framework/raw/development/Samples/Imaging/Detection%20(Blobs)/demo.png
    Bitmap image = Accord.Imaging.Image.FromFile(Path.Combine(basePath, "blob-input.png"));

    // Create a new blob counter object
    var blobCounter = new BlobCounter();

    // Process the image looking for blobs
    blobCounter.ProcessImage(image);

    // Get information about all the image blobs found:
    Blob[] blobs = blobCounter.GetObjectsInformation();

    // Prepare to extract their convex hulls
    var grahamScan = new GrahamConvexHull();
    var colors = new ColorSequenceCollection();

    // For each blob in the image
    for (int i = 0; i < blobs.Length; i++)
    {
        // Get the blob
        Blob blob = blobs[i];

        // Collect its edge points
        List<IntPoint> edgePoints = blobCounter.GetBlobsEdgePoints(blob);

        // Find the convex hull
        List<IntPoint> hull = grahamScan.FindHull(edgePoints);

        // Prepare to mark the hull in the image
        var marker = new PointsMarker(colors[i])
        {
            Points = hull,
            Connect = true // connect the points with line segments
        };

        // Draw the hull lines
        marker.ApplyInPlace(image);
    }

    // Save the image to disk
    image.Save(Path.Combine(basePath, "test.png"));
    #endregion

    Assert.AreEqual(25, blobs.Length);
}
private static void show(Bitmap hand, List<IntPoint> contour, List<IntPoint> peaks, List<IntPoint> supports)
{
    PointsMarker cmarker = new PointsMarker(contour, Color.White, 1);
    cmarker.ApplyInPlace(hand);

    PointsMarker pmarker = new PointsMarker(peaks, Color.Green, 5);
    pmarker.ApplyInPlace(hand);

    PointsMarker hmarker = new PointsMarker(supports, Color.Yellow, 5);
    hmarker.ApplyInPlace(hand);

    ImageBox.Show(hand, PictureBoxSizeMode.Zoom);
}
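// Hypothetical call to the show() helper above. The bitmap and the three point lists
// would normally come from a hand-contour analysis step that is not part of this
// snippet; the file name and literal points below are placeholders only.
Bitmap hand = Accord.Imaging.Image.FromFile("hand.png");
List<IntPoint> contour = new List<IntPoint> { new IntPoint(10, 10), new IntPoint(11, 10), new IntPoint(12, 10) };
List<IntPoint> peaks = new List<IntPoint> { new IntPoint(10, 10) };
List<IntPoint> supports = new List<IntPoint> { new IntPoint(12, 10) };

// The contour is drawn 1 pixel wide in white, while peaks and supports are drawn as
// 5-pixel markers in green and yellow (the third constructor argument is the width).
show(hand, contour, peaks, supports);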
private void btnFreak_Click(object sender, EventArgs e)
{
    // Step 1: Detect feature points using the FREAK features detector
    FastRetinaKeypointDetector freak = new FastRetinaKeypointDetector();
    keyPoints1 = freak.ProcessImage(img1).ToArray();
    keyPoints2 = freak.ProcessImage(img2).ToArray();

    // Show the marked points in the original images
    Bitmap img1mark = new PointsMarker(keyPoints1.Apply(p => (IntPoint)p)).Apply(img1);
    Bitmap img2mark = new PointsMarker(keyPoints2.Apply(p => (IntPoint)p)).Apply(img2);

    // Concatenate the two images together in a single image (just to show on screen)
    Concatenate concatenate = new Concatenate(img1mark);
    pictureBox.Image = concatenate.Apply(img2mark);
}
private void btnHarris_Click(object sender, EventArgs e)
{
    // Step 1: Detect feature points using the Harris corners detector
    HarrisCornersDetector harris = new HarrisCornersDetector(0.04f, 1000f);
    harrisPoints1 = harris.ProcessImage(img1).ToArray();
    harrisPoints2 = harris.ProcessImage(img2).ToArray();

    // Show the marked points in the original images
    Bitmap img1mark = new PointsMarker(harrisPoints1).Apply(img1);
    Bitmap img2mark = new PointsMarker(harrisPoints2).Apply(img2);

    // Concatenate the two images together in a single image (just to show on screen)
    Concatenate concatenate = new Concatenate(img1mark);
    pictureBox.Image = concatenate.Apply(img2mark);
}
// Interest point detection
private void InterestPtDetector()
{
    // Step 1: Detect feature points using the Harris corners detector
    HarrisCornersDetector harris = new HarrisCornersDetector(0.04f, 1000f);
    harrisPoints1 = harris.ProcessImage(_img1).ToArray();
    harrisPoints2 = harris.ProcessImage(_img2).ToArray();

    // Show the marked points in the original images
    Bitmap img1mark = new PointsMarker(harrisPoints1).Apply(_img1);
    Bitmap img2mark = new PointsMarker(harrisPoints2).Apply(_img2);

    // Concatenate the two images together in a single image (just to show on screen)
    Concatenate concatenate = new Concatenate(img1mark);
    _processImage1 = concatenate.Apply(img2mark);
}
/// <summary>
/// Detects Harris corners in both images, marks them, and shows the results side by side.
/// </summary>
/// <param name="sender">The control that raised the event.</param>
/// <param name="e">The event arguments.</param>
private void BtnHarris_OnClick(object sender, RoutedEventArgs e)
{
    // Step 1: Detect feature points using the Harris corners detector
    var harris = new HarrisCornersDetector(0.04f, 1000f);
    this.harrisPoints1 = harris.ProcessImage(this.img1).ToArray();
    this.harrisPoints2 = harris.ProcessImage(this.img2).ToArray();

    // Show the marked points in the original images
    var img1mark = new PointsMarker(this.harrisPoints1).Apply(this.img1);
    var img2mark = new PointsMarker(this.harrisPoints2).Apply(this.img2);

    // Concatenate the two images together in a single image (just to show on screen)
    var concatenate = new Concatenate(img1mark);
    this.PictureBox.Source = (ImageSource)concatenate.Apply(img2mark);
}
public void FindTest()
{
    List<IntPoint> contour = new List<IntPoint>();

    int max = 100;
    for (int i = 0; i < max; i++)
    {
        add(contour, i, max);
    }
    for (int i = 0; i < max; i++)
    {
        add(contour, max, i);
    }
    for (int i = 0; i < max; i++)
    {
        add(contour, 0, i);
    }
    for (int i = 0; i < max / 2; i++)
    {
        add(contour, i, i);
    }
    for (int i = 0; i < max / 2; i++)
    {
        add(contour, i + max / 2, max / 2 - i);
    }

    PointsMarker marker = new PointsMarker(contour);
    var bitmap = AForge.Imaging.Image.CreateGrayscaleImage(max + 1, max + 1);
    bitmap = marker.Apply(bitmap);
    // ImageBox.Show(bitmap);

    GrahamConvexHull graham = new GrahamConvexHull();
    List<IntPoint> hull = graham.FindHull(contour);

    ConvexHullDefects hullDefects = new ConvexHullDefects(10);
    List<ConvexityDefect> defects = hullDefects.FindDefects(contour, hull);

    Assert.AreEqual(1, defects.Count);
    Assert.AreEqual(99, defects[0].Depth);
}
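// The add() helper referenced in FindTest above is not included in the snippet.
// A plausible implementation, assuming it simply appends the (x, y) pair to the
// contour as an IntPoint:
private static void add(List<IntPoint> contour, int x, int y)
{
    contour.Add(new IntPoint(x, y));
}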
private void btnFreak_Click(object sender, EventArgs e)
{
    // Step 1: Detect feature points using the FREAK features detector
    FastRetinaKeypointDetector freak = new FastRetinaKeypointDetector();
    keyPoints1 = freak.Transform(img1);
    keyPoints2 = freak.Transform(img2);

    // Show the marked points in the original images
    // TODO: The following construct can be simplified
    Bitmap img1mark = new PointsMarker(keyPoints1.Select(x => (IFeaturePoint)x).ToList()).Apply(img1);
    Bitmap img2mark = new PointsMarker(keyPoints2.Select(x => (IFeaturePoint)x).ToList()).Apply(img2);

    // Concatenate the two images together in a single image (just to show on screen)
    Concatenate concatenate = new Concatenate(img1mark);
    pictureBox.Image = concatenate.Apply(img2mark);
}
public void ApplyTest1()
{
    string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "watershed");
    Directory.CreateDirectory(basePath);

    Bitmap shapes = Accord.Imaging.Image.Clone(Resources.water);
    shapes.Save(Path.Combine(basePath, "shapes.jpg"));

    var bw = new BinaryWatershed();
    Bitmap result = bw.Apply(shapes);

    Assert.AreEqual(746, result.Width);
    Assert.AreEqual(643, result.Height);
    Assert.AreEqual(PixelFormat.Format8bppIndexed, result.PixelFormat);
    Assert.AreEqual(9, bw.MaxPoints.Count);

    string strX = bw.MaxPoints.Select(i => i.X).ToArray().ToCSharp();
    string strY = bw.MaxPoints.Select(i => i.Y).ToArray().ToCSharp();

    double[] x = new double[] { 310, 546, 136, 254, 429, 612, 398, 345, 498 };
    double[] y = new double[] { 436, 153, 392, 201, 336, 339, 242, 183, 319 };

    Assert.AreEqual(x, bw.MaxPoints.Select(i => i.X).ToArray());
    Assert.AreEqual(y, bw.MaxPoints.Select(i => i.Y).ToArray());

    result.Save(Path.Combine(basePath, "watershed.jpg"));

    GrayscaleToRGB toRGB = new GrayscaleToRGB();
    result = toRGB.Apply(result);

    PointsMarker marker = new PointsMarker(Color.Red, 5);
    marker.Points = bw.MaxPoints;
    Bitmap marked = marker.Apply(result);

    marked.Save(Path.Combine(basePath, "watershed-marks.jpg"));

    Assert.IsNotNull(result);
    Assert.IsNotNull(marked);
}
public void RansacLineConstructorTest2()
{
    Bitmap image = Properties.Resources.noise_line;
    ImageBox.Show(image);

    var detector = new SusanCornersDetector();
    List<IntPoint> cloud = detector.ProcessImage(image);

    Bitmap marks = new PointsMarker(cloud, Color.Pink).Apply(image);
    ImageBox.Show(marks);

    RansacLine ransac = new RansacLine(5, 1e-10);
    Line line = ransac.Estimate(cloud);

    Bitmap result = new LineMarker(line).Apply(image);
    ImageBox.Show(result);

    Assert.Fail();
}
public static Bitmap MarkPoints(Bitmap bmp, List<IntPoint> points, Color color)
{
    PointsMarker pm = new PointsMarker(points, color);
    return pm.Apply(bmp);
}
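// Hypothetical usage of the MarkPoints helper above: detect corners with the SUSAN
// detector (as in the other snippets above) and mark them in pink. The file names
// are placeholders, not part of the original sample.
Bitmap input = Accord.Imaging.Image.FromFile("input.png");
List<IntPoint> corners = new SusanCornersDetector().ProcessImage(input);
Bitmap marked = MarkPoints(input, corners, Color.Pink);
marked.Save("marked.png");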
/// <summary>
/// Takes the video and processes it two frames at a time to calculate
/// optical flow features and save them on the disk.
/// </summary>
/// <param name="vid">Path of the video on the disk.</param>
/// <param name="save_path">Path to save the features on the disk.</param>
/// <returns></returns>
public void Extract_Featurers2(String vid, String save_path)
{
    int mm = 0;

    try
    {
        mag = new Mat();
        ang = new Mat();
        frame = new Mat();
        prev_frame = new Mat();

        cap = new VideoCapture(vid);
        total_frames = Convert.ToInt32(cap.GetCaptureProperty(CapProp.FrameCount));
        F_L = new List<int>();

        frame = cap.QueryFrame();
        prev_frame = frame;

        Console.WriteLine(total_frames);
    }
    catch (NullReferenceException except)
    {
        Console.WriteLine(except.Message);
    }

    //17900
    while (mm < total_frames - 2)
    {
        try
        {
            prev_frame = frame;
            frame = cap.QueryFrame();

            Bitmap image = new Bitmap(frame.Bitmap);

            // Create a new FAST corners detector
            FastCornersDetector fast = new FastCornersDetector()
            {
                Suppress = true, // suppress non-maximum points
                Threshold = 70   // lower values lead to more corners
            };

            // Process the image looking for corners
            List<IntPoint> points = fast.ProcessImage(image);

            // Create a filter to mark the corners
            PointsMarker marker = new PointsMarker(points);

            // Apply the corner-marking filter
            Bitmap markers = marker.Apply(image);

            // Show on the screen
            //Accord.Controls.ImageBox.Show(markers);

            // Use it to extract interest points from the current frame:
            List<IntPoint> descriptors = fast.ProcessImage(image);

            PointF[] features = new PointF[descriptors.Count];
            int c = 0;
            foreach (IntPoint p in descriptors)
            {
                features[c] = new PointF(p.X, p.Y);
                c++;
            }

            ImageViewer viewer = new ImageViewer();

            Image<Gray, Byte> prev_grey_img = new Image<Gray, byte>(frame.Width, frame.Height);
            Image<Gray, Byte> curr_grey_img = new Image<Gray, byte>(frame.Width, frame.Height);
            curr_grey_img = frame.ToImage<Gray, byte>();
            prev_grey_img = prev_frame.ToImage<Gray, Byte>();

            PointF[] shiftedFeatures;
            Byte[] status;
            float[] trackErrors;

            CvInvoke.CalcOpticalFlowPyrLK(prev_grey_img, curr_grey_img, features, new Size(9, 9), 3,
                new MCvTermCriteria(20, 0.05), out shiftedFeatures, out status, out trackErrors);

            //Image<Gray, Byte> displayImage = cap.QueryFrame().ToImage<Gray, Byte>();
            //for (int i = 0; i < features.Length; i++)
            //    displayImage.Draw(new LineSegment2DF(features[i], shiftedFeatures[i]), new Gray(), 2);

            for (int i = 0; i < features.Length; i++)
            {
                CvInvoke.Circle(frame, System.Drawing.Point.Round(shiftedFeatures[i]), 4, new MCvScalar(0, 255, 255), 2);
            }

            int mean_X = 0;
            int mean_Y = 0;

            foreach (PointF p in shiftedFeatures)
            {
                mean_X += (int)p.X;
                mean_Y += (int)p.Y;
            }

            mean_X /= shiftedFeatures.Length;
            mean_Y /= shiftedFeatures.Length;

            F_L.Add(mean_X);
            F_L.Add(mean_Y);

            //double[] inner = new double[] { mean_X, mean_Y };
            //featuers_list[mm] = inner;

            //viewer.Image = frame;
            //viewer.ShowDialog();

            //prev_frame = frame;

            //Console.WriteLine("frame:{0} " + mm);
            Console.WriteLine("frame:{0} " + mm + " X:{1} " + mean_X + " Y:{2} " + mean_Y);
            mm++;
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }

    //int go = 0;
    //foreach (double[] arr in featuers_list)
    //{
    //    Console.Write("frame:{0} ", go++);
    //    foreach (double d in arr)
    //        Console.Write(d + " ");
    //    Console.WriteLine();
    //}

    Serialize.SerializeObject(F_L, save_path);
}