/// <summary>
/// cvContourArea / cvArcLength sample: draws a random closed polyline,
/// then compares its contour area and length against the area and
/// perimeter of its bounding rectangle.
/// </summary>
public Contour()
{
    const int SIZE = 500;

    // (1) Allocate the canvas and clear it to black.
    using (CvMemStorage storage = new CvMemStorage())
    using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
    {
        img.Zero();

        // (2) Generate 20 pseudo-random vertices scattered around a circle
        //     of nominal radius SIZE/4 centered in the image.
        CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
        CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);

        double scale = rng.RandReal() + 0.5;
        CvPoint prev = new CvPoint
        {
            X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
            Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
        };
        img.Circle(prev, 2, CvColor.Green);
        points.Push(prev);

        for (int i = 1; i < 20; i++)
        {
            scale = rng.RandReal() + 0.5;
            CvPoint curr = new CvPoint
            {
                X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
            };
            img.Line(prev, curr, CvColor.Green, 2);
            prev.X = curr.X;
            prev.Y = curr.Y;
            img.Circle(prev, 3, CvColor.Green, Cv.FILLED);
            points.Push(prev);
        }
        // Close the polyline back to the first vertex.
        img.Line(prev, points.GetSeqElem(0).Value, CvColor.Green, 2);

        // (3) Bounding rectangle, contour area and contour length.
        CvRect rect = points.BoundingRect(false);
        double area = points.ContourArea();
        double length = points.ArcLength(CvSlice.WholeSeq, 1);

        // (4) Draw the bounding rectangle and print both measurements.
        img.Rectangle(new CvPoint(rect.X, rect.Y),
                      new CvPoint(rect.X + rect.Width, rect.Y + rect.Height),
                      CvColor.Red, 2);
        string areaText = string.Format("Area: wrect={0}, contour={1}",
                                        rect.Width * rect.Height, area);
        string lengthText = string.Format("Length: rect={0}, contour={1}",
                                          2 * (rect.Width + rect.Height), length);
        using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
        {
            img.PutText(areaText, new CvPoint(10, img.Height - 30), font, CvColor.White);
            img.PutText(lengthText, new CvPoint(10, img.Height - 10), font, CvColor.White);
        }

        // (5) Show the result and wait for a key press.
        using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
        {
            window.Image = img;
            CvWindow.WaitKey(0);
        }
    }
}
/// <summary>
/// Demonstrates cvContourArea and cvArcLength: builds a random closed
/// polyline and reports the area enclosed by the contour and the contour
/// length, side by side with the bounding rectangle's area and perimeter.
/// </summary>
public Contour()
{
    const int SIZE = 500;

    // (1) Allocate and zero the output image.
    using (CvMemStorage storage = new CvMemStorage())
    using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
    {
        img.Zero();

        // (2) Build the point sequence: 20 vertices at randomized radii
        //     around the image center.
        CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
        CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);

        double scale = rng.RandReal() + 0.5;
        CvPoint p0 = new CvPoint
        {
            X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
            Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
        };
        img.Circle(p0, 2, CvColor.Green);
        points.Push(p0);

        for (int i = 1; i < 20; i++)
        {
            scale = rng.RandReal() + 0.5;
            CvPoint p1 = new CvPoint
            {
                X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
            };
            img.Line(p0, p1, CvColor.Green, 2);
            p0.X = p1.X;
            p0.Y = p1.Y;
            img.Circle(p0, 3, CvColor.Green, Cv.FILLED);
            points.Push(p0);
        }
        // Connect the last vertex back to the first to close the contour.
        img.Line(p0, points.GetSeqElem(0).Value, CvColor.Green, 2);

        // (3) Measure: bounding rectangle, contour area, contour length.
        CvRect rect = points.BoundingRect(false);
        double area = points.ContourArea();
        double length = points.ArcLength(CvSlice.WholeSeq, 1);

        // (4) Render the bounding rectangle and the measurement text.
        img.Rectangle(new CvPoint(rect.X, rect.Y),
                      new CvPoint(rect.X + rect.Width, rect.Y + rect.Height),
                      CvColor.Red, 2);
        string text_area = string.Format("Area: wrect={0}, contour={1}",
                                         rect.Width * rect.Height, area);
        string text_length = string.Format("Length: rect={0}, contour={1}",
                                           2 * (rect.Width + rect.Height), length);
        using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
        {
            img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
            img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
        }

        // (5) Display until a key is pressed.
        using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
        {
            window.Image = img;
            CvWindow.WaitKey(0);
        }
    }
}
/// <summary>
/// Fills a point sequence with random points, assigns each point a random
/// (reasonably bright) color, then opens a window whose "threshold"
/// trackbar re-runs <c>OnTrack</c> on every change.
/// </summary>
public SeqPartition()
{
    CvMemStorage storage = new CvMemStorage(0);
    pointSeq = new CvSeq<CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
    Random rand = new Random();
    canvas = new IplImage(Width, Height, BitDepth.U8, 3);

    colors = new CvScalar[Count];
    for (int i = 0; i < Count; i++)
    {
        // Scatter a random point across the canvas.
        pointSeq.Push(new CvPoint { X = rand.Next(Width), Y = rand.Next(Height) });

        // OR-ing 0x00404040 keeps every 8-bit channel >= 0x40 so the
        // color is never too dark to see on the black canvas.
        int icolor = rand.Next() | 0x00404040;
        colors[i] = Cv.RGB(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255);
    }

    using (window = new CvWindowEx() { Text = "points" })
    {
        window.CreateTrackbar("threshold", 10, 50, OnTrack);
        OnTrack(10);
        CvWindowEx.WaitKey();
    }
}
/// <summary>
/// Generates <c>Count</c> random points into <c>pointSeq</c>, pairing each
/// with a random color, then shows a trackbar-driven window that calls
/// <c>OnTrack</c> whenever the threshold changes.
/// </summary>
public SeqPartition()
{
    CvMemStorage storage = new CvMemStorage(0);
    pointSeq = new CvSeq<CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
    Random random = new Random();
    canvas = new IplImage(Width, Height, BitDepth.U8, 3);
    colors = new CvScalar[Count];

    for (int i = 0; i < Count; i++)
    {
        CvPoint point = new CvPoint
        {
            X = random.Next(Width),
            Y = random.Next(Height)
        };
        pointSeq.Push(point);

        // Force each channel to at least 0x40 so the color stays visible.
        int packed = random.Next() | 0x00404040;
        colors[i] = Cv.RGB(packed & 255, (packed >> 8) & 255, (packed >> 16) & 255);
    }

    using (window = new CvWindowEx() { Text = "points" })
    {
        window.CreateTrackbar("threshold", 10, 50, OnTrack);
        OnTrack(10);
        CvWindowEx.WaitKey();
    }
}
/// <summary>
/// CvSeq walkthrough: push ten random values, enumerate them, sort the
/// sequence with a comparison delegate, dump it as an array, then pop
/// every element back off.
/// </summary>
public unsafe SeqTest()
{
    using (CvMemStorage storage = new CvMemStorage(0))
    {
        Random rand = new Random();
        CvSeq<int> seq = new CvSeq<int>(SeqType.EltypeS32C1, storage);

        // push
        for (int i = 0; i < 10; i++)
        {
            int pushed = seq.Push(rand.Next(100));
            Console.WriteLine("{0} is pushed", pushed);
        }
        Console.WriteLine("----------");

        // enumerate
        Console.WriteLine("contents of seq");
        foreach (int item in seq)
        {
            Console.Write("{0} ", item);
        }
        Console.WriteLine();

        // sort ascending
        CvCmpFunc<int> compare = (a, b) => a.CompareTo(b);
        seq.Sort(compare);

        // convert to array and dump the sorted contents
        Console.WriteLine("contents of sorted seq");
        foreach (int item in seq.ToArray())
        {
            Console.Write("{0} ", item);
        }
        Console.WriteLine();
        Console.WriteLine("----------");

        // pop
        for (int i = 0; i < 10; i++)
        {
            Console.WriteLine("{0} is popped", seq.Pop());
        }
        Console.ReadKey();
    }
}
/// <summary>
/// Exercises the basic CvSeq operations in order: Push, enumeration,
/// Sort (via CvCmpFunc), ToArray, and Pop, logging each step to the console.
/// </summary>
public unsafe SeqTest()
{
    using (CvMemStorage storage = new CvMemStorage(0))
    {
        Random random = new Random();
        CvSeq<int> sequence = new CvSeq<int>(SeqType.EltypeS32C1, storage);

        // Push ten random values in [0, 100).
        for (int i = 0; i < 10; i++)
        {
            Console.WriteLine("{0} is pushed", sequence.Push(random.Next(100)));
        }
        Console.WriteLine("----------");

        // Enumerate the sequence directly.
        Console.WriteLine("contents of seq");
        foreach (int value in sequence)
        {
            Console.Write("{0} ", value);
        }
        Console.WriteLine();

        // Sort ascending using a comparison delegate.
        CvCmpFunc<int> comparer = (x, y) => x.CompareTo(y);
        sequence.Sort(comparer);

        // Materialize to an array and print the sorted values.
        int[] sorted = sequence.ToArray();
        Console.WriteLine("contents of sorted seq");
        foreach (int value in sorted)
        {
            Console.Write("{0} ", value);
        }
        Console.WriteLine();
        Console.WriteLine("----------");

        // Pop all ten values back off the sequence.
        for (int i = 0; i < 10; i++)
        {
            int popped = sequence.Pop();
            Console.WriteLine("{0} is popped", popped);
        }
        Console.ReadKey();
    }
}
/// <summary>
/// Returns the sequence of squares detected on the image.
/// The point sequences are allocated in the specified memory storage.
/// </summary>
/// <param name="img">Input 8-bit 3-channel image.</param>
/// <param name="storage">Memory storage used for contour and point sequences.</param>
/// <returns>Detected square vertices, 4 consecutive points per square.</returns>
static CvPoint[] FindSquares4(IplImage img, CvMemStorage storage)
{
    const int N = 11;
    CvSize sz = new CvSize(img.Width & -2, img.Height & -2);

    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

    // FIX: the original disposed the four temporary images manually at the
    // end, leaking all of them if any OpenCV call threw; 'using' guarantees
    // disposal on every path.
    using (IplImage timg = img.Clone()) // make a copy of the input image
    using (IplImage gray = new IplImage(sz, BitDepth.U8, 1))
    using (IplImage pyr = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3))
    using (IplImage tgray = new IplImage(sz, BitDepth.U8, 1))
    {
        // select the maximum ROI in the image
        // with the width and height divisible by 2
        timg.ROI = new CvRect(0, 0, sz.Width, sz.Height);

        // down-scale and upscale the image to filter out the noise
        Cv.PyrDown(timg, pyr, CvFilter.Gaussian5x5);
        Cv.PyrUp(pyr, timg, CvFilter.Gaussian5x5);

        // find squares in every color plane of the image
        for (int c = 0; c < 3; c++)
        {
            // extract the c-th color plane
            timg.COI = c + 1;
            Cv.Copy(timg, tgray, null);

            // try several threshold levels
            for (int l = 0; l < N; l++)
            {
                if (l == 0)
                {
                    // hack: use Canny instead of zero threshold level;
                    // Canny helps to catch squares with gradient shading.
                    // Take the upper threshold from the slider and set the
                    // lower to 0 (which forces edge merging).
                    Cv.Canny(tgray, gray, 0, Thresh, ApertureSize.Size5);
                    // dilate canny output to remove potential
                    // holes between edge segments
                    Cv.Dilate(gray, gray, null, 1);
                }
                else
                {
                    // apply threshold if l != 0:
                    // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                    Cv.Threshold(tgray, gray, (l + 1) * 255.0 / N, 255, ThresholdType.Binary);
                }

                // find contours and store them all as a list
                CvSeq<CvPoint> contours;
                Cv.FindContours(gray, storage, out contours, CvContour.SizeOf,
                                ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));

                // test each contour
                while (contours != null)
                {
                    // approximate contour with accuracy proportional
                    // to the contour perimeter
                    CvSeq<CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage,
                        ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);

                    // square contours should have 4 vertices after approximation,
                    // relatively large area (to filter out noisy contours) and be
                    // convex. Note: the absolute value of the area is used because
                    // the sign depends on the contour orientation.
                    if (result.Total == 4 &&
                        Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 1000 &&
                        result.CheckContourConvexity())
                    {
                        // find the maximum cosine of the angle between joint edges
                        // (NOTE(review): result[4] on a 4-element sequence mirrors
                        // the original squares.c sample; confirm the OpenCvSharp
                        // indexer tolerates index == Total the same way)
                        double s = 0;
                        for (int i = 2; i < 5; i++)
                        {
                            double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                            s = s > t ? s : t;
                        }

                        // if the cosines of all angles are small (all angles are
                        // ~90 degrees) then write the quadrangle vertices to the
                        // resultant sequence
                        if (s < 0.3)
                        {
                            for (int i = 0; i < 4; i++)
                            {
                                squares.Push(result[i].Value);
                            }
                        }
                    }
                    // take the next contour
                    contours = contours.HNext;
                }
            }
        }
    }

    return squares.ToArray();
}
/// <summary>
/// Returns the sequence of squares detected on the image.
/// The point sequences are allocated in the specified memory storage.
/// </summary>
/// <param name="img">Input 8-bit 3-channel image.</param>
/// <param name="storage">Memory storage used for contour and point sequences.</param>
/// <returns>Detected square vertices, 4 consecutive points per square.</returns>
static CvPoint[] FindSquares4(IplImage img, CvMemStorage storage)
{
    const int N = 11;
    CvSize sz = new CvSize(img.Width & -2, img.Height & -2);

    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

    // FIX: the original disposed the four temporary images manually at the
    // end, leaking all of them if any OpenCV call threw; 'using' guarantees
    // disposal on every path.
    using (IplImage timg = img.Clone()) // make a copy of the input image
    using (IplImage gray = new IplImage(sz, BitDepth.U8, 1))
    using (IplImage pyr = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3))
    using (IplImage tgray = new IplImage(sz, BitDepth.U8, 1))
    {
        // select the maximum ROI in the image
        // with the width and height divisible by 2
        timg.ROI = new CvRect(0, 0, sz.Width, sz.Height);

        // down-scale and upscale the image to filter out the noise
        Cv.PyrDown(timg, pyr, CvFilter.Gaussian5x5);
        Cv.PyrUp(pyr, timg, CvFilter.Gaussian5x5);

        // find squares in every color plane of the image
        for (int c = 0; c < 3; c++)
        {
            // extract the c-th color plane
            timg.COI = c + 1;
            Cv.Copy(timg, tgray, null);

            // try several threshold levels
            for (int l = 0; l < N; l++)
            {
                if (l == 0)
                {
                    // hack: use Canny instead of zero threshold level;
                    // Canny helps to catch squares with gradient shading.
                    // Take the upper threshold from the slider and set the
                    // lower to 0 (which forces edge merging).
                    Cv.Canny(tgray, gray, 0, Thresh, ApertureSize.Size5);
                    // dilate canny output to remove potential
                    // holes between edge segments
                    Cv.Dilate(gray, gray, null, 1);
                }
                else
                {
                    // apply threshold if l != 0:
                    // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                    Cv.Threshold(tgray, gray, (l + 1) * 255.0 / N, 255, ThresholdType.Binary);
                }

                // find contours and store them all as a list
                CvSeq<CvPoint> contours;
                Cv.FindContours(gray, storage, out contours, CvContour.SizeOf,
                                ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));

                // test each contour
                while (contours != null)
                {
                    // approximate contour with accuracy proportional
                    // to the contour perimeter
                    CvSeq<CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage,
                        ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);

                    // square contours should have 4 vertices after approximation,
                    // relatively large area (to filter out noisy contours) and be
                    // convex. Note: the absolute value of the area is used because
                    // the sign depends on the contour orientation.
                    if (result.Total == 4 &&
                        Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 1000 &&
                        result.CheckContourConvexity())
                    {
                        // find the maximum cosine of the angle between joint edges
                        // (NOTE(review): result[4] on a 4-element sequence mirrors
                        // the original squares.c sample; confirm the OpenCvSharp
                        // indexer tolerates index == Total the same way)
                        double s = 0;
                        for (int i = 2; i < 5; i++)
                        {
                            double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                            s = s > t ? s : t;
                        }

                        // if the cosines of all angles are small (all angles are
                        // ~90 degrees) then write the quadrangle vertices to the
                        // resultant sequence
                        if (s < 0.3)
                        {
                            for (int i = 0; i < 4; i++)
                            {
                                squares.Push(result[i].Value);
                            }
                        }
                    }
                    // take the next contour
                    contours = contours.HNext;
                }
            }
        }
    }

    return squares.ToArray();
}
/// <summary>
/// Detects squares in the image using contours: Canny edges are dilated,
/// contours are extracted and polygon-approximated, and convex 4-vertex
/// results with near-right angles are kept.
/// </summary>
/// <param name="img">Input BGR image.</param>
/// <returns>Detected square vertices, 4 consecutive points per square.</returns>
// FIX: the XML doc previously documented parameters "modifiedImg" and
// "storage" that do not exist in the signature (compiler warning CS1572).
public static CvPoint[] DetectSquares(IplImage img)
{
    using (CvMemStorage storage = new CvMemStorage())
    {
        // create empty sequence that will contain points -
        // 4 points per square (the square's vertices)
        CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

        using (IplImage timg = img.Clone())
        using (IplImage gray = new IplImage(timg.Size, BitDepth.U8, 1))
        using (IplImage dstCanny = new IplImage(timg.Size, BitDepth.U8, 1))
        {
            // convert to gray scale
            timg.CvtColor(gray, ColorConversion.BgrToGray);

            // Canny edge detection
            Cv.Canny(gray, dstCanny, 70, 300);

            // dilate canny output to remove potential
            // holes between edge segments
            Cv.Dilate(dstCanny, dstCanny, null, 2);

            // find contours and store them all as a list
            CvSeq<CvPoint> contours;
            dstCanny.FindContours(storage, out contours);

            // test each contour
            while (contours != null)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                CvSeq<CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage,
                    ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);

                // square contours should have 4 vertices after approximation,
                // relatively large area (to filter out noisy contours) and be
                // convex. Note: the absolute value of the area is used because
                // the sign depends on the contour orientation.
                if (result.Total == 4 &&
                    Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 250 &&
                    result.CheckContourConvexity())
                {
                    // find the maximum cosine of the angle between joint edges
                    // (NOTE(review): result[4] on a 4-element sequence mirrors
                    // the original squares.c sample; confirm the OpenCvSharp
                    // indexer tolerates index == Total the same way)
                    double s = 0;
                    for (int i = 2; i < 5; i++)
                    {
                        double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                        s = s > t ? s : t;
                    }

                    // if the cosines of all angles are small (all angles are
                    // ~90 degrees) then write the quadrangle vertices to the
                    // resultant sequence
                    if (s < 0.3)
                    {
                        for (int i = 0; i < 4; i++)
                        {
                            squares.Push(result[i].Value);
                        }
                    }
                }
                // take the next contour
                contours = contours.HNext;
            }
        }

        return squares.ToArray();
    }
}