//=============================================================
//
// Probabilistic Hough transform processing
//
//=============================================================
private void HoughPbl(PictureBox pbox, IplImage image)
{
    IplImage gray = Cv.CreateImage(image.Size, BitDepth.U8, 1);
    IplImage canny = Cv.CreateImage(image.Size, BitDepth.U8, 1);
    IplImage hPbl = Cv.CreateImage(image.Size, BitDepth.U8, 3);

    Cv.CvtColor(image, gray, ColorConversion.RgbToGray);
    Cv.Canny(gray, canny, 50, 200);
    Cv.CvtColor(canny, hPbl, ColorConversion.GrayToRgb);

    CvMemStorage storage = new CvMemStorage();
    // rho = 1 px, theta = 1 degree, accumulator threshold = 50,
    // minimum line length = 10, maximum gap between collinear segments = 10
    CvSeq lines = Cv.HoughLines2(canny, storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 10, 10);
    for (int i = 0; i < lines.Total; i++)
    {
        CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;
        Cv.Line(hPbl, elem.P1, elem.P2, CvColor.Red, 1, LineType.AntiAlias, 0);
    }
    lines.Dispose();
    storage.Dispose();

    ViewBitmap(pbox, hPbl);
    Cv.ReleaseImage(gray);
    Cv.ReleaseImage(canny);
    Cv.ReleaseImage(hPbl);
    pictureBox2.Invalidate();
}
public static IntersectType ValidIntersect(CvLineSegmentPoint a, CvLineSegmentPoint b)
{
    const int dI = 7;   // pixel tolerance around segment endpoints

    var intersect = a.LineIntersection(b);
    if (intersect == null) { return IntersectType.None; }

    bool aFlag = false;
    bool bFlag = false;
    var i = intersect.Value;

    if (((a.P1.X - dI) < i.X && (a.P1.X + dI) > i.X && (a.P1.Y - dI) < i.Y && (a.P1.Y + dI) > i.Y) ||
        ((a.P2.X - dI) < i.X && (a.P2.X + dI) > i.X && (a.P2.Y - dI) < i.Y && (a.P2.Y + dI) > i.Y))
    {
        aFlag = true;
    }
    if (((b.P1.X - dI) < i.X && (b.P1.X + dI) > i.X && (b.P1.Y - dI) < i.Y && (b.P1.Y + dI) > i.Y) ||
        ((b.P2.X - dI) < i.X && (b.P2.X + dI) > i.X && (b.P2.Y - dI) < i.Y && (b.P2.Y + dI) > i.Y))
    {
        bFlag = true;
    }

    if (aFlag && bFlag) { return IntersectType.Type2; }

    if (aFlag)
    {
        if (b.P1.X < i.X && b.P2.X > i.X && b.P1.Y < i.Y && b.P2.Y > i.Y) { return IntersectType.Type1; }
    }
    else if (bFlag)
    {
        if (a.P1.X < i.X && a.P2.X > i.X && a.P1.Y < i.Y && a.P2.Y > i.Y) { return IntersectType.Type1; }
    }

    //var vec = new CvLineSegmentPoint(CvPoint.Empty, i);
    //if (a.IntersectedSegments(vec)) return IntersectType.Type0;
    return IntersectType.None;
}
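// Hedged reading of the return values above (not stated in the original code): Type2 appears to mean
// both segments have an endpoint within dI pixels of the intersection (an "L"-shaped corner), while
// Type1 appears to mean one segment ends near the intersection and the other spans across it
// (a "T"-shaped junction). A minimal usage sketch under that assumption:
//
//     var type = ValidIntersect(segA, segB);
//     if (type == IntersectType.Type2)      { /* candidate corner of the quad */ }
//     else if (type == IntersectType.Type1) { /* candidate T-junction */ }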
private void extendLines(CvLineSegmentPoint[] lines, double ext)
{
    // TODO : this is a clumsy way to extend a line; it does 2 sqrts and is generally hard to follow.
    // Better to make it parametric, i.e. P(t) = P1 + t * (P2 - P1), where t = 0 gives P1 and t = 1 gives P2,
    // so the segment can be grown by a percentage, e.g. t = -0.5 .. 1.5 (+50% on each end).

    // Transfer 2-point line segments to the "a*x = b" CvLine2D format.
    // TODO : this should probably be redone manually without OpenCV's ultra-generic, slow function.
    // We only have 2 points, after all.
    List<CvLine2D> fitLinesV = new List<CvLine2D>(lines.Length);
    CvPoint[] forFitline = new CvPoint[2];
    for (int i = 0; i < lines.Length; ++i)
    {
        forFitline[0] = lines[i].P1;
        forFitline[1] = lines[i].P2;
        CvLine2D fitLinef = Cv.FitLine2D(forFitline, DistanceType.L2, 0, 0.01, 0.01);
        fitLinesV.Add(fitLinef);
    }

    CvPoint p1, p2, p3, p4;
    for (int i = 0; i < lines.Length; i++)
    {
        CvLineSegmentPoint lineSegm = lines[i];
        CvLine2D fitLine = fitLinesV[i];
        int fitLineVx = (int)(fitLine.Vx * ext);
        int fitLineVy = (int)(fitLine.Vy * ext);
        p1 = new CvPoint(lineSegm.P1.X + fitLineVx, lineSegm.P1.Y + fitLineVy);
        p2 = new CvPoint(lineSegm.P2.X - fitLineVx, lineSegm.P2.Y - fitLineVy);
        p3 = new CvPoint(lineSegm.P1.X - fitLineVx, lineSegm.P1.Y - fitLineVy);
        p4 = new CvPoint(lineSegm.P2.X + fitLineVx, lineSegm.P2.Y + fitLineVy);
        // keep whichever pair of shifted endpoints makes the segment longer
        if (p1.DistanceTo(p2) > p3.DistanceTo(p4))
        {
            lineSegm.P1.X = p1.X; lineSegm.P1.Y = p1.Y;
            lineSegm.P2.X = p2.X; lineSegm.P2.Y = p2.Y;
        }
        else
        {
            lineSegm.P1.X = p3.X; lineSegm.P1.Y = p3.Y;
            lineSegm.P2.X = p4.X; lineSegm.P2.Y = p4.Y;
        }
    }
}
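// A minimal sketch of the parametric alternative described in the TODO above; this is an assumption,
// not the original author's implementation. It mutates the segment in place, the same way extendLines
// does, and assumes (as that code already does) that CvLineSegmentPoint is a reference type with
// mutable P1/P2 fields. Requires only "using System;" and "using OpenCvSharp;".
private static void ExtendByFraction(CvLineSegmentPoint s, double frac)
{
    // unnormalized direction of the segment; no FitLine2D and no square roots needed
    double dx = s.P2.X - s.P1.X;
    double dy = s.P2.Y - s.P1.Y;
    // t = -frac maps to the new P1, t = 1 + frac maps to the new P2 in P(t) = P1 + t * (P2 - P1)
    s.P1.X = (int)Math.Round(s.P1.X - frac * dx);
    s.P1.Y = (int)Math.Round(s.P1.Y - frac * dy);
    s.P2.X = (int)Math.Round(s.P2.X + frac * dx);
    s.P2.Y = (int)Math.Round(s.P2.Y + frac * dy);
}
// Example: ExtendByFraction(lines[i], 0.5) grows each segment by 50% beyond each endpoint,
// regardless of how long the segment originally was.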
//public LineSegment() { }

public LineSegment(CvLineSegmentPoint cvline)
{
    this.cvline = cvline;
    CvPoint p1 = cvline.P1;
    CvPoint p2 = cvline.P2;

    // implicit equation of the line through P1 and P2: A*x - B*y + C = 0
    A = p2.Y - p1.Y;
    B = p2.X - p1.X;
    C = p2.X * p1.Y - p2.Y * p1.X;

    // cache
    A_GT_B = Math.Abs(A) > Math.Abs(B);
    INV_A2_P_B2 = 1 / (A * A + B * B);
    A_DIV = A * INV_A2_P_B2;
    B_DIV = B * INV_A2_P_B2;
    AC_DIV = C * A_DIV;
    BC_DIV = C * B_DIV;
}
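// Hedged sketch (not part of the original class, and assuming A, B, C and the cached fields are
// doubles): one natural use of INV_A2_P_B2 is a square-root-free point-to-line distance. Since the
// supporting line satisfies A*x - B*y + C = 0, the squared distance from a point p is
// (A*p.X - B*p.Y + C)^2 / (A^2 + B^2), and multiplying by the cached reciprocal avoids a division
// per query.
public double SquaredDistanceToLine(CvPoint p)
{
    double signed = A * p.X - B * p.Y + C;   // unnormalized signed distance
    return signed * signed * INV_A2_P_B2;    // normalize by A^2 + B^2 via the cached reciprocal
}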
public IplImage HoughLines(IplImage src)
{
    houline = new IplImage(src.Size, BitDepth.U8, 3);
    bin = this.Binary(src, 150);
    Cv.Dilate(bin, bin, null, 1);
    Cv.Erode(bin, bin, null, 3);
    Cv.Dilate(bin, bin, null, 2);
    Cv.Canny(bin, bin, 0, 255);
    Cv.CvtColor(bin, houline, ColorConversion.GrayToBgr);

    CvMemStorage Storage = new CvMemStorage();
    // the Canny result lives in "bin", so run the probabilistic Hough transform on it
    CvSeq lines = bin.HoughLines2(Storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 140, 50, 10);

    //for (int i = 0; i < Math.Min(lines.Total, 20); i++)
    //{
    //    CvLineSegmentPolar element = lines.GetSeqElem<CvLineSegmentPolar>(i).Value;
    //    float r = element.Rho;
    //    float theta = element.Theta;
    //    double a = Math.Cos(theta);
    //    double b = Math.Sin(theta);
    //    double x0 = r * a;
    //    double y0 = r * b;
    //    int scale = src.Size.Width + src.Size.Height;
    //    CvPoint pt1 = new CvPoint(Convert.ToInt32(x0 - scale * b), Convert.ToInt32(y0 + scale * a));
    //    CvPoint pt2 = new CvPoint(Convert.ToInt32(x0 + scale * b), Convert.ToInt32(y0 - scale * a));
    //    houline.Line(pt1, pt2, CvColor.Red, 1, LineType.AntiAlias);
    //}

    for (int i = 0; i < Math.Min(lines.Total, 20); i++)
    {
        CvLineSegmentPoint element = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;
        houline.Line(element.P1, element.P2, CvColor.Yellow, 1, LineType.AntiAlias);
    }
    return houline;
}
/// <summary>
/// sample of C style wrapper
/// </summary>
private void SampleC()
{
    // cvHoughLines2
    using (IplImage srcImgGray = new IplImage(FilePath.Image.Goryokaku, LoadMode.GrayScale))
    using (IplImage srcImgStd = new IplImage(FilePath.Image.Goryokaku, LoadMode.Color))
    using (IplImage srcImgProb = srcImgStd.Clone())
    {
        Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);

        using (CvMemStorage storage = new CvMemStorage())
        {
            // Standard algorithm
            CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Standard, 1, Math.PI / 180, 50, 0, 0);
            // wrapper style
            //CvLineSegmentPolar[] lines = src_img_gray.HoughLinesStandard(1, Math.PI / 180, 50, 0, 0);

            int limit = Math.Min(lines.Total, 10);
            for (int i = 0; i < limit; i++)
            {
                // native code style
                /*
                unsafe
                {
                    float* line = (float*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                    float rho = line[0];
                    float theta = line[1];
                }
                //*/

                // wrapper style
                CvLineSegmentPolar elem = lines.GetSeqElem<CvLineSegmentPolar>(i).Value;
                float rho = elem.Rho;
                float theta = elem.Theta;

                double a = Math.Cos(theta);
                double b = Math.Sin(theta);
                double x0 = a * rho;
                double y0 = b * rho;
                CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                srcImgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
            }

            // Probabilistic algorithm
            lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 50, 10);
            // wrapper style
            //CvLineSegmentPoint[] lines = src_img_gray.HoughLinesProbabilistic(1, Math.PI / 180, 50, 0, 0);

            for (int i = 0; i < lines.Total; i++)
            {
                // native code style
                /*
                unsafe
                {
                    CvPoint* point = (CvPoint*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                    src_img_prob.Line(point[0], point[1], CvColor.Red, 3, LineType.AntiAlias, 0);
                }
                //*/

                // wrapper style
                CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;
                srcImgProb.Line(elem.P1, elem.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
            }
        }

        using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, srcImgStd))
        using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, srcImgProb))
        {
            CvWindow.WaitKey(0);
        }
    }
}
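// The standard-Hough drawing step above follows directly from the (rho, theta) line parameterization:
// every point on a detected line satisfies x*cos(theta) + y*sin(theta) = rho. The point
// (x0, y0) = (rho*cos(theta), rho*sin(theta)) is the foot of the perpendicular from the origin onto the
// line, and (-sin(theta), cos(theta)) is a unit vector along the line, so stepping +/-1000 pixels in
// that direction from (x0, y0) yields two endpoints far enough apart to span the whole image.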
// => inputMat MUST be 24/32 bit
private CvMat processFrame(CvMat inputMat)
{
    // return "inputMat" after lots. LOTS. Of processing
    width = inputMat.Cols;
    height = inputMat.Rows;

    // taking out 4% of the input's edges: sounds wrong
#if false
    // I have no idea what on earth is the purpose of this:
    //CvMat temp2 = inputMat( new CvRect( inputMat.Cols / 25, inputMat.Cols / 25, inputMat.Cols - 2 * (inputMat.Cols / 25), inputMat.Rows - 2 * (inputMat.Rows / 25) ) );
    //resize( temp2, temp2, inputMat.size() );
    //temp2.copyTo( inputMat );
    int borderX = inputMat.Cols / 25;   // 4% of original
    int borderY = inputMat.Rows / 25;
    CvRect roi = new CvRect(borderX, borderY, inputMat.Cols - 2 * borderX, inputMat.Rows - 2 * borderY);
    CvMat temp2 = inputMat.GetSubRect(out temp2, roi);   // stupid to pass "out temp2"?
    inputMat = temp2;   // =TODO : What? temp2.Copy( inputMat );
    // is it really required to remove 4% of the input image's edges?
#endif

    CvMat inputMat_grey;
    {
        // TODO : looks like a waste to make two conversions from inputMat to _grey, instead of 1;
        // since OpenCV doesn't support it, it could be done manually
        CvMat inputMat_grey8 = MatOps.ConvertChannels(inputMat);
        inputMat_grey = MatOps.ConvertElements(inputMat_grey8, MatrixType.F32C1, 1.0 / 255.0);
    }

    // NOTE : IBO seems to give good contrast with certain images, but with bbox7, it is just disastrous.
    //MatOps.NewWindowShow( inputMat_grey );
    //inputMat_grey = Filters.IBO( inputMat_grey );   // inputMat_grey = 32f
    //MatOps.NewWindowShow( inputMat_grey );
    inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255);   // inputMat_grey = 8u

    // was: SLOW : Filters.ContrastEnhancement( inputMat_grey );   // NOTE : not needed AFTER IBO
    // NOTE : Contrast Enhancement2 may NOT be needed AT ALL, at this point at least, ANYWAY!!!
    Filters.ContrastEnhancement2(inputMat_grey);   // NOTE : certainly NOT needed AFTER IBO
    MatOps.NewWindowShow(inputMat_grey);

    // The mask passed originally to the method below was all white, so I optimized it out.
    // Passing the number of pixels was also pointless.
    double thresh = Filters.NeighborhoodValleyEmphasis(inputMat_grey);
    Cv.Threshold(inputMat_grey, inputMat_grey, thresh, 255, ThresholdType.BinaryInv);

    IplConvKernel element = new IplConvKernel(3, 3, 1, 1, ElementShape.Cross);
    Cv.Erode(inputMat_grey, inputMat_grey, element);
    Cv.Dilate(inputMat_grey, inputMat_grey, element);
    MatOps.NewWindowShow(inputMat_grey);

    // TODO : check whether this check is required
    if (inputMat_grey.ElemType != MatrixType.U8C1)
    {
        inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255.0);
    }

    // =======
    // is this just a test?
    CvPoint[] newPtV = Filters.DistillContours(inputMat_grey, 5, Const.PointZero);
    CvMat imageDest;
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 16);
    }
    // =======

    kawane(newPtV);   // updates thresholdDist, minMaskY, final4P

    //******************* set a greater contour for estimation of the missing points *******************//
    // =======
    newPtV = Filters.DistillContours(inputMat_grey, 100, Const.PointZero);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 1, LineType.AntiAlias);
    }
    // =======

    CvMat mask1 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, 0);
    Cv.FillConvexPoly(mask1, newPtV, Const.ScalarWhite, 0, 0);

    temp = MatOps.ConvertChannels(inputMat);
    temp.Copy(imageDest, mask1);

    Cv.Canny(imageDest, imageDest, 150, 300, ApertureSize.Size3);
    IplConvKernel element2 = new IplConvKernel(3, 3, 1, 1, ElementShape.Rect);
    Cv.Dilate(imageDest, imageDest, element2);
    Cv.Erode(imageDest, imageDest, element2);

    CvLineSegmentPoint[] lines = Cv2.HoughLinesP(new Mat(imageDest), 1, Cv.PI / 180 /*NOTE : 1 degree angle*/, 50, 50, 50);   // TODO : those 50s..?

    extendLines(lines, 350);   // TODO : this idea sounds arbitrary? And why 350? At least make it a percentage?

    // draw extended lines
    for (int i = 0; i < lines.Length; ++i)
    {
        CvLineSegmentPoint l = lines[i];
        Cv.Line(imageDest, l.P1, l.P2, Const.ScalarWhite, 1, LineType.AntiAlias);
    }

    Cv.Dilate(imageDest, imageDest, element2);   // TODO : FIX : Dilate again?!

    // another huge function here...
    fourPoints(lines);

    ////////////
    //************************* replace estimated points with mask corners *************************//
    if (oldPt.Count != 0)
    {
        //**
        // BEWARE : great use of the English language following right below:
        // test for each and every one of the last slice delete each one of all the revisited of the above
        // and estimate for only the best the off topic adapt
        //**
        List<int> positions = new List<int>(final4P.Count);
        for (int i = 0; i < final4P.Count; ++i)
        {
            positions.Add(-1);   // "initialize" positions[i]
            double distmin = 10000;
            for (int j = 0; j < oldPt.Count; ++j)
            {
                double distAB = PointOps.Norm(oldPt[j] - final4P[i]);
                if (distAB < distmin) { distmin = distAB; positions[i] = j; }
            }
        }

        int flagFrCounter = 0;
        for (int i = 0; i < final4P.Count; ++i)
        {
            double distA = PointOps.Norm(oldPt[positions[i]] - final4P[i]);
            //********* threshold that defines the maximum search region; otherwise keep the previous point *********//
            if (distA < thresholdDist)   //if(distA<80)
            {
                oldPt[positions[i]] = final4P[i];
                --flagFrCounter;
            }
            ++flagFrCounter;
        }

        if (reset)
        {
            numFrames = 0;
            oldPt.Clear();
            final4P.Clear();
        }
    }
    //pointsb[0]=thresholdDist;
    //****************************************************************************//

    for (int i = 0; i < oldPt.Count; ++i)
    {
        Cv.Circle(temp, oldPt[i], 2, Const.ScalarRed, 3);
    }
    MatOps.Convert8To24(temp).Copy(inputMat);
    //MatOps.ConvertChannels( temp, ColorConversion.GrayToBgr ).Copy( inputMat );
    //temp.Copy( inputMat );

    //****************************************** OVERLAY IMAGE ******************************************//
    if (oldPt.Count == 0)
    {
        return inputMat;   // end of line
    }

    CvMat black2;
    if (overlay != null)
    {
        black2 = overlay.Clone();   //=imread("cubes.jpg");
        Cv.Resize(black2, inputMat, Interpolation.NearestNeighbor);   // TODO : check if interpolation type is appropriate
    }
    else
    {
        black2 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C3);
    }

    List<CvPoint> tempPoint = new List<CvPoint>(4);   //vector<Point> tempPoint;
    int pp = 0;

    // BEWARE : the guy is copy/pasting needlessly?
    int mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini) { mini = oldPt[i].Y; pp = i; }
    }
    tempPoint.Add(oldPt[pp]);

    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini && oldPt[i] != tempPoint[0]) { mini = oldPt[i].Y; pp = i; }
    }
    tempPoint.Add(oldPt[pp]);

    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        int tempmini = Math.Abs(oldPt[i].X - tempPoint[1].X);
        if (tempmini < mini && oldPt[i] != tempPoint[0] && oldPt[i] != tempPoint[1]) { mini = tempmini; pp = i; }
    }
    tempPoint.Add(oldPt[pp]);

    for (int i = 0; i < oldPt.Count; ++i)
    {
        CvPoint pt = oldPt[i];
        bool found = false;
        for (int j = 0; j < tempPoint.Count; ++j)
        {
            if (tempPoint[j] == pt) { found = true; break; }
        }
        if (!found) { tempPoint.Add(pt); }
    }

    // only keep up to 4 points
    List<CvPoint> co_ordinates = new List<CvPoint>(4);
    {
        int maxIndex = Math.Min(4, tempPoint.Count);
        for (int i = 0; i < maxIndex; ++i) { co_ordinates.Add(tempPoint[i]); }
    }
    // lost me...
    if (outputQuad[0] == outputQuad[2])
    {
        int maxIndex = Math.Min(4, tempPoint.Count);
        for (int i = 0; i < maxIndex; ++i) { outputQuad[i] = tempPoint[i]; }
    }
    else
    {
        CvPoint2D32f rr;
        for (int i = 0; i < 4; ++i)
        {
            List<double> dist = new List<double>(tempPoint.Count);
            for (int j = 0; j < tempPoint.Count; ++j)
            {
                rr = tempPoint[j];
                dist.Add(PointOps.Norm(outputQuad[i] - rr));
            }
            double minimumDist = dist.Min();
            int min_pos = Utils.FindIndex(dist, minimumDist);
            if (tempPoint.Count > 0)
            {
                outputQuad[i] = tempPoint[min_pos];
                tempPoint.RemoveAt(min_pos);
            }
        }
    }

    // The 4 points where the mapping is to be done, from top-left in clockwise order
    inputQuad[0] = new CvPoint2D32f(0, 0);
    inputQuad[1] = new CvPoint2D32f(inputMat.Cols - 1, 0);
    inputQuad[2] = new CvPoint2D32f(inputMat.Cols - 1, inputMat.Rows - 1);
    inputQuad[3] = new CvPoint2D32f(0, inputMat.Rows - 1);
    //Input and Output Image;

    // Get the Perspective Transform Matrix, i.e. lambda (2D warp transform)
    CvMat lambda = Cv.GetPerspectiveTransform(inputQuad, outputQuad);
    // Apply this Perspective Transform to the src image - get a "top-down" view of the supposedly box-y area
    Cv.WarpPerspective(black2, black2, lambda, Interpolation.Cubic, Const.ScalarBlack);
    // see nice explanation : http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/

    CvMat maskOV = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, Const.ScalarBlack);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(co_ordinates, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(maskOV, updateContours, Const.ScalarWhite, 0, 100, 16);
        //drawContours( maskOV, co_ordinates, 0, Scalar( 255 ), CV_FILLED, 8 );
    }

    double alpha = 0.8;
    double beta = (1.0 - alpha);
    Cv.AddWeighted(black2, alpha, inputMat, beta, 0.0, black2);
    black2.Copy(inputMat, maskOV);

    return inputMat;
}
void fourPoints(CvLineSegmentPoint[] linesArray)
{
    List<CvLineSegmentPoint> lines = new List<CvLineSegmentPoint>(linesArray);
    int i, j, k;

    // angleV[i] must hold the angle of lines[i]; fill it in the same order as "lines"
    List<double> angleV = new List<double>(lines.Count);
    for (i = 0; i < lines.Count; ++i)
    {
        CvLineSegmentPoint lineSegm = lines[i];
        angleV.Add(Math.Atan2(lineSegm.P1.Y - lineSegm.P2.Y, lineSegm.P1.X - lineSegm.P2.X));
    }

    CvPoint p1, p2, p0, p0_;

    // Discard almost parallel lines and keep the largest
    // FIX : everything about this sucks
    for (i = 0; i < lines.Count; ++i)
    {
        CvLineSegmentPoint segi = lines[i];
        p0 = segi.P1;
        p0_ = segi.P2;
        double e2 = p0.DistanceTo(p0_);
        for (j = 0; j < lines.Count; ++j)
        {
            if (i == j)   // ugly?
            {
                continue;
            }
            if (Math.Abs(angleV[i] - angleV[j]) > 0.1 ||
                Math.Abs(angleV[i] - angleV[j]) > Cv.PI / 2.0 - 0.1 && Math.Abs(angleV[i] - angleV[j]) < Cv.PI / 2.0 + 0.1)
            {
                continue;
            }
            CvLineSegmentPoint segj = lines[j];
            p1 = segj.P1;
            p2 = segj.P2;
            if (PointOps.LineDistance(p1, p2, p0) < 15 && PointOps.LineDistance(p0, p0_, p1) < 15)
            {
                if (p1.DistanceTo(p2) > e2)
                {
                    lines.RemoveAt(i);
                    angleV.RemoveAt(i);
                    --i;
                    --j;
                    break;
                }
                else
                {
                    lines.RemoveAt(j);
                    angleV.RemoveAt(j);
                    --j;
                }
            }
        }
    }

    // instead of 3 lists, we could have one with a custom struct containing all 3 required values
    List<CvPoint> allPointsV = new List<CvPoint>();
    List<int> fstln = new List<int>();
    List<int> secln = new List<int>();
    const int bound = 50;
    for (i = 0; i < lines.Count; ++i)
    {
        CvLineSegmentPoint segmI = lines[i];
        for (j = 0; j < lines.Count; ++j)
        {
            if (i == j)
            {
                continue;   // ugly?
            }
            CvLineSegmentPoint segmJ = lines[j];
            if (PointOps.LineIntersection(segmI.P1, segmI.P2, segmJ.P1, segmJ.P2, out p1))
            {
                if (p1.X > -bound && p1.X < temp.Cols + bound && p1.Y > -bound && p1.Y < temp.Rows + bound)
                {
                    bool foundSamePt = false;
                    for (k = 0; k < allPointsV.Count; ++k)
                    {
                        if (p1 == allPointsV[k]) { foundSamePt = true; break; }
                    }
                    if (!foundSamePt)
                    {
                        allPointsV.Add(p1);
                        fstln.Add(i);
                        secln.Add(j);
                    }
                }
            }
        }
    }

    if (allPointsV.Count == 0)
    {
        reset = true;
        return;
    }
    reset = false;

    // time to start doing our drawings
    if (imageDest3 == null)
    {
        imageDest3 = new CvMat(height, width, MatrixType.U8C3);
    }

    // are we at the start, or have we just not found any points yet?
    if (oldPt.Count == 0 || numFrames < 20)
    {
        //************************ draw intersections ************************//
        for (i = 0; i < allPointsV.Count; ++i)
        {
            CvScalar circleColor;
            if (allPointsV[i].Y < height - 10) { circleColor = Const.ScalarGreen; }
            else { circleColor = Const.ScalarWhite; }
            Cv.Circle(imageDest3, allPointsV[i], 7, circleColor);
        }

        //mapping the detected corners with lines intersections
        List<int> tracker = new List<int>(final4P.Count);
        for (j = 0; j < final4P.Count; ++j)
        {
            double dist = PointOps.Norm(final4P[j] - allPointsV[0]);
            tracker.Add(0);   // tracker[j] = 0;
            for (i = 0; i < allPointsV.Count; ++i)
            {
                double distA = PointOps.Norm(final4P[j] - allPointsV[i]);
                if (distA < dist) { dist = distA; tracker[j] = i; }
            }
        }

        //******* draw mapped corners *****************//
        for (j = 0; j < final4P.Count; ++j)
        {
            Cv.Circle(imageDest3, allPointsV[tracker[j]], 8, Const.ScalarMagenta);
        }

        //*******************************************************************************************//
        List<int> linesIds = new List<int>(final4P.Count);
        for (i = 0; i < final4P.Count; ++i)
        {
            int counterfstln = 0;
            for (j = 0; j < final4P.Count; ++j)
            {
                if (i == j || fstln[tracker[i]] == fstln[tracker[j]] /*this might be redundant after 1st check*/ || fstln[tracker[i]] == secln[tracker[j]])
                {
                    ++counterfstln;
                }
            }
            int countersecln = 0;
            for (j = 0; j < final4P.Count; ++j)
            {
                if (i == j || secln[tracker[i]] == fstln[tracker[j]] || secln[tracker[i]] == secln[tracker[j]])
                {
                    ++countersecln;
                }
            }
            if (counterfstln < countersecln)
            {
                linesIds.Add(fstln[tracker[i]]);   // linesIds[i] = fstln[tracker[i]];
            }
            else
            {
                linesIds.Add(secln[tracker[i]]);   // linesIds[i] = secln[tracker[i]];
            }
        }

        List<int> maxdistpos1 = new List<int>(tracker.Count);   // TODO : check if Count is always less than 3-4...
        for (j = 0; j < tracker.Count; j++)
        {
            maxdistpos1.Add(0);
            // maxdistpos1.Add( -1 );   // "initialize" maxdistpos1[j], so that it can be re-assigned below
            // TODO : logic is wrong!!!! Not all [j]s are assigned. Proof : if "initialized" with "-1", it just crashes later!
            double dist = 0;
            for (i = 0; i < fstln.Count; i++)
            {
                if (linesIds[j] == fstln[i] || linesIds[j] == secln[i] && allPointsV[i].Y < height - 15)
                {
                    double distA = PointOps.Norm(allPointsV[tracker[j]] - allPointsV[i]);
                    if (distA > dist) { dist = distA; maxdistpos1[j] = i; }
                }
            }
        }

        oldPt.Clear();
        for (i = 0; i < final4P.Count; i++)
        {
            oldPt.Add(final4P[i]);
        }

        List<CvPoint> candidatePts = new List<CvPoint>();
        List<double> candist = new List<double>();
        for (i = 0; i < maxdistpos1.Count; i++)
        {
            Cv.Circle(imageDest3, allPointsV[maxdistpos1[i]], 7, Const.ScalarBlue);
            if (allPointsV[maxdistpos1[i]].Y > minMaskY && allPointsV[maxdistpos1[i]].Y < height - 10)
            {
                candidatePts.Add(allPointsV[maxdistpos1[i]]);
            }
        }
        for (i = 0; i < candidatePts.Count; i++)
        {
            double dist = 0;
            for (j = 0; j < oldPt.Count; j++)
            {
                dist += PointOps.Norm(candidatePts[i] - oldPt[j]);
            }
            candist.Add(dist);
        }

        while (oldPt.Count < 4)
        {
            if (candidatePts.Count != 0)
            {
                int p = candist.FindMaxIndex();   // Utils.FindMaxIndex( candist );
                // p = max_element(candist.begin(),candist.end()) - candist.begin();
                oldPt.Add(candidatePts[p]);
                candist.RemoveAt(p);
                candidatePts.RemoveAt(p);
            }
            else
            {
                break;
            }
        }

        for (j = 0; j < oldPt.Count; j++)
        {
            Cv.Circle(imageDest3, oldPt[j], 7, Const.ScalarBlue);
        }
        //***************************************************** end of estimation **************************************************************//
    }
    else
    {
        for (i = 0; i < allPointsV.Count; ++i)
        {
            if (allPointsV[i].Y < height - 10) { Cv.Circle(imageDest3, allPointsV[i], 7, Const.ScalarGreen); }
            else { Cv.Circle(imageDest3, allPointsV[i], 7, Const.ScalarWhite); }
        }

        //mapping the detected corners with lines intersections //
        List<int> tracker = new List<int>(oldPt.Count);
        for (j = 0; j < oldPt.Count; ++j)
        {
            tracker.Add(-1);
            double dist = 1000000;
            for (i = 0; i < allPointsV.Count; ++i)
            {
                double distA = PointOps.Norm(oldPt[j] - allPointsV[i]);
                if (distA < dist && allPointsV[i].Y < height - 10) { dist = distA; tracker[j] = i; }
            }
        }
        for (j = 0; j < oldPt.Count; ++j)
        {
            double distA = PointOps.Norm(oldPt[j] - allPointsV[tracker[j]]);
            //********* threshold that defines the maximum search region; otherwise keep the previous point *********//
            if (distA < thresholdDist)
            {
                oldPt[j] = allPointsV[tracker[j]];
            }
        }
    }
}
public IplImage HoughLines(IplImage src)   // lane detection via the Hough transform
{
    flag_yellow = 0;
    flag_white = 0;

    slice = this.SliceImage(src);
    hsv = this.YellowTransform(slice);
    hls = this.WhiteTransform(slice);
    blur = this.BlurImage(hsv);
    bin = this.Binary(hsv, 50);   // convert with adaptive thresholding
    Cv.Canny(bin, bin, 50, 200, ApertureSize.Size3);

    CvMemStorage Storage = new CvMemStorage();

    /* probabilistic Hough detection */
    CvSeq lines = bin.HoughLines2(Storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 100, 100);

    double[] LineAngle = new double[lines.Total];
    double[] LineAngle_q = new double[lines.Total];

    if (lines != null)
    {
        for (int i = 0; i < lines.Total; i++)   // examine every detected line
        {
            CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;   // get the data for this line
            int dx = elem.P2.X - elem.P1.X;   // difference in x coordinates
            int dy = elem.P2.Y - elem.P1.Y;   // difference in y coordinates
            double angle = Math.Atan2(dy, dx) * 180 / Math.PI;   // compute the slope angle
            LineAngle[i] = angle;
            LineAngle_q[i] = angle;
        }
        if (lines.Total != 0)
        {
            Quick_Sort(LineAngle_q, lines.Total);
        }

        for (int i = 0; i < lines.Total; i++)
        {
            CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;   // get the data for this line
            int dx = elem.P2.X - elem.P1.X;
            int dy = elem.P2.Y - elem.P1.Y;
            double angle = Math.Atan2(dy, dx) * 180 / Math.PI;

            if (LineAngle_q[lines.Total - 1] == LineAngle[i] || LineAngle_q[0] == LineAngle[i])
            {
                // case where P2 + 50 is still less than P1, or P2 - 50 is still greater than P1
                if (elem.P1.Y > elem.P2.Y || elem.P1.Y < elem.P2.Y)   // P1: start point, P2: end point
                {
                    if (Math.Abs(angle) >= 14 && Math.Abs(angle) <= 80)
                    {
                        Cv.PutText(src, "Yellow angle : " + angle.ToString(), new CvPoint(100, 50), new CvFont(FontFace.HersheyComplex, 1, 1), CvColor.Yellow);
                        P1 = elem.P1;
                        P2 = elem.P2;
                        d_dx = Math.Abs(P2.X - P1.X);
                        d_dy = Math.Abs(P2.Y - P1.Y);
                        // stretch the segment until its lower end reaches the bottom of the frame
                        // and its upper end reaches 5/8 of the frame height
                        while (true)
                        {
                            if (P1.Y > P2.Y)
                            {
                                if (P1.Y < src.Height)
                                {
                                    if (P1.X < P2.X) { P1.X -= d_dx / 100; P1.Y += d_dy / 100; }
                                    else { P1.X += d_dx / 100; P1.Y += d_dy / 100; }
                                }
                                else if (P2.Y > src.Height * 5 / 8)
                                {
                                    if (P1.X > P2.X) { P2.X -= d_dx / 100; P2.Y -= d_dy / 100; }
                                    else { P2.X += d_dx / 100; P2.Y -= d_dy / 100; }
                                }
                                else { break; }
                            }
                            else
                            {
                                if (P2.Y < src.Height)
                                {
                                    if (P1.X > P2.X) { P2.X -= d_dx / 100; P2.Y += d_dy / 100; }
                                    else { P2.X += d_dx / 100; P2.Y += d_dy / 100; }
                                }
                                else if (P1.Y > src.Height * 5 / 8)
                                {
                                    if (P1.X < P2.X) { P1.X -= d_dx / 100; P1.Y -= d_dy / 100; }
                                    else { P1.X += d_dx / 100; P1.Y -= d_dy / 100; }
                                }
                                else { break; }
                            }
                            d_dx = Math.Abs(P2.X - P1.X);
                            d_dy = Math.Abs(P2.Y - P1.Y);
                        }
                        flag_yellow = 1;
                        prev_elemLeft.P1 = P1;
                        prev_elemLeft.P2 = P2;
                        src.Line(P1, P2, CvColor.Yellow, 10, LineType.AntiAlias, 0);
                        // P1/P2 can be used to give the driver feedback (e.g. a sharp change in angle suggests lane departure)
                        break;
                    }
                }
            }
        }
    }

    blur = this.BlurImage(hls);
    bin = this.Binary(hls, 50);   // convert with adaptive thresholding
    Cv.Canny(bin, bin, 50, 150, ApertureSize.Size3);

    /* probabilistic Hough detection */
    lines = bin.HoughLines2(Storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 100, 100);
    LineAngle = new double[lines.Total];
    LineAngle_q = new double[lines.Total];

    if (lines != null)
    {
        for (int i = 0; i < lines.Total; i++)   // examine every detected line
        {
            CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;   // get the data for this line
            int dx = elem.P2.X - elem.P1.X;
            int dy = elem.P2.Y - elem.P1.Y;
            double angle = Math.Atan2(dy, dx) * 180 / Math.PI;
            LineAngle[i] = angle;
            LineAngle_q[i] = angle;
        }
        if (lines.Total != 0)
        {
            Quick_Sort(LineAngle_q, lines.Total);
        }

        for (int i = 0; i < lines.Total; i++)
        {
            CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;   // get the data for this line
            int dx = elem.P2.X - elem.P1.X;   // difference in x coordinates
            int dy = elem.P2.Y - elem.P1.Y;   // difference in y coordinates
            double angle = Math.Atan2(dy, dx) * 180 / Math.PI;   // compute the slope angle

            if (LineAngle_q[lines.Total - 1] == LineAngle[i] || LineAngle_q[0] == LineAngle[i])
            {
                // case where P2 + 50 is still less than P1, or P2 - 50 is still greater than P1
                if (elem.P1.Y > elem.P2.Y || elem.P1.Y < elem.P2.Y)   // P1: start point, P2: end point
                {
                    if (Math.Abs(angle) >= 14 && Math.Abs(angle) <= 80)
                    {
                        Cv.PutText(src, "White angle : " + angle.ToString(), new CvPoint(100, 100), new CvFont(FontFace.HersheyComplex, 1, 1), CvColor.White);
                        P1 = elem.P1;
                        P2 = elem.P2;
                        d_dx = Math.Abs(P2.X - P1.X);
                        d_dy = Math.Abs(P2.Y - P1.Y);
                        // stretch the segment until its lower end reaches the bottom of the frame
                        // and its upper end reaches 5/8 of the frame height
                        while (true)
                        {
                            if (P1.Y > P2.Y)
                            {
                                if (P1.Y < src.Height)
                                {
                                    if (P1.X < P2.X) { P1.X -= d_dx / 100; P1.Y += d_dy / 100; }
                                    else { P1.X += d_dx / 100; P1.Y += d_dy / 100; }
                                }
                                else if (P2.Y > src.Height * 5 / 8)
                                {
                                    if (P1.X > P2.X) { P2.X -= d_dx / 100; P2.Y -= d_dy / 100; }
                                    else { P2.X += d_dx / 100; P2.Y -= d_dy / 100; }
                                }
                                else { break; }
                            }
                            else
                            {
                                if (P2.Y < src.Height)
                                {
                                    if (P1.X > P2.X) { P2.X -= d_dx / 100; P2.Y += d_dy / 100; }
                                    else { P2.X += d_dx / 100; P2.Y += d_dy / 100; }
                                }
                                else if (P1.Y > src.Height * 5 / 8)
                                {
                                    if (P1.X < P2.X) { P1.X -= d_dx / 100; P1.Y -= d_dy / 100; }
                                    else { P1.X += d_dx / 100; P1.Y -= d_dy / 100; }
                                }
                                else { break; }
                            }
                            d_dx = Math.Abs(P2.X - P1.X);
                            d_dy = Math.Abs(P2.Y - P1.Y);
                        }
                        flag_white = 1;
                        prev_elemRight.P1 = P1;
                        prev_elemRight.P2 = P2;
                        src.Line(P1, P2, CvColor.White, 10, LineType.AntiAlias, 0);
                        // P1/P2 can be used to give the driver feedback (e.g. a sharp change in angle suggests lane departure)
                        break;
                    }
                }
            }
        }
    }

    // if no new line was found in this frame, redraw the one from the previous frame
    if (flag_yellow == 0)
    {
        src.Line(prev_elemLeft.P1, prev_elemLeft.P2, CvColor.Yellow, 10, LineType.AntiAlias, 0);
    }
    if (flag_white == 0)
    {
        src.Line(prev_elemRight.P1, prev_elemRight.P2, CvColor.White, 10, LineType.AntiAlias, 0);
    }

    Cv.ReleaseMemStorage(Storage);
    //vehicle_detection = this.VDConvert.VehicleDetect(src);
    Dispose();
    return src;
}
/// <summary>
/// sample of C style wrapper
/// </summary>
private void SampleC()
{
    // cvHoughLines2
    // Detect lines (line segments) with both the standard and the probabilistic Hough transform.
    // The parameter values in this sample are tuned for the example image.

    // (1) load the images
    using (IplImage srcImgGray = new IplImage(Const.ImageGoryokaku, LoadMode.GrayScale))
    using (IplImage srcImgStd = new IplImage(Const.ImageGoryokaku, LoadMode.Color))
    using (IplImage srcImgProb = srcImgStd.Clone())
    {
        // (2) preprocessing for the Hough transform
        Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);

        using (CvMemStorage storage = new CvMemStorage())
        {
            // (3) detect lines with the standard Hough transform and draw them
            CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Standard, 1, Math.PI / 180, 50, 0, 0);
            // wrapper style
            //CvLineSegmentPolar[] lines = src_img_gray.HoughLinesStandard(1, Math.PI / 180, 50, 0, 0);

            int limit = Math.Min(lines.Total, 10);
            for (int i = 0; i < limit; i++)
            {
                // native code style
                /*
                unsafe
                {
                    float* line = (float*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                    float rho = line[0];
                    float theta = line[1];
                }
                //*/

                // wrapper style
                CvLineSegmentPolar elem = lines.GetSeqElem<CvLineSegmentPolar>(i).Value;
                float rho = elem.Rho;
                float theta = elem.Theta;

                double a = Math.Cos(theta);
                double b = Math.Sin(theta);
                double x0 = a * rho;
                double y0 = b * rho;
                CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                srcImgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
            }

            // (4) detect line segments with the probabilistic Hough transform and draw them
            lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 50, 10);
            // wrapper style
            //CvLineSegmentPoint[] lines = src_img_gray.HoughLinesProbabilistic(1, Math.PI / 180, 50, 0, 0);

            for (int i = 0; i < lines.Total; i++)
            {
                // native code style
                /*
                unsafe
                {
                    CvPoint* point = (CvPoint*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                    src_img_prob.Line(point[0], point[1], CvColor.Red, 3, LineType.AntiAlias, 0);
                }
                //*/

                // wrapper style
                CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;
                srcImgProb.Line(elem.P1, elem.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
            }
        }

        // (5) create windows and show the detection results
        using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, srcImgStd))
        using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, srcImgProb))
        {
            CvWindow.WaitKey(0);
        }
    }
}
static void Main()
{
    // CvCapture cap = CvCapture.FromFile("video.avi");
    CvCapture cap = CvCapture.FromFile("road_3.avi");

    CvWindow w = new CvWindow("Lane Detection");
    CvWindow canny = new CvWindow("Lane Detection_2");
    CvWindow hough = new CvWindow("Lane Detection");
    // CvWindow smoothing = new CvWindow("Lane Detection_3");

    IplImage src, gray, dstCanny, halfFrame, smallImg;
    CvMemStorage storage = new CvMemStorage();
    CvSeq lines;
    CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("haarcascade_cars3.xml");

    const double Scale = 2.0;
    const double ScaleFactor = 1.05;
    const int MinNeighbors = 3;
    double min_range = 70;
    double max_range = 120;
    CvSeq<CvAvgComp> cars;

    while (CvWindow.WaitKey(10) < 0)
    {
        src = cap.QueryFrame();

        halfFrame = new IplImage(new CvSize(src.Size.Width / 2, src.Size.Height / 2), BitDepth.U8, 3);
        Cv.PyrDown(src, halfFrame, CvFilter.Gaussian5x5);

        gray = new IplImage(src.Size, BitDepth.U8, 1);
        dstCanny = new IplImage(src.Size, BitDepth.U8, 1);

        /*
        smallImg = new IplImage(new CvSize(Cv.Round(src.Width / Scale), Cv.Round(src.Height / Scale)), BitDepth.U8, 1);
        using (IplImage grey = new IplImage(src.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(src, grey, ColorConversion.BgrToGray);
            Cv.Resize(grey, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        cars = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, HaarDetectionType.DoCannyPruning, new CvSize(30, 30));

        for (int i = 0; i < cars.Total; i++)
        {
            CvRect r = cars[i].Value.Rect;
            CvPoint center = new CvPoint
            {
                X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
            };
            int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
            src.Circle(center, radius, CvColor.Blue, 2, LineType.AntiAlias, 0);
        }
        */

        // Crop off the top half of the image since we're only interested in the lower portion of the video
        int halfWidth = src.Width / 2;
        int halfHeight = src.Height / 2;
        int startX = halfWidth - (halfWidth / 2);
        src.SetROI(new CvRect(0, halfHeight - 0, src.Width - 1, src.Height - 1));

        gray.SetROI(src.GetROI());
        dstCanny.SetROI(src.GetROI());

        src.CvtColor(gray, ColorConversion.BgrToGray);
        Cv.Smooth(gray, gray, SmoothType.Gaussian, 5, 5);
        Cv.Canny(gray, dstCanny, 50, 200, ApertureSize.Size3);

        storage.Clear();
        lines = dstCanny.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 50, 100);

        for (int i = 0; i < lines.Total; i++)
        {
            CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;

            int dx = elem.P2.X - elem.P1.X;
            int dy = elem.P2.Y - elem.P1.Y;
            double angle = Math.Atan2(dy, dx) * 180 / Math.PI;

            // if (Math.Abs(angle) <= 10)
            //     continue;

            if (elem.P1.Y > elem.P2.Y + 50 || elem.P1.Y < elem.P2.Y - 50)
            {
                src.Line(elem.P1, elem.P2, CvColor.Green, 9, LineType.Link8, 0);
            }
        }

        src.ResetROI();
        storage.Clear();
        w.Image = src;

        // canny.Image = dstCanny;
        // smoothing.Image = gray;
        // w.Image = dstCanny;
        // w.Image = dstCanny;
    }
}