/// <summary>
/// Draws a single optical-flow vector onto the flow image, colour-coded by its
/// direction: the angle between the vector and the X unit vector is used as the
/// HSV hue (with full saturation and value) before conversion to BGR.
/// </summary>
/// <param name="flowVectorImage">The image the vector is drawn onto.</param>
/// <param name="flowVector">The flow vector to draw.</param>
private void AddToFlowVectorImage(Image<Bgr, byte> flowVectorImage, LineSegment2DF flowVector)
{
    double angleDegrees = flowVector.GetExteriorAngleDegree(UnitVectorOfX);
    var colour = new HsvToBgrConverter(angleDegrees, 1, 1).Convert();
    flowVectorImage.Draw(flowVector, colour, 1);
}
/// <summary>
/// Averages a group of segments by fitting each one as y = k*x + c, averaging
/// the slopes and intercepts, and materialising the averaged line across the
/// image width. (Method name kept as-is for compatibility with existing callers.)
/// </summary>
/// <param name="lines">The segments to average. Must be non-empty; vertical
/// segments (Direction.X == 0) produce infinite slopes and are not supported.</param>
/// <param name="imageWidth">Width over which the averaged line is evaluated.
/// Defaults to 640, the value previously hard-coded.</param>
/// <returns>A segment from x = 0 to x = <paramref name="imageWidth"/> along the averaged line.</returns>
/// <exception cref="ArgumentException">Thrown when <paramref name="lines"/> is null or empty
/// (previously this silently produced a NaN line via 0/0).</exception>
public LineSegment2DF AvrageLine(List<LineSegment2D> lines, float imageWidth = 640f)
{
    if (lines == null || lines.Count == 0)
        throw new ArgumentException("At least one line is required.", "lines");

    float avg_k = 0, avg_c = 0;
    foreach (LineSegment2D line in lines)
    {
        // Slope and y-intercept of this segment's supporting line.
        float k = line.Direction.Y / line.Direction.X;
        avg_k += k;
        avg_c += line.P1.Y - k * line.P1.X;
    }
    avg_k /= lines.Count;
    avg_c /= lines.Count;

    PointF p1 = new PointF(0, avg_c);
    PointF p2 = new PointF(imageWidth, imageWidth * avg_k + avg_c);
    return new LineSegment2DF(p1, p2);
}
/// <summary>
/// Get the exterior angle between this line and <paramref name="otherLine"/>
/// </summary>
/// <param name="otherLine">The other line</param>
/// <returns>The exterior angle between this line and <paramref name="otherLine"/>, in degrees, normalised into (-180, 180]</returns>
public double GetExteriorAngleDegree(LineSegment2DF otherLine)
{
    PointF d1 = Direction;
    PointF d2 = otherLine.Direction;
    // Signed angle between the two direction vectors.
    double degrees = (Math.Atan2(d2.Y, d2.X) - Math.Atan2(d1.Y, d1.X)) * (180.0 / Math.PI);
    // Wrap into the (-180, 180] range.
    if (degrees <= -180.0)
        degrees += 360;
    else if (degrees > 180.0)
        degrees -= 360;
    return degrees;
}
/// <summary>
/// Stores <paramref name="flowVector"/> into the flat flow-line array at index
/// vectorFieldX * height + width, marking whether its length exceeds the
/// configured noise threshold.
/// </summary>
// NOTE(review): the index expression is vectorFieldX * height + width, yet the
// parameters are named "width" and "height" — they appear to act as a cell
// coordinate and a stride rather than dimensions; confirm the intended layout
// with the caller.
private void AddToFlowLineArray(ComputedOpticalFlow.FlowLineVector[] opticalFlowLineArray, int vectorFieldX, int width, int height, LineSegment2DF flowVector)
{
    // OverThreshold is true when no threshold is configured at all, or when the
    // vector is longer than the threshold.
    opticalFlowLineArray[vectorFieldX * height + width] = new ComputedOpticalFlow.FlowLineVector
    {
        Line = flowVector,
        OverThreshold = !VectorNoiseThreshold.HasValue || flowVector.Length > VectorNoiseThreshold.Value
    };
}
/// <summary>
/// Finds the corridor's vanishing point and bisector from the line segments
/// detected in <paramref name="img"/>, draws the result onto the debug image
/// (the <c>temp</c> field), and returns a steering instruction as text.
/// </summary>
/// <param name="img">The colour frame to analyse.</param>
/// <returns>A human-readable steering instruction.</returns>
public string FindMiddleByImg(Image<Bgr, byte> img)
{
    List<LineSegment2D> positiveLines = new List<LineSegment2D>();
    List<LineSegment2D> negativeLines = new List<LineSegment2D>();
    LineHelper helper = new LineHelper(Img_Height, Img_Width, Roi_Height);

    // Extract all candidate line segments from the frame.
    List<LineSegment2D> lines = helper.ExtractContour(img, out temp);

    // Partition the segments by the sign of their slope; horizontal segments
    // (slope == 0) are discarded.
    foreach (LineSegment2D line in lines)
    {
        float slope = line.Direction.Y / line.Direction.X;
        if (slope > 0) { positiveLines.Add(line); }
        else if (slope < 0) { negativeLines.Add(line); }
    }

    // Both slope groups are required to locate the two corridor walls.
    if (positiveLines.Count == 0) return "No negative Line, Try look right!";
    if (negativeLines.Count == 0) return "No positive Line,Try look left!";

    LineSegment2DF posline = helper.AvrageLine(positiveLines);
    LineSegment2DF negline = helper.AvrageLine(negativeLines);
    temp.Draw(posline, new Bgr(0, 255, 0), 5);
    temp.Draw(negline, new Bgr(0, 255, 0), 5);

    // The vanishing point is where the two averaged wall lines intersect;
    // a low vanishing point (y > 140) indicates we are close to a corner.
    PointF crosspt = helper.GetIntersection(ref posline, ref negline);
    if (crosspt.Y > 140) return "Near Corner";

    // Scan along the ROI baseline for the x position equidistant (within 1 px)
    // from both wall lines; joining that point to the vanishing point gives the
    // corridor bisector.
    PointF pt = new PointF(0, Roi_Height);
    float i = 0;
    for (i = 0; i < 640; i++)
    {
        pt = new PointF(i, Roi_Height);
        if (Math.Abs(helper.GetDist(pt, posline) - helper.GetDist(pt, negline)) < 1)
            break;
    }
    LineSegment2DF middleline = new LineSegment2DF(crosspt, new PointF(i, 280));
    temp.Draw(middleline, new Bgr(0, 0, 255), 5);

    // Map the bisector's baseline x position into a steering command
    // (image assumed 640 px wide, split into left / centre / right bands).
    if (pt.X < 220 && pt.X > 0) return "Turn Left";
    else if (pt.X >= 220 && pt.X < 420) return "Go Straight";
    else if (pt.X >= 420 && pt.X <= 640) return "Turn Right";
    else return "No Sense";
}
/// <summary>
/// Two perpendicular unit segments (along +X and +Y) must form a 90° exterior angle.
/// </summary>
public void TestLine()
{
    PointF p1 = new PointF(0, 0);
    PointF p2 = new PointF(1, 0);
    PointF p3 = new PointF(0, 1);
    LineSegment2DF l1 = new LineSegment2DF(p1, p2);
    LineSegment2DF l2 = new LineSegment2DF(p1, p3);

    double angle = l1.GetExteriorAngleDegree(l2);

    // Expected value goes first in Assert.AreEqual (the original call had the
    // arguments reversed); compare with a tolerance since the angle is computed
    // with floating-point trigonometry.
    Assert.AreEqual(90.0, angle, 1e-9);
}
/// <summary>
/// Main per-frame handler: grabs a frame from the capture device and, depending
/// on the current mode, collects chessboard corner frames, runs the intrinsic
/// camera calibration, or displays the undistorted (remapped) image while
/// logging corner-to-corner distances to CSV files.
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
void _Capture_ImageGrabbed(object sender, EventArgs e)
{
    // Grab a frame from the capture device and a grayscale copy for detection.
    img = _Capture.RetrieveBgrFrame();
    Gray_Frame = img.Convert<Gray,Byte>();

    // ---- Phase 1: collect chessboard frames into the buffer ----
    if(currentMode == Mode.SavingFrames)
    {
        corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
        // Drawing is done on the colour image below rather than via
        // CameraCalibration.DrawChessboardCorners on the gray frame.
        if (corners != null) // chessboard found
        {
            // Refine corner locations to sub-pixel accuracy.
            Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
            // Once the user has pressed Go, buffer frames; otherwise just display the corners.
            if (start_Flag)
            {
                Frame_array_buffer[frame_buffer_savepoint] = Gray_Frame.Copy(); // store the frame
                frame_buffer_savepoint++; // advance the buffer position
                // Buffer full -> move on to the intrinsic calculation.
                if (frame_buffer_savepoint == Frame_array_buffer.Length) currentMode = Mode.Caluculating_Intrinsics;
            }
            // Draw the detected corners and the connecting segments on the colour image.
            img.Draw(new CircleF(corners[0], 3), new Bgr(Color.Yellow), 1);
            for(int i = 1; i<corners.Length; i++)
            {
                img.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                img.Draw(new CircleF(corners[i], 3), new Bgr(Color.Yellow), 1);
                Console.Write("Length corner2Corner "+i+" : "+new LineSegment2DF(corners[i - 1], corners[i]).Length.ToString()+"\n");
            }
            // Fixed delay so the user can move the board between captured frames.
            Thread.Sleep(100);
        }
        corners = null;
    }

    // ---- Phase 2: compute the intrinsic camera parameters ----
    if (currentMode == Mode.Caluculating_Intrinsics)
    {
        for (int k = 0; k < Frame_array_buffer.Length; k++)
        {
            corners_points_list[k] = CameraCalibration.FindChessboardCorners(Frame_array_buffer[k], patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
            // NOTE(review): this refines the WHOLE corners_points_list on every
            // loop iteration, and uses the live Gray_Frame rather than
            // Frame_array_buffer[k] — looks like it should refine only
            // corners_points_list[k] against the buffered frame; confirm.
            Gray_Frame.FindCornerSubPix(corners_points_list, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
            // Real-world object points: a width x height grid with 31 mm squares, z = 0.
            List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
            for (int i = 0; i < height; i++)
            {
                for (int j = 0; j < width; j++)
                {
                    object_list.Add(new MCvPoint3D32f(j*31.0F, i*31.0F, 0.0F));
                }
            }
            corners_object_list[k] = object_list.ToArray();
        }
        // Reprojection error should be as close to 0 as possible.
        double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, out EX_Param);
        // If CALIB_TYPE includes CV_CALIB_USE_INTRINSIC_GUESS and/or
        // CV_CALIB_FIX_ASPECT_RATIO, some or all of fx, fy, cx, cy must be set
        // in IC before calling CalibrateCamera; otherwise zeros are used.
        MessageBox.Show("Intrinsic Calculation Error: " + error.ToString(), "Results", MessageBoxButtons.OK, MessageBoxIcon.Information);
        currentMode = Mode.Calibrated;
        // Dump the distortion coefficients and the intrinsic matrix to the console.
        for (int i1 = 0; i1 < 8; i1++)
        {
            Console.Write(IC.DistortionCoeffs[i1,0].ToString()+"\n");
        }
        for (int i1 = 0; i1 < 3; i1++)
        {
            for (int j1 = 0; j1 < 3;j1++ ) Console.Write(IC.IntrinsicMatrix[i1, j1].ToString()+"\t");
            Console.Write("\n");
        }
    }

    // ---- Phase 3: calibrated — show the undistorted image and log distances ----
    if (currentMode == Mode.Calibrated)
    {
        // Show the original (distorted) image in the secondary picture box.
        Sub_PicturBox.Image = img.ToBitmap();
        // Build the undistortion maps from the computed intrinsics.
        Matrix<float> Map1, Map2;
        IC.InitUndistortMap(img.Width, img.Height, out Map1, out Map2);
        // Remap the image; uncorrected pixels are left transparent by EMGU
        // (would be controllable through the flag argument, here 0).
        Image<Bgr, Byte> temp = img.CopyBlank();
        CvInvoke.cvRemap(img, temp, Map1, Map2, 0, new MCvScalar(0));
        // Detect and draw corners on the UNDISTORTED image, logging
        // corner-to-corner distances to dataChessBoardCalibrated.csv.
        Gray_Frame = temp.Convert<Gray, Byte>();
        corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
        if (corners != null) // chessboard found
        {
            Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
            temp.Draw(new CircleF(corners[0], 3), new Bgr(Color.Yellow), 1);
            // CSV bookkeeping: write a header row of corner indices the first time.
            var csv = new StringBuilder();
            string dirSave = Directory.GetCurrentDirectory() + "/Resources/dirSave";
            string filePath = dirSave + "/dataChessBoardCalibrated.csv";
            var newLine = "";
            var newItem = "";
            newLine = string.Format("{0}", Environment.NewLine);
            if (!File.Exists(filePath))
            {
                for (int i = 1; i < corners.Length; i++)
                {
                    newItem = i.ToString() + ", ";
                    csv.Append(newItem);
                }
                csv.Append(newLine);
                File.WriteAllText(filePath, csv.ToString());
                csv.Clear();
            }
            // Draw corner chain and accumulate segment lengths for the CSV row.
            for (int i = 1; i < corners.Length; i++)
            {
                temp.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                temp.Draw(new CircleF(corners[i], 3), new Bgr(Color.Yellow), 1);
                Console.Write("Length corner2Corner " + i + " : " + new LineSegment2DF(corners[i - 1], corners[i]).Length.ToString() + "\n");
                newItem = new LineSegment2DF(corners[i - 1], corners[i]).Length.ToString() + ", ";
                csv.Append(newItem);
            }
            csv.Append(newLine);
            // Append the row; a failure (e.g. file locked) is reported but not fatal.
            try
            {
                File.AppendAllText(filePath, csv.ToString());
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.ToString());
            }
            csv.Clear();
            // Fixed delay so the user can move the board between samples.
            Thread.Sleep(100);
        }
        corners = null;

        // Repeat the measurement on the ORIGINAL (distorted) image, logging to
        // dataChessBoard.csv for comparison; no drawing this time.
        Gray_Frame = img.Convert<Gray,Byte>();
        corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
        if (corners != null) // chessboard found
        {
            Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
            var csv = new StringBuilder();
            string dirSave = Directory.GetCurrentDirectory() + "/Resources/dirSave";
            string filePath = dirSave + "/dataChessBoard.csv";
            var newLine = "";
            var newItem = "";
            newLine = string.Format("{0}", Environment.NewLine);
            if (!File.Exists(filePath))
            {
                for (int i = 1; i < corners.Length; i++)
                {
                    newItem = i.ToString() + ", ";
                    csv.Append(newItem);
                }
                csv.Append(newLine);
                File.WriteAllText(filePath, csv.ToString());
                csv.Clear();
            }
            for (int i = 1; i < corners.Length; i++)
            {
                Console.Write("Length corner2Corner " + i + " : " + new LineSegment2DF(corners[i - 1], corners[i]).Length.ToString() + "\n");
                newItem = new LineSegment2DF(corners[i - 1], corners[i]).Length.ToString() + ", ";
                csv.Append(newItem);
            }
            csv.Append(newLine);
            try
            {
                File.AppendAllText(filePath, csv.ToString());
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.ToString());
            }
            csv.Clear();
        }
        // Display the undistorted image and reset the UI for another run.
        img = temp.Copy();
        SetButtonState(true);
        start_Flag = false;
    }
    Main_Picturebox.Image = img.ToBitmap();
}
public static Transformation ApplyRansac(Dictionary<PointF, PointF> c, int nbMaxIteration = 200, int nbSuffisantInliers = 10000) { int nbTentative = 0; int maxInliers = 0; var best = new Transformation(); var matchs = new Dictionary<PointF, PointF>(); int nbMatch = c.Count; int nbCombi = (nbMatch * nbMatch - 1) / 2; int nbMin = Math.Min(nbCombi, nbMaxIteration); var tirages = new List<Point>(); if (nbCombi < 200000) { for (int i = 0; i < nbMatch; i++) { for (int j = 0; j < nbMatch; j++) { if (i > j) { tirages.Add(new Point(i, j)); } } } } else { var rand = new Random(); while (tirages.Count < 1000) { var rand1 = rand.Next(nbMatch); var rand2 = rand.Next(nbMatch); var randCoord = new Point(rand1, rand2); if (!tirages.Contains(randCoord)) tirages.Add(randCoord); } } while (nbTentative < nbMin) { var nbInliers = 0; matchs.Clear(); var rand = new Random(); int rando = rand.Next(tirages.Count); int rand1 = tirages[rando].X; int rand2 = tirages[rando].Y; tirages.RemoveAt(rando); nbTentative++; PointF p1 = c.Keys.ElementAt(rand1); PointF p2 = c.Keys.ElementAt(rand2); var lineFromSource = new LineSegment2DF(p1, p2); var lineFromPattern = new LineSegment2DF(c[p1], c[p2]); if (lineFromPattern.Length < 5 || lineFromSource.Length < 5) { nbTentative++; continue; } matchs.Add(p1, c[p1]); matchs.Add(p2, c[p2]); float translationX = lineFromSource.P1.X - lineFromPattern.P1.X; float translationY = lineFromSource.P1.Y - lineFromPattern.P1.Y; double scale = lineFromSource.Length / lineFromPattern.Length; if (scale > 4 || scale < 0.2) { nbTentative++; continue; } double rotation = lineFromSource.GetExteriorAngleDegree(lineFromPattern); var matRot = new Matrix<float>(2, 3); CvInvoke.cv2DRotationMatrix(lineFromPattern.P1, rotation, scale, matRot.Ptr); for (int i = 0; i < nbMatch; i++) { if (i == rand1 || i == rand2) { continue; } bool skip = false; foreach (var match in matchs.Keys) { var distValid1 = new LineSegment2DF(match, c.Keys.ElementAt(i)); var distValid2 = new 
LineSegment2DF(c[match], c[c.Keys.ElementAt(i)]); if (distValid1.Length < 5 || distValid2.Length < 5) { skip = true; break; } } if (skip) continue; PointF pi = c.Keys.ElementAt(i); var point1 = new Matrix<float>(3, 1) { [0, 0] = pi.X,
/// <summary>
/// Draws a 1-px red horizontal line across the full image width at the given row.
/// </summary>
/// <param name="image">The image to draw onto.</param>
/// <param name="yPos">The y coordinate of the boundary row.</param>
private void DrawRegionBoundary(Image<Bgr, Byte> image, int yPos)
{
    var boundary = new LineSegment2DF(new PointF(0, yPos), new PointF(image.Width, yPos));
    image.Draw(boundary, new Bgr(Color.Red), 1);
}
/// <summary>
/// Draws the optical-flow result: for every feature whose tracking status is 1,
/// a red line from its previous to its current position plus a small red circle
/// marking the current position.
/// </summary>
private void DrawFlowVectors()
{
    for (int idx = 0; idx < this.TrackedFeatures.Length; idx++)
    {
        if (m_TrackingStatus[idx] != 1)
            continue;

        var segment = new LineSegment2DF(this.PreviousFoundFeatures[idx], this.TrackedFeatures[idx]);
        this.FlowImage.Draw(segment, new Bgr(Color.Red), 1);
        this.FlowImage.Draw(new CircleF(this.TrackedFeatures[idx], 2.0f), new Bgr(Color.Red), 1);
    }
}
/// <summary>
/// Returns a copy of <paramref name="line"/> whose endpoints are shifted in the
/// negative X direction by half of <paramref name="translationLine"/>'s length,
/// scaled by the X component of its direction; Y coordinates are unchanged.
/// </summary>
/// <param name="line">The line to translate.</param>
/// <param name="translationLine">The line whose length/direction define the shift.</param>
/// <returns>The translated line.</returns>
private LineSegment2DF translatationLineXNeg(LineSegment2DF line, LineSegment2DF translationLine)
{
    float shift = (float)translationLine.Length / 2 * translationLine.Direction.X;
    var first = new PointF(line.P1.X - shift, line.P1.Y);
    var second = new PointF(line.P2.X - shift, line.P2.Y);
    return new LineSegment2DF(first, second);
}
/// <summary>
/// Detects rectangular sign candidates: masks dark ("black") pixels, finds
/// rectangles in their contours, sorts the four stored black reference points
/// into a fixed layout, derives guide lines from them, then repeats the
/// rectangle search on a bright ("white") pixel mask restricted by the selector
/// image. Debug masks are shown in two OpenCV windows.
/// </summary>
// NOTE(review): contourSignFound is accepted but never used in this method —
// confirm whether it should be populated.
public void DetectRect(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList, List<Contour<Point>> contourSignFound)
{
    imagecolor = img;
    joinContour.Clear();
    // Gaussian blur then HSV-range mask for dark pixels (low value channel).
    Image<Bgr, Byte> smoothImg = img.SmoothGaussian(5, 5, 1.5, 1.5);
    Image<Gray, Byte> smoothedBlackMask = GetColorPixelMask(smoothImg, 0, 180, 0, 94, 0, 100);
    imageGray = smoothedBlackMask;
    // Dilate then erode (morphological closing) to bridge small contour gaps.
    smoothedBlackMask._Dilate(1);
    smoothedBlackMask._Erode(1);
    using (Image<Gray, Byte> canny = smoothedBlackMask.Canny(new Gray(100), new Gray(50)))
    using (MemStorage stor = new MemStorage())
    {
        Contour<Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
            stor);
        FindRect(img, stopSignList, boxList, contours, 5);
    }
    // Restrict the black mask to the selector region and overlay its contours.
    CvInvoke.cvAnd(imageGray, imageSelector, imageGray, IntPtr.Zero);
    using (Image<Gray, Byte> cannySelector = imageSelector.Canny(new Gray(100), new Gray(50)))
    using (MemStorage stor = new MemStorage())
    {
        Contour<Point> contours = cannySelector.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
            stor);
        imageGray.Draw(contours, new Gray(255), 1);
    }
    //imageGray.Draw(joinContour.GetMinAreaRect(),new Gray(180),1);
    CvInvoke.cvShowImage("Image Black", imageGray);
    PointF temp = new PointF();
    MCvBox2D tempbox = new MCvBox2D();
    bool swapped = false;
    // Bubble-sort the four stored points by Y so they end up in the layout:
    //   0
    //  1 2
    //   4
    // (keeping each point's min-area box aligned with it).
    do
    {
        swapped = false;
        for (int i = 0; i < 3; i++)
        {
            if (pointBlack[i].Y > pointBlack[i + 1].Y)
            {
                temp = pointBlack[i];
                tempbox= minBoxesBlack[i];
                pointBlack[i] = pointBlack[i + 1];
                minBoxesBlack[i] = minBoxesBlack[i + 1];
                pointBlack[i + 1] = temp;
                minBoxesBlack[i + 1] = tempbox;
                swapped = true;
            }
        }
    } while (swapped);
    // Order the two middle points left-to-right.
    if (pointBlack[1].X > pointBlack[2].X)
    {
        temp = pointBlack[1];
        tempbox = minBoxesBlack[1];
        pointBlack[1] = pointBlack[2];
        minBoxesBlack[1] = minBoxesBlack[2];
        pointBlack[2] = temp;
        minBoxesBlack[2] = tempbox;
    }
    MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 0.8, 0.8);
    //for (int i=0; i < 4; i++)
    //{
    //    imageGray.Draw(" " + i, ref f, new Point((int)pointBlack[i].X, (int)pointBlack[i].Y), new Gray(200));
    //    imageGray.Draw(minBoxesBlack[i], new Gray(100), 2);
    //}
    // Guide lines: vertical axis (top-bottom points), horizontal axis
    // (left-right points), and the vertical axis translated left/right by half
    // the horizontal axis.
    LineSegment2DF[]lines = new LineSegment2DF[9];
    lines[0] = new LineSegment2DF(pointBlack[0], pointBlack[3]);
    lines[1] = new LineSegment2DF(pointBlack[1], pointBlack[2]);
    lines[2] = translatationLineXNeg(lines[0], lines[1]);
    lines[3] = translatationLineXPos(lines[0], lines[1]);
    imageGray.Draw(lines[0], new Gray(100), 2);
    imageGray.Draw(lines[1], new Gray(100), 2);
    imageGray.Draw(lines[2], new Gray(100), 2);
    imageGray.Draw(lines[3], new Gray(100), 2);
    //areas.Clear();
    // Second pass: the same pipeline on the bright ("white") pixel mask.
    Image<Gray, Byte> smoothedWhiteMask = GetColorPixelMask(smoothImg, 0, 180, 0, 94, 92, 255);
    imageGray = smoothedWhiteMask;
    // Closing again to remove small contour gaps.
    smoothedWhiteMask._Dilate(1);
    smoothedWhiteMask._Erode(1);
    CvInvoke.cvAnd(smoothedWhiteMask, imageSelector, smoothedWhiteMask, IntPtr.Zero);
    using (Image<Gray, Byte> canny = smoothedWhiteMask.Canny(new Gray(100), new Gray(50)))
    using (MemStorage stor = new MemStorage())
    {
        Contour<Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
            stor);
        FindRect(img, stopSignList, boxList, contours,6);
    }
    CvInvoke.cvShowImage("Image White", smoothedWhiteMask);
}
/// <summary>
/// Computes the intersection of two lines by expressing each as y = k*x + c
/// and solving for the common x.
/// </summary>
/// <param name="posline">The first line (positive slope by convention).</param>
/// <param name="negline">The second line (negative slope by convention).</param>
/// <returns>The intersection point.</returns>
// NOTE(review): parallel lines (equal slopes) or a vertical line
// (Direction.X == 0) yield NaN/Infinity coordinates — confirm callers never
// pass those.
public PointF GetIntersection(ref LineSegment2DF posline, ref LineSegment2DF negline)
{
    float slopePos = posline.Direction.Y / posline.Direction.X;
    float slopeNeg = negline.Direction.Y / negline.Direction.X;
    float interceptPos = posline.P1.Y - slopePos * posline.P1.X;
    float interceptNeg = negline.P1.Y - slopeNeg * negline.P1.X;
    float crossX = (interceptPos - interceptNeg) / (slopeNeg - slopePos);
    return new PointF(crossX, slopePos * crossX + interceptPos);
}
/// <summary>
/// Returns the perpendicular distance from <paramref name="pt"/> to the
/// infinite line through <paramref name="line"/>.
/// Uses the cross-product form |d × (pt − P1)| / |d|, which is algebraically
/// identical to the previous slope/intercept form for non-vertical lines but
/// (unlike it) also handles vertical lines instead of returning NaN.
/// </summary>
/// <param name="pt">The point.</param>
/// <param name="line">The line (its two endpoints define the direction).</param>
/// <returns>The distance; for a degenerate zero-length direction, the distance to P1.</returns>
public double GetDist(PointF pt, LineSegment2DF line)
{
    double dx = line.Direction.X;
    double dy = line.Direction.Y;
    double norm = Math.Sqrt(dx * dx + dy * dy);
    if (norm == 0)
    {
        // Degenerate segment: fall back to point-to-point distance.
        double ex = pt.X - line.P1.X;
        double ey = pt.Y - line.P1.Y;
        return Math.Sqrt(ex * ex + ey * ey);
    }
    return Math.Abs(dy * (pt.X - line.P1.X) - dx * (pt.Y - line.P1.Y)) / norm;
}
/// <summary>
/// Moves an out-of-range feature halfway toward the centre point, returning the
/// midpoint (truncated to whole pixels by the int casts) and drawing debug
/// markers: a green circle at the midpoint and a pink line from the feature to it.
/// </summary>
/// <param name="feature">The feature position.</param>
/// <param name="center_point">The centre to move toward.</param>
/// <returns>The truncated midpoint.</returns>
private PointF RelocateTooFarPoint(PointF feature, PointF center_point)
{
    var midpoint = new PointF(
        (int)((feature.X + center_point.X) * 0.5),
        (int)((feature.Y + center_point.Y) * 0.5));
    // colored_temp_image.Draw(new CircleF(feature, 3f), new Bgr(Color.Black), 2);
    colored_temp_image.Draw(new CircleF(midpoint, 3f), new Bgr(0, 255, 0), 2);
    colored_temp_image.Draw(new LineSegment2DF(feature, midpoint), new Bgr(Color.DeepPink), 2);
    return midpoint;
}
/// <summary>
/// Builds the flow vector for one grid cell: it starts at the cell's pixel
/// origin and ends displaced by the velocities sampled from the velocityX /
/// velocityY fields at that position.
/// </summary>
/// <param name="dirX">Cell column in the vector-field grid.</param>
/// <param name="dirY">Cell row in the vector-field grid.</param>
/// <returns>The flow vector for this cell.</returns>
private LineSegment2DF CalculateFlowVector(int dirX, int dirY)
{
    int row = dirY * WindowSize.Height;
    int col = dirX * WindowSize.Width;
    float vx = (float)velocityX[row, col].Intensity;
    float vy = (float)velocityY[row, col].Intensity;
    var start = new PointF(col, row);
    var end = new PointF(col + vx, row + vy);
    return new LineSegment2DF(start, end);
}
/// <summary>
/// convert a series of points to LineSegment2D
/// </summary>
/// <param name="points">the array of points</param>
/// <param name="closed">if true, the last line segment is defined by the last point of the array and the first point of the array</param>
/// <returns>array of LineSegment2D; empty when fewer than two segments can be formed</returns>
public static LineSegment2DF[] PolyLine(PointF[] points, bool closed)
{
    int length = points.Length;
    // Guard: previously an empty input threw (new LineSegment2DF[-1] /
    // points[-1]); return an empty array instead.
    if (length == 0) return new LineSegment2DF[0];

    LineSegment2DF[] res;
    if (closed)
    {
        // n segments: wrap around from the last point back to the first.
        res = new LineSegment2DF[length];
        PointF lastPoint = points[length - 1];
        for (int i = 0; i < res.Length; i++)
        {
            res[i] = new LineSegment2DF(lastPoint, points[i]);
            lastPoint = points[i];
        }
    }
    else
    {
        // n-1 segments joining consecutive points.
        // Bug fix: the original loop started at i = 1, leaving res[0]
        // uninitialised and dropping the final segment.
        res = new LineSegment2DF[length - 1];
        for (int i = 0; i < res.Length; i++)
        {
            res[i] = new LineSegment2DF(points[i], points[i + 1]);
        }
    }
    return res;
}
/// <summary>
/// Finds the key point nearest to a normalised (x, y) position: the position is
/// scaled to pixel coordinates using the capture frame size, then the key-point
/// vector is scanned linearly for the smallest Euclidean distance.
/// </summary>
/// <param name="x">Normalised x in [0, 1] of the target position.</param>
/// <param name="y">Normalised y in [0, 1] of the target position.</param>
/// <param name="keyFeaturesVector">The candidate key points; must be non-empty.</param>
/// <returns>The nearest key point.</returns>
private MKeyPoint GetNearestKeyPoint(double x, double y, VectorOfKeyPoint keyFeaturesVector)
{
    double frameWidth = _capture.GetCaptureProperty(CapProp.FrameWidth);
    double frameHeight = _capture.GetCaptureProperty(CapProp.FrameHeight);
    var target = new PointF((float)(x * frameWidth), (float)(y * frameHeight));

    // First() keeps the original behaviour of throwing on an empty vector.
    MKeyPoint nearest = keyFeaturesVector.Enumerable().First();
    double bestDistance = double.MaxValue;
    foreach (var candidate in keyFeaturesVector.Enumerable())
    {
        double candidateDistance = new LineSegment2DF(candidate.Point, target).Length;
        if (candidateDistance < bestDistance)
        {
            nearest = candidate;
            bestDistance = candidateDistance;
        }
    }
    return nearest;
}
/// <summary>
/// Get the exterior angle between this line and <paramref name="otherLine"/>
/// </summary>
/// <param name="otherLine">The other line</param>
/// <returns>The exterior angle between this line and <paramref name="otherLine"/>, in degrees within (-180, 180]</returns>
public double GetExteriorAngleDegree(LineSegment2DF otherLine)
{
    PointF thisDir = Direction;
    PointF otherDir = otherLine.Direction;
    // Difference of the two direction angles, converted to degrees.
    double radians = Math.Atan2(otherDir.Y, otherDir.X) - Math.Atan2(thisDir.Y, thisDir.X);
    double degrees = radians * 180.0 / Math.PI;
    // Normalise into (-180, 180].
    while (degrees <= -180.0) degrees += 360.0;
    while (degrees > 180.0) degrees -= 360.0;
    return degrees;
}
/// <summary>
/// Find the wheels and run the rest of code: restricts processing to the lower
/// part of the frame, runs HoughCircles at several accumulator thresholds to
/// find wheel candidates, measures the distance between the first two centres,
/// and (when a bike is recognised) draws the result and highlights the part to
/// repair. Returns the circle image produced by drawEverything (or a blank one).
/// </summary>
/// <param name="imgToPro">Grayscale image the detection runs on.</param>
/// <param name="imgToDrawOn">Colour image the results are drawn onto.</param>
/// <param name="isRealTime">Real-time mode: smaller min radius and a redraw timer.</param>
/// <param name="repairSubject">Which bike part to highlight.</param>
/// <returns>The circle image for the detected bike.</returns>
public Image<Gray, Byte> process(Image<Gray, Byte> imgToPro, Image<Bgr,Byte> imgToDrawOn,bool isRealTime, String repairSubject)
{
    // HoughCircles parameters; circleAccumulatorThreshold is recomputed per pass below.
    double cannyThreshold = 200;
    double circleAccumulatorThreshold =300;
    double resolutionOfAccumulator = 2.0;
    double minDist = 500;//300;
    int minRadius = 150;
    int maxRadius = 300;
    timer1--;
    if (isRealTime)
    {
        minRadius = 50;
    }
    // NOTE(review): SmoothGaussian returns a NEW image; this call discards the
    // result, so imgToPro is NOT actually smoothed — assign the result if
    // smoothing is intended.
    imgToPro.SmoothGaussian(333);
    // Only the lower two thirds of the frame is processed (wheels are at the bottom).
    Rectangle rect = new Rectangle(0, 0+(this.Height/3), this.Width, this.Height - this.Height/3 );
    imgToPro.ROI = rect;
    imgToDrawOn.ROI = rect;
    // Type avg = imgToPro.GetAverage(imgToPro);
    // Since the wheels should be in the bottom part of the image, half the ROI
    // height bounds the wheel radius.
    maxRadius = rect.Height / 2;
    // minRadius = rect.Width/ 10;
    // NOTE(review): valuesToTest holds doubles, but the foreach below iterates
    // with an int variable, so 1.25/1.5/1.75/2.5 are truncated to 1/1/1/2 and
    // several passes use duplicate thresholds — confirm the intended values.
    double[] valuesToTest = new double[] { 1,1.25,1.5,1.75, 2,2.5, 3, 4, 5, 6,7,8,9,10 };
    // One HoughCircles pass per accumulator threshold derived from the ROI width.
    List<CircleF[]> circles = new List<CircleF[]>();
    List<PointF> centers = new List<PointF>();
    foreach (int i in valuesToTest)
    {
        circleAccumulatorThreshold = rect.Width / i +1;
        circles.Add(imgToPro.HoughCircles(new Gray(cannyThreshold), new Gray(circleAccumulatorThreshold), resolutionOfAccumulator, minDist, minRadius, maxRadius)[0]);
        //Console.WriteLine(circles);
    }
    List<CircleF> circlesList = new List<CircleF>();
    int radInt = 0;
    // Flatten the per-pass results, collecting centres and the last radius seen.
    Image<Gray, Byte> circleImage = imgToPro.CopyBlank();
    Point centerOfWheel = new Point();
    for (int i = 0; i < circles.Count; i++)
    {
        for(int z = 0; z < circles[i].Length; z++)
        {
            circlesList.Add(circles[i][z]);
            //CircleF
            PointF cpoint = circles[i][z].Center;
            centers.Add(cpoint);
            float rad = circles[i][z].Radius;
            radInt = Convert.ToInt32(rad);
            // Stop collecting once more than two centres are gathered
            // (only the first two are used for the wheel distance).
            if (centers.Count > 2) break;
            // Console.WriteLine(circles[i][z]);
        }
    }
    // Distance between the first two wheel centres.
    double length = 0;
    if (centers.Count > 2)
    {
        float startX = centers[0].X;
        float startY = centers[0].Y;
        float endX = centers[1].X;
        float endY = centers[1].Y;
        Point start = new Point((int)startX, (int)startY);
        Point end = new Point((int)endX, (int)endY);
        length = new LineSegment2DF(start, end).Length;
    }
    if (isRealTime)
    {
        // While the timer is running, keep redrawing the previously found bike.
        if (timer1 > 0)
        {
            if (this.centers != null)
            {
                circleImage = drawEverything(this.centers, imgToPro, this.centerofwheel, imgToDrawOn, this.circleslist, this.length, radInt);
            }
        }
        else
        {
            // Timer expired: accept a newly found bike, cache its geometry, and
            // restart the redraw timer.
            if (isBikeFound(length, imgToPro))
            {
                centerOfWheel = detectRightWheelCenter(centerOfWheel, centers, imgToPro);
                circleImage = drawEverything(centers, imgToPro, centerOfWheel, imgToDrawOn, circlesList, length, radInt);
                this.centers = centers;
                this.centerofwheel = centerOfWheel;
                this.circleslist = circlesList;
                this.radint = radInt;
                this.length = length;
                this.distancebetweenwheels = length;
                timer1 = 30;
                // Highlight the repair subject on the colour image.
                imgToDrawOn = highlightPartThatNeedsFixing(repairSubject , imgToDrawOn);
            }
        }
    }
    else
    {
        //Console.WriteLine("sadsadsa");
        // Offline mode: draw and highlight immediately when a bike is found.
        if (isBikeFound(length, imgToPro))
        {
            centerOfWheel = detectRightWheelCenter(centerOfWheel, centers, imgToPro);
            circleImage = drawEverything(centers, imgToPro, centerOfWheel, imgToDrawOn, circlesList, length, radInt);
            imgToDrawOn = highlightPartThatNeedsFixing(repairSubject, imgToDrawOn);
        }
    }
    //
    return circleImage;
}