/// <summary>Creates a marker from its internal and external contours; 4-rotation search is enabled by default.</summary>
public Marker(Contour<Point> cInternal, Contour<Point> cExternal)
{
    contourInternal = cInternal;
    contourExternal = cExternal;
    rot4Position = true;
}
// Builds a sign detector: trains a SURF feature tracker on the colour-masked
// model image and prepares a reference hexagon contour for shape matching.
public SignDetector(Image<Bgr, Byte> stopSignModel)
{
    _detector2 = new SURFDetector(500, false);
    using (Image<Gray, Byte> redMask = GetColorPixelMask(stopSignModel))
    {
        try
        {
            _tracker2 = new Features2DTracker<float>(_detector2.DetectFeatures(redMask, null));
        }
        // NOTE(review): swallows every exception, leaving _tracker2 null --
        // presumably a deliberate best-effort; confirm callers null-check _tracker2.
        catch { }
    }
    _octagonStorage2 = new MemStorage();
    _octagon2 = new Contour<Point>(_octagonStorage2);
    // Scale-invariant shape template used with cvMatchShapes later.
    _octagon2.PushMulti(new Point[] {
        //hexagon
        new Point(1, 0),
        new Point(2, 0),
        new Point(3, 1),
        new Point(2, 2),
        new Point(1, 2),
        new Point(0, 1)},
        //octagon
        //new Point(1, 0),
        //new Point(2, 0),
        //new Point(3, 1),
        //new Point(3, 2),
        //new Point(2, 3),
        //new Point(1, 3),
        //new Point(0, 2),
        //new Point(0, 1)},
        Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
}
/// <summary>
/// Copy constructor: takes over the source's contour, child list, enclosing
/// square and bounding rectangle.
/// </summary>
public EContour(EContour c)
{
    contour = c.getContour();
    includeContour = c.getIncludeContour();
    includeSquere = c.getIncludeSquere();
    rect = c.getRectangle();
    // Bug fix: the other constructors initialize coeff to {3.8, 4.2}, but the
    // copy constructor previously left it null, so any later coeff access on a
    // copied instance threw a NullReferenceException. Clone the source's
    // coefficients when present, otherwise fall back to the defaults.
    coeff = c.coeff != null ? (double[])c.coeff.Clone() : new double[] { 3.8, 4.2 };
}
/// <summary>Creates a marker from its contours plus the binarized image it was detected in; 4-rotation search is enabled by default.</summary>
public Marker(Contour<Point> cInternal, Contour<Point> cExternal, Image<Gray, Byte> bin)
{
    contourInternal = cInternal;
    contourExternal = cExternal;
    image = bin;
    rot4Position = true;
}
/// <summary>Rewinds a contour sibling chain back to its first element (no-op for null).</summary>
private static void ResetContoursNavigation(ref Contour<System.Drawing.Point> contours)
{
    if (contours == null)
        return;
    // Walk the horizontal "previous" links until the head is reached.
    for (; contours.HPrev != null; contours = contours.HPrev)
    {
    }
}
// Recursively scans a contour tree for regions whose outline matches the
// _octagon shape template, then confirms each candidate region by matching
// its SURF features against _tracker. Confirmed regions are appended to
// boxList (bounding boxes) and imgList (masked grayscale crops).
private void FindImage(Image<Gray, byte> img, List<Image<Gray, Byte>> imgList, List<Rectangle> boxList, Contour<Point> contours)
{
    for (; contours != null; contours = contours.HNext)
    {
        // Polygon-approximate in place at 2% of the perimeter.
        contours.ApproxPoly(contours.Perimeter * 0.02, 0, contours.Storage);
        if (contours.Area > 200)
        {
            double ratio = CvInvoke.cvMatchShapes(_octagon, contours, Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
            if (ratio > 0.1) //not a good match of contour shape
            {
                // Shape mismatch: descend into the children before moving on.
                Contour<Point> child = contours.VNext;
                if (child != null)
                    FindImage(img, imgList, boxList, child);
                continue;
            }
            Rectangle box = contours.BoundingRectangle;
            Image<Gray, Byte> candidate;
            using (Image<Gray, Byte> tmp = img.Copy(box))
                candidate = tmp.Convert<Gray, byte>();
            //set the value of pixels not in the contour region to zero
            using (Image<Gray, Byte> mask = new Image<Gray, byte>(box.Size))
            {
                mask.Draw(contours, new Gray(255), new Gray(255), 0, -1, new Point(-box.X, -box.Y));
                // Binarize around the mean intensity inside the contour,
                // invert, and blank out everything outside the contour.
                double mean = CvInvoke.cvAvg(candidate, mask).v0;
                candidate._ThresholdBinary(new Gray(mean), new Gray(255.0));
                candidate._Not();
                mask._Not();
                candidate.SetValue(0, mask);
            }
            // NOTE(review): rejected candidates are never disposed here --
            // consider disposing when the feature check below fails.
            ImageFeature<float>[] features = _detector.DetectFeatures(candidate, null);
            int minMatchCount = 10;
            if (features != null && features.Length >= minMatchCount)
            {
                Features2DTracker<float>.MatchedImageFeature[] matchedFeatures = _tracker.MatchFeature(features, 2);
                int goodMatchCount = 0;
                // A "good" match is one whose best neighbour distance is < 0.5.
                foreach (Features2DTracker<float>.MatchedImageFeature ms in matchedFeatures)
                    if (ms.SimilarFeatures[0].Distance < 0.5) goodMatchCount++;
                if (goodMatchCount >= minMatchCount)
                {
                    boxList.Add(box);
                    imgList.Add(candidate);
                }
            }
        }
    }
}
/// <summary>Default constructor: empty contour, no children, default shape coefficients.</summary>
public EContour()
{
    contour = new Contour<Point>(new MemStorage());
    includeContour = new List<EContour>();
    rect = null;
    includeSquere = null;
    // Default aspect-ratio coefficients.
    coeff = new double[] { 3.8, 4.2 };
}
/// <summary>Wraps an existing contour with no children and the default shape coefficients.</summary>
public EContour(Contour<Point> c)
{
    contour = c;
    includeContour = new List<EContour>();
    rect = null;
    // Default aspect-ratio coefficients.
    coeff = new double[] { 3.8, 4.2 };
}
/// <summary>Adds a tab page for the given contour containing a data grid bound to it.</summary>
public void CreatePageForContour(Contour contour)
{
    var page = new TabPage
    {
        Size = new Size(_control.Width - 8, _control.Height - 26),
        Text = string.Format("Контур {0}", contour.Index)
    };
    // The grid fills the page and displays the contour's data.
    var contourGrid = new ContourDataGridView
    {
        Size = new Size(page.Width, page.Height),
        DataSource = contour
    };
    page.Controls.Add(contourGrid);
    _control.TabPages.Add(page);
}
/// <summary>
/// Draws the contour outline in blue (width 3), then a small circle on every
/// vertex of the contour.
/// </summary>
/// <param name="img">image to draw onto</param>
/// <param name="contour">contour whose outline and vertices are drawn</param>
public static void draw4ContourAndCircle(Image<Bgr, Byte> img, Contour<Point> contour)
{
    img.Draw(contour, new Bgr(255, 0, 0), 3);
    for (int i = 0; i < contour.Total; i++)
    {
        // Bug fix: the original indexed contour[0] on every iteration, so all
        // circles were stacked on the first vertex instead of one per vertex.
        PointF pkt = new PointF(contour[i].X, contour[i].Y);
        img.Draw(new CircleF(pkt, 4), new Bgr(i * 50, i * 50, 250), 4);
    }
}
/// <summary>Returns the first contour in the sibling chain whose area meets the minimum, or null when none qualifies.</summary>
public Contour<Point> FindContours(Contour<Point> contours)
{
    for (Contour<Point> current = contours; current != null; current = current.HNext)
    {
        if (current.Area >= ConstValue.OBJECT_MIN_AREA)
        {
            return current;
        }
    }
    return null;
}
/// <summary>
/// Starts the camera capture and displays each frame whenever the dispatcher
/// is idle.
/// </summary>
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    // Camera index 7 -- TODO confirm this device id is correct for deployment.
    Capture capture = new Capture(7);
    capture.Start();
    ComponentDispatcher.ThreadIdle += (o, arg) =>
    {
        var img = capture.QueryFrame();
        // Bug fix: the original allocated a Contour<Bgr> plus a MemStorage on
        // every idle tick and never used or disposed them -- a steady native
        // memory leak. Also guard against a null frame from QueryFrame.
        if (img != null)
        {
            Display.Source = BitmapSourceConvert.ToBitmapSource(img);
        }
    };
}
/// <summary>
/// Use camshift to track the feature
/// </summary>
/// <param name="observedFeatures">The feature found from the observed image</param>
/// <param name="initRegion">The predicted location of the model in the observed image. If not known, use MCvBox2D.Empty as default</param>
/// <param name="priorMask">The mask that should be the same size as the observed image. Contains a priori value of the probability a match can be found. If you are not sure, pass an image fills with 1.0s</param>
/// <returns>If a match is found, the homography projection matrix is returned. Otherwise null is returned</returns>
public HomographyMatrix CamShiftTrack(SURFFeature[] observedFeatures, MCvBox2D initRegion, Image<Gray, Single> priorMask)
{
    using (Image<Gray, Single> matchMask = new Image<Gray, Single>(priorMask.Size))
    {
        #region get the list of matched point on the observed image
        Single[, ,] matchMaskData = matchMask.Data;
        //Compute the matched features
        MatchedSURFFeature[] matchedFeature = _matcher.MatchFeature(observedFeatures, 2, 20);
        matchedFeature = VoteForUniqueness(matchedFeature, 0.8);
        // Mark each uniquely-matched point in the mask, weighted by the
        // inverse of its match distance (closer match => larger weight).
        foreach (MatchedSURFFeature f in matchedFeature)
        {
            PointF p = f.ObservedFeature.Point.pt;
            matchMaskData[(int)p.Y, (int)p.X, 0] = 1.0f / (float) f.SimilarFeatures[0].Distance;
        }
        #endregion
        // Start from the whole image when no initial region was supplied;
        // otherwise from the initial region's bounding box clipped to the ROI.
        Rectangle startRegion;
        if (initRegion.Equals(MCvBox2D.Empty))
            startRegion = matchMask.ROI;
        else
        {
            startRegion = PointCollection.BoundingRectangle(initRegion.GetVertices());
            if (startRegion.IntersectsWith(matchMask.ROI))
                startRegion.Intersect(matchMask.ROI);
        }
        // Fold the a-priori probabilities into the match weights.
        CvInvoke.cvMul(matchMask.Ptr, priorMask.Ptr, matchMask.Ptr, 1.0);
        MCvConnectedComp comp;
        MCvBox2D currentRegion;
        //Updates the current location
        CvInvoke.cvCamShift(matchMask.Ptr, startRegion, new MCvTermCriteria(10, 1.0e-8), out comp, out currentRegion);
        #region find the SURF features that belongs to the current Region
        MatchedSURFFeature[] featuesInCurrentRegion;
        using (MemStorage stor = new MemStorage())
        {
            Contour<System.Drawing.PointF> contour = new Contour<PointF>(stor);
            contour.PushMulti(currentRegion.GetVertices(), Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
            CvInvoke.cvBoundingRect(contour.Ptr, 1); //this is required before calling the InContour function
            // Keep only the matches whose observed point lies inside (or on) the region polygon.
            featuesInCurrentRegion = Array.FindAll(matchedFeature, delegate(MatchedSURFFeature f) { return contour.InContour(f.ObservedFeature.Point.pt) >= 0; });
        }
        #endregion
        return GetHomographyMatrixFromMatchedFeatures(VoteForSizeAndOrientation(featuesInCurrentRegion, 1.5, 20 ));
    }
}
/// <summary>Counts the immediate children of a contour: the first vertical child plus that child's horizontal siblings.</summary>
private static int GetNumberOfChildren(Contour<Point> contours)
{
    int count = 0;
    for (Contour<Point> child = contours.VNext; child != null; child = child.HNext)
    {
        count++;
    }
    return count;
}
/// <summary>Collects the bounding rectangles of all sibling contours whose area exceeds the given minimum.</summary>
private static List<Rectangle> findContours(Contour<Point> contours, int minArea)
{
    var boundingBoxes = new List<Rectangle>();
    for (Contour<Point> current = contours; current != null; current = current.HNext)
    {
        if (current.Area > minArea)
        {
            boundingBoxes.Add(current.BoundingRectangle);
        }
    }
    return boundingBoxes;
}
/// <summary>
/// For each curve point, finds its index within the contour's vertex array
/// (-1 when not present) and logs the pair to the console.
/// </summary>
/// <param name="contour">contour to search in</param>
/// <param name="curvePoints">points whose contour indices are wanted</param>
/// <returns>one contour index per curve point, in input order</returns>
public static int[] getContourCurvatureIndices(Contour<Point> contour, Point[] curvePoints)
{
    // Perf fix: the original called contour.ToArray() (a full native-to-managed
    // copy) inside the loop, making this O(n*m) in copies; hoist it once.
    Point[] contourPoints = contour.ToArray();
    int[] curveIndices = new int[curvePoints.Length];
    for (int j = 0; j < curvePoints.Length; j++)
    {
        curveIndices[j] = Array.IndexOf(contourPoints, curvePoints[j]);
        Console.WriteLine(curveIndices[j] + ":" + curvePoints[j].ToString());
    }
    return curveIndices;
}
/// <summary>Captures a blob's area, min-area box and centre; resolves the physical centre when the camera calibration is available.</summary>
public BlobInfo(Contour<Point> contour)
{
    Area = contour.Area;
    MinAreaRect = GetMinAreaRect(contour);
    CameraCenter = MinAreaRect.center;
    var calibration = global::Vision.CameraCalibration.Instance;
    if (calibration.IsInitialized)
    {
        // Map the pixel-space centre into physical coordinates.
        PhysicalCenter = calibration.GetPhysicalPoint(CameraCenter);
    }
}
// Traces one contour of a region using neighbour-following: starting from the
// given pixel and direction, walks the boundary, labelling each visited pixel
// in a_rLabelMap, and biases each recorded vertex slightly along the
// orthogonal direction so inner and outer contours do not overlap.
public static Contour TraceContour( int a_iStartingPixelIndex, NeighborDirection a_eStartingDirection, int a_iContourLabel, BinarizedImage a_rBinarizedImage, int[ ] a_rLabelMap )
{
    int iPixelIndexTrace;
    NeighborDirection eDirectionNext = a_eStartingDirection;
    FindNextPoint( a_iStartingPixelIndex, a_eStartingDirection, a_rBinarizedImage, a_rLabelMap, out iPixelIndexTrace, out eDirectionNext );
    Contour oContour = new Contour( a_iContourLabel );
    oContour.AddFirst( a_rBinarizedImage.PixelIndex2Coords( iPixelIndexTrace ) );
    int iPreviousPixelIndex = a_iStartingPixelIndex;
    int iCurrentPixelIndex = iPixelIndexTrace;
    // An isolated pixel traces straight back to itself: nothing more to walk.
    bool bDone = ( a_iStartingPixelIndex == iPixelIndexTrace );
    // Choose a bias factor
    // The bias factor points to exterior if tracing an outer contour
    // The bias factor points to interior if tracing an inner contour
    float fOrthoBiasFactor;
    if( a_eStartingDirection == NeighborDirection.DirectionUpRight ) // inner contour
    {
        fOrthoBiasFactor = -0.2f;
    }
    else // outer contour
    {
        fOrthoBiasFactor = 0.2f;
    }
    while( bDone == false )
    {
        a_rLabelMap[ iCurrentPixelIndex ] = a_iContourLabel;
        // Resume the neighbour search two steps back from the last move
        // direction ((d + 6) % 8 == d - 2 mod 8, kept positive).
        NeighborDirection eDirectionSearch = (NeighborDirection) ( ( (int) eDirectionNext + 6 ) % 8 );
        int iNextPixelIndex;
        FindNextPoint( iCurrentPixelIndex, eDirectionSearch, a_rBinarizedImage, a_rLabelMap, out iNextPixelIndex, out eDirectionNext );
        iPreviousPixelIndex = iCurrentPixelIndex;
        iCurrentPixelIndex = iNextPixelIndex;
        // Stop when the walk re-enters the starting edge: the same ordered
        // pixel pair as the very first step.
        bDone = ( iPreviousPixelIndex == a_iStartingPixelIndex && iCurrentPixelIndex == iPixelIndexTrace );
        if( bDone == false )
        {
            // Apply some bias to inner and outer contours to avoid them overlap
            // Use the orthogonal vector to direction
            NeighborDirection eOrthoBiasDirection = (NeighborDirection) ( ( (int) eDirectionNext + 6 ) % 8 ); // == direction - 2 % 8 but easier to compute (always positive)
            Vector2 f2Bias = fOrthoBiasFactor * BinarizedImage.GetDirectionVector( eOrthoBiasDirection );
            // Add bias to pixel pos
            Vector2 f2BiasedPos = f2Bias + a_rBinarizedImage.PixelIndex2Coords( iNextPixelIndex );
            // Add biased pos to contour
            oContour.AddFirst( f2BiasedPos );
        }
    }
    return oContour;
}
/// <summary>Computes the minimum-area rotated rectangle enclosing the contour's vertices.</summary>
private MCvBox2D GetMinAreaRect(Contour<Point> contour)
{
    // MinAreaRect wants floating-point vertices, so widen each integer point.
    PointF[] pointsF = Array.ConvertAll(contour.ToArray(), p => new PointF(p.X, p.Y));
    return PointCollection.MinAreaRect(pointsF);
}
// Initializes the point detector: an empty contour for accumulating joined
// contours, a pre-captured reference image used for selection, and a 20x20
// square reference contour for shape matching.
public PointDetector()
{
    joinContourStorage = new MemStorage();
    joinContour = new Contour<Point>(joinContourStorage);
    // NOTE(review): hard-coded absolute path -- this breaks on any machine
    // without the file at C:\; consider making it configurable.
    imageSelector = new Image<Gray, byte>("C:\\monitor_photo_tengah_Repaired_Selected.jpg").Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
    _rectStorage = new MemStorage();
    rect = new Contour<Point>(_rectStorage);
    // Reference square used as a shape template.
    rect.PushMulti(new Point[] {
        //rect
        new Point(0, 0),
        new Point(20, 0),
        new Point(20, 20),
        new Point(0, 20)},
        Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
}
/// <summary>Starts a merge set seeded with the given initial contour.</summary>
public MergePolygon(Contour<Point> initContour)
{
    contourList.Add(initContour);
}
/// <summary>
/// Enumerates the children of every contour in the sibling chain starting at
/// <paramref name="cont"/>: for each sibling, yields its first vertical child
/// and that child's horizontal siblings.
/// </summary>
public static IEnumerable<Contour<Point>> GetChildren(Contour<Point> cont)
{
    for (; cont != null; cont = cont.HNext)
    {
        Contour<Point> firstChild = cont.VNext;
        if (firstChild == null)
        {
            continue;
        }
        foreach (Contour<Point> child in GetSiblings(firstChild))
        {
            yield return child;
        }
    }
}
/// <summary>Builds a tree node for the contour, recursively wrapping each child contour and linking it back to this parent.</summary>
public ContourNode(Contour<Point> node)
{
    Contour = node;
    Children = new List<ContourNode>();
    foreach (Contour<Point> kid in new List<Contour<Point>>(ContourNode.GetChildren(Contour)))
    {
        var childNode = new ContourNode(kid);
        childNode.Parent = this;
        Children.Add(childNode);
    }
}
/// <summary>
/// Determines whether the angles are close enough to 90 degrees
/// </summary>
/// <param name="contour">polygonal contour to test</param>
/// <returns>true when every corner angle lies within [80, 100] degrees</returns>
private static bool IsRectangle(Contour<Point> contour)
{
    LineSegment2D[] edges = PointCollection.PolyLine(contour.ToArray(), true);
    for (int i = 0; i < edges.Length; i++)
    {
        LineSegment2D nextEdge = edges[(i + 1) % edges.Length];
        double angle = Math.Abs(nextEdge.GetExteriorAngleDegree(edges[i]));
        // Reject as soon as any corner deviates more than 10 degrees from a right angle.
        if (angle < 80 || angle > 100)
        {
            return false;
        }
    }
    return true;
}
// Recursively scans the contour tree for license-plate candidates: a contour
// qualifies when its area exceeds 400, it has at least 3 child contours
// (presumably the plate characters), and its min-area box has a plate-like
// width/height ratio in (3, 10). Matching boxes are appended to the list.
private void DetectPlate(Contour<Point> contours, List<MCvBox2D> detectedLicensePlateRegionList)
{
    for (; contours != null; contours = contours.HNext)
    {
        int numberOfChildren = GetNumberOfChildren(contours);
        // A plate region must enclose something; skip childless contours.
        if (numberOfChildren == 0) continue;
        if (contours.Area > 400)
        {
            if (numberOfChildren < 3)
            {
                // Too few children to be the plate itself -- it may still
                // contain the plate, so search inside.
                DetectPlate(contours.VNext, detectedLicensePlateRegionList);
                continue;
            }
            MCvBox2D box = contours.GetMinAreaRect();
            // Normalize the rotated box so the angle lands in [-45, 45] and
            // width/height describe the horizontal/vertical extents.
            if (box.angle < -45.0)
            {
                float tmp = box.size.Width;
                box.size.Width = box.size.Height;
                box.size.Height = tmp;
                box.angle += 90.0f;
            }
            else if (box.angle > 45.0)
            {
                float tmp = box.size.Width;
                box.size.Width = box.size.Height;
                box.size.Height = tmp;
                box.angle -= 90.0f;
            }
            double whRatio = (double)box.size.Width / box.size.Height;
            if (!(3.0 < whRatio && whRatio < 10.0))
            {
                // Wrong aspect ratio: keep searching inside this contour.
                Contour<Point> child = contours.VNext;
                if (child != null) DetectPlate(child, detectedLicensePlateRegionList);
                continue;
            }
            detectedLicensePlateRegionList.Add(box);
        }
    }
}
/// <summary>Fills each sufficiently long contour into a label map with a unique id and extracts the connected components as an object layer.</summary>
private ObjectLayer CreateLayer(int width, int height, Contour[] contours)
{
    Map map = new Map(width, height);
    UInt32 id = 1;
    foreach (Contour c in contours)
    {
        // Contours below the minimum length are treated as noise.
        if (c.Length < MIN_CONTOURLENGTH)
        {
            continue;
        }
        c.Fill(map, id, false);
        id++;
    }
    return new ConnectedComponentCollector().Execute(map);
}
/// <summary>
/// Loads the stop-sign model image, trains a SURF tracker on its red-pixel
/// mask, and builds the reference octagon contour used for shape matching.
/// </summary>
public StopSignDetector()
{
    _surfParam = new MCvSURFParams(500, false);
    using (Image<Bgr, Byte> stopSignModel = new Image<Bgr, Byte>("stop-sign-model.png"))
    using (Image<Gray, Byte> redMask = GetRedPixelMask(stopSignModel))
    {
        _tracker = new SURFTracker(redMask.ExtractSURF(ref _surfParam));
    }
    _octagonStorage = new MemStorage();
    _octagon = new Contour<Point>(_octagonStorage);
    // Unit octagon: scale-invariant shape template.
    Point[] octagonVertices =
    {
        new Point(1, 0), new Point(2, 0),
        new Point(3, 1), new Point(3, 2),
        new Point(2, 3), new Point(1, 3),
        new Point(0, 2), new Point(0, 1)
    };
    _octagon.PushMulti(octagonVertices, Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
}
/// <summary>
/// Trains a SURF feature tracker on the supplied grayscale model image and
/// builds the reference octagon contour used for shape matching.
/// </summary>
/// <param name="imgModel">grayscale model image to extract features from</param>
/// <exception cref="Exception">thrown when the model image yields no SURF features</exception>
public ImageDetector(Image<Gray, Byte> imgModel)
{
    _detector = new SURFDetector(500, false);
    ImageFeature<float>[] features = _detector.DetectFeatures(imgModel, null);
    if (features.Length == 0)
        throw new Exception("No image feature has been found in the image model");
    _tracker = new Features2DTracker<float>(features);
    _octagonStorage = new MemStorage();
    _octagon = new Contour<Point>(_octagonStorage);
    // Unit octagon: scale-invariant shape template.
    Point[] templateVertices =
    {
        new Point(1, 0), new Point(2, 0),
        new Point(3, 1), new Point(3, 2),
        new Point(2, 3), new Point(1, 3),
        new Point(0, 2), new Point(0, 1)
    };
    _octagon.PushMulti(templateVertices, Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
}
/// <summary>
/// Trains a SURF feature tracker on the red-pixel mask of the given stop-sign
/// model image and builds the reference octagon contour for shape matching.
/// </summary>
public StopSignDetector(Image<Bgr, Byte> stopSignModel)
{
    _detector = new SURFDetector(500, false);
    using (Image<Gray, Byte> redMask = GetRedPixelMask(stopSignModel))
    {
        ImageFeature<float>[] modelFeatures = _detector.DetectFeatures(redMask, null);
        _tracker = new Features2DTracker<float>(modelFeatures);
    }
    _octagonStorage = new MemStorage();
    _octagon = new Contour<Point>(_octagonStorage);
    // Unit octagon: scale-invariant shape template.
    Point[] octagonVertices =
    {
        new Point(1, 0), new Point(2, 0),
        new Point(3, 1), new Point(3, 2),
        new Point(2, 3), new Point(1, 3),
        new Point(0, 2), new Point(0, 1)
    };
    _octagon.PushMulti(octagonVertices, Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
}
// Thresholds the frame to black/white, finds contours on its Canny edges and,
// when an example contour is supplied, searches for matching objects
// (results go into stopSignList/boxList). Returns the thresholded image as a
// Bitmap for display.
public Bitmap DetectObject(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList, Image<Gray, Byte> grayImage, Contour<Point> exampleContour, ushort threshold)
{
    Image<Bgr, Byte> whiteBlackImg = GetWhiteBlackImage(img, threshold);
    Image<Gray, Byte> grayImg = whiteBlackImg.Convert<Gray, Byte>();
    Image<Gray, byte> canny = grayImg.Canny(new Gray(50), new Gray(100));
    // Image<Gray, byte> canny = grayImg.Canny(new Gray(100), new Gray(50));
    // Full contour tree, stored in the member storage _tempStor.
    Contour<Point> contours = canny.FindContours(
        CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
        RETR_TYPE.CV_RETR_TREE,
        _tempStor);
    if (exampleContour != null)
    {
        FindObject(img, stopSignList, boxList, contours, exampleContour);
    }
    // NOTE(review): grayImg and canny are never disposed -- native memory
    // accumulates on repeated calls.
    return whiteBlackImg.Bitmap;
}
/// <summary>
/// Simplifies each outer contour with Ramer-Douglas-Peucker at the given
/// accuracy, keeping only results that still form an area (more than two
/// vertices).
/// </summary>
public static List <Contour> SimplifyContours(List <Contour> a_rOuterContours, float a_fAccuracy)
{
    var oSimplifiedContoursList = new List <Contour>(a_rOuterContours.Count);
    foreach (Contour rOuterContour in a_rOuterContours)
    {
        // Run the simplification over the whole vertex range.
        var oVertices = new List <Vector2>(rOuterContour.Vertices);
        List <Vector2> oSimplifiedVertices = RamerDouglasPeuker(oVertices, 0, oVertices.Count - 1, a_fAccuracy);
        if (oSimplifiedVertices.Count > 2)
        {
            Contour oSimplifiedOuterContour = new Contour(rOuterContour.Region);
            oSimplifiedOuterContour.AddLast(oSimplifiedVertices);
            oSimplifiedContoursList.Add(oSimplifiedOuterContour);
        }
    }
    return oSimplifiedContoursList;
}
/// <summary>Returns the contour's longest edge and the edge that follows it (wrapping around).</summary>
private LineSegment2D[] GetRightAngleEdges(Contour <Point> contour)
{
    var edges = PointCollection.PolyLine(contour.ToArray(), true);
    // Assumption is that the longest edge defines the width of the tracked device in the blob
    int longestIndex = 0;
    for (int i = 1; i < edges.Length; i++)
    {
        if (edges[i].Length > edges[longestIndex].Length)
        {
            longestIndex = i;
        }
    }
    var followingEdge = edges[(longestIndex + 1) % edges.Length];
    return new[] { edges[longestIndex], followingEdge };
}
// Rasterizes a contour onto the grid: each vertex is shifted into
// bounding-rectangle-local coordinates, snapped to the nearest cell,
// consecutive snapped points become edges, the polygon is closed with a
// final edge, and the result is filled.
private void RegisterContour(Contour contour)
{
    List <Edge> edges = new List <Edge>(contour.CoordinateCount);
    Vector2Int prevPoint = default;
    Vector2Int firstPoint = default;
    for (int i = 0; i < contour.CoordinateCount; i++)
    {
        double xCoordinate = _boundingRectangle.MinX;
        double yCoordinate = _boundingRectangle.MinY;
        // Translate to the bounding rectangle's origin, then quantize to cells.
        double xShifted = contour.Vertices[i].X - xCoordinate;
        double yShifted = contour.Vertices[i].Y - yCoordinate;
        int xInt = (int)Math.Round(xShifted / _cellWidth);
        int yInt = (int)Math.Round(yShifted / _cellHeight);
        Vector2Int point = new Vector2Int(xInt, yInt);
        //SetLeftBottomIfLesser(point);
        if (i == 0)
        {
            firstPoint = point;
        }
        else if (i > 0)
        {
            Edge edge = new Edge(prevPoint, point);
            edges.Add(edge);
        }
        prevPoint = point;
    }
    // Close the polygon: connect the last snapped point back to the first.
    edges.Add(new Edge(firstPoint, prevPoint));
    FillPolygon(edges);
}
// First pass: draws every contour of _image (polygon-approximated at 5% of
// the perimeter) filled in white onto a colour copy. Second pass: outlines
// the bounding rectangle of each contour found in that colour image in green.
private void IdentifyContours()
{
    int counter = 0;
    Image <Bgr, byte> color = new Image <Bgr, byte>(_colorImage);
    using (MemStorage storage = new MemStorage())
    {
        for (Contour <Point> contours = _image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage); contours != null; contours = contours.HNext)
        {
            // NOTE(review): ApproxPoly returns a new contour which is
            // discarded here -- the original (unapproximated) contour is drawn.
            contours.ApproxPoly(contours.Perimeter * 0.05, storage);
            // maxLevel -1 => fill the contour interior.
            CvInvoke.cvDrawContours(color, contours, new MCvScalar(255), new MCvScalar(255), -1, 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
            counter++;
        }
    }
    using (MemStorage store = new MemStorage())
    {
        for (Contour <Point> contours1 = color.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, store); contours1 != null; contours1 = contours1.HNext)
        {
            Rectangle r = CvInvoke.cvBoundingRect(contours1, 1);
            color.Draw(r, new Bgr(0, 255, 0), 1);
        }
    }
}
// Mouse handler for contour editing: left-click adds a new point (or selects
// an existing point within the hit radius); right-click removes the clicked
// point.
private void RootCanvas_OnMouseDown(object sender, MouseButtonEventArgs e)
{
    if (Contour == null)
    {
        return;
    }
    var clickPosition = e.GetPosition(RootCanvas);
    // Hit-test radius shrinks as the view zooms in (12 / Scale).
    // NOTE(review): when nothing is hit, FirstOrDefault yields the element
    // type's default -- IndexOf is then relied on to return -1; confirm the
    // point type's default never equals a stored point.
    var point = Contour.FirstOrDefault(p => p.DistanceTo(clickPosition) < (12 / Scale));
    var index = Contour.IndexOf(point);
    if (e.LeftButton == MouseButtonState.Pressed)
    {
        if (index == -1)
        {
            // no points get clicked
            Contour.Add(clickPosition);
            ActivePointIndex = Contour.Count - 1;
        }
        else if (index == 0 && (Keyboard.Modifiers & ModifierKeys.Control) > 0)
        {
            //todo
        }
        else
        {
            ActivePointIndex = index;
        }
    }
    else if (e.RightButton == MouseButtonState.Pressed)
    {
        if (index != -1)
        {
            Contour.RemoveAt(index);
        }
    }
}
/// <summary>
/// Returns EX and DX (Mean and Standard Derivation) for saturation and value layers in HSV color model with values from 0.0 to 1.0 (not 0-255)
/// </summary>
/// <param name="contour">contour whose interior region is measured</param>
/// <param name="src">source BGR image</param>
/// <returns>((mean, stddev) for saturation, (mean, stddev) for value), each normalized to [0, 1]</returns>
public static ((float mean, float stddev) saturation, (float mean, float stddev) value) GetHSVColorStats(
    Contour contour, Mat src)
{
    var rect = GetContourRect(contour, src.Height, src.Width);
    // White-on-black mask of the contour, cropped to its bounding rect and
    // converted to single-channel for use as a MeanStdDev mask.
    var mask = GetMask(contour, src.Size(), color: Scalar.White, background: Scalar.Black)
        .Clone(rect)
        .CvtColor(ColorConversionCodes.BGR2GRAY);
    // Crop the source to the same rect, convert to HSV and split the channels.
    var layers = src.Clone(rect)
        .CvtColor(ColorConversionCodes.BGR2HSV)
        .Split();
    // Mean/stddev restricted to the masked pixels, scaled from [0,255] to [0,1].
    (float mean, float stddev) LocalMeanStdDev(Mat area)
    {
        Cv2.MeanStdDev(area, out var scalarMean, out var scalarStddev, mask);
        var mean = (float)(scalarMean[0] / 255);
        var stddev = (float)(scalarStddev[0] / 255);
        return (mean, stddev);
    }
    return (LocalMeanStdDev(layers[1]) /*saturation layer*/, LocalMeanStdDev(layers[2]) /* value layer*/);
}
/// <summary>
/// Runs the contour-extraction pipeline on a camera frame: crops the frame's
/// ROI (the raw frame seems to include a black band that would otherwise be
/// detected as a contour), converts to grayscale, blurs, runs Canny edge
/// detection, finds the external contours and extracts them as point lists.
/// Related background reading from the original implementation:
/// http://www.codeproject.com/Articles/265354/Playing-Card-Recognition-Using-AForge-Net-Framewor
/// and http://www.aforgenet.com/framework/features/ (a large commented-out
/// AForge-based CropRotateFree implementation used to live here and has been
/// removed as dead code; see source control history if needed).
/// </summary>
/// <param name="cameraFrame">frame to process; its ROI is modified as a side effect</param>
/// <param name="tolerance">tolerance forwarded to ExtractContours</param>
public static void CropPicture(this Image <Bgr, byte> cameraFrame, double tolerance = 10)
{
    // Crop the camera image a little -- it seems to include a black area on
    // one side, which gets detected as a contour later.
    cameraFrame.ROI = new System.Drawing.Rectangle(0, 10, cameraFrame.Width, cameraFrame.Height - 14);
    // Fix: the intermediate native images were previously never disposed,
    // leaking unmanaged memory on every frame; wrap them in using blocks.
    using (Image <Bgr, byte> croppedFrame = new Image <Bgr, byte>(cameraFrame.ROI.Size))
    {
        cameraFrame.CopyTo(croppedFrame);
        // Convert to grayscale.
        using (Image <Gray, byte> grayFrame = croppedFrame.Convert <Gray, byte>())
        // Blur the image to minimize noise.
        using (Image <Gray, byte> smoothedFrame = new Image <Gray, byte>(cameraFrame.ROI.Size))
        {
            CvInvoke.cvSmooth(grayFrame.Ptr, smoothedFrame.Ptr, SMOOTH_TYPE.CV_BLUR, 2, 2, 0, 0);
            // Detect edges.
            using (Image <Gray, byte> cannyFrame = smoothedFrame.Canny(100, 60))
            {
                // Detect contours (external only).
                Contour <System.Drawing.Point> nativeContours = cannyFrame.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_EXTERNAL);
                // Draw contours.
                Image <Gray, byte> contourFrame = new Image <Gray, byte>(cameraFrame.ROI.Size);
                if (nativeContours != null)
                {
                    CvInvoke.cvDrawContours(contourFrame.Ptr, nativeContours.Ptr, new MCvScalar(255), new MCvScalar(128), 10, 1, LINE_TYPE.FOUR_CONNECTED, new System.Drawing.Point());
                }
                // Process contour data.
                List <List <System.Drawing.Point> > contours = ExtractContours(nativeContours, contourFrame, tolerance);
                // do lots of stuff with the resulting contours
            }
        }
    }
}
// Handler for the "get single contour" button: fetches the contour from the
// input manager, rebuilds the contour tab page, redraws the drawing field and
// updates the button states. Any failure is reported in a message box.
private void btnGetSingleContour_Click(object sender, EventArgs e)
{
    try
    {
        //_pointsInputManager.SetUpTestCase();
        _singleContour = _pointsInputManager.GetSingleContour();
        // Replace all contour pages with a single page for this contour.
        tabCtrlContours.TabPages.Clear();
        _tabControlHelper.CreatePageForContour(_singleContour);
        //Ekaterina
        //_geometryDrawer.SetImageBuffer();
        // Clear the drawing surface before re-drawing the contour.
        _geometryDrawer.FillBufferRectangle(Brushes.White, 0, 0, pbDrawField.Width, pbDrawField.Height);
        //end Ekaterina
        _geometryDrawer.DrawContours(Pens.Black, _singleContour);
        //Evgeniya
        ButtonController.Instance().CurrentState = ButtonState.MakeSinglePressed;
        ButtonController.Instance().changeButtonState(btnClear, btnCompleteInput, btnGetSingleContour, btnNetReculc, btnTriangulate, btnRenumerator, btnSolve);
        //end Evgeniya
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message, "Ошибка");
    }
}
// Lets the user pick an image, then runs the license-plate pipeline
// (grayscale -> histogram equalization -> Gaussian smoothing -> Canny ->
// contour tree -> DetectPlate), draws the detected plate boxes in red, and
// shows every intermediate stage in the image boxes.
private void button1_Click(object sender, EventArgs e)
{
    OpenFileDialog Openfile = new OpenFileDialog();
    if (Openfile.ShowDialog() == DialogResult.OK)
    {
        Image <Bgr, byte> My_Image = new Image <Bgr, byte>(Openfile.FileName);
        Image <Gray, byte> gray_image = My_Image.Convert <Gray, byte>();
        Image <Gray, byte> eh_gray_image = My_Image.Convert <Gray, byte>();
        Image <Gray, byte> smooth_gray_image = My_Image.Convert <Gray, byte>();
        Image <Gray, byte> ed_gray_image = new Image <Gray, byte>(gray_image.Size);
        Image <Bgr, byte> final_image = new Image <Bgr, byte>(Openfile.FileName);
        // NOTE(review): this MemStorage (and the temporary images above) are
        // never disposed; native memory accumulates on repeated clicks.
        MemStorage stor = new MemStorage();
        List <MCvBox2D> detectedLicensePlateRegionList = new List <MCvBox2D>();
        CvInvoke.cvEqualizeHist(gray_image, eh_gray_image);
        // 3x3 Gaussian blur to suppress noise before edge detection.
        CvInvoke.cvSmooth(eh_gray_image, smooth_gray_image, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
        //CvInvoke.cvAdaptiveThreshold(smooth_gray_image, bi_gray_image, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 71, 1);
        CvInvoke.cvCanny(smooth_gray_image, ed_gray_image, 100, 50, 3);
        Contour <Point> contours = ed_gray_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
        DetectPlate(contours, detectedLicensePlateRegionList);
        // Draw every detected plate region in red on the final image.
        for (int i = 0; i < detectedLicensePlateRegionList.Count; i++)
        {
            final_image.Draw(detectedLicensePlateRegionList[i], new Bgr(Color.Red), 2);
        }
        imageBox1.Image = My_Image;
        imageBox2.Image = gray_image;
        imageBox3.Image = eh_gray_image;
        imageBox4.Image = smooth_gray_image;
        imageBox5.Image = ed_gray_image;
        imageBox6.Image = final_image;
    }
}
// Aggregates each incoming collection of connected components into a single
// component: centroid and orientation are averaged, area is summed. An empty
// collection yields NaN centroid/orientation as an "invalid" marker.
public override IObservable <ConnectedComponent> Process(IObservable <ConnectedComponentCollection> source)
{
    return (source.Select(input =>
    {
        var result = new ConnectedComponent();
        double angle = 0;
        double area = 0;
        Point2f centroid = new Point2f();
        // Accumulate sums over every component in the collection.
        for (int i = 0; i < input.Count; i++)
        {
            var component = input[i];
            centroid.X += component.Centroid.X;
            centroid.Y += component.Centroid.Y;
            angle += component.Orientation;
            area += component.Area;
        }
        if (input.Count > 0)
        {
            // Convert the sums into averages (area stays a total).
            centroid.X = centroid.X / input.Count;
            centroid.Y = centroid.Y / input.Count;
            result.Centroid = centroid;
            result.Orientation = angle / input.Count;
            result.Area = area;
            // The aggregate has no geometric outline of its own.
            result.Contour = Contour.FromSeq(null);
        }
        else
        {
            // No components: flag the result as invalid via NaNs.
            result.Centroid = new Point2f(float.NaN, float.NaN);
            result.Orientation = double.NaN;
        }
        return result;
    }));
}
// Finds blobs in a black-and-white image: approximates each contour at 5% of
// its perimeter, records a BlobInfo for it, and draws its polyline in white.
private void FindBlobsAndDraw(Image <Gray, Byte> blackAndWhiteImage)
{
    m_BlobInfos.Clear();
    m_DetectedBlobsImage = m_ClippedImage.CopyBlank();
    using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
    {
        int width = 0;
        for (Contour <Point> contours = blackAndWhiteImage.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
            storage); contours != null; contours = contours.HNext)
        {
            Contour <Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
            //Debug.WriteLine(currentContour.Area);
            m_BlobInfos.Add(new BlobInfo(currentContour));
            // NOTE(review): the stroke width grows with each blob -- presumably
            // to tell overlapping blobs apart visually; confirm this is intended.
            width++;
            m_DetectedBlobsImage.DrawPolyline(currentContour.ToArray(), true, new Bgr(Color.White), width);
        }
    }
}
// Verifies that the interior point computed for an L-shaped hole lies
// strictly inside the polygon (not on any boundary segment).
public void TestFindInteriorPointL()
{
    // L-shaped contour (FindPointInPolygon() produces a test candidate
    // which lies exactly on a segment where IsPointInPolygon() returns
    // true, so IsPointOnSegment() is actually needed here).
    var points = new List <Vertex>() {
        new Vertex(3, 1),
        new Vertex(1, 1),
        new Vertex(1, 3),
        new Vertex(2, 3),
        new Vertex(2, 2),
        new Vertex(3, 2)
    };
    var contour = new Contour(points);
    var poly = new Polygon(6);
    // Adding the contour as a hole makes the library compute an interior point.
    poly.Add(contour, true);
    var h = poly.Holes[0];
    var p = RobustPredicates.Default;
    int count = points.Count;
    int i = count - 1;
    for (int j = 0; j < count; j++)
    {
        // The interior point must be strictly off every boundary segment:
        // the orientation test must be clearly nonzero for each edge (i, j).
        double ccw = p.CounterClockwise(points[i], h, points[j]);
        Assert.Greater(Math.Abs(ccw), 1e-12);
        i = j;
    }
}
/// <summary>
/// Get the nearest templateClass for the refContour
/// </summary>
/// <param name="refContour">the contour wich class's is to be found</param>
/// <param name="r">the area of the contour</param>
/// <param name="classes">the list of the classes within to search</param>
/// <param name="handType">only classes of this hand type are considered</param>
/// <returns>the nearest class or null when nothing matches</returns>
public static FoundTemplateDesc GetNearestClass(Contour <Point> refContour, Rectangle r, List <TemplateClass> classes, HandType handType)
{
    contourClasses = classes;
    Template refTemp = new Template(refContour, r.Height * r.Width);
    var candidates = new List <FoundTemplateDesc>();
    foreach (TemplateClass tc in contourClasses)
    {
        // Only compare against classes of the requested hand type.
        if (tc.htype != handType)
        {
            continue;
        }
        FoundTemplateDesc desc = TemplateFinder.CompareTemplates(tc, refTemp);
        if (desc != null)
        {
            candidates.Add(desc);
        }
    }
    if (candidates.Count == 0)
    {
        return null;
    }
    // Lowest rate = best match.
    return candidates.OrderBy(t => t.rate).First();
}
// Draws a red alert square (vertically centred) for each possible-collision
// contour whose area is at least 100, then rewinds the contour list so the
// field points at its head again for the next caller.
private void PaintVisualAlerts(Image <Rgb, byte> ocvColorImage)
{
    if (!_visualAlerts || possibleCollisions == null)
    {
        return;
    }
    while (possibleCollisions.HNext != null)
    {
        if (possibleCollisions.Area < 100)
        {
            // Too small to warn about -- advance to the next contour.
            possibleCollisions = possibleCollisions.HNext;
            continue;
        }
        ocvColorImage.Draw(new System.Drawing.Rectangle(
            possibleCollisions.BoundingRectangle.Left + possibleCollisions.BoundingRectangle.Width / 2 - VisualAlertSide / 2,
            ocvColorImage.Height / 2 - VisualAlertSide / 2,
            VisualAlertSide,
            VisualAlertSide), new Rgb(255, 0, 0), -1);
        possibleCollisions = possibleCollisions.HNext;
    }
    // The loop above exits without testing the final node, so handle it here.
    if (possibleCollisions.Area >= 100)
    {
        ocvColorImage.Draw(new System.Drawing.Rectangle(
            possibleCollisions.BoundingRectangle.Left + possibleCollisions.BoundingRectangle.Width / 2 - VisualAlertSide / 2,
            ocvColorImage.Height / 2 - VisualAlertSide / 2,
            VisualAlertSide,
            VisualAlertSide), new Rgb(255, 0, 0), -1);
    }
    // Rewind the field to the head of the sibling list (it was advanced above).
    while (possibleCollisions.HPrev != null)
    {
        possibleCollisions = possibleCollisions.HPrev;
    }
}
// Loads the station's contour definitions from
// Resources\yaml\station_<stationID>.yml (one mapping per contour with "id"
// and two corner "points") and fills contourList for the given area/station.
public void setcam(string area, string stationID)
{
    toolStripStatusLabel1.Visible = false;
    contourList = new List <Contour>();
    this.area = area;
    this.stationID = stationID;
    string m_exePath = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) + "\\Resources\\yaml\\station_" + stationID + ".yml";
    // NOTE(review): this StreamReader is never disposed -- consider a using block.
    var input = new StreamReader(m_exePath);
    var yaml = new YamlStream();
    yaml.Load(input);
    foreach (YamlMappingNode entry in (YamlSequenceNode)yaml.Documents[0].RootNode)
    {
        Contour temp = new Contour();
        var items = (YamlSequenceNode)entry.Children[new YamlScalarNode("points")];
        var itemss = (YamlScalarNode)entry.Children[new YamlScalarNode("id")];
        temp.id = Int32.Parse(itemss.ToString());
        // First point: the contour's origin corner.
        temp.x = Int32.Parse(items[0][0].ToString());
        temp.y = Int32.Parse(items[0][1].ToString());
        // NOTE(review): h is derived from the first-coordinate delta and w
        // from the second -- this looks swapped relative to the usual
        // width/height convention; confirm against the yaml layout.
        temp.h = Int32.Parse(items[1][0].ToString()) - temp.x;
        temp.w = Int32.Parse(items[1][1].ToString()) - temp.y;
        contourList.Add(temp);
    }
}
/// <summary>
/// Extract the biggest contour in the image.
/// </summary>
/// <param name="local">A binary image.</param>
/// <returns>The contour with the largest area, or null when no contour is found.</returns>
private Contour<Point> ExtractBiggestContour(Image<Gray, byte> local)
{
    // The storage must outlive the returned contour, so it is intentionally not disposed here.
    MemStorage storage = new MemStorage();
    Contour<Point> best = null;
    double bestArea = 0;
    for (Contour<Point> current = FindContours(local, Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
         current != null;
         current = current.HNext)
    {
        double currentArea = current.Area;
        if (currentArea > bestArea)
        {
            bestArea = currentArea;
            best = current;
        }
    }
    return best;
}
/// <summary>
/// Validates that the glyph's outline contains no duplicated simple contours.
/// Every duplicated pair found is reported to <paramref name="gerrlist"/> as a
/// single GErrContDupl entry keyed by the contours' start-knot indices.
/// </summary>
/// <param name="gerrlist">Error list that receives the duplication report, if any.</param>
/// <returns>Aborted when the outline is missing or a comparison fails; otherwise Completed.</returns>
internal StatusGV.TypeStatusExec ValidateSimpContDupl(GErrList gerrlist)
{
    if (this.outl == null)
    {
        //throw new ExceptionGlyph("Glyph","ValidateSimpContDupl",null);
        return StatusGV.TypeStatusExec.Aborted;
    }
    ListPairInt duplicatedStartKnots = new ListPairInt();
    // Compare every unordered pair of contours exactly once.
    for (int pozA = 0; pozA < this.outl.NumCont - 1; pozA++)
    {
        Contour contourA = this.outl.ContourByPoz(pozA);
        for (int pozB = pozA + 1; pozB < this.outl.NumCont; pozB++)
        {
            Contour contourB = this.outl.ContourByPoz(pozB);
            bool areDuplicated;
            if (!contourA.AreDuplicated(contourB, out areDuplicated))
            {
                // The comparison itself failed — validation cannot proceed.
                //throw new ExceptionGlyph("Glyph","ValidateSimpContDupl",null);
                return StatusGV.TypeStatusExec.Aborted;
            }
            if (areDuplicated)
            {
                duplicatedStartKnots.Add(contourA.IndKnotStart, contourB.IndKnotStart);
            }
        }
    }
    if (duplicatedStartKnots.Count != 0)
    {
        gerrlist.Add(new GErrContDupl(this.index, duplicatedStartKnots));
        //this.isOrientDefined=false;
    }
    return StatusGV.TypeStatusExec.Completed;
}
/// <summary>
/// Triangulates a closed polyline (mapped onto the given plane) into a Rhino mesh
/// using a conforming Delaunay triangulation with quality constraints.
/// </summary>
/// <param name="poly">Closed boundary polyline.</param>
/// <param name="plane">Plane used to map the polyline to 2D.</param>
/// <param name="min_angle">Minimum triangle angle in radians (default ~25 degrees).</param>
/// <param name="max_angle">Maximum triangle angle in radians.</param>
/// <param name="max_area">Maximum triangle area.</param>
/// <returns>The triangulated mesh.</returns>
public static Mesh Triangulate(Polyline poly, Plane plane, double min_angle = 0.436332, double max_angle = Math.PI, double max_area = double.MaxValue)
{
    // The triangulation library expects angle limits in degrees.
    min_angle = Rhino.RhinoMath.ToDegrees(min_angle);
    max_angle = Rhino.RhinoMath.ToDegrees(max_angle);

    var options = new ConstraintOptions() { ConformingDelaunay = true };
    var quality = new QualityOptions()
    {
        MinimumAngle = min_angle,
        MaximumAngle = max_angle,
        MaximumArea = max_area,
        VariableArea = true
    };

    Polygon boundary = new Polygon();
    boundary.Add(ToContour(poly, plane));
    return ToRhinoMesh(boundary.Triangulate(options, quality));
}
/// <summary>
/// Find the board markers.
/// Thresholds and dilates the input image, detects blobs, and keeps blobs whose
/// area and circularity match a marker, binning each accepted centroid by image
/// quadrant.
/// </summary>
/// <param name="InputImage">Bitmap to search for the four corner markers.</param>
/// <returns>The four marker centroids indexed by quadrant (unset slots stay PointF.Empty).</returns>
private static PointF[] GetBoardMarkers(Bitmap InputImage)
{
    #region Local variables
    // Blob area acceptance band. The two constants previously had their
    // min/max names swapped; values and the accepted band are unchanged.
    int minimumBlobArea = 100;
    int maximumBlobArea = 360;
    // Threshold level
    int treshLevel = 220;
    // Minimum blob circularity
    double circularityLevel = 0.20d;
    // Center of image
    PointF centerOfImage = new PointF((InputImage.Width / 2), (InputImage.Height / 2));
    // For processing
    Image<Gray, Byte> ModelImage = new Image<Gray, Byte>(InputImage);
    // One marker centroid per quadrant
    PointF[] listOfEdges = new PointF[4];
    #endregion

    // Adaptive threshold, then dilate once to consolidate the marker blobs.
    ModelImage = ModelImage.ThresholdAdaptive(new Gray(treshLevel), Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY_INV, 7, new Gray(4));
    ModelImage = ModelImage.Dilate(1);
    // Show unperspective image
    // CvInvoke.cvShowImage("Stage", ModelImage);

    // Get the number of blobs on the image.
    uint numBlobsFound = BlobDetector.Detect(ModelImage, ImageBlobs);

    foreach (Emgu.CV.Cvb.CvBlob targetBlob in ImageBlobs.Values)
    {
        // Dispose the per-blob contour storage (it was leaked before).
        using (MemStorage blobStorage = new MemStorage())
        {
            Contour<Point> blobContour = targetBlob.GetContour(blobStorage);
            double blobCircularity = Board.GetCircularity(blobContour);
            double blobArea = targetBlob.Area;
            PointF blobCenter = targetBlob.Centroid;

            // Keep only blobs inside the expected marker area band...
            if ((blobArea >= minimumBlobArea) && (blobArea <= maximumBlobArea))
            {
                Console.WriteLine("Area: {0}", blobArea);
                Console.WriteLine("Circularity: {0}", blobCircularity);
                // ...that are round enough to be a marker.
                if (blobCircularity > circularityLevel)
                {
                    #region Find marker quadrant position
                    // NOTE(review): quadrant numbering follows image coordinates
                    // (y grows downward), not the mathematical convention.
                    if ((blobCenter.X < centerOfImage.X) && (blobCenter.Y < centerOfImage.Y))
                    {
                        Console.WriteLine("Quadrant: I");
                        listOfEdges[0] = targetBlob.Centroid;
                    }
                    if ((blobCenter.X > centerOfImage.X) && (blobCenter.Y < centerOfImage.Y))
                    {
                        Console.WriteLine("Quadrant: II");
                        listOfEdges[1] = targetBlob.Centroid;
                    }
                    if ((blobCenter.X > centerOfImage.X) && (blobCenter.Y > centerOfImage.Y))
                    {
                        Console.WriteLine("Quadrant: III");
                        listOfEdges[2] = targetBlob.Centroid;
                    }
                    if ((blobCenter.X < centerOfImage.X) && (blobCenter.Y > centerOfImage.Y))
                    {
                        Console.WriteLine("Quadrant: IV");
                        listOfEdges[3] = targetBlob.Centroid;
                    }
                    #endregion
                }
            }
        }
    }

    // NOTE(review): listOfEdges is a fixed-size array, so Length is always 4 and
    // this branch always returns it; unset quadrants remain PointF.Empty. The null
    // return below is unreachable — kept for interface compatibility.
    if (listOfEdges.Length == 4)
    {
        return listOfEdges;
    }
    return null;
}
/// <summary>
/// Detects obstacles in a camera frame: masks the color image, extracts external
/// contours, derives axis-aligned bounding boxes, samples the depth map inside each
/// box for its nearest reading, and publishes the resulting obstacle list.
/// </summary>
/// <param name="myFrame">Frame wrapper providing the bitmap and raw depth data; ignored when null.</param>
public void Detect(DFrame myFrame)
{
    if (myFrame != null)
    {
        Bitmap bmp2 = myFrame.GetBMP();
        Image<Bgr, Byte> img;
        Image<Gray, Byte> grey;
        try
        {
            img = new Image<Bgr, Byte>(bmp2);
            grey = img.Convert<Gray, Byte>();
            // Mask: keep pixels with all channels in [0, 250] (drops near-white background).
            CvInvoke.cvInRangeS(img.Ptr, new MCvScalar(0.0, 0.0, 0.0), new MCvScalar(250.0, 250.0, 250.0), grey.Ptr);
            // Erode 4 iterations to remove small noise from the mask.
            CvInvoke.cvErode(grey.Ptr, grey.Ptr, (IntPtr)null, 4);
        }
        catch (Exception)
        {
            // Frame conversion failed; skip this frame entirely.
            return;
        }
        double area = area_check(grey);
        List<Edge> myEdge = new List<Edge>();
        List<int> minDepthList = new List<int>();
        List<int> counterList = new List<int>();
        // Bounding-box accumulators, initialized past the frame extents
        // (512x424 matches the depth-buffer indexing used below).
        int minX = 512;
        int maxX = 0;
        int minY = 424;
        int maxY = 0;
        using (MemStorage storage = new MemStorage())
        {
            CStream.SetFrame(grey.Copy());
            // Compute an axis-aligned bounding box for each external contour.
            for (Contour<System.Drawing.Point> contours = grey.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL, storage); contours != null; contours = contours.HNext)
            {
                Contour<System.Drawing.Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
                System.Drawing.Point[] pts = currentContour.ToArray();
                foreach (System.Drawing.Point p in pts)
                {
                    if (p.X < minX)
                    {
                        minX = p.X;
                    }
                    else if (p.X > maxX)
                    {
                        maxX = p.X;
                    }
                    if (p.Y < minY)
                    {
                        minY = p.Y;
                    }
                    else if (p.Y > maxY)
                    {
                        maxY = p.Y;
                    }
                }
                // Only keep boxes with positive extent.
                if (maxX > minX && maxY > minY)
                {
                    myEdge.Add(new Edge(minX, maxX, minY, maxY, 0));
                }
                // Reset accumulators for the next contour.
                minX = 512;
                maxX = 0;
                minY = 424;
                maxY = 0;
            }
            // Sample the depth map (every 10th pixel) inside each box to find its
            // nearest non-zero depth reading; 0 means "no reading".
            int minDepth = 999999;
            int counter = 0;
            ushort[] depthdata = myFrame.get_DepthData();
            foreach (var edge in myEdge)
            {
                for (int i = edge.getMinY(); i < edge.getMaxY(); i += 10)
                {
                    for (int j = edge.getMinX(); j < edge.getMaxX(); j += 10)
                    {
                        // Row-major depth buffer with a 512-pixel stride.
                        int tmp = depthdata[(j) + (i * 512)];
                        if (tmp < minDepth && tmp != 0)
                        {
                            minDepth = tmp;
                        }
                    }
                }
                if (minDepth != 999999)
                {
                    minDepthList.Add(minDepth);
                }
                else
                {
                    // No valid depth sample: remember this edge's index for removal.
                    counterList.Add(counter);
                }
                minDepth = 999999;
                counter++;
            }
            // Remove edges that produced no valid depth reading.
            foreach (var index in counterList)
            {
                for (int i = myEdge.Count - 1; i > -1; i--)
                {
                    if (i == index)
                    {
                        myEdge.RemoveAt(i);
                    }
                }
            }
            counterList.Clear();
            // Assign the measured minimum depth to each remaining edge, in order.
            int kk = 0;
            foreach (var ele in minDepthList)
            {
                myEdge[kk].distance = ele;
                kk++;
            }
            // Convert edges to obstacles with frame-relative (percentage) geometry.
            List<Obstacle> obstacles = new List<Obstacle>();
            foreach (Edge edge in myEdge)
            {
                Obstacle obstacle = EdgeToObstacle(edge);
                float frameWidth = grey.Width;
                float frameHeight = grey.Height;
                float leftPercentage = edge.getMinX() / frameWidth;
                float topPercentage = edge.getMinY() / frameHeight;
                float widthPercentage = (edge.getMaxX() - edge.getMinX()) / frameWidth;
                float heightPercentage = (edge.getMaxY() - edge.getMinY()) / frameHeight;
                System.Drawing.Size frameSize = new System.Drawing.Size((int)frameWidth, (int)frameHeight);
                ObstacleFrameInfo frameInfo = new ObstacleFrameInfo(leftPercentage, topPercentage, widthPercentage, heightPercentage, frameSize);
                obstacle.FrameInfo = frameInfo;
                obstacles.Add(obstacle);
            }
            _obstacleData = obstacles;
            NotifyObstaclesDetectedEvent(obstacles);
        }
    }
}
/// <summary>
/// hand detection function.
/// Walks every external contour of the skin mask, approximates it to a polygon,
/// and classifies sharp turns (angle &lt; 90°) between consecutive edges as finger
/// "peaks" (fingertips) or "valleys" (gaps between fingers) depending on turn
/// direction. Contours with at least 3 peaks and 3 valleys are accepted as hands.
/// </summary>
/// <param name="skin">a binary image that contains skin like objects</param>
/// <returns>a list that contains detected hands</returns>
private List<Contour<Point>> HandDetection(Image<Gray, byte> skin)
{
    Point first_peak = new Point(), first_valley = new Point(), reference_peak = new Point(), refernce_valley = new Point();
    // v1/v2 are 2x1 column vectors fed to the det()/dot() helpers.
    double[,] v1 = new double[2, 1], v2 = new double[2, 1];
    double angle;
    int direction, length, mod;
    bool tester_peak = false, tester_valley = false;
    using (MemStorage storage = new MemStorage())
    {
        handCandiate.Clear();
        for (Contour<Point> i = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL, storage); i != null; i = i.HNext)
        {
            // A hand candidate must be reasonably large and non-convex
            // (fingers create concavities in the contour).
            area = i.BoundingRectangle.Height * i.BoundingRectangle.Width;
            if (area > 3000 && !(i.Convex))
            {
                tester_peak = false;
                tester_valley = false;
                // Centroid via distance transform, computed on the contour's ROI and
                // then shifted back into full-image coordinates.
                skin.ROI = i.BoundingRectangle;
                this.center_pt = FindCentroidByDistanceTrans(skin);
                this.center_pt.X += skin.ROI.X;
                this.center_pt.Y += skin.ROI.Y;
                skin.ROI = Rectangle.Empty;
                Contour<Point> tt = i.ApproxPoly(accuracy, storage);
                LineSegment2D[] edges = PointCollection.PolyLine(tt.ToArray(), true);
                length = edges.Length;
                for (int ij = 0; ij < length; ij++)
                {
                    mod = (ij + 1) % length;
                    // v1: direction of the current edge; v2: reversed direction of the next edge.
                    v1[0, 0] = edges[ij].P2.X - edges[ij].P1.X;
                    v1[1, 0] = edges[ij].P2.Y - edges[ij].P1.Y;
                    v2[0, 0] = edges[mod].P1.X - edges[mod].P2.X;
                    v2[1, 0] = edges[mod].P1.Y - edges[mod].P2.Y;
                    // this equation is quoted from http://www.mathworks.com/matlabcentral/newsreader/view_thread/276582
                    // and it is working very good
                    angle = Math.Atan2(Math.Abs(det(v1, v2)), dot(v1, v2)) * (180.0 / Math.PI);
                    if (angle < 90)
                    {
                        direction = dir(edges[ij].P1, edges[ij].P2, edges[mod].P2);
                        if (direction > 0)
                        {
                            // Turn in one direction: candidate valley (gap between fingers).
                            // Edge lengths must fall in the [min_length, max_length] band.
                            if (((edges[ij].Length < max_length && edges[ij].Length > min_length) || (edges[mod].Length < max_length && edges[mod].Length > min_length)))
                            {
                                if (!tester_valley)
                                {
                                    // First valley of this contour.
                                    tester_valley = true;
                                    refernce_valley = edges[ij].P2;
                                    numberOfValleys++;
                                }
                                else if (FindDistance(edges[ij].P2, first_valley) < min_length && FindDistance(edges[ij].P2, first_valley) > (0.5 * min_length)
                                         // && FindDistance(edges[ij].P2,center_pts) > min_length
                                         // && FindDistance(edges[ij].P2, center_pts) < max_length
                                         )
                                {
                                    // Valley near the previous one: only count it if it is
                                    // also a plausible distance from the last peak.
                                    if (tester_peak)
                                    {
                                        if (FindDistance(edges[ij].P2, first_peak) > min_length && FindDistance(edges[ij].P2, first_peak) < max_length)
                                        {
                                            numberOfValleys++;
                                        }
                                    }
                                }
                                else if (FindDistance(edges[ij].P2, refernce_valley) < min_length && FindDistance(edges[ij].P2, refernce_valley) > (0.5 * min_length)
                                         // && FindDistance(edges[ij].P2, center_pts) > min_length
                                         // && FindDistance(edges[ij].P2, center_pts) < max_length
                                         )
                                {
                                    numberOfValleys++;
                                }
                                first_valley = edges[ij].P2;
                            }
                        }
                        else
                        {
                            // Turn in the other direction: candidate peak (fingertip).
                            if ((edges[ij].Length < max_length && edges[ij].Length > min_length) || (edges[mod].Length < max_length && edges[mod].Length > min_length))
                            {
                                if (!tester_peak)
                                {
                                    // First peak of this contour.
                                    tester_peak = true;
                                    reference_peak = edges[ij].P2;
                                    numberOfPeaks++;
                                }
                                else if (FindDistance(edges[ij].P2, first_peak) > min_length && FindDistance(edges[ij].P2, first_peak) < max_length)
                                {
                                    if (tester_valley)
                                    {
                                        if (FindDistance(edges[ij].P2, first_valley) > min_length && FindDistance(edges[ij].P2, first_valley) < max_length)
                                        {
                                            numberOfPeaks++;
                                        }
                                    }
                                }
                                else if (FindDistance(edges[ij].P2, reference_peak) > min_length && FindDistance(edges[ij].P2, reference_peak) < max_length)
                                {
                                    numberOfPeaks++;
                                }
                                first_peak = edges[ij].P2;
                            }
                        }
                    }
                }
                // A hand needs at least 3 fingertip peaks and 3 valleys between them.
                if (numberOfPeaks >= 3 && numberOfValleys >= 3)
                {
                    //double diff = CvInvoke.cvMatchShapes(i.Ptr, temp_contour.Ptr, CONTOURS_MATCH_TYPE.CV_CONTOUR_MATCH_I1, 0);
                    // if (diff < 0.1)
                    // {
                    newImage.Draw(i.BoundingRectangle, color_blue, 2);
                    imageBoxSkin.Image = newImage;
                    if (!hand_centers.Any())
                    {
                        hand_centers.Add(0, center_pt);
                    }
                    else
                    {
                        double dis = FindDistance(center_pt, hand_centers[0]);
                        if (dis < 200)
                        {
                            // Too close to the first detected hand: treat as the same hand.
                            continue;
                        }
                        else
                        {
                            hand_centers.Add(1, center_pt);
                        }
                    }
                    // Clamp the candidate box height/width so the forearm does not
                    // dominate the extracted hand region.
                    Rectangle te = i.BoundingRectangle;
                    int teHeight = (int)(max_length + min_length);
                    if (te.Height > teHeight)
                    {
                        te.Height = teHeight;
                    }
                    if (te.Width > teHeight)
                    {
                        te.Width = teHeight;
                    }
                    skin.ROI = te;
                    Contour<Point> hand_ = ExtractBiggestContour(skin);
                    if (hand_ != null)
                    {
                        handCandiate.Add(hand_);
                    }
                    skin.ROI = Rectangle.Empty;
                    // }
                }
                // }
                // Reset the per-contour counters before the next contour.
                numberOfPeaks = 0;
                numberOfValleys = 0;
            }
        }
    }
    return(handCandiate);
}
// Rebuilds the filler-vertex selector from the contour's next candidate vertices,
// highlighted in red.
// NOTE(review): "GetNextСandidates" contains a Cyrillic 'С' in the project API;
// the call must keep that exact spelling to resolve.
private void GetFillerPoints()
{
    FillerPointsSelector = new PointsSelector<IFillerVertex>(Contour.GetNextСandidates(), Colors.Red);
}
/// <summary>
/// Recursively scans a contour tree for license-plate-shaped regions: a candidate
/// must contain child contours (characters), have sufficient area and a plate-like
/// width/height ratio; accepted regions are OCR'd and appended to the output lists.
/// </summary>
/// <param name="contours">Head of the contour sibling chain to scan (may be null).</param>
/// <param name="gray">Grayscale source image the plate region is copied from.</param>
/// <param name="canny">Canny edge image (passed through to recursive calls).</param>
/// <param name="licensePlateImagesList">Receives the cropped plate images.</param>
/// <param name="filteredLicensePlateImagesList">Receives the filtered plate images.</param>
/// <param name="detectedLicensePlateRegionList">Receives the detected plate regions.</param>
/// <param name="licenses">Receives the recognized license strings.</param>
private void FindLicensePlate(
    Contour<Point> contours, Image<Gray, Byte> gray, Image<Gray, Byte> canny,
    List<Image<Gray, Byte>> licensePlateImagesList, List<Image<Gray, Byte>> filteredLicensePlateImagesList, List<MCvBox2D> detectedLicensePlateRegionList,
    List<String> licenses)
{
    for (; contours != null; contours = contours.HNext)
    {
        int numberOfChildren = GetNumberOfChildren(contours);
        //if it does not contains any children (charactor), it is not a license plate region
        if (numberOfChildren == 0)
        {
            continue;
        }
        if (contours.Area > 400)
        {
            if (numberOfChildren < 3)
            {
                //If the contour has less than 3 children, it is not a license plate (assuming license plate has at least 3 charactor)
                //However we should search the children of this contour to see if any of them is a license plate
                FindLicensePlate(contours.VNext, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                continue;
            }
            // Normalize the rotated rectangle so width is the long axis and the
            // angle stays within [-45, 45] degrees.
            MCvBox2D box = contours.GetMinAreaRect();
            if (box.angle < -45.0)
            {
                float tmp = box.size.Width;
                box.size.Width = box.size.Height;
                box.size.Height = tmp;
                box.angle += 90.0f;
            }
            else if (box.angle > 45.0)
            {
                float tmp = box.size.Width;
                box.size.Width = box.size.Height;
                box.size.Height = tmp;
                box.angle -= 90.0f;
            }
            double whRatio = (double)box.size.Width / box.size.Height;
            if (!(3.0 < whRatio && whRatio < 10.0))
            //if (!(1.0 < whRatio && whRatio < 2.0))
            {
                //if the width height ratio is not in the specific range,it is not a license plate
                //However we should search the children of this contour to see if any of them is a license plate
                Contour<Point> child = contours.VNext;
                if (child != null)
                {
                    FindLicensePlate(child, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                }
                continue;
            }
            using (Image<Gray, Byte> tmp1 = gray.Copy(box))
            //resize the license plate such that the front is ~ 10-12. This size of front results in better accuracy from tesseract
            using (Image<Gray, Byte> tmp2 = tmp1.Resize(240, 180, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC, true))
            {
                //removes some pixels from the edge
                int edgePixelSize = 2;
                tmp2.ROI = new Rectangle(new Point(edgePixelSize, edgePixelSize), tmp2.Size - new Size(2 * edgePixelSize, 2 * edgePixelSize));
                Image<Gray, Byte> plate = tmp2.Copy();
                Image<Gray, Byte> filteredPlate = FilterPlate(plate);
                Tesseract.Charactor[] words;
                StringBuilder strBuilder = new StringBuilder();
                using (Image<Gray, Byte> tmp = filteredPlate.Clone())
                {
                    // Run OCR on a clone so the filtered image itself stays untouched.
                    _ocr.Recognize(tmp);
                    words = _ocr.GetCharactors();
                    if (words.Length == 0)
                    {
                        continue;
                    }
                    for (int i = 0; i < words.Length; i++)
                    {
                        strBuilder.Append(words[i].Text);
                    }
                }
                licenses.Add(strBuilder.ToString());
                licensePlateImagesList.Add(plate);
                filteredLicensePlateImagesList.Add(filteredPlate);
                detectedLicensePlateRegionList.Add(box);
            }
        }
    }
}
/// <summary>
/// Frame-processing callback: grabs a webcam frame, segments skin in YCrCb space,
/// finds the biggest contour, counts fingers via convexity defects, moves the mouse
/// cursor from the contour centroid (when enabled), and runs template matching
/// against stored .bmp files to trigger database lookup and speech output.
/// </summary>
/// <param name="Sender">Event source (unused).</param>
/// <param name="agr">Event args (unused).</param>
void ProcessFramAndUpdateGUI(object Sender, EventArgs agr)
{
    string[] filePaths = Directory.GetFiles(filepath);
    int Finger_num = 0;
    Double Result1 = 0;
    Double Result2 = 0;
    imgOrignal = CapWebCam.QueryFrame();
    if (imgOrignal == null)
    {
        return;
    }
    //Applying YCrCb filter
    Image<Ycc, Byte> currentYCrCbFrame = imgOrignal.Convert<Ycc, byte>();
    Image<Gray, byte> skin = new Image<Gray, byte>(imgOrignal.Width, imgOrignal.Height);
    // Skin-tone band in YCrCb; pixels inside the band become white in the mask.
    skin = currentYCrCbFrame.InRange(new Ycc(0, 131, 80), new Ycc(255, 185, 135));
    StructuringElementEx rect_12 = new StructuringElementEx(10, 10, 5, 5, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
    //Eroding the source image using the specified structuring element
    CvInvoke.cvErode(skin, skin, rect_12, 1);
    StructuringElementEx rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
    //dilating the source image using the specified structuring element
    CvInvoke.cvDilate(skin, skin, rect_6, 2);
    skin = skin.Flip(FLIP.HORIZONTAL);
    //smoothing the filterd , eroded and dilated image.
    skin = skin.SmoothGaussian(9);
    imgOrignal = imgOrignal.Flip(FLIP.HORIZONTAL);
    //extracting contours.
    Contour<Point> contours = skin.FindContours();
    Contour<Point> biggestContour = null;
    //extracting the biggest contour.
    while (contours != null)
    {
        Result1 = contours.Area;
        if (Result1 > Result2)
        {
            Result2 = Result1;
            biggestContour = contours;
        }
        contours = contours.HNext;
    }
    //applying convexty defect allgoritm to find the count of fingers
    if (biggestContour != null)
    {
        Finger_num = 0;
        biggestContour = biggestContour.ApproxPoly((0.00025));
        imgOrignal.Draw(biggestContour, new Bgr(Color.LimeGreen), 2);
        Hull = biggestContour.GetConvexHull(ORIENTATION.CV_CLOCKWISE);
        defects = biggestContour.GetConvexityDefacts(storage, ORIENTATION.CV_CLOCKWISE);
        imgOrignal.DrawPolyline(Hull.ToArray(), true, new Bgr(0, 0, 256), 2);
        box = biggestContour.GetMinAreaRect();
        defectArray = defects.ToArray();
        for (int i = 0; i < defects.Total; i++)
        {
            PointF startPoint = new PointF((float)defectArray[i].StartPoint.X, (float)defectArray[i].StartPoint.Y);
            PointF depthPoint = new PointF((float)defectArray[i].DepthPoint.X, (float)defectArray[i].DepthPoint.Y);
            PointF endPoint = new PointF((float)defectArray[i].EndPoint.X, (float)defectArray[i].EndPoint.Y);
            CircleF startCircle = new CircleF(startPoint, 5f);
            CircleF depthCircle = new CircleF(depthPoint, 5f);
            CircleF endCircle = new CircleF(endPoint, 5f);
            // Count a defect as a finger when its start point lies above the box
            // center and sufficiently far from the defect's depth point.
            if ((startCircle.Center.Y < box.center.Y || depthCircle.Center.Y < box.center.Y) && (startCircle.Center.Y < depthCircle.Center.Y) && (Math.Sqrt(Math.Pow(startCircle.Center.X - depthCircle.Center.X, 2) + Math.Pow(startCircle.Center.Y - depthCircle.Center.Y, 2)) > box.size.Height / 6.5))
            {
                Finger_num++;
            }
        }
        label2.Text = Finger_num.ToString(); // updating finger count
    }
    // Finding the center of contour
    MCvMoments moment = new MCvMoments(); // a new MCvMoments object
    try
    {
        moment = biggestContour.GetMoments(); // Moments of biggestContour
    }
    catch (NullReferenceException except)
    {
        //label3.Text = except.Message;
        // No contour this frame; skip the rest of the processing.
        return;
    }
    CvInvoke.cvMoments(biggestContour, ref moment, 0);
    double m_00 = CvInvoke.cvGetSpatialMoment(ref moment, 0, 0);
    double m_10 = CvInvoke.cvGetSpatialMoment(ref moment, 1, 0);
    double m_01 = CvInvoke.cvGetSpatialMoment(ref moment, 0, 1);
    int current_X = Convert.ToInt32(m_10 / m_00) / 10; // X location of centre of contour
    int current_Y = Convert.ToInt32(m_01 / m_00) / 10; // Y location of center of contour
    // transfer control to webcam only if button has already been clicked
    if (button_pressed)
    {
        if (Finger_num == 0 || Finger_num == 1)
        {
            Cursor.Position = new Point(current_X * 20, current_Y * 20);
        }
        if (Finger_num >= 4)
        {
            // NOTE(review): intentionally empty? No action is taken for 4+ fingers.
        }
    }
    iborignal.Image = imgOrignal;
    Image<Bgr, Byte> currentFrame;
    Image<Gray, Byte> sourceImage = null;
    // Template-match every stored .bmp against a fresh frame; a high score
    // triggers a database lookup and text-to-speech of the matched record.
    for (int tu = 0; tu < filePaths.Length; tu++)
    {
        if (filePaths[tu].Contains(".bmp"))
        {
            getfilename(System.IO.Path.GetFileName(filePaths[tu]));
            sourceImage = CapWebCam.QueryFrame().Convert<Gray, Byte>();
            Image<Gray, Byte> templateImage = new Image<Gray, Byte>(filePaths[tu]);
            Image<Gray, float> result = sourceImage.MatchTemplate(templateImage, Emgu.CV.CvEnum.TM_TYPE.CV_TM_CCOEFF_NORMED);
            // iborignal.Image = result;
            double[] min, max;
            Point[] pointMin, pointMax;
            templateImage.MinMax(out min, out max, out pointMin, out pointMax);
            float[, ,] matches = result.Data;
            for (int x = 0; x < matches.GetLength(0); x++)
            {
                for (int y = 0; y < matches.GetLength(1); y++)
                {
                    double matchScore = matches[x, y, 0];
                    Console.WriteLine(matchScore);
                    label1.Text = Convert.ToString(matchScore).ToString();
                    // label4.Text = filePaths[tu];
                    if (matchScore > (0.40))
                    {
                        CodeClass db = new CodeClass();
                        db.ConnectToDatabase();
                        // NOTE(review): the file path is concatenated into SQL —
                        // injection-prone; should use a parameterized query.
                        DataTable dt = db.GetTable("Select * from tbldata where FilePath='" + filePaths[tu] + "'");
                        if (dt.Rows.Count > 0)
                        {
                            richTextBox1.Text = dt.Rows[0][0].ToString();
                            richTextBox2.Text = dt.Rows[0][1].ToString();
                            Console.WriteLine(matchScore);
                            reader = new SpeechSynthesizer();
                            reader.SpeakAsync(richTextBox2.Text);
                            Rectangle rect = new Rectangle(new Point(x, y), new Size(1, 1));
                            imgOrignal.Draw(rect, new Bgr(Color.Blue), 1);
                        }
                    }
                    else
                    {
                        richTextBox1.Text = "";
                        richTextBox2.Text = "";
                    }
                }
            }
        }
    }
}
// Renders the filler contour; it is highlighted in green when the hovered point
// equals the contour's first point, otherwise drawn in the standard hover color.
private void RenderFillerLines(RenderManager.CameraInfo cameraInfo)
{
    bool highlightFirst = IsHover && Hover.Equals(Contour.First);
    var color = highlightFirst ? Colors.Green : Colors.Hover;
    Contour.Render(cameraInfo, color);
}
// Finds a pair of inner contour vertex / outer contour vertex that are mutually visible
// and returns a copy of the outer contour with the inner contour spliced in along the
// cut between those two vertices (hole-removal step performed before ear clipping).
public static Contour InsertInnerContourIntoOuterContour(Contour a_rOuterContour, Contour a_rInnerContour)
{
    // Look for the inner vertex of maximum x-value
    Vector2 f2InnerContourVertexMax = Vector2.one * int.MinValue;
    CircularLinkedListNode<Vector2> rMutualVisibleInnerContourVertexNode = null;
    CircularLinkedList<Vector2> rInnerContourVertexList = a_rInnerContour.Vertices;
    CircularLinkedListNode<Vector2> rInnerContourVertexNode = rInnerContourVertexList.First;
    do
    {
        // x-value
        Vector2 f2InnerContourVertex = rInnerContourVertexNode.Value;
        // New max x found
        if (f2InnerContourVertexMax.x < f2InnerContourVertex.x)
        {
            f2InnerContourVertexMax = f2InnerContourVertex;
            rMutualVisibleInnerContourVertexNode = rInnerContourVertexNode;
        }
        // Go to next vertex
        rInnerContourVertexNode = rInnerContourVertexNode.Next;
    }
    while (rInnerContourVertexNode != rInnerContourVertexList.First);

    // Visibility ray, cast to the right from the max-x inner vertex
    Ray oInnerVertexVisibilityRay = new Ray(f2InnerContourVertexMax, Vector3.right);
    float fClosestDistance = int.MaxValue;
    Vector2 f2ClosestOuterEdgeStart = Vector2.zero;
    Vector2 f2ClosestOuterEdgeEnd = Vector2.zero;
    // Work on a copy of the outer contour; the cut is inserted into this copy.
    Contour rOuterCutContour = new Contour(a_rOuterContour.Region);
    rOuterCutContour.AddLast(a_rOuterContour.Vertices);
    CircularLinkedList<Vector2> rOuterCutContourVertexList = rOuterCutContour.Vertices;
    CircularLinkedListNode<Vector2> rOuterContourVertexEdgeStart = null;
    // Raycast from the inner contour vertex to every edge
    CircularLinkedListNode<Vector2> rOuterContourVertexNode = rOuterCutContourVertexList.First;
    do
    {
        // Construct outer edge from current and next outer contour vertices
        Vector2 f2OuterEdgeStart = rOuterContourVertexNode.Value;
        Vector2 f2OuterEdgeEnd = rOuterContourVertexNode.Next.Value;
        Vector2 f2OuterEdge = f2OuterEdgeEnd - f2OuterEdgeStart;
        // Orthogonal vector to edge (pointing to polygon interior)
        Vector2 f2OuterEdgeNormal = Uni2DMathUtils.PerpVector2(f2OuterEdge);
        // Vector from edge start to inner vertex
        Vector2 f2OuterEdgeStartToInnerVertex = f2InnerContourVertexMax - f2OuterEdgeStart;
        // If the inner vertex is on the left of the edge (interior),
        // test if there's any intersection
        if (Vector2.Dot(f2OuterEdgeStartToInnerVertex, f2OuterEdgeNormal) >= 0.0f)
        {
            float fDistanceT;
            // If visibility ray intersects outer edge...
            if (Uni2DMathUtils.Raycast2DSegment(oInnerVertexVisibilityRay, f2OuterEdgeStart, f2OuterEdgeEnd, out fDistanceT) == true)
            {
                // Is it the closest intersection we found?
                if (fClosestDistance > fDistanceT)
                {
                    fClosestDistance = fDistanceT;
                    rOuterContourVertexEdgeStart = rOuterContourVertexNode;
                    f2ClosestOuterEdgeStart = f2OuterEdgeStart;
                    f2ClosestOuterEdgeEnd = f2OuterEdgeEnd;
                }
            }
        }
        // Go to next edge
        rOuterContourVertexNode = rOuterContourVertexNode.Next;
    }
    while (rOuterContourVertexNode != rOuterCutContourVertexList.First);

    // Take the vertex of maximum x-value from the closest intersected edge
    Vector2 f2ClosestVisibleOuterContourVertex;
    CircularLinkedListNode<Vector2> rMutualVisibleOuterContourVertexNode;
    if (f2ClosestOuterEdgeStart.x < f2ClosestOuterEdgeEnd.x)
    {
        f2ClosestVisibleOuterContourVertex = f2ClosestOuterEdgeEnd;
        rMutualVisibleOuterContourVertexNode = rOuterContourVertexEdgeStart.Next;
    }
    else
    {
        f2ClosestVisibleOuterContourVertex = f2ClosestOuterEdgeStart;
        rMutualVisibleOuterContourVertexNode = rOuterContourVertexEdgeStart;
    }
    // Looking for points inside the triangle defined by inner vertex, intersection point and closest outer vertex.
    // If a point is inside this triangle, at least one is a reflex vertex.
    // The closest reflex vertex which minimises the angle this-vertex/inner vertex/intersection vertex
    // would be chosen as the mutual visible vertex.
    Vector3 f3IntersectionPoint = oInnerVertexVisibilityRay.GetPoint(fClosestDistance);
    Vector2 f2InnerContourVertexToIntersectionPoint = new Vector2(f3IntersectionPoint.x, f3IntersectionPoint.y) - f2InnerContourVertexMax;
    Vector2 f2NormalizedInnerContourVertexToIntersectionPoint = f2InnerContourVertexToIntersectionPoint.normalized;
    float fMaxDotAngle = float.MinValue;
    float fMinDistance = float.MaxValue;
    rOuterContourVertexNode = rOuterCutContourVertexList.First;
    do
    {
        Vector2 f2OuterContourVertex = rOuterContourVertexNode.Value;
        // if vertex not part of triangle
        if (f2OuterContourVertex != f2ClosestVisibleOuterContourVertex)
        {
            // if vertex is inside triangle...
            if (Uni2DMathUtils.IsPointInsideTriangle(f2InnerContourVertexMax, f3IntersectionPoint, f2ClosestVisibleOuterContourVertex, f2OuterContourVertex) == true)
            {
                // if vertex is reflex
                Vector2 f2PreviousOuterContourVertex = rOuterContourVertexNode.Previous.Value;
                Vector2 f2NextOuterContourVertex = rOuterContourVertexNode.Next.Value;
                if (IsReflexVertex(f2OuterContourVertex, f2PreviousOuterContourVertex, f2NextOuterContourVertex) == true)
                {
                    // Use dot product as distance
                    Vector2 f2InnerContourVertexToReflexVertex = f2OuterContourVertex - f2InnerContourVertexMax;
                    // INFO: f2NormalizedInnerContourVertexToIntersectionPoint == Vector3.right (if everything is right)
                    float fDistance = Vector2.Dot(f2NormalizedInnerContourVertexToIntersectionPoint, f2InnerContourVertexToReflexVertex);
                    float fDotAngle = Vector2.Dot(f2NormalizedInnerContourVertexToIntersectionPoint, f2InnerContourVertexToReflexVertex.normalized);
                    // New mutual visible vertex if angle smaller (i.e. dot angle larger) than min, or equal and closer
                    if (fDotAngle > fMaxDotAngle || (fDotAngle == fMaxDotAngle && fDistance < fMinDistance))
                    {
                        fMaxDotAngle = fDotAngle;
                        fMinDistance = fDistance;
                        rMutualVisibleOuterContourVertexNode = rOuterContourVertexNode;
                    }
                }
            }
        }
        // Go to next vertex
        rOuterContourVertexNode = rOuterContourVertexNode.Next;
    }
    while (rOuterContourVertexNode != rOuterCutContourVertexList.First);

    // Insert now the cut into the polygon
    // The cut starts from the outer contour mutual visible vertex to the inner vertex
    CircularLinkedListNode<Vector2> rOuterContourVertexNodeToInsertBefore = rMutualVisibleOuterContourVertexNode.Next;
    // Loop over the inner contour starting from the inner contour vertex...
    rInnerContourVertexNode = rMutualVisibleInnerContourVertexNode;
    do
    {
        // ... add the inner contour vertex before the outer contour vertex after the cut
        rOuterCutContourVertexList.AddBefore(rOuterContourVertexNodeToInsertBefore, rInnerContourVertexNode.Value);
        rInnerContourVertexNode = rInnerContourVertexNode.Next;
    }
    while (rInnerContourVertexNode != rMutualVisibleInnerContourVertexNode);
    // Close the cut by doubling the inner and outer contour vertices
    rOuterCutContourVertexList.AddBefore(rOuterContourVertexNodeToInsertBefore, rMutualVisibleInnerContourVertexNode.Value);
    rOuterCutContourVertexList.AddBefore(rOuterContourVertexNodeToInsertBefore, rMutualVisibleOuterContourVertexNode.Value);
    return(rOuterCutContour);
}
// Returns a polygonized mesh from a 2D outer contour via ear clipping.
// a_rDominantOuterContour: contour to triangulate (holes must already be cut in);
// a_f2Scale / a_f3PivotPoint: transform applied to the output vertices;
// a_fWidth / a_fHeight: dimensions used to normalize vertex positions into UVs.
private static void EarClipping(Contour a_rDominantOuterContour, Vector2 a_f2Scale, Vector3 a_f3PivotPoint, float a_fWidth, float a_fHeight, out Vector3[] a_rVerticesArray, out int[] a_rTrianglesArray, out Vector2[] a_rUVs)
{
    // Sum of all contours count
    int iVerticesCount = a_rDominantOuterContour.Count;
    // Mesh vertices array
    a_rVerticesArray = new Vector3[iVerticesCount];
    // Mesh UVs array
    a_rUVs = new Vector2[iVerticesCount];
    // Vertex indexes lists array (used by ear clipping algorithm)
    CircularLinkedList<int> oVertexIndexesList = new CircularLinkedList<int>( );
    // Build contour vertex index circular list:
    // store every Vector3 into the mesh vertices array and
    // store the corresponding index into the circular list.
    int iVertexIndex = 0;
    foreach (Vector2 f2OuterContourVertex in a_rDominantOuterContour.Vertices)
    {
        a_rVerticesArray[iVertexIndex] = f2OuterContourVertex;
        oVertexIndexesList.AddLast(iVertexIndex);
        ++iVertexIndex;
    }
    // Build reflex/convex vertices lists
    LinkedList<int> rReflexVertexIndexesList;
    LinkedList<int> rConvexVertexIndexesList;
    BuildReflexConvexVertexIndexesLists(a_rVerticesArray, oVertexIndexesList, out rReflexVertexIndexesList, out rConvexVertexIndexesList);
    // Triangles for this contour
    List<int> oTrianglesList = new List<int>(3 * iVerticesCount);
    // Build ear tips list
    CircularLinkedList<int> rEarTipVertexIndexesList = BuildEarTipVerticesList(a_rVerticesArray, oVertexIndexesList, rReflexVertexIndexesList, rConvexVertexIndexesList);
    // Remove the ear tips one by one!
    while (rEarTipVertexIndexesList.Count > 0 && oVertexIndexesList.Count > 2)
    {
        CircularLinkedListNode<int> rEarTipNode = rEarTipVertexIndexesList.First;
        // Ear tip index
        int iEarTipVertexIndex = rEarTipNode.Value;
        // Ear vertex indexes
        CircularLinkedListNode<int> rContourVertexNode = oVertexIndexesList.Find(iEarTipVertexIndex);
        CircularLinkedListNode<int> rPreviousAdjacentContourVertexNode = rContourVertexNode.Previous;
        CircularLinkedListNode<int> rNextAdjacentContourVertexNode = rContourVertexNode.Next;
        int iPreviousAjdacentContourVertexIndex = rPreviousAdjacentContourVertexNode.Value;
        int iNextAdjacentContourVertexIndex = rNextAdjacentContourVertexNode.Value;
        // Add the ear triangle to our triangles list
        oTrianglesList.Add(iPreviousAjdacentContourVertexIndex);
        oTrianglesList.Add(iEarTipVertexIndex);
        oTrianglesList.Add(iNextAdjacentContourVertexIndex);
        // Remove the ear tip from vertices / convex / ear lists
        oVertexIndexesList.Remove(iEarTipVertexIndex);
        rConvexVertexIndexesList.Remove(iEarTipVertexIndex);
        // Adjacent n-1 vertex:
        // if was convex => remains convex, can possibly be an ear
        // if was an ear => can possibly not remain an ear
        // if was reflex => can possibly become convex and possibly an ear
        if (rReflexVertexIndexesList.Contains(iPreviousAjdacentContourVertexIndex))
        {
            CircularLinkedListNode<int> rPreviousPreviousAdjacentContourVertexNode = rPreviousAdjacentContourVertexNode.Previous;
            Vector3 f3AdjacentContourVertex = a_rVerticesArray[rPreviousAdjacentContourVertexNode.Value];
            Vector3 f3PreviousAdjacentContourVertex = a_rVerticesArray[rPreviousPreviousAdjacentContourVertexNode.Value];
            Vector3 f3NextAdjacentContourVertex = a_rVerticesArray[rPreviousAdjacentContourVertexNode.Next.Value];
            if (IsReflexVertex(f3AdjacentContourVertex, f3PreviousAdjacentContourVertex, f3NextAdjacentContourVertex) == false)
            {
                rReflexVertexIndexesList.Remove(iPreviousAjdacentContourVertexIndex);
                rConvexVertexIndexesList.AddFirst(iPreviousAjdacentContourVertexIndex);
            }
        }
        // Adjacent n+1 vertex:
        // if was convex => remains convex, can possibly be an ear
        // if was an ear => can possibly not remain an ear
        // if was reflex => can possibly become convex and possibly an ear
        if (rReflexVertexIndexesList.Contains(iNextAdjacentContourVertexIndex))
        {
            CircularLinkedListNode<int> rNextNextAdjacentContourVertexNode = rNextAdjacentContourVertexNode.Next;
            Vector3 f3AdjacentContourVertex = a_rVerticesArray[rNextAdjacentContourVertexNode.Value];
            Vector3 f3PreviousAdjacentContourVertex = a_rVerticesArray[rNextAdjacentContourVertexNode.Previous.Value];
            Vector3 f3NextAdjacentContourVertex = a_rVerticesArray[rNextNextAdjacentContourVertexNode.Value];
            if (IsReflexVertex(f3AdjacentContourVertex, f3PreviousAdjacentContourVertex, f3NextAdjacentContourVertex) == false)
            {
                rReflexVertexIndexesList.Remove(iNextAdjacentContourVertexIndex);
                rConvexVertexIndexesList.AddFirst(iNextAdjacentContourVertexIndex);
            }
        }
        // Re-evaluate the ear status of both neighbours now that the ear was clipped.
        if (rConvexVertexIndexesList.Contains(iPreviousAjdacentContourVertexIndex))
        {
            if (IsEarTip(a_rVerticesArray, iPreviousAjdacentContourVertexIndex, oVertexIndexesList, rReflexVertexIndexesList))
            {
                if (rEarTipVertexIndexesList.Contains(iPreviousAjdacentContourVertexIndex) == false)
                {
                    rEarTipVertexIndexesList.AddLast(iPreviousAjdacentContourVertexIndex);
                }
            }
            else
            {
                rEarTipVertexIndexesList.Remove(iPreviousAjdacentContourVertexIndex);
            }
        }
        if (rConvexVertexIndexesList.Contains(iNextAdjacentContourVertexIndex))
        {
            if (IsEarTip(a_rVerticesArray, iNextAdjacentContourVertexIndex, oVertexIndexesList, rReflexVertexIndexesList))
            {
                if (rEarTipVertexIndexesList.Contains(iNextAdjacentContourVertexIndex) == false)
                {
                    rEarTipVertexIndexesList.AddFirst(iNextAdjacentContourVertexIndex);
                }
            }
            else
            {
                rEarTipVertexIndexesList.Remove(iNextAdjacentContourVertexIndex);
            }
        }
        rEarTipVertexIndexesList.Remove(iEarTipVertexIndex);
    }
    // Create UVs, rescale vertices, apply pivot
    Vector2 f2Dimensions = new Vector2(1.0f / a_fWidth, 1.0f / a_fHeight);
    for (iVertexIndex = 0; iVertexIndex < iVerticesCount; ++iVertexIndex)
    {
        Vector3 f3VertexPos = a_rVerticesArray[iVertexIndex];
        //a_rUVs[ iVertexIndex ] = Vector2.Scale( f3VertexPos, f2Dimensions );
        a_rUVs[iVertexIndex] = new Vector2(f3VertexPos.x * f2Dimensions.x, f3VertexPos.y * f2Dimensions.y);
        Vector2 f2Vertex = (f3VertexPos - a_f3PivotPoint);
        f2Vertex.x *= a_f2Scale.x;
        f2Vertex.y *= a_f2Scale.y;
        a_rVerticesArray[iVertexIndex] = f2Vertex;
    }
    a_rTrianglesArray = oTrianglesList.ToArray( );
}
// Rebuilds the sphere-surface meshes for a path: destroys all previously
// generated child meshes, splits the path's points into contiguous runs lying
// on the northern (z > 0) and southern (z <= 0) hemisphere, and triangulates
// each run into its own mesh. Northern meshes get their normals negated and
// their triangle winding reversed; southern meshes are left as produced
// (NOTE(review): that asymmetry is preserved from the original — confirm it
// is intentional).
void setSphereMesh(List <Vector3> staticPositions, MPath path2) {
	// Throw away every mesh generated by a previous call.
	foreach (var meshFilter in parentMeshObject.GetComponentsInChildren <MeshFilter> ()) {
		GameObject.Destroy(meshFilter.gameObject);
	}
	meshes.Clear();
	var points = getPoints(path2);
	// Partition the point sequence into per-hemisphere runs. A new run is
	// started every time the path crosses the equator (sign change of z).
	var northLists = new List <List <Vector3> > ();
	var southLists = new List <List <Vector3> > ();
	bool fromNorth = points[0].z > 0;
	if (fromNorth) {
		northLists.Add(new List <Vector3> ());
	} else {
		southLists.Add(new List <Vector3> ());
	}
	// Indices at which the path switched hemisphere. Entries are always >= 1
	// because the first point only establishes the starting hemisphere.
	List <int> switchedBefore = new List <int> ();
	for (int i = 0; i < points.Count; i++) {
		var vector = points[i];
		if (vector.z > 0) {
			if (!fromNorth) {
				switchedBefore.Add(i);
				fromNorth = true;
				northLists.Add(new List <Vector3> ());
			}
			northLists[northLists.Count - 1].Add(vector);
		} else {
			if (fromNorth) {
				switchedBefore.Add(i);
				fromNorth = false;
				southLists.Add(new List <Vector3> ());
			}
			southLists[southLists.Count - 1].Add(vector);
		}
	}
	UnityEngine.Debug.Log("Number Northlists: " + northLists.Count);
	UnityEngine.Debug.Log("Switched before: ");
	foreach (var point in switchedBefore) {
		UnityEngine.Debug.Log(point);
	}
	// NOTE(review): the original code computed the equator midpoint of every
	// crossing pair here but never used the results; that dead loop was removed.
	// Northern meshes face the wrong way after triangulation, so flip them.
	buildHemisphereMeshes(northLists, staticPositions, "north", true);
	UnityEngine.Debug.Log("Number Southlists: " + southLists.Count);
	buildHemisphereMeshes(southLists, staticPositions, "south", false);
}

// Triangulates each run of hemisphere points into its own mesh.
// hemisphereLists: the per-hemisphere point runs produced by setSphereMesh.
// staticPositions: candidate hole-seed positions; the first one found inside
//                  a run's contour is passed to the triangulator as a hole.
// label:           hemisphere name, used only for logging.
// flipFacing:      when true, negate the mesh normals and reverse the
//                  triangle winding so the faces point the other way.
private void buildHemisphereMeshes(List <List <Vector3> > hemisphereLists, List <Vector3> staticPositions, string label, bool flipFacing) {
	foreach (var hemispherePoints in hemisphereLists) {
		var vertices = ToVertex(hemispherePoints);
		if (vertices.Count == 0) {
			continue;
		}
		var polygon = new Polygon();
		UnityEngine.Debug.Log("Number of vertices " + label + ": " + vertices.Count);
		var contour = new Contour(vertices);
		// Find a static position inside the contour to use as a hole seed.
		// The constpoint check only produces diagnostic logging.
		Point point = null;
		for (int i = 0; i < staticPositions.Count; i++) {
			if (IsPointInPolygon(staticPositions[i], vertices)) {
				point = new Point(staticPositions[i].x, staticPositions[i].y);
				UnityEngine.Debug.Log("Is inside");
				break;
			} else {
				if (IsPointInPolygon(constpoint, vertices)) {
					UnityEngine.Debug.Log(constpoint + " Is inside");
				}
			}
		}
		if (point != null) {
			polygon.Add(contour, point);
		} else {
			polygon.Add(contour);
		}
		List <int> triangles;
		List <Vector3> meshVertices;
		SetMesh(polygon, out meshVertices, out triangles);
		var mesh = AddMesh();
		mesh.vertices = meshVertices.ToArray();
		mesh.triangles = triangles.ToArray();
		if (flipFacing) {
			// Negate every normal...
			Vector3[] normals = mesh.normals;
			for (int i = 0; i < normals.Length; i++) {
				normals[i] = -normals[i];
			}
			mesh.normals = normals;
			// ...and reverse the winding of every triangle so the rendered
			// face direction matches the flipped normals.
			for (int m = 0; m < mesh.subMeshCount; m++) {
				int[] triangles2 = mesh.GetTriangles(m);
				for (int i = 0; i < triangles2.Length; i += 3) {
					int temp = triangles2[i + 0];
					triangles2[i + 0] = triangles2[i + 1];
					triangles2[i + 1] = temp;
				}
				mesh.SetTriangles(triangles2, m);
			}
		}
	}
}
// Rebuilds the flat (2D) mesh for a path: triangulates the path's outline,
// seeding a hole at the first static position found inside the outline (if
// any), and writes the resulting vertices/triangles into the first mesh.
void set2DMesh(List <Vector3> staticPositions, MPath path2) {
	var mesh = meshes[0];
	mesh.Clear();
	var polygon = new Polygon();
	var points = getPoints(path2);
	var vertices = ToVertex(points);
	var contour = new Contour(vertices);
	// Find a static position inside the outline to use as a hole seed.
	// The constpoint check only produces diagnostic logging.
	Point point = null;
	for (int i = 0; i < staticPositions.Count; i++) {
		if (IsPointInPolygon(staticPositions[i], vertices)) {
			point = new Point(staticPositions[i].x, staticPositions[i].y);
			UnityEngine.Debug.Log("Is inside");
			break;
		} else {
			if (IsPointInPolygon(constpoint, vertices)) {
				UnityEngine.Debug.Log(constpoint + " Is inside");
			}
		}
	}
	if (point != null) {
		polygon.Add(contour, point);
	} else {
		polygon.Add(contour);
	}
	// Triangulate with default constraint/quality settings; tighter quality
	// bounds (MinimumAngle, MaximumArea) can be set here when needed.
	var options = new ConstraintOptions() { };
	var quality = new QualityOptions() { };
	var polyMesh = polygon.Triangulate(options, quality);
	// Convert the triangulator output into Unity mesh data.
	// (An unused local holding polyMesh.Vertices was removed.)
	List <Vector3> meshVertices = new List <Vector3> ();
	List <int> triangles = getTriangles(polyMesh.Triangles, meshVertices);
	mesh.vertices = meshVertices.ToArray();
	mesh.triangles = triangles.ToArray();
}
// Finds quadrilaterals on the image. Detected quads are bucketed by their
// width and only those falling into the most populated bucket are kept,
// which filters out outliers such as the drawing sheet itself being
// recognised as a rectangle.
// img:     input colour image.
// boxlist: out — the surviving min-area boxes.
// Returns a copy of img with the accepted rectangles drawn in green.
private Image <Bgr, Byte> GetRectangles(Image <Bgr, Byte> img, out List <MCvBox2D> boxlist)
{
	// NOTE(review): the original also built a grayscale copy of img that was
	// never used; Canny still runs on the colour image here. Possibly the
	// grayscale image was meant to feed Canny — flagged, behaviour unchanged.
	Image <Gray, Byte> cannyEdges = img.Canny(100.0, 60.0);
	List <MCvBox2D> boxList = new List <MCvBox2D>();
	Contour <Point> contours;
	using (MemStorage storage = new MemStorage())
		for (
			contours = cannyEdges.FindContours(
				Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
				Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
				storage);
			contours != null;
			contours = contours.HNext)
		{
			Contour <Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
			// A candidate must reduce to exactly 4 vertices with non-trivial area.
			if (currentContour.Total == 4 && currentContour.Area > 50)
			{
				// Every corner angle must be close to 90 degrees (80..100).
				bool isRectangle = true;
				Point[] pts = currentContour.ToArray();
				LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
				for (int i = 0; i < edges.Length; i++)
				{
					double angle = Math.Abs(
						edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
					if (angle < 80 || angle > 100)
					{
						isRectangle = false;
						break;
					}
				}
				if (isRectangle)
				{
					boxList.Add(currentContour.GetMinAreaRect());
				}
			}
		}
	Image <Bgr, Byte> triangleRectangleImage = img.Copy();
	// Histogram the box widths into 10 px buckets (buckets 4..19) and pick the
	// most populated bucket; its mean width is the representative width.
	int avg = 0;
	int max = 0;
	int[,] discVal = new int[20, 2]; // [bucket, 0] = width sum, [bucket, 1] = count
	foreach (MCvBox2D box in boxList)
	{
		int i = box.MinAreaRect().Width / 10;
		if (i < 20 && i > 3)
		{
			discVal[i, 0] += box.MinAreaRect().Width;
			discVal[i, 1]++;
			if (discVal[i, 1] > discVal[max, 1])
			{
				max = i;
			}
		}
	}
	if (discVal[max, 1] != 0)
	{
		avg = discVal[max, 0] / discVal[max, 1];
	}
	// Keep only boxes whose width is within +/-10 px of the representative
	// width; draw the survivors and drop near-duplicates whose centres are
	// closer than 10 px.
	for (int i = 0; i < boxList.Count; i++)
	{
		if (boxList[i].MinAreaRect().Width > avg - 10 && boxList[i].MinAreaRect().Width < avg + 10)
		{
			triangleRectangleImage.Draw(boxList[i], new Bgr(0, 255, 0), 2);
			for (int j = i + 1; j < boxList.Count; j++)
			{
				double dist =
					Math.Sqrt((boxList[i].center.X - boxList[j].center.X) * (boxList[i].center.X - boxList[j].center.X) +
					          (boxList[i].center.Y - boxList[j].center.Y) * (boxList[i].center.Y - boxList[j].center.Y));
				if (dist < 10)
				{
					boxList.RemoveAt(j);
					j--; // BUGFIX: re-examine the element shifted into slot j
				}
			}
		}
		else
		{
			boxList.RemoveAt(i);
			i--;
		}
	}
	boxlist = boxList;
	return triangleRectangleImage;
}