/// <summary>
/// Process z-slabs of cells in parallel.
/// </summary>
void generate_parallel() {
    mesh_lock = new SpinLock();
    parallel_mesh_access = true;

    // [TODO] maybe shouldn't always use Z axis here?
    gParallel.ForEach(Interval1i.Range(CellDimensions.z), (zi) => {
        var cell = new GridCell();
        int[] vertlist = new int[12];
        for (int yi = 0; yi < CellDimensions.y; ++yi) {
            if (CancelF()) { return; }
            // compute full cell at x=0, then slide along x row, which saves half of value computes
            var idx = new Vector3i(0, yi, zi);
            initialize_cell(cell, ref idx);
            polygonize_cell(cell, vertlist);
            for (int xi = 1; xi < CellDimensions.x; ++xi) {
                shift_cell_x(cell, xi);
                polygonize_cell(cell, vertlist);
            }
        }
    });

    parallel_mesh_access = false;
}
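// A minimal sketch (not the library implementation) of the x-sliding trick used by
// generate_parallel() above. The corner indexing is an assumption: a standard
// marching-cubes ordering where corners {0,3,4,7} lie on the min-x face and
// {1,2,5,6} on the max-x face. GridCell.p / GridCell.f and Implicit.Value stand in
// for the cached corner positions/values and the scalar field being polygonized.
static readonly int[] minXCorners = { 0, 3, 4, 7 };
static readonly int[] maxXCorners = { 1, 2, 5, 6 };
void shift_cell_x_sketch(GridCell cell, double cellSize) {
    for (int k = 0; k < 4; ++k) {
        // the old max-x face becomes the new min-x face, so only 4 of the
        // 8 corner values have to be re-evaluated per cube
        cell.p[minXCorners[k]] = cell.p[maxXCorners[k]];
        cell.f[minXCorners[k]] = cell.f[maxXCorners[k]];
        cell.p[maxXCorners[k]].x += cellSize;
        cell.f[maxXCorners[k]] = Implicit.Value(ref cell.p[maxXCorners[k]]);
    }
}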
protected virtual void precompute_shells() {
    int nLayers = Slices.Count;

    LayerShells = new List<ShellsFillPolygon>[nLayers];
    gParallel.ForEach(Interval1i.Range(nLayers), (layeri) => {
        PlanarSlice slice = Slices[layeri];
        LayerShells[layeri] = new List<ShellsFillPolygon>();

        List<GeneralPolygon2d> solids = slice.Solids;
        foreach (GeneralPolygon2d shape in solids) {
            ShellsFillPolygon shells_gen = new ShellsFillPolygon(shape);
            shells_gen.PathSpacing = Settings.SolidFillPathSpacingMM();
            shells_gen.ToolWidth = Settings.Machine.NozzleDiamMM;
            shells_gen.Layers = Settings.Shells;
            shells_gen.InsetInnerPolygons = false;
            shells_gen.Compute();
            LayerShells[layeri].Add(shells_gen);

            if (slice.Tags.Has(shape)) {
                // note: ShellTags is shared across the parallel layers; concurrent
                // Add calls here may need a lock
                ShellTags.Add(shells_gen, slice.Tags.Get(shape));
            }
        }
    });
}
void cache_input_sdfs() {
    gParallel.ForEach(Interval1i.Range(mesh_sources.Count), (k) => {
        if (cached_sdfs[k] != null) { return; }
        if (is_invalidated()) { return; }

        DMesh3 source_mesh = mesh_sources[k].GetDMeshUnsafe();
        Vector3d expand = source_mesh.CachedBounds.Extents;
        int exact_cells = 2;
        MeshSignedDistanceGrid sdf = new MeshSignedDistanceGrid(source_mesh, grid_cell_size) {
            ExactBandWidth = exact_cells,
            ComputeMode = MeshSignedDistanceGrid.ComputeModes.FullGrid,
            ExpandBounds = expand
        };
        sdf.CancelF = is_invalidated;
        sdf.Compute();
        if (is_invalidated()) { return; }

        cached_sdfs[k] = sdf;
        cached_isos[k] = new DenseGridTrilinearImplicit(sdf.Grid, sdf.GridOrigin, sdf.CellSize);
    });
}
void cache_bvtrees(bool bWinding) {
    gParallel.ForEach(Interval1i.Range(mesh_sources.Count), (k) => {
        if (cached_bvtrees[k] != null) { return; }
        if (is_invalidated()) { return; }
        DMesh3 source_mesh = mesh_sources[k].GetDMeshUnsafe();
        cached_bvtrees[k] = new DMeshAABBTreePro(source_mesh, true);
    });

    if (bWinding) {
        gParallel.ForEach(Interval1i.Range(mesh_sources.Count), (k) => {
            if (is_invalidated()) { return; }
            // evaluate once to force precomputation of the fast-winding-number cache
            cached_bvtrees[k].FastWindingNumber(Vector3d.Zero);
        });
    }
}
/// <summary>
/// Construct packed versions of input matrices, and then use sparse row/column dot
/// to compute elements of output matrix. This is faster, but still relatively expensive.
/// </summary>
void multiply_fast(SymmetricSparseMatrix M2in, ref SymmetricSparseMatrix Rin, bool bParallel) {
    int N = Rows;
    if (M2in.Rows != N) {
        throw new Exception("SymmetricSparseMatrix.Multiply: matrices have incompatible dimensions");
    }

    if (Rin == null) {
        Rin = new SymmetricSparseMatrix();
    }
    SymmetricSparseMatrix R = Rin;      // require alias for use in lambda below

    PackedSparseMatrix M = new PackedSparseMatrix(this);
    M.Sort();
    PackedSparseMatrix M2 = new PackedSparseMatrix(M2in, true);
    M2.Sort();

    // Parallel variant is vastly faster; uses a lock to control access to R
    if (bParallel) {
        gParallel.ForEach(Interval1i.Range(N), (r1i) => {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M2);
                if (Math.Abs(v) > math.MathUtil.ZeroTolerance) {
                    lock (R) {
                        R[r1i, c2i] = v;
                    }
                }
            }
        });
    } else {
        for (int r1i = 0; r1i < N; r1i++) {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M2);
                if (Math.Abs(v) > math.MathUtil.ZeroTolerance) {
                    R[r1i, c2i] = v;
                }
            }
        }
    }
}
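// A minimal usage sketch of the pattern above. The public wrapper name Multiply is
// an assumption standing in for whatever entry point calls multiply_fast; the toy
// values are illustrative only.
SymmetricSparseMatrix A = new SymmetricSparseMatrix();
SymmetricSparseMatrix B = new SymmetricSparseMatrix();
A[0, 1] = 2.0;                       // symmetric, so [1,0] is implied
B[1, 1] = 3.0;
SymmetricSparseMatrix R = null;
A.Multiply(B, ref R, true);          // assumed wrapper over multiply_fast
double r01 = R[0, 1];                // (A*B)[0,1] = A[0,1]*B[1,1] = 6.0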
public void Sort() {
    // sort each packed row by column index, in parallel
    gParallel.ForEach(Interval1i.Range(Rows.Length), (i) => {
        Array.Sort(Rows[i], (x, y) => x.j.CompareTo(y.j));
    });
    Sorted = true;
}
void cache_input_sdfs_bounded() {
    if (cached_bounded_sdfs == null) {
        cached_bounded_sdfs = new MeshSignedDistanceGrid[mesh_sources.Count];
        cached_bounded_sdf_maxdist = new double[mesh_sources.Count];
    }
    cache_bvtrees(false);

    double falloff_distance = blend_falloff;

    gParallel.ForEach(Interval1i.Range(mesh_sources.Count), (k) => {
        if (falloff_distance > cached_bounded_sdf_maxdist[k]) {
            cached_bounded_sdfs[k] = null;
        }
        // [TODO] we could expand via flood-fill here instead of throwing away all previously computed!
        if (cached_bounded_sdfs[k] != null) { return; }
        if (is_invalidated()) { return; }

        int exact_cells = (int)(falloff_distance / grid_cell_size) + 2;

        DMesh3 source_mesh = mesh_sources[k].GetDMeshUnsafe();
        DMeshAABBTree3 use_spatial = GenerateClosedMeshOp.MeshSDFShouldUseSpatial(
            cached_bvtrees[k], exact_cells, grid_cell_size, source_edge_stats[k].z);
        MeshSignedDistanceGrid sdf = new MeshSignedDistanceGrid(source_mesh, grid_cell_size, use_spatial) {
            ExactBandWidth = exact_cells
        };
        if (use_spatial != null) {
            sdf.NarrowBandMaxDistance = falloff_distance + grid_cell_size;
            sdf.ComputeMode = MeshSignedDistanceGrid.ComputeModes.NarrowBand_SpatialFloodFill;
        }

        sdf.CancelF = is_invalidated;
        sdf.Compute();
        if (is_invalidated()) { return; }

        cached_bounded_sdfs[k] = sdf;
        cached_bounded_sdf_maxdist[k] = falloff_distance;
    });
}
/// <summary>
/// Add explicit support points for any small floating polygons.
/// These can be used at toolpathing time to ensure support for
/// such areas, which otherwise might be lost (or insufficiently
/// supported) by the standard techniques to detect support regions.
/// </summary>
public void AddMinZTipSupportPoints(double tipDiamThresh = 2.0, int nExtraLayers = 0) {
    List<Vector3d> tips = new List<Vector3d>();
    SpinLock tiplock = new SpinLock();

    int N = Slices.Count;
    gParallel.ForEach(Interval1i.FromToInclusive(1, N - 1), (li) => {
        PlanarSlice slice = Slices[li];
        PlanarSlice prev = Slices[li - 1];
        foreach (GeneralPolygon2d poly in slice.InputSolids) {
            AxisAlignedBox2d bounds = poly.Bounds;
            if (bounds.MaxDim > tipDiamThresh) { continue; }

            Vector2d c = bounds.Center;
            bool contained = false;
            foreach (var poly2 in prev.InputSolids) {
                if (poly2.Contains(c)) {
                    contained = true;
                    break;
                }
            }
            if (contained) { continue; }

            bool entered = false;
            tiplock.Enter(ref entered);
            tips.Add(new Vector3d(c.x, c.y, li));
            tiplock.Exit();
        }
    });

    foreach (var tip in tips) {
        int layer_i = (int)tip.z;
        int add_to = Math.Min(N - 1, layer_i + nExtraLayers);
        for (int i = layer_i; i < add_to; ++i) {
            Slices[i].InputSupportPoints.Add(tip.xy);
        }
    }
}
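// A minimal usage sketch: after slicing, tag small floating tips so the toolpather
// can support them. The threshold and layer count are illustrative; 'stack' is
// assumed to be the slice stack this method is defined on.
PlanarSliceStack stack = slicer.Compute();
stack.AddMinZTipSupportPoints(2.0, 2);   // tips under 2mm across, carried into subsequent layers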
public InputField RegisterIntInput(string inputName, string toolParamName, Interval1i validRange) {
    InputField input = UnityUIUtil.FindInputAndAddIntHandlers(this.gameObject, inputName,
        () => { return ActiveParameterSet.GetValueInt(toolParamName); },
        (intValue) => {
            ActiveParameterSet.SetValue<int>(toolParamName, intValue);
            update_values_from_tool();
        },
        validRange.a, validRange.b);
    TabOrder.Add(input);
    int_params.Add(new IntInputParam() { widget = input, paramName = toolParamName });
    return input;
}
/// <summary>
/// Format is:
/// [num_slices]
/// [slice0_z]
/// [num_polys_in_slice_0]
/// [x0 y0 x1 y1 x2 y2 ...]
/// [x0 y0 x1 y1 ... ]
/// [slice1_z]
/// [num_polys_in_slice_1]
/// ...
/// </summary>
public void ReadSimpleSliceFormat(TextReader reader) {
    PlanarComplex.FindSolidsOptions options = PlanarComplex.FindSolidsOptions.SortPolygons;
    options.TrustOrientations = false;

    char[] splitchars = new char[] { ' ' };

    int nSlices = int.Parse(reader.ReadLine());
    PlanarComplex[] layer_complexes = new PlanarComplex[nSlices];

    for (int si = 0; si < nSlices; ++si) {
        PlanarSlice slice = new PlanarSlice();
        slice.Z = double.Parse(reader.ReadLine());

        PlanarComplex complex = new PlanarComplex();
        int nPolys = int.Parse(reader.ReadLine());
        for (int pi = 0; pi < nPolys; pi++) {
            string[] stringValues = reader.ReadLine().Split(splitchars, StringSplitOptions.RemoveEmptyEntries);
            double[] values = Array.ConvertAll(stringValues, Double.Parse);
            Polygon2d poly = new Polygon2d(values);
            if (poly.VertexCount < 3) { continue; }
            complex.Add(poly);
        }

        // solid-region resolution is deferred so it can run in parallel below
        layer_complexes[si] = complex;
        Slices.Add(slice);
    }

    gParallel.ForEach(Interval1i.Range(nSlices), (si) => {
        var solidInfo = layer_complexes[si].FindSolidRegions(options);
        Slices[si].InputSolids = solidInfo.Polygons;
        Slices[si].Resolve();
    });
}
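// A tiny hand-written input matching the documented format: two slices at z=0.2
// and z=0.4, the first containing one unit-square polygon, the second empty.
// Values are illustrative; 'stack' is assumed to be the containing slice-stack
// object, and StringReader comes from System.IO.
string sample =
    "2\n" +
    "0.2\n" +
    "1\n" +
    "0 0 1 0 1 1 0 1\n" +
    "0.4\n" +
    "0\n";
stack.ReadSimpleSliceFormat(new StringReader(sample));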
// returns this*this (requires less memory than the general multiply)
public SymmetricSparseMatrix Square(bool bParallel = true) {
    SymmetricSparseMatrix R = new SymmetricSparseMatrix();
    PackedSparseMatrix M = new PackedSparseMatrix(this);
    M.Sort();

    // Parallel variant is vastly faster; uses a lock to control access to R
    if (bParallel) {
        gParallel.ForEach(Interval1i.Range(N), (r1i) => {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M);
                if (Math.Abs(v) > math.MathUtil.ZeroTolerance) {
                    lock (R) {
                        R[r1i, c2i] = v;
                    }
                }
            }
        });
    } else {
        for (int r1i = 0; r1i < N; r1i++) {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M);
                if (Math.Abs(v) > math.MathUtil.ZeroTolerance) {
                    R[r1i, c2i] = v;
                }
            }
        }
    }
    return R;
}
public void Interval1iRoundTrip() {
    var interval = new Interval1i(3, 6);
    var converter = new Interval1iConverter();
    var serializerSettings = new JsonSerializerSettings() {
        Converters = new JsonConverter[] { converter }
    };

    var json = JsonConvert.SerializeObject(interval, serializerSettings);
    var result = JsonConvert.DeserializeObject<Interval1i>(json, serializerSettings);

    Assert.AreEqual(interval.a, result.a);
    Assert.AreEqual(interval.b, result.b);
}
void compute_cache_lazy_sdfs() {
    if (cached_lazy_sdfs == null) {
        cached_lazy_sdfs = new CachingMeshSDF[mesh_sources.Count];
        cached_lazy_sdf_maxdists = new double[mesh_sources.Count];
    }
    cache_bvtrees(false);

    double need_distance = blend_falloff;

    gParallel.ForEach(Interval1i.Range(mesh_sources.Count), (k) => {
        if (need_distance > cached_lazy_sdf_maxdists[k]) {
            cached_lazy_sdfs[k] = null;
        }
        // [TODO] we could expand via flood-fill here instead of throwing away all previously computed!
        if (cached_lazy_sdfs[k] != null) { return; }
        if (is_invalidated()) { return; }

        float use_max_offset = (float)blend_falloff;   // (float)(3 * blend_falloff);
        DMesh3 source_mesh = mesh_sources[k].GetDMeshUnsafe();
        CachingMeshSDF sdf = new CachingMeshSDF(source_mesh, grid_cell_size, cached_bvtrees[k]) {
            MaxOffsetDistance = use_max_offset
        };
        sdf.CancelF = is_invalidated;
        sdf.Initialize();
        if (is_invalidated()) { return; }

        cached_lazy_sdfs[k] = sdf;
        cached_lazy_sdf_maxdists[k] = use_max_offset;
    });
}
// for each From[i], find closest point on TargetSurface
void update_to() {
    double max_dist = double.MaxValue;
    bool bNormals = (UseNormals && Source.HasVertexNormals);

    Interval1i range = Interval1i.Range(From.Length);
    gParallel.ForEach(range, (vi) => {
        int tid = TargetSurface.FindNearestTriangle(From[vi], max_dist);
        if (tid == NGonsCore.geometry3Sharp.mesh.DMesh3.InvalidID) {
            Weights[vi] = 0;
            return;
        }

        DistPoint3Triangle3 d = MeshQueries.TriangleDistance(TargetSurface.Mesh, tid, From[vi]);
        if (d.DistanceSquared > MaxAllowableDistance * MaxAllowableDistance) {
            Weights[vi] = 0;
            return;
        }

        To[vi] = d.TriangleClosest;
        Weights[vi] = 1.0f;

        if (bNormals) {
            Vector3F fromN = Rotation * Source.GetVertexNormal(vi);
            Vector3F toN = (Vector3F)TargetSurface.Mesh.GetTriNormal(tid);
            float fDot = fromN.Dot(toN);
            Debug.Assert(math.MathUtil.IsFinite(fDot));
            if (fDot < 0) {
                Weights[vi] = 0;
            } else {
                // weight correspondences by normal agreement
                Weights[vi] += (float)Math.Sqrt(fDot);
            }
        }
    });
}
public DMesh3 Make3DTubes(Interval1i layer_range, double merge_tol, double tube_radius) {
    Polygon2d tube_profile = Polygon2d.MakeCircle(tube_radius, 8);
    Frame3f frame = Frame3f.Identity;

    DMesh3 full_mesh = new DMesh3();

    foreach (int layer_i in layer_range) {
        PlanarSlice slice = Slices[layer_i];
        frame.Origin = new Vector3f(0, 0, slice.Z);

        foreach (GeneralPolygon2d gpoly in slice.Solids) {
            List<Polygon2d> polys = new List<Polygon2d>() { gpoly.Outer };
            polys.AddRange(gpoly.Holes);

            foreach (Polygon2d poly in polys) {
                Polygon2d simpPoly = new Polygon2d(poly);
                simpPoly.Simplify(merge_tol, 0.01, true);
                if (simpPoly.VertexCount < 3) {
                    Util.gBreakToDebugger();
                }
                TubeGenerator tubegen = new TubeGenerator(simpPoly, frame, tube_profile) {
                    NoSharedVertices = true
                };
                DMesh3 tubeMesh = tubegen.Generate().MakeDMesh();
                MeshEditor.Append(full_mesh, tubeMesh);
            }
        }
    }
    return full_mesh;
}
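// A minimal usage sketch: render every slice outline in a stack as thin tubes,
// e.g. for debug visualization. Radius and merge tolerance are illustrative, and
// the public Slices list is assumed from its use inside the method above.
int nSlices = stack.Slices.Count;
DMesh3 tubes = stack.Make3DTubes(Interval1i.Range(nSlices), 0.05, 0.2);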
public virtual bool Smooth() {
    int NV = Loop.Vertices.Length;
    double a = math.MathUtil.Clamp(Alpha, 0, 1);
    double num_rounds = math.MathUtil.Clamp(Rounds, 0, 10000);

    for (int round = 0; round < num_rounds; ++round) {
        // compute smoothed positions
        gParallel.ForEach(Interval1i.Range(NV), (i) => {
            int vid = Loop.Vertices[(i + 1) % NV];
            Vector3D prev = Mesh.GetVertex(Loop.Vertices[i]);
            Vector3D cur = Mesh.GetVertex(vid);
            Vector3D next = Mesh.GetVertex(Loop.Vertices[(i + 2) % NV]);
            Vector3D centroid = (prev + next) * 0.5;
            SmoothedPostions[i] = (1 - a) * cur + (a) * centroid;
        });

        // bake the smoothed positions back into the mesh
        gParallel.ForEach(Interval1i.Range(NV), (i) => {
            int vid = Loop.Vertices[(i + 1) % NV];
            Vector3D pos = SmoothedPostions[i];
            if (ProjectF != null) {
                pos = ProjectF(pos, vid);
            }
            Mesh.SetVertex(vid, pos);
        });
    }
    return true;
}
public IOWriteResult RunBackgroundWrite() {
    // transform meshes
    gParallel.ForEach(Interval1i.Range(ExportMeshes.Length), (i) => {
        if (MeshFrames[i].Origin != Vector3f.Zero ||
            MeshFrames[i].Rotation != Quaternionf.Identity) {
            MeshTransforms.FromFrame(ExportMeshes[i], MeshFrames[i]);
        }
        MeshTransforms.FlipLeftRightCoordSystems(ExportMeshes[i]);
        if (ExportYUp == false) {
            MeshTransforms.ConvertYUpToZUp(ExportMeshes[i]);
        }
    });

    List<WriteMesh> writeMeshes = new List<WriteMesh>();
    for (int i = 0; i < ExportMeshes.Length; ++i) {
        writeMeshes.Add(new WriteMesh(ExportMeshes[i]));
    }

    WriteOptions options = WriteOptions.Defaults;
    options.bWriteBinary = true;
    options.ProgressFunc = BackgroundProgressFunc;

    StandardMeshWriter writer = new StandardMeshWriter();
    IOWriteResult result = writer.Write(WritePath, writeMeshes, options);
    return result;
}
public IntParameter() {
    name = "int_parameter";
    defaultValue = 0;
    ValidRange = new Interval1i(int.MinValue, int.MaxValue);
}
public void SetValidRange(int min, int max) {
    ValidRange = new Interval1i(min, max);
}
public virtual bool Cut() {
    double invalidDist = double.MinValue;

    // compute signed distance to the plane for each vertex
    int MaxVID = Mesh.MaxVertexID;
    double[] signs = new double[MaxVID];
    gParallel.ForEach(Interval1i.Range(MaxVID), (vid) => {
        if (Mesh.IsVertex(vid)) {
            Vector3D v = Mesh.GetVertex(vid);
            signs[vid] = (v - PlaneOrigin).Dot(PlaneNormal);
        } else {
            signs[vid] = invalidDist;
        }
    });

    HashSet<int> ZeroEdges = new HashSet<int>();
    HashSet<int> ZeroVertices = new HashSet<int>();
    HashSet<int> OnCutEdges = new HashSet<int>();

    // have to skip processing of new edges: if an edge id is >= max at start it
    // is new, otherwise if it is in the NewEdges list it is also new
    int MaxEID = Mesh.MaxEdgeID;
    HashSet<int> NewEdges = new HashSet<int>();

    // cut existing edges with plane, using edge split
    for (int eid = 0; eid < MaxEID; ++eid) {
        if (Mesh.IsEdge(eid) == false) { continue; }
        if (eid >= MaxEID || NewEdges.Contains(eid)) { continue; }

        Index2i ev = Mesh.GetEdgeV(eid);
        double f0 = signs[ev.a];
        double f1 = signs[ev.b];

        // If both signs are 0, this edge is on-contour
        // If one sign is 0, that vertex is on-contour
        int n0 = (Math.Abs(f0) < math.MathUtil.Epsilon) ? 1 : 0;
        int n1 = (Math.Abs(f1) < math.MathUtil.Epsilon) ? 1 : 0;
        if (n0 + n1 > 0) {
            if (n0 + n1 == 2) {
                ZeroEdges.Add(eid);
            } else {
                ZeroVertices.Add((n0 == 1) ? ev[0] : ev[1]);
            }
            continue;
        }

        // no crossing
        if (f0 * f1 > 0) { continue; }

        NGonsCore.geometry3Sharp.mesh.DMesh3.EdgeSplitInfo splitInfo;
        MeshResult result = Mesh.SplitEdge(eid, out splitInfo);
        if (result != MeshResult.Ok) {
            throw new Exception("MeshPlaneCut.Cut: failed in SplitEdge");
        }

        // SplitEdge just bisects the edge - move the new vertex to the
        // actual plane intersection instead
        double t = f0 / (f0 - f1);
        Vector3D newPos = (1 - t) * Mesh.GetVertex(ev.a) + (t) * Mesh.GetVertex(ev.b);
        Mesh.SetVertex(splitInfo.vNew, newPos);

        NewEdges.Add(splitInfo.eNewBN);
        NewEdges.Add(splitInfo.eNewCN);
        OnCutEdges.Add(splitInfo.eNewCN);
        if (splitInfo.eNewDN != NGonsCore.geometry3Sharp.mesh.DMesh3.InvalidID) {
            NewEdges.Add(splitInfo.eNewDN);
            OnCutEdges.Add(splitInfo.eNewDN);
        }
    }

    // remove one-rings of all positive-side vertices
    for (int i = 0; i < signs.Length; ++i) {
        if (signs[i] > 0 && Mesh.IsVertex(i)) {
            Mesh.RemoveVertex(i, true, false);
        }
    }

    // now extract boundary loops, restricted to either the zero-edges
    // we found or the edges we created
    Func<int, bool> CutEdgeFilterF = (eid) => {
        return OnCutEdges.Contains(eid) || ZeroEdges.Contains(eid);
    };
    try {
        MeshBoundaryLoops loops = new MeshBoundaryLoops(Mesh, false);
        loops.EdgeFilterF = CutEdgeFilterF;
        loops.Compute();
        CutLoops = loops.Loops;
        CutLoopsFailed = false;
    } catch {
        CutLoops = new List<EdgeLoop>();
        CutLoopsFailed = true;
    }

    return true;
} // Cut()
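// A minimal usage sketch, assuming the standard MeshPlaneCut-style constructor
// (mesh, plane origin, plane normal): cut at z=0, discard the +z side, then
// inspect the boundary loops left along the cut.
MeshPlaneCut cut = new MeshPlaneCut(mesh, new Vector3D(0, 0, 0), new Vector3D(0, 0, 1));
if (cut.Cut() && cut.CutLoopsFailed == false) {
    foreach (EdgeLoop loop in cut.CutLoops) {
        // e.g. hand each loop to a hole-filler to close the planar opening
    }
}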
DMesh3 ComputeInflation(DMesh3 planarMesh) {
    DMesh3 mesh = new DMesh3(planarMesh);

    DijkstraGraphDistance dist = new DijkstraGraphDistance(
        mesh.MaxVertexID, false,
        (vid) => { return mesh.IsVertex(vid); },
        (a, b) => { return (float)mesh.GetVertex(a).Distance(mesh.GetVertex(b)); },
        mesh.VtxVerticesItr);

    foreach (int vid in MeshIterators.BoundaryVertices(mesh)) {
        dist.AddSeed(vid, 0);
    }
    dist.Compute();
    float max_dist = dist.MaxDistance;

    float[] distances = new float[mesh.MaxVertexID];
    foreach (int vid in mesh.VertexIndices()) {
        distances[vid] = dist.GetDistance(vid);
    }

    List<int> local_maxima = new List<int>();
    foreach (int vid in MeshIterators.InteriorVertices(mesh)) {
        float d = distances[vid];
        bool is_maxima = true;
        foreach (int nbrid in mesh.VtxVerticesItr(vid)) {
            if (distances[nbrid] > d) { is_maxima = false; }
        }
        if (is_maxima) { local_maxima.Add(vid); }
    }

    // smooth distances (really should use cotan here!!)
    float smooth_alpha = 0.1f;
    int smooth_rounds = 5;
    foreach (int ri in Interval1i.Range(smooth_rounds)) {
        foreach (int vid in mesh.VertexIndices()) {
            float cur = distances[vid];
            float centroid = 0;
            int nbr_count = 0;
            foreach (int nbrid in mesh.VtxVerticesItr(vid)) {
                centroid += distances[nbrid];
                nbr_count++;
            }
            centroid /= nbr_count;
            distances[vid] = (1 - smooth_alpha) * cur + (smooth_alpha) * centroid;
        }
    }

    Vector3d normal = Vector3d.AxisZ;
    foreach (int vid in mesh.VertexIndices()) {
        if (dist.IsSeed(vid)) { continue; }
        float h = distances[vid];
        // [RMS] there are different options here...
        h = 2 * (float)Math.Sqrt(h);
        float offset = h;
        Vector3d d = mesh.GetVertex(vid);
        mesh.SetVertex(vid, d + (Vector3d)(offset * normal));
    }

    DMesh3 compacted = new DMesh3();
    var compactInfo = compacted.CompactCopy(mesh);
    IndexUtil.Apply(local_maxima, compactInfo.MapV);
    mesh = compacted;

    MeshVertexSelection boundary_sel = new MeshVertexSelection(mesh);
    HashSet<int> boundaryV = new HashSet<int>(MeshIterators.BoundaryVertices(mesh));
    boundary_sel.Select(boundaryV);
    boundary_sel.ExpandToOneRingNeighbours();

    LaplacianMeshSmoother smoother = new LaplacianMeshSmoother(mesh);
    foreach (int vid in boundary_sel) {
        if (boundaryV.Contains(vid)) {
            smoother.SetConstraint(vid, mesh.GetVertex(vid), 100.0f, true);
        } else {
            smoother.SetConstraint(vid, mesh.GetVertex(vid), 10.0f, false);
        }
    }
    foreach (int vid in local_maxima) {
        smoother.SetConstraint(vid, mesh.GetVertex(vid), 50, false);
    }
    bool ok = smoother.SolveAndUpdateMesh();
    Util.gDevAssert(ok);

    List<int> intVerts = new List<int>(MeshIterators.InteriorVertices(mesh));
    MeshIterativeSmooth smooth = new MeshIterativeSmooth(mesh, intVerts.ToArray(), true);
    smooth.SmoothType = MeshIterativeSmooth.SmoothTypes.Cotan;
    smooth.Rounds = 10;
    smooth.Alpha = 0.1f;
    smooth.Smooth();

    return mesh;
}
// join disconnected vertices within distance threshold
protected int JoinInTolerance_Parallel(DGraph2 graph, double fMergeDist) {
    double mergeSqr = fMergeDist * fMergeDist;
    int NV = graph.MaxVertexID;
    if (collapse_cache.size < NV) {
        collapse_cache.resize(NV);
    }

    gParallel.ForEach(Interval1i.Range(NV), (a) => {
        collapse_cache[a] = new Vector2d(-1, double.MaxValue);
        if (!graph.IsVertex(a)) { return; }

        Vector2d va = graph.GetVertex(a);
        int bNearest = -1;
        double nearDistSqr = double.MaxValue;
        for (int b = a + 1; b < NV; ++b) {
            if (b == a || graph.IsVertex(b) == false) { continue; }
            double distsqr = va.DistanceSquared(graph.GetVertex(b));
            if (distsqr < mergeSqr && distsqr < nearDistSqr) {
                if (graph.FindEdge(a, b) == DGraph2.InvalidID) {
                    nearDistSqr = distsqr;
                    bNearest = b;
                }
            }
        }
        if (bNearest != -1) {
            collapse_cache[a] = new Vector2d(bNearest, nearDistSqr);
        }
    });

    // [TODO] sort
    int merged = 0;
    for (int a = 0; a < NV; ++a) {
        if (collapse_cache[a].x == -1) { continue; }
        int bNearest = (int)collapse_cache[a].x;

        Vector2d pos_a = graph.GetVertex(a);
        Vector2d pos_bNearest = graph.GetVertex(bNearest);

        /*int eid = */ graph.AppendEdge(a, bNearest);
        DGraph2.EdgeCollapseInfo collapseInfo;
        graph.CollapseEdge(bNearest, a, out collapseInfo);

        graph_cache.RemovePointUnsafe(a, pos_a);
        last_step_size[a] = 0;
        graph_cache.UpdatePointUnsafe(bNearest, pos_bNearest, graph.GetVertex(bNearest));

        merged++;
    }
    return merged;
}
// smooth vertices, but don't move further than max_move
protected void smooth_pass(DGraph2 graph, int passes, double smooth_alpha, double max_move) {
    double max_move_sqr = max_move * max_move;

    int NV = graph.MaxVertexID;
    DVector<Vector2d> smoothedV = offset_cache;
    if (smoothedV.size < NV) { smoothedV.resize(NV); }
    if (position_cache.size < NV) { position_cache.resize(NV); }

    for (int pi = 0; pi < passes; ++pi) {
        gParallel.ForEach(Interval1i.Range(NV), (vid) => {
            if (!graph.IsVertex(vid)) { return; }
            Vector2d v = graph.GetVertex(vid);
            Vector2d c = Vector2d.Zero;
            int n = 0;
            foreach (int vnbr in graph.VtxVerticesItr(vid)) {
                c += graph.GetVertex(vnbr);
                n++;
            }
            if (n >= 2) {
                c /= n;
                Vector2d dv = (smooth_alpha) * (c - v);
                if (dv.LengthSquared > max_move_sqr) {
                    /*double d = */ dv.Normalize();
                    dv *= max_move;
                }
                v += dv;
            }
            smoothedV[vid] = v;
        });

        if (pi == 0) {
            // on the first pass, remember the original positions for the
            // spatial-cache update at the end
            for (int vid = 0; vid < NV; ++vid) {
                if (graph.IsVertex(vid)) {
                    position_cache[vid] = graph.GetVertex(vid);
                    graph.SetVertex(vid, smoothedV[vid]);
                }
            }
        } else {
            for (int vid = 0; vid < NV; ++vid) {
                if (graph.IsVertex(vid)) {
                    graph.SetVertex(vid, smoothedV[vid]);
                }
            }
        }
    }

    for (int vid = 0; vid < NV; ++vid) {
        if (graph.IsVertex(vid)) {
            graph_cache.UpdatePointUnsafe(vid, position_cache[vid], smoothedV[vid]);
        }
    }
}
/// <summary>
/// Slice the meshes and return the slice stack.
/// </summary>
public PlanarSliceStack Compute() {
    if (Meshes.Count == 0) { return new PlanarSliceStack(); }

    Interval1d zrange = Interval1d.Empty;
    foreach (var meshinfo in Meshes) {
        zrange.Contain(meshinfo.bounds.Min.z);
        zrange.Contain(meshinfo.bounds.Max.z);
    }
    if (SetMinZValue != double.MinValue) {
        zrange.a = SetMinZValue;
    }

    int nLayers = (int)(zrange.Length / LayerHeightMM);
    if (nLayers > MaxLayerCount) {
        throw new Exception("MeshPlanarSlicer.Compute: exceeded layer limit. Increase .MaxLayerCount.");
    }

    // make list of slice heights (could be irregular)
    List<double> heights = new List<double>();
    for (int i = 0; i < nLayers + 1; ++i) {
        double t = zrange.a + (double)i * LayerHeightMM;
        if (SliceLocation == SliceLocations.EpsilonBase) {
            t += 0.01 * LayerHeightMM;
        } else if (SliceLocation == SliceLocations.MidLine) {
            t += 0.5 * LayerHeightMM;
        }
        heights.Add(t);
    }
    int NH = heights.Count;

    // process each *slice* in parallel
    PlanarSlice[] slices = new PlanarSlice[NH];
    for (int i = 0; i < NH; ++i) {
        slices[i] = SliceFactoryF(heights[i], i);
        slices[i].EmbeddedPathWidth = OpenPathDefaultWidthMM;
    }

    // assume Resolve() takes 2x as long as meshes...
    TotalCompute = (Meshes.Count * NH) + (2 * NH);
    Progress = 0;

    // compute slices separately for each mesh
    for (int mi = 0; mi < Meshes.Count; ++mi) {
        if (Cancelled()) { break; }

        DMesh3 mesh = Meshes[mi].mesh;
        PrintMeshOptions mesh_options = Meshes[mi].options;

        // [TODO] should we hang on to this spatial? or should it be part of assembly?
        DMeshAABBTree3 spatial = new DMeshAABBTree3(mesh, true);
        AxisAlignedBox3d bounds = Meshes[mi].bounds;

        bool is_cavity = mesh_options.IsCavity;
        bool is_crop = mesh_options.IsCropRegion;
        bool is_support = mesh_options.IsSupport;
        bool is_closed = (mesh_options.IsOpen) ? false : mesh.IsClosed();
        var useOpenMode = (mesh_options.OpenPathMode == PrintMeshOptions.OpenPathsModes.Default) ?
            DefaultOpenPathMode : mesh_options.OpenPathMode;

        // each layer is independent so we can do in parallel
        gParallel.ForEach(Interval1i.Range(NH), (i) => {
            if (Cancelled()) { return; }
            double z = heights[i];
            if (z < bounds.Min.z || z > bounds.Max.z) { return; }

            // compute cut
            Polygon2d[] polys;
            PolyLine2d[] paths;
            compute_plane_curves(mesh, spatial, z, is_closed, out polys, out paths);

            // if we didn't hit anything, try again with jittered plane
            // [TODO] this could be better...
            if ((is_closed && polys.Length == 0) ||
                (is_closed == false && polys.Length == 0 && paths.Length == 0)) {
                compute_plane_curves(mesh, spatial, z + LayerHeightMM * 0.25, is_closed, out polys, out paths);
            }

            if (is_closed) {
                // construct planar complex and "solids" (ie outer polys and nested holes)
                PlanarComplex complex = new PlanarComplex();
                foreach (Polygon2d poly in polys) {
                    complex.Add(poly);
                }

                PlanarComplex.FindSolidsOptions options = PlanarComplex.FindSolidsOptions.Default;
                options.WantCurveSolids = false;
                options.SimplifyDeviationTolerance = 0.001;
                options.TrustOrientations = true;
                options.AllowOverlappingHoles = true;

                PlanarComplex.SolidRegionInfo solids = complex.FindSolidRegions(options);

                if (is_support) {
                    add_support_polygons(slices[i], solids.Polygons, mesh_options);
                } else if (is_cavity) {
                    add_cavity_polygons(slices[i], solids.Polygons, mesh_options);
                } else if (is_crop) {
                    add_crop_region_polygons(slices[i], solids.Polygons, mesh_options);
                } else {
                    add_solid_polygons(slices[i], solids.Polygons, mesh_options);
                }
            } else if (useOpenMode != PrintMeshOptions.OpenPathsModes.Ignored) {
                foreach (PolyLine2d pline in paths) {
                    if (useOpenMode == PrintMeshOptions.OpenPathsModes.Embedded) {
                        slices[i].AddEmbeddedPath(pline);
                    } else {
                        slices[i].AddClippedPath(pline);
                    }
                }

                // [TODO] does not really handle clipped polygons properly,
                // there will be an extra break somewhere...
                foreach (Polygon2d poly in polys) {
                    PolyLine2d pline = new PolyLine2d(poly, true);
                    if (useOpenMode == PrintMeshOptions.OpenPathsModes.Embedded) {
                        slices[i].AddEmbeddedPath(pline);
                    } else {
                        slices[i].AddClippedPath(pline);
                    }
                }
            }

            Interlocked.Increment(ref Progress);
        });   // end of parallel.foreach
    }   // end mesh iter

    // resolve planar intersections, etc
    gParallel.ForEach(Interval1i.Range(NH), (i) => {
        if (Cancelled()) { return; }
        slices[i].Resolve();
        Interlocked.Add(ref Progress, 2);
    });

    // discard spurious empty slices
    int last = slices.Length - 1;
    while (slices[last].IsEmpty && last > 0) { last--; }
    int first = 0;
    if (DiscardEmptyBaseSlices) {
        while (slices[first].IsEmpty && first < slices.Length) { first++; }
    }

    PlanarSliceStack stack = SliceStackFactoryF();
    for (int k = first; k <= last; ++k) {
        stack.Add(slices[k]);
    }

    if (SupportMinZTips) {
        stack.AddMinZTipSupportPoints(MinZTipMaxDiam, MinZTipExtraLayers);
    }

    return stack;
}
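// A minimal usage sketch of the slicer above. AddMesh is assumed as the way the
// Meshes list gets populated; the layer height is illustrative.
MeshPlanarSlicer slicer = new MeshPlanarSlicer() { LayerHeightMM = 0.2 };
slicer.AddMesh(mesh);
PlanarSliceStack sliceStack = slicer.Compute();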
public void ComputeOnBackgroundThread() {
    Deviation = null;

    DebugUtil.Log(SO.GetToolpathStats());

    ToolpathSet paths = SO.GetToolpaths();
    PlanarSliceStack slices = SO.GetSlices();
    var settings = SO.GetSettings();

    // bed origin is centered on most machines, but not on Makerbots
    double bed_width = settings.Machine.BedSizeXMM;
    double bed_height = settings.Machine.BedSizeYMM;
    Vector3d origin = new Vector3d(-bed_width / 2, -bed_height / 2, 0);
    if (settings is gs.info.MakerbotSettings) {
        origin = Vector3d.Zero;
    }

    List<DeviationPt> points = new List<DeviationPt>();
    SpinLock pointsLock = new SpinLock();
    Action<DeviationPt> appendPointF = (pt) => {
        bool entered = false;
        pointsLock.Enter(ref entered);
        points.Add(pt);
        pointsLock.Exit();
    };

    double tolerance = settings.Machine.NozzleDiamMM * 0.5 + DeviationToleranceMM;

    gParallel.ForEach(Interval1i.Range(slices.Count), (slicei) => {
        PlanarSlice slice = slices[slicei];
        double dz = 0.5 * settings.LayerHeightMM;
        Interval1d zrange = new Interval1d(slice.Z - dz, slice.Z + dz);

        double cellSize = 2.0f;
        ToolpathsLayerGrid grid = new ToolpathsLayerGrid();
        grid.Build(paths, zrange, cellSize);

        foreach (GeneralPolygon2d poly in slice.Solids) {
            measure_poly(poly.Outer, slice.Z, grid, tolerance, appendPointF);
            foreach (var hole in poly.Holes) {
                measure_poly(hole, slice.Z, grid, tolerance, appendPointF);
            }
        }
    });

    int N = points.Count;
    for (int k = 0; k < N; ++k) {
        DeviationPt pt = points[k];
        Vector3d v = origin + pt.pos;
        v = MeshTransforms.ConvertZUpToYUp(v);
        pt.pos = MeshTransforms.FlipLeftRightCoordSystems(v);
        points[k] = pt;
    }

    Deviation = new DeviationData();
    Deviation.DeviationPoints = points;

    OnGeometryUpdateRequired?.Invoke(this);
}
public void Sort() {
    int N = Components.Count;
    ComponentMesh[] comps = Components.ToArray();

    // sort by bbox containment to speed up testing (does it??)
    Array.Sort(comps, (i, j) => {
        return i.Bounds.Contains(j.Bounds) ? -1 : 1;
    });

    // containment sets
    bool[] bIsContained = new bool[N];
    Dictionary<int, List<int>> ContainSets = new Dictionary<int, List<int>>();
    Dictionary<int, List<int>> ContainedParents = new Dictionary<int, List<int>>();
    SpinLock dataLock = new SpinLock();

    // [TODO] this is 90% of compute time...
    //  - if I know X contains Y, and Y contains Z, then I don't have to check
    //    that X contains Z - can we exploit this somehow?
    //  - if j contains i, then it cannot be that i contains j. But we are
    //    not checking for this! (although maybe bbox check still early-outs it?)

    // construct containment sets
    gParallel.ForEach(Interval1i.Range(N), (i) => {
        ComponentMesh compi = comps[i];
        if (compi.IsClosed == false && AllowOpenContainers == false) { return; }

        for (int j = 0; j < N; ++j) {
            if (i == j) { continue; }
            ComponentMesh compj = comps[j];

            // cannot be contained if bounds are not contained
            if (compi.Bounds.Contains(compj.Bounds) == false) { continue; }

            // any other early-outs??
            if (compi.Contains(compj)) {
                bool entered = false;
                dataLock.Enter(ref entered);

                compj.InsideOf.Add(compi);
                compi.InsideSet.Add(compj);

                if (ContainSets.ContainsKey(i) == false) {
                    ContainSets.Add(i, new List<int>());
                }
                ContainSets[i].Add(j);
                bIsContained[j] = true;

                if (ContainedParents.ContainsKey(j) == false) {
                    ContainedParents.Add(j, new List<int>());
                }
                ContainedParents[j].Add(i);

                dataLock.Exit();
            }
        }
    });

    List<MeshSolid> solids = new List<MeshSolid>();
    HashSet<ComponentMesh> used = new HashSet<ComponentMesh>();
    Dictionary<ComponentMesh, int> CompToOuterIndex = new Dictionary<ComponentMesh, int>();

    List<int> ParentsToProcess = new List<int>();

    // The following is a lot of code but it is very similar, just not clear how
    // to refactor out the common functionality:
    //  1) find all the top-level uncontained polys and add them to the final polys list
    //  2a) for any poly contained in those parent-polys, that is not also contained
    //      in anything else, add as hole to that poly
    //  2b) remove all those used parents & holes from consideration
    //  2c) now find all the "new" top-level polys
    //  3) repeat 2a-c until done all polys
    //  4) any remaining polys must be interior solids w/ no holes,
    //     **or** weird leftovers like intersecting polys...

    // add all top-level uncontained polys
    for (int i = 0; i < N; ++i) {
        ComponentMesh compi = comps[i];
        if (bIsContained[i]) { continue; }
        MeshSolid g = new MeshSolid() { Outer = compi };
        int idx = solids.Count;
        CompToOuterIndex[compi] = idx;
        used.Add(compi);
        if (ContainSets.ContainsKey(i)) {
            ParentsToProcess.Add(i);
        }
        solids.Add(g);
    }

    // keep iterating until we have processed all parents
    while (ParentsToProcess.Count > 0) {
        List<int> ContainersToRemove = new List<int>();

        // for all top-level components that contain children, add those children
        // as long as they do not have multiple contain-parents
        foreach (int i in ParentsToProcess) {
            ComponentMesh parentComp = comps[i];
            int outer_idx = CompToOuterIndex[parentComp];

            List<int> children = ContainSets[i];
            foreach (int childj in children) {
                ComponentMesh childComp = comps[childj];
                Util.gDevAssert(used.Contains(childComp) == false);

                // skip multiply-contained children
                List<int> parents = ContainedParents[childj];
                if (parents.Count > 1) { continue; }

                solids[outer_idx].Cavities.Add(childComp);
                used.Add(childComp);
                if (ContainSets.ContainsKey(childj)) {
                    ContainersToRemove.Add(childj);
                }
            }
            ContainersToRemove.Add(i);
        }

        // remove all containers that are no longer valid
        foreach (int ci in ContainersToRemove) {
            ContainSets.Remove(ci);
            // have to remove from each ContainedParents list
            List<int> keys = new List<int>(ContainedParents.Keys);
            foreach (int j in keys) {
                if (ContainedParents[j].Contains(ci)) {
                    ContainedParents[j].Remove(ci);
                }
            }
        }

        ParentsToProcess.Clear();

        // now find next-level uncontained parents...
        for (int i = 0; i < N; ++i) {
            ComponentMesh compi = comps[i];
            if (used.Contains(compi)) { continue; }
            if (ContainSets.ContainsKey(i) == false) { continue; }
            List<int> parents = ContainedParents[i];
            if (parents.Count > 0) { continue; }
            MeshSolid g = new MeshSolid() { Outer = compi };
            int idx = solids.Count;
            CompToOuterIndex[compi] = idx;
            used.Add(compi);
            if (ContainSets.ContainsKey(i)) {
                ParentsToProcess.Add(i);
            }
            solids.Add(g);
        }
    }

    // any remaining components must be top-level
    for (int i = 0; i < N; ++i) {
        ComponentMesh compi = comps[i];
        if (used.Contains(compi)) { continue; }
        MeshSolid g = new MeshSolid() { Outer = compi };
        solids.Add(g);
    }

    Solids = solids;
}
public void FindConnectedT() {
    Components = new List<Component>();

    int NT = Mesh.MaxTriangleID;

    // [TODO] could use Euler formula to determine if mesh is closed genus-0...
    Func<int, bool> filter_func = (i) => { return Mesh.IsTriangle(i); };
    if (FilterF != null) {
        filter_func = (i) => { return Mesh.IsTriangle(i) && FilterF(i); };
    }

    // initial active set contains all valid triangles
    byte[] active = new byte[Mesh.MaxTriangleID];
    Interval1i activeRange = Interval1i.Empty;
    if (FilterSet != null) {
        for (int i = 0; i < NT; ++i) {
            active[i] = 255;
        }
        foreach (int tid in FilterSet) {
            bool bValid = filter_func(tid);
            if (bValid) {
                active[tid] = 0;
                activeRange.Contain(tid);
            }
        }
    } else {
        for (int i = 0; i < NT; ++i) {
            bool bValid = filter_func(i);
            if (bValid) {
                active[i] = 0;
                activeRange.Contain(i);
            } else {
                active[i] = 255;
            }
        }
    }

    // temporary buffers
    List<int> queue = new List<int>(NT / 10);
    List<int> cur_comp = new List<int>(NT / 10);

    // keep finding valid seed triangles and growing connected components until we are done
    IEnumerable<int> range = (FilterSet != null) ? FilterSet : activeRange;
    foreach (int i in range) {
        if (active[i] == 255) { continue; }

        int seed_t = i;
        if (SeedFilterF != null && SeedFilterF(seed_t) == false) { continue; }

        queue.Add(seed_t);
        active[seed_t] = 1;          // in queue

        while (queue.Count > 0) {
            int cur_t = queue[queue.Count - 1];
            queue.RemoveAt(queue.Count - 1);

            active[cur_t] = 2;       // tri has been processed
            cur_comp.Add(cur_t);

            Index3i nbrs = Mesh.GetTriNeighbourTris(cur_t);
            for (int j = 0; j < 3; ++j) {
                int nbr_t = nbrs[j];
                if (nbr_t != NGonsCore.geometry3Sharp.mesh.DMesh3.InvalidID && active[nbr_t] == 0) {
                    queue.Add(nbr_t);
                    active[nbr_t] = 1;   // in queue
                }
            }
        }

        Component comp = new Component() { Indices = cur_comp.ToArray() };
        Components.Add(comp);

        // remove tris in this component from active set
        for (int j = 0; j < comp.Indices.Length; ++j) {
            active[comp.Indices[j]] = 255;
        }
        cur_comp.Clear();
        queue.Clear();
    }
}
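// A minimal usage sketch, assuming a MeshConnectedComponents-style wrapper class
// around this method: find connected triangle patches and report the largest.
var components = new MeshConnectedComponents(mesh);   // constructor assumed
components.FindConnectedT();
int largestTriCount = 0;
foreach (var c in components.Components) {
    largestTriCount = Math.Max(largestTriCount, c.Indices.Length);
}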
// join disconnected vertices within distance threshold. Use point-hashtable to make this faster.
protected int JoinInTolerance_Parallel_Cache(DGraph2 graph, double fMergeDist) {
    double mergeSqr = fMergeDist * fMergeDist;
    int NV = graph.MaxVertexID;
    if (collapse_cache.size < NV) {
        collapse_cache.resize(NV);
    }

    gParallel.ForEach(Interval1i.Range(NV), (a) => {
        collapse_cache[a] = new Vector2d(-1, double.MaxValue);
        if (!graph.IsVertex(a)) { return; }

        Vector2d va = graph.GetVertex(a);
        KeyValuePair<int, double> found = graph_cache.FindNearestInRadius(va, mergeSqr,
            (b) => { return va.DistanceSquared(graph.GetVertex(b)); },
            (b) => { return b <= a || (graph.FindEdge(a, b) != DGraph2.InvalidID); });
        if (found.Key != -1) {
            collapse_cache[a] = new Vector2d(found.Key, found.Value);
        }
    });

    // [TODO] sort
    int merged = 0;
    for (int a = 0; a < NV; ++a) {
        if (collapse_cache[a].x == -1) { continue; }
        int bNearest = (int)collapse_cache[a].x;
        if (!graph.IsVertex(bNearest)) { continue; }

        Vector2d pos_a = graph.GetVertex(a);
        Vector2d pos_bNearest = graph.GetVertex(bNearest);

        /*int eid = */ graph.AppendEdge(a, bNearest);
        DGraph2.EdgeCollapseInfo collapseInfo;
        graph.CollapseEdge(bNearest, a, out collapseInfo);

        graph_cache.RemovePointUnsafe(a, pos_a);
        last_step_size[a] = 0;
        graph_cache.UpdatePointUnsafe(bNearest, pos_bNearest, graph.GetVertex(bNearest));
        collapse_cache[bNearest] = new Vector2d(-1, double.MaxValue);

        merged++;
    }
    return merged;
}
/// <summary>
/// Slice the meshes and return the slice stack.
/// </summary>
public PlanarSliceStack Compute() {
    if (Meshes.Count == 0) { return new PlanarSliceStack(); }

    Interval1d zrange = Interval1d.Empty;
    foreach (var meshinfo in Meshes) {
        zrange.Contain(meshinfo.bounds.Min.z);
        zrange.Contain(meshinfo.bounds.Max.z);
    }
    if (SetMinZValue != double.MinValue) {
        zrange.a = SetMinZValue;
    }

    // construct layers
    List<PlanarSlice> slice_list = new List<PlanarSlice>();

    double cur_layer_z = zrange.a;
    int layer_i = 0;
    while (cur_layer_z < zrange.b) {
        double layer_height = get_layer_height(layer_i);
        double z = cur_layer_z;
        Interval1d zspan = new Interval1d(z, z + layer_height);
        if (SliceLocation == SliceLocations.EpsilonBase) {
            z += 0.01 * layer_height;
        } else if (SliceLocation == SliceLocations.MidLine) {
            z += 0.5 * layer_height;
        }

        PlanarSlice slice = SliceFactoryF(zspan, z, layer_i);
        slice.EmbeddedPathWidth = OpenPathDefaultWidthMM;
        slice_list.Add(slice);

        layer_i++;
        cur_layer_z += layer_height;
    }
    int NH = slice_list.Count;
    if (NH > MaxLayerCount) {
        throw new Exception("MeshPlanarSlicer.Compute: exceeded layer limit. Increase .MaxLayerCount.");
    }

    PlanarSlice[] slices = slice_list.ToArray();

    // determine if we have crop objects
    bool have_crop_objects = false;
    foreach (var mesh in Meshes) {
        if (mesh.options.IsCropRegion) { have_crop_objects = true; }
    }

    // assume Resolve() takes 2x as long as meshes...
    TotalCompute = (Meshes.Count * NH) + (2 * NH);
    Progress = 0;

    // compute slices separately for each mesh
    for (int mi = 0; mi < Meshes.Count; ++mi) {
        if (Cancelled()) { break; }

        DMesh3 mesh = Meshes[mi].mesh;
        PrintMeshOptions mesh_options = Meshes[mi].options;

        // [TODO] should we hang on to this spatial? or should it be part of assembly?
        DMeshAABBTree3 spatial = new DMeshAABBTree3(mesh, true);
        AxisAlignedBox3d bounds = Meshes[mi].bounds;

        bool is_cavity = mesh_options.IsCavity;
        bool is_crop = mesh_options.IsCropRegion;
        bool is_support = mesh_options.IsSupport;
        bool is_closed = (mesh_options.IsOpen) ? false : mesh.IsClosed();
        var useOpenMode = (mesh_options.OpenPathMode == OpenPathsModes.Default) ?
            DefaultOpenPathMode : mesh_options.OpenPathMode;

        // each layer is independent so we can do in parallel
        gParallel.ForEach(Interval1i.Range(NH), (i) => {
            if (Cancelled()) { return; }
            double z = slices[i].Z;
            if (z < bounds.Min.z || z > bounds.Max.z) { return; }

            // compute cut
            Polygon2d[] polys;
            PolyLine2d[] paths;
            compute_plane_curves(mesh, spatial, z, is_closed, out polys, out paths);

            // if we didn't hit anything, try again with jittered plane
            // [TODO] this could be better...
            if ((is_closed && polys.Length == 0) ||
                (is_closed == false && polys.Length == 0 && paths.Length == 0)) {
                double jitterz = slices[i].LayerZSpan.Interpolate(0.75);
                compute_plane_curves(mesh, spatial, jitterz, is_closed, out polys, out paths);
            }

            if (is_closed) {
                // construct planar complex and "solids" (ie outer polys and nested holes)
                PlanarComplex complex = new PlanarComplex();
                foreach (Polygon2d poly in polys) {
                    complex.Add(poly);
                }

                PlanarComplex.FindSolidsOptions options = PlanarComplex.FindSolidsOptions.Default;
                options.WantCurveSolids = false;
                options.SimplifyDeviationTolerance = 0.001;
                options.TrustOrientations = true;
                options.AllowOverlappingHoles = true;

                PlanarComplex.SolidRegionInfo solids = complex.FindSolidRegions(options);
                List<GeneralPolygon2d> solid_polygons = ApplyValidRegions(solids.Polygons);

                if (is_support) {
                    add_support_polygons(slices[i], solid_polygons, mesh_options);
                } else if (is_cavity) {
                    add_cavity_polygons(slices[i], solid_polygons, mesh_options);
                } else if (is_crop) {
                    add_crop_region_polygons(slices[i], solid_polygons, mesh_options);
                } else {
                    add_solid_polygons(slices[i], solid_polygons, mesh_options);
                }
            } else if (useOpenMode != OpenPathsModes.Ignored) {
                // [TODO] does not really handle clipped polygons properly,
                // there will be an extra break somewhere...
                List<PolyLine2d> all_paths = new List<PolyLine2d>(paths);
                foreach (Polygon2d poly in polys) {
                    all_paths.Add(new PolyLine2d(poly, true));
                }

                List<PolyLine2d> open_polylines = ApplyValidRegions(all_paths);
                foreach (PolyLine2d pline in open_polylines) {
                    if (useOpenMode == OpenPathsModes.Embedded) {
                        slices[i].AddEmbeddedPath(pline);
                    } else {
                        slices[i].AddClippedPath(pline);
                    }
                }
            }

            Interlocked.Increment(ref Progress);
        });   // end of parallel.foreach
    }   // end mesh iter

    // resolve planar intersections, etc
    gParallel.ForEach(Interval1i.Range(NH), (i) => {
        if (Cancelled()) { return; }
        if (have_crop_objects && slices[i].InputCropRegions.Count == 0) {
            // don't resolve, we have fully cropped this layer
        } else {
            slices[i].Resolve();
        }
        Interlocked.Add(ref Progress, 2);
    });

    // discard spurious empty slices
    int last = slices.Length - 1;
    while (slices[last].IsEmpty && last > 0) { last--; }
    int first = 0;
    if (DiscardEmptyBaseSlices || have_crop_objects) {
        while (slices[first].IsEmpty && first < slices.Length) { first++; }
    }

    PlanarSliceStack stack = SliceStackFactoryF();
    for (int k = first; k <= last; ++k) {
        stack.Add(slices[k]);
    }

    if (SupportMinZTips) {
        stack.AddMinZTipSupportPoints(MinZTipMaxDiam, MinZTipExtraLayers);
    }

    return stack;
}
/// <summary>
/// Slice the meshes and return the slice stack.
/// </summary>
public Result Compute() {
    Result result = new Result();
    if (Meshes.Count == 0) { return result; }

    // find Z interval we want to slice in
    Interval1d zrange = Interval1d.Empty;
    foreach (var meshinfo in Meshes) {
        zrange.Contain(meshinfo.bounds.Min.z);
        zrange.Contain(meshinfo.bounds.Max.z);
    }
    if (SetMinZValue != double.MinValue) {
        zrange.a = SetMinZValue;
    }

    result.TopZ = Math.Round(zrange.b, PrecisionDigits);
    result.BaseZ = Math.Round(zrange.a, PrecisionDigits);

    // [TODO] might be able to make better decisions if we took flat regions into
    // account when constructing initial Z-heights? if we have a large flat region
    // just below a Z-step, it might make sense to do two smaller Z-steps so we can
    // exactly hit it??

    // construct list of clearing Z-heights
    List<double> clearingZLayers = new List<double>();
    double cur_layer_z = zrange.b;
    int layer_i = 0;
    while (cur_layer_z > zrange.a) {
        double layer_height = get_layer_height(layer_i);
        cur_layer_z -= layer_height;
        double z = Math.Round(cur_layer_z, PrecisionDigits);
        clearingZLayers.Add(z);
        layer_i++;
    }
    if (clearingZLayers.Last() < result.BaseZ) {
        clearingZLayers[clearingZLayers.Count - 1] = result.BaseZ;
    }
    if (clearingZLayers.Last() == clearingZLayers[clearingZLayers.Count - 2]) {
        clearingZLayers.RemoveAt(clearingZLayers.Count - 1);
    }

    // construct layer slices from Z-heights
    List<PlanarSlice> clearing_slice_list = new List<PlanarSlice>();
    layer_i = 0;
    for (int i = 0; i < clearingZLayers.Count; ++i) {
        double layer_height = (i == clearingZLayers.Count - 1) ?
            (result.TopZ - clearingZLayers[i]) : (clearingZLayers[i + 1] - clearingZLayers[i]);
        double z = clearingZLayers[i];
        Interval1d zspan = new Interval1d(z, z + layer_height);
        if (SliceLocation == SliceLocations.EpsilonBase) {
            z += 0.001;
        }

        PlanarSlice slice = SliceFactoryF(zspan, z, layer_i);
        clearing_slice_list.Add(slice);
        layer_i++;
    }

    int NH = clearing_slice_list.Count;
    if (NH > MaxLayerCount) {
        throw new Exception("MeshPlanarSlicer.Compute: exceeded layer limit. Increase .MaxLayerCount.");
    }

    PlanarSlice[] clearing_slices = clearing_slice_list.ToArray();

    // assume Resolve() takes 2x as long as meshes...
    TotalCompute = (Meshes.Count * NH) + (2 * NH);
    Progress = 0;

    // compute slices separately for each mesh
    for (int mi = 0; mi < Meshes.Count; ++mi) {
        if (Cancelled()) { break; }

        DMesh3 mesh = Meshes[mi].mesh;
        PrintMeshOptions mesh_options = Meshes[mi].options;

        // [TODO] should we hang on to this spatial? or should it be part of assembly?
        DMeshAABBTree3 spatial = new DMeshAABBTree3(mesh, true);
        AxisAlignedBox3d bounds = Meshes[mi].bounds;

        bool is_cavity = mesh_options.IsCavity;
        bool is_crop = mesh_options.IsCropRegion;
        bool is_support = mesh_options.IsSupport;
        bool is_closed = (mesh_options.IsOpen) ? false : mesh.IsClosed();
        var useOpenMode = (mesh_options.OpenPathMode == PrintMeshOptions.OpenPathsModes.Default) ?
            DefaultOpenPathMode : mesh_options.OpenPathMode;

        if (is_crop || is_support) {
            throw new Exception("Not supported!");
        }

        // each layer is independent so we can do in parallel
        gParallel.ForEach(Interval1i.Range(NH), (i) => {
            if (Cancelled()) { return; }
            double z = clearing_slices[i].Z;
            if (z < bounds.Min.z || z > bounds.Max.z) { return; }

            // compute cut
            Polygon2d[] polys;
            PolyLine2d[] paths;
            ComputeSlicePlaneCurves(mesh, spatial, z, is_closed, out polys, out paths);

            if (is_closed) {
                // construct planar complex and "solids" (ie outer polys and nested holes)
                PlanarComplex complex = new PlanarComplex();
                foreach (Polygon2d poly in polys) {
                    complex.Add(poly);
                }

                PlanarComplex.FindSolidsOptions options = PlanarComplex.FindSolidsOptions.Default;
                options.WantCurveSolids = false;
                options.SimplifyDeviationTolerance = 0.001;
                options.TrustOrientations = true;
                options.AllowOverlappingHoles = true;

                PlanarComplex.SolidRegionInfo solids = complex.FindSolidRegions(options);
                List<GeneralPolygon2d> solid_polygons = ApplyValidRegions(solids.Polygons);

                if (is_cavity) {
                    add_cavity_polygons(clearing_slices[i], solid_polygons, mesh_options);
                } else {
                    if (ExpandStockAmount > 0) {
                        solid_polygons = ClipperUtil.MiterOffset(solid_polygons, ExpandStockAmount);
                    }
                    add_solid_polygons(clearing_slices[i], solid_polygons, mesh_options);
                }
            }

            Interlocked.Increment(ref Progress);
        });   // end of parallel.foreach
    }   // end mesh iter

    // resolve planar intersections, etc
    gParallel.ForEach(Interval1i.Range(NH), (i) => {
        if (Cancelled()) { return; }
        clearing_slices[i].Resolve();
        Interlocked.Add(ref Progress, 2);
    });

    // add to clearing stack
    result.Clearing = SliceStackFactoryF();
    for (int k = 0; k < clearing_slices.Length; ++k) {
        result.Clearing.Add(clearing_slices[k]);
    }

    /*
     * Horizontal planar regions finishing pass.
     * First we find all planar horizontal Z-regions big enough to mill.
     * Then we add slices at the Z's we haven't touched yet.
     *
     * Cannot just 'fill' planar regions because we will miss edges that might
     * be millable. So we grow the region and then intersect with the
     * full-slice millable area.
     */

    // find set of horizontal flat regions
    Dictionary<double, List<PlanarRegion>> flat_regions = FindPlanarZRegions(ToolDiameter);
    if (flat_regions.Count == 0) {
        goto done_slicing;
    }

    // if we have already milled this exact Z-height in the clearing pass, we can skip it
    List<double> doneZ = new List<double>();
    foreach (double z in flat_regions.Keys) {
        if (clearingZLayers.Contains(z)) { doneZ.Add(z); }
    }
    foreach (var z in doneZ) {
        flat_regions.Remove(z);
    }

    // create slice for each layer
    PlanarSlice[] horz_slices = new PlanarSlice[flat_regions.Count];
    List<double> flatZ = new List<double>(flat_regions.Keys);
    flatZ.Sort();
    for (int k = 0; k < horz_slices.Length; ++k) {
        double z = flatZ[k];
        Interval1d zspan = new Interval1d(z, z + LayerHeightMM);
        horz_slices[k] = SliceFactoryF(zspan, z, k);

        // compute full millable region slightly above this slice
        PlanarSlice clip_slice = ComputeSolidSliceAtZ(z + 0.0001, false);
        clip_slice.Resolve();

        // extract planar polys
        List<Polygon2d> polys = GetPlanarPolys(flat_regions[z]);
        PlanarComplex complex = new PlanarComplex();
        foreach (Polygon2d poly in polys) {
            complex.Add(poly);
        }

        // convert to planar solids
        PlanarComplex.FindSolidsOptions options = PlanarComplex.FindSolidsOptions.SortPolygons;
        options.SimplifyDeviationTolerance = 0.001;
        options.TrustOrientations = true;
        options.AllowOverlappingHoles = true;

        PlanarComplex.SolidRegionInfo solids = complex.FindSolidRegions(options);
        List<GeneralPolygon2d> solid_polygons = ApplyValidRegions(solids.Polygons);

        // If planar solid has holes, then when we do inset later, we might lose
        // too-thin parts. Shrink the holes to avoid this case.
        //FilterHoles(solid_polygons, 0.55 * ToolDiameter);

        // now expand the region and intersect with the full millable region
        solid_polygons = ClipperUtil.MiterOffset(solid_polygons, ToolDiameter * 0.5, 0.0001);
        solid_polygons = ClipperUtil.Intersection(solid_polygons, clip_slice.Solids, 0.0001);

        // Same idea as above, but if we do it after, we keep more of the hole and
        // hence do less extra clearing.
        // Also this could then be done at the slicer level instead of here...
        // (possibly this entire thing should be done at the slicer level, except we need clip_slice!)
        FilterHoles(solid_polygons, 1.1 * ToolDiameter);

        add_solid_polygons(horz_slices[k], solid_polygons, PrintMeshOptions.Default());
    }

    // resolve planar intersections, etc
    int NF = horz_slices.Length;
    gParallel.ForEach(Interval1i.Range(NF), (i) => {
        if (Cancelled()) { return; }
        horz_slices[i].Resolve();
        Interlocked.Add(ref Progress, 2);
    });

    // add to horizontal-finish stack
    result.HorizontalFinish = SliceStackFactoryF();
    for (int k = 0; k < horz_slices.Length; ++k) {
        result.HorizontalFinish.Add(horz_slices[k]);
    }

done_slicing:
    return result;
}