/// <summary>
/// Process z-slabs of cells in parallel
/// </summary>
void generate_parallel()
{
    hash_lock = new SpinLock();
    mesh_lock = new SpinLock();
    bParallel = true;

    // [TODO] maybe shouldn't always use Z axis here?
    gParallel.ForEach(Interval1i.Range(CellDimensions.z), (zi) => {
        GridCell cell = new GridCell();
        Vector3d[] vertlist = new Vector3d[12];
        for (int yi = 0; yi < CellDimensions.y; ++yi) {
            // compute full cell at x=0, then slide along x row, which saves half of value computes
            Vector3i idx = new Vector3i(0, yi, zi);
            initialize_cell(cell, ref idx);
            polygonize_cell(cell, vertlist);
            for (int xi = 1; xi < CellDimensions.x; ++xi) {
                shift_cell_x(cell, xi);
                polygonize_cell(cell, vertlist);
            }
        }
    });

    bParallel = false;
}
/// <summary>
/// Process indices [iStart,iEnd] *inclusive* by passing sub-intervals [start,end] to blockF.
/// Block size is determined automatically unless you specify one.
/// Iterate over [start,end] *inclusive* in each block.
/// </summary>
public static void BlockStartEnd(int iStart, int iEnd, Action<int, int> blockF, int iBlockSize = -1, bool bDisableParallel = false)
{
    if (iBlockSize == -1) {
        iBlockSize = 100;       // seems to work
    }
    int N = (iEnd - iStart + 1);
    int num_blocks = N / iBlockSize;

    // process main blocks in parallel
    if (bDisableParallel) {
        ForEach_Sequential(Interval1i.Range(num_blocks), (bi) => {
            int k = iStart + iBlockSize * bi;
            blockF(k, k + iBlockSize - 1);
        });
    } else {
        ForEach(Interval1i.Range(num_blocks), (bi) => {
            int k = iStart + iBlockSize * bi;
            blockF(k, k + iBlockSize - 1);
        });
    }

    // process leftover elements
    int remaining = N - (num_blocks * iBlockSize);
    if (remaining > 0) {
        int k = iStart + num_blocks * iBlockSize;
        blockF(k, k + remaining - 1);
    }
}
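// Usage sketch (illustrative, not from the library): filling a buffer in parallel blocks
// with BlockStartEnd. The buffer, the block size of 256 and the lambda body are made up;
// this assumes the method is called through gParallel, like ForEach above.
void fill_in_blocks()
{
    double[] values = new double[10000];
    gParallel.BlockStartEnd(0, values.Length - 1, (start, end) => {
        // each invocation receives an inclusive [start,end] sub-range
        for (int i = start; i <= end; ++i) {
            values[i] = Math.Sqrt(i);
        }
    }, 256);
}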
/// <summary>
/// Construct packed versions of the input matrices, then use sparse row/column dot
/// products to compute the elements of the output matrix. This is faster, but still
/// relatively expensive.
/// </summary>
void multiply_fast(SymmetricSparseMatrix M2in, ref SymmetricSparseMatrix Rin, bool bParallel)
{
    int N = Rows;
    if (M2in.Rows != N) {
        throw new Exception("SymmetricSparseMatrix.Multiply: matrices have incompatible dimensions");
    }

    if (Rin == null) {
        Rin = new SymmetricSparseMatrix();
    }
    SymmetricSparseMatrix R = Rin;      // require alias for use in lambda below

    PackedSparseMatrix M = new PackedSparseMatrix(this);
    M.Sort();
    PackedSparseMatrix M2 = new PackedSparseMatrix(M2in, true);
    M2.Sort();

    // Parallel variant is vastly faster; uses a lock to control access to R
    // (a SpinLock, as in the commented-out variant, would require .NET 4)
    if (bParallel) {
        //SpinLock spin = new SpinLock();
        gParallel.ForEach(Interval1i.Range(N), (r1i) => {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M2);
                if (Math.Abs(v) > MathUtil.ZeroTolerance) {
                    //bool taken = false;
                    //spin.Enter(ref taken);
                    //Debug.Assert(taken);
                    //R[r1i, c2i] = v;
                    //spin.Exit();
                    lock (R) {
                        R[r1i, c2i] = v;
                    }
                }
            }
        });
    } else {
        for (int r1i = 0; r1i < N; r1i++) {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M2);
                if (Math.Abs(v) > MathUtil.ZeroTolerance) {
                    R[r1i, c2i] = v;
                }
            }
        }
    }
}
// Result must be as large as Mesh.MaxVertexID
public bool SolveMultipleRHS(Vector3d[] Result)
{
    if (WeightsM == null) {
        Initialize();       // force initialize...
    }

    UpdateForSolve();

    // use initial positions as initial solution.
    double[][] B = BufferUtil.InitNxM(3, N, new double[][] { Bx, By, Bz });
    double[][] X = BufferUtil.InitNxM(3, N, new double[][] { Px, Py, Pz });

    Action<double[][], double[][]> CombinedMultiply = (Xt, Bt) => {
        PackedM.Multiply_Parallel_3(Xt, Bt);
        gParallel.ForEach(Interval1i.Range(3), (j) => {
            BufferUtil.MultiplyAdd(Bt[j], WeightsM.D, Xt[j]);
        });
    };

    var Solver = new SparseSymmetricCGMultipleRHS() {
        B = B, X = X,
        MultiplyF = CombinedMultiply,
        PreconditionMultiplyF = null,
        UseXAsInitialGuess = true
    };

    bool ok = Solver.Solve();
    if (ok == false) {
        return(false);
    }

    for (int i = 0; i < N; ++i) {
        int vid = ToMeshV[i];
        Result[vid] = new Vector3d(X[0][i], X[1][i], X[2][i]);
    }

    // apply post-fixed constraints
    if (HavePostFixedConstraints) {
        foreach (var constraint in SoftConstraints) {
            if (constraint.Value.PostFix) {
                int vid = constraint.Key;
                Result[vid] = constraint.Value.Position;
            }
        }
    }

    return(true);
}
/// <summary>
/// Evaluate input actions in parallel
/// </summary>
public static void Evaluate(params Action[] funcs)
{
    int N = funcs.Length;
    gParallel.ForEach(Interval1i.Range(N), (i) => {
        funcs[i]();
    });
}
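// Usage sketch (illustrative): run two independent setup steps concurrently and return
// once both have completed. Assumes Evaluate lives on gParallel alongside ForEach.
void parallel_setup()
{
    gParallel.Evaluate(
        () => { /* e.g. recompute vertex normals */ },
        () => { /* e.g. rebuild a spatial data structure */ });
}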
public void Sort()
{
    gParallel.ForEach(Interval1i.Range(Rows.Length), (i) => {
        Array.Sort(Rows[i], (x, y) => { return(x.j.CompareTo(y.j)); });
    });
    //for ( int i = 0; i < Rows.Length; ++i )
    //    Array.Sort(Rows[i], (x, y) => { return x.j.CompareTo(y.j); } );
    Sorted = true;
}
public void Multiply(DenseMatrix M2, ref DenseMatrix R, bool bParallel = true)
{
    int rows1 = N, cols1 = M;
    int rows2 = M2.N, cols2 = M2.M;
    if (cols1 != rows2) {
        throw new Exception("DenseMatrix.Multiply: matrices have incompatible dimensions");
    }
    if (R == null) {
        R = new DenseMatrix(Rows, M2.Columns);
    }
    if (R.Rows != rows1 || R.Columns != cols2) {
        throw new Exception("DenseMatrix.Multiply: Result matrix has incorrect dimensions");
    }

    if (bParallel) {
        DenseMatrix Rt = R;
        gParallel.ForEach(Interval1i.Range(0, rows1), (r1i) => {
            int ii = r1i * M;
            for (int c2i = 0; c2i < cols2; c2i++) {
                double v = 0;
                for (int k = 0; k < cols1; ++k) {
                    v += d[ii + k] * M2.d[k * M + c2i];
                }
                Rt[ii + c2i] = v;
            }
        });
    } else {
        for (int r1i = 0; r1i < rows1; r1i++) {
            int ii = r1i * M;
            for (int c2i = 0; c2i < cols2; c2i++) {
                double v = 0;
                for (int k = 0; k < cols1; ++k) {
                    v += d[ii + k] * M2.d[k * M + c2i];
                }
                R[ii + c2i] = v;
            }
        }
    }
}
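// Usage sketch (illustrative): multiplying two square dense matrices. Uses the
// (rows, cols) constructor pattern seen above; element fill is elided.
void multiply_example()
{
    var A = new DenseMatrix(3, 3);
    var B = new DenseMatrix(3, 3);
    // ... fill A and B ...
    DenseMatrix R = null;
    A.Multiply(B, ref R);           // parallel by default; allocates R when null
    A.Multiply(B, ref R, false);    // single-threaded variant re-uses R
}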
// returns this*this (requires less memory)
public SymmetricSparseMatrix Square(bool bParallel = true)
{
    var R = new SymmetricSparseMatrix();
    var M = new PackedSparseMatrix(this);
    M.Sort();

    // Parallel variant is vastly faster; uses a lock to control access to R
    // (a SpinLock, as in the commented-out variant, would require .NET 4)
    if (bParallel) {
        //SpinLock spin = new SpinLock();
        gParallel.ForEach(Interval1i.Range(N), (r1i) => {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M);
                if (Math.Abs(v) > MathUtil.ZeroTolerance) {
                    //bool taken = false;
                    //spin.Enter(ref taken);
                    //Debug.Assert(taken);
                    //R[r1i, c2i] = v;
                    //spin.Exit();
                    lock (R) {
                        R[r1i, c2i] = v;
                    }
                }
            }
        });
    } else {
        for (int r1i = 0; r1i < N; r1i++) {
            for (int c2i = r1i; c2i < N; c2i++) {
                double v = M.DotRowColumn(r1i, c2i, M);
                if (Math.Abs(v) > MathUtil.ZeroTolerance) {
                    R[r1i, c2i] = v;
                }
            }
        }
    }

    return(R);
}
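// Usage sketch (illustrative): forming M*M with Square(). Assumes entries can be
// assigned through the same [row, col] indexer the code above writes results into.
SymmetricSparseMatrix square_example()
{
    var S = new SymmetricSparseMatrix();
    S[0, 0] = 4; S[0, 1] = 1; S[1, 1] = 3;
    return S.Square();      // pass false to force the single-threaded path
}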
public int Dist(Interval1i o)
{
    if (b < o.a) {
        return(o.a - b);
    } else if (a > o.b) {
        return(a - o.b);
    } else {
        return(0);
    }
}
void UpdateP(double[][] P, double[] beta, double[][] R, bool[] converged)
{
    Interval1i rhs = Interval1i.Range(P.Length);
    gParallel.ForEach(rhs, (j) => {
        if (converged[j] == false) {
            int n = P[j].Length;
            for (int i = 0; i < n; ++i) {
                P[j][i] = R[j][i] + beta[j] * P[j][i];
            }
        }
    });
}
public int SquaredDist(Interval1i o)
{
    if (b < o.a) {
        return((o.a - b) * (o.a - b));
    } else if (a > o.b) {
        return((a - o.b) * (a - o.b));
    } else {
        return(0);
    }
}
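// Worked example: for disjoint intervals the distance is the gap between the nearest
// endpoints; overlapping or touching intervals return 0.
void interval_distance_example()
{
    Interval1i p = new Interval1i(0, 5);
    Interval1i q = new Interval1i(8, 10);
    int d = p.Dist(q);                        // 8 - 5 = 3
    int d2 = p.SquaredDist(q);                // 3 * 3 = 9
    int d0 = p.Dist(new Interval1i(4, 12));   // intervals overlap -> 0
}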
// for each From[i], find closest point on TargetSurface
void update_to()
{
    double max_dist = double.MaxValue;
    bool bNormals = (UseNormals && Source.HasVertexNormals);

    var range = Interval1i.Range(From.Length);
    gParallel.ForEach(range, (vi) => {
        int tid = TargetSurface.FindNearestTriangle(From[vi], max_dist);
        if (tid == DMesh3.InvalidID) {
            Weights[vi] = 0;
            return;
        }

        DistPoint3Triangle3 d = MeshQueries.TriangleDistance(TargetSurface.Mesh, tid, From[vi]);
        if (d.DistanceSquared > MaxAllowableDistance * MaxAllowableDistance) {
            Weights[vi] = 0;
            return;
        }

        To[vi] = d.TriangleClosest;
        Weights[vi] = 1.0f;

        if (bNormals) {
            Vector3d fromN = Rotation * Source.GetVertexNormal(vi);
            Vector3d toN = TargetSurface.Mesh.GetTriNormal(tid);
            double fDot = fromN.Dot(toN);
            Debug.Assert(MathUtil.IsFinite(fDot));
            if (fDot < 0) {
                Weights[vi] = 0;
            } else {
                Weights[vi] += Math.Sqrt(fDot);
            }
        }
    });
}
/// <summary>
/// Calculate the two most extreme vertices along an axis, with optional transform
/// </summary>
public static Interval1i ExtremeVertices(DMesh3 mesh, Vector3d axis, Func<Vector3d, Vector3d> TransformF = null)
{
    Interval1d extent = Interval1d.Empty;
    Interval1i extreme = new Interval1i(DMesh3.InvalidID, DMesh3.InvalidID);
    if (TransformF == null) {
        foreach (int vid in mesh.VertexIndices()) {
            double t = mesh.GetVertex(vid).Dot(ref axis);
            if (t < extent.a) {
                extent.a = t;
                extreme.a = vid;
            } else if (t > extent.b) {
                extent.b = t;
                extreme.b = vid;
            }
        }
    } else {
        foreach (int vid in mesh.VertexIndices()) {
            double t = TransformF(mesh.GetVertex(vid)).Dot(ref axis);
            if (t < extent.a) {
                extent.a = t;
                extreme.a = vid;
            } else if (t > extent.b) {
                extent.b = t;
                extreme.b = vid;
            }
        }
    }
    return(extreme);
}
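// Usage sketch (illustrative): find the lowest/highest vertices of a mesh along +Z.
// The static class holding ExtremeVertices is not shown above; "MeshMeasurements" and
// Vector3d.AxisZ are assumptions here.
void report_z_extremes(DMesh3 mesh)
{
    Interval1i ext = MeshMeasurements.ExtremeVertices(mesh, Vector3d.AxisZ);
    Vector3d lowest = mesh.GetVertex(ext.a);     // vertex with smallest Z
    Vector3d highest = mesh.GetVertex(ext.b);    // vertex with largest Z
}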
/// <summary>
/// For row r, find the interval that the nonzeros lie in
/// </summary>
public Interval1i NonZerosRange(int r)
{
    nonzero[] Row = Rows[r];
    if (Row.Length == 0) {
        return(Interval1i.Empty);
    }
    if (Sorted == false) {
        Interval1i range = Interval1i.Empty;
        for (int i = 0; i < Row.Length; ++i) {
            range.Contain(Row[i].j);
        }
        return(range);
    } else {
        return(new Interval1i(Row[0].j, Row[Row.Length - 1].j));
    }
}
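// Usage sketch (illustrative): after Sort(), the range query only has to read the first
// and last entry of the row. Construction from a SymmetricSparseMatrix follows the
// pattern used in multiply_fast() / Square() above.
Interval1i row_nonzero_span(SymmetricSparseMatrix S, int row)
{
    var packed = new PackedSparseMatrix(S);
    packed.Sort();
    return packed.NonZerosRange(row);   // column interval spanned by nonzeros of this row
}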
// TODO: parallel version, cache tri normals
void Compute_FaceAvg_AreaWeighted()
{
    int NV = Mesh.MaxVertexID;
    if (NV != Normals.size) {
        Normals.resize(NV);
    }
    for (int i = 0; i < NV; ++i) {
        Normals[i] = Vector3d.Zero;
    }

    int NT = Mesh.MaxTriangleID;
    for (int ti = 0; ti < NT; ++ti) {
        if (Mesh.IsTriangle(ti) == false) {
            continue;
        }
        Index3i tri = Mesh.GetTriangle(ti);
        Vector3d va = Mesh.GetVertex(tri.a);
        Vector3d vb = Mesh.GetVertex(tri.b);
        Vector3d vc = Mesh.GetVertex(tri.c);
        Vector3d N = MathUtil.Normal(va, vb, vc);
        double a = MathUtil.Area(va, vb, vc);
        Normals[tri.a] += a * N;
        Normals[tri.b] += a * N;
        Normals[tri.c] += a * N;
    }

    gParallel.ForEach(Interval1i.Range(NV), (vi) => {
        if (Normals[vi].LengthSquared > MathUtil.ZeroTolerancef) {
            Normals[vi] = Normals[vi].Normalized;
        }
    });
}
public virtual bool Smooth()
{
    int NV = Loop.Vertices.Length;
    double a = MathUtil.Clamp(Alpha, 0, 1);
    double num_rounds = MathUtil.Clamp(Rounds, 0, 10000);

    for (int round = 0; round < num_rounds; ++round) {
        // compute
        gParallel.ForEach(Interval1i.Range(NV), (i) => {
            int vid = Loop.Vertices[(i + 1) % NV];
            Vector3d prev = Mesh.GetVertex(Loop.Vertices[i]);
            Vector3d cur = Mesh.GetVertex(vid);
            Vector3d next = Mesh.GetVertex(Loop.Vertices[(i + 2) % NV]);
            Vector3d centroid = (prev + next) * 0.5;
            SmoothedPostions[i] = (1 - a) * cur + (a) * centroid;
        });

        // bake
        gParallel.ForEach(Interval1i.Range(NV), (i) => {
            int vid = Loop.Vertices[(i + 1) % NV];
            Vector3d pos = SmoothedPostions[i];
            if (ProjectF != null) {
                pos = ProjectF(pos, vid);
            }
            Mesh.SetVertex(vid, pos);
        });
    }

    return(true);
}
// TODO: parallel version, cache tri normals
void Compute_FaceAvg_AreaWeighted()
{
    int NV = Mesh.MaxVertexID;
    if (NV != Normals.size) {
        Normals.resize(NV);
    }
    for (int i = 0; i < NV; ++i) {
        Normals[i] = Vector3d.Zero;
    }

    SpinLock Normals_lock = new SpinLock();

    gParallel.ForEach(Mesh.TriangleIndices(), (ti) => {
        Index3i tri = Mesh.GetTriangle(ti);
        Vector3d va = Mesh.GetVertex(tri.a);
        Vector3d vb = Mesh.GetVertex(tri.b);
        Vector3d vc = Mesh.GetVertex(tri.c);
        Vector3d N = MathUtil.Normal(ref va, ref vb, ref vc);
        double a = MathUtil.Area(ref va, ref vb, ref vc);

        bool taken = false;
        Normals_lock.Enter(ref taken);
        Normals[tri.a] += a * N;
        Normals[tri.b] += a * N;
        Normals[tri.c] += a * N;
        Normals_lock.Exit();
    });

    gParallel.ForEach(Interval1i.Range(NV), (vi) => {
        if (Normals[vi].LengthSquared > MathUtil.ZeroTolerancef) {
            Normals[vi] = Normals[vi].Normalized;
        }
    });
}
// Result must be as large as Mesh.MaxVertexID
public bool SolveMultipleCG(Vector3d[] Result)
{
    if (WeightsM == null) {
        Initialize();       // force initialize...
    }

    UpdateForSolve();

    // use initial positions as initial solution.
    Array.Copy(Px, Sx, N);
    Array.Copy(Py, Sy, N);
    Array.Copy(Pz, Sz, N);

    Action<double[], double[]> CombinedMultiply = (X, B) => {
        //PackedM.Multiply(X, B);
        PackedM.Multiply_Parallel(X, B);
        for (int i = 0; i < N; ++i) {
            B[i] += WeightsM[i, i] * X[i];
        }
    };

    List<SparseSymmetricCG> Solvers = new List<SparseSymmetricCG>();
    if (SolveX) {
        Solvers.Add(new SparseSymmetricCG() {
            B = Bx, X = Sx, MultiplyF = CombinedMultiply,
            PreconditionMultiplyF = Preconditioner.Multiply, UseXAsInitialGuess = true
        });
    }
    if (SolveY) {
        Solvers.Add(new SparseSymmetricCG() {
            B = By, X = Sy, MultiplyF = CombinedMultiply,
            PreconditionMultiplyF = Preconditioner.Multiply, UseXAsInitialGuess = true
        });
    }
    if (SolveZ) {
        Solvers.Add(new SparseSymmetricCG() {
            B = Bz, X = Sz, MultiplyF = CombinedMultiply,
            PreconditionMultiplyF = Preconditioner.Multiply, UseXAsInitialGuess = true
        });
    }

    bool[] ok = new bool[Solvers.Count];
    gParallel.ForEach(Interval1i.Range(Solvers.Count), (i) => {
        ok[i] = Solvers[i].Solve();
        // preconditioned solve is slower =\
        //ok[i] = solvers[i].SolvePreconditioned();
    });

    ConvergeFailed = false;
    foreach (bool b in ok) {
        if (b == false) {
            ConvergeFailed = true;
        }
    }

    for (int i = 0; i < N; ++i) {
        int vid = ToCurveV[i];
        Result[vid] = new Vector3d(Sx[i], Sy[i], Sz[i]);
    }

    // apply post-fixed constraints
    if (HavePostFixedConstraints) {
        foreach (var constraint in SoftConstraints) {
            if (constraint.Value.PostFix) {
                int vid = constraint.Key;
                Result[vid] = constraint.Value.Position;
            }
        }
    }

    return(true);
}
public virtual bool Apply() { HashSet <int> OnCurveVerts = new HashSet <int>(); // original vertices that were epsilon-coincident w/ curve vertices insert_corners(OnCurveVerts); // [RMS] not using this? //HashSet<int> corner_v = new HashSet<int>(CurveVertices); // not sure we need to track all of these HashSet <int> ZeroEdges = new HashSet <int>(); HashSet <int> ZeroVertices = new HashSet <int>(); OnCutEdges = new HashSet <int>(); HashSet <int> NewEdges = new HashSet <int>(); HashSet <int> NewCutVertices = new HashSet <int>(); sbyte[] signs = new sbyte[2 * Mesh.MaxVertexID + 2 * Curve.VertexCount]; HashSet <int> segTriangles = new HashSet <int>(); HashSet <int> segVertices = new HashSet <int>(); HashSet <int> segEdges = new HashSet <int>(); // loop over segments, insert each one in sequence int N = (IsLoop) ? Curve.VertexCount : Curve.VertexCount - 1; for (int si = 0; si < N; ++si) { int i0 = si; int i1 = (si + 1) % Curve.VertexCount; Segment2d seg = new Segment2d(Curve[i0], Curve[i1]); int i0_vid = CurveVertices[i0]; int i1_vid = CurveVertices[i1]; // If these vertices are already connected by an edge, we can just continue. int existing_edge = Mesh.FindEdge(i0_vid, i1_vid); if (existing_edge != DMesh3.InvalidID) { add_cut_edge(existing_edge); continue; } if (triSpatial != null) { segTriangles.Clear(); segVertices.Clear(); segEdges.Clear(); AxisAlignedBox2d segBounds = new AxisAlignedBox2d(seg.P0); segBounds.Contain(seg.P1); segBounds.Expand(MathUtil.ZeroTolerancef * 10); triSpatial.FindTrianglesInRange(segBounds, segTriangles); IndexUtil.TrianglesToVertices(Mesh, segTriangles, segVertices); IndexUtil.TrianglesToEdges(Mesh, segTriangles, segEdges); } int MaxVID = Mesh.MaxVertexID; IEnumerable <int> vertices = Interval1i.Range(MaxVID); if (triSpatial != null) { vertices = segVertices; } // compute edge-crossing signs // [TODO] could walk along mesh from a to b, rather than computing for entire mesh? if (signs.Length < MaxVID) { signs = new sbyte[2 * MaxVID]; } gParallel.ForEach(vertices, (vid) => { if (Mesh.IsVertex(vid)) { if (vid == i0_vid || vid == i1_vid) { signs[vid] = 0; } else { Vector2d v2 = PointF(vid); // tolerance defines band in which we will consider values to be zero signs[vid] = (sbyte)seg.WhichSide(v2, SpatialEpsilon); } } else { signs[vid] = sbyte.MaxValue; } }); // have to skip processing of new edges. If edge id // is > max at start, is new. Otherwise if in NewEdges list, also new. // (need both in case we re-use an old edge index) int MaxEID = Mesh.MaxEdgeID; NewEdges.Clear(); NewCutVertices.Clear(); NewCutVertices.Add(i0_vid); NewCutVertices.Add(i1_vid); // cut existing edges with segment IEnumerable <int> edges = Interval1i.Range(MaxEID); if (triSpatial != null) { edges = segEdges; } foreach (int eid in edges) { if (Mesh.IsEdge(eid) == false) { continue; } if (eid >= MaxEID || NewEdges.Contains(eid)) { continue; } // cannot cut boundary edges? if (Mesh.IsBoundaryEdge(eid)) { continue; } Index2i ev = Mesh.GetEdgeV(eid); int eva_sign = signs[ev.a]; int evb_sign = signs[ev.b]; // [RMS] should we be using larger epsilon here? If we don't track OnCurveVerts explicitly, we // need to at least use same epsilon we passed to insert_corner_from_bary...do we still also // need that to catch the edges we split in the poke? 
bool eva_in_segment = false; if (eva_sign == 0) { eva_in_segment = OnCurveVerts.Contains(ev.a) || Math.Abs(seg.Project(PointF(ev.a))) < (seg.Extent + SpatialEpsilon); } bool evb_in_segment = false; if (evb_sign == 0) { evb_in_segment = OnCurveVerts.Contains(ev.b) || Math.Abs(seg.Project(PointF(ev.b))) < (seg.Extent + SpatialEpsilon); } // If one or both vertices are on-segment, we have special case. // If just one vertex is on the segment, we can skip this edge. // If both vertices are on segment, then we can just re-use this edge. if (eva_in_segment || evb_in_segment) { if (eva_in_segment && evb_in_segment) { ZeroEdges.Add(eid); add_cut_edge(eid); NewCutVertices.Add(ev.a); NewCutVertices.Add(ev.b); } else { int zvid = eva_in_segment ? ev.a : ev.b; ZeroVertices.Add(zvid); NewCutVertices.Add(zvid); } continue; } // no crossing if (eva_sign * evb_sign > 0) { continue; } // compute segment/segment intersection Vector2d va = PointF(ev.a); Vector2d vb = PointF(ev.b); Segment2d edge_seg = new Segment2d(va, vb); IntrSegment2Segment2 intr = new IntrSegment2Segment2(seg, edge_seg); intr.Compute(); if (intr.Type == IntersectionType.Segment) { // [RMS] we should have already caught this above, so if it happens here it is probably spurious? // we should have caught this case above, but numerics are different so it might occur again ZeroEdges.Add(eid); NewCutVertices.Add(ev.a); NewCutVertices.Add(ev.b); add_cut_edge(eid); continue; } else if (intr.Type != IntersectionType.Point) { continue; // no intersection } Vector2d x = intr.Point0; double t = Math.Sqrt(x.DistanceSquared(va) / va.DistanceSquared(vb)); // this case happens if we aren't "on-segment" but after we do the test the intersection pt // is within epsilon of one end of the edge. This is a spurious t-intersection and we // can ignore it. Some other edge should exist that picks up this vertex as part of it. // [TODO] what about if this edge is degenerate? bool x_in_segment = Math.Abs(edge_seg.Project(x)) < (edge_seg.Extent - SpatialEpsilon); if (!x_in_segment) { continue; } Index2i et = Mesh.GetEdgeT(eid); spatial_remove_triangles(et.a, et.b); // split edge at this segment DMesh3.EdgeSplitInfo splitInfo; MeshResult result = Mesh.SplitEdge(eid, out splitInfo, t); if (result != MeshResult.Ok) { throw new Exception("MeshInsertUVSegment.Apply: SplitEdge failed - " + result.ToString()); //return false; } // move split point to intersection position SetPointF(splitInfo.vNew, x); NewCutVertices.Add(splitInfo.vNew); NewEdges.Add(splitInfo.eNewBN); NewEdges.Add(splitInfo.eNewCN); spatial_add_triangles(et.a, et.b); spatial_add_triangles(splitInfo.eNewT2, splitInfo.eNewT3); // some splits - but not all - result in new 'other' edges that are on // the polypath. We want to keep track of these edges so we can extract loop later. Index2i ecn = Mesh.GetEdgeV(splitInfo.eNewCN); if (NewCutVertices.Contains(ecn.a) && NewCutVertices.Contains(ecn.b)) { add_cut_edge(splitInfo.eNewCN); } // since we don't handle bdry edges this should never be false, but maybe we will handle bdry later... if (splitInfo.eNewDN != DMesh3.InvalidID) { NewEdges.Add(splitInfo.eNewDN); Index2i edn = Mesh.GetEdgeV(splitInfo.eNewDN); if (NewCutVertices.Contains(edn.a) && NewCutVertices.Contains(edn.b)) { add_cut_edge(splitInfo.eNewDN); } } } } // extract the cut paths if (EnableCutSpansAndLoops) { find_cut_paths(OnCutEdges); } return(true); } // Apply()
/// <summary> /// Preconditioned variant /// Similar to non-preconditioned version, this can suffer if one solution converges /// much slower than others, as we can't skip matrix multiplies in that case. /// </summary> public bool SolvePreconditioned() { Iterations = 0; if (B == null || MultiplyF == null || PreconditionMultiplyF == null) { throw new Exception("SparseSymmetricCGMultipleRHS.SolvePreconditioned(): Must set B and MultiplyF and PreconditionMultiplyF!"); } int NRHS = B.Length; if (NRHS == 0) { throw new Exception("SparseSymmetricCGMultipleRHS.SolvePreconditioned(): Need at least one RHS vector in B"); } int n = B[0].Length; R = BufferUtil.AllocNxM(NRHS, n); P = BufferUtil.AllocNxM(NRHS, n); AP = BufferUtil.AllocNxM(NRHS, n); Z = BufferUtil.AllocNxM(NRHS, n); if (X == null || UseXAsInitialGuess == false) { if (X == null) { X = BufferUtil.AllocNxM(NRHS, n); } for (int j = 0; j < NRHS; ++j) { Array.Clear(X[j], 0, n); Array.Copy(B[j], R[j], n); } } else { // hopefully is X is a decent initialization... InitializeR(R); } // [RMS] for convergence test? double[] norm = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { norm[j] = BufferUtil.Dot(B[j], B[j]); } double[] root1 = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { root1[j] = Math.Sqrt(norm[j]); } // r_0 = b - A*x_0 MultiplyF(X, R); for (int j = 0; j < NRHS; ++j) { for (int i = 0; i < n; ++i) { R[j][i] = B[j][i] - R[j][i]; } } // z0 = M_inverse * r_0 PreconditionMultiplyF(R, Z); // p0 = z0 for (int j = 0; j < NRHS; ++j) { Array.Copy(Z[j], P[j], n); } // compute initial R*Z double[] RdotZ_k = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { RdotZ_k[j] = BufferUtil.Dot(R[j], Z[j]); } double[] alpha_k = new double[NRHS]; double[] beta_k = new double[NRHS]; bool[] converged = new bool[NRHS]; var rhs = Interval1i.Range(NRHS); int iter = 0; while (iter++ < MaxIterations) { // convergence test bool done = true; for (int j = 0; j < NRHS; ++j) { if (converged[j] == false) { double root0 = Math.Sqrt(RdotZ_k[j]); if (root0 <= ConvergeTolerance * root1[j]) { converged[j] = true; } } if (converged[j] == false) { done = false; } } if (done) { break; } MultiplyF(P, AP); gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { alpha_k[j] = RdotZ_k[j] / BufferUtil.Dot(P[j], AP[j]); } }); // x_k+1 = x_k + alpha_k * p_k gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { BufferUtil.MultiplyAdd(X[j], alpha_k[j], P[j]); } }); // r_k+1 = r_k - alpha_k * A * p_k gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { BufferUtil.MultiplyAdd(R[j], -alpha_k[j], AP[j]); } }); // z_k+1 = M_inverse * r_k+1 PreconditionMultiplyF(R, Z); // beta_k = (z_k+1 * r_k+1) / (z_k * r_k) gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { beta_k[j] = BufferUtil.Dot(Z[j], R[j]) / RdotZ_k[j]; } }); // can do these in parallel but improvement is minimal // p_k+1 = z_k+1 + beta_k * p_k gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { for (int i = 0; i < n; ++i) { P[j][i] = Z[j][i] + beta_k[j] * P[j][i]; } } }); gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { RdotZ_k[j] = BufferUtil.Dot(R[j], Z[j]); } }); } //System.Console.WriteLine("{0} iterations", iter); Iterations = iter; return(iter < MaxIterations); }
public void FindConnectedT()
{
    Components = new List<Component>();

    int NT = Mesh.MaxTriangleID;

    // [TODO] could use Euler formula to determine if mesh is closed genus-0...

    Func<int, bool> filter_func = (i) => { return(Mesh.IsTriangle(i)); };
    if (FilterF != null) {
        filter_func = (i) => { return(Mesh.IsTriangle(i) && FilterF(i)); };
    }

    // initial active set contains all valid triangles
    byte[] active = new byte[Mesh.MaxTriangleID];
    Interval1i activeRange = Interval1i.Empty;
    if (FilterSet != null) {
        for (int i = 0; i < NT; ++i) {
            active[i] = 255;
        }
        foreach (int tid in FilterSet) {
            bool bValid = filter_func(tid);
            if (bValid) {
                active[tid] = 0;
                activeRange.Contain(tid);
            }
        }
    } else {
        for (int i = 0; i < NT; ++i) {
            bool bValid = filter_func(i);
            if (bValid) {
                active[i] = 0;
                activeRange.Contain(i);
            } else {
                active[i] = 255;
            }
        }
    }

    // temporary buffers
    List<int> queue = new List<int>(NT / 10);
    List<int> cur_comp = new List<int>(NT / 10);

    // keep finding valid seed triangles and growing connected components
    // until we are done
    IEnumerable<int> range = (FilterSet != null) ? FilterSet : activeRange;
    foreach (int i in range) {
        //for ( int i = 0; i < NT; ++i ) {
        if (active[i] == 255) {
            continue;
        }
        int seed_t = i;
        if (SeedFilterF != null && SeedFilterF(seed_t) == false) {
            continue;
        }

        queue.Add(seed_t);
        active[seed_t] = 1;     // in queue

        while (queue.Count > 0) {
            int cur_t = queue[queue.Count - 1];
            queue.RemoveAt(queue.Count - 1);

            active[cur_t] = 2;  // tri has been processed
            cur_comp.Add(cur_t);

            Index3i nbrs = Mesh.GetTriNeighbourTris(cur_t);
            for (int j = 0; j < 3; ++j) {
                int nbr_t = nbrs[j];
                if (nbr_t != DMesh3.InvalidID && active[nbr_t] == 0) {
                    queue.Add(nbr_t);
                    active[nbr_t] = 1;      // in queue
                }
            }
        }

        Component comp = new Component() { Indices = cur_comp.ToArray() };
        Components.Add(comp);

        // remove tris in this component from active set
        for (int j = 0; j < comp.Indices.Length; ++j) {
            active[comp.Indices[j]] = 255;
        }

        cur_comp.Clear();
        queue.Clear();
    }
}
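// Usage sketch (illustrative): counting "large" connected components. The class name
// MeshConnectedComponents and its DMesh3 constructor are assumptions; only
// FindConnectedT(), FilterF and Components appear in the code above.
int count_large_components(DMesh3 mesh, int minTris)
{
    var components = new MeshConnectedComponents(mesh);
    components.FindConnectedT();
    int n = 0;
    foreach (var c in components.Components) {
        if (c.Indices.Length >= minTris)
            n++;
    }
    return n;
}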
/// <summary> /// standard CG solve /// </summary> public bool Solve() { Iterations = 0; if (B == null || MultiplyF == null) { throw new Exception("SparseSymmetricCGMultipleRHS.Solve(): Must set B and MultiplyF!"); } int NRHS = B.Length; if (NRHS == 0) { throw new Exception("SparseSymmetricCGMultipleRHS.Solve(): Need at least one RHS vector in B"); } int size = B[0].Length; // Based on the algorithm in "Matrix Computations" by Golum and Van Loan. R = BufferUtil.AllocNxM(NRHS, size); P = BufferUtil.AllocNxM(NRHS, size); W = BufferUtil.AllocNxM(NRHS, size); if (X == null || UseXAsInitialGuess == false) { if (X == null) { X = BufferUtil.AllocNxM(NRHS, size); } for (int j = 0; j < NRHS; ++j) { Array.Clear(X[j], 0, size); Array.Copy(B[j], R[j], size); } } else { // hopefully is X is a decent initialization... InitializeR(R); } // [RMS] these were inside loop but they are constant! double[] norm = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { norm[j] = BufferUtil.Dot(B[j], B[j]); } double[] root1 = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { root1[j] = Math.Sqrt(norm[j]); } // The first iteration. double[] rho0 = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { rho0[j] = BufferUtil.Dot(R[j], R[j]); } // [RMS] If we were initialized w/ constraints already satisfied, // then we are done! (happens for example in mesh deformations) bool[] converged = new bool[NRHS]; int nconverged = 0; for (int j = 0; j < NRHS; ++j) { converged[j] = rho0[j] < (ConvergeTolerance * root1[j]); if (converged[j]) { nconverged++; } } if (nconverged == NRHS) { return(true); } for (int j = 0; j < NRHS; ++j) { Array.Copy(R[j], P[j], size); } MultiplyF(P, W); double[] alpha = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { alpha[j] = rho0[j] / BufferUtil.Dot(P[j], W[j]); } for (int j = 0; j < NRHS; ++j) { BufferUtil.MultiplyAdd(X[j], alpha[j], P[j]); } for (int j = 0; j < NRHS; ++j) { BufferUtil.MultiplyAdd(R[j], -alpha[j], W[j]); } double[] rho1 = new double[NRHS]; for (int j = 0; j < NRHS; ++j) { rho1[j] = BufferUtil.Dot(R[j], R[j]); } double[] beta = new double[NRHS]; var rhs = Interval1i.Range(NRHS); // The remaining iterations. int iter; for (iter = 1; iter < MaxIterations; ++iter) { bool done = true; for (int j = 0; j < NRHS; ++j) { if (converged[j] == false) { double root0 = Math.Sqrt(rho1[j]); if (root0 <= ConvergeTolerance * root1[j]) { converged[j] = true; } } if (converged[j] == false) { done = false; } } if (done) { break; } for (int j = 0; j < NRHS; ++j) { beta[j] = rho1[j] / rho0[j]; } UpdateP(P, beta, R, converged); MultiplyF(P, W); gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { alpha[j] = rho1[j] / BufferUtil.Dot(P[j], W[j]); } }); // can do all these in parallel, but improvement is minimal gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { BufferUtil.MultiplyAdd(X[j], alpha[j], P[j]); } }); gParallel.ForEach(rhs, (j) => { if (converged[j] == false) { rho0[j] = rho1[j]; rho1[j] = BufferUtil.MultiplyAdd_GetSqrSum(R[j], -alpha[j], W[j]); } }); } //System.Console.WriteLine("{0} iterations", iter); Iterations = iter; return(iter < MaxIterations); }
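// Usage sketch (illustrative): driving the multiple-RHS CG solver directly, mirroring
// the pattern in SolveMultipleRHS() above. The PackedSparseMatrix parameter and its
// Multiply_Parallel_3 call follow that usage; treat the exact types as assumptions.
bool solve_rhs(PackedSparseMatrix A, double[][] B, double[][] X)
{
    var solver = new SparseSymmetricCGMultipleRHS() {
        B = B,
        X = X,
        MultiplyF = (Xt, Bt) => A.Multiply_Parallel_3(Xt, Bt),
        UseXAsInitialGuess = true
    };
    return solver.Solve();
}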
public void Set(Interval1i o) { a = o.a; b = o.b; }
public Interval1i(Interval1i copy) { a = copy.a; b = copy.b; }
public virtual bool Apply() { insert_corners(); // [RMS] not using this? //HashSet<int> corner_v = new HashSet<int>(CurveVertices); // not sure we need to track all of these HashSet <int> ZeroEdges = new HashSet <int>(); HashSet <int> ZeroVertices = new HashSet <int>(); OnCutEdges = new HashSet <int>(); // loop over segments, insert each one in sequence int N = (IsLoop) ? Curve.VertexCount : Curve.VertexCount - 1; for (int si = 0; si < N; ++si) { int i0 = si; int i1 = (si + 1) % Curve.VertexCount; Segment2d seg = new Segment2d(Curve[i0], Curve[i1]); int i0_vid = CurveVertices[i0]; int i1_vid = CurveVertices[i1]; // If these vertices are already connected by an edge, we can just continue. int existing_edge = Mesh.FindEdge(i0_vid, i1_vid); if (existing_edge != DMesh3.InvalidID) { OnCutEdges.Add(existing_edge); continue; } // compute edge-crossing signs // [TODO] could walk along mesh from a to b, rather than computing for entire mesh? int MaxVID = Mesh.MaxVertexID; int[] signs = new int[MaxVID]; gParallel.ForEach(Interval1i.Range(MaxVID), (vid) => { if (Mesh.IsVertex(vid)) { if (vid == i0_vid || vid == i1_vid) { signs[vid] = 0; } else { Vector2d v2 = PointF(vid); // tolerance defines band in which we will consider values to be zero signs[vid] = seg.WhichSide(v2, MathUtil.ZeroTolerance); } } else { signs[vid] = int.MaxValue; } }); // have to skip processing of new edges. If edge id // is > max at start, is new. Otherwise if in NewEdges list, also new. // (need both in case we re-use an old edge index) int MaxEID = Mesh.MaxEdgeID; HashSet <int> NewEdges = new HashSet <int>(); HashSet <int> NewCutVertices = new HashSet <int>(); NewCutVertices.Add(i0_vid); NewCutVertices.Add(i1_vid); // cut existing edges with segment for (int eid = 0; eid < MaxEID; ++eid) { if (Mesh.IsEdge(eid) == false) { continue; } if (eid >= MaxEID || NewEdges.Contains(eid)) { continue; } // cannot cut boundary edges? if (Mesh.IsBoundaryEdge(eid)) { continue; } Index2i ev = Mesh.GetEdgeV(eid); int eva_sign = signs[ev.a]; int evb_sign = signs[ev.b]; bool eva_in_segment = false; if (eva_sign == 0) { eva_in_segment = Math.Abs(seg.Project(PointF(ev.a))) < (seg.Extent + MathUtil.ZeroTolerance); } bool evb_in_segment = false; if (evb_sign == 0) { evb_in_segment = Math.Abs(seg.Project(PointF(ev.b))) < (seg.Extent + MathUtil.ZeroTolerance); } // If one or both vertices are on-segment, we have special case. // If just one vertex is on the segment, we can skip this edge. // If both vertices are on segment, then we can just re-use this edge. if (eva_in_segment || evb_in_segment) { if (eva_in_segment && evb_in_segment) { ZeroEdges.Add(eid); OnCutEdges.Add(eid); } else { ZeroVertices.Add(eva_in_segment ? ev.a : ev.b); } continue; } // no crossing if (eva_sign * evb_sign > 0) { continue; } // compute segment/segment intersection Vector2d va = PointF(ev.a); Vector2d vb = PointF(ev.b); Segment2d edge_seg = new Segment2d(va, vb); IntrSegment2Segment2 intr = new IntrSegment2Segment2(seg, edge_seg); intr.Compute(); if (intr.Type == IntersectionType.Segment) { // [RMS] we should have already caught this above, so if it happens here it is probably spurious? // we should have caught this case above, but numerics are different so it might occur again ZeroEdges.Add(eid); OnCutEdges.Add(eid); continue; } else if (intr.Type != IntersectionType.Point) { continue; // no intersection } Vector2d x = intr.Point0; // this case happens if we aren't "on-segment" but after we do the test the intersection pt // is within epsilon of one end of the edge. 
                // This is a spurious t-intersection and we can ignore it. Some other edge
                // should exist that picks up this vertex as part of it.
                // [TODO] what about if this edge is degenerate?
                bool x_in_segment = Math.Abs(edge_seg.Project(x)) < (edge_seg.Extent - MathUtil.ZeroTolerance);
                if (!x_in_segment) {
                    continue;
                }

                // split edge at this segment
                DMesh3.EdgeSplitInfo splitInfo;
                MeshResult result = Mesh.SplitEdge(eid, out splitInfo);
                if (result != MeshResult.Ok) {
                    throw new Exception("MeshInsertUVSegment.Cut: failed in SplitEdge");
                    //return false;
                }

                // move split point to intersection position
                SetPointF(splitInfo.vNew, x);
                NewCutVertices.Add(splitInfo.vNew);
                NewEdges.Add(splitInfo.eNewBN);
                NewEdges.Add(splitInfo.eNewCN);

                // some splits - but not all - result in new 'other' edges that are on
                // the polypath. We want to keep track of these edges so we can extract loop later.
                Index2i ecn = Mesh.GetEdgeV(splitInfo.eNewCN);
                if (NewCutVertices.Contains(ecn.a) && NewCutVertices.Contains(ecn.b)) {
                    OnCutEdges.Add(splitInfo.eNewCN);
                }

                // since we don't handle bdry edges this should never be false, but maybe we will handle bdry later...
                if (splitInfo.eNewDN != DMesh3.InvalidID) {
                    NewEdges.Add(splitInfo.eNewDN);
                    Index2i edn = Mesh.GetEdgeV(splitInfo.eNewDN);
                    if (NewCutVertices.Contains(edn.a) && NewCutVertices.Contains(edn.b)) {
                        OnCutEdges.Add(splitInfo.eNewDN);
                    }
                }
            }
        }

        //MeshEditor editor = new MeshEditor(Mesh);
        //foreach (int eid in OnCutEdges)
        //    editor.AppendBox(new Frame3f(Mesh.GetEdgePoint(eid, 0.5)), 0.1f);
        //Util.WriteDebugMesh(Mesh, string.Format("C:\\git\\geometry3SharpDemos\\geometry3Test\\test_output\\after_inserted.obj"));

        // extract the cut paths
        if (EnableCutSpansAndLoops) {
            find_cut_paths(OnCutEdges);
        }

        return(true);
    }   // Apply()
public virtual bool Cut()
{
    double invalidDist = double.MinValue;

    MeshEdgeSelection CutEdgeSet = null;
    MeshVertexSelection CutVertexSet = null;
    if (CutFaceSet != null) {
        CutEdgeSet = new MeshEdgeSelection(Mesh, CutFaceSet);
        CutVertexSet = new MeshVertexSelection(Mesh, CutEdgeSet);
    }

    // compute signs
    int MaxVID = Mesh.MaxVertexID;
    double[] signs = new double[MaxVID];
    gParallel.ForEach(Interval1i.Range(MaxVID), (vid) => {
        if (Mesh.IsVertex(vid)) {
            Vector3d v = Mesh.GetVertex(vid);
            signs[vid] = (v - PlaneOrigin).Dot(PlaneNormal);
        } else {
            signs[vid] = invalidDist;
        }
    });

    HashSet<int> ZeroEdges = new HashSet<int>();
    HashSet<int> ZeroVertices = new HashSet<int>();
    HashSet<int> OnCutEdges = new HashSet<int>();

    // have to skip processing of new edges. If edge id
    // is > max at start, is new. Otherwise if in NewEdges list, also new.
    int MaxEID = Mesh.MaxEdgeID;
    HashSet<int> NewEdges = new HashSet<int>();

    IEnumerable<int> edgeItr = Interval1i.Range(MaxEID);
    if (CutEdgeSet != null) {
        edgeItr = CutEdgeSet;
    }

    // cut existing edges with plane, using edge split
    foreach (int eid in edgeItr) {
        if (Mesh.IsEdge(eid) == false) {
            continue;
        }
        if (eid >= MaxEID || NewEdges.Contains(eid)) {
            continue;
        }

        Index2i ev = Mesh.GetEdgeV(eid);
        double f0 = signs[ev.a];
        double f1 = signs[ev.b];

        // If both signs are 0, this edge is on-contour
        // If one sign is 0, that vertex is on-contour
        int n0 = (Math.Abs(f0) < MathUtil.Epsilon) ? 1 : 0;
        int n1 = (Math.Abs(f1) < MathUtil.Epsilon) ? 1 : 0;
        if (n0 + n1 > 0) {
            if (n0 + n1 == 2) {
                ZeroEdges.Add(eid);
            } else {
                ZeroVertices.Add((n0 == 1) ? ev[0] : ev[1]);
            }
            continue;
        }

        // no crossing
        if (f0 * f1 > 0) {
            continue;
        }

        DMesh3.EdgeSplitInfo splitInfo;
        MeshResult result = Mesh.SplitEdge(eid, out splitInfo);
        if (result != MeshResult.Ok) {
            throw new Exception("MeshPlaneCut.Cut: failed in SplitEdge");
            //return false;
        }

        // SplitEdge just bisects edge - use plane intersection instead
        double t = f0 / (f0 - f1);
        Vector3d newPos = (1 - t) * Mesh.GetVertex(ev.a) + (t) * Mesh.GetVertex(ev.b);
        Mesh.SetVertex(splitInfo.vNew, newPos);

        NewEdges.Add(splitInfo.eNewBN);
        NewEdges.Add(splitInfo.eNewCN);
        OnCutEdges.Add(splitInfo.eNewCN);
        if (splitInfo.eNewDN != DMesh3.InvalidID) {
            NewEdges.Add(splitInfo.eNewDN);
            OnCutEdges.Add(splitInfo.eNewDN);
        }
    }

    // remove one-rings of all positive-side vertices.
    IEnumerable<int> vertexSet = Interval1i.Range(MaxVID);
    if (CutVertexSet != null) {
        vertexSet = CutVertexSet;
    }
    foreach (int vid in vertexSet) {
        if (signs[vid] > 0 && Mesh.IsVertex(vid)) {
            Mesh.RemoveVertex(vid, true, false);
        }
    }

    // ok now we extract boundary loops, but restricted
    // to either the zero-edges we found, or the edges we created! bang!!
    Func<int, bool> CutEdgeFilterF = (eid) => {
        if (OnCutEdges.Contains(eid) || ZeroEdges.Contains(eid)) {
            return(true);
        }
        return(false);
    };

    try {
        MeshBoundaryLoops loops = new MeshBoundaryLoops(Mesh, false);
        loops.EdgeFilterF = CutEdgeFilterF;
        loops.Compute();

        CutLoops = loops.Loops;
        CutSpans = loops.Spans;
        CutLoopsFailed = false;
        FoundOpenSpans = CutSpans.Count > 0;
    } catch {
        CutLoops = new List<EdgeLoop>();
        CutLoopsFailed = true;
    }

    return(true);
}   // Cut()
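// Usage sketch (illustrative): cutting a mesh with a plane and reading back the open
// boundary loops. The MeshPlaneCut constructor signature is an assumption; PlaneOrigin,
// PlaneNormal, Cut() and CutLoops come from the code above.
void cut_at_plane(DMesh3 mesh, Vector3d origin, Vector3d normal)
{
    var cut = new MeshPlaneCut(mesh, origin, normal);
    if (cut.Cut()) {
        // each EdgeLoop in CutLoops bounds one opening created by the cut
        foreach (EdgeLoop loop in cut.CutLoops) {
            // e.g. hand the loop to a hole-fill or cross-section measurement step
        }
    }
}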
public bool Overlaps(Interval1i o) { return(!(o.a > b || o.b < a)); }
public IntSequence(int iStart, int iEnd) { range = new Interval1i(iStart, iEnd); }
public IntSequence(Interval1i ival) { range = ival; }