/// <summary>
/// Returns the explicit-Euler source term dt*(A*u + b) for the operator and the field
/// provided in the constructor, where A is <c>SubgridOperatorMatr</c> and b is <c>SubgridAffine</c>.
/// </summary>
/// <param name="u">input: subgrid coordinates of the previous timestep; compressed in-place by this call</param>
/// <param name="convectiveSource">output: dt*(A*u + b), freshly allocated with the length of the subgrid coordinates</param>
/// <param name="dt">time step size; scales both the matrix-vector product and the affine part</param>
public void ExplicitEulerSource(SubgridCoordinateMapping u, out double[] convectiveSource, double dt) {
    convectiveSource = new double[u.subgridCoordinates.Length];
    u.Compress();
    // convectiveSource = dt*A*u
    SubgridOperatorMatr.SpMVpara <double[], double[]>(dt, u.subgridCoordinates, 1.0, convectiveSource);
    // convectiveSource += dt*b
    BLAS.daxpy(SubgridAffine.Length, dt, SubgridAffine, 1, convectiveSource, 1);
}
/// <summary>
/// computes the pressure correction by solving the Poisson-type system
/// (Div*A^-1*Grad + S)*PressurePCorr = Div*VelocityPCorrIn + S*Pressure - RHSContinuity.
/// </summary>
/// <param name="Pressure">input: current pressure</param>
/// <param name="VelocityPCorrIn">input: intermediate velocity</param>
/// <param name="PressurePCorr">output: pressure correction</param>
/// <param name="RHSContinuity">input: RHS of the continuity equation</param>
/// <returns>the L2 norm of the computed pressure correction</returns>
double PressureCorrector(double[] Pressure, double[] VelocityPCorrIn, double[] PressurePCorr, double[] RHSContinuity) {
    using (new FuncTrace()) {
        // assemble the correction matrix (Div*A^-1*Grad + S) and the solver, if not done yet
        InitPressureSolver();

        // right-hand side: -RHSContinuity + Div*VelocityPCorrIn + S*Pressure
        double[] rhs = new double[Pressure.Length];
        rhs.AccV(-1.0, RHSContinuity);
        VelocityDiv.SpMVpara(+1.0, VelocityPCorrIn, 1.0, rhs);
        Stab.SpMVpara(+1.0, Pressure, 1.0, rhs);

        // linear solve for the pressure correction
        m_PressureSolver.Solve(PressurePCorr, rhs);

        return PressurePCorr.L2Norm();
    }
}
/// <summary>
/// velocity update step:
/// FinalVelocity = VelocityPrediction - AapproxInverse*Grad(PressureCorrection).
/// </summary>
/// <param name="PressureCorrection">input: pressure correction</param>
/// <param name="VelocityPrediction">input: predicted (intermediate) velocity</param>
/// <param name="FinalVelocity">output: corrected velocity</param>
void VelocityUpdate(double[] PressureCorrection, double[] VelocityPrediction, double[] FinalVelocity) {
    // negative gradient of the pressure correction
    double[] negGradPCorr = new double[FinalVelocity.Length];
    PressureGrad.SpMVpara(-1.0, PressureCorrection, 0.0, negGradPCorr);

    // FinalVelocity = VelocityPrediction + AapproxInverse*(-Grad p')
    FinalVelocity.SetV(VelocityPrediction);
    AapproxInverse.SpMVpara(1.0, negGradPCorr, 1.0, FinalVelocity);
}
/// <summary>
/// SIMPLE velocity predictor: solves the under-relaxed momentum system
/// [ConvDiff + ((1-a)/a)*Aapprox]*VelocityPrediction
///     = RHSMomentum - Grad(PressureEstimateIn) + ((1-a)/a)*Aapprox*VelocityEstimateIn,
/// where a is the velocity under-relaxation factor <c>m_SIMPLEOptions.relax_v</c>.
/// </summary>
/// <param name="PressureEstimateIn">input: current pressure estimate</param>
/// <param name="VelocityEstimateIn">input: velocity estimate of the previous SIMPLE iteration</param>
/// <param name="VelocityPrediction">output: predicted (intermediate) velocity</param>
/// <param name="RHSMomentum">input: RHS of the momentum equation</param>
public void VelocityPredictor(double[] PressureEstimateIn, double[] VelocityEstimateIn, double[] VelocityPrediction, double[] RHSMomentum) {
    using (new FuncTrace()) {
        double m_relax_vel = m_SIMPLEOptions.relax_v;

        // build predictor matrix: convection-diffusion part ...
        var PredictorMX = ConvDiff.CloneAs();

        // ... plus under-relaxation term on the LHS
        if (m_relax_vel <= 0.0) {
            throw new ArithmeticException("Illegal velocity underrelaxation parameter: " + m_relax_vel);
        }
        PredictorMX.Acc((1 - m_relax_vel) / m_relax_vel, Aapprox);
#if DEBUG
        PredictorMX.CheckForNanOrInfM();
#endif

        // build RHS: b1 - grad*p (+ oldVelocities/dt)
        double[] RHS = new double [RHSMomentum.Length];
        RHS.AccV(1.0, RHSMomentum);
        PressureGrad.SpMVpara(-1.0, PressureEstimateIn, 1.0, RHS);

        // under-relaxation contribution on the RHS
        Aapprox.SpMVpara((1 - m_relax_vel) / m_relax_vel, VelocityEstimateIn, 1.0, RHS);

        // solve
        // NOTE(review): the solver obtained from m_SIMPLEOptions.ViscousSolver is disposed
        // here; confirm the options object hands out a fresh instance on each access.
        using (ISparseSolver solver = m_SIMPLEOptions.ViscousSolver) {
            solver.DefineMatrix(PredictorMX);
            solver.Solve(VelocityPrediction, RHS);
        }
    }
}
/// <summary>
/// computes the <paramref name="d"/>-th directional derivative of <paramref name="fin"/>
/// via the matrix/affine representation of the linearized derivative flux, writing the
/// result to <paramref name="fres"/> (previous content of <paramref name="fres"/> is discarded).
/// </summary>
/// <param name="fin">input field</param>
/// <param name="fres">output: derivative of <paramref name="fin"/></param>
/// <param name="d">spatial direction of the derivative</param>
/// <param name="fBnd">field used to evaluate boundary values of the flux</param>
void DerivativeByFluxLinear(SinglePhaseField fin, SinglePhaseField fres, int d, SinglePhaseField fBnd) {
    var derivOp = (new LinearDerivFlx(d)).Operator();

    // assemble matrix and affine offset of the linear(ized) operator
    var mtx = new MsrMatrix(fres.Mapping, fin.Mapping);
    var affine = new double[fres.Mapping.LocalLength];
    derivOp.ComputeMatrixEx(fin.Mapping, new DGField[] { fBnd }, fres.Mapping, mtx, affine, OnlyAffine: false);

    // fres = mtx*fin + affine
    fres.Clear();
    fres.CoordinateVector.Acc(1.0, affine);
    mtx.SpMVpara(1.0, fin.CoordinateVector, 1.0, fres.CoordinateVector);
}
/// <summary>
/// Approximate the inverse of the Schur matrix: two Poisson solves with a chain of
/// matrix-vector products in between; cf. "Finite Elements and Fast Iterative Solvers", p. 383.
/// </summary>
/// <param name="Xp">output: result of the approximate Schur-complement application</param>
/// <param name="Bp">input: right-hand side</param>
public void ApproxAndSolveSchur <U, V>(U Xp, V Bp)
    where U : IList <double>
    where V : IList <double> {
    var temp = new double[Xp.Count];
    var sol = new double[pGrad.RowPartitioning.LocalLength];

    // first Poisson solve: temp = PoissonMtx_T^-1 * Bp
    using (var solver = new ilPSP.LinSolvers.MUMPS.MUMPSSolver()) {
        solver.DefineMatrix(PoissonMtx_T);
        solver.Solve(temp, Bp);
    }

    // Schur convective part with scaling:
    // Xp = divVel * invVelMassMatrixSqrt * ConvDiff * invVelMassMatrix * pGrad * temp;
    // each factor is applied by piping the result through temp/sol.
    // (sol.Clear() is defensive; SpMVpara with beta=0 overwrites sol anyway.)
    pGrad.SpMVpara(1, temp, 0, sol);
    temp = sol.ToArray();
    sol.Clear();
    invVelMassMatrix.SpMVpara(1, temp, 0, sol);
    temp = sol.ToArray();
    sol.Clear();
    ConvDiff.SpMVpara(1, temp, 0, sol);
    temp = sol.ToArray();
    sol.Clear();
    invVelMassMatrixSqrt.SpMVpara(1, temp, 0, sol);
    temp = sol.ToArray();
    divVel.SpMVpara(1, temp, 0, Xp);

    // second Poisson solve, in-place on Xp.
    // NOTE(review): Solve(Xp, Xp) aliases solution and RHS — verify the MUMPS wrapper
    // supports in-place solves; also note the asymmetric use of invVelMassMatrix vs.
    // invVelMassMatrixSqrt above — confirm against the cited reference.
    using (var solver = new ilPSP.LinSolvers.MUMPS.MUMPSSolver()) {
        solver.DefineMatrix(PoissonMtx_H);
        solver.Solve(Xp, Xp);
    }
}
/// <summary>
/// checks whether the linear and nonlinear implementation of operator evaluation
/// are mathematically equal: evaluates the operator on a random argument once via
/// matrix/affine assembly and once via the nonlinear evaluator, and asserts that
/// the L2 distance of both results is below 1.0e-4.
/// </summary>
void LinearNonlinComparisonTest() {
    int L = this.bnd.CoordinateVector.Count();

    // need to assure to use the same quadrature order on both evaluation variants
    var volQrSch = (new CellQuadratureScheme(false, CellMask.GetFullMask(this.GridData, MaskType.Geometrical)))
        .AddFixedOrderRules(this.GridData, this.PolynomialDegree * 3);
    var edgQrSch = new EdgeQuadratureScheme(true, EdgeMask.GetFullMask(this.GridData, MaskType.Geometrical))
        .AddFixedOrderRules(this.GridData, this.PolynomialDegree * 3);

    for (int run = 0; run < 1; run++) {
        // setup a random test vector
        Random rnd = new Random();
        var TestArgument = this.bnd.CloneAs().CoordinateVector;
        for (int i = 0; i < L; i++) {
            TestArgument[i] = rnd.NextDouble();
        }

        Stopwatch lin = new Stopwatch();
        Stopwatch nol = new Stopwatch();

        // linear evaluation: LinResult = Mtx*TestArgument + affine
        CoordinateVector LinResult = this.ViscU_linear.CoordinateVector;
        LinResult.Clear();
        lin.Start();
        {
            var map = this.U.Mapping;
            var tempOperatorMtx = new MsrMatrix(map, map);
            var tempAffine = new double[L];
            Operator.ComputeMatrixEx(map, new DGField[] { this.mu }, map,
                tempOperatorMtx, tempAffine,
                volQuadScheme: volQrSch, edgeQuadScheme: edgQrSch);
            tempOperatorMtx.SpMVpara(1.0, TestArgument, 0.0, LinResult);
            LinResult.AccV(1.0, tempAffine);
        }
        lin.Stop();

        // nonlinear evaluation of the same operator on the same argument
        CoordinateVector NolResult = this.ViscU_nonlinear.CoordinateVector;
        NolResult.Clear();
        nol.Start();
        {
            var evaluator = Operator.GetEvaluatorEx(TestArgument.Mapping, new DGField[] { this.mu }, this.Residual.Mapping,
                volQrCtx: volQrSch, edgeQrCtx: edgQrSch);
            evaluator.Evaluate(1.0, 0.0, NolResult);
        }
        nol.Stop();

        // compare (MPI-global L2 distance)
        double L2Dist = GenericBlas.L2DistPow2(LinResult, NolResult).MPISum().Sqrt();
        Console.WriteLine("L2 dist of linear/Nonlinear evaluation comparison: {0}", L2Dist);

        // per-mode error report of the difference
        LinResult.Acc(-1.0, NolResult);
        foreach (SinglePhaseField DGfield in LinResult.Mapping.Fields) {
            for (int p = 0; p <= DGfield.Basis.Degree; p++) {
                double L2err_p = DGfield.L2NormPerMode(p);
                Console.WriteLine(" ERR{2} {1} \t{0}", L2err_p, DGfield.Identification, p);
            }
        }

        Console.WriteLine("Time linear {0}, time nonlinear: {1}", lin.Elapsed, nol.Elapsed);

        Assert.LessOrEqual(L2Dist, 1.0e-4, "L2 distance between linear and nonlinear evaluation of the same flux.");
    }
}
/// <summary>
/// projects some DG field onto this
/// </summary>
/// <param name="alpha">scaling of the projection result which is accumulated onto this field</param>
/// <param name="DGField">the DG field to project</param>
/// <param name="_cm">optional restriction to computational domain</param>
/// <remarks>
/// This method computes an exact
/// L2-projection of the DG-field onto the SpecFEM-space, so a global linear system, which contains all
/// DOF's, has to be solved.
/// In contrast, <see cref="ProjectDGFieldCheaply"/> performs an approximate projection which only involves
/// local operations for each cell.
/// </remarks>
public void ProjectDGField(double alpha, ConventionalDGField DGField, CellMask _cm = null) {
    using (var trx = new Transceiver(this.Basis)) {

        CellMask cm = _cm;
        if (cm == null) {
            cm = CellMask.GetFullMask(this.Basis.GridDat);
        }

        int J = m_Basis.GridDat.Cells.NoOfLocalUpdatedCells;
        var Trafo = m_Basis.GridDat.ChefBasis.Scaling;
        var C2N = m_Basis.CellNode_To_Node;
        var MtxM2N = m_Basis.m_Modal2Nodal;
        var CellData = this.Basis.GridDat.Cells;

        // compute RHS: accumulate the nodal representation of the DG field, cell by cell
        // ===========
        var b = MultidimensionalArray.Create(this.m_Basis.NoOfLocalNodes);
        {
            int[] _K = m_Basis.NodesPerCell;
            int L = m_Basis.ContainingDGBasis.Length;
            double[][] _NodalCoordinates = _K.Select(K => new double[K]).ToArray();   // temporary storage for nodal coordinates per cell
                                                                                      // 1st idx: ref. elm., 2nd idx: node index
            double[] ModalCoordinates = new double[L];

            foreach (Chunk cnk in cm) {
                int j0 = cnk.i0;
                int jE = cnk.JE;
                for (int j = j0; j < jE; j++) { // loop over cells...
                    int iKref = CellData.GetRefElementIndex(j);
                    double[] NodalCoordinates = _NodalCoordinates[iKref];
                    int K = _K[iKref];

                    if (!CellData.IsCellAffineLinear(j)) {
                        throw new NotSupportedException();
                    }

                    // Get DG coordinates (truncated/padded to the containing basis length L)
                    Array.Clear(ModalCoordinates, 0, L);
                    int Lmin = Math.Min(L, DGField.Basis.GetLength(j));
                    for (int l = 0; l < Lmin; l++) {
                        ModalCoordinates[l] = DGField.Coordinates[j, l];
                    }

                    var tr = 1.0 / Trafo[j];

                    // transform modal -> nodal (scaled by inverse cell transformation)
                    // NOTE(review): the ClearEntries + re-fill below repeats the fill loop
                    // just above verbatim — looks like redundant leftover code; verify and
                    // consider removing one of the two fills.
                    ModalCoordinates.ClearEntries();
                    for (int l = 0; l < Lmin; l++) {
                        ModalCoordinates[l] = DGField.Coordinates[j, l];
                    }
                    MtxM2N[iKref].GEMV(tr, ModalCoordinates, 0.0, NodalCoordinates, transpose: true);

                    // collect coordinates for cell 'j': scatter nodal values into the global RHS
                    for (int k = 0; k < K; k++) {
                        int _c2n = C2N[j, k];
                        b[_c2n] += NodalCoordinates[k];
                    }
                }
            }
        }

        // sum up contributions of shared nodes over MPI processes
        trx.AccumulateGather(b);

        if (_cm == null) {
            // full domain projection branch
            // +++++++++++++++++++++++++++++

            // solve the global mass-matrix system for the locally owned nodes
            var x = new double[this.Basis.NoOfLocalOwnedNodes];
            var solStat = m_Basis.MassSolver.Solve(x, b.ExtractSubArrayShallow(new int[] { 0 }, new int[] { this.Basis.NoOfLocalOwnedNodes - 1 }).To1DArray());

            {
                // verify convergence and residual of the solve
                if (solStat.Converged == false) {
                    throw new ArithmeticException("DG -> SpecFEM Projection failed because the Mass matrix solver did not converge.");
                }

                double[] chk = b.ExtractSubArrayShallow(new int[] { 0 }, new int[] { this.Basis.NoOfLocalOwnedNodes - 1 }).To1DArray();
                this.Basis.MassMatrix.SpMVpara(-1.0, x, 1.0, chk);
                double chk_nomr = chk.L2Norm();

                if (chk_nomr >= 1.0e-8) {
                    throw new ArithmeticException(string.Format("DG -> SpecFEM Projection failed: solver converged, but with high residual {0}.", chk_nomr.ToStringDot()));
                }
            }

            // accumulate the solution onto this field's coordinates
            m_Coordinates.ExtractSubArrayShallow(new int[] { 0 }, new int[] { this.Basis.NoOfLocalOwnedNodes - 1 }).AccVector(alpha, x);
        } else {
            // restricted domain projection branch
            // +++++++++++++++++++++++++++++++++++

            // collect the rows of the restricted mass matrix that are actually occupied
            List <int> OccupiedRows_Global = new List <int>();

            var MM = Basis.ComputeMassMatrix(cm);
            int i0 = MM.RowPartitioning.i0, iE = MM.RowPartitioning.iE;
            for (int i = i0; i < iE; i++) {
                if (MM.GetNoOfNonZerosPerRow(i) > 0) {
                    OccupiedRows_Global.Add(i);
                }
            }

            // compress the mass matrix and the RHS to the occupied rows only
            var CompressedPart = new Partitioning(OccupiedRows_Global.Count);
            var CompressedMM = new MsrMatrix(CompressedPart);

            MM.WriteSubMatrixTo(CompressedMM, OccupiedRows_Global, default(int[]), OccupiedRows_Global, default(int[]));

            var b_sub = new double[OccupiedRows_Global.Count];
            b_sub.AccV(1.0, b.To1DArray(), default(int[]), OccupiedRows_Global, b_index_shift: -i0);

            // solve the compressed system with a CG solver
            var x_sub = new double[b_sub.Length];

            var solver = new ilPSP.LinSolvers.monkey.CG();
            solver.MatrixType = ilPSP.LinSolvers.monkey.MatrixType.CCBCSR;
            solver.DevType = ilPSP.LinSolvers.monkey.DeviceType.CPU;
            solver.ConvergenceType = ConvergenceTypes.Absolute;
            solver.Tolerance = 1.0e-12;
            solver.DefineMatrix(CompressedMM);

            var solStat = solver.Solve(x_sub, b_sub.CloneAs());
            {
                // verify convergence and residual of the solve
                if (solStat.Converged == false) {
                    throw new ArithmeticException("DG -> SpecFEM Projection failed because the Mass matrix solver did not converge.");
                }

                var chk = b_sub;
                CompressedMM.SpMVpara(-1.0, x_sub, 1.0, chk);
                double chk_nomr = chk.L2Norm();

                if (chk_nomr >= 1.0e-8) {
                    throw new ArithmeticException(string.Format("DG -> SpecFEM Projection failed: solver converged, but with high residual {0}.", chk_nomr.ToStringDot()));
                }
            }

            // expand the compressed solution back to the full node vector and accumulate
            double[] x = new double[this.Basis.NoOfLocalOwnedNodes];
            x.AccV(1.0, x_sub, OccupiedRows_Global, default(int[]), acc_index_shift: -i0);

            m_Coordinates.ExtractSubArrayShallow(new int[] { 0 }, new int[] { this.Basis.NoOfLocalOwnedNodes - 1 }).AccVector(alpha, x);
        }

        // distribute the updated coordinates to neighbor processes
        trx.Scatter(this.m_Coordinates);
    }
}
/// <summary>
/// Single timestep of the agglomeration test: assembles the XDG operator matrix for
/// species 'B', applies cell agglomeration, evaluates du/dx = M^-1*(A*u + b) and
/// compares against the exact derivative; asserts the L2 errors are below 1.0e-6.
/// </summary>
protected override double RunSolverOneStep(int TimestepNo, double phystime, double dt) {
    Console.WriteLine("    Timestep # " + TimestepNo + ", phystime = " + phystime);

    LsUpdate(phystime);

    // assemble operator matrix
    MsrMatrix OperatorMatrix = new MsrMatrix(u.Mapping, u.Mapping);
    double[] Affine = new double[OperatorMatrix.RowPartitioning.LocalLength];
    MultiphaseCellAgglomerator Agg;
    MassMatrixFactory Mfact;

    // Agglomerator setup
    int quadOrder = Op.QuadOrderFunction(new int[] { u.Basis.Degree }, new int[0], new int[] { u.Basis.Degree });
    Agg = LsTrk.GetAgglomerator(new SpeciesId[] { LsTrk.GetSpeciesId("B") }, quadOrder, this.THRESHOLD);
    Console.WriteLine("Inter-Process agglomeration? " + Agg.GetAgglomerator(LsTrk.GetSpeciesId("B")).AggInfo.InterProcessAgglomeration);
    if (this.THRESHOLD > 0.01) {
        // agglomeration sub-tests only make sense when agglomeration is actually active
        TestAgglomeration_Extraploation(Agg);
        TestAgglomeration_Projection(quadOrder, Agg);
    }

    // operator matrix assembly
    Op.ComputeMatrixEx(LsTrk,
        u.Mapping, null, u.Mapping,
        OperatorMatrix, Affine, false, 0.0, true,
        Agg.CellLengthScales, LsTrk.GetSpeciesId("B"));
    Agg.ManipulateMatrixAndRHS(OperatorMatrix, Affine, u.Mapping, u.Mapping);

    // mass matrix factory
    Mfact = LsTrk.GetXDGSpaceMetrics(new SpeciesId[] { LsTrk.GetSpeciesId("B") }, quadOrder, 1).MassMatrixFactory;

    // Mass matrix/Inverse Mass matrix
    var Mass = Mfact.GetMassMatrix(u.Mapping, new double[] { 1.0 }, false, LsTrk.GetSpeciesId("B"));
    Agg.ManipulateMatrixAndRHS(Mass, default(double[]), u.Mapping, u.Mapping);
    var MassInv = Mass.InvertBlocks(OnlyDiagonal: true, Subblocks: true, ignoreEmptyBlocks: true, SymmetricalInversion: false);

    // test that operator depends only on B-species values
    double DepTest = LsTrk.Regions.GetSpeciesSubGrid("B").TestMatrixDependency(OperatorMatrix, u.Mapping, u.Mapping);
    Console.WriteLine("Matrix dependency test: " + DepTest);
    Assert.LessOrEqual(DepTest, 0.0);

    // diagnostic output
    Console.WriteLine("Number of Agglomerations (all species): " + Agg.TotalNumberOfAgglomerations);
    Console.WriteLine("Number of Agglomerations (species 'B'): " + Agg.GetAgglomerator(LsTrk.GetSpeciesId("B")).AggInfo.SourceCells.NoOfItemsLocally.MPISum());

    // evaluate the operator: du_dx = MassInv*(OperatorMatrix*u + Affine)
    double[] x = new double[Affine.Length];
    BLAS.daxpy(x.Length, 1.0, Affine, 1, x, 1);
    OperatorMatrix.SpMVpara(1.0, u.CoordinateVector, 1.0, x);
    MassInv.SpMV(1.0, x, 0.0, du_dx.CoordinateVector);
    Agg.GetAgglomerator(LsTrk.GetSpeciesId("B")).Extrapolate(du_dx.Mapping);

    // mark where species A and B are located at all
    Bmarker.AccConstant(1.0, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    Amarker.AccConstant(+1.0, LsTrk.Regions.GetSpeciesSubGrid("A").VolumeMask);
    Xmarker.AccConstant(+1.0, LsTrk.Regions.GetSpeciesSubGrid("X").VolumeMask);

    // compute error against the exact derivative on the B-subdomain
    ERR.Clear();
    ERR.Acc(1.0, du_dx_Exact, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    ERR.Acc(-1.0, du_dx, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    double L2Err = ERR.L2Norm(LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    Console.WriteLine("L2 Error: " + L2Err);

    XERR.Clear();
    XERR.GetSpeciesShadowField("B").Acc(1.0, ERR, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    double xL2Err = XERR.L2Norm();
    Console.WriteLine("L2 Error (in XDG space): " + xL2Err);

    // check error
    if (this.THRESHOLD > 0.01) {
        // without agglomeration, the error in very tiny cut-cells may be large over the whole cell
        // However, the error in the XDG-space should be small under all circumstances
        Assert.LessOrEqual(L2Err, 1.0e-6);
    }
    Assert.LessOrEqual(xL2Err, 1.0e-6);

    // NOTE(review): IsPassed checks xL2Err <= 1.0e-7 while the assert above uses 1.0e-6 —
    // confirm whether the stricter console-only threshold is intentional.
    bool IsPassed = ((L2Err <= 1.0e-6 || this.THRESHOLD <= 0.01) && xL2Err <= 1.0e-7);
    if (IsPassed) {
        Console.WriteLine("Test PASSED");
    } else {
        Console.WriteLine("Test FAILED: check errors.");
    }

    // return/Ende
    base.NoOfTimesteps = 17;
    dt = 0.3;
    return(dt);
}
/// <summary>
/// Single timestep of the agglomeration test (two-level-set variant): assembles the XDG
/// operator matrix for species 'B', applies cell agglomeration, evaluates
/// du/dx = M^-1*(A*u + b) and compares against the exact derivative. The error threshold
/// depends on the moment-fitting variant in use.
/// </summary>
protected override double RunSolverOneStep(int TimestepNo, double phystime, double dt) {
    Console.WriteLine("    Timestep # " + TimestepNo + ", phystime = " + phystime);

    LsUpdate(phystime);

    // assemble operator matrix
    MsrMatrix OperatorMatrix = new MsrMatrix(u.Mapping, u.Mapping);
    double[] Affine = new double[OperatorMatrix.RowPartitioning.LocalLength];

    // Agglomerator setup
    MultiphaseCellAgglomerator Agg = LsTrk.GetAgglomerator(new SpeciesId[] { LsTrk.GetSpeciesId("B") }, QuadOrder, this.THRESHOLD);

    // plausibility of cell length scales
    if (SER_PAR_COMPARISON) {
        TestLengthScales(QuadOrder, TimestepNo);
    }

    Console.WriteLine("Inter-Process agglomeration? " + Agg.GetAgglomerator(LsTrk.GetSpeciesId("B")).AggInfo.InterProcessAgglomeration);

    if (this.THRESHOLD > 0.01) {
        // agglomeration sub-tests only make sense when agglomeration is actually active
        TestAgglomeration_Extraploation(Agg);
        TestAgglomeration_Projection(QuadOrder, Agg);
    }
    CheckExchange(true);
    CheckExchange(false);

    // operator matrix assembly
    XSpatialOperatorMk2.XEvaluatorLinear mtxBuilder = Op.GetMatrixBuilder(base.LsTrk, u.Mapping, null, u.Mapping);
    mtxBuilder.time = 0.0;
    mtxBuilder.ComputeMatrix(OperatorMatrix, Affine);
    Agg.ManipulateMatrixAndRHS(OperatorMatrix, Affine, u.Mapping, u.Mapping);

    // mass matrix factory
    var Mfact = LsTrk.GetXDGSpaceMetrics(new SpeciesId[] { LsTrk.GetSpeciesId("B") }, QuadOrder, 1).MassMatrixFactory;

    // Mass matrix/Inverse Mass matrix
    var Mass = Mfact.GetMassMatrix(u.Mapping, new double[] { 1.0 }, false, LsTrk.GetSpeciesId("B"));
    Agg.ManipulateMatrixAndRHS(Mass, default(double[]), u.Mapping, u.Mapping);
    var MassInv = Mass.InvertBlocks(OnlyDiagonal: true, Subblocks: true, ignoreEmptyBlocks: true, SymmetricalInversion: false);

    // test that operator depends only on B-species values
    double DepTest = LsTrk.Regions.GetSpeciesSubGrid("B").TestMatrixDependency(OperatorMatrix, u.Mapping, u.Mapping);
    Console.WriteLine("Matrix dependency test: " + DepTest);
    Assert.LessOrEqual(DepTest, 0.0);

    // diagnostic output
    Console.WriteLine("Number of Agglomerations (all species): " + Agg.TotalNumberOfAgglomerations);
    Console.WriteLine("Number of Agglomerations (species 'B'): " + Agg.GetAgglomerator(LsTrk.GetSpeciesId("B")).AggInfo.SourceCells.NoOfItemsLocally.MPISum());

    // evaluate the operator: du_dx = MassInv*(OperatorMatrix*u + Affine)
    double[] x = new double[Affine.Length];
    BLAS.daxpy(x.Length, 1.0, Affine, 1, x, 1);
    OperatorMatrix.SpMVpara(1.0, u.CoordinateVector, 1.0, x);
    MassInv.SpMV(1.0, x, 0.0, du_dx.CoordinateVector);
    Agg.GetAgglomerator(LsTrk.GetSpeciesId("B")).Extrapolate(du_dx.Mapping);

    // mark where species A and B are located at all
    Bmarker.AccConstant(1.0, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    Amarker.AccConstant(+1.0, LsTrk.Regions.GetSpeciesSubGrid("A").VolumeMask);
    if (usePhi0 && usePhi1) {
        // species 'X' only exists when both level-sets are in use
        Xmarker.AccConstant(+1.0, LsTrk.Regions.GetSpeciesSubGrid("X").VolumeMask);
    }

    // compute error against the exact derivative on the B-subdomain
    ERR.Clear();
    ERR.Acc(1.0, du_dx_Exact, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    ERR.Acc(-1.0, du_dx, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    double L2Err = ERR.L2Norm(LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    Console.WriteLine("L2 Error: " + L2Err);

    XERR.Clear();
    XERR.GetSpeciesShadowField("B").Acc(1.0, ERR, LsTrk.Regions.GetSpeciesSubGrid("B").VolumeMask);
    double xL2Err = XERR.L2Norm();
    Console.WriteLine("L2 Error (in XDG space): " + xL2Err);

    // check error
    double ErrorThreshold = 1.0e-1;
    if (this.MomentFittingVariant == XQuadFactoryHelper.MomentFittingVariants.OneStepGaussAndStokes) {
        ErrorThreshold = 1.0e-6; // HMF is designed for such integrands and should perform close to machine accuracy; on general integrands, the precision is different.
    }

    bool IsPassed = ((L2Err <= ErrorThreshold || this.THRESHOLD <= ErrorThreshold) && xL2Err <= ErrorThreshold);
    if (IsPassed) {
        Console.WriteLine("Test PASSED");
    } else {
        Console.WriteLine("Test FAILED: check errors.");
    }

    if (TimestepNo > 1) {
        if (this.THRESHOLD > ErrorThreshold) {
            // without agglomeration, the error in very tiny cut-cells may be large over the whole cell
            // However, the error in the XDG-space should be small under all circumstances
            Assert.LessOrEqual(L2Err, ErrorThreshold, "DG L2 error of computing du_dx");
        }
        Assert.LessOrEqual(xL2Err, ErrorThreshold, "XDG L2 error of computing du_dx");
    }

    // return/Ende
    base.NoOfTimesteps = 17;
    dt = 0.3;
    return(dt);
}
/// <summary>
/// Arnoldi iteration (Krylov subspace construction) with modified Gram-Schmidt
/// orthogonalization.
/// </summary>
/// <param name="V">Output: Arnoldi vectors; n-by-(kact+1) on a full run, n-by-kact on early termination</param>
/// <param name="H">Output: upper Hessenberg matrix; (kact+1)-by-kact on a full run, kact-by-kact on early termination</param>
/// <param name="kact">Output: number of Arnoldi steps actually performed</param>
/// <param name="A">Input: (n-by-n) the matrix</param>
/// <param name="v0">Input: n-vector, start vector; must be nonzero</param>
/// <param name="k">Input: number of Arnoldi steps requested</param>
/// <param name="reorth">Input: (optional) set to true for a second re-orthogonalization pass</param>
/// <remarks>
/// based on MATLAB code (c) Ren-Cang Li, [email protected], 06/16/07;
/// note the MATLAB MGS loop 'for i = 1:j' (1-based, inclusive) translates to
/// 'for i = 0; i &lt;= j' in 0-based indexing — the loop must include i == j so that
/// the diagonal entry H[j,j] is computed and vh is orthogonalized against V[j].
/// </remarks>
public static void arnoldi(out double[][] V, out double[,] H, out int kact, MsrMatrix A, double[] v0, int k, bool reorth = false) {
    double eps = BLAS.MachineEps;
    int n = A.RowPartitioning.LocalLength;
    if (A.ColPartition.LocalLength != A.RowPartitioning.LocalLength) {
        throw new ArgumentException("the sizes of input matrix incorrect");
    }

    V = (k + 1).ForLoop(i => new double[n]);
    H = new double[k + 1, k];

    double nrm2 = v0.L2NormPow2().MPISum().Sqrt();
    if (nrm2 == 0.0) {
        throw new ArgumentException("arnoldi: input v0 is a zero vector");
    }
    double tol = n * eps;

    V[0].SetV(v0, 1 / nrm2); // v(:,1) = v0/nrm2

    for (int j = 0; j < k; j++) {
        double[] vh = new double[n];
        A.SpMVpara(1.0, V[j], 0.0, vh); // vh = A*V(:,j)
        double nrmvh = vh.L2NormPow2().MPISum().Sqrt();

        // orthogonalize vh against all previous Arnoldi vectors V[0..j] by MGS;
        // FIX: loop bound was 'i < j', which skipped V[j] and left H[j,j] unset.
        for (int i = 0; i <= j; i++) {
            double hij = GenericBlas.InnerProd(V[i], vh).MPISum();
            vh.AccV(-hij, V[i]); // vh = vh - hij*V(:,i)
            H[i, j] = hij;
        }
        if (reorth) {
            // optional second orthogonalization pass to reduce loss of orthogonality
            for (int i = 0; i <= j; i++) {
                double tmp = GenericBlas.InnerProd(V[i], vh).MPISum();
                vh.AccV(-tmp, V[i]); // vh = vh - tmp*V(:,i)
                H[i, j] = H[i, j] + tmp;
            }
        }

        H[j + 1, j] = vh.L2NormPow2().MPISum().Sqrt();
        if (H[j + 1, j] <= tol * nrmvh) {
            // happy breakdown: the Krylov subspace is invariant after kact steps;
            // (checked BEFORE normalizing so we never divide by a ~zero norm)
            //% ----- kact<k -------
            //%    V  n -by- kact   Arnoldi vectors
            //%    H  kact -by- kact
            kact = j + 1;
            V = V.GetSubVector(0, kact);
            H = H.GetSubMatrix(0, kact, 0, kact);
            return;
        }
        V[j + 1].SetV(vh, 1.0 / H[j + 1, j]);
    }

    kact = k;
    Debug.Assert(V.Length == kact + 1);
    //% ----- kact=k -------
    //%    V  n -by- (kact+1)  Arnoldi vectors
    //%    H  (kact+1) -by- kact
}
/// <summary>
/// computes derivatives in various ways (broken, by flux, by linearized flux, Laplacian
/// nonlinear/linear) and compares them against known analytical values; also checks cell
/// volumes, edge areas and the orthonormality of the DG basis in physical coordinates.
/// The overall pass/fail state is accumulated in <c>m_passed</c>.
/// </summary>
protected override double RunSolverOneStep(int TimestepNo, double phystime, double dt) {
    base.EndTime = 0.0;
    base.NoOfTimesteps = 0;

    int D = this.GridData.SpatialDimension;
    int J = this.GridData.Cells.NoOfLocalUpdatedCells;

    Console.WriteLine("DerivativeTest.exe, test case #" + GRID_CASE + " ******************************");

    // sealing test
    // =================
    TestSealing(this.GridData);

    // cell volume and edge area check, if possible
    // ===============================================
    if (this.CellVolume > 0) {
        // sum of absolute deviations of each cell volume from the known exact value
        double err = 0;
        double Treshold = 1.0e-10;
        for (int j = 0; j < J; j++) {
            err += Math.Abs(this.GridData.Cells.GetCellVolume(j) - this.CellVolume);
        }

        bool passed = (err < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine("Cell volume error: " + err + " passed? " + passed);
        Console.WriteLine("--------------------------------------------");
    }

    if (this.EdgeArea > 0) {
        // sum of absolute deviations of each edge area from the known exact value
        double err = 0;
        double Treshold = 1.0e-10;
        int E = this.GridData.Edges.Count;
        for (int e = 0; e < E; e++) {
            err += Math.Abs(this.GridData.Edges.GetEdgeArea(e) - this.EdgeArea);
        }

        bool passed = (err < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine("Edge area error: " + err + " passed? " + passed);
        Console.WriteLine("--------------------------------------------");
    }

    // Orthonormality of basis in physical coords
    // ==========================================
    {
        Basis Bs = this.f1.Basis;
        int N = Bs.Length;
        int degQuad = this.GridData.Cells.GetInterpolationDegree(0) * D + Bs.Degree + 3;

        // mass matrix: should be identity!
        MultidimensionalArray MassMatrix = MultidimensionalArray.Create(J, N, N);

        // compute mass matrix by quadrature.
        var quad = CellQuadrature.GetQuadrature(new int[] { N, N }, base.GridData,
            (new CellQuadratureScheme()).Compile(base.GridData, degQuad),
            delegate(int i0, int Length, QuadRule QR, MultidimensionalArray EvalResult) {
                NodeSet QuadNodes = QR.Nodes;
                MultidimensionalArray BasisVals = Bs.CellEval(QuadNodes, i0, Length);
                EvalResult.Multiply(1.0, BasisVals, BasisVals, 0.0, "jknm", "jkn", "jkm");
            },
            delegate(int i0, int Length, MultidimensionalArray ResultsOfIntegration) {
                MassMatrix.SetSubArray(ResultsOfIntegration, new int[] { i0, 0, 0 }, new int[] { i0 + Length - 1, N - 1, N - 1 });
            },
            cs: CoordinateSystem.Physical);
        quad.Execute();

        // check that mass matrix is Id: track the cell with the largest deviation
        int MaxErrorCell = -1;
        double MaxError = -1;
        for (int j = 0; j < J; j++) {
            MultidimensionalArray MassMatrix_j = MassMatrix.ExtractSubArrayShallow(j, -1, -1);
            MassMatrix_j.AccEye(-1.0);
            double Norm_j = MassMatrix_j.InfNorm();
            if (Norm_j > MaxError) {
                MaxError = Norm_j;
                MaxErrorCell = j;
            }
        }

        bool passed = (MaxError < 1.0e-8);
        m_passed = m_passed && passed;
        Console.WriteLine("Mass Matrix, maximum error in Cell #" + MaxErrorCell + ", mass matrix error norm: " + MaxError + " passed? " + passed);
    }

    // Broken Derivatives
    // =================
    double totalVolume = (new SubGrid(CellMask.GetFullMask(this.GridData))).Volume;

    for (int d = 0; d < D; d++) {
        // compute cell-local (broken) derivatives
        f1Gradient_Numerical[d].Clear();
        f1Gradient_Numerical[d].Derivative(1.0, f1, d);
        f2Gradient_Numerical[d].Clear();
        f2Gradient_Numerical[d].Derivative(1.0, f2, d);

        // subtract analytical
        var Errfield1 = f1Gradient_Numerical[d].CloneAs();
        Errfield1.Acc(-1, f1Gradient_Analytical[d]);

        var Errfield2 = f2Gradient_Numerical[d].CloneAs();
        Errfield2.Acc(-1, f2Gradient_Analytical[d]);

        Console.WriteLine("Broken Derivatives: ");

        double Treshold = 1.0e-10;
        if (AltRefSol) {
            Treshold = 1.0e-4; // not exactly polynomial, therefore a higher threshold
        }

        double err1_dx = Errfield1.L2Norm() / totalVolume;
        bool passed = (err1_dx < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| df1/dx{0}_Numerical - df1/dx{0}_Analytical ||_2 = {1}, passed? {2}", d, err1_dx, passed));

        double err2_dx = Errfield2.L2Norm() / totalVolume;
        passed = (err2_dx < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| df2/dx{0}_Numerical - df2/dx{0}_Analytical ||_2 = {1}, passed? {2}", d, err2_dx, passed));

        Console.WriteLine("--------------------------------------------");
    }

    // Flux Derivatives
    // =================
    for (int d = 0; d < D; d++) {
        // compute derivatives via (nonlinear) flux formulation
        f1Gradient_Numerical[d].Clear();
        f1Gradient_Numerical[d].DerivativeByFlux(1.0, f1, d);
        f2Gradient_Numerical[d].Clear();
        f2Gradient_Numerical[d].DerivativeByFlux(1.0, f2, d);

        f1Gradient_Numerical[d].CheckForNanOrInf(true, true, true);
        f2Gradient_Numerical[d].CheckForNanOrInf(true, true, true);

        // subtract analytical
        var Errfield1 = f1Gradient_Numerical[d].CloneAs();
        Errfield1.Acc(-1, f1Gradient_Analytical[d]);

        var Errfield2 = f2Gradient_Numerical[d].CloneAs();
        Errfield2.Acc(-1, f2Gradient_Analytical[d]);

        Console.WriteLine("Flux Derivatives: ");

        double Treshold = 1.0e-10;
        if (AltRefSol) {
            Treshold = 1.0e-4; // not exactly polynomial, therefore a higher threshold
        }

        double err1_dx = Errfield1.L2Norm() / totalVolume;
        bool passed = (err1_dx < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| df1/dx{0}_Numerical - df1/dx{0}_Analytical ||_2 = {1}, passed? {2}", d, err1_dx, passed));

        double err2_dx = Errfield2.L2Norm() / totalVolume;
        passed = (err2_dx < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| df2/dx{0}_Numerical - df2/dx{0}_Analytical ||_2 = {1}, passed? {2}", d, err2_dx, passed));

        Console.WriteLine("--------------------------------------------");
    }

    // Linear flux Derivatives
    // =======================
    for (int d = 0; d < D; d++) {
        // NOTE(review): 'korrekto' is assigned but never used — looks like a leftover
        // from debugging; consider removing.
        double[] korrekto = f1Gradient_Numerical[d].CoordinateVector.ToArray();

        // compute derivatives via the linearized flux (matrix/affine representation)
        DerivativeByFluxLinear(f1, f1Gradient_Numerical[d], d, f1);
        DerivativeByFluxLinear(f2, f2Gradient_Numerical[d], d, f2);

        // subtract analytical
        var Errfield1 = f1Gradient_Numerical[d].CloneAs();
        Errfield1.Acc(-1, f1Gradient_Analytical[d]);

        var Errfield2 = f2Gradient_Numerical[d].CloneAs();
        Errfield2.Acc(-1, f2Gradient_Analytical[d]);

        Console.WriteLine("Linear Flux Derivatives: ");

        double Treshold = 1.0e-10;
        if (AltRefSol) {
            Treshold = 1.0e-4; // not exactly polynomial, therefore a higher threshold
        }

        double err1_dx = Errfield1.L2Norm() / totalVolume;
        bool passed = (err1_dx < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| df1/dx{0}_Numerical - df1/dx{0}_Analytical ||_2 = {1}, passed? {2}", d, err1_dx, passed));

        double err2_dx = Errfield2.L2Norm() / totalVolume;
        passed = (err2_dx < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| df2/dx{0}_Numerical - df2/dx{0}_Analytical ||_2 = {1}, passed? {2}", d, err2_dx, passed));

        Console.WriteLine("--------------------------------------------");
    }

    // Laplacian, nonlinear
    // ====================
    if (!AltRefSol) {
        var Laplace = (new ipLaplace()).Operator(1);

        Laplace.Evaluate(new DGField[] { this.f1 }, new DGField[] { this.Laplace_f1_Numerical });
        Laplace.Evaluate(new DGField[] { this.f2 }, new DGField[] { this.Laplace_f2_Numerical });

        double Treshold = 1.0e-8;

        // subtract analytical
        var Errfield1 = Laplace_f1_Numerical.CloneAs();
        Errfield1.Acc(-1, Laplace_f1_Analytical);

        var Errfield2 = Laplace_f2_Numerical.CloneAs();
        Errfield2.Acc(-1, Laplace_f2_Analytical);

        double err_Lf1 = Errfield1.L2Norm() / totalVolume;
        bool passed = (err_Lf1 < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| /\\f1 Numerical - /\\f1 Analytical ||_2 = {0} (nonlinear evaluation), passed? {1}", err_Lf1, passed));

        double err_Lf2 = Errfield2.L2Norm() / totalVolume;
        passed = (err_Lf2 < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| /\\f2 Numerical - /\\f2 Analytical ||_2 = {0} (nonlinear evaluation), passed? {1}", err_Lf2, passed));

        Console.WriteLine("--------------------------------------------");
    }

    // Laplacian, linear
    // ====================
    if (!AltRefSol) {
        var Laplace = (new ipLaplace()).Operator(1);

        // assemble matrix and affine part of the Laplace operator once ...
        var LaplaceMtx = new MsrMatrix(this.f1.Mapping, this.Laplace_f1_Numerical.Mapping);
        var LaplaceAffine = new double[LaplaceMtx.RowPartitioning.LocalLength];

        Laplace.ComputeMatrix(this.f1.Mapping, null, this.Laplace_f1_Numerical.Mapping,
            LaplaceMtx, LaplaceAffine, false);

        // ... and apply it to both fields: result = LaplaceMtx*f + LaplaceAffine
        this.Laplace_f1_Numerical.CoordinateVector.SetV(LaplaceAffine);
        LaplaceMtx.SpMVpara(1.0, this.f1.CoordinateVector, 1.0, this.Laplace_f1_Numerical.CoordinateVector);

        this.Laplace_f2_Numerical.CoordinateVector.SetV(LaplaceAffine);
        LaplaceMtx.SpMVpara(1.0, this.f2.CoordinateVector, 1.0, this.Laplace_f2_Numerical.CoordinateVector);

        // subtract analytical
        var Errfield1 = Laplace_f1_Numerical.CloneAs();
        Errfield1.Acc(-1, Laplace_f1_Analytical);

        var Errfield2 = Laplace_f2_Numerical.CloneAs();
        Errfield2.Acc(-1, Laplace_f2_Analytical);

        double Treshold = 1.0e-8;

        double err_Lf1 = Errfield1.L2Norm() / totalVolume;
        bool passed = (err_Lf1 < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| /\\f1 Numerical - /\\f1 Analytical ||_2 = {0} (linear evaluation), passed? {1}", err_Lf1, passed));

        double err_Lf2 = Errfield2.L2Norm() / totalVolume;
        passed = (err_Lf2 < Treshold);
        m_passed = m_passed && passed;
        Console.WriteLine(string.Format("|| /\\f2 Numerical - /\\f2 Analytical ||_2 = {0} (linear evaluation), passed? {1}", err_Lf2, passed));

        Console.WriteLine("--------------------------------------------");
    }

    // finally...
    // =================
    if (m_passed) {
        Console.WriteLine("All tests passed. *****************************");
    } else {
        Console.WriteLine("Some error above threshold. *******************");
    }

    return(0.0); // return some artificial timestep
}
//Include in Euler Time Stepping......
/// <summary>
/// Evaluation of the operator on the subgrid by matrix-vector product:
/// accumulates dt*(subgridMatrix*subgridCoordinates) + subgridAffine onto <paramref name="k"/>.
/// There might be a more efficient method....
/// </summary>
/// <param name="k">accumulator for the result of A*y + b (existing content is kept)</param>
/// <param name="dt">time step size, scales the matrix-vector product</param>
protected void Evaluate(double[] k, double dt) {
    // k += dt*A*y
    subgridMatrix.SpMVpara <double[], double[]>(dt, m_SubgridMapping.subgridCoordinates, 1.0, k);
    //shift OperatorMatrix*subgridCoordinates+subgridAffine into mappingCopy
    // NOTE(review): the affine part is added with weight 1.0 here, while the sibling
    // overload Evaluate(u, k, dt) scales it by dt — confirm which scaling is intended.
    BLAS.daxpy(subgridAffine.Length, 1.0, subgridAffine, 1, k, 1);
}
/// <summary>
/// Evaluation of the operator on the subgrid by matrix-vector product:
/// accumulates dt*(SubgridOperatorMatr*u + SubgridAffine) onto <paramref name="k"/>.
/// There might be a more efficient method....
/// </summary>
/// <param name="u">input: subgrid coordinates to which the operator is applied</param>
/// <param name="k">accumulator for the result of dt*(A*y + b) (existing content is kept)</param>
/// <param name="dt">time step size, scales both the matrix-vector product and the affine part</param>
protected void Evaluate(SubgridCoordinateMapping u, double[] k, double dt) {
    // k += dt*A*u
    SubgridOperatorMatr.SpMVpara <double[], double[]>(dt, u.subgridCoordinates, 1.0, k);
    // k += dt*b
    BLAS.daxpy(SubgridAffine.Length, dt, SubgridAffine, 1, k, 1);
}