/// <summary>
/// Preconditioned Conjugate Gradient method, see <see cref="SolveConjugateGradient(Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.SparseMatrix,Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.Vector,Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.Vector,int,double)"/>.
/// Preconditioner: Jacobi (diagonal) preconditioner.
/// This method should generally be preferred over the unpreconditioned variant, due to its faster convergence.
/// </summary>
/// <param name="A">sparse matrix of the system, should be positive (semi-)definite</param>
/// <param name="b">right-hand side of the system</param>
/// <param name="x">initial guess for the solution; updated in place</param>
/// <param name="iMax">maximal number of iterations</param>
/// <param name="epsilon">relative residual threshold for convergence</param>
/// <returns>the (updated) solution vector x</returns>
public static Vector SolvePrecondConjugateGradient(SparseMatrix A, Vector b, Vector x, int iMax, double epsilon) {
    // Jacobi preconditioner: M = diagonal(A), so M^-1 has entries 1/A(i,i).
    // Since M^-1 is only ever applied to vectors, we keep just the inverted
    // diagonal as a vector and apply it with a component-wise product.
    Vector invDiagonal = A.DiagonalPreconditioner();
    Vector residual = b - (A*x);
    Vector direction = invDiagonal.CompProduct(residual);
    double deltaNew = residual*direction;
    double resNorm = Math.Sqrt(residual*residual)/A.NumRow;
    double resNorm0 = resNorm;
    for (int iter = 0; iter < iMax && resNorm > epsilon*resNorm0; iter++) {
        Vector q = A*direction;
        double stepSize = deltaNew/(direction*q);
        x.Add(stepSize*direction);
        // Residual is updated incrementally; a periodic full recomputation
        // (residual = b - A*x) would reduce floating-point error accumulation,
        // other correction schemes are possible as well.
        residual.Sub(stepSize*q);
        Vector preconditioned = invDiagonal.CompProduct(residual);
        double deltaOld = deltaNew;
        resNorm = Math.Sqrt(residual*residual)/A.NumRow;
        deltaNew = residual*preconditioned;
        double beta = deltaNew/deltaOld;
        direction = preconditioned + beta*direction;
    }
    return x;
}
/// <summary>
/// Conjugate Gradient method for solving a sparse linear system Ax=b with an iterative procedure.
/// Matrix A should be positive semi-definite, otherwise several solutions could exist and convergence is not guaranteed.
/// For a description of the algorithm see: "An Introduction to the Conjugate Gradient Method
/// Without the Agonizing Pain" by Jonathan Richard Shewchuk.
/// </summary>
/// <param name="A">sparse system matrix</param>
/// <param name="b">right-hand side of the system</param>
/// <param name="x">initial guess for x; updated in place</param>
/// <param name="iMax">maximal number of iterations</param>
/// <param name="epsilon">relative residual threshold for convergence</param>
/// <returns>the (updated) solution vector x</returns>
static Vector SolveConjugateGradient(SparseMatrix A, Vector b, Vector x, int iMax, double epsilon) {
    Vector residual = b - (A*x);     // r = b - Ax
    Vector direction = residual.Clone();  // initial search direction d = r
    double deltaNew = residual*residual;
    double resNorm = Math.Sqrt(deltaNew)/A.NumRow;
    double resNorm0 = resNorm;
    for (int iter = 0; iter < iMax && resNorm > epsilon*resNorm0; iter++) {
        Vector q = A*direction;
        double stepSize = deltaNew/(direction*q);
        x.Add(stepSize*direction);
        // Residual is updated incrementally; a periodic full recomputation
        // (residual = b - A*x) would reduce floating-point error accumulation,
        // other correction schemes are possible as well.
        residual.Sub(stepSize*q);
        double deltaOld = deltaNew;
        deltaNew = residual*residual;
        resNorm = Math.Sqrt(deltaNew)/A.NumRow;
        double beta = deltaNew/deltaOld;
        direction = residual + beta*direction;
    }
    return x;
}
/// <summary>
/// Subtracts vector b from this vector in place (no new vector is created).
/// </summary>
/// <param name="b">vector to subtract</param>
public void Sub(Vector b) {
    int length = array.Length;
    for (int k = 0; k < length; k++) {
        array[k] -= b.array[k];
    }
}
/// <summary>
/// Adds vector b to this vector in place (no new vector is created).
/// </summary>
/// <param name="b">vector to add</param>
public void Add(Vector b) {
    int length = array.Length;
    for (int k = 0; k < length; k++) {
        array[k] += b.array[k];
    }
}
/// <summary>
/// Multiplies two vectors component-wise and returns the result as a new vector.
/// </summary>
/// <param name="v">vector to multiply with</param>
/// <returns>new vector holding the component-wise products</returns>
public Vector CompProduct(Vector v) {
    var product = new double[array.Length];
    for (int k = 0; k < product.Length; k++) {
        product[k] = array[k]*v.array[k];
    }
    return new Vector(product);
}