Example no. 1
0
        /// <summary>
        /// Runs the standard Preconditioned Conjugate Gradient (PCG) iteration, mutating the solver state stored in
        /// this object's fields (solution, residual, direction, precondResidual, matrixTimesDirection, stepSize,
        /// resDotPrecondRes, paramBeta, iteration). The beta coefficient is delegated to <c>betaCalculation</c> and
        /// convergence estimation to <c>convergence</c>.
        /// </summary>
        /// <param name="maxIterations">Maximum number of PCG iterations to perform before giving up.</param>
        /// <param name="zeroVectorInitializer">Factory that allocates fresh vectors; presumably zero-initialized and
        /// sized to match the linear system — TODO confirm against callers.</param>
        /// <returns>Statistics reporting convergence, the number of iterations used, and the final residual norm
        /// ratio estimate.</returns>
        protected override IterativeStatistics SolveInternal(int maxIterations, Func <IVector> zeroVectorInitializer)
        {
            // In contrast to the source algorithm, we initialize s here. At each iteration it will be overwritten,
            // thus avoiding allocating & deallocating a new vector.
            precondResidual = zeroVectorInitializer();

            // d = inv(M) * r
            direction = zeroVectorInitializer();
            Preconditioner.SolveLinearSystem(residual, direction);

            // δnew = δ0 = r * d
            resDotPrecondRes = residual.DotProduct(direction);

            // The convergence and beta strategies must be initialized immediately after the first r and r*inv(M)*r are computed.
            convergence.Initialize(this);
            betaCalculation.Initialize(this);

            // This is also used as output (reported in the IterativeStatistics even if we never enter the loop).
            double residualNormRatio = double.NaN;

            // Allocate memory for other vectors, which will be reused during each iteration
            matrixTimesDirection = zeroVectorInitializer();

            for (iteration = 0; iteration < maxIterations; ++iteration)
            {
                // q = A * d
                Matrix.Multiply(direction, matrixTimesDirection);

                // α = δnew / (d * q)
                stepSize = resDotPrecondRes / direction.DotProduct(matrixTimesDirection);

                // x = x + α * d
                solution.AxpyIntoThis(direction, stepSize);

                // Normally the residual vector is updated as: r = r - α * q. However corrections might need to be applied.
                residualUpdater.UpdateResidual(this, residual);

                // s = inv(M) * r
                Preconditioner.SolveLinearSystem(residual, precondResidual);

                // δold = δnew (must be saved before overwriting resDotPrecondRes below; betaCalculation reads both)
                resDotPrecondResOld = resDotPrecondRes;

                // δnew = r * s
                resDotPrecondRes = residual.DotProduct(precondResidual);

                // At this point we can check if CG has converged and exit, thus avoiding the unnecessary operations that follow.
                residualNormRatio = convergence.EstimateResidualNormRatio(this);
                Debug.WriteLine($"PCG Iteration = {iteration}: residual norm ratio = {residualNormRatio}");
                if (residualNormRatio <= residualTolerance)
                {
                    return(new IterativeStatistics
                    {
                        AlgorithmName = name,
                        HasConverged = true,
                        NumIterationsRequired = iteration + 1,
                        ResidualNormRatioEstimation = residualNormRatio
                    });
                }

                // The default Fletcher-Reeves formula is: β = δnew / δold = (sNew * rNew) / (sOld * rOld)
                // However we could use a different one, e.g. for variable preconditioning Polak-Ribiere is usually better.
                paramBeta = betaCalculation.CalculateBeta(this);

                // d = s + β * d
                //TODO: benchmark the two options to find out which is faster
                //direction = preconditionedResidual.Axpy(direction, beta); //This allocates a new vector d, copies r and GCs the existing d.
                direction.LinearCombinationIntoThis(paramBeta, precondResidual, 1.0); //This performs additions instead of copying and needless multiplications.
            }

            // We reached the max iterations before PCG converged
            return(new IterativeStatistics
            {
                AlgorithmName = name,
                HasConverged = false,
                NumIterationsRequired = maxIterations,
                ResidualNormRatioEstimation = residualNormRatio
            });
        }
Example no. 2
0
        /// <summary>
        /// Runs a reorthogonalized Preconditioned Conjugate Gradient (PCG) iteration. Unlike the standard PCG, the new
        /// search direction is computed by <c>UpdateDirectionVector</c> against previously cached directions
        /// (<c>reorthoCache</c>) instead of the single-β Fletcher-Reeves/Polak-Ribiere update. Mutates the solver
        /// state stored in this object's fields (solution, residual, direction, precondResidual,
        /// matrixTimesDirection, stepSize).
        /// </summary>
        /// <param name="maxIterations">Maximum number of PCG iterations to perform before giving up.</param>
        /// <param name="zeroVectorInitializer">Factory that allocates fresh vectors; presumably zero-initialized and
        /// sized to match the linear system — TODO confirm against callers.</param>
        /// <returns>Statistics reporting convergence, the number of iterations used, and the final residual norm
        /// ratio estimate.</returns>
        protected override IterativeStatistics SolveInternal(int maxIterations, Func <IVector> zeroVectorInitializer)
        {
            // In contrast to the source algorithm, we initialize s here. At each iteration it will be overwritten,
            // thus avoiding allocating & deallocating a new vector.
            precondResidual = zeroVectorInitializer();

            // d = inv(M) * r
            direction = zeroVectorInitializer();
            Preconditioner.SolveLinearSystem(residual, direction);

            // q = A * d
            matrixTimesDirection = zeroVectorInitializer();
            Matrix.Multiply(direction, matrixTimesDirection);
            double directionTimesMatrixTimesDirection = direction.DotProduct(matrixTimesDirection);

            // Update the direction vectors cache
            reorthoCache.StoreDirectionData(this);

            // δnew = δ0 = r * d
            double resDotPrecondRes = residual.DotProduct(direction);

            // The convergence strategy must be initialized immediately after the first r and r*inv(M)*r are computed.
            convergence.Initialize(this);

            // This is also used as output (reported in the IterativeStatistics even if we never enter the loop).
            double residualNormRatio = double.NaN;

            //TODO: Find proof that this is correct. Why is it better than the default formula α = (r * s) / (d * q)?
            // α = (d * r) / (d * q) = (d * r) / (d * (A * d))
            // In the first iteration all multiplications have already been performed.
            stepSize = resDotPrecondRes / directionTimesMatrixTimesDirection;

            for (int iteration = 0; iteration < maxIterations; ++iteration)
            {
                // x = x + α * d
                solution.AxpyIntoThis(direction, stepSize);

                // Normally the residual vector is updated as: r = r - α * q. However corrections might need to be applied.
                residualUpdater.UpdateResidual(this, residual);

                // s = inv(M) * r
                Preconditioner.SolveLinearSystem(residual, precondResidual);

                // δold = δnew (must be saved before overwriting resDotPrecondRes below)
                resDotPrecondResOld = resDotPrecondRes;

                // δnew = r * s
                resDotPrecondRes = residual.DotProduct(precondResidual);

                // At this point we can check if CG has converged and exit, thus avoiding the unnecessary operations that follow.
                residualNormRatio = convergence.EstimateResidualNormRatio(this);
                if (residualNormRatio <= residualTolerance)
                {
                    return(new IterativeStatistics
                    {
                        AlgorithmName = name,
                        HasConverged = true,
                        NumIterationsRequired = iteration + 1,
                        ResidualNormRatioEstimation = residualNormRatio
                    });
                }

                // Update the direction vector using previous cached direction vectors.
                UpdateDirectionVector(precondResidual, direction);

                // q = A * d
                Matrix.Multiply(direction, matrixTimesDirection);
                directionTimesMatrixTimesDirection = direction.DotProduct(matrixTimesDirection);

                // Update the direction vectors cache
                reorthoCache.StoreDirectionData(this);

                //TODO: Find proof that this is correct. Why is it better than the default formula α = (r * s) / (d * q)?
                // α = (d * r) / (d * q) = (d * r) / (d * (A * d))
                stepSize = direction.DotProduct(residual) / directionTimesMatrixTimesDirection;
            }

            // We reached the max iterations before PCG converged
            return(new IterativeStatistics
            {
                AlgorithmName = name,
                HasConverged = false,
                NumIterationsRequired = maxIterations,
                ResidualNormRatioEstimation = residualNormRatio
            });
        }
        /// <summary>
        /// Runs a reorthogonalized Preconditioned Conjugate Gradient (PCG) iteration with stagnation detection.
        /// The new search direction is computed by <c>UpdateDirectionVector</c> against previously cached directions
        /// (<c>ReorthoCache</c>); <c>Stagnation</c> monitors the residual norm ratio history and can abort the solve
        /// early when progress stalls. Mutates the solver state stored in this object's fields (solution, residual,
        /// direction, precondResidual, matrixTimesDirection, stepSize, resDotPrecondRes, iteration).
        /// </summary>
        /// <param name="maxIterations">Maximum number of PCG iterations to perform before giving up.</param>
        /// <param name="zeroVectorInitializer">Vector factory required by the base-class signature. NOTE(review):
        /// unused here — this variant appears to assume the work vectors were allocated elsewhere; confirm.</param>
        /// <returns>Statistics reporting convergence/stagnation, the number of iterations used, and the final
        /// residual norm ratio estimate.</returns>
        protected override IterativeStatistics SolveInternal(int maxIterations, Func <IVector> zeroVectorInitializer)
        {
            iteration = 0;
            Preconditioner.SolveLinearSystem(residual, precondResidual);

            // d0 = s0 = inv(M) * r0
            //direction.CopyFrom(precondResidual);
            //Preconditioner.SolveLinearSystem(residual, direction);
            UpdateDirectionVector(precondResidual, direction);

            // q0 = A * d0
            Matrix.Multiply(direction, matrixTimesDirection);
            DirectionTimesMatrixTimesDirection = direction.DotProduct(matrixTimesDirection);

            // Update the direction vectors cache
            ReorthoCache.StoreDirectionData(this);

            // δnew = δ0 = r0 * s0 = r0 * d0
            resDotPrecondRes = residual.DotProduct(direction);

            // The convergence strategy must be initialized immediately after the first r and r*inv(M)*r are computed.
            Convergence.Initialize(this);
            Stagnation.StoreInitialError(Convergence.EstimateResidualNormRatio(this));

            // This is also used as output (reported in the IterativeStatistics even if we never enter the loop).
            double residualNormRatio = double.NaN;

            // α0 = (d0 * r0) / (d0 * q0) = (s0 * r0) / (d0 * (A * d0))
            stepSize = resDotPrecondRes / DirectionTimesMatrixTimesDirection;

            // The loop counter starts at 1 because iteration 0's direction, q and step size were computed above;
            // each pass applies the pending step and then prepares the next one.
            for (iteration = 1; iteration < maxIterations; ++iteration)
            {
                // x = x + α * d
                solution.AxpyIntoThis(direction, stepSize);

                // Normally the residual vector is updated as: r = r - α * q. However corrections might need to be applied.
                residualUpdater.UpdateResidual(this, residual);

                // s = inv(M) * r
                Preconditioner.SolveLinearSystem(residual, precondResidual);

                // δold = δnew (must be saved before overwriting resDotPrecondRes below)
                resDotPrecondResOld = resDotPrecondRes;

                // δnew = r * s
                resDotPrecondRes = residual.DotProduct(precondResidual);

                // At this point we can check if CG has converged and exit, thus avoiding the unnecessary operations that follow.
                residualNormRatio = Convergence.EstimateResidualNormRatio(this);
                //Debug.WriteLine($"Reorthogonalized PCG iteration = {iteration}: residual norm ratio = {residualNormRatio}");
                Stagnation.StoreNewError(residualNormRatio);
                bool hasStagnated = Stagnation.HasStagnated();
                // Convergence takes precedence over stagnation: check the tolerance first.
                if (residualNormRatio <= ResidualTolerance)
                {
                    return(new IterativeStatistics
                    {
                        AlgorithmName = name,
                        HasConverged = true,
                        HasStagnated = false,
                        NumIterationsRequired = iteration + 1,
                        ResidualNormRatioEstimation = residualNormRatio
                    });
                }
                if (hasStagnated)
                {
                    return(new IterativeStatistics
                    {
                        AlgorithmName = name,
                        HasConverged = false,
                        HasStagnated = true,
                        NumIterationsRequired = iteration + 1,
                        ResidualNormRatioEstimation = residualNormRatio
                    });
                }

                // Update the direction vector using previous cached direction vectors.
                UpdateDirectionVector(precondResidual, direction);

                // q = A * d
                Matrix.Multiply(direction, matrixTimesDirection);
                DirectionTimesMatrixTimesDirection = direction.DotProduct(matrixTimesDirection);

                // Update the direction vectors cache
                ReorthoCache.StoreDirectionData(this);

                // α = (d * r) / (d * q) = (d * r) / (d * (A * d))
                stepSize = direction.DotProduct(residual) / DirectionTimesMatrixTimesDirection;
            }

            // We reached the max iterations before PCG converged
            return(new IterativeStatistics
            {
                AlgorithmName = name,
                HasConverged = false,
                HasStagnated = false,
                NumIterationsRequired = maxIterations,
                ResidualNormRatioEstimation = residualNormRatio
            });
        }