        /// <summary>
        /// Preconditioned Conjugate Gradient method; see <see cref="SolveConjugateGradient(Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.SparseMatrix,Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.Vector,Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.Vector,int,double)"/> for the unpreconditioned variant.
        /// Preconditioner: Jacobi (diagonal) preconditioner.
        /// This method should generally be preferred due to its faster convergence.
        /// </summary>
        /// <param name="A">coefficient matrix; should be symmetric and positive-definite</param>
        /// <param name="b">right-hand side vector</param>
        /// <param name="x">initial guess; updated in place</param>
        /// <param name="iMax">maximum number of iterations</param>
        /// <param name="epsilon">relative residual tolerance used as stopping criterion</param>
        /// <returns>the (approximate) solution vector x</returns>
        public static Vector SolvePrecondConjugateGradient(SparseMatrix A, Vector b, Vector x, int iMax, double epsilon) {
            // create the Jacobi (diagonal) preconditioner:
            // M = diag(A), so M^-1(i,i) = 1/A(i,i) for i = 0,...,n-1.
            // Since M^-1 is only ever applied to vectors, it suffices to store its diagonal as a vector.
            Vector Minv = A.DiagonalPreconditioner();

            Vector r = b - (A*x);
            Vector d = Minv.CompProduct(r);
            double deltaNew = r*d;

            double normRes = Math.Sqrt(r*r) / A.NumRow;
            double normRes0 = normRes;
            int i = 0;
            while ((i++) < iMax && normRes > epsilon * normRes0) {
                Vector q = A*d;
                double alpha = deltaNew/(d*q);
                x.Add(alpha*d);
                // Update the residual incrementally (r -= alpha*q); the commented-out branch below would
                // periodically recompute r = b - A*x to limit accumulation of floating-point error.
//                if (i == Math.Max(50, (int)Math.Sqrt(A.NumRow)))
//                    r = b - (A*x);
//                else 
                    r.Sub(alpha*q);
                Vector s = Minv.CompProduct(r);
                double deltaOld = deltaNew;
                normRes = Math.Sqrt(r*r) / A.NumRow; 
                deltaNew = r*s;
                double beta = deltaNew/deltaOld;
                d = s + beta*d;
            }
            return x;
        }
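
        // The preconditioning step above, s = Minv.CompProduct(r), is just a component-wise division of the
        // residual by the diagonal of A. The method below is a minimal illustrative sketch of that operation
        // on plain arrays; it is not part of the solver, and the parameter names are hypothetical.
        static double[] ApplyJacobiPreconditionerSketch(double[] diagonalOfA, double[] residual) {
            var s = new double[residual.Length];
            for (int k = 0; k < residual.Length; k++)
                s[k] = residual[k] / diagonalOfA[k]; // s = M^-1 r, with M = diag(A)
            return s;
        }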
        /// <summary>
        ///     Conjugate Gradient method for iteratively solving a sparse linear system of the form Ax=b.
        ///     Matrix A should be symmetric and positive-definite; if it is only positive semi-definite,
        ///     several solutions may exist and convergence is not guaranteed.
        ///     For a description of the algorithm see "An Introduction to the Conjugate Gradient Method
        ///     Without the Agonizing Pain" by Jonathan Richard Shewchuk.
        /// </summary>
        /// <param name="A">coefficient matrix</param>
        /// <param name="b">right-hand side vector</param>
        /// <param name="x">initial guess for x; updated in place</param>
        /// <param name="iMax">maximum number of iterations</param>
        /// <param name="epsilon">relative residual tolerance used as stopping criterion</param>
        /// <returns>the (approximate) solution vector x</returns>
         static Vector SolveConjugateGradient(SparseMatrix A, Vector b, Vector x, int iMax, double epsilon) {
            Vector r = b - (A*x); // r= b-Ax
            Vector d = r.Clone(); // d=r
            double deltaNew = r*r;
            
            double normRes = Math.Sqrt(deltaNew)/A.NumRow;
            double normRes0 = normRes;
            int i = 0;
            while ((i++) < iMax && normRes > epsilon*normRes0) {
                Vector q = A*d;
                double alpha = deltaNew/(d*q);
                x.Add(alpha*d);
                // Update the residual incrementally (r -= alpha*q); the commented-out branch below would
                // periodically recompute r = b - A*x to limit accumulation of floating-point error.
//                if (i == Math.Max(50,(int)Math.Sqrt(A.NumRow)))
//                    r = b - (A*x);
//                else 
                    r.Sub(alpha*q);
                double deltaOld = deltaNew;
                deltaNew = r*r;
                normRes = Math.Sqrt(deltaNew)/A.NumRow; 
                double beta = deltaNew/deltaOld;
                d = r + beta*d;
            }

            return x;
        }
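
        // For cross-checking, the same iteration can be written against a plain dense double[,] matrix.
        // This is only an illustrative sketch of the textbook algorithm (it mirrors the loop above); it is
        // not part of the original API and is far less efficient than the sparse version.
        static double[] SolveConjugateGradientDenseSketch(double[,] A, double[] b, double[] x, int iMax, double epsilon) {
            int n = b.Length;
            var r = new double[n];
            var d = new double[n];
            for (int row = 0; row < n; row++) {
                double ax = 0;
                for (int col = 0; col < n; col++) ax += A[row, col]*x[col];
                r[row] = b[row] - ax; // r = b - Ax
                d[row] = r[row];      // d = r
            }
            double deltaNew = DotSketch(r, r);
            double normRes0 = Math.Sqrt(deltaNew)/n;
            double normRes = normRes0;
            int i = 0;
            while ((i++) < iMax && normRes > epsilon*normRes0) {
                var q = new double[n]; // q = A d
                for (int row = 0; row < n; row++)
                    for (int col = 0; col < n; col++)
                        q[row] += A[row, col]*d[col];
                double alpha = deltaNew/DotSketch(d, q);
                for (int k = 0; k < n; k++) {
                    x[k] += alpha*d[k];
                    r[k] -= alpha*q[k];
                }
                double deltaOld = deltaNew;
                deltaNew = DotSketch(r, r);
                normRes = Math.Sqrt(deltaNew)/n;
                double beta = deltaNew/deltaOld;
                for (int k = 0; k < n; k++) d[k] = r[k] + beta*d[k];
            }
            return x;
        }

        static double DotSketch(double[] u, double[] v) {
            double sum = 0;
            for (int k = 0; k < u.Length; k++) sum += u[k]*v[k];
            return sum;
        }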
        /// <summary>
        /// Tests the preconditioned Conjugate Gradient solver on a 10x10 tridiagonal system whose exact solution is the all-ones vector.
        /// </summary>
        public static void TestConjugateGradientMethod2() {
            var values = new double[28] {
                4, 1,
                1, 16, 1,
                1, 64, 1,
                1, 256, 1,
                1, 1024, 1,
                1, 4096, 1,
                1, 16384, 1,
                1, 65536, 1,
                1, 262144, 1,
                1, 1048576
            };
            var colInd = new int[28] {
                0, 1,
                0, 1, 2,
                1, 2, 3,
                2, 3, 4,
                3, 4, 5,
                4, 5, 6,
                5, 6, 7,
                6, 7, 8,
                7, 8, 9,
                8, 9
            };
            var rowPtr = new int[11] {0, 2, 5, 8, 11, 14, 17, 20, 23, 26, 28};

            var A = new SparseMatrix(values, colInd, rowPtr, rowPtr.Length - 1);

            var b = new double[10] {5, 18, 66, 258, 1026, 4098, 16386, 65538, 262146, 1048577};

            double[] result1 = SolvePrecondConjugateGradient(A, b, new double[10], 1000, 1E-6);
            string res = result1.Aggregate("", (s, t) => string.Format("{0},\t{1}", s, t));

            // Expected solution: x = (1, 1, ..., 1), since each row of A sums to the corresponding entry of b.
            Console.WriteLine(res);
        }
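
        // The arrays above use the compressed sparse row (CRS) layout that SparseMatrix appears to expect:
        // for row i, the nonzero values are values[rowPtr[i] .. rowPtr[i+1]-1], with their column indices at
        // the same positions in colInd. As a small illustration (not part of the original code, and not the
        // SparseMatrix API), a single row-times-vector product in that layout looks like this:
        static double CrsRowTimesVectorSketch(double[] values, int[] colInd, int[] rowPtr, int row, double[] x) {
            double sum = 0;
            for (int k = rowPtr[row]; k < rowPtr[row + 1]; k++)
                sum += values[k]*x[colInd[k]];
            return sum;
        }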
        /// <summary>
        /// Tests both solvers on the small 2x2 system A = {{4,1},{1,3}}, b = (1,2), whose exact solution is (1/11, 7/11) ≈ (0.0909, 0.6364).
        /// </summary>
        public static void TestConjugateGradientMethod() {
            // matrix A = {{4,1},{1,3}}

            var values = new double[4] {4, 1, 1, 3};
            var col_ind = new int[4] {0, 1, 0, 1};
            var row_ptr = new int[3] {0, 2, 4};

            var b = new double[2] {1, 2};

            var xStart = new double[2] {2, 1};
            var A = new SparseMatrix(values, col_ind, row_ptr, 2);

            double[] res = SolveConjugateGradient(A, b, xStart, 1000, 1E-4);
            Console.WriteLine("Solution: x: {0}, y={1}", res[0], res[1]);
            res = SolvePrecondConjugateGradient(A, b, xStart, 1000, 1E-4);
            Console.WriteLine("SolutionPreconditioned: x: {0}, y={1}", res[0], res[1]);
        }
 /// <summary>
 ///     Preconditioned Conjugate Gradient method, where the preconditioner M is the diagonal of A (Jacobi Preconditioner), <seealso cref="SolvePrecondConjugateGradient(Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.SparseMatrix,Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.Vector,Microsoft.Msagl.Core.Layout.ProximityOverlapRemoval.ConjugateGradient.Vector,int,double)"/>
 /// </summary>
 /// <param name="A">coefficient matrix; should be symmetric and positive-definite</param>
 /// <param name="b">right-hand side vector</param>
 /// <param name="x">initial guess; not modified (a copy is used internally)</param>
 /// <param name="iMax">maximum number of iterations</param>
 /// <param name="epsilon">relative residual tolerance used as stopping criterion</param>
 /// <returns>the (approximate) solution as an array</returns>
 public static double[] SolvePrecondConjugateGradient(SparseMatrix A, double[] b, double[] x, int iMax,
                                                      double epsilon) {
     return
         SolvePrecondConjugateGradient(A, new Vector(b), new Vector((double[]) x.Clone()), iMax, epsilon).array;
 }
         void ConstructLinearSystemFromMajorization(out SparseMatrix Lw, out SparseMatrix Lx) {
            int numEdges = GetNumberOfEdges(NodeVotings);

#if SHARPKIT //SharpKit/Colin: multidimensional arrays not supported in JavaScript - https://code.google.com/p/sharpkit/issues/detail?id=340
            var edgesDistance = new double[numEdges];
            var edgesWeight = new double[numEdges];
#else            
            var edges = new double[numEdges,2];
#endif

            // List of undirected (symmetric) edges: [,0] = distance, [,1] = weight. The edge set has to be
            // symmetric, i.e. every edge must appear once in each direction.

            // Each element of the array corresponds to a node; its list is the adjacency list of that node.
            // int[0]: node id of the adjacent node, int[1]: id of the connecting edge.
            var adjLists = new List<int[]>[NodeVotings.Count];

            int row = 0;
            int edgeId = 0;
            foreach (NodeVoting nodeVoting in NodeVotings) {
                int targetId = nodeVoting.VotedNodeIndex;
                int numAdj = 0;
                nodeVoting.VotingBlocks.ForEach(block => numAdj += block.Votings.Count);
                var currentAdj = new List<int[]>(numAdj);
                if (row != targetId)
                    throw new ArgumentOutOfRangeException("VotedNodeIndex", "VotedNodeIndex must be consecutive, starting from 0");
                adjLists[row] = currentAdj;
                foreach (VoteBlock block in nodeVoting.VotingBlocks) {
                    foreach (Vote voting in block.Votings) {
                        //corresponds to an edge or an entry in the corresponding matrix
                        int sourceId = voting.VoterIndex;

#if SHARPKIT
                        edgesDistance[edgeId] = voting.Distance;
                        edgesWeight[edgeId] = voting.Weight * block.BlockWeight;
#else
                        edges[edgeId, 0] = voting.Distance;
                        edges[edgeId, 1] = voting.Weight*block.BlockWeight;
#endif
                        currentAdj.Add(new[] { sourceId, edgeId });
                        edgeId++;
                    }
                }
                row++;
            }

            //TODO check if sorting can be removed to make it faster.

            // sort the adjacency lists so that the sparse matrices, whose column indices must appear in order within each row, can be built directly
            for (int rowC = 0; rowC < adjLists.Length; rowC++) {
                List<int[]> adjList = adjLists[rowC];
                // add a self loop, since the diagonal entry of the matrix needs a slot as well
                adjList.Add(new[] {rowC, -1});

                adjList.Sort((a, b) => a[0].CompareTo(b[0]));
            }
            numEdges += adjLists.Length; //self loop added to every node, so number of edges has increased by n.

            var diagonalPos = new int[adjLists.Length];
            // diagonalPos[i] points to the position of the i-th diagonal entry in the flattened value array
            Lw = new SparseMatrix(numEdges, adjLists.Length, adjLists.Length);
            Lx = new SparseMatrix(numEdges, adjLists.Length, adjLists.Length);

            int valPos = 0;
            for (int rowId = 0; rowId < adjLists.Length; rowId++) {
                List<int[]> adjList = adjLists[rowId];
                double sumLw = 0;
                double sumLx = 0;
                foreach (var node in adjList) {
                    int colId = node[0];
                    Lw.ColInd()[valPos] = colId;
                    Lx.ColInd()[valPos] = colId;
                    if (rowId == colId) {
                        // diagonal entry of the matrix
                        diagonalPos[rowId] = valPos; // the value is filled in later from the accumulated row sums
                    }
                    else {
#if SHARPKIT
                        double distance = edgesDistance[node[1]]; // distance(rowId,colId)
                        double weight = edgesWeight[node[1]]; // weight(rowId,colId)
#else
                        double distance = edges[node[1], 0]; // distance(rowId,colId)
                        double weight = edges[node[1], 1]; // weight(rowId,colId)
#endif

                        // weighted Laplacian: the off-diagonal entry is the negated weight
                        Lw.Values()[valPos] = -weight;
                        sumLw += weight;

                        // Lx is similar to the weighted Laplacian, but each entry is additionally scaled by
                        // the target distance over the current Euclidean distance.
                        //TODO extract the distance computation outside of this step to make the procedure faster
                        double euclid = (Positions[rowId] - Positions[colId]).Length;
                        double entry = -weight*distance/euclid;
                        Lx.Values()[valPos] = entry;
                        sumLx += entry;
                    }
                    valPos++;
                }
                // set each diagonal to the negated sum of the off-diagonal entries, so that every row sums to zero
                Lw.Values()[diagonalPos[rowId]] = sumLw;
                Lx.Values()[diagonalPos[rowId]] = -sumLx;

                // mark the row end in the flattened value arrays
                Lw.RowPtr()[rowId + 1] = valPos;
                Lx.RowPtr()[rowId + 1] = valPos;
            }
        }
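
        // Summary of the fill pattern above: for a symmetric pair (i, j) with weight w and target distance d,
        // the off-diagonal entries are Lw[i,j] = -w and Lx[i,j] = -w*d/||p_i - p_j||, and the diagonal entries
        // are chosen so that every row of Lw and Lx sums to zero. The method below is only an illustrative
        // sketch of that per-pair contribution; its name and parameters are hypothetical.
        static void PairContributionSketch(double weight, double targetDistance, double euclideanDistance,
                                           out double lwOffDiagonal, out double lxOffDiagonal) {
            lwOffDiagonal = -weight;                                  // adds +weight to Lw[i,i]
            lxOffDiagonal = -weight*targetDistance/euclideanDistance; // adds -lxOffDiagonal to Lx[i,i]
        }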