public void Optimize(Dictionary<double, double> values)
        {
            var n = _f.Functions.Count();
            var xs = values.Select(v => v.Key).ToList();
            var ys = values.Select(v => v.Value).ToList();

            // Evaluate every basis function at all sample points.
            var fs = new List<List<double>>(n);
            for (var i = 0; i < n; i++)
            {
                fs.Add(_f.Functions[i].Evaluate(xs));
            }

            // Build the normal equations of the linear least-squares problem.
            var matrix = new DenseMatrix(n, n);
            var vector = new DenseVector(n);
            for (var i = 0; i < n; i++)
            {
                for (var j = 0; j < n; j++)
                {
                    matrix[i, j] = fs[i].ScalarProduct(fs[j]);
                }
                vector[i] = ys.ScalarProduct(fs[i]);
            }

            // Solve for the linear parameters by explicit inversion.
            var matrixInverse = matrix.Inverse();
            var result = matrixInverse * vector;

            for (var i = 0; i < n; i++)
            {
                _f.LinearParameters[i].Value = result[i];
            }
        }
Example #2
 public FedKF(KF[] filters, DenseMatrix dcm, DenseMatrix covariances)
 {
     _filters = filters;
     _filteredSignals = new DenseMatrix(_filters.Length, 1);
     _dcm = dcm;
     _dcmt = dcm.Transpose();
     _cInv = covariances.Inverse();
 }
        public void Optimize(Dictionary<double, double> values)
        {
            var ys = new List<double>();
            var fs = new List<double>();
            var gs = new List<double>();
            var hs = new List<double>();

            foreach (var value in values)
            {
                ys.Add(value.Value);
                fs.Add(_lppl.F(value.Key));
                gs.Add(_lppl.G(value.Key));
                hs.Add(_lppl.H(value.Key));
            }

            var N = values.Count;
            var sfs = fs.Sum();
            var sgs = gs.Sum();
            var shs = hs.Sum();
            var sfsfs = fs.ScalarProduct(fs);
            var sgsgs = gs.ScalarProduct(gs);
            var shshs = hs.ScalarProduct(hs);
            var sfsgs = fs.ScalarProduct(gs);
            var sfshs = fs.ScalarProduct(hs);
            var sgshs = gs.ScalarProduct(hs);

            var sys = ys.Sum();
            var sysfs = ys.ScalarProduct(fs);
            var sysgs = ys.ScalarProduct(gs);
            var syshs = ys.ScalarProduct(hs);

            var matrixArray = new[]
            {
                N, sfs, sgs, shs,
                sfs, sfsfs, sfsgs, sfshs,
                sgs, sfsgs, sgsgs, sgshs,
                shs, sfshs, sgshs, shshs
            };

            // Note: this DenseMatrix(rows, columns, storage) overload reads the array as column-major
            // storage; the matrix assembled here is symmetric, so the ordering does not matter.
            var matrix = new DenseMatrix(4, 4, matrixArray);

            var matrixInverse = matrix.Inverse();

            var vector = new DenseVector(new[] { sys, sysfs, sysgs, syshs });

            var result = matrixInverse * vector;
            _lppl.A = result[0];
            _lppl.B = result[1];
            _lppl.C1 = result[2];
            _lppl.C2 = result[3];
        }
Example #4
        public static Matrix<double> WorldToImagePoints(this Matrix<double> worldPoints, DenseMatrix cameraCalibration, DenseVector posePosition, DenseMatrix poseRotation)
        {
            return
            cameraCalibration.Multiply(
                        poseRotation.Inverse()
                            .Multiply(worldPoints)
                            .Translate(-posePosition)
                            .ProjectCamCenteredWorldToHomogImagePoints());

            /*This version is consistent with my DPixelToWorld but different from openCV ProjectPoints
             * cameraCalibration.Multiply(
                poseRotation.Inverse()
                .Multiply(worldPoints.Translate(-posePosition))
                .ProjectCamCenteredWorldToHomogImagePoints());*/
        }
        public void Optimize(Dictionary<double, double> values)
        {
            var ys = values.Select(v => v.Value).ToList();
            var fs = _f.Functions.Select(f => values.Select(v => NaNToZero(f(v.Key))).ToList()).ToList();

            var matrix = new DenseMatrix(_f.LinearCount, _f.LinearCount);
            var vector = new DenseVector(_f.LinearCount);
            for (var i = 0; i < _f.LinearCount; i++)
            {
                for (var j = 0; j < _f.LinearCount; j++)
                {
                    matrix[i, j] = fs[i].ScalarProduct(fs[j]);
                }
                vector[i] = ys.ScalarProduct(fs[i]);
            }

            var matrixInverse = matrix.Inverse();
            var result = matrixInverse * vector;

            for (var i = 0; i < _f.LinearCount; i++)
            {
                _f.SetA(i, result[i]);
            }
        }
Example #6
File: WRMF.cs Project: bemde/MyMediaLite
        /// <summary>Optimizes the specified data</summary>
        /// <param name="data">data</param>
        /// <param name="W">W</param>
        /// <param name="H">H</param>
        protected virtual void Optimize(IBooleanMatrix data, Matrix<float> W, Matrix<float> H)
        {
            var HH          = new Matrix<double>(num_factors, num_factors);
            var HC_minus_IH = new Matrix<double>(num_factors, num_factors);
            var HCp         = new double[num_factors];

            var m = new DenseMatrix(num_factors, num_factors);

            // source code comments are in terms of computing the user factors
            // works the same with users and items exchanged
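            // Closed-form ridge-regression update computed below for each user u (WRMF / implicit ALS):
            //   W[u] = (H^T H + H^T (C_u - I) H + reg * I)^-1 * H^T * C_u * p(u)
            // where C_u holds the per-item confidence weights and p(u) the binary feedback of user u.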

            // (1) create HH in O(f^2|Items|)
            // HH is symmetric
            for (int f_1 = 0; f_1 < num_factors; f_1++)
                for (int f_2 = 0; f_2 < num_factors; f_2++)
                {
                    double d = 0;
                    for (int i = 0; i < H.dim1; i++)
                        d += H[i, f_1] * H[i, f_2];
                    HH[f_1, f_2] = d;
                }
            // (2) optimize all U
            // HC_minus_IH is symmetric
            for (int u = 0; u < W.dim1; u++)
            {
                var row = data.GetEntriesByRow(u);
                // create HC_minus_IH in O(f^2|S_u|)
                for (int f_1 = 0; f_1 < num_factors; f_1++)
                    for (int f_2 = 0; f_2 < num_factors; f_2++)
                    {
                        double d = 0;
                        foreach (int i in row)
                            //d += H[i, f_1] * H[i, f_2] * (c_pos - 1);
                            d += H[i, f_1] * H[i, f_2] * c_pos;
                        HC_minus_IH[f_1, f_2] = d;
                    }
                // create HCp in O(f|S_u|)
                for (int f = 0; f < num_factors; f++)
                {
                    double d = 0;
                    foreach (int i in row)
                        //d += H[i, f] * c_pos;
                        d += H[i, f] * (1 + c_pos);
                    HCp[f] = d;
                }
                // create m = HH + HC_minus_IH + reg*I
                // m is symmetric
                // the inverse m_inv is symmetric
                for (int f_1 = 0; f_1 < num_factors; f_1++)
                    for (int f_2 = 0; f_2 < num_factors; f_2++)
                    {
                        double d = HH[f_1, f_2] + HC_minus_IH[f_1, f_2];
                        if (f_1 == f_2)
                            d += regularization;
                        m[f_1, f_2] = d;
                    }
                var m_inv = m.Inverse();
                // write back optimal W
                for (int f = 0; f < num_factors; f++)
                {
                    double d = 0;
                    for (int f_2 = 0; f_2 < num_factors; f_2++)
                        d += m_inv[f, f_2] * HCp[f_2];
                    W[u, f] = (float) d;
                }
            }
        }
Example #7
        private void optimize(DenseMatrix coefficients, DenseVector objFunValues, bool artifical)
        {
            //for calculations on the optimal solution row
            int cCounter,
                width = coefficients.ColumnCount;
            DenseVector cBVect = new DenseVector(basics.Count);

            //Sets up the b matrix
            DenseMatrix b = new DenseMatrix(basics.Count, 1);

            //basics will have values greater than coefficients.ColumnCount - 1 if there are still artificial variables
            //or if Nathan is bad and didn't get rid of them correctly
            foreach (int index in basics)
            {
                b = (DenseMatrix)b.Append(DenseVector.OfVector(coefficients.Column(index)).ToColumnMatrix());
            }
            // removes the first column
            b = (DenseMatrix)b.SubMatrix(0, b.RowCount, 1, b.ColumnCount - 1);

            double[] cPrimes = new double[width];
            double[] rhsOverPPrime;
            DenseMatrix[] pPrimes = new DenseMatrix[width];
            DenseMatrix bInverse;

            int newEntering, exitingRow;

            bool optimal = false;

            if(artifical)
            {
                rhsOverPPrime = new double[numConstraints + 1];
            }
            else
            {
                rhsOverPPrime = new double[numConstraints];
            }

            while (!optimal)
            {
                //calculates the inverse of b for this iteration
                bInverse = (DenseMatrix)b.Inverse();

                //updates the C vector with the most recent basic variables
                cCounter = 0;
                foreach (int index in basics)
                {
                    cBVect[cCounter++] = objFunValues.At(index);
                }

                //calculates the pPrimes and cPrimes
                for (int i = 0; i < coefficients.ColumnCount; i++)
                {
                    if (!basics.Contains(i))
                    {
                        pPrimes[i] = (DenseMatrix)bInverse.Multiply((DenseMatrix)coefficients.Column(i).ToColumnMatrix());

                        //c' = objFunVals - cB * P'n
                        //At(0) to turn it into a double
                        cPrimes[i] = objFunValues.At(i) - (pPrimes[i].LeftMultiply(cBVect)).At(0);
                    }
                    else
                    {
                        pPrimes[i] = null;
                    }
                }

                //RHS'
                xPrime = (DenseMatrix)bInverse.Multiply((DenseMatrix)rhsValues.ToColumnMatrix());

                //Starts newEntering as the first nonbasic
                newEntering = -1;
                int iter = 0;
                while(newEntering == -1)
                {
                    if(!basics.Contains(iter))
                    {
                        newEntering = iter;
                    }

                    iter++;
                }

                //new entering becomes the smallest cPrime that corresponds to a non-basic value
                for (int i = 0; i < cPrimes.Length; i++)
                {
                    if (cPrimes[i] < cPrimes[newEntering] && !basics.Contains(i))
                    {
                        newEntering = i;
                    }
                }

                //if the smallest cPrime is >= 0, i.e. they are all non-negative
                if (cPrimes[newEntering] >= 0)
                {
                    optimal = true;
                }
                else
                {
                    //fix me to deal with if all these values are negative
                    exitingRow = 0;
                    for (int i = 0; i < xPrime.RowCount; i++)
                    {
                        double[,] pPrime = pPrimes[newEntering].ToArray();
                        rhsOverPPrime[i] = xPrime.ToArray()[i, 0] / pPrime[i, 0];

                        if (rhsOverPPrime[i] < rhsOverPPrime[exitingRow] && rhsOverPPrime[i] > 0 )
                        {
                            exitingRow = i;
                        }
                    }

                    //translates from the index in the basics list to the actual row
                    exitingRow = basics[exitingRow];

                    //make sure you're not being stupid here!!!!
                    int tempIndex = basics.IndexOf(exitingRow);
                    basics.Remove(exitingRow);

                    basics.Insert(tempIndex, newEntering);

                    b.SetColumn(basics.IndexOf(newEntering), coefficients.Column(newEntering));
                }
            }
        }
Example #8
        private void CalcBaseAandC()
        {
            BaseA = new DenseMatrix(CurrBaseJ.Count);
            BaseC = new DenseVector(CurrBaseJ.Count);

            int i = 0;
            foreach (var j in CurrBaseJ)
            {
                BaseA.SetColumn(i, A.Column(j));
                BaseC[i] = c[j];
                i++;
            }

            InvBaseA = (DenseMatrix)BaseA.Inverse();

            CurrNonBaseJ = new List<int>(Enumerable.Range(0, A.ColumnCount).Except(CurrBaseJ));
        }
Example #9
        public void placeCompensatorPoles(double value)
        {
            // Check for dimensions
            if (A.ColumnCount != A.RowCount || A.ColumnCount <= 0) {
                // TODO: Throw exception
            }

            int n = A.ColumnCount;

            // Calculate controllability matrix
            Matrix<double> controllabilityMatrix = new DenseMatrix(n, n);
            for (int i = 0; i < n; i++) {
                Vector<double> vec = B.Column(0);

                for (int j = 0; j < i; j++) {
                    vec = A * vec;
                }

                controllabilityMatrix.SetColumn(i, vec);
            }

            // Unity vector
            Matrix<double> unityVector = new DenseMatrix(1,n);
            for (int i = 0; i < n; i++) {
                // Set 1 at last index
                unityVector.At(0, i,
                    (i == n-1 ? 1 : 0)
                    );
            }

            // Coefficients matrix
            Matrix<double> preparedMatrix = A.Clone();
            for (int i = 0; i < n; i++) {
                // Subtract value from diagonal
                preparedMatrix.At(i, i,
                    preparedMatrix.At(i, i) - value
                    );
            }
            Matrix<double> coefficientsMatrix = preparedMatrix.Clone();
            for (int i = 0; i < n-1; i++) {
                // Multiply n-1 times
                coefficientsMatrix = preparedMatrix * coefficientsMatrix;
            }

            // Calculate new K using Ackermann's formula
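            // Ackermann's formula with all closed-loop poles placed at 'value':
            //   K = [0 ... 0 1] * C^-1 * (A - value*I)^n, where C is the controllability matrix built above.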
            K = unityVector * controllabilityMatrix.Inverse() * coefficientsMatrix;
        }
        /// <summary>
        /// Run example
        /// </summary>
        /// <seealso cref="http://en.wikipedia.org/wiki/Transpose">Transpose</seealso>
        /// <seealso cref="http://en.wikipedia.org/wiki/Invertible_matrix">Invertible matrix</seealso>
        public void Run()
        {
            // Format matrix output to console
            var formatProvider = (CultureInfo)CultureInfo.InvariantCulture.Clone();
            formatProvider.TextInfo.ListSeparator = " ";

            // Create random square matrix
            var matrix = new DenseMatrix(5);
            var rnd = new Random(1);
            for (var i = 0; i < matrix.RowCount; i++)
            {
                for (var j = 0; j < matrix.ColumnCount; j++)
                {
                    matrix[i, j] = rnd.NextDouble();
                }
            }

            Console.WriteLine(@"Initial matrix");
            Console.WriteLine(matrix.ToString("#0.00\t", formatProvider));
            Console.WriteLine();

            // 1. Get matrix inverse
            var inverse = matrix.Inverse();
            Console.WriteLine(@"1. Matrix inverse");
            Console.WriteLine(inverse.ToString("#0.00\t", formatProvider));
            Console.WriteLine();

            // 2. Matrix multiplied by its inverse gives identity matrix
            var identity = matrix * inverse;
            Console.WriteLine(@"2. Matrix multiplied by its inverse");
            Console.WriteLine(identity.ToString("#0.00\t", formatProvider));
            Console.WriteLine();

            // 3. Get matrix transpose
            var transpose = matrix.Transpose();
            Console.WriteLine(@"3. Matrix transpose");
            Console.WriteLine(transpose.ToString("#0.00\t", formatProvider));
            Console.WriteLine();

            // 4. Get orthogonal matrix, i.e. do QR decomposition and get matrix Q
            var orthogonal = matrix.QR().Q;
            Console.WriteLine(@"4. Orthogonal matrix");
            Console.WriteLine(orthogonal.ToString("#0.00\t", formatProvider));
            Console.WriteLine();

            // 5. Transposing and multiplying the orthogonal matrix by itself gives the identity matrix
            identity = orthogonal.TransposeAndMultiply(orthogonal);
            Console.WriteLine(@"Transpose and multiply orthogonal matrix by itself");
            Console.WriteLine(identity.ToString("#0.00\t", formatProvider));
            Console.WriteLine();
        }
Example #11
        private double[] linearApproxym(double[] x, double[] y)
        {
            int n = x.Length;
            double sumOfX = 0;
            double sumOfXSquare = 0;
            double sumOfY = 0;
            double sumOfXY = 0;

            for (int i = 0; i < n; i++)
            {
                sumOfX += x[i];
                sumOfXSquare += x[i] * x[i];
                sumOfY += y[i];
                sumOfXY += x[i] * y[i];
            }

            var matrixX = new DenseMatrix(new double[,] { { n, sumOfX }, { sumOfX, sumOfXSquare } });
            var matrixY = new DenseMatrix(2, 1, new double[] { sumOfY, sumOfXY });

            var matrixP = matrixX.Inverse().Multiply(matrixY);

            double[] p = new double[2];

            p[0] = matrixP[1, 0];
            p[1] = matrixP[0, 0];

            return p;
        }
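
        // Sketch (not part of the original snippet): the same straight-line fit can be cross-checked
        // with MathNet.Numerics' least-squares helper, assuming the Fit class is available. The helper
        // name linearApproxViaFit is made up for illustration; x and y are the same sample arrays.
        private double[] linearApproxViaFit(double[] x, double[] y)
        {
            var fit = MathNet.Numerics.Fit.Line(x, y);   // returns (intercept, slope)
            return new[] { fit.Item2, fit.Item1 };       // [slope, intercept], matching linearApproxym's output order
        }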
        private void CalculateCoefficients(int points)
        {
            var c = new double[points][,];

            // For every possible center given the number of points, compute every possible coefficient for all possible orders.
            for (int center = 0; center < points; center++)
            {
                // Deltas matrix for center located at 'center'.
                var A = new DenseMatrix(points);
                var l = points - center - 1;
                for (int row = points - 1; row >= 0; row--)
                {
                    A[row, 0] = 1.0;
                    for (int col = 1; col < points; col++)
                    {
                        A[row, col] = A[row, col - 1] * l / col;
                    }
                    l -= 1;
                }

                c[center] = A.Inverse().ToArray();

                // "Polish" results by rounding.
                var fac = SpecialFunctions.Factorial(points);
                for (int j = 0; j < points; j++)
                    for (int k = 0; k < points; k++)
#if PORTABLE
                        c[center][j, k] = (Math.Round(c[center][j, k] * fac)) / fac;
#else
                        c[center][j, k] = (Math.Round(c[center][j, k] * fac, MidpointRounding.AwayFromZero)) / fac;
#endif
            }

            _coefficients = c;
        }
Example #13
        //Given an observation, mu and sigma. What is N(O | mu, sigma)?
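        // Density evaluated below: N(o | mu, Sigma) = (2*pi)^(-k/2) * |Sigma|^(-1/2) * exp(-0.5 * (o - mu)^T * Sigma^-1 * (o - mu)), with k = mean.Count.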
        public double queryGuassian(DenseVector observation, DenseVector mean, DenseMatrix covar)
        {
            double scaletemp, scale, exponent, prob;
            DenseMatrix v1, v2; //Temp matrices, for multiplying

            scaletemp = Math.Pow(2 * Math.PI, mean.Count / 2.0); // floating-point division so odd dimensions scale correctly
            scale = (Math.Sqrt(covar.Determinant()));
            scale *= scaletemp;
            scale = 1 / scale;

            v1 = (DenseMatrix)(observation - mean).ToRowMatrix();
            v2 = (DenseMatrix)(observation - mean).ToColumnMatrix();
            v2 = (DenseMatrix)(covar.Inverse()) * v2;

            exponent = (-0.5) *  ((v1 * v2).ToArray()[0,0]);
            prob = scale * Math.Pow(Math.E, exponent);

            return prob;
        }
Example #14
		private void Optimize(int u, IBooleanMatrix data, Matrix<float> W, Matrix<float> H, Matrix<double> HH)
		{
			var row = data.GetEntriesByRow(u);
			// HC_minus_IH is symmetric
			// create HC_minus_IH in O(f^2|S_u|)
			var HC_minus_IH = new Matrix<double>(num_factors, num_factors);
			for (int f_1 = 0; f_1 < num_factors; f_1++)
				for (int f_2 = f_1; f_2 < num_factors; f_2++)
				{
					double d = 0;
					foreach (int i in row)
						d += H[i, f_1] * H[i, f_2];
					HC_minus_IH[f_1, f_2] = d * alpha;
					HC_minus_IH[f_2, f_1] = d * alpha;
				}
			// create HCp in O(f|S_u|)
			var HCp = new double[num_factors];
			for (int f = 0; f < num_factors; f++)
			{
				double d = 0;
				foreach (int i in row)
					d += H[i, f];
				HCp[f] = d * (1 + alpha);
			}
			// create m = HH + HC_minus_IH + reg*I
			// m is symmetric
			// the inverse m_inv is symmetric
			var m = new DenseMatrix(num_factors, num_factors);
			for (int f_1 = 0; f_1 < num_factors; f_1++)
				for (int f_2 = f_1; f_2 < num_factors; f_2++)
				{
					double d = HH[f_1, f_2] + HC_minus_IH[f_1, f_2];
					if (f_1 == f_2)
						d += regularization;
					m[f_1, f_2] = d;
					m[f_2, f_1] = d;
				}
			var m_inv = m.Inverse();
			// write back optimal W
			for (int f = 0; f < num_factors; f++)
			{
				double d = 0;
				for (int f_2 = 0; f_2 < num_factors; f_2++)
					d += m_inv[f, f_2] * HCp[f_2];
				W[u, f] = (float) d;
			}
		}
Example #15
 public Vector<double> Solve(DenseMatrix systemMatrix, Vector<double> freeTerms)
 {
     return systemMatrix.Inverse().Multiply(freeTerms);
 }
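
 // Sketch (assuming MathNet.Numerics): Matrix<double>.Solve factors the system instead of forming
 // an explicit inverse, which is usually cheaper and numerically safer. The method name
 // SolveWithoutInverse is made up for illustration.
 public Vector<double> SolveWithoutInverse(DenseMatrix systemMatrix, Vector<double> freeTerms)
 {
     return systemMatrix.Solve(freeTerms);
 }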
Example #16
        private bool Iterate()
        {
            // Step 1
            var cb = new DenseVector(_task.Jb.Count());
            for (int i = 0; i < _task.Jb.Count(); i++)
            {
                cb[i] = _task.c[_task.Jb[i]];
            }

            Matrix<double> bMatrix = new DenseMatrix(_task.Jb.Count());
            for (int i = 0; i < bMatrix.ColumnCount; i++)
            {
                bMatrix.SetColumn(i, _task.A.Column(_task.Jb[i]));
            }
            bMatrix = bMatrix.Inverse();

            var u = cb * bMatrix;
            var deltas = DenseVector.Create(
                _task.c.Count, i => _task.Jb.Contains(i) ? double.PositiveInfinity : u * _task.A.Column(i) - _task.c[i]);

            //Step 2
            if (deltas.ToList().TrueForAll(x => x > 0 || Math.Abs(x) < Eps))
            {
                // STOP vector is optimal
                //_writer.WriteLine("Optimal plan is found: {0}", _task.xo);
                //_writer.WriteLine("Target function value = {0}", _task.c * _task.xo);
                return false;
            }

            // Step 3
            int j0 = 0;
            for (int j = 0; j < deltas.Count; j++)
            {
                if (!_task.Jb.Contains(j) && deltas[j] < 0 && Math.Abs(deltas[j]) >= Eps)
                {
                    j0 = j;
                    break;
                }
            }

            var z = bMatrix * _task.A.Column(j0);

            if (z.ToList().TrueForAll(x => x < -Eps || Math.Abs(x) < Eps ))
            {
                // STOP target function is unlimited
                _writer.WriteLine("Target function is unlimited");
                return false;
            }

            // Step 4
            var tetta0 = double.PositiveInfinity;
            var s = -1;
            for (int i = 0; i < z.Count; i++)
            {
                if (z[i] > 0 && Math.Abs(z[i]) > Eps)
                {
                    var tetta = _task.xo[_task.Jb[i]]/z[i];
                    if(double.IsPositiveInfinity(tetta0) || (Math.Abs(tetta0 - tetta) > Eps && tetta < tetta0))
                    {
                        tetta0 = tetta;
                        s = i;
                    }
                }
            }

            // Step 5
            var newXo = DenseVector.Create(_task.xo.Count, i => 0);
            for (int i = 0; i < _task.Jb.Count(); i++)
            {
                newXo[_task.Jb[i]] = _task.xo[_task.Jb[i]] - tetta0 * z[i];
            }
            newXo[j0] = tetta0;

            _task.xo = newXo;
            _task.Jb[s] = j0;

            // Step 6 is unnecessary
            return true;
        }
Example #17
        public override void Match()
        {
            int r21 = 2 * WindowRadius + 1;
            _leftMoments = new List<Vector<double>>();
            _rightMoments = new List<Vector<double>>();

            // Find circular mask bounds:
            // for x = [-r,r] -> y = [ -sqrt(r^2 - x^2), sqrt(r^2 - x^2) ]
            _ybounds = new int[r21];
            for(int x = -WindowRadius; x <= WindowRadius; ++x)
            {
                _ybounds[x + WindowRadius] = (int)Math.Sqrt(WindowRadius * WindowRadius - x * x);
            }

            // Find moment vectors for each feature point
            for(int i = 0; i < LeftFeaturePoints.Count; ++i)
            {
                IntVector2 pixel = LeftFeaturePoints[i];
                if(pixel.X < WindowRadius || pixel.Y < WindowRadius ||
                    pixel.X >= LeftImage.ColumnCount - WindowRadius ||
                    pixel.Y >= LeftImage.RowCount - WindowRadius)
                {
                    _leftMoments.Add(null);
                }
                else
                {
                    Vector<double> mom = ComputeMomentVectorForPatch(pixel, LeftImage);
                    if(UseCenteredMoments)
                        mom = ComputeCenteredMomentVector(mom);
                    if(UseScaledMoments)
                        ScaleMomentVector(mom);
                    Vector<double> invMom = ComputeInvariantMomentVector(mom);
                    _leftMoments.Add(invMom);
                }
            }

            for(int i = 0; i < RightFeaturePoints.Count; ++i)
            {
                IntVector2 pixel = RightFeaturePoints[i];
                if(pixel.X < WindowRadius || pixel.Y < WindowRadius ||
                    pixel.X >= RightImage.ColumnCount - WindowRadius ||
                    pixel.Y >= RightImage.RowCount - WindowRadius)
                {
                    _rightMoments.Add(null);
                }
                else
                {
                    Vector<double> mom = ComputeMomentVectorForPatch(pixel, RightImage);
                    if(UseCenteredMoments)
                        mom = ComputeCenteredMomentVector(mom);
                    if(UseScaledMoments)
                        ScaleMomentVector(mom);
                    Vector<double> invMom = ComputeInvariantMomentVector(mom);
                    _rightMoments.Add(invMom);
                }
            }

            // We need to find the covariance matrix of the invariants as they have
            // different magnitudes, so a simple ||Il - Ir|| may not be the best choice
            // E = 1/(n-1) sum( (xi-m)(xi-m)^T )  (xi is a column vector, m = 1/n sum(xi))
            // 1) Find mean
            int n = 0;
            Vector<double> meanInv = new DenseVector(_invCount);
            for(int i = 0; i < _leftMoments.Count; ++i)
            {
                if(_leftMoments[i] != null)
                {
                    meanInv.PointwiseAddThis(_leftMoments[i]);
                    ++n;
                }
            }
            for(int i = 0; i < _rightMoments.Count; ++i)
            {
                if(_rightMoments[i] != null)
                {
                    meanInv.PointwiseAddThis(_rightMoments[i]);
                    ++n;
                }
            }
            meanInv.MultiplyThis(1.0 / n);
            // 2) Find E
            Matrix<double> cov = new DenseMatrix(_invCount, _invCount);
            for(int i = 0; i < _leftMoments.Count; ++i)
            {
                if(_leftMoments[i] != null)
                {
                    cov.PointwiseAddThis(CamCore.MatrixExtensions.FromVectorProduct(_leftMoments[i] - meanInv));
                }
            }
            for(int i = 0; i < _rightMoments.Count; ++i)
            {
                if(_rightMoments[i] != null)
                {
                    cov.PointwiseAddThis(CamCore.MatrixExtensions.FromVectorProduct(_rightMoments[i] - meanInv));
                }
            }
            cov.MultiplyThis(1.0 / (n - 1));
            var covInv = cov.Inverse();

            // Match each point pair and find ||Il - Ir||E
            List<MatchedPair> costs;
            var matchLeft = new List<MatchedPair>();
            var matchRight = new List<MatchedPair>();
            for(int l = 0; l < LeftFeaturePoints.Count; ++l)
            {
                costs = new List<MatchedPair>(LeftFeaturePoints.Count);
                if(_leftMoments[l] != null)
                {
                    for(int r = 0; r < RightFeaturePoints.Count; ++r)
                    {
                        if(_rightMoments[r] != null)
                        {
                            var d = _leftMoments[l] - _rightMoments[r];
                            costs.Add(new MatchedPair()
                            {
                                LeftPoint = new Vector2(LeftFeaturePoints[l]),
                                RightPoint = new Vector2(RightFeaturePoints[r]),
                                Cost = UseMahalanobis ? d * covInv * d : // cost = d^T * E^-1 * d
                                    d.DotProduct(d)
                            });
                        }
                    }
                    costs.Sort((c1, c2) => { return c1.Cost > c2.Cost ? 1 : (c1.Cost < c2.Cost ? -1 : 0); });
                    // Confidence will be (c2-c1)/(c1+c2)
                    MatchedPair match = costs[0];
                    match.Confidence = (costs[1].Cost - costs[0].Cost) / (costs[1].Cost + costs[0].Cost);
                    matchLeft.Add(match);
                }
            }

            for(int r = 0; r < RightFeaturePoints.Count; ++r)
            {
                costs = new List<MatchedPair>(RightFeaturePoints.Count);
                if(_rightMoments[r] != null)
                {
                    for(int l = 0; l < LeftFeaturePoints.Count; ++l)
                    {
                        if(_leftMoments[l] != null)
                        {
                            var d = _leftMoments[l] - _rightMoments[r];
                            costs.Add(new MatchedPair()
                            {
                                LeftPoint = new Vector2(LeftFeaturePoints[l]),
                                RightPoint = new Vector2(RightFeaturePoints[r]),
                                Cost = UseMahalanobis ? d * covInv * d : // cost = d^T * E^-1 * d
                                    d.DotProduct(d)
                            });
                        }
                    }
                    costs.Sort((c1, c2) => { return c1.Cost > c2.Cost ? 1 : (c1.Cost < c2.Cost ? -1 : 0); });
                    // Confidence will be (c2-c1)/(c1+c2)
                    MatchedPair match = costs[0];
                    match.Confidence = (costs[1].Cost - costs[0].Cost) / (costs[1].Cost + costs[0].Cost);
                    matchRight.Add(match);
                }
            }

            Matches = new List<MatchedPair>();
            foreach(var ml in matchLeft)
            {
                MatchedPair mr = matchRight.Find((m) => { return ml.LeftPoint.DistanceTo(m.LeftPoint) < 0.01; });
                // We have matches from both sides
                if(mr != null && ml.RightPoint.DistanceTo(mr.RightPoint) < 0.01)
                {
                    // Cross check successful
                    Matches.Add(mr);
                }
            }
        }
Example #18
        private Func<double, DenseVector> LinearGradient()
        {
            var ys = _values.Select(v => _l.Value(v.Key)).ToList();
            var fs = _l.Functions.Select(f => _values.Select(v => f(v.Key)).ToList()).ToList();
            var dfs = _functionsDerivatives
                .Select(dpf => dpf.Select(df => _values.Select(v => df.Value(v.Key)).ToList()).ToList()).ToList();

            var n = _l.Functions.Count;
            const int nb = 3;

            var results = new List<Vector<double>>();
            for (var b = 0; b < nb; b++)
            {
                var matrix = new DenseMatrix(n, n);
                var vector = new DenseVector(n);
                for (var i = 0; i < n; i++)
                {
                    for (var j = 0; j < n; j++)
                    {
                        matrix[i, j] = dfs[i][b].ScalarProduct(fs[j]) + dfs[j][b].ScalarProduct(fs[i]);
                    }
                    vector[i] = ys.ScalarProduct(dfs[i][b]);
                }
                var matrixInverse = matrix.Inverse();
                var result = matrixInverse * vector;
                results.Add(result);
            }

            Func<double, DenseVector> gradient = x => new DenseVector(new[]
            {
                DotProduct(results[0], _l.Functions)(x),
                DotProduct(results[1], _l.Functions)(x),
                0 //DotProduct(results[2], _l.Functions)(x)
            });

            return gradient;
        }
Example #19
        private DenseVector NewtonMethod(ref Parser[] parsers)
        {
            double eps = 10e-3;

            //var helperMatrix = new DenseMatrix(equationsCount, equationsCount);
            var helperMatrixR = new double[equationsCount, equationsCount];
            for (int i = 0; i < equationsCount; i++)
            {
                for (int j = 0; j < equationsCount; j++)
                {
                    double p;
                    double p1;
                    try
                    {
                        parsers[i].Variables[variables.ElementAt(j).Key] = variables.ElementAt(j).Value + eps;
                        p1 = parsers[i].Calculate();
                        parsers[i].Variables[variables.ElementAt(j).Key] = variables.ElementAt(j).Value;
                        p = parsers[i].Calculate();
                    }
                    catch(Exception exception)
                    {
                        ErrorAllert(exception.Message);
                        return null;
                    }

                    helperMatrixR[i, j] = (p1 - p)/eps;
                }
            }
            var helperMatrix = new DenseMatrix(helperMatrixR);

            if (Math.Abs(helperMatrix.Determinant()) < eps)
            {
                ErrorAllert("Determinant of Jacobi matrix is 0. Can't calculate result.");
                return null;
            }
            var inverseMatrix = helperMatrix.Inverse();
            var initVars = new DenseVector(equationsCount);
            var modVars = new Dictionary<string, double>();

            for (int i = 0; i < variables.Count; i++)
            {
                initVars[i] = variables.ElementAt(i).Value;
                modVars[variables.ElementAt(i).Key] = variables.ElementAt(i).Value;
            }

            foreach (var parser in parsers)
            {
                parser.Variables = modVars;
            }

            bool diff = false;
            int itCount = 0;

            while (!diff && itCount < 10)
            {
                var f = new DenseVector(equationsCount);

                for (int j = 0; j < equationsCount; j++)
                {
                    f[j] = parsers[j].Calculate();
                }

                var result = new DenseVector((initVars - inverseMatrix*f).ToArray());

                // Converged once every component changes by less than eps.
                diff = true;
                for (int i = 0; i < equationsCount; i++)
                {
                    diff = diff && (Math.Abs(result[i] - initVars[i]) < eps);
                }

                initVars = result;

                for (int j = 0; j < equationsCount; j++)
                {
                    modVars[modVars.ElementAt(j).Key] = initVars[j];
                }
                ++itCount;
            }

            return initVars;
        }
Example #20
File: GomoriMethod.cs Project: Kant8/IOp
        private bool GomoriIteration()
        {
            // Step 1
            _writer.WriteLine("Iteration: {0}", iterationNumber);
            var simplexMethod = new SimplexMethod(_writer);
            simplexMethod.Solve(_task);     // Solve the task with the simplex method

            _writer.WriteLine("Optimal plan is found: {0}", _task.xo);
            _writer.WriteLine("Target function value = {0}", _task.c * _task.xo);

            // Step 2
            //var artJToRemoveRow = -1;
            //var artJToRemoveColumn = -1;
            //artJToRemoveRow = -1;
            //artJToRemoveColumn = -1;

            //foreach (var artJ in _artJ)
            //{
            //    if (_task.Jb.Contains(artJ.Column))
            //    {
            //        var rowToRemove = artJ.Row;     // TODO probably need to rewrite row selection

            //        var ai = _task.A.Row(rowToRemove); // Select the row with the artificial constraint
            //        ai = -ai / ai[artJ.Column];
            //        var rowList = ai.ToList();
            //        rowList.RemoveAt(artJ.Column);
            //        ai = DenseVector.OfEnumerable(rowList);

            //        var aj = _task.A.Column(artJ.Column);   // Select the column with the artificial constraint
            //        var columnList = aj.ToList();
            //        var bCoef = _task.b[rowToRemove] / columnList[rowToRemove];
            //        columnList.RemoveAt(rowToRemove);
            //        aj = DenseVector.OfEnumerable(columnList);

            //        var newA = DenseMatrix.Create(_task.A.RowCount - 1, _task.A.ColumnCount - 1,
            //            (i, j) => _task.A[i < rowToRemove ? i : i + 1, j < artJ.Column ? j : j + 1]);

            //        newA += DenseMatrix.OfMatrix(aj.ToColumnMatrix() * ai.ToRowMatrix());   // Remove the artificial row
            //        _task.A = newA;                                                         // and column from matrix A
            //        _task.b = DenseVector.Create(_task.b.Count - 1, i => i < rowToRemove ? _task.b[i] : _task.b[i + 1]);
            //        _task.b += bCoef * aj;

            //        _task.c = DenseVector.Create(_task.c.Count - 1, i => i < artJ.Column ? _task.c[i] : _task.c[i + 1]);    // Remove the artificial variable from vector c

            //        _task.xo = DenseVector.Create(_task.xo.Count - 1, i => i < artJ.Column ? _task.xo[i] : _task.xo[i + 1]);    // Remove the artificial variable from xo

            //        _task.Jb.Remove(artJ.Column);
            //        artJToRemoveColumn = artJ.Column;
            //        artJToRemoveRow = artJ.Row;
            //        break;
            //    }
            //}

            //if (artJToRemoveRow > 0)        // Remove the artificial variable from the basic set
            //{
            //    _artJ.RemoveAll(x => x.Row == artJToRemoveRow);
            //    for (int i = 0; i < _artJ.Count; i++)
            //    {
            //        if (_artJ[i].Row > artJToRemoveRow)
            //        {
            //            _artJ[i].Row--;         // Shift the basic variable indices by one
            //            _artJ[i].Column--;      // after the artificial variable has been removed
            //        }
            //    }

            //    for (int i = 0; i < _task.Jb.Count; i++)
            //    {
            //        _task.Jb[i] = _task.Jb[i] > artJToRemoveColumn ? _task.Jb[i] - 1 : _task.Jb[i];
            //    }
            //}

            // Step 3
            var falseIndex = -1;
            var maxFract = 0d;
            for (int i = 0; i < _task.xo.Count(); i++)
            {
                if (Math.Abs(Math.Round(_task.xo[i]) - _task.xo[i]) > Eps)
                {
                    var fract = Math.Abs(_task.xo[i] - Math.Floor(_task.xo[i]));    // Find the basic variable
                    if (_task.Jb.Contains(i) && fract > Eps)                        // with the largest fractional part
                    {                                                               // and remember its index
                        if (fract > maxFract)
                        {
                            maxFract = fract;
                            falseIndex = i;
                        }
                    }
                }
            }

            if (falseIndex < 0)     // If all variables are integer, the solution has been found
            {
                return false;   // Stop the method
            }
            _writer.WriteLine("Jk = {0}", falseIndex);

            // Step 4
            var aB = new DenseMatrix(_task.Jb.Count());
            int index = 0;
            foreach (var j in _task.Jb)
            {
                aB.SetColumn(index, _task.A.Column(j));     // Build matrix Ab from the basic columns of A
                index++;
            }
            _writer.Write("Jb: ");
            _task.Jb.ForEach(x => _writer.Write("{0} ", x));
            _writer.WriteLine();
            _writer.WriteLine("Basis matrix: {0}", aB);
            var y = DenseMatrix.Identity(_task.A.RowCount).Column(_task.Jb.IndexOf(falseIndex)) * aB.Inverse(); // Find e' * Ab^-1

            var newRow = new DenseVector(_task.A.ColumnCount + 1);
            newRow.SetSubVector(0, _task.A.ColumnCount, y * _task.A);   // Compute the data for the new cutting constraint

            _writer.WriteLine("Data for new limitation: {0}", newRow);

            for (int i = 0; i < newRow.Count; i++)      // Build the new cutting constraint
            {
                if (i < _task.A.ColumnCount)
                {
                    if (Math.Abs(newRow[i]) < Eps)
                    {
                        newRow[i] = 0;
                    }
                    else
                    {
                        newRow[i] = newRow[i] > 0
                                    ? -(newRow[i] - Math.Floor(newRow[i]))
                                    : -(Math.Ceiling(Math.Abs(newRow[i])) - Math.Abs(newRow[i]));
                    }
                }
                else
                {
                    newRow[i] = 1;
                }
            }
            newRow[falseIndex] = 0;
            _writer.WriteLine("New limitation: {0}", newRow);

            var newb = (y * _task.b);   // Compute the new element of vector b
            newb = newb > 0 ? -(newb - Math.Floor(newb)) : -(Math.Ceiling(Math.Abs(newb)) - Math.Abs(newb)); // TODO probably need to rewrite this

            _writer.WriteLine("New b = {0}", newb);

            // Step 5
            var newMatrix = new DenseMatrix(_task.A.RowCount + 1, _task.A.ColumnCount + 1); // Build the new
            newMatrix.SetSubMatrix(0, _task.A.RowCount, 0, _task.A.ColumnCount, _task.A);   // matrix A
            newMatrix.SetRow(_task.A.RowCount, newRow);
            newMatrix[_task.A.RowCount, _task.A.ColumnCount] = 1;

            var newBVector = new DenseVector(_task.b.Count + 1);    // Build the new
            newBVector.SetSubVector(0, _task.b.Count, _task.b);     // vector b
            newBVector[_task.b.Count] = newb;

            var newCVector = new DenseVector(_task.c.Count + 1);    // Add a new
            newCVector.SetSubVector(0, _task.c.Count, _task.c);     // component to vector c

            var newJb = _task.Jb.ToList();
            newJb.Add(newJb[newJb.Count - 1] + 1);
            _artJ.Add(new ArtJEntry { Column = newMatrix.ColumnCount - 1, Row = newMatrix.RowCount - 1 });

            _task.A = newMatrix.Clone();        // Create the
            _task.b = newBVector.Clone();       // new task
            _task.c = newCVector.Clone();       // for the next iteration
            _task.Jb = newJb;

            iterationNumber++;              // Advance to the next iteration number

            return true;
        }