/// <summary>
        ///   Loads a jagged array from a stream.
        /// </summary>
        ///
        /// <param name="stream">The stream containing the matrix to be loaded.</param>
        /// <param name="trim">Pass true to remove null or empty elements from the loaded array.</param>
        ///
        /// <returns>A jagged array containing the values available in the given stream.</returns>
        ///
        public static Array LoadJagged(Stream stream, bool trim = true)
        {
            using (var reader = new BinaryReader(stream, System.Text.Encoding.ASCII
#if !NET35 && !NET40
                                                 , leaveOpen: true
#endif
                                                 ))
            {
                int   bytes;
                Type  type;
                int[] shape;
                if (!parseReader(reader, out bytes, out type, out shape))
                {
                    throw new FormatException("The stream does not contain a matrix in the expected format.");
                }

                Array matrix = Jagged.Create(type, shape);

                if (type == typeof(String))
                {
                    Array result = readStringMatrix(reader, matrix, bytes, type, shape);

                    if (trim)
                    {
                        return(result.Trim());
                    }
                    return(result);
                }

                return(readValueJagged(reader, matrix, bytes, type, shape));
            }
        }
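        // Illustrative usage sketch (not part of the library): shows the intended call
        // shape of LoadJagged. The file name is hypothetical, and the stream must contain
        // a matrix in whatever binary layout parseReader above expects.
        public static void LoadJaggedUsageSketch()
        {
            using (var stream = File.OpenRead("matrix.bin"))
            {
                Array jagged = LoadJagged(stream, trim: true);
                Console.WriteLine("Loaded {0} rows.", jagged.Length);
            }
        }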
        /// <summary>
        ///   Converts an image from one representation to another.
        /// </summary>
        ///
        /// <param name="input">The input image to be converted.</param>
        /// <param name="output">The converted image.</param>
        ///
        public void Convert(UnmanagedImage input, out float[][] output)
        {
            int width     = input.Width;
            int height    = input.Height;
            int pixelSize = input.PixelSize;
            int offset    = input.Offset;

            output = Jagged.Create <float>(height, width);

            float min = (float)Min;
            float max = (float)Max;
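            // Vector.Scale maps each source byte linearly from [0, 255] into [Min, Max];
            // for example, with Min = 0 and Max = 1 a pixel value of 128 becomes
            // 128 / 255 ≈ 0.502.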

            unsafe
            {
                byte *src = (byte *)input.ImageData.ToPointer() + Channel;

                for (int y = 0; y < height; y++)
                {
                    for (int x = 0; x < width; x++, src += pixelSize)
                    {
                        output[y][x] = Vector.Scale(*src, (byte)0, (byte)255, min, max);
                    }
                    src += offset;
                }
            }
        }
        /// <summary>
        ///   Creates a matrix with uniformly distributed random data.
        /// </summary>
        ///
        public static int[][] Random(int size, int min, int max, bool symmetric = false, int[][] result = null)
        {
            if (result is null)
            {
                result = Jagged.Create <int>(size, size);
            }

            var random = ISynergy.Framework.Mathematics.Random.Generator.Random;

            if (symmetric)
            {
                for (var i = 0; i < size; i++)
                {
                    for (var j = i; j < size; j++)
                    {
                        result[i][j] = result[j][i] = (int)random.Next((int)min, (int)max);
                    }
                }
            }
            else
            {
                for (var i = 0; i < size; i++)
                {
                    for (var j = 0; j < size; j++)
                    {
                        result[i][j] = (int)random.Next((int)min, (int)max);
                    }
                }
            }
            return(result);
        }
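        // Illustrative usage sketch (not part of the library), assuming the Random
        // overloads above live on the same static class as Jagged.Create:
        public static void RandomUsageSketch()
        {
            int[][] sym   = Random(5, 0, 10, symmetric: true);   // 5x5, values in [0, 10)
            int[][] plain = Random(5, 0, 10);                    // 5x5, not symmetric

            Console.WriteLine(sym[1][3] == sym[3][1]);           // True
        }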
        // Compute gradient of the t-SNE cost function (using Barnes-Hut algorithm)
        internal static void computeGradient(double[][] P, int[] inp_row_P, int[] inp_col_P, double[] inp_val_P, double[][] Y, int N, int D, double[][] dC, double theta)
        {
            // Construct space-partitioning tree on current map
            var tree = SPTree.FromData(Y);

            // Compute all terms required for t-SNE gradient
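            // pos_f accumulates the attractive forces over the sparse neighbour lists
            // (inp_row_P / inp_col_P / inp_val_P), while the space-partitioning tree
            // sums approximate repulsive forces into neg_f together with the
            // normalization constant sum_Q.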
            var pos_f = Jagged.Create <double>(N, D);
            var neg_f = Jagged.Create <double>(N, D);

            tree.ComputeEdgeForces(Y, inp_row_P, inp_col_P, inp_val_P, pos_f);

            double sum_Q = 0.0;

            for (int n = 0; n < Y.Length; n++)
            {
                tree.ComputeNonEdgeForces(Y[n], theta, neg_f[n], ref sum_Q);
            }

            // Compute final t-SNE gradient
            for (int i = 0; i < dC.Length; i++)
            {
                for (int j = 0; j < dC[i].Length; j++)
                {
                    dC[i][j] = pos_f[i][j] - (neg_f[i][j] / sum_Q);
                }
            }
        }
        /// <summary>
        ///   Creates a matrix with uniformly distributed random data.
        /// </summary>
        ///
        public static decimal[][] Random(int size, decimal min, decimal max, bool symmetric = false, decimal[][] result = null)
        {
            if (result is null)
            {
                result = Jagged.Create <decimal>(size, size);
            }

            var random = ISynergy.Framework.Mathematics.Random.Generator.Random;

            if (symmetric)
            {
                for (var i = 0; i < size; i++)
                {
                    for (var j = i; j < size; j++)
                    {
                        result[i][j] = result[j][i] = (decimal)random.NextDouble() * (max - min) + min;
                    }
                }
            }
            else
            {
                for (var i = 0; i < size; i++)
                {
                    for (var j = 0; j < size; j++)
                    {
                        result[i][j] = (decimal)random.NextDouble() * (max - min) + min;
                    }
                }
            }
            return(result);
        }
        /// <summary>Least squares solution of <c>X * A = B</c></summary>
        /// <param name="value">Right-hand-side matrix with as many columns as <c>A</c> and any number of rows.</param>
        /// <returns>A matrix that minimizes the two-norm of <c>X * Q * R - B</c>.</returns>
        /// <exception cref="T:System.ArgumentException">Matrix column dimensions must be the same.</exception>
        /// <exception cref="T:System.InvalidOperationException">Matrix is rank deficient.</exception>
        public Double[][] SolveTranspose(Double[][] value)
        {
            if (value == null)
            {
                throw new ArgumentNullException("value", "Matrix cannot be null.");
            }

            if (value.Columns() != qr.Length)
            {
                throw new ArgumentException("Matrix column dimensions must agree.");
            }

            if (!this.FullRank)
            {
                throw new InvalidOperationException("Matrix is rank deficient.");
            }

            // Copy right hand side
            int count = value.Length;
            var X     = value.Transpose();

            // Compute Y = transpose(Q)*B
            for (int k = 0; k < p; k++)
            {
                for (int j = 0; j < count; j++)
                {
                    Double s = 0;
                    for (int i = k; i < n; i++)
                    {
                        s += qr[i][k] * X[i][j];
                    }

                    s = -s / qr[k][k];
                    for (int i = k; i < n; i++)
                    {
                        X[i][j] += s * qr[i][k];
                    }
                }
            }

            // Solve R*X = Y;
            for (int k = p - 1; k >= 0; k--)
            {
                for (int j = 0; j < count; j++)
                {
                    X[k][j] /= Rdiag[k];
                }

                for (int i = 0; i < k; i++)
                {
                    for (int j = 0; j < count; j++)
                    {
                        X[i][j] -= X[k][j] * qr[i][k];
                    }
                }
            }

            return(Jagged.Create(count, p, X, transpose: true));
        }
        /// <summary>
        ///  Preprocesses the cost matrix to remove infinities and invalid values.
        /// </summary>
        ///
        /// <returns>Go to step 1.</returns>
        ///
        private int step_zero()
        {
            validRow = new bool[NumberOfWorkers];
            validCol = new bool[NumberOfTasks];
            validMap = Jagged.Create <bool>(NumberOfWorkers, NumberOfTasks);


            double sum = 0;
            double max = Double.NegativeInfinity;

            for (int i = 0; i < validRow.Length; i++)
            {
                for (int j = 0; j < validCol.Length; j++)
                {
                    double v = Math.Abs(costMatrix[i][j]);

                    if (!Double.IsInfinity(v) && !Double.IsNaN(v))
                    {
                        validMap[i][j] = validRow[i] = validCol[j] = true;

                        sum += v;
                        if (v > max)
                        {
                            max = v;
                        }
                    }
                }
            }

            double bigM = Math.Pow(10, Math.Ceiling(Math.Log10(sum)) + 1);
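            // bigM is a power of ten strictly larger than the sum of all valid |costs|;
            // e.g. if the valid costs sum to 47, bigM = 10^(ceil(log10 47) + 1) = 1000.
            // Invalid (infinite or NaN) entries are replaced by ±bigM below, preserving
            // their sign.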

            for (int i = 0; i < costMatrix.Length; i++)
            {
                for (int j = 0; j < costMatrix[i].Length; j++)
                {
                    if (!validMap[i][j])
                    {
                        costMatrix[i][j] = Math.Sign(costMatrix[i][j]) * bigM;
                    }
                }
            }

            nRows = validRow.Count(x => x);
            nCols = validCol.Count(x => x);
            n     = Math.Max(nRows, nCols);

            if (n == 0)
            {
                throw new InvalidOperationException("There are no valid values in the cost matrix.");
            }

            validCost = costMatrix.Get(validRow, validCol, result: Jagged.Create(n, n, 10.0 * max));

            this.rowCover = new bool[n];
            this.colCover = new bool[n];
            this.stars    = Jagged.Create(n, n, false);

            return(1);
        }
        /// <summary>Least squares solution of <c>A * X = B</c></summary>
        /// <param name="value">Right-hand-side matrix with as many rows as <c>A</c> and any number of columns.</param>
        /// <returns>A matrix that minimizes the two-norm of <c>Q * R * X - B</c>.</returns>
        /// <exception cref="T:System.ArgumentException">Matrix row dimensions must be the same.</exception>
        /// <exception cref="T:System.InvalidOperationException">Matrix is rank deficient.</exception>
        public double[][] Solve(double[][] value)
        {
            if (value is null)
            {
                throw new ArgumentNullException("value", "Matrix cannot be null.");
            }

            if (value.Length != n)
            {
                throw new ArgumentException("Matrix row dimensions must agree.");
            }

            if (!FullRank)
            {
                throw new InvalidOperationException("Matrix is rank deficient.");
            }

            // Copy right hand side
            var count = value.Columns();
            var X     = value.Copy();

            // Compute Y = transpose(Q)*B
            for (var k = 0; k < p; k++)
            {
                for (var j = 0; j < count; j++)
                {
                    double s = 0;
                    for (var i = k; i < qr.Length; i++)
                    {
                        s += qr[i][k] * X[i][j];
                    }

                    s = -s / qr[k][k];
                    for (var i = k; i < qr.Length; i++)
                    {
                        X[i][j] += s * qr[i][k];
                    }
                }
            }

            // Solve R*X = Y;
            for (var k = p - 1; k >= 0; k--)
            {
                for (var j = 0; j < X[k].Length; j++)
                {
                    X[k][j] /= Diagonal[k];
                }

                for (var i = 0; i < k; i++)
                {
                    for (var j = 0; j < X[i].Length; j++)
                    {
                        X[i][j] -= X[k][j] * qr[i][k];
                    }
                }
            }

            return(Jagged.Create(p, count, X, false));
        }
 internal T[][] create <T>(TInput[] input, T[][] decision)
 {
     if (decision == null)
     {
         decision = Jagged.Create <T>(input.Length, NumberOfOutputs);
     }
     return(decision);
 }
        // Compute gradient of the t-SNE cost function (exact)
        internal static void computeExactGradient(double[][] P, double[][] Y, int N, int D, double[][] dC)
        {
            // Make sure the current gradient contains zeros
            for (int i = 0; i < dC.Length; i++)
            {
                for (int j = 0; j < dC[i].Length; j++)
                {
                    dC[i][j] = 0.0;
                }
            }

            // Compute the squared Euclidean distance matrix
            double[][] DD = Jagged.Create <double>(N, N);
            computeSquaredEuclideanDistance(Y, N, D, DD);

            // Compute Q-matrix and normalization sum
            double[][] Q = Jagged.Zeros(N, N);

            double sum_Q = 0.0;

            for (int n = 0; n < N; n++)
            {
                for (int m = 0; m < N; m++)
                {
                    if (n != m)
                    {
                        Q[n][m] = 1.0 / (1.0 + DD[n][m]);
                        sum_Q  += Q[n][m];
                    }
                }
            }

            // Perform the computation of the gradient
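            // (For reference: with q_nm = Q[n][m] / sum_Q, each term below is
            //  (p_nm - q_nm) * (y_n - y_m) / (1 + |y_n - y_m|^2); the conventional
            //  factor of 4 from the t-SNE gradient is omitted here and can be
            //  absorbed into the learning rate.)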
            for (int n = 0; n < N; n++)
            {
                for (int m = 0; m < N; m++)
                {
                    if (n != m)
                    {
                        double mult = (P[n][m] - (Q[n][m] / sum_Q)) * Q[n][m];
                        for (int d = 0; d < D; d++)
                        {
                            dC[n][d] += (Y[n][d] - Y[m][d]) * mult;
                        }
                    }
                }
            }
        }
        public void CreateJaggedTest()
        {
            Array jagged = Jagged.Create(typeof(int), 2, 3, 1);

            foreach (var idx in jagged.GetIndices(deep: true))
            {
                Assert.AreEqual(0, jagged.GetValue(deep: true, indices: idx));
                jagged.SetValue(idx.Sum(), deep: true, indices: idx);
            }

            int[][][] expected =
            {
                new int[][] { new[] { 0 }, new[] { 1 }, new[] { 2 } },
                new int[][] { new[] { 1 }, new[] { 2 }, new[] { 3 } }
            };

            Assert.IsTrue(expected.IsEqual(jagged));
        }
        /// <summary>
        ///   Creates a matrix with uniformly distributed random data.
        /// </summary>
        ///
        public static short[][] Random(int rows, int columns, short min, short max, short[][] result = null)
        {
            if (result is null)
            {
                result = Jagged.Create <short>(rows, columns);
            }

            var random = ISynergy.Framework.Mathematics.Random.Generator.Random;

            for (var i = 0; i < rows; i++)
            {
                for (var j = 0; j < columns; j++)
                {
                    result[i][j] = (short)random.Next((int)min, (int)max);
                }
            }
            return(result);
        }
        /// <summary>
        ///   Creates the Gram matrix containing all dot products in feature
        ///   (kernel) space between each vector in <paramref name="x">x</paramref>
        ///   and the ones in <paramref name="y">y</paramref>.
        /// </summary>
        ///
        /// <param name="kernel">The kernel function.</param>
        /// <param name="x">The first vectors.</param>
        /// <param name="y">The second vectors.</param>
        ///
        /// <param name="result">An optional matrix in which the result should be stored.</param>
        ///
        /// <returns>A symmetric matrix containing the dot-products in
        ///   feature (kernel) space between each vector in <paramref name="x"/>
        ///   and the ones in <paramref name="y"/>.</returns>
        ///
        public static double[][] ToJagged2 <TKernel, TInput>(this TKernel kernel, TInput[] x, TInput[] y, double[][] result = null)
            where TKernel : IKernel <TInput>
        {
            if (result == null)
            {
                result = Jagged.Create <double>(x.Length, y.Length);
            }

            for (int i = 0; i < x.Length; i++)
            {
                for (int j = 0; j < y.Length; j++)
                {
                    result[i][j] = kernel.Function(x[i], y[j]);
                }
            }

            return(result);
        }
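        // Illustrative usage sketch (not part of the library): computes a 2x1 Gram
        // matrix between two sets of vectors. The kernel argument stands for any
        // IKernel<double[]> implementation supplied by the caller; which concrete
        // kernel types exist in the library is not shown here.
        public static double[][] ToJagged2UsageSketch<TKernel>(TKernel kernel)
            where TKernel : IKernel<double[]>
        {
            double[][] x =
            {
                new double[] { 1, 2 },
                new double[] { 3, 4 }
            };

            double[][] y =
            {
                new double[] { 0, 1 }
            };

            // gram[i][j] = kernel.Function(x[i], y[j])
            double[][] gram = kernel.ToJagged2(x, y);
            return gram;
        }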
        /// <summary>
        ///   Creates a matrix with uniformly distributed random data.
        /// </summary>
        ///
        public static decimal[][] Random(int rows, int columns, decimal min, decimal max, decimal[][] result = null)
        {
            if (result is null)
            {
                result = Jagged.Create <decimal>(rows, columns);
            }

            var random = ISynergy.Framework.Mathematics.Random.Generator.Random;

            for (var i = 0; i < rows; i++)
            {
                for (var j = 0; j < columns; j++)
                {
                    result[i][j] = (decimal)random.NextDouble() * (max - min) + min;
                }
            }
            return(result);
        }
 double[][] IRandomNumberGenerator <double[]> .Generate(int samples)
 {
     return(Generate(samples, Jagged.Create <double>(samples, dimension)));
 }
 /// <summary>
 ///   Generates a random vector of observations from the current distribution.
 /// </summary>
 ///
 /// <param name="samples">The number of samples to generate.</param>
 /// <returns>A random vector of observations drawn from this distribution.</returns>
 ///
 public int[][] Generate(int samples)
 {
     return(Generate(samples, Jagged.Create <int>(samples, dimension)));
 }
        public void LeastSquaresConstructorTest()
        {
            double[][] inputs =
            {
                new double[] { -1, -1 },
                new double[] { -1,  1 },
                new double[] {  1, -1 },
                new double[] {  1,  1 }
            };

            int[] or =
            {
                0,
                0,
                0,
                +1
            };

            // Create a linear Support Vector Machine for two inputs
            var machine = new SupportVectorMachine(inputs[0].Length);

            var learn = new LeastSquaresLearning(machine, inputs, or);

            double error = learn.Run();

            Assert.AreEqual(0, error);

            {
                int[] iout = new int[inputs.Length];
                machine.ToMulticlass().Decide(inputs, iout);
                for (int i = 0; i < iout.Length; i++)
                {
                    Assert.AreEqual(or[i], iout[i]);
                }
            }
            {
                double[] dout = new double[inputs.Length];
                machine.ToMulticlass().Decide(inputs, dout);
                for (int i = 0; i < dout.Length; i++)
                {
                    Assert.AreEqual(or[i], dout[i]);
                }
            }
            {
                bool[] bout = new bool[inputs.Length];
                machine.Decide(inputs, bout);
                Assert.IsFalse(bout[0]);
                Assert.IsFalse(bout[1]);
                Assert.IsFalse(bout[2]);
                Assert.IsTrue(bout[3]);
            }
            {
                int[][] iiout = Jagged.Create <int>(inputs.Length, 2);
                machine.ToMulticlass().Decide(inputs, iiout);
                for (int i = 0; i < iiout.Length; i++)
                {
                    Assert.AreEqual(or[i], iiout[i][0]);
                    Assert.AreEqual(or[i], iiout[i][1] == 1 ? 0 : 1);
                }
            }
            {
                bool[][] bbout = Jagged.Create <bool>(inputs.Length, 2);
                machine.ToMulticlass().Decide(inputs, bbout);
                for (int i = 0; i < bbout.Length; i++)
                {
                    Assert.AreEqual(or[i], bbout[i][0] ? 1 : 0);
                    Assert.AreEqual(or[i], bbout[i][1] ? 0 : 1);
                }
            }
        }
 int[][] IClassifier <int[], int[]> .Decide(int[][] input)
 {
     return(Decide(input, Jagged.Create <int>(input.Length, NumberOfOutputs)));
 }
 bool[][] IClassifier <float[], bool[]> .Decide(float[][] input)
 {
     return(Decide(input, Jagged.Create <bool>(input.Length, NumberOfOutputs)));
 }
 /// <summary>
 /// Applies the transformation to a set of input vectors,
 /// producing an associated set of output vectors.
 /// </summary>
 /// <param name="input">The input data to which
 /// the transformation should be applied.</param>
 /// <returns>The output generated by applying this
 /// transformation to the given input.</returns>
 public double[][] Transform(TInput[][] input)
 {
     return(Transform(input, Jagged.Create <double>(input.Length, NumberOfWords)));
 }
 /// <summary>
 ///   Applies the transformation to a set of input vectors,
 ///   producing an associated set of output vectors.
 /// </summary>
 ///
 /// <param name="input">The input data to which
 ///   the transformation should be applied.</param>
 ///
 /// <returns>The output generated by applying this
 ///   transformation to the given input.</returns>
 ///
 public override TOutput[][] Transform(TInput[] input)
 {
     return(Transform(input, Jagged.Create <TOutput>(input.Length, NumberOfOutputs)));
 }
 /// <summary>
 ///   Generates a random vector of observations from the current distribution.
 /// </summary>
 ///
 /// <param name="samples">The number of samples to generate.</param>
 /// <returns>A random vector of observations drawn from this distribution.</returns>
 ///
 public double[][] Generate(int samples)
 {
     return(Generate(samples, Jagged.Create <double>(samples, dimension)));
 }
 double[][] ISampleableDistribution <double[]> .Generate(int samples, Random source)
 {
     return(Generate(samples, Jagged.Create <double>(samples, dimension), source));
 }
 public int[][] Generate(int samples, Random source)
 {
     return(Generate(samples, Jagged.Create <int>(samples, dimension), source));
 }
        internal static void run(double[][] X, double[][] Y, double perplexity, double theta, bool skip_random_init = false)
        {
            int N       = X.Rows();
            int D       = X.Columns();
            int no_dims = Y.Columns();

            // Determine whether we are using an exact algorithm
            if (N - 1 < 3 * perplexity)
            {
                throw new Exception(String.Format("Perplexity too large for the number of data points. For {0} points, should be less than {1}", N, (N - 1) / 3.0));
            }

            Debug.Write(String.Format("Using no_dims = {0}, perplexity = {1}, and theta = {2}", no_dims, perplexity, theta));

            bool exact = (theta == 0.0);

            // Set learning parameters
            TimeSpan  total_time = TimeSpan.Zero;
            Stopwatch start;
            TimeSpan  end;
            int       max_iter        = 1000;
            int       stop_lying_iter = 250;
            int       mom_switch_iter = 250;
            double    momentum        = 0.5;
            double    final_momentum  = 0.8;
            double    eta             = 200.0;

            // Allocate some memory
            double[][] dY    = Jagged.Create <double>(N, no_dims);
            double[][] uY    = Jagged.Create <double>(N, no_dims);
            double[][] gains = Jagged.Ones <double>(N, no_dims);

            // Normalize input data (to prevent numerical problems)
            Debug.Write("Computing input similarities...");
            start = Stopwatch.StartNew();
            Accord.Statistics.Tools.Center(X, inPlace: true);

            X.Divide(X.Max(), result: X);

            // Compute input similarities for exact t-SNE
            double[][] P     = null;
            int[]      row_P = null;
            int[]      col_P = null;
            double[]   val_P = null;
            if (exact)
            {
                Trace.Write("Exact?");
                // Compute similarities
                P = Jagged.Create <double>(N, N);

                computeGaussianPerplexity(X, N, D, ref P, perplexity);

                // Symmetrize input similarities
                Debug.Write("Symmetrizing...");
                for (int n = 0; n < N; n++)
                {
                    for (int m = n + 1; m < N; m++)
                    {
                        P[n][m] += P[m][n];
                        P[m][n]  = P[n][m];
                    }
                }

                P.Divide(P.Sum(), result: P);
            }

            // Compute input similarities for approximate t-SNE
            else
            {
                // Compute asymmetric pairwise input similarities
                computeGaussianPerplexity(X, N, D, ref row_P, ref col_P, ref val_P, perplexity, (int)(3 * perplexity));

                // Symmetrize input similarities
                symmetrizeMatrix(ref row_P, ref col_P, ref val_P, N);
                double sum_P = 0.0;
                for (int i = 0; i < row_P[N]; i++)
                {
                    sum_P += val_P[i];
                }
                for (int i = 0; i < row_P[N]; i++)
                {
                    val_P[i] /= sum_P;
                }
            }
            end = start.Elapsed;

            // Lie about the P-values
            if (exact)
            {
                P.Multiply(12.0, result: P);
            }
            else
            {
                for (int i = 0; i < row_P[N]; i++)
                {
                    val_P[i] *= 12.0;
                }
            }

            if (!skip_random_init)
            {
                // Initialize solution (randomly)
                for (int i = 0; i < Y.Length; i++)
                {
                    for (int j = 0; j < Y[i].Length; j++)
                    {
                        Y[i][j] = randn() * 0.0001;
                    }
                }
            }

            // Perform main training loop
            if (exact)
            {
                Debug.Write(String.Format("Input similarities computed in {0} seconds!", end));
                Debug.Write("Learning embedding...");
            }
            else
            {
                Debug.Write(String.Format("Input similarities computed in {0} seconds (sparsity = {1})!", end, (double)row_P[N] / ((double)N * (double)N)));
                Debug.Write("Learning embedding...");
            }

            start = Stopwatch.StartNew();
            for (int iter = 0; iter < max_iter; iter++)
            {
                // Compute (approximate) gradient
                if (exact)
                {
                    computeExactGradient(P, Y, N, no_dims, dY);
                }
                else
                {
                    computeGradient(P, row_P, col_P, val_P, Y, N, no_dims, dY, theta);
                }

                // Update gains
                for (int i = 0; i < gains.Length; i++)
                {
                    for (int j = 0; j < gains[i].Length; j++)
                    {
                        gains[i][j] = (System.Math.Sign(dY[i][j]) != System.Math.Sign(uY[i][j])) ? (gains[i][j] + 0.2) : (gains[i][j] * 0.8);
                    }
                }

                for (int i = 0; i < gains.Length; i++)
                {
                    for (int j = 0; j < gains[i].Length; j++)
                    {
                        if (gains[i][j] < 0.01)
                        {
                            gains[i][j] = 0.01;
                        }
                    }
                }

                // Perform gradient update (with momentum and gains)
                for (int i = 0; i < uY.Length; i++)
                {
                    for (int j = 0; j < uY[i].Length; j++)
                    {
                        uY[i][j] = momentum * uY[i][j] - eta * gains[i][j] * dY[i][j];
                    }
                }

                for (int i = 0; i < Y.Length; i++)
                {
                    for (int j = 0; j < Y[i].Length; j++)
                    {
                        Y[i][j] = Y[i][j] + uY[i][j];
                    }
                }

                // Make solution zero-mean
                Accord.Statistics.Tools.Center(Y, inPlace: true);

                // Stop lying about the P-values after a while, and switch momentum
                if (iter == stop_lying_iter)
                {
                    if (exact)
                    {
                        P.Divide(12.0, result: P);
                    }
                    else
                    {
                        for (int i = 0; i < row_P[N]; i++)
                        {
                            val_P[i] /= 12.0;
                        }
                    }
                }

                if (iter == mom_switch_iter)
                {
                    momentum = final_momentum;
                }

                // Print out progress
                if (iter > 0 && (iter % 50 == 0 || iter == max_iter - 1))
                {
                    end = start.Elapsed;
                    double C = 0.0;
                    if (exact)
                    {
                        C = evaluateError(P, Y, N, no_dims);
                    }
                    else
                    {
                        C = evaluateError(row_P, col_P, val_P, Y, N, no_dims, theta);  // doing approximate computation here!
                    }

                    if (iter == 0)
                    {
                        Debug.WriteLine(String.Format("Iteration {0}: error is {1}", iter + 1, C));
                    }
                    else
                    {
                        total_time += end;
                        Debug.WriteLine(String.Format("Iteration {0}: error is {1} (50 iterations in {2} seconds)", iter, C, end));
                    }
                    start = Stopwatch.StartNew();
                }
            }
            end         = start.Elapsed;
            total_time += end;

            Debug.WriteLine(String.Format("Fitting performed in {0} seconds.", total_time));
        }
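        // Illustrative sketch (not part of the library) of how the internal driver above
        // might be exercised directly; in practice it is normally wrapped by a public
        // t-SNE API. The data here is random and only demonstrates the expected shapes.
        internal static void RunUsageSketch()
        {
            var rnd = new System.Random(42);

            double[][] X = Jagged.Create <double>(100, 10);   // 100 samples, 10 dimensions
            for (int i = 0; i < X.Length; i++)
            {
                for (int j = 0; j < X[i].Length; j++)
                {
                    X[i][j] = rnd.NextDouble();
                }
            }

            double[][] Y = Jagged.Create <double>(100, 2);    // target 2-D embedding

            // theta > 0 selects the Barnes-Hut approximation; theta == 0 would be exact.
            run(X, Y, perplexity: 5, theta: 0.5);
        }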
 internal T[][] create <T>(TInput[] input)
 {
     return(Jagged.Create <T>(input.Length, NumberOfOutputs));
 }
 int[][] ICovariantTransform <TInput[], int[]> .Transform(TInput[][] input)
 {
     return(Transform(input, Jagged.Create <int>(input.Length, NumberOfWords)));
 }
        /// <summary>
        ///   Solves a linear equation system of the form AX = B.
        /// </summary>
        /// <param name="value">Parameter B from the equation AX = B.</param>
        /// <returns>The solution X from equation AX = B.</returns>
        public Single[][] Solve(Single[][] value)
        {
            // An important property is that even when no exact solution exists (for
            // example when the matrix A is singular), replacing 1/Li with 0 yields the
            // solution that minimizes the residual |AX - Y|. SVD thus finds the least
            // squares compromise solution of the linear system, and can also be used
            // for over-determined systems where the number of equations exceeds the
            // number of parameters.

            // L is a diagonal matrix with non-negative elements having the same
            // dimension as A, Li >= 0. The diagonal elements of L are the singular
            // values of matrix A.

            Single[][] Y = value;

            // Create L*, which is a diagonal matrix with elements
            //    L*[i] = 1/L[i]  if |L[i]| > e, else 0,
            // where e is the so-called singularity threshold.

            // In other words, if L[i] is zero or close to zero (smaller than e),
            // one must replace 1/L[i] with 0. The value of e depends on the precision
            // of the hardware. This method can be used to solve linear equation
            // systems even if the matrices are singular or close to singular.

            //singularity threshold
            Single e = this.Threshold;


            int scols = s.Length;
            var Ls    = new Single[scols][];

            for (int i = 0; i < s.Length; i++)
            {
                Ls[i] = new Single[scols];
                if (System.Math.Abs(s[i]) <= e)
                {
                    Ls[i][i] = 0;
                }
                else
                {
                    Ls[i][i] = 1 / s[i];
                }
            }

            //(V x L*) x Ut x Y
            var VL = Matrix.Dot(v, Ls);

            //(V x L* x Ut) x Y
            int vrows = v.Rows();
            int urows = u.Rows();
            int ucols = u.Columns();
            var VLU   = Jagged.Create <Single>(vrows, urows);

            for (int i = 0; i < vrows; i++)
            {
                for (int j = 0; j < urows; j++)
                {
                    Single sum = 0;
                    for (int k = 0; k < ucols; k++)
                    {
                        sum += VL[i][k] * u[j][k];
                    }
                    VLU[i][j] = sum;
                }
            }

            //(V x L* x Ut x Y)
            return(Matrix.Dot(VLU, Y));
        }
        /// <summary>Constructs a QR decomposition.</summary>
        /// <param name="value">The matrix A to be decomposed.</param>
        /// <param name="transpose">True if the decomposition should be performed on
        /// the transpose of A rather than A itself, false otherwise. Default is false.</param>
        /// <param name="inPlace">True if the decomposition should be done in place,
        /// overriding the given matrix <paramref name="value"/>. Default is false.</param>
        /// <param name="economy">True to perform the economy decomposition, where only
        /// the information needed to solve linear systems is computed. If set to false,
        /// the full QR decomposition will be computed.</param>
        public JaggedQrDecomposition(Double[][] value, bool transpose = false,
                                     bool economy = true, bool inPlace = false)
        {
            if (value == null)
            {
                throw new ArgumentNullException("value", "Matrix cannot be null.");
            }

            if ((!transpose && value.Length < value[0].Length) ||
                (transpose && value[0].Length < value.Length))
            {
                throw new ArgumentException("Matrix has more columns than rows.", "value");
            }

            // https://www.inf.ethz.ch/personal/gander/papers/qrneu.pdf

            if (transpose)
            {
                this.p = value.Rows();

                if (economy)
                {
                    // Compute the faster, economy-sized QR decomposition
                    this.qr = value.Transpose(inPlace: inPlace);
                }
                else
                {
                    // Create room to store the full decomposition
                    this.qr = Jagged.Create(value.Columns(), value.Columns(), value, transpose: true);
                }
            }
            else
            {
                this.p = value.Columns();

                if (economy)
                {
                    // Compute the faster, economy-sized QR decomposition
                    this.qr = inPlace ? value : value.Copy();
                }
                else
                {
                    // Create room to store the full decomposition
                    this.qr = Jagged.Create(value.Rows(), value.Rows(), value, transpose: false);
                }
            }

            this.economy = economy;
            this.n       = qr.Rows();
            this.m       = qr.Columns();
            this.Rdiag   = new Double[m];

            for (int k = 0; k < m; k++)
            {
                // Compute 2-norm of k-th column without under/overflow.
                Double nrm = 0;
                for (int i = k; i < qr.Length; i++)
                {
                    nrm = Tools.Hypotenuse(nrm, qr[i][k]);
                }

                if (nrm != 0)
                {
                    // Form k-th Householder vector.
                    if (qr[k][k] < 0)
                    {
                        nrm = -nrm;
                    }

                    for (int i = k; i < qr.Length; i++)
                    {
                        qr[i][k] /= nrm;
                    }

                    qr[k][k] += 1;

                    // Apply transformation to remaining columns.
                    for (int j = k + 1; j < m; j++)
                    {
                        Double s = 0;
                        for (int i = k; i < qr.Length; i++)
                        {
                            s += qr[i][k] * qr[i][j];
                        }

                        s = -s / qr[k][k];
                        for (int i = k; i < qr.Length; i++)
                        {
                            qr[i][j] += s * qr[i][k];
                        }
                    }
                }

                this.Rdiag[k] = -nrm;
            }
        }
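        // Illustrative usage sketch (not part of the library): decomposes a small
        // full-rank matrix and solves A * X = B with the Solve method shown earlier.
        public static void JaggedQrUsageSketch()
        {
            double[][] A =
            {
                new double[] { 2, 0 },
                new double[] { 0, 3 },
                new double[] { 1, 1 }
            };

            double[][] B =
            {
                new double[] { 4 },
                new double[] { 9 },
                new double[] { 5 }
            };

            var qr = new JaggedQrDecomposition(A);

            // Least-squares solution of A * X = B; here it is exact: X = { { 2 }, { 3 } }
            double[][] X = qr.Solve(B);
        }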