Exemple #1
0
        public void TestInverse2x2b()
        {
            const int n = 2;

            // Invert a copy of A in place, then verify that A * A^-1 is the identity.
            float[,] a = new float[n, n] {
                { 1, 2 },
                { 3, 4 }
            };
            float[,] inverse = (float[, ])a.Clone();
            Lapack.Inverse(inverse, n);

            // Naive O(n^3) matrix product of A and its inverse.
            float[,] product = new float[n, n];
            for (int row = 0; row < n; row++)
            {
                for (int col = 0; col < n; col++)
                {
                    for (int k = 0; k < n; k++)
                    {
                        product[row, col] += a[row, k] * inverse[k, col];
                    }
                }
            }

            // The product must be (almost) the 2x2 identity.
            AssertArray.AreAlmostEqual(1f, product[0, 0]);
            AssertArray.AreAlmostEqual(0f, product[0, 1]);
            AssertArray.AreAlmostEqual(0f, product[1, 0]);
            AssertArray.AreAlmostEqual(1f, product[1, 1]);
        }
        public void DotAsEinsteinSumMatchesDot()
        {
            // "ij,jk->ik" is exactly a matrix product, so both code paths must agree.
            var left  = NN.Random.Uniform(-1f, 1f, 10, 20);
            var right = NN.Random.Uniform(-1f, 1f, 20, 13);

            var viaDot      = left.Dot(right);
            var viaEinstein = NN.EinsteinSum(left, right, "ij,jk->ik");

            AssertArray.AreAlmostEqual(viaDot, viaEinstein);
        }
        public void TensorDotAsEinsteinSumMatchesTensorDot()
        {
            // Contracting axis 1 of x with axis 0 of y is "ij,jkl->ikl" in Einstein notation.
            var left  = NN.Random.Uniform(-1f, 1f, 10, 20);
            var right = NN.Random.Uniform(-1f, 1f, 20, 13, 5);

            var viaTensorDot = NN.TensorDot(left, new[] { 1 }, right, new[] { 0 });
            var viaEinstein  = NN.EinsteinSum(left, right, "ij,jkl->ikl");

            AssertArray.AreAlmostEqual(viaTensorDot, viaEinstein);
        }
Exemple #4
0
        public void TestLoop1()
        {
            // Computes tanh(x(t).dot(W) + b) elementwise, row by row, via Scan.
            // http://deeplearning.net/software/theano/tutorial/loop.html

            // Symbolic variables.
            var X     = T.Matrix <float>("x");
            var W     = T.Matrix <float>("W");
            var b_sym = T.Vector <float>("b_sym");

            // Scan applies the tanh layer to each row of X in turn.
            var rows                = T.Scan(v => T.Tanh(T.Dot(v, W) + b_sym), sequence: X);
            var compute_elementwise = T.Function(inputs: new[] { X, W, b_sym }, output: rows);

            // Concrete inputs.
            var x = NN.Eye <float>(2);
            var w = NN.Ones <float>(2, 2);
            var b = NN.Ones <float>(2);

            b.Item[1] = 2;

            // Eagerly-computed reference for the first row.
            var actual   = compute_elementwise(new[] { x, w, b });
            var expected = NN.Tanh(x.Dot(w) + b);

            AssertArray.AreAlmostEqual(expected[0], actual[0]);
        }
Exemple #5
0
        public void TestHeapsortBatch()
        {
            const int neighbors = 3;
            int columns = _test_3.Shape[1];

            // One (distance, word) result buffer per query column.
            var bestd = new float[columns][];
            var bestw = new int[columns][];
            for (int c = 0; c < columns; c++)
            {
                bestd[c] = new float[neighbors];
                bestw[c] = new int[neighbors];
            }

            _w2v.NBestHeap(_test_3, bestd, bestw);

            // Flatten the per-column results and compare against the known answers.
            var expectedDistances = new float[] {
                1.34889853f, 1.17417169f, 1.15919185f, 4.63800335f, 4.19298649f, 3.162262f
            };
            var expectedWords = new int[] {
                8, 2, 0, 1, 7, 3
            };

            AssertArray.AreAlmostEqual(expectedDistances, bestd.SelectMany(d => d).ToArray());
            AssertArray.AreEqual(expectedWords, bestw.SelectMany(w => w).ToArray());
        }
        public void TestCombineDispatchOnTranspose()
        {
            // Combine must dispatch correctly on transposed (non-contiguous) views:
            // contracting a transposed view with the arguments swapped accordingly
            // must give the same result as the original orientation.
            float r = 0.5f;
            var   t = NN.Random.Uniform(-r, r, 5, 4, 3).As <float>();

            var x = NN.Random.Uniform(-r, r, 3).As <float>();
            var y = NN.Random.Uniform(-r, r, 4).As <float>();
            var z = NN.Random.Uniform(-r, r, 5).As <float>();

            var txy  = t.Transpose(0, 1, 2).Combine(x, y);
            var tyx  = t.Transpose(0, 2, 1).Combine(y, x);
            // NOTE(review): txy_ is computed but never asserted — presumably an
            // AreAlmostEqual(txy, txy_) check was intended; confirm and add it.
            var txy_ = t.Dot(x).Dot(y);

            AssertArray.AreAlmostEqual(txy, tyx);

            var txz = t.Transpose(1, 0, 2).Combine(x, z);
            var tzx = t.Transpose(1, 2, 0).Combine(z, x);
            // NOTE(review): ztx is also computed but unused (same concern as txy_).
            var ztx = z.Dot(t.Dot(x));

            AssertArray.AreAlmostEqual(txz, tzx);

            var tyz = t.Transpose(2, 0, 1).Combine(y, z);
            var tzy = t.Transpose(2, 1, 0).Combine(z, y);
            // NOTE(review): zyt is also computed but unused.
            var zyt = z.Dot(y.Dot(t));

            AssertArray.AreAlmostEqual(tyz, tzy);
        }
Exemple #7
0
        public void TestHeapsortSingle()
        {
            const int neighbors = 4;
            var distances = new float[neighbors];
            var words     = new int[neighbors];

            // First query vector.
            _w2v.NBestHeap(_test_1, distances, words);
            AssertArray.AreAlmostEqual(new float[] {
                1.34889853f, 1.17417169f, 1.15919185f, 0.866888f
            }, distances);
            AssertArray.AreEqual(new int[] { 8, 2, 0, 5 }, words);

            // Second query vector, reusing the same output buffers.
            _w2v.NBestHeap(_test_2, distances, words);
            AssertArray.AreAlmostEqual(new float[] {
                4.63800335f, 4.19298649f, 3.162262f, 1.9904952f
            }, distances);
            AssertArray.AreEqual(new int[] { 1, 7, 3, 4 }, words);
        }
Exemple #8
0
        public void TestCompatibilityHeapsortBatchBatchParallel()
        {
            // The parallel batch heapsort must return exactly the same neighbors
            // as the sequential batch version.
            var       queries   = NN.Random.Normal(0f, 0.1f, 4, 30);
            const int neighbors = 6;
            int       columns   = queries.Shape[1];

            var seqDistances = new float[columns][];
            var parDistances = new float[columns][];
            var seqWords     = new int[columns][];
            var parWords     = new int[columns][];

            for (int c = 0; c < columns; c++)
            {
                seqDistances[c] = new float[neighbors];
                parDistances[c] = new float[neighbors];
                seqWords[c]     = new int[neighbors];
                parWords[c]     = new int[neighbors];
            }

            _w2v.NBestHeap(queries, seqDistances, seqWords);
            _w2v.NBestHeapParallel(queries, parDistances, parWords);

            // Column-by-column comparison of both implementations.
            for (int c = 0; c < columns; c++)
            {
                AssertArray.AreAlmostEqual(seqDistances[c], parDistances[c]);
                AssertArray.AreEqual(seqWords[c], parWords[c]);
            }
        }
        public void TestCombine()
        {
            // Build a non-trivial tensor by scaling slices of an all-ones (5, 4, 3).
            var t = NN.Ones <float>(5, 4, 3);

            t[2, _, _] *= 2;
            t[_, 1, _] *= -1;
            t[_, _, 2] *= 3;

            var x = NN.Array <float>(1, -1, 3);
            var y = NN.Array <float>(1, -1, 3, 2);

            // Reference: contract the last two axes by explicit summation.
            var reference = NN.Zeros <float>(5);
            for (int a = 0; a < reference.Shape[0]; ++a)
            {
                for (int b = 0; b < y.Shape[0]; ++b)
                {
                    for (int c = 0; c < x.Shape[0]; ++c)
                    {
                        reference.Item[a] += t.Item[a, b, c] * y.Item[b] * x.Item[c];
                    }
                }
            }

            var expected = new float[] { 63, 63, 126, 63, 63 };

            // The naive sum, the chained Dot, and Combine must all agree.
            AssertArray.AreAlmostEqual(expected, reference);
            AssertArray.AreAlmostEqual(expected, t.Dot(x).Dot(y));
            AssertArray.AreAlmostEqual(expected, t.Combine(x, y));
        }
        public void TestCombineWithBias3D_1D()
        {
            // CombineWithBias(x, y) must equal a plain Combine where x and y are each
            // extended with a trailing bias entry equal to 1.
            var t = NN.Ones <float>(6, 5, 4);

            t[2, _, _] *= 2;
            t[_, 1, _] *= -1;
            t[_, _, 2] *= 3;

            var x = NN.Array <float>(1, -1, -2);
            var y = NN.Array <float>(1, -1, 3, 1);

            Array <float> txy2 = null;

            // NOTE(review): txy2 is computed but never asserted — presumably meant to
            // exercise/check the `result:` overload against txy; confirm and add a check.
            txy2 = t[_, Upto(-1), Upto(-1)].Combine(x, y, result: txy2);

            var txy = t.CombineWithBias(x, y);

            // xb = x extended with a trailing 1 (the implicit bias input).
            var xb = NN.Ones <float>(4);

            xb[Upto(-1)] = x;
            // yb = y extended with a trailing 1 (the implicit bias input).
            var yb = NN.Ones <float>(5);

            yb[Upto(-1)] = y;

            var txbyb = t.Combine(xb, yb);

            AssertArray.AreAlmostEqual(txbyb, txy);
        }
Exemple #11
0
        public void TestCompatibilityHeapsortBatchSingle()
        {
            // The batch heapsort must agree column-by-column with the single-query version.
            var       queries   = NN.Random.Normal(0.1f, 0.1f, 4, 20);
            const int neighbors = 3;
            int       columns   = queries.Shape[1];

            var batchDistances = new float[columns][];
            var batchWords     = new int[columns][];
            for (int c = 0; c < columns; c++)
            {
                batchDistances[c] = new float[neighbors];
                batchWords[c]     = new int[neighbors];
            }

            _w2v.NBestHeap(queries, batchDistances, batchWords);

            // Re-run each column through the single-query entry point.
            var singleDistances = new float[neighbors];
            var singleWords     = new int[neighbors];
            for (int c = 0; c < columns; c++)
            {
                _w2v.NBestHeap(queries[Slicer._, c], singleDistances, singleWords);
                AssertArray.AreEqual(singleWords, batchWords[c]);
                AssertArray.AreAlmostEqual(singleDistances, batchDistances[c]);
            }
        }
Exemple #12
0
        public void TestLogSumExp()
        {
            // LogSumExp must match the naive log(sum(exp(.))) along the last axis.
            var input = NN.Array(new float[, ] {
                { 1, 3 }, { 2, 5 }
            });

            var naive = NN.Log(NN.Sum(NN.Exp(input), axis: -1));

            AssertArray.AreAlmostEqual(naive, NN.LogSumExp(input));
        }
Exemple #13
0
        public void MeanAcceptIntTensor()
        {
            // Mean over Range(10) = (0 + 1 + ... + 9) / 10 = 4.5, even though the
            // input tensor is integer-typed.
            var count = T.Scalar <int>("i");
            var range = T.Range(count);
            var mean  = T.Mean(range);

            var evaluate = T.Function(count, mean);

            AssertArray.AreAlmostEqual(evaluate(10), 4.5f);
        }
Exemple #14
0
        public void TestSoftmax2D()
        {
            // Row-wise softmax of a 2x2 matrix against precomputed values.
            var input = NN.Array(new float[, ] {
                { 1, 3 }, { 2, 5 }
            });

            var expected = new float[, ] {
                { 0.11920292f, 0.88079708f }, { 0.04742587f, 0.95257413f }
            };

            AssertArray.AreAlmostEqual(expected, NN.Softmax(input));
        }
Exemple #15
0
        public void TestSolve()
        {
            /* Solve the equations A*X = B */
            // https://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_lapack_examples/dgesv_ex.c.htm
            const int N = 5;
            const int NRHS = 3;
            const int LDA = N;
            const int LDB = NRHS;
            int       n = N, nrhs = NRHS, lda = LDA, ldb = LDB;

            /* Local arrays */
            // ipiv receives the pivot indices of the LU factorization (1-based, per LAPACK).
            int[]    ipiv = new int[N];
            // A, flattened; LDB == NRHS suggests row-major layout — matches the MKL example.
            double[] a    = new double[N * N] {
                6.80, -6.05, -0.45, 8.32, -9.67,
                -2.11, -3.30, 2.58, 2.71, -5.14,
                5.66, 5.36, -2.70, 4.35, -7.26,
                5.97, -4.44, 0.27, -7.17, 6.08,
                8.23, 1.08, 9.04, 2.14, -6.87
            };
            // B, flattened: one right-hand side per column (N rows x NRHS columns).
            double[] b = new double[N * NRHS] {
                4.02, -1.56, 9.81,
                6.19, 4.00, -4.09,
                -8.22, -8.67, -4.57,
                -7.57, 1.75, -8.61,
                -3.03, 2.86, 8.99
            };
            /* Solve the equations A*X = B */
            // gesv overwrites `a` with its LU factors and `b` with the solution X
            // (verified by the assertions below).
            Lapack.gesv(n, nrhs, a, lda, ipiv, b, ldb);

            // Solution
            var solution = NN.Array(new[] {
                -0.80, -0.39, 0.96,
                -0.70, -0.55, 0.22,
                0.59, 0.84, 1.90,
                1.32, -0.10, 5.36,
                0.57, 0.11, 4.04,
            }).Reshape(n, nrhs);

            AssertArray.AreAlmostEqual(solution, NN.Array(b).Reshape(N, NRHS), 1e-2, 1e-2);

            // Details of LU factorization
            // (L below the diagonal with implicit unit diagonal, U on and above it.)
            var luFactorization = NN.Array(new[]
            {
                8.23, 1.08, 9.04, 2.14, -6.87,
                0.83, -6.94, -7.92, 6.55, -3.99,
                0.69, -0.67, -14.18, 7.24, -5.19,
                0.73, 0.75, 0.02, -13.82, 14.19,
                -0.26, 0.44, -0.59, -0.34, -3.43,
            }).Reshape(n, n);

            AssertArray.AreAlmostEqual(luFactorization, NN.Array(a).Reshape(n, n), 1e-2, 1e-2);

            // Pivot indices
            AssertArray.AreEqual(new[] { 5, 5, 3, 4, 5 }, ipiv);
        }
Exemple #16
0
        public void TestDeterminant2x2c()
        {
            // det([[1, 2], [3, 4]]) = 1*4 - 2*3 = -2.
            const int n = 2;
            var matrix = new float[n * n] {
                1, 2,
                3, 4
            };

            AssertArray.AreAlmostEqual(Lapack.Determinant(matrix, n), -2f);
        }
        public void TestCombine2()
        {
            // Combining an all-ones (4, 5, 6) tensor with all-ones vectors contracts
            // the last two axes: each of the 4 entries is the sum of 5 * 6 = 30 ones.
            var tensor = NN.Ones <float>(4, 5, 6);
            var u      = NN.Ones <float>(6);
            var v      = NN.Ones <float>(5);

            var combined = tensor.Combine(u, v);

            AssertArray.AreAlmostEqual(NN.Array <float>(30, 30, 30, 30), combined);
        }
        public void TestVector()
        {
            // Filling an Array<float> element by element must match the literal constructor.
            var expected = NN.Array <float>(0, 1, 2);
            var actual   = new Array <float>(3);

            for (int index = 0; index < 3; index++)
            {
                actual.Item[index] = index;
            }

            AssertArray.AreAlmostEqual(actual, expected);
        }
Exemple #19
0
        /// <summary>Verifies that the symbolic gradient of x^2 evaluates to 2x.</summary>
        /// <see href="http://deeplearning.net/software/theano/tutorial/gradients.html"/>
        public void GradOfX2Is2X()
        {
            var x        = T.Scalar <float>("x");
            var squared  = T.Pow(x, 2);
            var gradient = T.Grad(squared, x);
            // Prior to optimization the gradient prints as
            // '((fill((x ** 2), 1.0) * 2) * (x ** (2 - 1)))', i.e. 2 * x.
            var evaluate = T.Function(x, gradient);

            AssertArray.AreAlmostEqual(8.0f, evaluate(4));
            AssertArray.AreAlmostEqual(188.4f, evaluate(94.2f));
        }
        public void TestDotWithBias1D_1D()
        {
            // DotWithBias treats b as if it had a trailing 1 appended:
            // 1*1 + 2*(-1) + 1*1 + 3*1 = 3.
            var a = NN.Array <float>(1, 2, 1, 3);
            var b = NN.Array <float>(1, -1, 1);

            AssertArray.AreAlmostEqual(3, (float)a.DotWithBias(b));

            // On random data: Dot against b explicitly extended with a 1 is the same.
            a = NN.Random.Uniform(-1, 1, 5).As <float>();
            b = NN.Random.Uniform(-1, 1, 4).As <float>();
            var extended = NN.Ones <float>(5);

            extended[Upto(4)] = b;

            AssertArray.AreAlmostEqual(a.Dot(extended), a.DotWithBias(b));
        }
Exemple #21
0
        public void TestSvd()
        {
            // LAPACKE_dgesvd (row-major, high-level)
            // https://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_lapack_examples/lapacke_dgesvd_row.c.htm
            var a = NN.Array(new[, ] {
                { 8.79, 9.93, 9.83, 5.45, 3.16 },
                { 6.11, 6.91, 5.04, -0.27, 7.98 },
                { -9.15, -7.93, 4.86, 4.85, 3.01 },
                { 9.57, 1.64, 8.83, 0.74, 5.80 },
                { -3.49, 4.02, 9.80, 10.00, 4.27 },
                { 9.84, 0.15, -8.99, -6.02, -5.31 }
            });

            double[]       s;
            Array <double> u, vt;

            Svd(a, out u, out s, out vt);

            var singularValues = NN.Array(new[] { 27.47, 22.64, 8.56, 5.99, 2.01 });

            AssertArray.AreAlmostEqual(singularValues, NN.Array(s), 1e-2, 1e-2);

            // Left singular vectors (stored columnwise)
            var leftSingularVectors = NN.Array(new[, ] {
                { -0.59, 0.26, 0.36, 0.31, 0.23, 0.55 },
                { -0.40, 0.24, -0.22, -0.75, -0.36, 0.18 },
                { -0.03, -0.60, -0.45, 0.23, -0.31, 0.54 },
                { -0.43, 0.24, -0.69, 0.33, 0.16, -0.39 },
                { -0.47, -0.35, 0.39, 0.16, -0.52, -0.46 },
                { 0.29, 0.58, -0.02, 0.38, -0.65, 0.11 }
            });

            AssertArray.AreAlmostEqual(leftSingularVectors, u, 1e-2, 1e-2);

            // Right singular vectors (stored rowwise)
            var rightSingularVectors = NN.Array(new[, ] {
                { -0.25, -0.40, -0.69, -0.37, -0.41 },
                { 0.81, 0.36, -0.25, -0.37, -0.10 },
                { -0.26, 0.70, -0.22, 0.39, -0.49 },
                { 0.40, -0.45, 0.25, 0.43, -0.62 },
                { -0.22, 0.14, 0.59, -0.63, -0.44 }
            });

            AssertArray.AreAlmostEqual(rightSingularVectors, vt, 1e-2, 1e-2);

            // Embed the singular values in a rectangular Sigma matrix and check the
            // reconstruction A = U * Sigma * V^T.
            // (This block was previously truncated: the method lacked its closing
            // brace and `sigma` was built but never used.)
            var sigma = NN.Zeros <double>(a.Shape[0], a.Shape[1]);

            sigma[(0, s.Length), (0, s.Length)] = NN.Diag(s);

            AssertArray.AreAlmostEqual(a, u.Dot(sigma).Dot(vt), 1e-2, 1e-2);
        }
        public void TestShared()
        {
            var input  = T.Vector <float>("x");
            var shared = T.Shared(NN.Array <float>(2, 5, 8), "y");
            var f      = T.Function(input, input + shared);

            // First call uses the initial shared value.
            var first = f(NN.Array <float>(1, 2, 3));
            AssertArray.AreAlmostEqual(NN.Array <float>(3, 7, 11), first);

            // Mutating the shared variable must be visible to the already-compiled function.
            shared.Value = NN.Array <float>(1, 1, 1);
            var second = f(NN.Array <float>(1, 2, 3));
            AssertArray.AreAlmostEqual(NN.Array <float>(2, 3, 4), second);
        }
Exemple #23
0
        public void TestBaseline()
        {
            // Reference (non-heap) NBest search on the first test vector.
            var distances = new float[4];
            var words     = new int[4];

            _w2v.NBest(_test_1, distances, words);

            AssertArray.AreAlmostEqual(new float[] {
                1.34889853f, 1.17417169f, 1.15919185f, 0.866888f
            }, distances);
            AssertArray.AreEqual(new int[] { 8, 2, 0, 5 }, words);
        }
Exemple #24
0
        public void TestGivenFloatVar()
        {
            var x   = T.Scalar <float>("x");
            var y   = T.Scalar <float>("y");
            var sum = x + y;

            // Constant given: y is pinned to 4, so f(2) = 2 + 4 = 6.
            var f = T.Function(input: x, output: sum, givens: new OrderedDictionary {
                { y, 4f }
            });

            AssertArray.AreAlmostEqual(f(2), 6f);

            // Expression given: y becomes x + 4, so f2(2) = 2 + (2 + 4) = 8.
            var f2 = T.Function(input: x, output: sum, givens: new OrderedDictionary {
                { y, x + 4f }
            });

            AssertArray.AreAlmostEqual(f2(2), 8f);
        }
Exemple #25
0
        public void ConvolveValidAgreesWithNumpy()
        {
            // Expected values match numpy.convolve(..., mode='valid').
            var single = NN.Array(1f, 2f, 3f).Convolve(NN.Array(0f, 1f, 0.5f), mode: ConvMode.Valid);
            AssertArray.AreAlmostEqual(NN.Array(2.5f), single);

            var medium = NN.Array(0f, 1f, 0.5f, 2f, 1f).Convolve(NN.Array(1f, 2f, 3f), mode: ConvMode.Valid);
            AssertArray.AreAlmostEqual(NN.Array(2.5f, 6f, 6.5f), medium);

            var longer = NN.Array(1, 2, 0.1f, 0.4f, 5, 1, 2, 1).Convolve(NN.Array(0, 1, 0.5f, 2), mode: ConvMode.Valid);
            AssertArray.AreAlmostEqual(NN.Array(3.1f, 4.45f, 5.4f, 4.3f, 12.5f), longer);
        }
Exemple #26
0
        public void TestVPTree()
        {
            // With the VPTree the ranking is not exactly the same as the heap search
            // because the tree requires normalised vectors; that is acceptable, since
            // normalising before a neighbour search is usually preferable anyway.
            var distances = new double[4];
            var words     = new int[4];

            _w2v.NBestVPTree(_test_1, distances, words);

            AssertArray.AreAlmostEqual(new double[] {
                0.132890641689301, 0.480090379714966, 0.648832619190216, 0.662571460008621
            }, distances);
            AssertArray.AreEqual(new int[] { 8, 2, 5, 0 }, words);
        }
Exemple #27
0
        /// <summary>Gradient of sum(sigmoid(x)) is sigmoid'(x) = s(x) * (1 - s(x)) elementwise.</summary>
        /// <see href="http://deeplearning.net/software/theano/tutorial/gradients.html"/>
        public void TestGradOfNeuralLayer()
        {
            var x         = T.Matrix <float>("x");
            var logistic  = T.Sum(1 / (1 + T.Exp(-x)));
            var gradient  = T.Grad(logistic, x);
            var dlogistic = T.Function(x, gradient);

            var input = NN.Array(new float[, ] {
                { 0, 1 },
                { -1, -2 }
            });

            // s'(0) = 0.25, s'(1) = s'(-1) ~= 0.196612, s'(-2) ~= 0.10499359.
            var expected = NN.Array(new float[, ] {
                { 0.25f, 0.196612f },
                { 0.196612f, 0.10499359f }
            });

            AssertArray.AreAlmostEqual(expected, dlogistic(input));
        }
        public void TestDotWithBias()
        {
            // Writing the 3x3 identity into all but the last column of a 3x4 zero
            // matrix yields the identity padded with a zero bias column.
            var padded   = NN.Zeros <float>(3, 4);
            var identity = NN.Eye <float>(3);

            padded[_, Upto(-1)] = identity;

            var expected = NN.Array <float>(new float[, ] {
                { 1, 0, 0, 0 },
                { 0, 1, 0, 0 },
                { 0, 0, 1, 0 }
            });

            AssertArray.AreAlmostEqual(expected, padded);
        }
Exemple #29
0
        public void SumProductWithSharedCanTrain()
        {
            // Trains the shared variable b through a Scan loop:
            // the scan carries two sequences, a running sum of the rows of xs and
            // x * a + b, and the cost is the sum of the second sequence's last value.
            var n = 2;
            // sequence of input
            var xs = T.Matrix <float>("xs");
            // accumulator
            var z = T.Vector <float>("z");
            var b = T.Shared(NN.Ones(n), "b");

            // sum xs in the accumulator
            // rec maps (x, a) -> [x + a, x * a + b]; the first output is the recurrence
            // (seeded with z below), the second has no recurrence (the `null` seed).
            Func <Tensor <float>, Tensor <float>, IList <Tensor <float> > > rec = (x, a) =>
                                                                                  new List <Tensor <float> >()
            {
                x + a, x *a + b
            };
            var loop = T.Scan(rec, xs, new[] { z, null });

            // get the last value
            var prod = loop[1][-1];

            // compute the cost and the gradient for the shared b.
            var cost = T.Sum(prod);
            var db   = T.Grad(cost, b);

            var costFunction = T.Function(input: (xs, z), output: cost);
            var xs_          = NN.Array(new float[, ] {
                { 1, -1 },
                { 0, -2 }
            });

            var z_ = NN.Zeros(n);

            var cost_xs_z = costFunction(xs_, z_);

            // Traced by hand: step 1 gives a = (1, -1); step 2 gives
            // prod = (0, -2) * (1, -1) + (1, 1) = (1, 3), so cost = 4.
            Assert.AreEqual(4, cost_xs_z);

            // One SGD step on b: cost depends on b only through the final "+ b",
            // so db = (1, 1) and b moves from (1, 1) to (0.95, 0.95).
            var updates = new OrderedDictionary {
                { b, b - 0.05f * db }
            };
            var train      = T.Function(input: (xs, z), output: cost, updates: updates);
            var cost_xs_z2 = train(xs_, z_);

            AssertArray.AreAlmostEqual(NN.Array(new[] { 0.95f, 0.95f }), b.Value);
        }
        public void TestMatrix()
        {
            // Filling an Array<float> cell by cell must match the literal constructor.
            var expected = NN.Array(new float[, ] {
                { 0, 1, 2, 3 },
                { 1, 2, 3, 4 },
                { 2, 3, 4, 5 }
            });
            var actual = new Array <float>(3, 4);

            for (int row = 0; row < 3; row++)
            {
                for (int col = 0; col < 4; col++)
                {
                    actual.Item[row, col] = row + col;
                }
            }

            AssertArray.AreAlmostEqual(actual, expected);
        }