Example 1
        public void TestCombineWithBias3D_1D()
        {
            var t = NN.Ones<float>(6, 5, 4);

            t[2, _, _] *= 2;
            t[_, 1, _] *= -1;
            t[_, _, 2] *= 3;

            var x = NN.Array<float>(1, -1, -2);
            var y = NN.Array<float>(1, -1, 3, 1);

            Array<float> txy2 = null;

            txy2 = t[_, Upto(-1), Upto(-1)].Combine(x, y, result: txy2);

            var txy = t.CombineWithBias(x, y);

            var xb = NN.Ones<float>(4);

            xb[Upto(-1)] = x;
            var yb = NN.Ones<float>(5);

            yb[Upto(-1)] = y;

            var txbyb = t.Combine(xb, yb);

            AssertArray.AreAlmostEqual(txbyb, txy);
        }
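The xb/yb padding above is the whole trick behind CombineWithBias: append a constant 1 so the last slice of t acts as the bias. A minimal sketch of that pattern as a helper, using only the NN.Ones and Upto indexing from the test (AppendBias is a hypothetical name, not part of the library):

        // Hypothetical helper: append a constant 1 to a vector, so that
        // t.Combine(AppendBias(x), AppendBias(y)) matches t.CombineWithBias(x, y).
        static Array<float> AppendBias(Array<float> v)
        {
            var vb = NN.Ones<float>(v.Shape[0] + 1); // all ones, so the trailing entry is the bias 1
            vb[Upto(-1)] = v;                        // overwrite everything except the last entry
            return vb;
        }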
Example 2
        public void TestCombine()
        {
            var t = NN.Ones<float>(5, 4, 3);

            t[2, _, _] *= 2;
            t[_, 1, _] *= -1;
            t[_, _, 2] *= 3;

            var x   = NN.Array<float>(1, -1, 3);
            var y   = NN.Array<float>(1, -1, 3, 2);
            var txy = t.Combine(x, y);

            var z = NN.Zeros<float>(5);

            for (int k = 0; k < z.Shape[0]; ++k)
            {
                for (int j = 0; j < y.Shape[0]; ++j)
                {
                    for (int i = 0; i < x.Shape[0]; ++i)
                    {
                        z.Item[k] += t.Item[k, j, i] * y.Item[j] * x.Item[i];
                    }
                }
            }

            var expected = new float[] { 63, 63, 126, 63, 63 };

            AssertArray.AreAlmostEqual(expected, z);
            AssertArray.AreAlmostEqual(expected, t.Dot(x).Dot(y));
            AssertArray.AreAlmostEqual(expected, txy);
        }
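A quick check of the expected values: t starts as ones, the j = 1 slice is negated and the i = 2 slice is tripled, so the inner sum over i is ±(1 - 1 + 9) = ±9; the j = 1 sign flip cancels y's -1, so the sum over j is 9 · (1 + 1 + 3 + 2) = 63 per entry, and the k = 2 slice was doubled, hence { 63, 63, 126, 63, 63 }.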
Example 3
        public void TestLoop1()
        {
            // Computing tanh(x(t).dot(W) + b) elementwise
            // http://deeplearning.net/software/theano/tutorial/loop.html

            // defining the tensor variables
            var X     = T.Matrix <float>("x");
            var W     = T.Matrix <float>("W");
            var b_sym = T.Vector <float>("b_sym");

            var results             = T.Scan(v => T.Tanh(T.Dot(v, W) + b_sym), sequence: X);
            var compute_elementwise = T.Function(inputs: new[] { X, W, b_sym }, output: results);

            // test values
            var x = NN.Eye<float>(2);
            var w = NN.Ones<float>(2, 2);
            var b = NN.Ones<float>(2);

            b.Item[1] = 2;

            var result   = compute_elementwise(new[] { x, w, b });
            var expected = NN.Tanh(x.Dot(w) + b);

            AssertArray.AreAlmostEqual(expected[0], result[0]);
        }
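Scan here simply maps the lambda over the rows of X, so the same values can be computed with a plain loop over the test data; a sketch reusing only calls that appear elsewhere in these tests:

            // Hand-rolled equivalent of the Scan above: tanh(row · w + b), row by row.
            var byHand = NN.Zeros<float>(2, 2);
            for (int i = 0; i < 2; i++)
            {
                byHand[i] = NN.Tanh(x[i].Dot(w) + b);
            }
            // byHand should match expected (and result[0]) above.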
Example 4
        public void SumAsEinsteinSumMatchesSum()
        {
            var x = NN.Random.Uniform(-1f, 1f, 10);
            var y = NN.Ones(1);

            AssertArray.AreEqual(x.Sum(axis: 0), NN.EinsteinSum(x, y, "i,->"));
        }
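The spec "i,->" indexes x by i, gives the second operand no index, and lists no output indices, so everything is summed away: the result is the sum of x[i] over i scaled by y, and since y is a single 1 that is exactly x.Sum(axis: 0).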
Example 5
        public void TestCombine2()
        {
            var a = NN.Ones<float>(4, 5, 6);
            var x = NN.Ones<float>(6);
            var y = NN.Ones<float>(5);
            var z = a.Combine(x, y);

            var expected = NN.Array<float>(30, 30, 30, 30);

            AssertArray.AreAlmostEqual(expected, z);
        }
Example 6
        public void TestDotWithIdentity()
        {
            var a = NN.Ones<float>(4, 5);

            a[_, Upto(-1)] = NN.Eye<float>(4);
            var b = NN.Random.Uniform(-1, 1, 4).As<float>();
            var c = NN.Ones<float>(5);

            c[Upto(4)] = b;

            var ac = a.Dot(c);
            var ab = a.DotWithBias(b);

            AssertArray.AreEqual(ac, ab);
        }
Example 7
        public void TestDotWithBias1D_1D()
        {
            var a = NN.Array<float>(1, 2, 1, 3);
            var b = NN.Array<float>(1, -1, 1);

            AssertArray.AreAlmostEqual(3, (float)a.DotWithBias(b));

            a = NN.Random.Uniform(-1, 1, 5).As<float>();
            b = NN.Random.Uniform(-1, 1, 4).As<float>();
            var c = NN.Ones<float>(5);

            c[Upto(4)] = b;

            AssertArray.AreAlmostEqual(a.Dot(c), a.DotWithBias(b));
        }
Example 8
        public void TestDotWithBias2D_1D()
        {
            var a = NN.Array<float>(new float[,] {
                { 1, 1, 3, 1 },
                { 2, 2, 6, 2 },
                { 1, 1, 3, 1 }
            });
            var b = NN.Array<float>(-1, 1, 4);
            var c = NN.Ones<float>(4);

            c[Upto(-1)] = b;

            var ac = a.Dot(c);
            var ab = a.DotWithBias(b);

            AssertArray.AreEqual(ac, ab);
        }
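All three DotWithBias tests check the same identity: a.DotWithBias(b) equals a.Dot(c), where c is b with a constant 1 appended, so the last entry (or last column) of the left operand carries the bias.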
Example 9
        public void SumProductWithSharedCanTrain()
        {
            var n = 2;
            // sequence of input
            var xs = T.Matrix <float>("xs");
            // accumulator
            var z = T.Vector <float>("z");
            var b = T.Shared(NN.Ones(n), "b");

            // sum xs in the accumulator
            Func<Tensor<float>, Tensor<float>, IList<Tensor<float>>> rec =
                (x, a) => new List<Tensor<float>>() { x + a, x * a + b };
            var loop = T.Scan(rec, xs, new[] { z, null });

            // get the last value
            var prod = loop[1][-1];

            // compute the cost and the gradient for the shared b.
            var cost = T.Sum(prod);
            var db   = T.Grad(cost, b);

            var costFunction = T.Function(input: (xs, z), output: cost);
            var xs_          = NN.Array(new float[,] {
                { 1, -1 },
                { 0, -2 }
            });

            var z_ = NN.Zeros(n);

            var cost_xs_z = costFunction(xs_, z_);

            Assert.AreEqual(4, cost_xs_z);

            var updates = new OrderedDictionary {
                { b, b - 0.05f * db }
            };
            var train      = T.Function(input: (xs, z), output: cost, updates: updates);
            var cost_xs_z2 = train(xs_, z_);

            AssertArray.AreAlmostEqual(NN.Array(new[] { 0.95f, 0.95f }), b.Value);
        }
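Tracing the scan by hand: step one sees x = (1, -1) and a = z = (0, 0), producing x + a = (1, -1) and x · a + b = (0, 0) + (1, 1) = (1, 1); step two sees x = (0, -2) and a = (1, -1), producing (1, -3) and (0, 2) + (1, 1) = (1, 3). prod is that last value, so cost = 1 + 3 = 4. Only the last step's + b reaches the cost, so dcost/db = (1, 1), and one step of size 0.05 leaves b at (0.95, 0.95).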
Example 10
        public void CanSwapTwoShared()
        {
            var a = make_shared("a", 5, 4, 3);

            Assert.IsTrue(a.Value.Shape.Zip(new int[] { 5, 4, 3 }, (x, y) => x == y).All(x => x));
            var b = make_shared("b", 5, 4, 3);

            Assert.IsTrue(b.Value.Shape.Zip(new int[] { 5, 4, 3 }, (x, y) => x == y).All(x => x));
            a.Value = NN.Zeros<float>(5, 4, 3);
            b.Value = NN.Ones<float>(5, 4, 3);
            exchange_shared(a, b);
            Assert.IsTrue(a.Value.All(x => x == 1.0f));
            Assert.IsTrue(b.Value.All(x => x == 0.0f));
            var f = make_exchange_func(a, b);

            f();
            Assert.IsTrue(a.Value.All(x => x == 0.0f));
            Assert.IsTrue(b.Value.All(x => x == 1.0f));
        }
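The make_shared, exchange_shared and make_exchange_func helpers are not shown; a sketch of what exchange_shared presumably does, written against the only API the test relies on (the settable Value property), with SharedVar<float> as a placeholder type name:

        // Hypothetical sketch: swap the backing tensors of two shared variables in place.
        static void exchange_shared(SharedVar<float> a, SharedVar<float> b)
        {
            var tmp = a.Value;
            a.Value = b.Value; // a now holds b's tensor
            b.Value = tmp;     // b now holds a's former tensor
        }

make_exchange_func would then presumably compile the same swap as a T.Function whose updates assign each shared variable the other's value.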
Example 11
        public void TestRecursive()
        {
            // http://deeplearning.net/software/theano/tutorial/loop.html
            // define tensor variables
            var X       = T.Vector <float>("X");
            var W       = T.Matrix <float>("W");
            var b_sym   = T.Matrix <float>("b_sym");
            var U       = T.Matrix <float>("U");
            var Y       = T.Matrix <float>("Y");
            var V       = T.Matrix <float>("V");
            var P       = T.Matrix <float>("P");
            var results = T.Scan((yy, pp, xx_tm1) => T.Tanh(T.Dot(xx_tm1, W) + T.Dot(yy, U) + T.Dot(pp, V)),
                                 sequences: new[] { Y, P[XSlicer.Step(-1)] },
                                 outputsInfo: X);
            var compute_seq = T.Function(inputs: new[] { X, W, Y, U, P, V }, output: results);
            // test values
            var x = NN.Zeros<float>(2);

            x.Item[1] = 1;
            var w = NN.Ones<float>(2, 2);
            var y = NN.Ones<float>(5, 2);

            y.Item[0] = -3;
            var u = NN.Ones<float>(2, 2);
            var p = NN.Ones<float>(5, 2);

            p.Item[0] = 3;
            var v      = NN.Ones<float>(2, 2);
            var result = compute_seq(new[] { x, w, y, u, p, v }); // Array<float>[5]; Theano returns Array<float>[5][1]
            // comparison with numpy
            var x_res = NN.Zeros<float>(5, 2);

            x_res[0] = NN.Tanh(x.Dot(w) + y[0].Dot(u) + p[4].Dot(v));
            for (int i = 1; i < 5; i++)
            {
                x_res[i] = NN.Tanh(x_res[i - 1].Dot(w) + y[i].Dot(u) + p[4 - i].Dot(v));
            }

            AssertArray.AreAlmostEqual(x_res, result);
        }
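The second sequence is P[XSlicer.Step(-1)], i.e. P traversed backwards, which is why the hand-rolled check pairs step i with p[4 - i] (and step 0 with p[4]).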
Example 12
        public Tsne(Array<float> X_, int dims, float perplexity)
        {
            X_.AssertOfDim(2);
            int n = X_.Shape[0];

            X = T.Shared(X_, "X");
            Y = T.Shared(NN.Random.Uniform(-1f, 1f, n, dims), "Y");

            YMomentum = T.Shared(NN.Zeros(n, dims), "YMomentum");
            dYLast    = T.Shared(NN.Zeros(n, dims), "dYLast");

            // ones everywhere, zero on the diag
            mask = T.Shared(NN.Ones(n, n) - NN.Eye(n), "mask");

            // Compute pairwise affinities
            var sum_Y = T.Sum(Y * Y, 1, keepDims: true);

            // Student-t kernel 1 / (1 + ||y_i - y_j||^2), with the squared distance
            // expanded as sum_Y_i + sum_Y_j - 2 (Y·Yᵀ)_ij (note the sign of the
            // transposed sum_Y term, which the original line got wrong)
            var num = 1 / (1 - T.DimShuffle((2 * T.Dot(Y, Y, transposeY: true) - sum_Y), 1, 0) + sum_Y);

            // set the diag to zero
            num *= mask;

            var Q = num / T.Sum(num);
            //Q = T.Max(Q, 1e-12f);

            var P_ = x2p(X_, 1e-5f, perplexity);

            P_ = P_ * 4f; // early exaggeration
            P_ = NN.Apply(P_, x => Math.Max(x, 1e-12f));
            P  = T.Shared(P_, "P");

            KL_Loss = T.Sum(P * T.Log(P / Q));

            dY   = T.Function(output: T.Grad(KL_Loss, Y));
            Loss = T.Function(output: KL_Loss);

            var updates = MomentumUpdate(Y, YMomentum, dYLast, T.Grad(KL_Loss, Y), 500);

            Train = T.Function(updates);
        }
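MomentumUpdate is not shown; a sketch of a plain momentum step consistent with the shared variables above. The 500 passed in is treated here as the learning rate, the gain bookkeeping of reference t-SNE is omitted, and SharedVar<float> is again a placeholder type name, so all of this is an assumption rather than the library's real helper:

        // Hypothetical sketch: velocity <- 0.9 · velocity - lr · grad, then Y <- Y + velocity.
        OrderedDictionary MomentumUpdate(SharedVar<float> y, SharedVar<float> velocity,
                                         SharedVar<float> dYLast, Tensor<float> grad, float lr)
        {
            return new OrderedDictionary {
                { velocity, 0.9f * velocity - lr * grad }, // accumulate the step
                { y, y + velocity },                       // move Y
                { dYLast, grad }                           // remember the last gradient
            };
        }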
Example 13
        public static void TestLook1()
        {
            // defining the tensor variables
            var X     = T.Matrix <float>("x");
            var W     = T.Matrix <float>("W");
            var b_sym = T.Matrix <float>("b_sym");

            var results             = T.Scan(v => T.Tanh(T.Dot(v, W) + b_sym), sequence: X);
            var compute_elementwise = T.Function(inputs: new[] { X, W, b_sym }, output: results);

            // test values
            var x = NN.Eye<float>(2);
            var w = NN.Ones<float>(2, 2);
            var b = NN.Ones<float>(2);

            b.Item[1] = 2;

            Console.WriteLine(compute_elementwise(new[] { x, w, b }).Item[0]);

            // comparison with tensors
            Console.WriteLine(NN.Tanh(x.Dot(w) + b));
        }
Example 14
        public void TestGradientThroughImplicitBroadcast()
        {
            var M  = T.Matrix <float>("M");
            var W_ = NN.Array(new float[,] {
                { 0, 0, 1 },
                { 1, 1, 0 }
            });
            var W    = T.Shared(W_, "W");
            var X    = T.Matrix <float>(-1, 1, "X");
            var loss = T.Sum((M + X) * W);

            var M_ = NN.Array(new float[,] {
                { 0, 3, 7 },
                { 5, 2, 0 }
            });
            var X_ = NN.Ones(2).Reshape(-1, 1);

            var dL = T.Function(input: (X, M), output: T.Grad(loss, X));
            var dX = NN.Array(new float[,] {
                { 1 }, { 2 }
            });

            AssertArray.AreAlmostEqual(dX, dL(X_, M_));
        }
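The gradient checks out by hand: X has shape (2, 1) and is broadcast across the three columns, so each entry of X reaches the loss through an entire row of W; dloss/dX is therefore the row sums of W, 0 + 0 + 1 = 1 and 1 + 1 + 0 = 2, which is the dX above.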
Example 15
        public void NumNetInitOnes(int n, int m)
        {
            var a = NN.Ones(n, m);
        }
Example 16
        public Array<float> x2p(Array<float> X, float tol = 1e-5f, float perplexity = 30f, bool sym = true, bool normalize = true)
        {
            //"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""

            // Initialize some variables
            Console.WriteLine("Computing pairwise distances...");
            int n = X.Shape[0], d = X.Shape[1];
            var sum_X = NN.Sum(X * X, axis: 1);
            var D     = (-2 * X.Dot(X.T) + sum_X).T + sum_X;
            var P     = NN.Zeros(n, n);
            var beta  = NN.Ones(n);
            var logU  = (float)Math.Log(perplexity);
            var Di    = NN.Zeros(n, n - 1);

            // Loop over all datapoints
            for (int i = 0; i < n; ++i)
            {
                // Print progress
                if (i % 500 == 0)
                {
                    Console.WriteLine("Computing P-values for point {0} of {1} ...", i, n);
                }

                // Compute the Gaussian kernel and entropy for the current precision
                var betamin = float.NegativeInfinity;
                var betamax = float.PositiveInfinity;
                Di[i, Until(i)] = D[i, Until(i)];
                if (i + 1 < n)
                {
                    Di[i, From(i)] = D[i, From(i + 1)]; // row i of D with the diagonal entry dropped
                }

                var H_thisP = Hbeta(Di[i], beta.Item[i]);
                var H = H_thisP.Item1; var thisP = H_thisP.Item2;

                // Evaluate whether the perplexity is within tolerance
                var Hdiff = H - logU;
                var tries = 0;
                while (Math.Abs(Hdiff) > tol && tries < 50)
                {
                    // If not, increase or decrease precision
                    if (Hdiff > 0)
                    {
                        betamin = beta.Item[i];
                        if (float.IsInfinity(betamax))
                        {
                            beta.Item[i] = beta.Item[i] * 2;
                        }
                        else
                        {
                            beta.Item[i] = (beta.Item[i] + betamax) / 2;
                        }
                    }
                    else
                    {
                        betamax = beta.Item[i];
                        if (float.IsInfinity(betamin))
                        {
                            beta.Item[i] = beta.Item[i] / 2;
                        }
                        else
                        {
                            beta.Item[i] = (beta.Item[i] + betamin) / 2;
                        }
                    }
                    // Recompute the values
                    H_thisP = Hbeta(Di[i], beta.Item[i]);
                    H       = H_thisP.Item1; thisP = H_thisP.Item2;

                    Hdiff = H - logU;
                    tries = tries + 1;
                }

                // Set the final row of P
                P[i, Until(i)] = thisP[Until(i)];
                if (i + 1 < n)
                {
                    P[i, From(i + 1)] = thisP[From(i)];
                }
            }
            var sigma = NN.Mean(NN.Sqrt(1 / beta));

            Console.WriteLine("Mean value of sigma: {0}", sigma);

            // Return final P-matrix
            if (sym)
            {
                P += P.T;
            }
            if (normalize)
            {
                P /= NN.Sum(P);
            }
            return P;
        }
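Hbeta is not shown; a hypothetical reconstruction matching how it is used above and the reference t-SNE code (the library's real implementation may differ):

        // Hypothetical sketch: given one point's squared distances D (length n - 1)
        // and a precision beta, return the entropy H and the conditional Gaussian row P.
        public Tuple<float, Array<float>> Hbeta(Array<float> D, float beta)
        {
            var P    = NN.Apply(D, d => (float)Math.Exp(-d * beta)); // unnormalized Gaussian kernel
            var sumP = (float)NN.Sum(P);
            var H    = (float)Math.Log(sumP) + beta * (float)NN.Sum(D * P) / sumP;
            return Tuple.Create(H, P / sumP);                        // normalize the row
        }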