Example #1
        public void TestScanOnTanhSumDot()
        {
            var W = T.Shared(0.2f * NN.Random.Uniform(-1.0f, 1.0f, 4, 5).As<float>(), "W");

            Func<Tensor<float>, Tensor<float>, Tensor<float>> recurrence =
                (x, acc) => T.Tanh(acc + T.Dot(W, x));

            var X    = T.Matrix<float>(-1, 5, "X");
            var acc0 = T.Shared(NN.Zeros<float>(4), "acc0");

            var result = T.Scan(fn: recurrence, sequences: new[] { X }, outputsInfo: acc0);
            var norm2  = T.Norm2(result[-1]);

            var f = T.Function(X, norm2);  // forward-only compilation; df below also returns the gradient

            var grad = T.Grad(norm2, W);

            var df = T.Function(input: X, output: (norm2, grad));

            df(NN.Array(new[,] {
                { 0f, 0f, 0f, 0f, 0f }
            }));

            AssertTensor.PassesGradientCheck(X, norm2, acc0);
            AssertTensor.PassesGradientCheck(X, norm2, W);
        }
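The PassesGradientCheck helper itself is not shown in these examples; below is a minimal sketch of what such a check presumably does, written against the T.Function/T.Grad API used above. The toy loss, the Func<float, float> return type of T.Function on scalar inputs, and the tolerance rule are all assumptions, not the library's actual implementation.

            // Hypothetical central-difference gradient check on a scalar input b.
            var b    = T.Scalar<float>("b");
            var loss = b * b + 3f * b;                       // toy loss, d(loss)/db = 2b + 3
            var f    = T.Function(input: b, output: loss);
            var df   = T.Function(input: b, output: T.Grad(loss, b));

            const float eps = 1e-3f;
            float b0       = 0.7f;
            float numeric  = (f(b0 + eps) - f(b0 - eps)) / (2f * eps);
            float analytic = df(b0);
            // a real check would assert |numeric - analytic| <= absErr + relativeErr * |analytic|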
Example #2
        public void Convolve2DPassesGradientCheck()
        {
            //int[] poolingShape = new int[] { 1, 1 };
            int[] kernelShape = new int[] { 7, 7 };
            int[] inputShape  = new int[] { 100, 100 };
            var   iS          = NN.Array(inputShape).As<float>();
            var   kS          = NN.Array(kernelShape).As<float>();
            // layers
            var W = T.Shared(NN.Random.Uniform(-0.01f, 0.01f, kernelShape).As<float>(), "W");
            //var flatShape = ((inputShape[0] + kernelShape[0] - 1) / poolingShape[0]) * ((inputShape[1] + kernelShape[1] - 1) / poolingShape[1]);
            var flatShape = (inputShape[0] + kernelShape[0] - 1) * (inputShape[1] + kernelShape[1] - 1);
            var scaling   = (iS[0] + kS[0] - 1f) + (iS[1] + kS[1] - 1f);
            var S         = T.Shared(NN.Random.Uniform(-10f, 10f, 2, flatShape).As<float>() / scaling, "S");
            var Sb        = T.Shared(NN.Zeros<float>(2, 1), "Sb");

            var x = T.Matrix<float>(inputShape[0], inputShape[1], "x");  // 100 x 100 input image
            var h = T.Sigmoid(T.Convolve2d(x, W, mode: ConvMode.Full));

            //h = T.MaxPooling2d(h, poolingShape[0], poolingShape[1], true);
            h = h.Reshape(flatShape, 1);
            var debug = (T.Dot(S, h) + Sb).Reshape(2);
            var pred  = T.Softmax(debug);
            var nll   = -T.Mean(T.Log(pred)[1]);

            AssertTensor.PassesGradientCheck(x, nll, W, relativeErr: 1e-3f, absErr: 1e-3f);
        }
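A quick sanity check of the shapes above: in ConvMode.Full the output is presumably (H + kh - 1) x (W + kw - 1), which is exactly what the flatShape arithmetic encodes.

            int outH = inputShape[0] + kernelShape[0] - 1;   // 100 + 7 - 1 = 106
            int outW = inputShape[1] + kernelShape[1] - 1;   // 106
            int flat = outH * outW;                          // 11236 == flatShape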
Example #3
        public void ScanPassesGradientCheckOnSeq2Seq()
        {
            int embeddingSize = 10, vocabSize = 100;

            var L   = T.Shared(NN.Random.Uniform(-0.01f, 0.01f, vocabSize, embeddingSize), "L");
            var W   = T.Shared(NN.Random.Uniform(-0.01f, 0.01f, embeddingSize, embeddingSize), "W");
            var ids = T.Vector<int>(-1, "ids");
            var xs  = L[ids];

            var scan  = T.Scan((x, acc) => T.Tanh(T.Dot(acc + x, W)), sequence: xs, outputsInfo: T.Zeros<float>(embeddingSize));
            var norm2 = T.Norm2(scan);

            var grad    = T.Grad(norm2);
            var updates = new OrderedDictionary {
                [W] = W - 0.001f * grad[W], [L] = L - 0.001f * grad[L]
            };

            var f = T.Function(input: ids, output: norm2, updates: updates);

            Func<Array<int>> init = () => NN.Random.Uniform(0, vocabSize - 1, 10).As<int>();

            f(init());

            AssertTensor.PassesGradientCheck(ids, norm2, W, init: init);
            AssertTensor.PassesGradientCheck(ids, norm2, L, init: init);
        }
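Since f carries the updates dictionary, repeated calls already perform SGD on W and L. A minimal training loop might look like this (the epoch count, and the assumption that updates are applied on every call, are mine):

            for (int epoch = 0; epoch < 100; epoch++)
            {
                var loss = f(init());   // each call also applies the [W] and [L] updates
            }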
Example #4
        public void ItemPassesGradientCheck()
        {
            var y = T.Vector<float>(10, "y");
            var b = T.Shared(NN.Random.Uniform(-1f, 1f, 10).As<float>(), "b");

            AssertTensor.PassesGradientCheck(y, (y + b).Item[5], b);
            AssertTensor.PassesGradientCheck(y, (y + b).Item[-3], b);
        }
Example #5
        public void TanhPerceptronPassesGradientCheck()
        {
            var x = T.Vector<float>("x");
            int n = 20, m = 5;
            var W    = T.Shared(NN.Random.Uniform(-1f, 1f, m, n).As<float>(), "W");
            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, m, n).As<float>(), "W_op");
            var loss = T.Norm2(T.Tanh(T.Dot(W, x)) - T.Dot(W_op, x));

            AssertTensor.PassesGradientCheck(x, loss, W);
        }
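For reference, the analytic gradient this check validates follows from d tanh(z)/dz = 1 - tanh(z)^2; a comment-only derivation (⊙ denotes elementwise multiplication):

            // With r = Tanh(W x) - W_op x:
            //   dLoss/dW = 2 * (r ⊙ (1 - Tanh(W x)^2)) · xᵀ
            // T.Grad(loss, W) is expected to build exactly this expression graph.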
Example #6
        public void DimShufflePassesGradientCheck()
        {
            var X = T.Matrix<float>(5, 3, "X");
            var b = T.Shared(NN.Random.Uniform(-1f, 1f, 3), "b");

            var b2   = b.DimShuffle('x', 0);
            var Xb   = X * b2;
            var loss = T.Norm2(Xb);

            AssertTensor.PassesGradientCheck(X, loss, b);
        }
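DimShuffle here presumably follows Theano's convention, where 'x' inserts a broadcastable axis: b of shape (3) becomes (1, 3), so X * b2 scales each column of the (5, 3) matrix X by the matching entry of b.

            var row = b.DimShuffle('x', 0);   // (3) -> (1, 3): broadcasts down the rows of X
            var col = b.DimShuffle(0, 'x');   // (3) -> (3, 1): the column-vector variant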
Example #7
        public void MinPassesGradientCheck()
        {
            var x   = T.Shared(0f, "x");
            var min = T.Min(x, 0f);

            x.Value = 1;
            AssertTensor.PassesGradientCheck(min, x);

            x.Value = -1;
            AssertTensor.PassesGradientCheck(min, x);
        }
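Note that the two check points deliberately straddle the kink: Min(x, 0) is not differentiable at x == 0, where a finite-difference check is ill-defined. Sketched below only to show what is being avoided:

            // dMin/dx == 1 for x < 0 and 0 for x > 0, but undefined at exactly 0:
            // x.Value = 0;
            // AssertTensor.PassesGradientCheck(min, x);   // would be expected to fail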
Example #8
        public void ConcatPassesGradientCheck()
        {
            var x = T.Shared(NN.Random.Uniform(-1f, 1f, 4, 10), "x");
            var y = T.Shared(NN.Random.Uniform(-1f, 1f, 6, 10), "y");

            var z    = T.Concat(0, x, y);
            var loss = T.Norm2(z[Range(2, 8)]);

            AssertTensor.PassesGradientCheck(loss, x);
            AssertTensor.PassesGradientCheck(loss, y);
        }
Example #9
        public void TensorDot3Dx1DPassesGradientCheck()
        {
            var x = T.Vector<float>("x");
            int n = 6, m = 4, l = 2;
            var W    = T.Shared(NN.Random.Uniform(-1f, 1f, l, m, n).As<float>(), "W");
            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, l, m, n).As<float>(), "W_op");

            var loss = T.Norm2(T.Dot(W, x) - T.Dot(W_op, x));

            AssertTensor.PassesGradientCheck(x, loss, W, relativeErr: 1e-3f, absErr: 1e-4f);
        }
Example #10
        public void MaxPassesGradientCheck()
        {
            var x   = T.Shared(0f, "x");
            var max = T.Max(x, 0f);

            x.Value = 1;
            AssertTensor.PassesGradientCheck(max, x);

            x.Value = -1;
            AssertTensor.PassesGradientCheck(max, x);
        }
Example #11
        public void TensorDot3Dx2DAsEinsteinPassesGradientCheck()
        {
            int n = 6, m = 4, l = 2;
            var x    = T.Matrix<float>("x");
            var W    = T.Shared(NN.Random.Uniform(-1f, 1f, l, m, n).As<float>(), "W");
            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, l, m, n).As<float>(), "W_op");

            var loss = T.Norm2(T.EinsteinSum(W, x, "lmn,nx->lmx") - T.Dot(W_op, x));

            AssertTensor.PassesGradientCheck(x, loss, W);
        }
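The spec "lmn,nx->lmx" sums over the shared index n, i.e. for each (l, m) pair it is a row-times-matrix product. The test compares it against T.Dot, which on a 3D x 2D pair presumably contracts the same axes (last of W against first of x):

            var viaDot = T.Dot(W, x);   // assumed equivalent to T.EinsteinSum(W, x, "lmn,nx->lmx")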
Example #12
        public void TensorDot2Dx3DPassesGradientCheck()
        {
            int n = 6, m = 4, l = 2, k = 3;
            var x    = T.Tensor3<float>(n, m, l, "x");
            var xT   = x.DimShuffle(1, 0, 2);
            var W    = T.Shared(NN.Random.Uniform(-1f, 1f, l, k).As<float>(), "W");
            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, l, k).As<float>(), "W_op");

            var loss = T.Norm2(T.Softmax(T.Dot(xT, W)) - T.Dot(xT, W_op));

            AssertTensor.PassesGradientCheck(x, loss, W);
        }
Example #13
        public void LogSumExpPassesGradientCheck()
        {
            var x = T.Matrix<float>(5, 4, "x");
            var W = T.Shared(NN.Random.Uniform(-1f, 1f, 8, 5).As<float>(), "W");

            AssertTensor.PassesGradientCheck(x, T.Sum(T.LogSumExp(T.Dot(W, x))), W);
            AssertTensor.PassesGradientCheck(x, T.LogSumExp(T.Dot(W, x)).Item[2], W);

            var y = T.Vector<float>(10, "y");
            var b = T.Shared(NN.Random.Uniform(-1f, 1f, 10).As<float>(), "b");

            AssertTensor.PassesGradientCheck(y, (Scalar<float>)T.LogSumExp(y + b), b);
        }
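LogSumExp presumably implements the numerically stable identity logsumexp(v) = max(v) + log(sum(exp(v - max(v)))). A hand-rolled version using ops seen elsewhere in this file; T.Exp, the vector form of T.Max, and scalar-over-vector broadcasting are assumptions:

            var v      = T.Vector<float>(10, "v");
            var vMax   = T.Max(v, axis: 0);
            var stable = vMax + T.Log(T.Sum(T.Exp(v - vMax)));   // never exponentiates large values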
Example #14
        public void TensorDot3Dx3DPassesGradientCheck()
        {
            int n = 6, m = 4, l = 2, k = 5;
            var x    = T.Tensor3<float>(k, l, m, "x");
            var W    = T.Shared(NN.Random.Uniform(-1f, 1f, l, m, n).As<float>(), "W");
            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, l, m, n).As<float>(), "W_op");

            var axesW = new[] { 0, 1 };
            var axesX = new[] { 1, 2 };
            var loss  = T.Norm2(T.TensorDot(W, axesW, x, axesX) - T.TensorDot(W_op, axesW, x, axesX));

            AssertTensor.PassesGradientCheck(x, loss, W);
        }
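Shape bookkeeping for the contraction above, assuming numpy-style tensordot semantics (contracted axes pair up in order, free axes are concatenated); the EinsteinSum spelling is a hypothetical equivalent:

            // W : (l, m, n) = (2, 4, 6), contracted over axes {0, 1}, i.e. (l, m)
            // x : (k, l, m) = (5, 2, 4), contracted over axes {1, 2}, i.e. (l, m)
            // result keeps the free axes in order: (n, k) = (6, 5)
            var viaEinsum = T.EinsteinSum(W, x, "lmn,klm->nk");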
Example #15
        // TODO: this test doesn't work due to bugs in Elementwise
        public void PushCoherentGradientOnComplexAbstraction()
        {
            var x = T.Shared(NN.Range<float>(4), "x");
            var b = T.Scalar<float>("b");

            var y    = T.Apply(x, x_ => (x_ > 0f) * b + x_ + b);
            var loss = T.Sum(y);

            AssertArray.WithMessage("Can't compile the gradient.",
                () => T.Function(input: b, output: T.Grad(loss, b)));
            AssertTensor.PassesGradientCheck(loss, b);
        }
Example #16
        public void MaxPoolingPassesGradientCheck()
        {
            var x_ = NN.Array(new float[,]
            {
                { 0, 1, 0 },
                { 1, 0, 0 },
                { 0, 0, 1 },
            });

            var x        = T.Shared(x_, "x");
            var x_pooled = T.Max(x, axis: 1);
            var loss     = T.Sum(x_pooled);

            AssertTensor.PassesGradientCheck(loss, x);
        }
Example #17
        public void TanhNNTransposedAsEinsteinSumPassesGradientCheck()
        {
            var x0 = T.Vector<float>("x");

            int n0 = 20, n1 = 5, n2 = 20;
            var W1 = T.Shared(NN.Random.Uniform(-1f, 1f, n0, n1).As<float>(), "W1");
            var W2 = T.Shared(NN.Random.Uniform(-1f, 1f, n1, n2).As<float>(), "W2");
            var b1 = T.Shared(NN.Random.Uniform(-1f, 1f, n1).As<float>(), "b1");
            var b2 = T.Shared(NN.Random.Uniform(-0.1f, 0.1f, n2).As<float>(), "b2");

            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, n0, n2).As<float>(), "W_op");

            var x1   = T.Tanh(T.EinsteinSum(x0, W1, "i,ij->j") + b1);
            var x2   = T.Tanh(T.EinsteinSum(x1, W2, "i,ij->j") + b2);
            var loss = T.Norm2(x2 - T.Dot(W_op, x0));

            AssertTensor.PassesGradientCheck(x0, loss, b1);
            AssertTensor.PassesGradientCheck(x0, loss, W1);
            AssertTensor.PassesGradientCheck(x0, loss, W2);
            AssertTensor.PassesGradientCheck(x0, loss, b2, absErr: 0.002f);
        }
Example #18
        public void ReluNNPassesGradientCheck()
        {
            var x0 = T.Vector<float>("x");

            int n0 = 20, n1 = 5, n2 = 20;
            var W1 = T.Shared(NN.Random.Uniform(-1f, 1f, n1, n0).As<float>(), "W1");
            var W2 = T.Shared(NN.Random.Uniform(-1f, 1f, n2, n1).As<float>(), "W2");
            var b1 = T.Shared(NN.Random.Uniform(-1f, 1f, n1).As<float>(), "b1");
            var b2 = T.Shared(NN.Random.Uniform(-0.1f, 0.1f, n2).As<float>(), "b2");

            var W_op = T.Shared(NN.Random.Uniform(-1f, 1f, n2, n0).As<float>(), "W_op");

            var x1   = T.ReLu(T.Dot(W1, x0) + b1);
            var x2   = T.ReLu(T.Dot(W2, x1) + b2);
            var loss = T.Norm2(x2 - T.Dot(W_op, x0));

            AssertTensor.PassesGradientCheck(x0, loss, b1);
            AssertTensor.PassesGradientCheck(x0, loss, W1);
            AssertTensor.PassesGradientCheck(x0, loss, W2);
            AssertTensor.PassesGradientCheck(x0, loss, b2);
        }
Example #19
        public void IndexingPassesGradientCheck()
        {
            var maxLength = 20;
            var n         = 10;
            var ids       = T.Vector<int>("X");
            var W         = T.Shared(NN.Random.Uniform(-1f, 1f, n, n), "W");

            var loss = T.Sum(W[ids]);

            AssertTensor.PassesGradientCheck(
                ids, loss, W,
                init: () => NN.Random.Uniform<int>(0, n - 1, (int)(NN.Random.NextDouble() * maxLength) + 1)
                );

            var loss2 = T.Sum(W[ids, ids]);

            AssertTensor.PassesGradientCheck(
                ids, loss2, W,
                init: () => NN.Random.Uniform<int>(0, n - 1, (int)(NN.Random.NextDouble() * maxLength) + 1)
                );
        }
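Because loss = Sum(W[ids]), the gradient scatters back into W: row r of dLoss/dW counts how many times r occurs in ids, so repeated indices must accumulate rather than overwrite. A hypothetical spot check:

            var g = T.Function(input: ids, output: T.Grad(loss, W))(NN.Array(new[] { 2, 2, 5 }));
            // expected: every entry of row 2 equals 2f, row 5 equals 1f, all other rows are 0f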