Example #1
        public void ReLUTests()
        {
            var input    = new TensorOld(new double[] { 1, 2, -3, 5, -2, 7, 4, 6, 8, -5, 4, 1 }, 3, 4);
            var expected = new TensorOld(new double[] { 1, 2, 0, 5, 0, 7, 4, 6, 8, 0, 4, 1 }, 3, 4);
            var actual   = TensorOld.Apply(input, Functions.ReLU);

            Assert.Equal(expected, actual);
        }
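
The expected values suggest Functions.ReLU is the usual elementwise max(0, x), and that this overload of TensorOld.Apply returns a new tensor rather than mutating its input. A minimal sketch of such a function, assuming the real Functions.ReLU (not shown in these examples) behaves the same way:

        // Hypothetical sketch of Functions.ReLU; the actual implementation is not shown here.
        public static double ReLU(double x) => x > 0 ? x : 0;
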
Example #2
        public void ReLUTests()
        {
            var output   = new TensorOld(new double[] { 1, 2, 0, 5, 0, 7, 4, 6, 8, 0, 4, 1 }, 3, 4);
            var expected = new TensorOld(new double[] { 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1 }, 3, 4);
            var actual   = TensorOld.Apply(output, Derivatives.ReLU);

            Assert.Equal(expected, actual);
        }
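
Here Derivatives.ReLU is applied to the output of ReLU rather than its input, which works because ReLU's derivative (1 where the input was positive, 0 elsewhere) can be read off the output. A hedged sketch:

        // Hypothetical sketch of Derivatives.ReLU, taking ReLU's output as its argument.
        public static double ReLU(double y) => y > 0 ? 1 : 0;
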
Example #3
        public void ApplyTest()
        {
            var t = TensorOld.Rand(20, 30);
            var n = TensorOld.Apply(t, a => a * a);

            for (int i = 0; i < 20; i++)
            {
                for (int j = 0; j < 30; j++)
                {
                    Assert.Equal(t[i, j] * t[i, j], n[i, j]);
                }
            }
        }
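
This test cross-checks the returning overload element by element, assuming TensorOld.Rand(20, 30) fills a 20x30 tensor with random values and the indexer t[i, j] addresses row i, column j.
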
Example #4
        public void Optimize(TensorOld target, TensorOld gradient)
        {
            if (!dict.ContainsKey(gradient))
            {
                dict[gradient] = new AdamCache(gradient.Shape);
            }

            var c = dict[gradient];

            // Exponential moving averages of the gradient (M) and squared gradient (V),
            // then the Adam step written into c.T and subtracted from the parameters.
            TensorOld.Apply(c.M, gradient, c.M, (m, g) => Beta1 * m + (1 - Beta1) * g);
            TensorOld.Apply(c.V, gradient, c.V, (v, g) => Beta2 * v + (1 - Beta2) * g * g);
            TensorOld.Apply(c.M, c.V, c.T, (m, v) => Alpha * m / (Math.Sqrt(v) + E));
            target.Minus(c.T);
        }
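
This is the standard Adam update spelled out with the two-source Apply overload: m <- Beta1*m + (1-Beta1)*g, v <- Beta2*v + (1-Beta2)*g^2, and a step of Alpha*m / (sqrt(v) + E) subtracted from the parameters. The bias-correction terms of the original Adam paper (dividing by 1 - Beta1^t and 1 - Beta2^t) are omitted here.
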
Example #5
        public void Optimize(TensorOld target, TensorOld gradient)
        {
            if (!last.ContainsKey(gradient))
            {
                // First step: no velocity yet, so take a plain SGD step.
                last[gradient] = gradient.GetSameShape();
                TensorOld.Apply(gradient, last[gradient], g => LearningRate * g);
                target.Minus(last[gradient]);
                return;
            }

            var prev = last[gradient];

            // Classical momentum: v = LearningRate * g + Moment * v_prev, then target -= v.
            TensorOld.Apply(prev, gradient, prev, (p, g) => g * LearningRate + p * Moment);
            target.Minus(prev);
        }
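
The first call seeds the velocity with a plain SGD step; subsequent calls fold in the previous velocity scaled by Moment, i.e. classical momentum v <- LearningRate*g + Moment*v, theta <- theta - v.
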
Example #6
 public TensorOld Forward(TensorOld input)
 {
     input.Apply(a =>
     {
         if (a > Max)
         {
             return Max;
         }
         if (a < Min)
         {
             return Min;
         }
         return a;
     });
     return input;
 }
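
This Forward clamps every element of input into [Min, Max] in place, using the instance overload of Apply that mutates the tensor itself, and then returns that same tensor.
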
Example #7
        public (TensorOld, TensorOld) GetXorData(int count)
        {
            var xbuff = DataEmulator.Instance.RandomArray(count, 2);
            var x     = new TensorOld(xbuff);
            var y     = new TensorOld(count, 1);

            x.Apply(a => a * 12 - 6);
            for (int i = 0; i < count; i++)
            {
                if (x[i, 0] * x[i, 1] > 0)
                {
                    y[i, 0] = 0;
                }
                else
                {
                    y[i, 0] = 1;
                }
            }
            return (x, y);
        }
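
Assuming DataEmulator.Instance.RandomArray yields values in [0, 1), the map a * 12 - 6 rescales the inputs to [-6, 6); the label is 1 exactly when the two coordinates have opposite signs, i.e. a continuous version of XOR.
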
Example #8
 public void Regularize(TensorOld parameters, TensorOld gradient)
 {
     TensorOld.Apply(gradient, parameters, gradient, (a, b) => a + Strength * b);
 }
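
This is L2 (weight-decay) regularization: the gradient is shifted by Strength times the parameter values, g <- g + lambda*theta, before the optimizer consumes it.
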
Example #9
 public void Normalize(TensorOld input, TensorOld output)
 {
     TensorOld.Apply(input, output, a => Math.Log10(a) / Denom);
 }
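
A log-scale normalizer: each element becomes log10(a) / Denom, which only makes sense for strictly positive inputs.
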
Example #10
 /// <summary>
 /// Backward propagation (i.e. propagating toward the front of the network)
 /// </summary>
 /// <param name="y">The error passed back from the next layer</param>
 /// <returns>The error passed on to the previous layer</returns>
 public override TensorOld Backward(TensorOld y)
 {
     TensorOld.Apply(ForwardOutput, y, BackwardOutput, (a, b) => a - b);
     return BackwardOutput;
 }
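
The backward pass here is simply ForwardOutput - y, which matches the gradient of a squared-error loss of the form (1/2)(y_hat - y)^2 with respect to the output; the loss itself is not shown, so this is an inference from the update.
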
Example #11
 /// <summary>
 /// Backward propagation (i.e. propagating toward the front of the network)
 /// </summary>
 /// <param name="error">The error passed back from the next layer</param>
 /// <returns>The error passed on to the previous layer</returns>
 public override TensorOld Backward(TensorOld error)
 {
     TensorOld.Apply(ForwardOutput, Derivative, Derivatives.SigmoidFromOutput);
     TensorOld.MultipleElementWise(Derivative, error, BackwardOutput);
     return BackwardOutput;
 }
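
SigmoidFromOutput presumably computes the sigmoid derivative from the already-stored activation, sigma' = s * (1 - s), and the elementwise multiply with error then applies the chain rule. A hedged sketch of such a helper:

 // Hypothetical sketch of Derivatives.SigmoidFromOutput; the actual implementation is not shown here.
 public static double SigmoidFromOutput(double s) => s * (1 - s);
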
Example #12
 /// <summary>
 /// Forward propagation (i.e. propagating toward the back of the network)
 /// </summary>
 /// <param name="input">The input values</param>
 /// <returns>The output values</returns>
 public override TensorOld Forward(TensorOld input)
 {
     TensorOld.Apply(input, ForwardOutput, Functions.Sigmoid);
     return ForwardOutput;
 }
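
Functions.Sigmoid is presumably the logistic function, written into the preallocated ForwardOutput via the source/destination overload of Apply. A hedged sketch:

 // Hypothetical sketch of Functions.Sigmoid; the actual implementation is not shown here.
 public static double Sigmoid(double x) => 1.0 / (1.0 + Math.Exp(-x));
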
Example #13
 public void Normalize(TensorOld input, TensorOld output)
 {
     TensorOld.Apply(input, output, a => (a - Min) / Denom);
 }
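
Presumably Denom = Max - Min here, making this min-max scaling that maps [Min, Max] onto [0, 1].
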
Example #14
 public void Normalize(TensorOld input, TensorOld output)
 {
     TensorOld.Apply(input, output, a => (a - Mean) / Delta);
 }
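
And this is z-score standardization: subtract the mean and divide by a spread term (Delta is presumably the standard deviation), yielding roughly zero-mean, unit-variance features.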