Example No. 1
        // Build a tensor with the requested shape and fill every element with the given value.
        internal FloatTensor In(float value, long[] shape)
        {
            FloatTensor floatTensor = new FloatTensor(shape);

            floatTensor.Fill(value);
            return(floatTensor);
        }
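A minimal usage sketch of the helper above (the value and shape are made-up, and the enclosing class is assumed to expose In exactly as shown):

        // Hypothetical call: a 2x3 tensor with every element set to 42.
        FloatTensor t = In(42f, new long[] { 2, 3 });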
Example No. 2
        // Same pattern as In above, but the filled tensor is wrapped by Out before being returned.
        public Tensor Constant(float value, long[] shape)
        {
            FloatTensor floatTensor = new FloatTensor(shape);

            floatTensor.Fill(value);
            return(Out(floatTensor));
        }
Example No. 3
        // Runs the model over the test set in mini-batches, accumulating the average loss,
        // and returns the ids of the loss tensor and the predictions tensor as a comma-separated string.
        public string Evaluate(FloatTensor test_input, FloatTensor test_target, Loss.Loss criterion, int batch_size)
        {
            if (test_input.Shape[0] != test_target.Shape[0])
            {
                throw new InvalidDataException("Input and Target tensors don't seem to have the right dims");
            }

            // The evaluation buffers keep the trailing dims of the data but use batch_size as the leading dim.
            int[] input_buffer_shape = new int[test_input.Shape.Length];
            input_buffer_shape[0] = batch_size;
            for (int i = 1; i < test_input.Shape.Length; i++)
            {
                input_buffer_shape[i] = test_input.Shape[i];
            }

            FloatTensor test_input_buffer = controller.floatTensorFactory.Create(_shape: input_buffer_shape, _autograd: true);
            FloatTensor test_loss         = controller.floatTensorFactory.Create(_shape: new int[] { 1 });

            int[] target_buffer_shape = new int[test_target.Shape.Length];
            target_buffer_shape[0] = batch_size;
            for (int i = 1; i < test_target.Shape.Length; i++)
            {
                target_buffer_shape[i] = test_target.Shape[i];
            }

            FloatTensor test_target_buffer = controller.floatTensorFactory.Create(_shape: target_buffer_shape, _autograd: true);
            float       loss        = 0;
            int         num_batches = (int)(test_input.Shape[0] / batch_size);
            FloatTensor predictions = controller.floatTensorFactory.Create(test_target.Shape);

            // Elements per batch = batch_size * product of the remaining dims.
            int test_input_batch_offset = batch_size;

            for (int i = 1; i < test_input.Shape.Length; i++)
            {
                test_input_batch_offset *= test_input.Shape[i];
            }

            int test_target_batch_offset = batch_size;

            for (int i = 1; i < test_target.Shape.Length; i++)
            {
                test_target_batch_offset *= test_target.Shape[i];
            }

            // Fill the batch buffers from the full tensors, run a forward pass and accumulate the loss.
            for (int batch_i = 0; batch_i < num_batches; batch_i++)
            {
                test_input_buffer.Fill(test_input, starting_offset: batch_i * test_input_batch_offset,
                                       length_to_fill: test_input_batch_offset);
                test_target_buffer.Fill(test_target, starting_offset: batch_i * test_target_batch_offset,
                                        length_to_fill: test_target_batch_offset);
                var        pred       = Forward(test_input_buffer);
                var        batch_loss = criterion.Forward(pred, test_target_buffer);
                List<int> tensor_ids = new List<int> {
                    predictions.Id, pred.Id
                };
                // Copy this batch's predictions into the matching slice of the full predictions tensor.
                predictions.Fill(pred, starting_offset: 0, length_to_fill: test_target_batch_offset, starting_offset_fill: test_target_batch_offset * batch_i);
                loss += (batch_loss.Data[0] / batch_size);
            }
            test_loss.Fill(loss / num_batches);
            return(test_loss.Id.ToString() + "," + predictions.Id.ToString());
        }
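A worked sketch of the offset arithmetic in Evaluate, using made-up shapes that are not from the source:

        // Illustrative only: with test_input.Shape = {1000, 28, 28} and batch_size = 32,
        // test_input_batch_offset = 32 * 28 * 28 = 25088 elements are copied per batch,
        // and num_batches = 1000 / 32 = 31 full batches (any remainder is never evaluated).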
Example No. 4
        // Trains on one mini-batch: fills the buffers, runs forward/backward and takes an optimizer step.
        public float FitBatch(int batch_i, int iteration)
        {
            // Only process batches that fit entirely inside the source tensor.
            if (((batch_i + 1) * _input_batch_offset) < _input_tensor_origin.Size)
            {
                input_buffer.Fill(_input_tensor_origin, starting_offset: batch_i * _input_batch_offset,
                                  length_to_fill: _input_batch_offset);
                target_buffer.Fill(_target_tensor_origin, starting_offset: batch_i * _target_batch_offset,
                                   length_to_fill: _target_batch_offset);
                var pred = Forward(input_buffer);
                var loss = _criterion.Forward(pred, target_buffer);


                // Reuse a ones tensor as the seed gradient for Backward, rebuilding it only when the loss size changes.
                if (cached_ones_grad_for_backprop == null || cached_ones_grad_for_backprop.Size != loss.Size)
                {
                    cached_ones_grad_for_backprop          = loss.createOnesTensorLike();
                    cached_ones_grad_for_backprop.Autograd = false;
                }

                loss.Backward(cached_ones_grad_for_backprop);

                _optimizer.Step(this.input_buffer.Shape[0], iteration);
                return(loss.Data[0]);
            }
            else
            {
                return(0);
            }
        }
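A worked check of the guard in FitBatch, again with made-up numbers:

        // Illustrative only: if _input_tensor_origin.Size == 1000 and _input_batch_offset == 320,
        // then batch_i = 0, 1, 2 satisfy (batch_i + 1) * 320 < 1000 and are trained on,
        // while batch_i = 3 (which would need elements 960..1279) falls through and returns 0.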
Example No. 5
    public static void Main(string [] args)
    {
        var x = new FloatTensor(10);
        var b = new FloatTensor(10);

        // Fill every element of b with the constant 30, then dump it for inspection.
        b.Fill(30);
        Dump(b);
        x.Random(new RandomGenerator(), 10);
        FloatTensor.Add(x, 100, b);
        Dump(x);
        Dump(b);
#if false
        Dump(x);
        var y = x.Add(100);
        Dump(y);
#endif
        // Stress native memory management: allocate, combine and dispose tensors in a tight loop.
        for (int i = 0; i < 1000; i++)
        {
            using (var a = new FloatTensor(1000))
            {
                using (var c = new FloatTensor(1000))
                {
                    var d = a.Add(10);
                    a.CAdd(0, d);
                    d.Dispose();
                }
            }
        }
    }
Example No. 6
        public void TestReshapeFloat4DPointToTheSameStorage()
        {
            var x = new FloatTensor(10, 10, 10, 5);

            x.Fill(1);

            // Create a flat 1-D view of all 5000 elements over the same storage as x.
            var y = x.NewWithStorage1d((UIntPtr)0, 5000, 1);

            // Mutate through the view: since the storage is shared, the matching element of x changes too.
            y[567] = 0;

            int count = 0;

            // Walk x in row-major order and compare it element-for-element with the flat view.
            for (int i = 0; i < x.Shape[0]; i++)
            {
                for (int j = 0; j < x.Shape[1]; j++)
                {
                    for (int k = 0; k < x.Shape[2]; k++)
                    {
                        for (int l = 0; l < x.Shape[3]; l++)
                        {
                            Assert.AreEqual(x[i, j, k, l], y[count++]);
                        }
                    }
                }
            }
        }
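Assuming contiguous row-major storage, which the view construction above relies on, the flat index 567 maps back to one specific element of x:

        // For shape {10, 10, 10, 5} the row-major strides are {500, 50, 5, 1},
        // and 567 = 1*500 + 1*50 + 3*5 + 2, so y[567] = 0 also makes x[1, 1, 3, 2] == 0,
        // which the assertion loop then confirms element by element.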
Example No. 7
        public override FloatTensor Forward(FloatTensor input)
        {
            // Lazily (re)create the mask source whenever the input size changes,
            // filling every element with 1 - rate.
            if (_mask_source == null || input.Size != _mask_source.Size)
            {
                _mask_source = input.emptyTensorCopy(hook_graph: false);
                _mask_source.Fill(1 - rate, inline: true);
            }

            // Multiply the input by a mask sampled from the mask source.
            FloatTensor output = input.Mul(_mask_source.SampleMask());

            activation = output.Id;
            return(output);
        }
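A short reading of the fill value above, assuming rate is the layer's drop probability (its definition is not shown in the snippet):

        // Illustrative only: with rate = 0.25f the mask source is filled with 0.75f,
        // i.e. each unit's keep probability; SampleMask presumably draws the actual
        // binary mask from these values before it is multiplied into the input.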