Example #1
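        // Routes the fit-related commands ("prepare_to_fit", "fit", "evaluate") to the matching
        // methods; any other command falls through to ProcessMessageAsLayerObject.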
        protected override string ProcessMessageAsLayerOrLoss(Command msgObj, SyftController ctrl)
        {
            switch (msgObj.functionCall)
            {
            case "prepare_to_fit":
            {
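                // tensorIndexParams layout: [input id, target id, loss id, optimizer id, batch size]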
                FloatTensor input      = ctrl.floatTensorFactory.Get(int.Parse(msgObj.tensorIndexParams[0]));
                FloatTensor target     = ctrl.floatTensorFactory.Get(int.Parse(msgObj.tensorIndexParams[1]));
                Loss.Loss   criterion  = ctrl.getLoss(int.Parse(msgObj.tensorIndexParams[2]));
                Optimizer   optim      = ctrl.getOptimizer(int.Parse(msgObj.tensorIndexParams[3]));
                int         batch_size = int.Parse(msgObj.tensorIndexParams[4]);

                return(PrepareToFit(input, target, criterion, optim, batch_size).ToString());
            }

            case "fit":
            {
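                // tensorIndexParams layout: [start batch id, end batch id, iteration count]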
                int start_batch_id = int.Parse(msgObj.tensorIndexParams[0]);
                int end_batch_id   = int.Parse(msgObj.tensorIndexParams[1]);
                int iters          = int.Parse(msgObj.tensorIndexParams[2]);
                return(Fit(start_batch_id, end_batch_id, iters));
            }

            case "evaluate":
            {
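                // tensorIndexParams layout: [test input id, test target id, loss id, batch size]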
                FloatTensor test_input  = ctrl.floatTensorFactory.Get(int.Parse(msgObj.tensorIndexParams[0]));
                FloatTensor test_target = ctrl.floatTensorFactory.Get(int.Parse(msgObj.tensorIndexParams[1]));
                Loss.Loss   criterion   = ctrl.getLoss(int.Parse(msgObj.tensorIndexParams[2]));
                int         batch_size  = int.Parse(msgObj.tensorIndexParams[3]);
                return(Evaluate(test_input, test_target, criterion, batch_size));
            }
            }

            return(ProcessMessageAsLayerObject(msgObj, ctrl));
        }
Example #2
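        // Runs the model over the test data one batch at a time and returns the ids of the
        // averaged-loss tensor and the predictions tensor as "<loss id>,<predictions id>".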
        public string Evaluate(FloatTensor test_input, FloatTensor test_target, Loss.Loss criterion, int batch_size)
        {
            if (test_input.Shape[0] != test_target.Shape[0])
            {
                throw new InvalidDataException("Input and target tensors must have the same size along dimension 0");
            }

            int[] input_buffer_shape = new int[test_input.Shape.Length];
            input_buffer_shape[0] = batch_size;
            for (int i = 1; i < test_input.Shape.Length; i++)
            {
                input_buffer_shape[i] = test_input.Shape[i];
            }

            FloatTensor test_input_buffer = controller.floatTensorFactory.Create(_shape: input_buffer_shape, _autograd: true);
            FloatTensor test_loss         = controller.floatTensorFactory.Create(_shape: new int[] { 1 });

            int[] target_buffer_shape = new int[test_target.Shape.Length];
            target_buffer_shape[0] = batch_size;
            for (int i = 1; i < test_target.Shape.Length; i++)
            {
                target_buffer_shape[i] = test_target.Shape[i];
            }

            FloatTensor test_target_buffer = controller.floatTensorFactory.Create(_shape: target_buffer_shape, _autograd: true);
            float       loss        = 0;
            int         num_batches = (int)(test_input.Shape[0] / batch_size);
            FloatTensor predictions = controller.floatTensorFactory.Create(test_target.Shape);

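            // A batch offset is the number of flat elements in one batch: batch_size multiplied
            // by the product of the tensor's trailing dimensions.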
            int test_input_batch_offset = batch_size;

            for (int i = 1; i < test_input.Shape.Length; i++)
            {
                test_input_batch_offset *= test_input.Shape[i];
            }

            int test_target_batch_offset = batch_size;

            for (int i = 1; i < test_target.Shape.Length; i++)
            {
                test_target_batch_offset *= test_target.Shape[i];
            }

            for (int batch_i = 0; batch_i < num_batches; batch_i++)
            {
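                // Copy the current batch into the reusable buffers, run the model and the
                // criterion forward, and accumulate the batch loss.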
                test_input_buffer.Fill(test_input, starting_offset: batch_i * test_input_batch_offset,
                                       length_to_fill: test_input_batch_offset);
                test_target_buffer.Fill(test_target, starting_offset: batch_i * test_target_batch_offset,
                                        length_to_fill: test_target_batch_offset);
                var        pred       = Forward(test_input_buffer);
                var        batch_loss = criterion.Forward(pred, test_target_buffer);
                predictions.Fill(pred, starting_offset: 0, length_to_fill: test_target_batch_offset, starting_offset_fill: test_target_batch_offset * batch_i);
                loss += (batch_loss.Data[0] / batch_size);
            }
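            // Average the accumulated per-batch losses and return both tensor ids.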
            test_loss.Fill(loss / num_batches);
            return(test_loss.Id.ToString() + "," + predictions.Id.ToString());
        }
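
The return value above packs the ids of the loss tensor and the predictions tensor into a single comma-separated string. A minimal caller-side sketch of how those ids could be resolved back into tensors is shown below; it assumes access to the same controller used in Example #1 (ctrl), and the result variable is a hypothetical name for the string returned by Evaluate.

            // result is the comma-separated string returned by Evaluate, e.g. "42,43" (illustrative ids).
            string[] ids = result.Split(',');
            FloatTensor test_loss   = ctrl.floatTensorFactory.Get(int.Parse(ids[0]));
            FloatTensor predictions = ctrl.floatTensorFactory.Get(int.Parse(ids[1]));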
Example #3
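        // Caches the training data, criterion, optimizer, and reusable per-batch buffers, and
        // returns the number of batches the input splits into at the given batch size.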
        public int PrepareToFit(FloatTensor input, FloatTensor target, Loss.Loss criterion, Optimizer optimizer, int batch_size)
        {
            if (input.Shape[0] != target.Shape[0])
            {
                throw new InvalidDataException("Input and target tensors must have the same size along dimension 0");
            }

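            // Keep references to the full datasets and allocate reusable per-batch buffers below.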
            _input_tensor_origin  = input;
            _target_tensor_origin = target;

            int[] input_buffer_shape = new int[input.Shape.Length];
            input_buffer_shape[0] = batch_size;
            for (int i = 1; i < input.Shape.Length; i++)
            {
                input_buffer_shape[i] = input.Shape[i];
            }

            last_input_buffer = controller.floatTensorFactory.Create(_shape: input_buffer_shape, _autograd: true);

            int[] target_buffer_shape = new int[target.Shape.Length];
            target_buffer_shape[0] = batch_size;
            for (int i = 1; i < target.Shape.Length; i++)
            {
                target_buffer_shape[i] = target.Shape[i];
            }

            last_target_buffer = controller.floatTensorFactory.Create(_shape: target_buffer_shape, _autograd: true);

            this._batch_size = batch_size;
            this._criterion  = criterion;
            this._optimizer  = optimizer;

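            // As in Evaluate, an offset is the flat element count of one batch: batch_size times
            // the product of the corresponding tensor's trailing dimensions.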
            this._input_batch_offset = batch_size;
            for (int i = 1; i < input.Shape.Length; i++)
            {
                this._input_batch_offset *= input.Shape[i];
            }

            this._target_batch_offset = batch_size;
            for (int i = 1; i < target.Shape.Length; i++)
            {
                this._target_batch_offset *= target.Shape[i];
            }

            return((int)(input.Shape[0] / batch_size));
        }
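
A minimal usage sketch of the intended call sequence is given below. The names model, input, target, criterion, and optimizer are placeholders for objects obtained elsewhere through the controller, and the literal arguments are illustrative; only the PrepareToFit and Fit signatures come from the examples above.

        // Hypothetical flow: set up batching state, then train over the returned batch range
        // (whether end_batch_id is inclusive depends on Fit, which is not shown here).
        int num_batches = model.PrepareToFit(input, target, criterion, optimizer, 32);
        string fit_result = model.Fit(0, num_batches, 10);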