public void ScalarMultiplication()
{
    // MulScalar mutates the tensor in place; verify every element against an
    // untouched reference copy for zero, positive, negative and tiny scalars.
    float[] data = { float.MinValue, -10, -1.5f, 0, 1.5f, 10, 20, float.MaxValue };
    int[] shape = { 2, 4 };
    var reference = new FloatTensor(data, shape);

    float[] scalars = { 0f, 99f, -99f, 0.000001f };
    foreach (var scalar in scalars)
    {
        var tensor = new FloatTensor(data, shape);
        tensor.MulScalar(scalar);
        for (int i = 0; i < tensor.Size; i++)
        {
            Assert.AreEqual(reference.Data[i] * scalar, tensor.Data[i]);
        }
    }
}
public void Add_()
{
    // Add_ mutates the tensor in place; verify every element against an
    // untouched reference copy for zero, positive, negative and tiny addends.
    float[] data = { float.MinValue, -10, -1.5f, 0, 1.5f, 10, 20, float.MaxValue };
    int[] shape = { 2, 4 };
    var reference = new FloatTensor(data, shape);

    float[] addends = { 0f, 99f, -99f, 0.000001f };
    foreach (var val in addends)
    {
        var tensor = new FloatTensor(data, shape);
        tensor.Add_(val);
        for (int i = 0; i < tensor.Size; i++)
        {
            Assert.AreEqual(reference.Data[i] + val, tensor.Data[i]);
        }
    }
}
public void TestReshapeFloat1D()
{
    // A 1-D view created with NewWithStorage1d at offset 0 shares storage with
    // the source, so both must read back the values written through the source.
    var source = new FloatTensor(10);
    for (int idx = 0; idx < source.Shape[0]; idx++)
    {
        source[idx] = idx;
    }

    var view = source.NewWithStorage1d((UIntPtr)0, 10, 1);
    for (int idx = 0; idx < source.Shape[0]; idx++)
    {
        Assert.AreEqual(view[idx], idx);
        Assert.AreEqual(source[idx], idx);
    }
}
public void FloatTensorLog()
{
    // Log() must be applied element-wise and preserve the input's 1-D shape.
    var input = FloatTensor.Range(2f, 100f, 1f);
    Assert.NotNull(input);
    Assert.True(1 == input.Shape.Length);

    var logged = input.Log();
    Assert.Equal(input.Shape.Length, logged.Shape.Length);
    Assert.Equal(input.Shape[0], logged.Shape[0]);

    for (var idx = 0; idx < input.Shape[0]; ++idx)
    {
        Assert.True(BasicTensorAPI.IsApproximatelyEqual(Math.Log(input[idx]), (double)logged[idx]));
    }
}
// Prepares the trainer for mini-batch fitting: validates that input and target
// agree on the sample dimension, stashes the origin tensors, allocates
// reusable per-batch buffers, records the criterion/optimizer, and precomputes
// how many flat elements one batch spans in each tensor.
// Returns the number of whole batches per epoch.
//
// NOTE(review): a trailing partial batch (input.Shape[0] % batch_size != 0)
// is silently dropped by the integer division in the return — confirm intended.
public int PrepareToFit(FloatTensor input, FloatTensor target, Loss.Loss criterion, Optimizer optimizer, int batch_size)
{
    // Input and target must have the same number of samples (dim 0).
    if (input.Shape[0] != target.Shape[0])
    {
        throw new InvalidDataException("Input and Target tensors don't seem to have the right dims");
    }
    _input_tensor_origin = input;
    _target_tensor_origin = target;

    // Input batch buffer: first dim replaced by batch_size, trailing dims copied.
    int[] input_buffer_shape = new int[input.Shape.Length];
    input_buffer_shape[0] = batch_size;
    for (int i = 1; i < input.Shape.Length; i++)
    {
        input_buffer_shape[i] = input.Shape[i];
    }
    last_input_buffer = controller.floatTensorFactory.Create(_shape: input_buffer_shape, _autograd: true);

    // Target batch buffer: same construction as the input buffer.
    int[] target_buffer_shape = new int[target.Shape.Length];
    target_buffer_shape[0] = batch_size;
    for (int i = 1; i < target.Shape.Length; i++)
    {
        target_buffer_shape[i] = target.Shape[i];
    }
    last_target_buffer = controller.floatTensorFactory.Create(_shape: target_buffer_shape, _autograd: true);

    this._batch_size = batch_size;
    this._criterion = criterion;
    this._optimizer = optimizer;

    // Flat-element stride of one batch = batch_size * product of trailing dims.
    this._input_batch_offset = batch_size;
    for (int i = 1; i < input.Shape.Length; i++)
    {
        this._input_batch_offset *= input.Shape[i];
    }
    this._target_batch_offset = batch_size;
    for (int i = 1; i < target.Shape.Length; i++)
    {
        this._target_batch_offset *= target.Shape[i];
    }

    // Whole batches only; any remainder is not fitted (see NOTE above).
    return((int)(input.Shape[0] / batch_size));
}
public void TestBackward()
{
    // A full forward + unreduced-MSE + backward pass over a small
    // Linear-ReLU-Linear stack must complete without throwing.
    var first = NN.Module.Linear(1000, 100);
    var second = NN.Module.Linear(100, 10);
    var model = NN.Module.Sequential(first, NN.Module.Relu(), second);

    var input = FloatTensor.RandomN(new long[] { 64, 1000 }, device: "cpu:0");
    var target = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");

    var prediction = model.Forward(input);
    var mse = NN.LossFunction.MSE(NN.Reduction.None);
    var loss = mse(prediction, target);

    model.ZeroGrad();
    loss.Backward();
}
public void TestCustomModuleWithInPlaceModification()
{
    // An in-place transpose of a registered parameter must be visible through
    // the module's accessor as well (both reference the same tensor).
    var param = FloatTensor.RandomN(new long[] { 1000, 100 });
    var module = new TestModule("test", param, true);

    Assert.Equal(1000, module.GetParameter("test").Shape[0]);
    Assert.Equal(100, module.GetParameter("test").Shape[1]);

    // Autograd must be disabled while mutating a leaf parameter in place.
    using (var noGrad = new AutoGradMode(false))
    {
        param.TransposeInPlace(0, 1);
    }

    Assert.Equal(100, module.GetParameter("test").Shape[0]);
    Assert.Equal(1000, module.GetParameter("test").Shape[1]);
    Assert.Equal(100, param.Shape[0]);
    Assert.Equal(1000, param.Shape[1]);
}
public void Tanh()
{
    // Tanh() is element-wise; every element of the result must be checked.
    float[] data1 = { -0.6366f, 0.2718f, 0.4469f, 1.3122f };
    int[] shape1 = { 4 };
    var tensor = new FloatTensor(data1, shape1);
    float[] data2 = { -0.562580109f, 0.265298963f, 0.419347495f, 0.86483103f };
    int[] shape2 = { 4 };
    var expectedTanhTensor = new FloatTensor(data2, shape2);
    var actualTanhTensor = tensor.Tanh();
    // BUG FIX: the loop previously started at i = 2, silently skipping the
    // first two elements even though their expected values are listed above.
    // A small delta replaces exact float equality to tolerate rounding noise.
    for (int i = 0; i < actualTanhTensor.Size; i++)
    {
        Assert.AreEqual(expectedTanhTensor.Data[i], actualTanhTensor.Data[i], 1e-6);
    }
}
public void Floor_()
{
    // Floor_ mutates the tensor in place, rounding each element toward -infinity.
    float[] inputValues = { 5.89221f, -20.11f, 9.0f, 100.4999f, 100.5001f };
    int[] inputShape = { 5 };
    var tensor = new FloatTensor(inputValues, inputShape);
    float[] flooredValues = { 5, -21, 9, 100, 100 };
    int[] flooredShape = { 5 };
    var expected = new FloatTensor(flooredValues, flooredShape);

    tensor.Floor_();

    for (int idx = 0; idx < tensor.Size; idx++)
    {
        Assert.AreEqual(tensor.Data[idx], expected.Data[idx]);
    }
}
public void EvalLossSequence()
{
    // A forward pass through Linear-ReLU-Linear followed by sum-reduced MSE
    // must produce a scalar extractable as a float.
    var first = NN.Module.Linear(1000, 100);
    var second = NN.Module.Linear(100, 10);
    var model = NN.Module.Sequential(first, NN.Module.Relu(), second);

    var input = FloatTensor.RandomN(new long[] { 64, 1000 }, device: "cpu:0");
    var target = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");

    var prediction = model.Forward(input);
    var mse = NN.LossFunction.MSE(NN.Reduction.Sum);
    var output = mse(prediction, target);

    var lossValue = output.Item <float>();
    Assert.IsNotNull(lossValue);
}
public void Cosh()
{
    // Cosh() is element-wise; every element of the result must be checked.
    float[] data1 = { 0.4f, 0.5f, 0.3f, -0.1f };
    int[] shape1 = { 4 };
    var tensor = new FloatTensor(data1, shape1);
    float[] data2 = { 1.08107237f, 1.12762597f, 1.04533851f, 1.00500417f };
    int[] shape2 = { 4 };
    var expectedCoshTensor = new FloatTensor(data2, shape2);
    var actualCoshTensor = tensor.Cosh();
    // BUG FIX: the loop previously started at i = 2, silently skipping the
    // first two elements even though their expected values are listed above.
    // A small delta replaces exact float equality to tolerate rounding noise.
    for (int i = 0; i < actualCoshTensor.Size; i++)
    {
        Assert.AreEqual(expectedCoshTensor.Data[i], actualCoshTensor.Data[i], 1e-6);
    }
}
public void GetFloatTensorData()
{
    // Tensors created directly, from an explicit storage, and as an offset
    // view into that storage must all expose non-null Data and Storage handles.
    const int size = 10;
    var storage = new AtenSharp.FloatTensor.FloatStorage(2 * size);
    var direct = new FloatTensor(size);
    var fromStorage = FloatTensor.NewWithStorage1d(storage, UIntPtr.Zero, size, 1);
    var offsetView = fromStorage.NewWithStorage1d((UIntPtr)size, size, 1);

    foreach (var tensor in new[] { direct, fromStorage, offsetView })
    {
        Assert.AreNotEqual(IntPtr.Zero, tensor.Data);
        Assert.AreNotEqual(IntPtr.Zero, tensor.Storage);
    }
}
public void FloatTensorExp()
{
    // Exp() must be applied element-wise and preserve the input's 1-D shape.
    var input = FloatTensor.Range(2f, 15f, 1f);
    Assert.IsNotNull(input);
    Assert.AreEqual(1, input.Shape.Length);

    var result = input.Exp();
    Assert.AreEqual(input.Shape.Length, result.Shape.Length);
    Assert.AreEqual(input.Shape[0], result.Shape[0]);

    for (var idx = 0; idx < input.Shape[0]; ++idx)
    {
        Assert.IsTrue(BasicTensorAPI.IsApproximatelyEqual(Math.Exp(input[idx]), (double)result[idx]));
    }
}
public void Cos_()
{
    // Cos_ mutates the tensor in place; every element must be checked.
    float[] data1 = { 0.4f, 0.5f, 0.3f, -0.1f };
    int[] shape1 = { 4 };
    var tensor = new FloatTensor(data1, shape1);
    float[] data2 = { 0.92106099f, 0.87758256f, 0.95533649f, 0.99500417f };
    int[] shape2 = { 4 };
    var expectedCosTensor = new FloatTensor(data2, shape2);
    tensor.Cos_();
    // BUG FIX: the loop previously started at i = 2, silently skipping the
    // first two elements even though their expected values are listed above.
    // A small delta replaces exact float equality to tolerate rounding noise.
    for (int i = 0; i < tensor.Size; i++)
    {
        Assert.AreEqual(expectedCosTensor.Data[i], tensor.Data[i], 1e-6);
    }
}
public void TestMul()
{
    // Multiplying a 100x100 ones tensor by 0.5 must scale every element.
    var x = FloatTensor.Ones(new long[] { 100, 100 });
    var y = x.Mul(0.5f.ToScalar());
    var ydata = y.Data <float>();
    var xdata = x.Data <float>();
    for (int i = 0; i < 100; i++)
    {
        for (int j = 0; j < 100; j++)
        {
            // BUG FIX: the index was `i + j`, which only ever reaches flat
            // indices 0..198 of the 10,000-element buffer (and re-checks most
            // of them). Row-major addressing of a 100x100 tensor is i*100 + j.
            Assert.Equal(ydata[i * 100 + j], xdata[i * 100 + j] * 0.5f);
        }
    }
}
// Creates a FloatTensor by reading its shape and flat data from a file.
//
// NOTE(review): the _shader, _dataOnGpu and _creation_op parameters are
// accepted but never forwarded — the inner Create call hard-codes
// _dataOnGpu: false and _creation_op: "read_from_file". Confirm whether any
// caller relies on these parameters taking effect.
public FloatTensor Create(string filepath, ComputeShader _shader = null, bool _dataOnGpu = false, bool _autograd = false, bool _keepgrads = false, string _creation_op = null)
{
    // Item1 = shape array, Item2 = flat element data.
    Tuple <int[], float[]> shape_data = FloatTensor.ReadFromFile(filepath);
    // _copyData: false — the freshly read array is handed over without copying.
    return(Create(_shape: shape_data.Item1, _data: shape_data.Item2, _copyData: false, _dataOnGpu: false, _autograd: _autograd, _keepgrads: _keepgrads, _creation_op: "read_from_file"));
}
public void Ceil()
{
    float[] data1 = { 5.89221f, -20.11f, 9.0f, 100.4999f, 100.5001f };
    int[] shape1 = { 5 };
    var tensor1 = new FloatTensor(data1, shape1);
    float[] data2 = { 6, -20, 9, 101, 101 };
    int[] shape2 = { 5 };
    var tensorCeil = new FloatTensor(data2, shape2);
    // BUG FIX: Ceil() (no trailing underscore) follows this suite's
    // out-of-place naming convention (cf. Floor_ above), so its return value —
    // previously discarded while the unchanged input was asserted — is what
    // must be checked.
    var result = tensor1.Ceil();
    for (int i = 0; i < result.Size; i++)
    {
        Assert.AreEqual(result.Data[i], tensorCeil.Data[i]);
    }
}
public void TestLinearNoBias()
{
    // Without a bias term, Linear.Forward(x) must equal x @ W^T element-for-element.
    var lin = NN.Module.Linear(1000, 100, false);
    var weight = lin.Weight.Transpose(0, 1);
    var input = FloatTensor.RandomN(new long[] { 1, 1000 });

    var forward = lin.Forward(input);
    var matmul = input.MatMul(weight);

    Assert.Equal(forward.Shape.Length, matmul.Shape.Length);
    Assert.Equal(forward.Shape[0], matmul.Shape[0]);
    Assert.Equal(forward.Shape[1], matmul.Shape[1]);

    // NOTE(review): exact float equality assumes Forward and MatMul use the
    // same kernel/accumulation order — confirm this holds on all backends.
    var forwardData = forward.Data <float>();
    var matmulData = matmul.Data <float>();
    for (int i = 0; i < 100; i++)
    {
        Assert.Equal(forwardData[i], matmulData[i]);
    }
}
public void Zero_()
{
    // Zero_ overwrites every element in place, including float extremes.
    float[] inputValues = { -1, 0, 1, float.MaxValue, float.MinValue };
    int[] inputShape = { 5 };
    var tensor = new FloatTensor(inputValues, inputShape);
    float[] zeroValues = { 0, 0, 0, 0, 0 };
    int[] zeroShape = { 5 };
    var expected = new FloatTensor(zeroValues, zeroShape);

    tensor.Zero_();

    for (int idx = 0; idx < tensor.Size; idx++)
    {
        Assert.AreEqual(tensor.Data[idx], expected.Data[idx]);
    }
}
public void Neg()
{
    float[] data1 = { -1, 0, 1, float.MaxValue, float.MinValue };
    int[] shape1 = { 5 };
    var tensor1 = new FloatTensor(data1, shape1);
    float[] data2 = { 1, 0, -1, -float.MaxValue, -float.MinValue };
    int[] shape2 = { 5 };
    var tensorNeg = new FloatTensor(data2, shape2);
    // BUG FIX: Neg() (no trailing underscore) follows this suite's
    // out-of-place naming convention, so its return value — previously
    // discarded while the unchanged input was asserted — is what must be
    // checked.
    var result = tensor1.Neg();
    for (int i = 0; i < result.Size; i++)
    {
        Assert.AreEqual(result.Data[i], tensorNeg.Data[i]);
    }
}
public void Add()
{
    // Tensor-plus-tensor addition must be element-wise.
    float[] lhsData = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    int[] lhsShape = { 2, 5 };
    var lhs = new FloatTensor(lhsData, lhsShape);
    float[] rhsData = { 3, 2, 6, 9, 10, 1, 4, 8, 5, 7 };
    int[] rhsShape = { 2, 5 };
    var rhs = new FloatTensor(rhsData, rhsShape);

    var sum = lhs.Add(rhs);

    for (int idx = 0; idx < sum.Size; idx++)
    {
        Assert.AreEqual(lhs.Data[idx] + rhs.Data[idx], sum.Data[idx]);
    }
}
public void Sqrt()
{
    // Sqrt() is element-wise: negative inputs produce NaN, non-negative
    // inputs produce the real square root.
    float[] data1 = { float.MaxValue, float.MinValue, 1f, 4f, 5f, 2.3232f, -30f };
    int[] shape1 = { 7 };
    var tensor = new FloatTensor(data1, shape1);
    // BUG FIX: the expected value for index 0 was hard-coded NaN, but
    // sqrt(float.MaxValue) is a large finite number — only the negative
    // inputs (float.MinValue, -30) yield NaN. The first slot is now computed,
    // and the loop starts at 0 instead of 2 so no element is skipped.
    float[] data2 = { (float)Math.Sqrt(float.MaxValue), float.NaN, 1f, 2f, 2.236068f, 1.524205f, float.NaN };
    int[] shape2 = { 7 };
    var expectedTensor = new FloatTensor(data2, shape2);
    var actualTensor = tensor.Sqrt();
    for (int i = 0; i < expectedTensor.Size; i++)
    {
        // Round-to-3 comparison; NaN == NaN passes under Assert.AreEqual.
        Assert.AreEqual(Math.Round(expectedTensor.Data[i], 3), Math.Round(actualTensor.Data[i], 3));
    }
}
// Fully-connected (dense) layer constructor: builds the weight tensor (and an
// optional bias), registers both as trainable parameters, and registers the
// model with the controller.
//
// NOTE(review): `init(name)` reads a `name` member not visible in this chunk —
// presumably provided by the base class; confirm it is set before this runs.
public Linear(SyftController _controller, int input, int output, string initializer = "Xavier", bool biased = false, float[] weights = null, float[] bias = null, bool fast = true)
{
    init(name);

    this.controller = _controller;

    // In "fast" mode the weight matrix is stored transposed, so the logical
    // input/output dimensions are swapped here.
    _input = fast ? output : input;
    _output = fast ? input : output;
    _fast = fast;

    // Supplying an explicit bias array implies a biased layer even when the
    // `biased` flag is false.
    _biased = biased || bias != null;

    int[] weightShape = { _input, _output };
    if (weights == null)
    {
        // Xavier initialization scales by fan-in (`input`); the alternative
        // is unscaled random weights.
        weights = initializer == "Xavier" ? controller.RandomWeights(input * output, input) : controller.RandomWeights(input * output);
    }

    if (_fast)
    {
        // Re-lay out the row-major (input x output) weights into the
        // transposed order expected by fast mode:
        // source index idx = r*output + c maps to r + input*c.
        var new_weights = new float[weights.Length];
        for (var idx = 0; idx < weights.Length; idx++)
        {
            new_weights[(idx - (idx % output)) / output + input * (idx % output)] = weights[idx];
        }
        weights = new_weights;
    }

    // Weight tensor tracks gradients and keeps them across steps.
    _weights = controller.floatTensorFactory.Create(_shape: weightShape, _data: weights, _autograd: true, _keepgrads: true);
    parameters.Add(_weights.Id);

    if (_biased)
    {
        // Bias is a 1 x output row vector, broadcast over the batch.
        int[] biasShape = { 1, output };
        _bias = controller.floatTensorFactory.Create(_data: bias, _shape: biasShape, _autograd: true);
        parameters.Add(_bias.Id);
    }
    ;

    // warning 420: volatile field passed by ref to Interlocked — intentional.
#pragma warning disable 420
    id = System.Threading.Interlocked.Increment(ref nCreated);
    controller.addModel(this);
}
// Verifies AddMatrixMultiply (base += t1 @ t2, in place) against a naive
// triple-loop reference for two accumulator shapes: a 2x2 base accumulating a
// (2x3)@(3x2) product, and a 3x3 base accumulating a (3x2)@(2x3) product.
public void AddMatrixMultiplyTest()
{
    float[] base1_data = new float[] { 1, 2, 3, 4 };
    int[] base1_shape = new int[] { 2, 2 };
    var base1 = new FloatTensor(base1_data, base1_shape);

    float[] base2_data = new float[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    int[] base2_shape = new int[] { 3, 3 };
    var base2 = new FloatTensor(base2_data, base2_shape);

    // Both operands view the same flat data, once as 2x3 and once as 3x2.
    float[] data = new float[] { 1, 2, 3, 4, 5, 6 };
    int[] tensor1_shape = new int[] { 2, 3 };
    int[] tensor2_shape = new int[] { 3, 2 };
    var tensor1 = new FloatTensor(data, tensor1_shape);
    var tensor2 = new FloatTensor(data, tensor2_shape);

    base1.AddMatrixMultiply(tensor1, tensor2);
    base2.AddMatrixMultiply(tensor2, tensor1);

    // Reference check: each cell equals the pre-call base value plus the dot
    // product of the matching row/column. Reading base1_data/base2_data here
    // assumes the FloatTensor constructor copied those arrays — if it aliased
    // them instead, these would already hold post-call values. TODO confirm.
    for (int i = 0; i < base1_shape[0]; i++)
    {
        for (int j = 0; j < base1_shape[1]; j++)
        {
            float mm_res = base1_data[i * base1_shape[1] + j];
            for (int k = 0; k < tensor1_shape[1]; k++)
            {
                mm_res += tensor1[i, k] * tensor2[k, j];
            }
            Assert.AreEqual(base1[i, j], mm_res);
        }
    }

    for (int i = 0; i < base2_shape[0]; i++)
    {
        for (int j = 0; j < base2_shape[1]; j++)
        {
            float mm_res = base2_data[i * base2_shape[1] + j];
            for (int k = 0; k < tensor2_shape[1]; k++)
            {
                mm_res += tensor2[i, k] * tensor1[k, j];
            }
            Assert.AreEqual(base2[i, j], mm_res);
        }
    }
}
public void TestGradConditional()
{
    // CondModel takes a different Forward path depending on its flag; after a
    // backward pass, only parameters on the active path should hold gradients
    // (2 when the flag is true, 3 when it is false).
    var x = FloatTensor.RandomN(new long[] { 64, 1000 }, device: "cpu:0");
    var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
    var loss = NN.LossFunction.MSE(NN.Reduction.None);

    // Runs one train/forward/backward cycle and counts non-null gradients.
    int CountGrads(CondModel model)
    {
        model.Train();
        var eval = model.Forward(x);
        var output = loss(eval, y);
        model.ZeroGrad();
        output.Backward();
        var count = 0;
        foreach (var parm in model.Parameters())
        {
            if (parm.Grad().Handle != IntPtr.Zero)
            {
                count++;
            }
        }
        return count;
    }

    Assert.Equal(2, CountGrads(new CondModel(true)));
    Assert.Equal(3, CountGrads(new CondModel(false)));
}
public void TransposeNoDimensionsSpecified()
{
    // Parameterless Transpose() is only defined for 2-D tensors: both a 3-D
    // and a 1-D tensor must raise InvalidOperationException.
    float[] data3d = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
    int[] shape3d = { 3, 2, 2 };
    var tensor = new FloatTensor(data3d, shape3d);
    Assert.That(() => tensor.Transpose(), Throws.TypeOf <InvalidOperationException>());

    float[] data1d = { 1, 2, 3, 4, 5 };
    int[] shape1d = { 5 };
    tensor = new FloatTensor(data1d, shape1d);
    Assert.That(() => tensor.Transpose(), Throws.TypeOf <InvalidOperationException>());
}
public void TestSaveLoadTensorFloat()
{
    // Round-trips a float tensor through Save/Load and checks type + contents.
    const string file = ".saveload.float.ts";
    if (File.Exists(file))
    {
        File.Delete(file);
    }

    var original = FloatTensor.Ones(new long[] { 5, 6 });
    original.Save(file);
    var restored = TorchTensor.Load(file);
    File.Delete(file);

    Assert.NotNull(restored);
    Assert.Equal(restored.Type, original.Type);
    Assert.Equal(restored, original);
}
public void TestLinearWithBias()
{
    // With a bias, Linear.Forward(x) must match x @ W^T + b up to float
    // rounding differences.
    var lin = NN.Module.Linear(1000, 100, true);
    var bias = lin.Bias;
    var weight = lin.Weight.T();
    var input = FloatTensor.RandomN(new long[] { 1, 1000 });
    var forward = lin.Forward(input);
    var matmul = input.MatMul(weight).Add(bias.Value);
    Assert.Equal(forward.Shape.Length, matmul.Shape.Length);
    Assert.Equal(forward.Shape[0], matmul.Shape[0]);
    Assert.Equal(forward.Shape[1], matmul.Shape[1]);
    for (int i = 0; i < 100; i++)
    {
        // BUG FIX: the tolerance was 10e5f (= 1,000,000), which made the
        // range assertion vacuous at this magnitude — almost certainly a typo
        // for a small delta. 1e-3 comfortably covers accumulation-order
        // differences across a 1000-term dot product of RandomN values.
        Assert.InRange(forward.Data <float>()[i], matmul.Data <float>()[i] - 1e-3f, matmul.Data <float>()[i] + 1e-3f);
    }
}
public void ElementwiseMultiplication()
{
    // MulElementwise must compute the Hadamard (element-wise) product.
    float[] lhsData = { float.MinValue, -10, -1.5f, 0, 1.5f, 10, 20, float.MaxValue };
    int[] lhsShape = { 2, 4 };
    var lhs = new FloatTensor(lhsData, lhsShape);
    float[] rhsData = { float.MinValue, -10, -1.5f, 0, 1.5f, 10, 20, float.MaxValue };
    int[] rhsShape = { 2, 4 };
    var rhs = new FloatTensor(rhsData, rhsShape);

    var product = lhs.MulElementwise(rhs);

    for (int idx = 0; idx < product.Size; idx++)
    {
        float expected = lhs.Data[idx] * rhs.Data[idx];
        Assert.AreEqual(product.Data[idx], expected);
    }
}
public void TestLinearEditWeightsAndBias()
{
    // Newly assigned Weight/Bias tensors must be reflected by the module's
    // own accessors (shape for the weights, contents for the bias).
    var lin = NN.Module.Linear(0, 0, true);
    var newBias = FloatTensor.RandomN(new long[] { 100 });
    var newWeights = FloatTensor.RandomN(new long[] { 100, 1000 });
    lin.Bias = newBias;
    lin.Weight = newWeights;

    Assert.Equal(lin.Weight.Shape.Length, newWeights.Shape.Length);
    Assert.Equal(lin.Weight.Shape[0], newWeights.Shape[0]);
    Assert.Equal(lin.Weight.Shape[1], newWeights.Shape[1]);

    var actualBias = lin.Bias.Value.Data <float>();
    var expectedBias = newBias.Data <float>();
    for (int i = 0; i < 100; i++)
    {
        Assert.Equal(actualBias[i], expectedBias[i]);
    }
}