public unsafe void Convolution2DValidRectangle1()
{
    // Valid convolution over a rectangular input with a single bias value
    float[,] source = { { 0, 1, 0, 2, 0, 1 } };
    float[,] kernel = { { 1, 1, 0, 1 } };
    float[] bias = { 0.9f };
    float[,] expected = { { 2.9f, 2.9f } };
    fixed (float* pSource = source, pKernel = kernel, pBias = bias)
    {
        Tensor.Reshape(pSource, 1, 6, out Tensor x);
        Tensor.Reshape(pKernel, 1, 4, out Tensor w);
        Tensor.Reshape(pBias, 1, 1, out Tensor b);
        Tensor.New(1, 2, out Tensor y);
        CpuDnn.ConvolutionForward(x, new TensorInfo(2, 3, 1), w, new TensorInfo(2, 2, 1), b, y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public unsafe void FullyConnectedBackwardData()
{
    // Compares the CPU and GPU implementations of the fully connected data gradient.
    // Fix: the original also pinned fc.Biases and reshaped it into a tensor that was
    // never used by either code path — that dead pin/reshape has been removed.
    FullyConnectedLayer fc = new FullyConnectedLayer(TensorInfo.Linear(231), 125, ActivationType.Sigmoid, WeightsInitializationMode.GlorotUniform, BiasInitializationMode.Gaussian);
    Tensor dy = CreateRandomTensor(400, fc.OutputInfo.Size);
    fixed (float* pw = fc.Weights)
    {
        Tensor.Reshape(pw, fc.InputInfo.Size, fc.OutputInfo.Size, out Tensor w);

        // CPU reference result
        Tensor.New(dy.Entities, fc.InputInfo.Size, out Tensor dx1);
        CpuDnn.FullyConnectedBackwardData(w, dy, dx1);

        // GPU result, copied back to host for the comparison
        Gpu gpu = Gpu.Default;
        using (DeviceMemory<float>
            dy_gpu = gpu.AllocateDevice(dy),
            w_gpu = gpu.AllocateDevice(w),
            dx_gpu = gpu.AllocateDevice<float>(dx1.Size))
        {
            Dnn.Get(gpu).FullyConnectedBackwardData(dy.Entities, fc.InputInfo.Size, fc.OutputInfo.Size, dy_gpu.Ptr, w_gpu.Ptr, dx_gpu.Ptr);
            dx_gpu.CopyToHost(dx1.Entities, dx1.Length, out Tensor dx2);
            Assert.IsTrue(dx1.ContentEquals(dx2));
            Tensor.Free(dy, dx1, dx2);
        }
    }
}
/// <inheritdoc/>
public override void Forward(Span<Tensor> inputs, out Tensor z, out Tensor a)
{
    // Accumulate the stacked inputs into z, then activate into a
    Tensor reference = inputs[0];
    Tensor.New(reference.Entities, reference.Length, out z);
    CpuBlas.Sum(inputs, z);
    Tensor.Like(z, out a);
    CpuDnn.ActivationForward(z, ActivationFunctions.Activation, a);
}
public unsafe void Pool2()
{
    // Pooling over a 7x7 single-channel image (TensorInfo.Image<Alpha8>(7, 7)),
    // producing a 1x16 result
    float[,] source =
    {
        { 0.77f, -0.11f, 0.11f, 0.33f, 0.55f, -0.11f, 0.33f, -0.11f, 1, -0.11f, 0.33f, -0.11f, 0.11f, -0.11f, 0.11f, -0.11f, 1, -0.33f, 0.11f, -0.11f, 0.55f, 0.33f, 0.33f, -0.33f, 0.55f, -0.33f, 0.33f, 0.33f, 0.55f, -0.11f, 0.11f, -0.33f, 1, -0.11f, 0.11f, -0.11f, 0.11f, -0.11f, 0.33f, -0.11f, 1, -0.11f, 0.33f, -0.11f, 0.55f, 0.33f, 0.11f, -0.11f, 0.77f }
    };
    float[,] expected =
    {
        { 1, 0.33f, 0.55f, 0.33f, 0.33f, 1, 0.33f, 0.55f, 0.55f, 0.33f, 1, 0.11f, 0.33f, 0.55f, 0.11f, 0.77f }
    };
    fixed (float* pSource = source)
    {
        Tensor.Reshape(pSource, 1, 49, out Tensor x);
        Tensor.New(1, 16, out Tensor y);
        CpuDnn.PoolingForward(x, TensorInfo.Image<Alpha8>(7, 7), y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public unsafe void Compress2()
{
    // Bias gradient over a batch of two samples, each laid out as two 3x3 channels
    float[,] source =
    {
        { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 99, 3, 4, 5, 6, 7, 8, 9 },
        { 1, 2, 3, 4, 5, 66, 7, 8, 9, 1, 2, 3, 44, 5, 6, 7, 8, 9 }
    };
    float[] expected = { 150, 227 };
    fixed (float* pSource = source)
    {
        Tensor.Reshape(pSource, 2, 18, out Tensor x);
        Tensor.New(1, 2, out Tensor db);
        CpuDnn.ConvolutionBackwardBias(x, new TensorInfo(3, 3, 2), db);
        Assert.IsTrue(db.ToArray().ContentEquals(expected));
        db.Free();
    }
}
public unsafe void ConvolutionFull1()
{
    // Backward (full) convolution: 2x2 input and 2x2 kernel expand into a 3x3 result
    float[,] source = { { 0, 1, -1, 2 } };
    float[,] kernel = { { 1, 0, 1, 1 } };
    float[,] expected = { { 0, 1, 1, -1, 1, 3, 0, -1, 2 } };
    fixed (float* pSource = source, pKernel = kernel)
    {
        Tensor.Reshape(pSource, 1, 4, out Tensor x);
        Tensor.Reshape(pKernel, 1, 4, out Tensor w);
        Tensor.New(1, 9, out Tensor y);
        CpuDnn.ConvolutionBackwardData(x, new TensorInfo(2, 2, 1), w, new TensorInfo(2, 2, 1), y, new TensorInfo(3, 3, 1));
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public unsafe void FullyConnectedForward()
{
    // Compares the CPU and GPU forward passes of a fully connected layer
    FullyConnectedLayer fc = new FullyConnectedLayer(TensorInfo.Linear(231), 125, ActivationType.Sigmoid, WeightsInitializationMode.GlorotUniform, BiasInitializationMode.Gaussian);
    Tensor x = CreateRandomTensor(400, fc.InputInfo.Size);
    fixed (float* pWeights = fc.Weights, pBiases = fc.Biases)
    {
        Tensor.Reshape(pWeights, fc.InputInfo.Size, fc.OutputInfo.Size, out Tensor w);
        Tensor.Reshape(pBiases, 1, fc.OutputInfo.Size, out Tensor b);

        // CPU reference result
        Tensor.New(x.Entities, fc.OutputInfo.Size, out Tensor y1);
        CpuDnn.FullyConnectedForward(x, w, b, y1);

        // GPU result, copied back to host for the comparison
        Gpu gpu = Gpu.Default;
        using (DeviceMemory<float>
            x_gpu = gpu.AllocateDevice(x),
            w_gpu = gpu.AllocateDevice(w),
            b_gpu = gpu.AllocateDevice(b),
            y_gpu = gpu.AllocateDevice<float>(y1.Size))
        {
            Dnn.Get(gpu).FullyConnectedForward(x.Entities, x.Length, y1.Length, x_gpu.Ptr, w_gpu.Ptr, b_gpu.Ptr, y_gpu.Ptr);
            y_gpu.CopyToHost(y1.Entities, y1.Length, out Tensor y2);
            Assert.IsTrue(y1.ContentEquals(y2));
            Tensor.Free(x, y1, y2);
        }
    }
}
public unsafe void Pool5()
{
    // Pooling over a batch of two samples (32 values each), 8 outputs per sample
    float[,] source =
    {
        { -1, 0, 1, 2, 1, 1, 1, 1, 0, -0.3f, -5, -0.5f, -1, 10, -2, -1, -1, 0, 1, 2, 1, 1, 1, 1, 0, -0.3f, -5, 1.2f, -1, 10, -2, -1 },
        { -1, 0, 1, 2, 1, 1, 1, 1, 0, -0.3f, -5, 1.2f, -1, 10, -2, -1, -1, 0, 1, 2, 1, 1, 1, 1, 0, -0.3f, -5, 1.45f, -1, 10, -2, -1 }
    };
    float[,] expected =
    {
        { 1, 2, 10, -0.5f, 1, 2, 10, 1.2f },
        { 1, 2, 10, 1.2f, 1, 2, 10, 1.45f },
    };
    fixed (float* pSource = source)
    {
        Tensor.Reshape(pSource, 2, 32, out Tensor x);
        Tensor.New(2, 8, out Tensor y);
        CpuDnn.PoolingForward(x, new TensorInfo(2, 2, 2), y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public unsafe void Convolution2DValid5()
{
    // Valid convolution with two identical kernels and distinct per-kernel biases (0 and 0.2)
    float[,] source = { { 0, 1, 0, 2, 0, 1, 1, 1, 0, 1, 0, 0, 0, 2, 1, 0, 1, 1 } };
    float[,] kernels =
    {
        { 1, 1, 0, 1, 0, 1, 1, 0 },
        { 1, 1, 0, 1, 0, 1, 1, 0 }
    };
    float[] biases = { 0, 0.2f };
    float[,] expected = { { 2, 4, 6, 3, 2.2f, 4.2f, 6.2f, 3.2f } };
    fixed (float* pSource = source, pKernels = kernels, pBiases = biases)
    {
        Tensor.Reshape(pSource, 1, 18, out Tensor x);
        Tensor.Reshape(pKernels, 2, 8, out Tensor w);
        Tensor.Reshape(pBiases, 1, 2, out Tensor b);
        Tensor.New(1, 8, out Tensor y);
        CpuDnn.ConvolutionForward(x, new TensorInfo(3, 3, 2), w, new TensorInfo(2, 2, 2), b, y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public unsafe void UpscalePool2()
{
    // Pooling backward: the upscaled gradient is written in place over the source tensor
    // (the same tensor is passed as both input and output)
    float[,] source = { { -1, 0, 1, 2, 1.2f, 1, 1, 1, 0, -0.3f, -5, -0.5f, -1, 10, -2, -1, -1, 0, 1, 2, 1.2f, 1, 1, 1, 0, -0.3f, -5, -0.5f, 11, 10, -2, -1 } };
    float[,] pooled = { { 66, 77, 99, 11, 66, 1, 111, 11 } };
    float[,] expected = { { 0, 0, 0, 77, 66, 0, 0, 0, 0, 0, 0, 11, 0, 99, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 0, 0, 0, 11, 111, 0, 0, 0 } };
    fixed (float* pSource = source, pPooled = pooled)
    {
        Tensor.Reshape(pSource, 1, 32, out Tensor x);
        Tensor.Reshape(pPooled, 1, 8, out Tensor dy);
        CpuDnn.PoolingBackward(x, new TensorInfo(4, 4, 2), dy, x);
        Assert.IsTrue(x.ToArray2D().ContentEquals(expected));
    }
}
public unsafe void Convolution2DValid2()
{
    // Valid convolution over a batch of two identical 3x3 samples with a zeroed bias.
    // Fix: the original allocated the bias tensor with Tensor.NewZeroed but never
    // released it (only the result was freed) — both allocations are now freed.
    float[,] source =
    {
        { 0, 1, 0, 2, 0, 1, 1, 1, 0 },
        { 0, 1, 0, 2, 0, 1, 1, 1, 0 }
    };
    float[,] kernel = { { 1, 1, 0, 1 } };
    float[,] expected =
    {
        { 2, 2, 4, 1 },
        { 2, 2, 4, 1 }
    };
    fixed (float* pSource = source, pKernel = kernel)
    {
        Tensor.Reshape(pSource, 2, 9, out Tensor x);
        Tensor.Reshape(pKernel, 1, 4, out Tensor w);
        Tensor.NewZeroed(1, 1, out Tensor b);
        Tensor.New(2, 4, out Tensor y);
        CpuDnn.ConvolutionForward(x, new TensorInfo(3, 3, 1), w, new TensorInfo(2, 2, 1), b, y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        Tensor.Free(b, y);
    }
}
public void ActivationForward()
{
    // CPU sigmoid activation vs the GPU implementation on the same random input
    Tensor x = CreateRandomTensor(400, 1200);
    Tensor.Like(x, out Tensor y1);
    CpuDnn.ActivationForward(x, ActivationFunctions.Sigmoid, y1);
    Gpu gpu = Gpu.Default;
    using (DeviceMemory<float>
        x_gpu = gpu.AllocateDevice(x),
        y_gpu = gpu.AllocateDevice<float>(x.Size))
    {
        Dnn.Get(gpu).ActivationForward(x.Entities, x.Length, x_gpu.Ptr, y_gpu.Ptr, ActivationFunctions.Sigmoid);
        y_gpu.CopyToHost(y1.Entities, y1.Length, out Tensor y2);
        Assert.IsTrue(y1.ContentEquals(y2));
        Tensor.Free(x, y1, y2);
    }
}
public unsafe void Pool1()
{
    // Forward pooling on a single 4x4 image
    float[,] source = { { -1, 0, 1, 2, 1, 1, 1, 1, 0, -0.3f, -5, -0.5f, -1, 10, -2, -1 } };
    float[,] pooled = { { 1, 2, 10, -0.5f } };
    fixed (float* pSource = source)
    {
        Tensor.Reshape(pSource, 1, 16, out Tensor x);
        Tensor.New(1, 4, out Tensor y);
        CpuDnn.PoolingForward(x, TensorInfo.Image<Alpha8>(4, 4), y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(pooled));

        // Backward pass: the gradient is upscaled in place over the source tensor
        CpuDnn.PoolingBackward(x, TensorInfo.Image<Alpha8>(4, 4), y, x);
        float[,] expected = { { 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, -0.5f, 0, 10, 0, 0 } };
        Assert.IsTrue(x.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public void ActivationBackward()
{
    // CPU sigmoid-prime backpropagation vs the GPU implementation
    Tensor x = CreateRandomTensor(400, 1200), dy = CreateRandomTensor(400, 1200);
    Tensor.Like(x, out Tensor dx1);
    CpuDnn.ActivationBackward(x, dy, ActivationFunctions.SigmoidPrime, dx1);
    Gpu gpu = Gpu.Default;
    using (DeviceMemory<float>
        x_gpu = gpu.AllocateDevice(x),
        dy_gpu = gpu.AllocateDevice(dy))
    {
        // The GPU call writes its result back into dy_gpu (same pointer for dy and dx)
        Dnn.Get(gpu).ActivationBackward(x.Entities, x.Length, x_gpu.Ptr, dy_gpu.Ptr, ActivationFunctions.SigmoidPrime, dy_gpu.Ptr);
        dy_gpu.CopyToHost(dy.Entities, dy.Length, out Tensor dx2);
        Assert.IsTrue(dx1.ContentEquals(dx2));
        Tensor.Free(x, dy, dx1, dx2);
    }
}
public unsafe void Compress1()
{
    // Bias gradient over a single 3x3 sample: 1+2+...+9 = 45
    float[,] source = { { 1, 2, 3, 4, 5, 6, 7, 8, 9 } };
    float[] expected = { 45 };
    fixed (float* pSource = source)
    {
        Tensor.Reshape(pSource, 1, 9, out Tensor x);
        Tensor.New(1, 1, out Tensor db);
        CpuDnn.ConvolutionBackwardBias(x, TensorInfo.Image<Alpha8>(3, 3), db);
        Assert.IsTrue(db.ToArray().ContentEquals(expected));
        db.Free();
    }
}
// NOTE(review): this method is truncated in the visible chunk — the
// BatchNormalizationForwardTraining call is cut off mid-argument-list, so no
// code changes are proposed here; the tokens below are unchanged, only
// reflowed and commented.
public void PerActivationBatchNormalizationForward() {
    // Setup
    Tensor x = CreateRandomTensor(400, 250);
    Tensor.NewZeroed(1, 250, out Tensor mu);
    Tensor.LikeZeroed(mu, out Tensor sigma2);
    Tensor.New(1, 250, out Tensor gamma);
    Tensor.NewZeroed(1, 250, out Tensor beta);
    // Fill gamma with random scale factors
    for (int i = 0; i < 250; i++) { gamma[i] = ThreadSafeRandom.NextFloat(); }
    // Cpu
    Tensor.Like(x, out Tensor y1);
    CpuDnn.BatchNormalizationForward(NormalizationMode.PerActivation, TensorInfo.Linear(250), x, 1, mu, sigma2, gamma, beta, y1);
    // Gpu
    Gpu gpu = Gpu.Default;
    using (DeviceMemory<float> x_gpu = gpu.AllocateDevice(x), y_gpu = gpu.AllocateDevice<float>(x.Size), gamma_gpu = gpu.AllocateDevice(gamma), beta_gpu = gpu.AllocateDevice(beta), run_mean = gpu.AllocateDevice<float>(mu.Size), run_var = gpu.AllocateDevice<float>(mu.Size)) {
        // Per-activation layout: one descriptor for the data, one for gamma/beta
        TensorDescriptor desc = new TensorDescriptor();
        desc.Set4D(DataType.FLOAT, TensorFormat.CUDNN_TENSOR_NCHW, x.Entities, x.Length, 1, 1);
        TensorDescriptor gammaBetadesc = new TensorDescriptor();
        gammaBetadesc.Set4D(DataType.FLOAT, TensorFormat.CUDNN_TENSOR_NCHW, 1, x.Length, 1, 1);
        Dnn.Get(gpu).BatchNormalizationForwardTraining(
            BatchNormMode.PER_ACTIVATION, 1, 0, desc, x_gpu.Ptr, desc, y_gpu.Ptr, gammaBetadesc, gamma_gpu.Ptr, beta_gpu.Ptr, 1, run_mean.Ptr, run_var.Ptr, CpuDnn.CUDNN_BN_MIN_EPSILON,
public void FullyConnectedBackwardFilter()
{
    // Compares the CPU and GPU weight-gradient computations of a fully connected layer.
    // Fix: the device buffer was misnamed djdb_gpu — it holds the weight gradient
    // (dJdw), not a bias gradient, so it is renamed for consistency with dJdw1/dJdw2.
    FullyConnectedLayer fc = new FullyConnectedLayer(TensorInfo.Linear(231), 125, ActivationType.Sigmoid, WeightsInitializationMode.GlorotUniform, BiasInitializationMode.Gaussian);
    Tensor x = CreateRandomTensor(400, fc.InputInfo.Size), dy = CreateRandomTensor(x.Entities, fc.OutputInfo.Size);

    // CPU reference result, flattened to a single row for the comparison below
    Tensor.New(fc.InputInfo.Size, fc.OutputInfo.Size, out Tensor dJdw1);
    CpuDnn.FullyConnectedBackwardFilter(x, dy, dJdw1);
    dJdw1.Reshape(1, dJdw1.Size, out dJdw1);
    Gpu gpu = Gpu.Default;
    using (DeviceMemory<float>
        x_gpu = gpu.AllocateDevice(x),
        dy_gpu = gpu.AllocateDevice(dy),
        djdw_gpu = gpu.AllocateDevice<float>(fc.Weights.Length))
    {
        Dnn.Get(gpu).FullyConnectedBackwardFilter(x.Entities, fc.InputInfo.Size, fc.OutputInfo.Size, x_gpu.Ptr, dy_gpu.Ptr, djdw_gpu.Ptr);
        djdw_gpu.CopyToHost(1, fc.Weights.Length, out Tensor dJdw2);
        Assert.IsTrue(dJdw1.ContentEquals(dJdw2));
        Tensor.Free(x, dy, dJdw1, dJdw2);
    }
}
public unsafe void Pool3()
{
    // Minimal case: a single 2x2 image pooled to a single value
    float[,] source = { { -1, 0, 1, 1 }, };
    float[,] expected = { { 1 } };
    fixed (float* pSource = source)
    {
        Tensor.Reshape(pSource, 1, 4, out Tensor x);
        Tensor.New(1, 1, out Tensor y);
        CpuDnn.PoolingForward(x, TensorInfo.Image<Alpha8>(2, 2), y);
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}
public unsafe void UpscalePool4()
{
    // Pooling backward over a batch of two samples with three 4x4 channels each;
    // the upscaled gradient is written in place over the source tensor
    float[,] source =
    {
        { -1, 0, 1, 2, 1.2f, 1, 1, 1, 0, -0.3f, -5, -0.5f, -1, 10, -2, -1, -1, 2, 1, 2, 1.2f, 1, 1, 1, 0, -0.3f, 0, -0.5f, 11, 10, -2, -1, -1, 2, 1, 2, 1.2f, 5, 1, 5, 0, 22, 0, -0.5f, 11, 10, -2, 7 },
        { -1, 0, 1, 2, 1.2f, 1, 1, 1, 0, -0.3f, -5, -0.5f, -1, 10, -2, -1, -1, 2, 1, 2, 1.2f, 1, 1, 1, 0, -0.3f, 0, -0.5f, 11, 10, -2, -1, 99, 2, 1, 2, 1.2f, 5, 1, 5, 0, 22, 0, -0.5f, 11, 10, -2, 7 }
    };
    float[,] pooled =
    {
        { 66, 77, 99, 11, 66, 1, 111, 11, 11, 22, 33, 44 },
        { 66, 77, 222, 11, 66, 1, 111, 11, 11, 22, 33, 44 }
    };
    float[,] expected =
    {
        { 0, 0, 0, 77, 66, 0, 0, 0, 0, 0, 0, 11, 0, 99, 0, 0, 0, 66, 0, 1, 0, 0, 0, 0, 0, 0, 11, 0, 111, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 22, 0, 33, 0, 0, 0, 0, 0, 44 },
        { 0, 0, 0, 77, 66, 0, 0, 0, 0, 0, 0, 11, 0, 222, 0, 0, 0, 66, 0, 1, 0, 0, 0, 0, 0, 0, 11, 0, 111, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 22, 0, 33, 0, 0, 0, 0, 0, 44 }
    };
    fixed (float* pSource = source, pPooled = pooled)
    {
        Tensor.Reshape(pSource, 2, 48, out Tensor x);
        Tensor.Reshape(pPooled, 2, 12, out Tensor dy);
        CpuDnn.PoolingBackward(x, new TensorInfo(4, 4, 3), dy, x);
        Assert.IsTrue(x.ToArray2D().ContentEquals(expected));
    }
}
public unsafe void ConvolutionFull4()
{
    // Backward (full) convolution over a batch of two samples with two 2x2 channels,
    // two kernels, expanding into 3x3 per channel
    float[,] source =
    {
        { 0, 1, -1, 2, 0, -1, -1, 2 },
        { 0, 1, 2, 3, -1, -1, 0, 4 }
    };
    float[,] kernels =
    {
        { 1, 0, 1, 1, 0, 1, 0, 1 },
        { 1, 0, 0, 1, 1, 3, 0, -2 }
    };
    float[,] expected =
    {
        { 0, 0, 1, -2, 3, 2, 0, -2, 4, 0, 3, 0, 1, -4, -1, -4, 7, 2 },
        { -1, 0, 1, 2, 8, 3, 0, 2, 7, 2, 3, 0, -1, -8, -1, 2, 15, 4 }
    };
    fixed (float* pSource = source, pKernels = kernels)
    {
        Tensor.Reshape(pSource, 2, 8, out Tensor x);
        Tensor.Reshape(pKernels, 2, 8, out Tensor w);
        Tensor.New(2, 18, out Tensor y);
        CpuDnn.ConvolutionBackwardData(x, new TensorInfo(2, 2, 2), w, new TensorInfo(2, 2, 2), y, new TensorInfo(3, 3, 2));
        Assert.IsTrue(y.ToArray2D().ContentEquals(expected));
        y.Free();
    }
}