        public unsafe void SubtractFloat32_Test()
        {
            const int size  = 11;
            const int size2 = 5;

            float[] x1, x2, expres;
            x1 = new float[size] {
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
            };
            x2 = new float[size2] {
                1, 2, 3, 4, 5
            };
            expres = new float[size];

            for (int i = 0; i < size; i++)
            {
                expres[i] = x1[i] - x2[i % size2];
            }

            Tensor expected_res = expres.ToDisposedTensor(new Shape(size));

            Tensor t1, t2;

            t1 = x1.ToDisposedTensor(new Shape(size));
            t2 = x2.ToDisposedTensor(new Shape(size2));
            Tensor myres = CpuKernels.SubtractFloat32(t1, t2);


            Console.WriteLine(myres);
            Console.WriteLine(expected_res);
            Assert.AreEqual(expected_res.ToString(), myres.ToString());
            myres.Dispose();
        }
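Since x2 is shorter than x1, the expected values above wrap x2 around via modulo indexing, for example:

    expres[7]  = x1[7]  - x2[7 % 5]  = 8  - 3 = 5
    expres[10] = x1[10] - x2[10 % 5] = 11 - 1 = 10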
        public unsafe override Tensor CalculateResult()
        {
            Tensor v   = Terms[0].GetResult();
            Tensor res = CpuKernels.ExpandFloat(v, this.Shape, Terms[0].Shape, Multiplier);

            return(res);
        }
Example #3
        public override Tensor CalculateResult()
        {
            var x1 = Terms[0].GetResult();
            var x2 = Terms[1].GetResult();

            return(CpuKernels.MatrixMultiplyFloat(x1, x2));
        }
        public void MultiplyRandom()
        {
            Tensor m1, m2, calculated, result;

            float[] arr1, arr2, arr_expected;
            (arr1, m1)   = CreateRandomTensor();
            (arr2, m2)   = CreateRandomTensor();
            arr_expected = new float[9];

            //CpuKernels.MultiplyFloat32 already uses the ElementWiseMultiplyAVX method internally, so we compute the expected results manually here instead of reusing it.
            for (int i = 0; i < arr_expected.Length; i++)
            {
                arr_expected[i] = arr1[i] * arr2[i];
            }

            //VectorizationFloat.ElementWiseMultiplyAVX(arr1, arr2, arr_expected, 9);

            result = Tensor.ToDisposedTensor(arr_expected, new Shape(9), NumberType.Float32);

            calculated = CpuKernels.MultiplyFloat32(m1, m2);

            Console.WriteLine(calculated.ToString());
            Console.WriteLine(result.ToString());

            Assert.AreEqual(result.ToString(), calculated.ToString());

            m1.Dispose();
            m2.Dispose();
            calculated.Dispose();
        }
        public void Add()
        {
            Tensor expected = new Tensor(new Shape(3, 3), TensorConfig.Host_Float32);

            expected.SetValue(15);
            Tensor calculated = new Tensor(new Shape(3, 3), TensorConfig.Host_Float32);

            calculated.SetValue(0);
            Tensor[] inputs = new Tensor[5];
            for (int i = 0; i < 5; i++)
            {
                inputs[i] = new Tensor(new Shape(3, 3), TensorConfig.Host_Float32);
                inputs[i].SetValue(3);
            }
            CpuKernels.AddFloat32(calculated, inputs);
            Assert.AreEqual(expected.ToString(), calculated.ToString());
            Tensor calculated2 = CpuKernels.AddFloat32(inputs);

            Assert.AreEqual(expected.ToString(), calculated2.ToString());

            expected.Dispose();
            calculated.Dispose();
            for (int i = 0; i < 5; i++)
            {
                inputs[i].Dispose();
            }
        }
        public override Tensor CalculateResult()
        {
            Tensor v1  = Terms[0].GetResult();
            Tensor v2  = Terms[1].GetResult();
            Tensor res = CpuKernels.SubtractFloat(v1, v2);

            return(res);
        }
 public override unsafe void CalculateDerivate(Tensor s)
 {
     if (Terms[0].ContainsTrainable)
     {
         Tensor combined = CpuKernels.ExpandFloat_GetGradient_0(s, this.Shape, Terms[0].Shape, Multiplier);
         Terms[0].Derivate(combined);
         combined.Dispose();
     }
 }
Example #8
        public unsafe override Tensor CalculateResult()
        {
            for (int i = 0; i < this.Terms.Length; i++)
            {
                tensors[i] = Terms[i].GetResult();
            }

            return(CpuKernels.AddFloat(tensors));
        }
 public override unsafe void CalculateDerivate(Tensor s)
 {
     if (Terms[0].ContainsTrainable)
     {
         Tensor sigmo    = GetResult();
         Tensor combined = CpuKernels.SigmoidFloat_GetGradient_0(s, sigmo);
         Terms[0].Derivate(combined);
         combined.Dispose();
     }
 }
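SigmoidFloat_GetGradient_0 presumably applies the chain rule for the logistic function: with s the upstream gradient and sigmo the cached forward result, the combined gradient would be the element-wise product

$$\frac{\partial L}{\partial x} = s \odot \sigma(x)\bigl(1 - \sigma(x)\bigr).$$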
 public override void CalculateDerivate(Tensor s)
 {
     if (Terms[0].ContainsTrainable)
     {
         Terms[0].Derivate(s);
     }
     if (Terms[1].ContainsTrainable)
     {
         Tensor d2 = CpuKernels.SubtractFloat_GetGradient_1(s, Terms[0].GetResult(), Terms[1].GetResult());
         Terms[1].Derivate(d2);
         d2.Dispose();
     }
 }
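The asymmetry above reflects the derivative of a difference: the first operand receives the upstream gradient s unchanged, while the second presumably receives its negation:

$$\frac{\partial (a-b)}{\partial a} = 1, \qquad \frac{\partial (a-b)}{\partial b} = -1 \;\Rightarrow\; d2 = -s.$$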
Example #11
        public unsafe override Tensor CalculateResult()
        {
            Tensor res = Terms[0].GetResult();

            if (PowerOf == 2)
            {
                return(CpuKernels.Power2Float(res));
            }
            else
            {
                throw new Exception("Unsupported Power factor!");
            }
        }
Example #12
 public override unsafe void CalculateDerivate(Tensor s)
 {
     if (Terms[0].ContainsTrainable)
     {
         var combinedleft = CpuKernels.MatrixMultiplyFloat_GetGradient_0(s, Terms[1].GetResult(), this.Shape, Terms[0].Shape, Terms[1].Shape);
         Terms[0].Derivate(combinedleft);
         combinedleft.Dispose();
     }
     if (Terms[1].ContainsTrainable)
     {
         var combinedright = CpuKernels.MatrixMultiplyFloat_GetGradient_1(s, Terms[0].GetResult(), this.Shape, Terms[0].Shape, Terms[1].Shape);
         Terms[1].Derivate(combinedright);
         combinedright.Dispose();
     }
 }
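Assuming the standard backward pass for matrix multiplication, with S the upstream gradient and X1, X2 the two operands, the two kernels above would compute

$$\frac{\partial L}{\partial X_1} = S\,X_2^{\mathsf T}, \qquad \frac{\partial L}{\partial X_2} = X_1^{\mathsf T}\,S.$$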
Example #13
        public override void CalculateDerivate(Tensor s)
        {
            Tensor a = Terms[0].GetResult(), b = Terms[1].GetResult();

            if (Terms[0].ContainsTrainable)
            {
                Tensor g0 = CpuKernels.MultiplyFloat_GetGradient_0(s, a, b);
                Terms[0].Derivate(g0);
                g0.Dispose();
            }
            if (Terms[1].ContainsTrainable)
            {
                Tensor g1 = CpuKernels.MultiplyFloat_GetGradient_1(s, a, b);
                Terms[1].Derivate(g1);
                g1.Dispose();
            }
        }
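For the element-wise product, each gradient is presumably the upstream gradient scaled by the other operand:

$$g_0 = s \odot b, \qquad g_1 = s \odot a.$$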
Example #14
        public override unsafe void CalculateDerivate(Tensor s)
        {
            if (Terms[0].ContainsTrainable)
            {
                Tensor res = Terms[0].GetResult();

                if (PowerOf == 2)//todo move this power check to the power kernel
                {
                    Tensor combined = CpuKernels.Power2Float_GetGradient_0(s, res);
                    Terms[0].Derivate(combined);
                    combined.Dispose();
                }
                else
                {
                    throw new Exception("Unsupported Power factor!");
                }
            }
        }
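For PowerOf == 2 the chain rule gives the gradient directly, which is presumably what Power2Float_GetGradient_0 implements:

$$\frac{\partial L}{\partial x} = s \odot \frac{\partial x^2}{\partial x} = 2\,s \odot x.$$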
        public void AddRandom()
        {
            int arrSize    = 5;
            int tensorSize = 9;

            // initialize test tensors
            Tensor expected, calculated;

            float[] expectedArr = new float[tensorSize];
            (_, calculated) = CreateRandomTensor(tensorSize);
            Tensor[]  inputs = new Tensor[arrSize];
            float[][] arrays = new float[arrSize][];
            for (int i = 0; i < arrSize; i++)
            {
                (arrays[i], inputs[i]) = CreateRandomTensor(tensorSize);
                for (int j = 0; j < tensorSize; j++)
                {
                    expectedArr[j] += arrays[i][j];
                }
            }

            // add tensors
            expected = Tensor.ToDisposedTensor(expectedArr, new Shape(tensorSize), NumberType.Float32);


            CpuKernels.AddFloat32(calculated, inputs);

            Console.WriteLine(calculated.ToString());
            Console.WriteLine(expected.ToString());

            Assert.AreEqual(expected.ToString(), calculated.ToString());

            //need to dispose the tensors to release their memory
            //this isn't strictly required, but it is better to do it explicitly
            calculated.Dispose();
            for (int i = 0; i < arrSize; i++)
            {
                inputs[i].Dispose();
            }

            //expected.Dispose(); not needed: expected was created with Tensor.ToDisposedTensor above, so it is already a disposed tensor
        }
        public unsafe void ReluFloatCpu()
        {
            float[] vdata    = new float[] { 0.1f, -0.2f, 2, 3, -1, 2, -5, -10, 9, -8 };
            float[] graddata = new float[] { 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f };

            // pin the backing arrays for the duration of the kernel calls (the pointers themselves are not used directly)
            fixed(float *ptr_v = vdata, ptr_grad = graddata)
            {
                Tensor v    = Tensor.ToDisposedTensor(vdata, new Shape(vdata.Length), NumberType.Float32);
                Tensor grad = Tensor.ToDisposedTensor(graddata, new Shape(graddata.Length), NumberType.Float32);

                Tensor reluv    = CpuKernels.ReluFloat32(v);
                Tensor relugrad = CpuKernels.ReluFloat32_GetGradient_0(grad, v);

                Console.WriteLine(v);
                Console.WriteLine(grad);

                Console.WriteLine(reluv);
                Console.WriteLine(relugrad);
                //todo make checks
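                // A minimal sketch of the missing checks, assuming standard ReLU semantics:
                // relu(x) = max(0, x), and the gradient passes through only where the input
                // is positive. The expected arrays are hand-derived from vdata/graddata
                // under that assumption and are not taken from the library itself.
                float[] expectedRelu = new float[] { 0.1f, 0, 2, 3, 0, 2, 0, 0, 9, 0 };
                float[] expectedGrad = new float[] { 0.1f, 0, 0.1f, 0.1f, 0, 0.1f, 0, 0, 0.1f, 0 };

                Tensor expectedReluT = Tensor.ToDisposedTensor(expectedRelu, new Shape(expectedRelu.Length), NumberType.Float32);
                Tensor expectedGradT = Tensor.ToDisposedTensor(expectedGrad, new Shape(expectedGrad.Length), NumberType.Float32);

                Assert.AreEqual(expectedReluT.ToString(), reluv.ToString());
                Assert.AreEqual(expectedGradT.ToString(), relugrad.ToString());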

                reluv.Dispose();
                relugrad.Dispose();
            }
        }
Example #17
 public override Tensor CalculateResult()
 {
     return(CpuKernels.MultiplyFloat(Terms[0].GetResult(), Terms[1].GetResult()));
 }
Example #18
        public unsafe override Tensor CalculateResult()
        {
            Tensor v = Terms[0].GetResult();

            return(CpuKernels.ShrinkFloat(v, this.Shape, Terms[0].Shape, Divisor));
        }
        public override unsafe Tensor CalculateResult()
        {
            Tensor v = Terms[0].GetResult();

            return(CpuKernels.SigmoidFloat(v));
        }