Example #1
 public void AssertEqualTensorsData(OpenMined.Syft.Tensor.FloatTensor t1, OpenMined.Syft.Tensor.FloatTensor t2, double delta = 0.0d)
 {
     float[] data1 = new float[t1.Size];
     t1.DataBuffer.GetData(data1);
     float[] data2 = new float[t2.Size];
     t2.DataBuffer.GetData(data2);
     Assert.AreEqual(t1.DataBuffer.count, t2.DataBuffer.count);
     Assert.AreEqual(t1.DataBuffer.stride, t2.DataBuffer.stride);
     Assert.AreNotEqual(t1.DataBuffer.GetNativeBufferPtr(), t2.DataBuffer.GetNativeBufferPtr());
     for (var i = 0; i < data1.Length; ++i)
     {
         //Debug.LogFormat("Asserting {0} equals {1} with accuracy {2} where diff is {3}", data1[i], data2[i], delta, data1[i] - data2[i]);
         Assert.AreEqual(data1[i], data2[i], delta);
     }
 }
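
For comparison, a minimal CPU-only variant of the same tolerance check, assuming the values have already been read back into managed float arrays; this helper is illustrative and not part of the original test code:

 // Illustrative CPU-side analogue: compares two managed arrays element-wise
 // with an absolute tolerance, without touching any ComputeBuffers.
 public void AssertEqualArrays(float[] data1, float[] data2, double delta = 0.0d)
 {
     Assert.AreEqual(data1.Length, data2.Length);
     for (var i = 0; i < data1.Length; ++i)
     {
         Assert.AreEqual(data1[i], data2[i], delta);
     }
 }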
Example #2
        public FloatTensor SubScalarGPU(float value, FloatTensor result)
        {
            Debug.LogFormat("<color=blue>FloatTensor.SubScalarGPU dataOnGpu: {0}</color>", dataOnGpu);

            if (dataOnGpu)
            {
                var valBuffer = SendFloatToGpu(SubScalarKernel, value, "SubScalarScalar");

                shader.SetBuffer(SubScalarKernel, "SubScalarData", dataBuffer);
                shader.SetBuffer(SubScalarKernel, "SubScalarResult", result.dataBuffer);
                shader.Dispatch(SubScalarKernel, this.size, 1, 1);

                valBuffer.Release();
            }
            return(result);
        }
Example #3
        public void AddMatrixMultiplyGPU(FloatTensor tensor_1, FloatTensor tensor_2)
        {
            Debug.LogFormat("<color=blue>FloatTensor.add_matrix_multiply dataOnGpu: {0}</color>", dataOnGpu);

            // Tensor 1 (M x N), Tensor 2 (N x O), this (M x O)
            var bufferN = SendIntToGpu(AddMMKernel_, tensor_2.shape[0], "AddmmDimensionsN_");
            var bufferO = SendIntToGpu(AddMMKernel_, tensor_2.shape[1], "AddmmDimensionsO_");

            shader.SetBuffer(AddMMKernel_, "AddmmDataA_", dataBuffer);
            shader.SetBuffer(AddMMKernel_, "AddmmDataB_", tensor_1.DataBuffer);
            shader.SetBuffer(AddMMKernel_, "AddmmDataC_", tensor_2.DataBuffer);
            shader.Dispatch(AddMMKernel_, size, 1, 1);

            bufferN.Release();
            bufferO.Release();
        }
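
For reference, the accumulation the kernel performs is c[i, j] += sum over k of a[i, k] * b[k, j], with a of shape (M x N), b of shape (N x O), and this tensor of shape (M x O). A CPU sketch of that loop over flat row-major arrays is shown below; it is illustrative only, since the real work happens in the AddMMKernel_ compute shader whose source is not shown here:

        // Illustrative CPU version of the addmm accumulation over row-major arrays:
        // c[i, j] += sum_k a[i, k] * b[k, j]
        static void AddMatrixMultiplyCpu(float[] c, float[] a, float[] b, int m, int n, int o)
        {
            for (var i = 0; i < m; i++)
            {
                for (var j = 0; j < o; j++)
                {
                    var acc = 0.0f;
                    for (var k = 0; k < n; k++)
                    {
                        acc += a[i * n + k] * b[k * o + j];
                    }
                    c[i * o + j] += acc;
                }
            }
        }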
Example #4
        public FloatTensor AddMatrixVectorProduct(FloatTensor matrix, FloatTensor vector)
        {
            bool gpu = dataOnGpu & matrix.DataOnGpu & vector.DataOnGpu;
            bool cpu = !(dataOnGpu | matrix.DataOnGpu | vector.DataOnGpu);

            int[] ref_shape    = this.Shape;
            int[] matrix_shape = matrix.Shape;
            int[] vector_shape = vector.Shape;

            if (ref_shape.Length != 1)
            {
                throw new InvalidOperationException("Cannot perform this operation on a tensor with more than one dimension");
            }
            if (ref_shape [0] != vector_shape [0])
            {
                throw new InvalidOperationException(String.Format("Cannot add matrix-vector product to tensor: {0} & {1}.", ref_shape [0], vector_shape [0]));
            }
            if (matrix_shape [1] != vector_shape [0])
            {
                throw new InvalidOperationException(String.Format("Last dimension of matrix doesn't match: {0} vs {1}.", matrix_shape [1], vector_shape [0]));
            }

            if (gpu)
            {
                AddMatrixVectorProductGPU(matrix, vector);
            }
            else if (cpu)
            {
                var nCpu = SystemInfo.processorCount;
                Parallel.For(0, nCpu, workerId => {
                    var max = size * (workerId + 1) / nCpu;
                    for (var idx = size * workerId / nCpu; idx < max; idx++)
                    {
                        for (var j = 0; j < ref_shape [0]; j++)
                        {
                            Data [idx] += vector.Data [j] * matrix.Data [j + (idx * ref_shape [0])];
                        }
                    }
                });
            }
            else
            {
                Debug.Log("Data for all Tensors needs to be colocated on the same device. - CPU != GPU");
            }

            return(this);
        }
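
The CPU branch uses a chunking pattern that recurs throughout these methods: the index range [0, size) is split into one contiguous slice per logical processor, with worker k covering indices from size * k / nCpu up to, but not including, size * (k + 1) / nCpu. A standalone sketch of just that partitioning follows; the helper name is hypothetical and it assumes using System and System.Threading.Tasks:

        // Hypothetical helper illustrating the per-worker chunking used by the
        // Parallel.For loops above: each worker handles a contiguous slice of [0, size).
        static void ForEachIndexInParallel(int size, Action<int> body)
        {
            var nCpu = Environment.ProcessorCount;
            Parallel.For(0, nCpu, workerId =>
            {
                var max = size * (workerId + 1) / nCpu;
                for (var i = size * workerId / nCpu; i < max; i++)
                {
                    body(i);
                }
            });
        }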
Example #5
        public FloatTensor Remainder(FloatTensor divisor, bool inline = false)
        {
            if (!IsContiguous() || !divisor.IsContiguous())
            {
                throw new InvalidOperationException("All tensor must be contiguous, call Contiguous() to convert");
            }

            SameSizeDimensionsShapeAndLocation(ref divisor);
            if (inline & autograd)
            {
                throw new InvalidOperationException("Cannot call inline functions if you intend to run backprop.");
            }
            if (autograd)
            {
                throw new InvalidOperationException("Autograd not available for Remainder.");
            }

            var result = inline ? this : this.emptyTensorCopy();

            if (dataOnGpu)
            {
                result.Gpu(shader);
                if (inline)
                {
                    RemainderElemGPU_(divisor);
                    return(this);
                }
                else
                {
                    result = RemainderElemGPU(divisor, result);
                }
            }
            else
            {
                var nCpu = SystemInfo.processorCount;
                Parallel.For(0, nCpu, workerId => {
                    var max = size * (workerId + 1) / nCpu;
                    for (var i = size * workerId / nCpu; i < max; i++)
                    {
                        result[i] = this[i] % divisor[i];
                    }
                });
            }

            return(result);
        }
Example #6
 public FloatTensor MultiplyDerivative(FloatTensor other)
 {
     // TODO: check for corner cases
     if (dataOnGpu & other.DataOnGpu)
     {
         MultiplyDerivativeOnGpu(other);
     }
     else if (!dataOnGpu & !other.DataOnGpu)
     {
         //TODO: implement the function
     }
     else
     {
         Debug.Log("Data for all Tensors needs to be colocated on the same device. - CPU != GPU");
     }
     return(this);
 }
Example #7
        public FloatTensor Mul(float value, bool inline = false, FloatTensor result = null)
        {
            result = HookAutograd(ref result, value, "mul_scalar", inline);

            if (dataOnGpu)
            {
                if (!inline)
                {
                    return(MulScalarGPU(value, result));
                }
                MulScalarGPU_(value);
                return(this);
            }

            result.Data = data.AsParallel().Select(x => x * value).ToArray();
            return(result);
        }
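
The CPU path maps every element through a PLINQ pipeline: data.AsParallel().Select(...).ToArray() builds a new array in parallel. A standalone sketch of the same pattern is shown below; it adds AsOrdered() to make the source-order guarantee explicit, since an unordered parallel query does not promise that ToArray() preserves element positions, and it assumes using System.Linq:

        // PLINQ sketch of the scalar map used on the CPU path. AsOrdered() makes the
        // source-order guarantee explicit for the parallel query.
        static float[] MulScalarCpu(float[] data, float value)
        {
            return data.AsParallel().AsOrdered().Select(x => x * value).ToArray();
        }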
Example #8
        public FloatTensor Neg(bool inline = false, FloatTensor result = null)
        {
            result = HookAutograd(ref result, "neg", inline);

            if (dataOnGpu)
            {
                result.Gpu(shader);
                if (!inline)
                {
                    return(NegateGPU());
                }
                NegateGPU_();
                return(this);
            }
            result.Data = data.AsParallel().Select(x => - x).ToArray();
            return(result);
        }
Example #9
        public FloatTensor Add(FloatTensor x, bool inline = false, FloatTensor result = null)
        {
            if (!IsContiguous() || !x.IsContiguous())
            {
                throw new InvalidOperationException("All tensors must be contiguous, call Contiguous() to convert");
            }

            // Check if both tensors are compatible for sum
            SameSizeDimensionsShapeAndLocation(ref x);


            result = HookAutograd(ref result, ref x, "add_elem", inline);


            if (dataOnGpu)
            {
                if (inline)
                {
                    if (autograd)
                    {
                        throw new InvalidOperationException("Cannot call inline functions if you intend to run backprop.");
                    }


                    AddElemGPU_(x);
                    return(this);
                }
                else
                {
                    return(AddElemGPU(x, result));
                }
            }

            var nCpu = SystemInfo.processorCount;

            Parallel.For(0, nCpu, workerId => {
                var max = size * (workerId + 1) / nCpu;
                for (var i = size * workerId / nCpu; i < max; i++)
                {
                    result.Data [i] = x.Data [i] + Data [i];
                }
            });


            return(result);
        }
Example #10
        public FloatTensor Div(float value, bool inline = false, FloatTensor result = null)
        {
            result = HookAutograd(ref result, value, "div_scalar", inline);

            if (dataOnGpu)
            {
                result.Gpu(shader);
                if (!inline)
                {
                    return(DivScalarGPU(value, result));
                }
                DivScalarGPU_(value);
                return(this);
            }
            result.Data = data.AsParallel().Select(x => x / value).ToArray();
            return(result);
        }
Example #11
        public FloatTensor Add(FloatTensor x, bool inline = false)
        {
            // Check if both tensors are compatible for sum
            SameSizeDimensionsShapeAndLocation(ref x);

            FloatTensor result = inline ? this : this.emptyTensorCopy();

            if (dataOnGpu & x.dataOnGpu)
            {
                if (inline)
                {
                    if (autograd)
                    {
                        throw new InvalidOperationException("Cannot call inline functions if you intend to run backprop.");
                    }

                    AddElemGPU_(x);
                    return(this);
                }
                else
                {
                    result = AddElemGPU(x, result);
                }
            }
            else
            {
                var nCpu = SystemInfo.processorCount;
                Parallel.For(0, nCpu, workerId => {
                    var max = size * (workerId + 1) / nCpu;
                    for (var i = size * workerId / nCpu; i < max; i++)
                    {
                        result.Data [i] = x.Data [i] + Data [i];
                    }
                });
            }


            if (autograd)
            {
                HookAutograd(ref result, ref x, "add_elem");
            }


            return(result);
        }
Example #12
        public void MulElemGPU_(FloatTensor tensor)
        {
            Debug.LogFormat("<color=blue>FloatTensor.MulElemGPU_ dataOnGpu: {0}</color>", dataOnGpu);

            if (dataOnGpu)
            {
                if (tensor.id != this.id)
                {
                    shader.SetBuffer(MulElemKernel_, "MulElemDataA_", dataBuffer);
                    shader.SetBuffer(MulElemKernel_, "MulElemDataB_", tensor.dataBuffer);
                    shader.Dispatch(MulElemKernel_, this.size, 1, 1);
                }
                else
                {
                    PowScalarGPU_(2);
                }
            }
        }
Example #13
        public void RemainderElemGPU_(FloatTensor divisor)
        {
            Debug.LogFormat("<color=blue>FloatTensor.RemainderElemGPU_ dataOnGpu: {0}</color>", dataOnGpu);

            if (dataOnGpu)
            {
                if (this.id != divisor.id)
                {
                    shader.SetBuffer(RemainderElemKernel_, "RemainderElemDataA_", dataBuffer);
                    shader.SetBuffer(RemainderElemKernel_, "RemainderElemDataB_", divisor.DataBuffer);
                    shader.Dispatch(RemainderElemKernel_, this.size, 1, 1);
                }
                else
                {
                    this.ZeroGPU_();
                }
            }
        }
Example #14
        internal FloatTensor emptyTensorCopy()
        {
            FloatTensor result = factory.Create(
                _shape: this.shape,
                _data: data,
                _dataBuffer: dataBuffer,
                _shapeBuffer: shapeBuffer,
                _shader: shader,
                _copyData: true,
                _dataOnGpu: dataOnGpu,
                _autograd: autograd,
                _keepgrads: keepgrads,
                _creation_op: "emptyTensorCopy");

            result.Zero_();

            return(result);
        }
Example #15
        public void AddrGPU_(float beta, FloatTensor vec1, FloatTensor vec2, float alpha)
        {
            var strideBuffer = SendIntToGpu(AddrKernel_, shape[1], "AddrStride_");
            var betaBuffer   = SendFloatToGpu(AddrKernel_, beta, "AddrBeta_");
            var alphaBuffer  = SendFloatToGpu(AddrKernel_, alpha, "AddrAlpha_");

            // associate arrays with gpu
            shader.SetBuffer(AddrKernel_, "AddrMatrix_", dataBuffer);
            shader.SetBuffer(AddrKernel_, "AddrVec1_", vec1.DataBuffer);
            shader.SetBuffer(AddrKernel_, "AddrVec2_", vec2.DataBuffer);

            // launch kernel
            shader.Dispatch(AddrKernel_, size, 1, 1);

            strideBuffer.Release();
            betaBuffer.Release();
            alphaBuffer.Release();
        }
Example #16
        private bool SameSizeDimensionsShapeAndLocation(ref FloatTensor tensor)
        {
            bool use_backup = false;

            if (dataOnGpu != tensor.dataOnGpu)
            {
                throw new InvalidOperationException(String.Format("Tensors must be on same device : {0} != {1}.", dataOnGpu, tensor.dataOnGpu));
            }

            if (tensor.Size == 1 && Size != 1)
            {
                // should retry with scalar version
                return(true);
            }
            if (tensor.Size != 1 && Size == 1)
            {
                // should retry with scalar version
                return(true);
            }

            // Check if both tensors have same size
            if (tensor.Size != size)
            {
                throw new InvalidOperationException(String.Format("Tensors cannot be added since they have different sizes: {0} != {1}", tensor.Size, size));
            }

            // Check if both tensors have same number of dimensions
            if (tensor.Shape.Length != shape.Length)
            {
                throw new InvalidOperationException(
                          String.Format("Tensors cannot be added since they have different number of dimensions: {0} != {1}", tensor.Shape.Length, shape.Length));
            }

            // Check if both tensors have same shapes
            for (var i = 0; i < shape.Length; i++)
            {
                if (shape[i] != tensor.Shape[i])
                {
                    throw new InvalidOperationException("Tensors cannot be added since they have different shapes.");
                }
            }
            return(false);
        }
Example #17
        public void SubElemGPU_(FloatTensor tensor)
        {
            Debug.LogFormat("<color=blue>FloatTensor.SubElemGPU_ dataOnGpu: {0}</color>", dataOnGpu);

            if (dataOnGpu)
            {
                if (this.id != tensor.id)
                {
                    shader.SetBuffer(SubElemKernel_, "SubElemDataA_", dataBuffer);
                    shader.SetBuffer(SubElemKernel_, "SubElemDataB_", tensor.dataBuffer);
                    shader.Dispatch(SubElemKernel_, this.size, 1, 1);
                }
                else
                {
                    Debug.LogFormat("addition with itself should be multiplication instead", dataOnGpu);
                    this.Zero_();
                }
            }
        }
Example #18
        public FloatTensor MM(FloatTensor x, FloatTensor result = null)
        {
            if (!IsContiguous() || !x.IsContiguous())
            {
                throw new InvalidOperationException("All tensors must be contiguous, call Contiguous() to convert");
            }

            if (this.shape.Length != 2 || x.shape.Length != 2)
            {
                throw new InvalidOperationException(
                          "Cannot do MM on tensors that aren't 2 dimentional. Try calling view() to reshape");
            }

            result = HookAutograd(ref result, ref x, "mm", false, new int[] { shape[0], x.shape[1] });

            result.AddMatrixMultiply(this, x);

            return(result);
        }
Example #19
        public FloatTensor Neg()
        {
            if (dataOnGpu)
            {
                return(NegateGPU());
            }

            var result = new FloatTensor(_ctrl: ctrl, _shape: shape, _shader: this.shader);
            var nCpu   = SystemInfo.processorCount;

            Parallel.For(0, nCpu, workerId => {
                var max = data.Length * (workerId + 1) / nCpu;
                for (var i = data.Length * workerId / nCpu; i < max; i++)
                {
                    result.data [i] = -data [i];
                }
            });
            return(result);
        }
Example #20
        public FloatTensor Rsqrt()
        {
            if (dataOnGpu)
            {
                return(RsqrtGPU());
            }

            var result = new FloatTensor(_shape: shape, _shader: this.shader);
            var nCpu   = SystemInfo.processorCount;

            Parallel.For(0, nCpu, workerId => {
                var max = data.Length * (workerId + 1) / nCpu;
                for (var i = data.Length * workerId / nCpu; i < max; i++)
                {
                    result.data[i] = 1 / (float)Math.Sqrt(data[i]);
                }
            });
            return(result);
        }
Example #21
 public FloatTensor RemainderElemGPU(FloatTensor divisor, FloatTensor result)
 {
     Debug.LogFormat("<color=blue>FloatTensor.RemainderElemGPU dataOnGpu: {0}</color>", dataOnGpu);
     if (dataOnGpu)
     {
         if (this.Id != divisor.Id)
         {
             shader.SetBuffer(RemainderElemKernel, "RemainderElemDataA", this.DataBuffer);
             shader.SetBuffer(RemainderElemKernel, "RemainderElemDataB", divisor.DataBuffer);
             shader.SetBuffer(RemainderElemKernel, "RemainderElemResult", result.DataBuffer);
             shader.Dispatch(RemainderElemKernel, this.size, 1, 1);
         }
         else
         {
             result.ZeroGPU_();
         }
     }
     return(result);
 }
Example #22
        public void DivElemGPU_(FloatTensor tensor)
        {
            Debug.LogFormat("<color=blue>FloatTensor.DivElemGPU_ dataOnGpu: {0}</color>", dataOnGpu);

            if (dataOnGpu)
            {
                if (tensor.id != this.id)
                {
                    shader.SetBuffer(DivElemKernel_, "DivElemDataA_", dataBuffer);
                    shader.SetBuffer(DivElemKernel_, "DivElemDataB_", tensor.dataBuffer);
                    shader.Dispatch(DivElemKernel_, this.size, 1, 1);
                }
                else
                {
                    this.ZeroGPU_();
                    this.AddScalarGPU_((float)1);
                }
            }
        }
Example #23
        public FloatTensor Sin(bool inline = false)
        {
            FloatTensor result = factory.ctrl.floatTensorFactory.Create(shape);

            if (dataOnGpu)
            {
                result.Gpu(shader);
                if (inline)
                {
                    throw new NotImplementedException();
                }
                else
                {
                    return(SinGPU(result));
                }
            }
            result.Data = data.AsParallel().Select(x => (float)Math.Sin((double)x)).ToArray();
            return(result);
        }
Example #24
        public FloatTensor Sigmoid(bool inline = false, FloatTensor result = null)
        {
            if (dataOnGpu)
            {
                if (!inline)
                {
                    return(SigmoidGPU(this.emptyTensorCopy()));
                }
                if (autograd)
                {
                    throw new InvalidOperationException(
                              "Cannot call inline functions if you intend to run backprop.");
                }

                SigmoidGPU_();
                return(this);
            }

            result = HookAutograd(ref result, "sigmoid", inline);

            var nCpu = SystemInfo.processorCount;

            Parallel.For(0, nCpu, workerId =>
            {
                var max = size * (workerId + 1) / nCpu;
                for (var i = size * workerId / nCpu; i < max; i++)
                {
                    if (this[i] >= 0)
                    {
                        var s     = Math.Exp(-(double)this[i]);
                        result[i] = (float)(1 / (1.0f + s));
                    }
                    else
                    {
                        var s     = Math.Exp((double)this[i]);
                        result[i] = (float)(s / (1.0f + s));
                    }
                }
            });

            return(result);
        }
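
Both branches evaluate the same function, sigmoid(x) = 1 / (1 + e^(-x)); they just pick the algebraic form whose exponent is non-positive, so Math.Exp never overflows for large |x|. A standalone scalar version of the same trick, assuming using System:

        // Numerically stable scalar sigmoid, mirroring the branches above:
        // the argument passed to Math.Exp is always <= 0, so it cannot overflow.
        static float SigmoidStable(float x)
        {
            if (x >= 0)
            {
                var s = Math.Exp(-(double)x);
                return (float)(1.0 / (1.0 + s));
            }
            var e = Math.Exp((double)x);
            return (float)(e / (1.0 + e));
        }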
Example #25
        public FloatTensor View(int[] new_shape, bool inline = false)
        {
            var newSize = 1;

            for (var i = 0; i < new_shape.Length; i++)
            {
                newSize *= new_shape[i];
            }

            var result = this;

            if (newSize != size)
            {
                return(result);
            }
            if (dataOnGpu)
            {
                if (inline)
                {
                    shape = new_shape;

                    shapeBuffer.Release();
                    shapeBuffer = new ComputeBuffer(shape.Length, sizeof(int));
                    shapeBuffer.SetData(shape);
                }
                else
                {
                    result = new FloatTensor(_controller: controller, _shape: new_shape, _shader: this.shader);
                    result.Gpu(shader);
                    CopyBuffer(dataBuffer, result.DataBuffer);
                }
            }
            else if (inline)
            {
                shape = new_shape;
            }
            else
            {
                result = new FloatTensor(_controller: controller, _data: data, _shape: new_shape, _shader: shader);
            }
            return(result);
        }
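
View only changes the shape when the requested dimensions describe exactly the same number of elements as the current shape; otherwise the tensor is returned unchanged. The size check in isolation, as a hypothetical helper shown only to spell the rule out:

        // Hypothetical helper: a view/reshape is only valid when element counts match.
        static bool SameElementCount(int[] shape, int[] newShape)
        {
            var oldSize = 1;
            foreach (var d in shape)
            {
                oldSize *= d;
            }
            var newSize = 1;
            foreach (var d in newShape)
            {
                newSize *= d;
            }
            return oldSize == newSize;
        }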
Example #26
        public FloatTensor MulElemGPU(FloatTensor tensor, FloatTensor result)
        {
            Debug.LogFormat("<color=blue>FloatTensor.MulElemGPU dataOnGpu: {0}</color>", dataOnGpu);

            if (dataOnGpu)
            {
                if (tensor.id != this.id)
                {
                    shader.SetBuffer(MulElemKernel, "MulElemDataA", dataBuffer);
                    shader.SetBuffer(MulElemKernel, "MulElemDataB", tensor.dataBuffer);
                    shader.SetBuffer(MulElemKernel, "MulElemDataResult", result.dataBuffer);
                    shader.Dispatch(MulElemKernel, this.size, 1, 1);
                }
                else
                {
                    return(this.PowScalarGPU(2, result));
                }
            }
            return(result);
        }
Example #27
        public void Mul_(FloatTensor x)
        {
            SameSizeDimensionsShapeAndLocation(ref x);

            if (dataOnGpu)
            {
                MulElemGPU_(x);
            }
            else
            {
                var nCpu = SystemInfo.processorCount;
                Parallel.For(0, nCpu, workerId => {
                    var max = size * (workerId + 1) / nCpu;
                    for (var i = size * workerId / nCpu; i < max; i++)
                    {
                        data [i] *= x.data [i];
                    }
                });
            }
        }
Example #28
        public static FloatTensor Random(FloatTensorFactory factory, int[] dims, int random_seed = 0)
        {
            int dims_prod = 1;

            foreach (int dim in dims)
            {
                dims_prod *= dim;
            }
            FloatTensor result = factory.ctrl.floatTensorFactory.Create(dims);

            if (random_seed > 0)
            {
                UnityEngine.Random.InitState(random_seed);
            }
            for (int i = 0; i < dims_prod; i++)
            {
                result.Data[i] = UnityEngine.Random.value;
            }
            return(result.View(dims));
        }
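
Seeding goes through UnityEngine.Random.InitState, so calling Random with the same positive random_seed reproduces the same tensor, while the default of 0 leaves Unity's global random state untouched. The same seed-then-fill pattern over a plain array is sketched below; it is illustrative only, bypasses the tensor factory, and assumes using UnityEngine:

        // Illustrative: seed Unity's global RNG (only for positive seeds) and fill
        // an array with uniform values in [0, 1], matching the loop above.
        static float[] RandomValues(int count, int randomSeed = 0)
        {
            if (randomSeed > 0)
            {
                UnityEngine.Random.InitState(randomSeed);
            }
            var values = new float[count];
            for (var i = 0; i < count; i++)
            {
                values[i] = UnityEngine.Random.value;
            }
            return values;
        }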
Example #29
        public void MulElementwiseGPU(FloatTensor other)
        {
            Debug.LogFormat("<color=blue>FloatTensor.inline_elementwise_mult dataOnGpu: {0}</color>", dataOnGpu);

            if (size == other.Size)
            {
                if (dataOnGpu && other.DataOnGpu)
                {
                    // correspond tensor buffers with shader kernel buffers
                    shader.SetBuffer(ElementwiseMultMainKernel, "data_a", dataBuffer);
                    shader.SetBuffer(ElementwiseMultMainKernel, "data_b", other.DataBuffer);

                    shader.Dispatch(ElementwiseMultMainKernel, 1, 1, 1);
                }
            }
            else
            {
                Debug.Log("Tensors do not have the same number of elements!");
            }
        }
Example #30
        public FloatTensor MulScalar(float scalar)
        {
            if (dataOnGpu)
            {
                return(MultScalarGPU_(scalar));
            }

            var result = new FloatTensor(shape, dataOnGpu);
            var nCpu   = SystemInfo.processorCount;

            Parallel.For(0, nCpu, workerId =>
            {
                var max = size * (workerId + 1) / nCpu;
                for (var i = size * workerId / nCpu; i < max; i++)
                {
                    result.Data[i] = Data[i] * scalar;
                }
            });
            return(result);
        }