Example #1
0
        /// <summary>
        /// Applies a binary elementwise kernel (named by <paramref name="kernelName"/>)
        /// across all input tensors with broadcasting, folding left-to-right:
        /// result = op(...op(op(t0, t1), t2)..., tN).
        /// Intermediate results ping-pong between two output buffers so each
        /// dispatch reads the previous pass's output.
        /// NOTE(review): with a single input tensor the loop never runs and
        /// null is returned — callers presumably always pass at least two.
        /// </summary>
        public override Tensor ElementwiseWithBroadcast(string kernelName, Tensor[] tensors)
        {
            Assert.IsTrue(tensors.Length > 0);

            // Both scratch buffers use the broadcast (max) shape over all inputs.
            Tensor pingBuffer = NewTensor(TensorExtensions.MaxShape(tensors));
            Tensor pongBuffer = null;

            // A second buffer is only needed when more than one dispatch occurs,
            // i.e. when intermediate results must alternate between buffers.
            if (tensors.Length > 2)
            {
                pongBuffer = NewTensor(TensorExtensions.MaxShape(tensors));
            }

            var X  = tensors[0];
            var fn = BestKernel(ComputeKernelLibrary.Broadcast(X.shape, pingBuffer.shape, kernelName));

            Tensor O = null;

            for (int i = 1; i < tensors.Length; ++i)
            {
                var B = tensors[i];

                // Odd passes write to the ping buffer, even passes to the pong buffer.
                if (i % 2 == 1)
                {
                    O = pingBuffer;
                }
                else
                {
                    O = pongBuffer;
                }

                fn.SetTensor("X", X.shape, Pin(X).buffer);
                fn.SetTensor("O", O.shape, Pin(O).buffer);
                fn.SetTensor("B", B.shape, Pin(B).buffer, Pin(B).offset);

                fn.Dispatch();

                // This pass's output becomes the next pass's left operand.
                X = O;
            }

            return O;
        }
Example #2
0
        /// <summary>
        /// Concatenates <paramref name="tensors"/> along <paramref name="axis"/>
        /// by copying each input into the output at a running offset.
        /// </summary>
        public override Tensor Concat(Tensor[] tensors, int axis)
        {
            // Output shape is the concatenation of every input shape along `axis`.
            var inputShapes = tensors.Select(t => t.shape).ToArray();
            var O = NewTensor(TensorExtensions.Concat(inputShapes, axis));

            // Resolve a possibly-negative axis to its canonical index in O's shape.
            axis = O.shape.Axis(axis);

            // Running write offset into O for each of the 4 dimensions;
            // only the concatenation axis ever advances.
            var writeOffset = new int[4];

            for (var i = 0; i < tensors.Length; ++i)
            {
                var X = tensors[i];
                var fn = BestKernel(ComputeKernelLibrary.Copy(X.shape, O.shape));

                fn.SetTensor("X", X.shape, Pin(X).buffer);
                fn.SetTensor("O", O.shape, Pin(O).buffer);

                // The copy kernel reads the destination offset from "_Pad".
                fn.shader.SetInts("_Pad", writeOffset);

                fn.Dispatch();

                writeOffset[axis] += X.shape[axis];
            }

            return O;
        }
Example #3
0
        /// <summary>
        /// Performs static shape inference over every layer in <paramref name="model"/>,
        /// given the concrete shapes of the model inputs.
        /// Returns one inferred output shape per layer (in layer order) and also
        /// exposes all known shapes, keyed by tensor name, via
        /// <paramref name="shapesByName"/>.
        /// Throws <see cref="NotImplementedException"/> for layer types whose output
        /// shape cannot be inferred here (3D ops, Squeeze/Unsqueeze, unknown types).
        /// </summary>
        /// <param name="model">Model whose layers are walked in order.</param>
        /// <param name="inputShapes">Known shapes for the model's input tensors, by name.</param>
        /// <param name="shapesByName">All shapes discovered during inference, keyed by tensor name.</param>
        /// <returns>Array of inferred output shapes, one per layer, in layer order.</returns>
        static public TensorShape[] ListTemporaryTensorShapes(Model model, IDictionary <string, TensorShape> inputShapes,
                                                              out IDictionary <string, TensorShape> shapesByName)
        {
            Profiler.BeginSample("Barracuda.ListTemporaryTensorShapes");
            var shapes = new List <TensorShape>();

            // Seed the name->shape map with the caller-provided input shapes.
            shapesByName = new Dictionary <string, TensorShape>();
            foreach (var entry in inputShapes)
            {
                shapesByName.Add(entry.Key, entry.Value);
            }

            TensorShape X;

            // NOTE(review): if the default input's shape is missing, X is left
            // default-initialized here — presumably inputShapes always covers it.
            shapesByName.TryGetValue(GetDefaultInputName(model), out X); // default input
            var O = X;

            foreach (var l in model.layers)
            {
                // Resolve this layer's effective input shape.
                if (l.inputs.Length > 0 && shapesByName.ContainsKey(l.inputs[0]))
                {
                    X = shapesByName[l.inputs[0]];
                }
                else
                {
                    X = O; // previous output is used, if-and-only-if layer has no explicit inputs
                }
                if (l.type == Layer.Type.Dense)
                {
                    Assert.IsNotNull(l.datasets);
                    // Dense output: (flattened batch) x (weight matrix width).
                    var W = l.datasets[0].shape;
                    O = new TensorShape(X.flatHeight, W.flatWidth);
                }
                else if (
                    l.type == Layer.Type.Conv2D ||
                    l.type == Layer.Type.DepthwiseConv2D)
                {
                    var K = l.datasets[0].shape;

                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    // Resolve symbolic padding (e.g. "same") against the kernel first.
                    var pad = X.AdjustPadToKernel(K, l.stride, l.pad);
                    O = X.ApplyKernel(K, l.stride, pad);
                }
                else if (
                    l.type == Layer.Type.Conv2DTrans)
                {
                    var K = l.datasets[0].shape;
                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    // pool size is treated as output_adjustment aka output_padding here
                    var outputAdjustment = l.pool;
                    var pad = X.AdjustPadToKernel(K, l.stride, l.pad);
                    O = X.ApplyKernelInverse(K, l.stride, pad, outputAdjustment);
                }
                else if (
                    l.type == Layer.Type.Upsample2D)
                {
                    // pool size is treated as upsample coefficient here
                    Assert.IsNotNull(l.pool);
                    Assert.AreEqual(l.pool.Length, 2);
                    // pool[0] scales width, pool[1] scales height.
                    O = new TensorShape(X.batch, X.height * l.pool[1], X.width * l.pool[0], X.channels);
                }
                else if (
                    l.type == Layer.Type.MaxPool2D ||
                    l.type == Layer.Type.AvgPool2D)
                {
                    Assert.IsNotNull(l.pool);
                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    var pad = X.AdjustPadToPool(l.pool, l.stride, l.pad);
                    O = X.ApplyPool(l.pool, l.stride, pad);
                }
                else if (
                    l.type == Layer.Type.GlobalMaxPool2D ||
                    l.type == Layer.Type.GlobalAvgPool2D)
                {
                    // Global pooling collapses the spatial dimensions to 1x1.
                    O = new TensorShape(X.batch, 1, 1, X.channels);
                }
                else if (
                    l.type == Layer.Type.Border2D ||
                    l.type == Layer.Type.Pad2DReflect ||
                    l.type == Layer.Type.Pad2DSymmetric ||
                    l.type == Layer.Type.Pad2DEdge)
                {
                    Assert.IsNotNull(l.pad);
                    O = X.ApplyBorder(l.pad);
                }
                else if (
                    l.type == Layer.Type.Conv3D ||
                    l.type == Layer.Type.Conv3DTrans ||
                    l.type == Layer.Type.Upsample3D ||
                    l.type == Layer.Type.MaxPool3D ||
                    l.type == Layer.Type.AvgPool3D ||
                    l.type == Layer.Type.GlobalMaxPool3D ||
                    l.type == Layer.Type.GlobalAvgPool3D ||
                    l.type == Layer.Type.Border3D)
                {
                    // 3D shape inference is not supported by this implementation.
                    throw new NotImplementedException();
                }
                else if (
                    l.type == Layer.Type.RandomNormal ||
                    l.type == Layer.Type.RandomUniform)
                {
                    Assert.IsNotNull(l.pool);
                    // pool size is treated as shape constant, if not empty
                    // otherwise shape of the previous tensor is used
                    if (l.pool.Length > 0)
                    {
                        O = new TensorShape(l.pool);
                    }
                    else
                    {
                        O = X;
                    }
                }
                else if (
                    l.type == Layer.Type.Multinomial)
                {
                    // pool[0] is the number of samples drawn per batch item.
                    Assert.IsNotNull(l.pool);
                    Assert.AreEqual(l.pool.Length, 1);
                    O = new TensorShape(X.batch, l.pool[0]);
                }
                else if (
                    l.type == Layer.Type.OneHot)
                {
                    // pool[0] is the one-hot depth; each input feature expands to `depth` values.
                    Assert.IsNotNull(l.pool);
                    Assert.AreEqual(l.pool.Length, 1);
                    int features = X.flatWidth;
                    int depth    = l.pool[0];
                    O = new TensorShape(X.batch, 1, features, depth);
                }
                else if (
                    l.type == Layer.Type.Add ||
                    l.type == Layer.Type.Sub ||
                    l.type == Layer.Type.Mul ||
                    l.type == Layer.Type.Div ||
                    l.type == Layer.Type.Pow ||
                    l.type == Layer.Type.Min ||
                    l.type == Layer.Type.Max ||
                    l.type == Layer.Type.Mean ||
                    l.type == Layer.Type.Greater ||
                    l.type == Layer.Type.GreaterEqual ||
                    l.type == Layer.Type.Less ||
                    l.type == Layer.Type.LessEqual ||
                    l.type == Layer.Type.Equal ||
                    l.type == Layer.Type.LogicalOr ||
                    l.type == Layer.Type.LogicalAnd ||
                    l.type == Layer.Type.LogicalXor)
                {
                    // gather shapes by names
                    var list = new List <TensorShape>(l.inputs.Length);
                    foreach (var i in l.inputs)
                    {
                        if (shapesByName.ContainsKey(i))
                        {
                            list.Add(shapesByName[i]);
                        }
                    }

                    // Broadcasting elementwise ops: output is the max over input shapes.
                    O = TensorExtensions.Max(list.ToArray());
                }
                else if (
                    l.type == Layer.Type.ReduceL1 ||
                    l.type == Layer.Type.ReduceL2 ||
                    l.type == Layer.Type.ReduceLogSum ||
                    l.type == Layer.Type.ReduceLogSumExp ||
                    l.type == Layer.Type.ReduceMax ||
                    l.type == Layer.Type.ReduceMean ||
                    l.type == Layer.Type.ReduceMin ||
                    l.type == Layer.Type.ReduceProd ||
                    l.type == Layer.Type.ReduceSum ||
                    l.type == Layer.Type.ReduceSumSquare)
                {
                    O = X.Reduce(l.axis);
                }
                else if (
                    l.type == Layer.Type.Flatten)
                {
                    O = X.Flatten();
                }
                else if (
                    l.type == Layer.Type.Reshape)
                {
                    // pool size is treated as reshape coefficient, if not empty
                    // otherwise shape of the 2nd input tensor is used
                    var size = l.pool;

                    Assert.IsNotNull(size);
                    if (size.Length == 0 && l.inputs.Length > 1)
                    {
                        size = shapesByName[l.inputs[1]].ToArray();
                    }

                    Assert.AreEqual(size.Length, 4);
                    // pool size is treated as reshape coefficient here
                    O = X.Reshape(size);
                }
                else if (
                    l.type == Layer.Type.Transpose)
                {
                    // 2D transpose on the flattened view: swap height and width.
                    O = new TensorShape(X.flatWidth, X.flatHeight);
                }
                else if (
                    l.type == Layer.Type.Squeeze ||
                    l.type == Layer.Type.Unsqueeze)
                {
                    throw new NotImplementedException();
                }
                else if (
                    l.type == Layer.Type.Concat)
                {
                    // gather shapes by names
                    var list = new List <TensorShape>(l.inputs.Length);
                    foreach (var i in l.inputs)
                    {
                        if (shapesByName.ContainsKey(i))
                        {
                            list.Add(shapesByName[i]);
                        }
                    }

                    O = TensorExtensions.Concat(list.ToArray(), l.axis);
                }
                else if (
                    l.type == Layer.Type.StridedSlice)
                {
                    // pad = start indices, pool = end indices, stride = step sizes.
                    Assert.IsNotNull(l.pad);
                    Assert.IsNotNull(l.pool);
                    Assert.IsNotNull(l.stride);
                    O = X.ApplyStridedSlice(l.pad, l.pool, l.stride);
                }
                else if (
                    l.type == Layer.Type.Tile)
                {
                    // pool size is treated as tiling coefficient here
                    Assert.IsNotNull(l.pool);
                    Assert.AreEqual(l.pool.Length, 4);
                    var scale = l.pool;
                    O = X.Scale(scale);
                }
                else if (
                    l.type == Layer.Type.Load)
                {
                    // Constant tensor: shape comes straight from the stored dataset.
                    O = l.datasets[0].shape;
                }
                else if (// elementwise operations
                    l.type == Layer.Type.Nop ||
                    l.type == Layer.Type.Activation ||
                    l.type == Layer.Type.ScaleBias ||
                    l.type == Layer.Type.Normalization ||
                    l.type == Layer.Type.LRN ||
                    l.type == Layer.Type.Dropout ||
                    l.type == Layer.Type.LogicalNot ||
                    l.activation == Layer.Activation.PRelu)
                {
                    // works in place, keeps the same shape size
                    O = X;
                }
                else
                {
                    throw new NotImplementedException();
                }

                // Record this layer's output shape, both positionally and by name.
                shapes.Add(O);
                shapesByName.Add(l.name, O);
            }

            Profiler.EndSample();
            return(shapes.ToArray());
        }