Code Example #1
 /// <inheritdoc/>
 Tensor IOps.Conv3D(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, Layer.FusedActivation fusedActivation)
 {
     LogLayerSummary(X.shape + " # " + K.shape + " + (" + B.flatWidth + ")");
     var O = m_Ops.Conv3D(X, K, B, stride, pad, fusedActivation);
     LogOutputTensorSummary(O, Prefix + "Conv3D");
     return O;
 }
Code Example #2
 /// <inheritdoc/>
 Tensor IOps.Dense(Tensor X, Tensor W, Tensor B, Layer.FusedActivation fusedActivation)
 {
     LogLayerSummary(X.shape + " * (" + W.flatHeight + "," + W.flatWidth + ") + (" + B.flatWidth + ")");
     var O = m_Ops.Dense(X, W, B, fusedActivation);
     LogOutputTensorSummary(O, Prefix + "Dense");
     return O;
 }
Code Example #3
        Tensor IOps.Dense(Tensor X, Tensor W, Tensor B, Layer.FusedActivation fusedActivation)
        {
            var O = m_Ops.Dense(X, W, B, fusedActivation);

            m_Alu += (long)X.flatHeight * (long)X.flatWidth * (long)W.flatWidth * 2L;
            m_Mem += (long)X.length + (long)W.length + (long)B.length + (long)O.length;
            return O;
        }
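The ALU counter above is the usual 2·M·N·K multiply-accumulate count for the underlying matrix product. A quick worked example with hypothetical shapes (not taken from the source):

        // X: (flatHeight=64, flatWidth=256), W: (flatHeight=256, flatWidth=128)
        long alu = 64L * 256L * 128L * 2L;                // 4,194,304 ops
        long mem = 64 * 256 + 256 * 128 + 128 + 64 * 128; // X + W + B + O = 57,472 elements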
Code Example #4
        /// <inheritdoc/>
        Tensor IOps.Conv2D(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, Layer.FusedActivation fusedActivation)
        {
            var Y = m_Ops1.Conv2D(X, K, B, stride, pad, fusedActivation);
            var Z = m_Ops2.Conv2D(X, K, B, stride, pad, fusedActivation);

            CheckSame(Y, Z, Layer.Type.Conv2D);
            return Y;
        }
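CheckSame is the two-backend comparison hook used throughout this wrapper; its body is not shown in these excerpts. A minimal sketch of what such a check could do (hypothetical, not Barracuda's actual implementation; assumes Tensor.ToReadOnlyArray and an absolute tolerance):

        static void CheckSame(Tensor Y, Tensor Z, Layer.Type type, float tolerance = 1e-4f)
        {
            // Shapes must match exactly; data may differ by rounding between backends.
            Assert.AreEqual(Y.shape, Z.shape);
            var y = Y.ToReadOnlyArray();
            var z = Z.ToReadOnlyArray();
            for (int i = 0; i < y.Length; ++i)
                Assert.IsTrue(Math.Abs(y[i] - z[i]) <= tolerance,
                              $"{type}: backends disagree at element {i} ({y[i]} vs {z[i]})");
        }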
Code Example #5
        /// <inheritdoc/>
        Tensor IOps.Conv3D(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, Layer.FusedActivation fusedActivation)
        {
            D.Log(X.shape + " # " + K.shape + " + (" + B.flatWidth + ")");
            var O = m_Ops.Conv3D(X, K, B, stride, pad, fusedActivation);

            O.PrintDataPart(32, Prefix + "Conv3D");
            return O;
        }
Code Example #6
        /// <inheritdoc/>
        Tensor IOps.Dense(Tensor X, Tensor W, Tensor B, Layer.FusedActivation fusedActivation)
        {
            var Y = m_Ops1.Dense(X, W, B, fusedActivation);
            var Z = m_Ops2.Dense(X, W, B, fusedActivation);

            CheckSame(Y, Z, Layer.Type.Dense);
            return Y;
        }
Code Example #7
        /// <inheritdoc/>
        Tensor IOps.Dense(Tensor X, Tensor W, Tensor B, Layer.FusedActivation fusedActivation)
        {
            D.Log(X.shape + " * (" + W.flatHeight + "," + W.flatWidth + ") + (" + B.flatWidth + ")");
            var O = m_Ops.Dense(X, W, B, fusedActivation);

            O.PrintDataPart(32, Prefix + "Dense");
            return O;
        }
Code Example #8
        Tensor IOps.Conv2D(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, Layer.FusedActivation fusedActivation)
        {
            var  O = m_Ops.Conv2D(X, K, B, stride, pad, fusedActivation);
            long m = (long)O.batch * (long)O.width * (long)O.height;
            long n = (long)X.channels;
            long k = (long)K.kernelWidth * (long)K.kernelHeight * (long)K.channels;

            m_Alu += m * n * k * 2L;
            m_Mem += (long)X.length + (long)K.length + (long)B.length + (long)O.length;
            return O;
        }
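Read m, n, k as a GEMM-style decomposition of the convolution. With hypothetical shapes, the accumulated count works out as follows:

        // Hypothetical: O = (batch=1, width=32, height=32), X.channels = 16,
        // K = 3x3 with K.channels = 16.
        long m = 1L * 32 * 32;     // 1,024 output positions
        long n = 16L;              // input channels
        long k = 3L * 3 * 16;      // 144 taps per channel
        long alu = m * n * k * 2L; // 4,718,592 ops added to m_Alu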
Code Example #9
        /// <summary>
        /// Check if `fusedActivation` is supported in-place
        /// </summary>
        /// <param name="fusedActivation">fused activation type</param>
        /// <returns>`true` if supported in-place</returns>
        protected override bool IsFusedActivationSupported(Layer.FusedActivation fusedActivation)
        {
            switch (fusedActivation)
            {
            case Layer.FusedActivation.Relu:
            case Layer.FusedActivation.None:
                return true;

            default:
                return false;
            }
        }
Code Example #10
        public MultiLayerPerception(Shape shape, Layer.FusedActivation activation = Layer.FusedActivation.Relu)
        {
            _shape = shape;
            ModelBuilder mb = new ModelBuilder();

            m_cache = new float[_shape.WeightCount];
            { // Build the model
                TensorCachingAllocator tca = new TensorCachingAllocator();
                string prevLayerName = mb.Input(LayerNames.Input, new int[] { -1, 1, 1, _shape.inputSize }).name;
                prevLayerName = mb.Dense(LayerNames.Hidden, prevLayerName, tca.Alloc(new TensorShape(_shape.inputSize, _shape.hiddenSize)), tca.Alloc(new TensorShape(1, _shape.hiddenSize))).name;
                prevLayerName = MBActivationByName(ref mb, LayerNames.HiddenActive, prevLayerName, activation).name;
                prevLayerName = mb.Dense(LayerNames.Output, prevLayerName, tca.Alloc(new TensorShape(_shape.hiddenSize, _shape.outputSize)), tca.Alloc(new TensorShape(1, _shape.outputSize))).name;
                prevLayerName = MBActivationByName(ref mb, LayerNames.OutputActive, prevLayerName, activation).name;
                tca.Dispose();
                Debug.Assert(prevLayerName == mb.Output(prevLayerName));
                model = mb.model;
            }
            PrepareCache();
        }
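A hypothetical way to run the model this constructor builds, assuming the class exposes its model field and that Shape carries settable inputSize/hiddenSize/outputSize members (names taken from the snippet above); the worker API itself is standard Barracuda:

        var mlp = new MultiLayerPerception(
            new Shape { inputSize = 4, hiddenSize = 16, outputSize = 2 });

        using (var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, mlp.model))
        using (var input = new Tensor(1, 1, 1, 4)) // matches the (-1, 1, 1, inputSize) input layer
        {
            worker.Execute(input);
            var output = worker.PeekOutput(); // owned by the worker; no need to dispose
            Debug.Log(output[0]);
        }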
Code Example #11
        public override Tensor Dense(Tensor X, Tensor W, Tensor B, Layer.FusedActivation fusedActivation)
        {
            if (m_Compiled.kernel.shader == null)
            {
                return base.Dense(X, W, B, fusedActivation);
            }

            Assert.IsTrue(W.dimensions <= 2);
            Assert.AreEqual(B.flatWidth, B.length);
            Assert.AreEqual(X.flatWidth, W.flatHeight);

            if (ShouldFlattenInputForDenseLayer(X.shape))
            {
                Assert.IsNotNull(m_Compiled.instructions[1].kernel.shader);
                var flattenedX = NewTensor(m_Compiled.instructions[1].shape);
                var flattenFn  = m_Compiled.instructions[1].kernel;

                flattenFn.SetTensor(_DeclX, _DataX, X.shape, Pin(X).buffer);
                flattenFn.SetTensor(_DeclO, _DataO, flattenedX.shape, Pin(flattenedX).buffer);
                flattenFn.Dispatch();

                X = flattenedX;
            }

            Assert.IsNotNull(m_Compiled.kernel.shader);
            var O  = NewTensor(m_Compiled.shape);
            var fn = m_Compiled.kernel;

            fn.SetTensor(_DeclX, _DataX, X.shape, Pin(X).buffer);
            fn.SetTensor(_DeclO, _DataO, O.shape, Pin(O).buffer);
            fn.SetTensorDecl(_DeclW, W.shape, Pin(W).offset);
            fn.SetTensorDecl(_DeclB, B.shape, Pin(B).offset);
            Assert.AreEqual(Pin(W).buffer, Pin(B).buffer);
            fn.SetTensorBuffer(_DataWBK, Pin(W).buffer);
            fn.shader.SetInt("_ActivationMode", (int)fusedActivation);

            fn.Dispatch();

            return ApplyUnsupportedFusedActivationIfNeeded(fusedActivation, O);
        }
Code Example #12
        // ---------------------------------------------------------------------------------
        private Tensor ApplyUnsupportedFusedActivationIfNeeded(Layer.FusedActivation fusedActivation, Tensor O)
        {
            if (!IsFusedActivationSupported(fusedActivation))
            {
                CompiledInstruction instructionActivation = m_Compiled.instructions[m_Compiled.instructions.Length - 1];
                Assert.IsNotNull(instructionActivation.kernel.shader);

                var fnActivation = instructionActivation.kernel;
                var Oactivation  = NewTensor(O.shape);

                fnActivation.SetTensor("X", O.shape, Pin(O).buffer);
                fnActivation.SetTensor("O", Oactivation.shape, Pin(Oactivation).buffer);

                fnActivation.shader.SetFloat(_Alpha, 0.0f);
                fnActivation.shader.SetFloat(_Beta, 0.0f);

                fnActivation.Dispatch();
                return Oactivation;
            }

            return O;
        }
Code Example #13
        /// <inheritdoc/>
        public override Tensor Dense(Tensor X, Tensor W, Tensor B, Layer.FusedActivation fusedActivation)
        {
            Assert.IsTrue(W.dimensions <= 2);
            Assert.AreEqual(B.flatWidth, B.length);
            Assert.AreEqual(X.flatWidth, W.flatHeight);

            var Oshape = new TensorShape(X.flatHeight, W.flatWidth);

            Material material = new Material(PixelShaderSingleton.Instance.FindShader("Barracuda/Dense"));

            SetTensor(material, "X", X);
            SetTensor(material, "W", W);
            SetTensor(material, "B", B);
            material.SetInt("_ActivationMode", (int)fusedActivation);

            var O = Dispatch(material, Oshape);

            if (!IsFusedActivationSupported(fusedActivation))
            {
                O = Activation(m_StringCache.Lookup("Barracuda/", fusedActivation.ToString()), O);
            }

            return O;
        }
Code Example #14
        /// <inheritdoc/>
        Tensor IOps.Normalization(Tensor X, Tensor S, Tensor B, int pool, int axis, float epsilon, Layer.FusedActivation fusedActivation)
        {
            var Y = m_Ops1.Normalization(X, S, B, pool, axis, epsilon, fusedActivation);
            var Z = m_Ops2.Normalization(X, S, B, pool, axis, epsilon, fusedActivation);

            CheckSame(Y, Z, Layer.Type.Normalization);
            return Y;
        }
Code Example #15
        /// <inheritdoc/>
        Tensor IOps.Conv2DTrans(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, int[] outputAdjustment, Layer.FusedActivation fusedActivation)
        {
            var Y = m_Ops1.Conv2DTrans(X, K, B, stride, pad, outputAdjustment, fusedActivation);
            var Z = m_Ops2.Conv2DTrans(X, K, B, stride, pad, outputAdjustment, fusedActivation);

            CheckSame(Y, Z, Layer.Type.Conv2DTrans);
            return Y;
        }
Code Example #16
        public override Tensor Normalization(Tensor X, Tensor S, Tensor B, int pool, int axis, float epsilon, Layer.FusedActivation fusedActivation)
        {
            if (axis != 3 && axis != -1)
            {
                throw new NotImplementedException();
            }

            if (pool <= 0)
            {
                pool = X.batch;
            }

            if (pool > 1)
            {
                throw new NotImplementedException(); // @TODO: support other types of Normalization at test time
            }
            // Currently only pool=1 (InstanceNormalization) is supported

            // [0, N] : AvgVariancePool2DReduce
            // N+1    : GlobalAvgVariancePool2D
            // N+2    : Normalize
            // N+3    : Activation

            var inputDim = new[] { X.height, X.width };

            var  Xr              = X;
            var  X2r             = X;
            bool isFirstDispatch = true;

            for (var i = 0; i < m_Compiled.instructions.Length - 3; ++i)
            {
                var poolReduce = new[] { 8, 8 };
                var stride     = poolReduce;
                var pad        = new[] { 0, 0, 0, 0 };

                CompiledInstruction instructionPool = m_Compiled.instructions[i];
                Assert.IsNotNull(instructionPool.kernel.shader);

                var Or     = NewTensor(instructionPool.shape);
                var O2r    = NewTensor(instructionPool.shape);
                var fnPool = instructionPool.kernel;

                fnPool.SetTensor("X", Xr.shape, Pin(Xr).buffer);
                fnPool.SetTensor("X2", X2r.shape, Pin(X2r).buffer);
                fnPool.SetTensor("O", Or.shape, Pin(Or).buffer);
                fnPool.SetTensor("O2", O2r.shape, Pin(O2r).buffer);

                fnPool.shader.SetInts("_Pool", poolReduce);
                fnPool.shader.SetInts("_Stride", stride);
                fnPool.shader.SetInts("_Pad", pad);
                fnPool.shader.SetInt("_IsFirstDispatch", isFirstDispatch ? 1 : 0);

                fnPool.Dispatch();

                Xr              = Or;
                X2r             = O2r;
                isFirstDispatch = false;
            }

            CompiledInstruction instructionGlobalPool = m_Compiled.instructions[m_Compiled.instructions.Length - 3];

            Assert.IsNotNull(instructionGlobalPool.kernel.shader);

            var meanVariance = NewTensor(instructionGlobalPool.shape);
            var fnGlobalPool = instructionGlobalPool.kernel;

            fnGlobalPool.SetTensor("X", Xr.shape, Pin(Xr).buffer);
            fnGlobalPool.SetTensor("X2", X2r.shape, Pin(X2r).buffer);
            fnGlobalPool.SetTensor("O", meanVariance.shape, Pin(meanVariance).buffer);
            fnGlobalPool.shader.SetInts("_Pool", inputDim);
            fnGlobalPool.shader.SetInt("_IsFirstDispatch", isFirstDispatch ? 1 : 0);

            fnGlobalPool.Dispatch();

            CompiledInstruction instructionNormalize = m_Compiled.instructions[m_Compiled.instructions.Length - 2];

            Assert.IsNotNull(instructionNormalize.kernel.shader);
            Assert.AreEqual(X.channels, B.channels); Assert.AreEqual(X.channels, S.channels);
            Assert.AreEqual(B.length, B.channels); Assert.AreEqual(S.length, S.channels);

            var O           = NewTensor(X.shape);
            var fnNormalize = instructionNormalize.kernel;

            fnNormalize.SetTensor("X", X.shape, Pin(X).buffer);
            fnNormalize.SetTensor("O", O.shape, Pin(O).buffer);
            fnNormalize.SetTensor("W", meanVariance.shape, Pin(meanVariance).buffer);
            fnNormalize.SetTensorDecl("S", S.shape, Pin(S).offset);
            fnNormalize.SetTensorDecl("B", B.shape, Pin(B).offset);
            Assert.AreEqual(Pin(S).buffer, Pin(B).buffer);
            fnNormalize.SetTensorBuffer("WBK", Pin(S).buffer);
            fnNormalize.shader.SetFloat("_Epsilon", epsilon);
            fnNormalize.shader.SetInt("_ActivationMode", (int)fusedActivation);

            fnNormalize.Dispatch();

            return ApplyUnsupportedFusedActivationIfNeeded(fusedActivation, O);
        }
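The reduce loop shrinks the spatial field in 8×8 tiles per pass before the global pool collapses the remainder. A sketch of the resulting pass count for a hypothetical 64×64 input (the real count is baked into m_Compiled.instructions at compile time):

        int h = 64, w = 64, reducePasses = 0;
        while (h * w > 8 * 8)                  // keep tiling until one 8x8 block remains
        {
            h = (h + 7) / 8; w = (w + 7) / 8;  // ceil-divide by the 8x8 reduce tile
            reducePasses++;                    // here: one pass, 64x64 -> 8x8
        }
        // GlobalAvgVariancePool2D then collapses 8x8 -> 1x1 mean/variance per channel.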
Code Example #17
        public override Tensor Conv2D(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, Layer.FusedActivation fusedActivation)
        {
            if (m_Compiled.kernel.shader == null)
            {
                return base.Conv2D(X, K, B, stride, pad, fusedActivation);
            }

            Assert.AreEqual(X.channels, K.kernelDepth);
            Assert.AreEqual(K.kernelCount, B.flatWidth);
            Assert.AreEqual(B.flatWidth, B.length);
            Assert.AreEqual(stride.Length, 2);
            Assert.AreEqual(pad.Length, 4);

            Assert.IsNotNull(m_Compiled.kernel.shader);
            var O  = NewTensor(m_Compiled.shape);
            var fn = m_Compiled.kernel;

            fn.SetTensor(_DeclX, _DataX, X.shape, Pin(X).buffer);
            fn.SetTensor(_DeclO, _DataO, O.shape, Pin(O).buffer);

            if (m_Compiled.instructions[0].tensors?.Length == 2)
            {
                K = m_Compiled.instructions[0].tensors[0];
                B = m_Compiled.instructions[0].tensors[1];
            }

            fn.SetTensorDecl(_DeclK, K.shape, Pin(K).offset);
            fn.SetTensorDecl(_DeclB, B.shape, Pin(B).offset);
            Assert.AreEqual(Pin(K).buffer, Pin(B).buffer);
            fn.SetTensorBuffer(_DataWBK, Pin(K).buffer);

            fn.shader.SetInts(_Pad, pad);
            fn.shader.SetInts(_Stride, stride);
            fn.shader.SetInt("_ActivationMode", (int)fusedActivation);

            fn.Dispatch();

            return ApplyUnsupportedFusedActivationIfNeeded(fusedActivation, O);
        }
Code Example #18
 /// <inheritdoc/>
 Tensor IOps.Normalization(Tensor X, Tensor S, Tensor B, int pool, int axis, float epsilon, Layer.FusedActivation fusedActivation)
 {
     LogLayerSummary(X.shape + " ! " + (pool == 1 ? "instance" : "batch") + " axis=" + axis);
     var O = m_Ops.Normalization(X, S, B, pool, axis, epsilon, fusedActivation);
     LogOutputTensorSummary(O, Prefix + "Normalization");
     return O;
 }
Code Example #19
        /// <inheritdoc/>
        public override Tensor Normalization(Tensor X, Tensor S, Tensor B, int pool, int axis, float epsilon, Layer.FusedActivation fusedActivation)
        {
            if (!X.shape.Is4D())
            {
                throw new NotImplementedException();
            }

            if (axis != TensorShape.C && axis != -1)
            {
                return base.Normalization(X, S, B, pool, axis, epsilon, fusedActivation);
            }

            if (pool == 1 && X.batch != 1)
            {
                return base.Normalization(X, S, B, pool, axis, epsilon, fusedActivation); // @TODO: Instance Normalization with batch > 1
            }
            if (pool <= 0)
            {
                pool = X.batch;
            }

            Material material = new Material(PixelShaderSingleton.Instance.FindShader("Barracuda/InstanceNorm"));

            material.SetFloat("_Epsilon", epsilon);
            material.SetInt("_ActivationMode", (int)fusedActivation);

            SetTensor(material, "X", X);
            SetTensor(material, "W", S);
            SetTensor(material, "B", B);

            var O = Dispatch(material, X.shape);

            if (!IsFusedActivationSupported(fusedActivation))
            {
                O = Activation(m_StringCache.Lookup("Barracuda/", fusedActivation.ToString()), O);
            }

            return O;
        }
Code Example #20
        /// <inheritdoc/>
        public override Tensor Conv2DTrans(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, int[] outputAdjustment, Layer.FusedActivation fusedActivation)
        {
            Assert.IsTrue(X.shape.Is4D());
            Assert.AreEqual(X.channels, K.kernelDepth);
            Assert.AreEqual(K.kernelCount, B.flatWidth);
            Assert.AreEqual(B.flatWidth, B.length);
            Assert.AreEqual(stride.Length, 2);
            Assert.AreEqual(pad.Length, 4);

            var      Oshape   = X.shape.ApplyKernelInverse(K.shape, stride, pad, outputAdjustment);
            Material material = new Material(PixelShaderSingleton.Instance.FindShader("Barracuda/Conv2DTrans"));

            // one pass version
            pad = new int[]
            {
                K.kernelWidth - pad[0] - 1, K.kernelHeight - pad[1] - 1,
                K.kernelWidth - pad[2] - 1, K.kernelHeight - pad[3] - 1
            };

            SetTensor(material, "X", X);
            SetTensor(material, "K", K);
            SetTensor(material, "B", B);

            material.SetVector("_Stride", new Vector4(stride[0], stride[1], 0, 0));
            material.SetVector("_Pad", new Vector4(pad[0], pad[1], 0, 0));
            material.SetInt("_ActivationMode", (int)(fusedActivation));

            var O = Dispatch(material, Oshape);

            if (!IsFusedActivationSupported(fusedActivation))
            {
                O = Activation(m_StringCache.Lookup("Barracuda/", fusedActivation.ToString()), O);
            }

            return O;
        }
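The pad rewrite converts transposed-convolution padding into the equivalent forward-convolution padding, kernel - pad - 1 on each edge. A worked example with a hypothetical 4×4 kernel and pad {1, 1, 1, 1}:

        int kw = 4, kh = 4;
        int[] pad = { 1, 1, 1, 1 };
        int[] forwardPad =
        {
            kw - pad[0] - 1, kh - pad[1] - 1, // left, top     -> 2, 2
            kw - pad[2] - 1, kh - pad[3] - 1  // right, bottom -> 2, 2
        };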
Code Example #21
        /// <inheritdoc/>
        Tensor IOps.Conv2DTrans(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, int[] outputAdjustment, Layer.FusedActivation fusedActivation)
        {
            D.Log(X.shape + " @ " + K.shape + " + (" + B.flatWidth + ")");
            var O = m_Ops.Conv2DTrans(X, K, B, stride, pad, outputAdjustment, fusedActivation);

            O.PrintDataPart(32, Prefix + "Conv2DTrans");
            return O;
        }
Code Example #22
        public static Layer MBActivationByName(ref ModelBuilder mb, string name, object input, Layer.FusedActivation activation)
        {
            switch (activation)
            {
            case Layer.FusedActivation.Exp:
                return mb.Exp(name, input);

            case Layer.FusedActivation.Log:
                return mb.Log(name, input);

            case Layer.FusedActivation.Neg:
                return mb.Neg(name, input);

            case Layer.FusedActivation.None:
                return mb.Identity(name, input);

            case Layer.FusedActivation.Relu:
                return mb.Relu(name, input);

            case Layer.FusedActivation.Relu6:
                return mb.Relu6(name, input);

            case Layer.FusedActivation.Sigmoid:
                return mb.Sigmoid(name, input);

            case Layer.FusedActivation.Sqrt:
                return mb.Sqrt(name, input);

            case Layer.FusedActivation.Swish:
                return mb.Swish(name, input);

            case Layer.FusedActivation.Tanh:
                return mb.Tanh(name, input);

            default:
                throw new KeyNotFoundException();
            }
        }
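A hypothetical call site, continuing the ModelBuilder pattern from Code Example #10: after adding a Dense layer, append the matching stand-alone activation by name (the layer names here are made up for illustration).

        var mb = new ModelBuilder();
        // ... mb.Input(...) and mb.Dense("dense1", ...) as in Code Example #10 ...
        Layer act = MBActivationByName(ref mb, "dense1_act", "dense1", Layer.FusedActivation.Relu);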
Code Example #23
        Tensor IOps.Normalization(Tensor X, Tensor S, Tensor B, int pool, int axis, float epsilon, Layer.FusedActivation fusedActivation)
        {
            var O = m_Ops.Normalization(X, S, B, pool, axis, epsilon, fusedActivation);

            m_Alu += (long)X.length * 4L + (long)O.length * 2L;
            m_Mem += (long)X.length + (long)O.length;
            return O;
        }
Code Example #24
File: StatsOps.cs  Project: amaannt/PracticeMLAgents
        /// <inheritdoc/>
        Tensor IOps.Conv2DTrans(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, int[] outputAdjustment, Layer.FusedActivation fusedActivation)
        {
            var  O = m_Ops.Conv2DTrans(X, K, B, stride, pad, outputAdjustment, fusedActivation);
            long m = (long)O.batch * (long)O.width * (long)O.height;
            long n = (long)X.channels;
            // stride[0] strides width and stride[1] strides height, matching ApplyKernelInverse.
            long k = (long)(K.kernelWidth / stride[0]) * (long)(K.kernelHeight / stride[1]) * (long)K.channels;

            m_Alu += m * n * k * 2L;
            m_Mem += (long)X.length + (long)K.length + (long)B.length + (long)O.length;
            return O;
        }
Code Example #25
        /// <inheritdoc/>
        Tensor IOps.Normalization(Tensor X, Tensor S, Tensor B, int pool, int axis, float epsilon, Layer.FusedActivation fusedActivation)
        {
            D.Log(X.shape + " ! " + (pool == 1 ? "instance" : "batch") + " axis=" + axis);
            var O = m_Ops.Normalization(X, S, B, pool, axis, epsilon, fusedActivation);

            O.PrintDataPart(32, Prefix + "Normalization");
            return O;
        }
Code Example #26
 /// <inheritdoc/>
 Tensor IOps.Conv2DTrans(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, int[] outputAdjustment, Layer.FusedActivation fusedActivation)
 {
     LogLayerSummary(X.shape + " @ " + K.shape + " + (" + B.flatWidth + ")");
     var O = m_Ops.Conv2DTrans(X, K, B, stride, pad, outputAdjustment, fusedActivation);
     LogOutputTensorSummary(O, Prefix + "Conv2DTrans");
     return O;
 }
Code Example #27
        /// <inheritdoc/>
        public override Tensor DepthwiseConv2D(Tensor X, Tensor K, Tensor B, int[] stride, int[] pad, Layer.FusedActivation fusedActivation)
        {
            if (K.kernelDepth != 1)
            {
                return base.DepthwiseConv2D(X, K, B, stride, pad, fusedActivation);
            }

            Assert.IsTrue(X.shape.Is4D());
            Assert.AreEqual(K.kernelDepth, 1);
            Assert.AreEqual(K.kernelCount, X.channels);
            Assert.AreEqual(K.kernelCount, B.flatWidth);
            Assert.AreEqual(B.flatWidth, B.length);
            Assert.AreEqual(stride.Length, 2);
            Assert.AreEqual(pad.Length, 4);

            var      Oshape   = X.shape.ApplyKernel(K.shape, stride, pad);
            Material material = new Material(PixelShaderSingleton.Instance.FindShader("Barracuda/DepthwiseConv2D"));

            SetTensor(material, "X", X);
            SetTensor(material, "K", K);
            SetTensor(material, "B", B);

            material.SetVector("_Stride", new Vector4(stride[0], stride[1], 0, 0));
            material.SetVector("_Pad", new Vector4(pad[0], pad[1], pad[2], pad[3]));
            material.SetInt("_ActivationMode", (int)(fusedActivation));

            var O = Dispatch(material, Oshape);

            if (!IsFusedActivationSupported(fusedActivation))
            {
                O = Activation(m_StringCache.Lookup("Barracuda/", fusedActivation.ToString()), O);
            }

            return O;
        }