internal void Reduce(ModelBuilder net, ONNXNodeWrapper node, Layer.Type reduceType)
        {
            node.UnsupportedAttribute("keepdims", 1);

            // @TODO: extract into separate function and reuse for other Reduce ops
            var    features = node.Input0Features;
            var    rank     = node.Input0Rank;
            object input    = node.Input0;

            foreach (var onnxAxis in node.Axes)
            {
                var axis = ONNXLayout.ConvertAxisToBarracuda(onnxAxis, onnxRank: rank, onnxLayout: "NCHW");
                input = net.Reduce(reduceType, $"{node.Name}__axis{axis}", input, axis);

                bool lastAxis = (axis == -1 || axis == node.Input0Rank - 1); // last axis in Barracuda is feature axis
                if (lastAxis)
                {
                    features = 1; // if reducing over the last feature axis, then operation will collapse all features to 1
                }
                rank--;           // rank will be reduced after this operation
                Output(node.Name, features: features, rank: rank);
            }

            net.Identity(node.Name, input);
        }
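For intuition, a hedged sketch of the chain the loop above emits for a hypothetical ONNX ReduceMean node named "r" with axes = [1, 2] on a rank-4 NCHW input; the concrete Barracuda axis values depend on ONNXLayout.ConvertAxisToBarracuda and are illustrative only:

        // Illustrative unrolling; axis numbers assume the usual NCHW -> NHWC remap.
        object x = node.Input0;
        x = net.Reduce(reduceType, $"{node.Name}__axis3", x, 3); // ONNX axis 1 (C) -> Barracuda feature axis
        x = net.Reduce(reduceType, $"{node.Name}__axis1", x, 1); // ONNX axis 2 (H) -> a spatial Barracuda axis
        net.Identity(node.Name, x); // final layer carries the original ONNX node name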
Example No. 2
        private Layer Conv(string name, Layer.Type convType, object input, Int32[] stride, Int32[] pad, Int32[] outputPad, Tensor kernel, Tensor bias)
        {
            Layer layer = new Layer(name, convType);

            layer.pad                         = pad;
            layer.stride                      = stride;
            layer.pool                        = outputPad; // output padding is carried in the generic pool field
            layer.inputs                      = new[] { ResolveInput(input) };
            layer.datasets                    = new Layer.DataSet[2];
            layer.datasets[0].name            = $"{name}/K";
            layer.datasets[0].shape           = kernel.shape;
            layer.datasets[0].itemSizeInBytes = 4;
            layer.datasets[0].length          = kernel.shape.length;
            layer.datasets[0].offset          = 0;
            layer.datasets[1].name            = $"{name}/B";
            layer.datasets[1].shape           = bias.shape;
            layer.datasets[1].itemSizeInBytes = 4;
            layer.datasets[1].length          = bias.shape.length;
            layer.datasets[1].offset          = kernel.shape.length;
            layer.weights                     = new float[kernel.shape.length + bias.shape.length];

            kernel.readonlyArray.CopyTo(layer.weights, 0);
            bias.readonlyArray.CopyTo(layer.weights, layer.datasets[1].offset);

            m_Model.layers.Add(layer);

            return layer;
        }
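The helper packs the kernel and bias constants into one flat weights array; the bias data simply starts where the kernel data ends, matching the dataset offsets set above:

        // Resulting layout (K = kernel.shape.length, B = bias.shape.length):
        //   layer.weights: [ kernel values: 0 .. K-1 | bias values: K .. K+B-1 ]
        //   datasets[0] ("{name}/K"): offset 0, length K
        //   datasets[1] ("{name}/B"): offset K, length B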
Example No. 3
 public static bool IsLayerSupportingActivationFusing(Layer.Type layerType)
 {
     return layerType == Layer.Type.Dense ||
            layerType == Layer.Type.Conv2D ||
            layerType == Layer.Type.DepthwiseConv2D ||
            layerType == Layer.Type.Conv2DTrans ||
            layerType == Layer.Type.Normalization;
 }
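A hedged sketch of how a fusing pass might consume this predicate; it is a fragment, not Barracuda's actual optimizer, and `layer` and `next` stand for two adjacent layers in the model:

     // Assumed fusing step: fold a trailing Activation layer into a supported producer.
     if (IsLayerSupportingActivationFusing(layer.type) && next.type == Layer.Type.Activation)
     {
         layer.activation = next.activation; // carry the activation on the fused layer
         // ...then rewire consumers of `next` onto `layer` and drop `next` from the model.
     }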
Example No. 4
        private Layer Broadcast(Layer.Type type, string name, object[] inputs)
        {
            Layer layer = new Layer(name, type);

            layer.inputs = inputs.Select(i => ResolveInput(i)).ToArray();

            m_Model.layers.Add(layer);

            return layer;
        }
Example No. 5
        /// <summary>
        /// Computes a reduce operation (max/min/mean/prod/sum) over the input tensor's elements along the provided axis
        /// </summary>
        public Layer Reduce(Layer.Type type, string name, object input, int axis = -1)
        {
            Layer layer = new Layer(name, type);

            layer.inputs = new[] { ResolveInput(input) };
            layer.axis   = axis;
            m_Model.layers.Add(layer);

            return layer;
        }
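A minimal usage sketch, assuming the usual ModelBuilder Input/Output helpers and hypothetical tensor names:

        var builder = new ModelBuilder();
        builder.Input("input", 1, 28, 28, 16);                            // assumed NHWC input shape
        builder.Reduce(Layer.Type.ReduceMean, "mean", "input", axis: -1); // mean over the feature axis
        builder.Output("mean");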
Example No. 6
        private Layer Pad(Layer.Type type, string name, object input, Int32[] pad, float constantValue = 0.0f)
        {
            Layer layer = new Layer(name, type);

            layer.inputs = new[] { ResolveInput(input) };
            layer.beta   = constantValue; // constant fill value rides in the generic beta field
            layer.pad    = pad;

            m_Model.layers.Add(layer);

            return layer;
        }
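A hedged usage sketch; the Border2D layer type and the pad-array order are assumptions for illustration:

        Layer border = Pad(Layer.Type.Border2D, "border", "input",
            new Int32[] { 2, 2, 2, 2 }, constantValue: -1.0f); // assumed (left, top, right, bottom) order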
Example No. 7
        private Layer Pool(Layer.Type type, string name, object input, Int32[] pool, Int32[] stride, Int32[] pad)
        {
            Layer layer = new Layer(name, type);

            layer.pad    = pad;
            layer.stride = stride;
            layer.pool   = pool;
            layer.inputs = new[] { ResolveInput(input) };

            m_Model.layers.Add(layer);

            return layer;
        }
Example No. 8
 private void CheckSame(Tensor X, Tensor Y, Layer.Type layerType, params Tensor[] inputs)
 {
     CompareOpsUtils.CheckSame(X, Y, layerType, m_DifferenceLogLevel, m_Epsilon, inputs);
 }
Example No. 9
        private static int ConvertLayerAxisFor8DShapeSupportIfNeeded(int axis, long version, Layer.Type layerType)
        {
            if (version > Model.LastVersionWithout8DSupport)
            {
                return axis;
            }

            // Prior to version 17, 8D tensors were not supported, so the axis was expressed in NCHW format for Gather, Concat, and Reduce layers.
            if (layerType == Layer.Type.ReduceL2 ||
                layerType == Layer.Type.ReduceLogSum ||
                layerType == Layer.Type.ReduceLogSumExp ||
                layerType == Layer.Type.ReduceMax ||
                layerType == Layer.Type.ReduceMean ||
                layerType == Layer.Type.ReduceMin ||
                layerType == Layer.Type.ReduceProd ||
                layerType == Layer.Type.ReduceSum ||
                layerType == Layer.Type.ReduceSumSquare ||
                layerType == Layer.Type.Gather ||
                layerType == Layer.Type.Concat)
            {
                axis = TensorExtensions.Convert4DTo8DAxis(axis);
            }

            return axis;
        }
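A hedged worked example; the exact remapped value depends on TensorExtensions.Convert4DTo8DAxis, so only the control flow is asserted here:

            // Pre-8D model: an axis stored in 4D NCHW terms gets remapped to the 8D layout.
            long oldVersion = Model.LastVersionWithout8DSupport;
            int remapped = ConvertLayerAxisFor8DShapeSupportIfNeeded(1, oldVersion, Layer.Type.ReduceMean);
            // 8D-aware model: the axis passes through unchanged.
            int unchanged = ConvertLayerAxisFor8DShapeSupportIfNeeded(1, oldVersion + 1, Layer.Type.ReduceMean); // == 1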
Example No. 10
 public static void CheckApproximately(bool differenceAsError, Tensor X, Tensor Y, int count, float epsilon, Layer.Type type)
 {
     CheckApproximately(differenceAsError, X, Y, count, epsilon, type.ToString());
 }
Example No. 11
 // -----
 public static void CheckSame(bool differenceAsError, Tensor X, Tensor Y, Layer.Type type, params Tensor[] inputs)
 {
     CheckSame(differenceAsError, X, Y, type.ToString(), inputs);
 }
Example No. 12
 private void Add(Layer.Type layerType, Func<Layer, long> opStats)
 {
     m_layerComplexityStats.Add(layerType, opStats);
 }
Example No. 13
 internal static bool CheckApproximately(Tensor X, Tensor Y, int count, float epsilon, Layer.Type type, LogLevel logLevel)
 {
     return CheckApproximately(X, Y, count, epsilon, type.ToString(), logLevel);
 }
Example No. 14
 internal static void CheckSame(Tensor X, Tensor Y, Layer.Type type, LogLevel logLevel, float epsilon = 0.0001f, params Tensor[] inputs)
 {
     CheckSame(X, Y, type.ToString(), logLevel, epsilon, inputs);
 }
Example No. 15
 public static void CheckApproximately(Tensor X, Tensor Y, int count, float epsilon, Layer.Type type, LogLevel logLevel)
 {
     CheckApproximately(X, Y, count, epsilon, type.ToString(), logLevel);
 }
Example No. 16
 // -----
 public static void CheckSame(Tensor X, Tensor Y, Layer.Type type, params Tensor[] inputs)
 {
     CheckSame(X, Y, type.ToString(), inputs);
 }