Example #1
        internal void Reduce(ModelBuilder net, ONNXNodeWrapper node, Layer.Type reduceType)
        {
            node.UnsupportedAttribute("keepdims", 1);

            // @TODO: extract into separate function and reuse for other Reduce ops
            var    features = node.Input0Features;
            var    rank     = node.Input0Rank;
            object input    = node.Input0;

            foreach (var onnxAxis in node.Axes)
            {
                var axis = ONNXLayout.ConvertAxisToBarracuda(onnxAxis, onnxRank: rank, onnxLayout: "NCHW");
                input = net.Reduce(reduceType, $"{node.Name}__axis{axis}", input, axis);

                bool lastAxis = (axis == -1 || axis == node.Input0Rank - 1); // last axis in Barracuda is feature axis
                if (lastAxis)
                {
                    features = 1; // if reducing over the last feature axis, then operation will collapse all features to 1
                }
                rank--;           // rank will be reduced after this operation
                Output(node.Name, features: features, rank: rank);
            }

            net.Identity(node.Name, input);
        }
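
The loop above lowers a multi-axis ONNX reduce into a chain of single-axis Barracuda reduce layers and then re-exposes the final result under the original node name via Identity. Below is a minimal standalone sketch of that naming and rank bookkeeping, using a hypothetical node name and axes (in the importer the axes are first converted with ONNXLayout.ConvertAxisToBarracuda):

    // Minimal sketch (not from the source): a multi-axis reduce becomes a chain of
    // single-axis reduce layers named "<node>__axis<axis>", each step dropping one
    // dimension, with a final Identity layer re-exposing the original node name.
    using System;
    using System.Collections.Generic;

    static class ReduceLoweringSketch
    {
        static void Main()
        {
            string nodeName = "ReduceMean_0"; // hypothetical ONNX node name
            int rank = 4;                     // hypothetical input rank
            int[] axes = { 2, 3 };            // hypothetical (already converted) axes

            var layerNames = new List<string>();
            foreach (var axis in axes)
            {
                layerNames.Add($"{nodeName}__axis{axis}"); // one reduce layer per axis
                rank--;                                    // each reduction removes a dimension
            }
            layerNames.Add(nodeName);                      // final Identity layer

            Console.WriteLine(string.Join(" -> ", layerNames) + $", final rank {rank}");
            // ReduceMean_0__axis2 -> ReduceMean_0__axis3 -> ReduceMean_0, final rank 2
        }
    }
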
Example #2
        internal static void Resize2D(ModelBuilder net, ONNXNodeWrapper node, float[] scales)
        {
            if (!scales.All(x => x > 0.0f))
            {
                Warn(net, node, $"Only positive scale values are supported.");
            }

            if (scales.All(x => x < 1.0f))
            {
                if (!scales.All(x => Mathf.Approximately(1f / x, Mathf.Round(1f / x))))
                {
                    Warn(net, node, $"Only inverse of scale values which produce integer are currently supported. Inverse of scale value will be rounded to closest integer.");
                }

                var noStride = new[] { 1, 1 };
                var noPad    = new[] { 0, 0 };
                var inverseScalesRoundedToInt = scales.Select(x => (int)Mathf.Round(1f / x)).ToArray();
                // @TODO: nearest, actually this is bilinear downsampling
                net.AvgPool2D(node.Name, node.Input0, inverseScalesRoundedToInt, noStride, noPad);
            }
            else
            {
                if (!scales.All(x => Mathf.Approximately(x, Mathf.Round(x))))
                {
                    Warn(net, node, $"Only integer scale values are currently supported. Scale value will be rounded to closest integer value.");
                }

                var scalesRoundedToInt = scales.Select(x => (int)Mathf.Round(x)).ToArray();
                net.Upsample2D(node.Name, node.Input0, scalesRoundedToInt);
            }
        }
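
Resize2D above picks one of two paths: when every scale is below 1 it emits an AvgPool2D with kernel sizes equal to the rounded inverse scales, otherwise an Upsample2D with rounded integer factors. A minimal sketch of just that rounding arithmetic, with hypothetical scale values:

    // Minimal sketch (not from the source): the rounding behind the two branches.
    // If all scales are < 1, the pooling path uses the rounded inverse scale as the
    // kernel size; otherwise the upsampling path uses the rounded scale as the factor.
    using System;
    using System.Linq;

    static class ResizeScaleSketch
    {
        static void Main()
        {
            float[] downScales = { 0.5f, 0.25f }; // hypothetical H/W scales, all < 1 -> AvgPool2D path
            float[] upScales   = { 2.0f, 3.0f };  // hypothetical H/W scales >= 1     -> Upsample2D path

            // Downsampling: pool size is the rounded inverse of each scale (0.5 -> 2, 0.25 -> 4).
            var poolSizes = downScales.Select(x => (int)Math.Round(1f / x)).ToArray();

            // Upsampling: factor is the rounded scale itself (2.0 -> 2, 3.0 -> 3).
            var upsampleFactors = upScales.Select(x => (int)Math.Round(x)).ToArray();

            Console.WriteLine(string.Join(",", poolSizes));        // 2,4
            Console.WriteLine(string.Join(",", upsampleFactors));  // 2,3
        }
    }
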
Example #3
 // Logging helpers
 private static void Warn(ModelBuilder builder, ONNXNodeWrapper node, string message)
 {
     Warn(builder.model, node.Name, message);
 }
Example #4
 private void Output(ONNXNodeWrapper node, int features = -1, int rank = -1)
 {
     Output(node.Name, features, rank);
 }
Example #5
 // Helpers to keep track of model tensors
 private void Const(ONNXNodeWrapper node, ONNXTensor onnxTensor)
 {
     m_ModelTensors.AddConstant(node.Name, onnxTensor);
 }
Example #6
        private ONNXTensor BakeNodeIntoConstant(Action<ModelBuilder, ONNXNodeWrapper> opImportAction, ONNXNodeWrapper node)
        {
            var model = new Model();
            var net   = new ModelBuilder(model);

            // add all inputs as constants
            Debug.Assert(node.AreAllInputsConst);
            for (var i = 0; i < node.InputCount; ++i)
            {
                var assumeOnnxLayout = i == 0 ? "NCHW" : "CONST";
                var input            = node.Inputs[i];
                net.Const(input,
                          constantTensors[input].ToBarracuda(assumeOnnxLayout));
            }

            // add node that we are going to bake into the constant
            opImportAction(net, node);

            // bake
            var noInputs = new Dictionary<string, Tensor>();

            var useCPUforBaking = WorkerFactory.Device.CPU;
            var worker          = WorkerFactory.CreateWorker(model, useCPUforBaking);
            var result          = worker.ExecuteAndWaitForCompletion(noInputs);

            // convert from Barracuda back into ONNX layout
            var onnxData  = ONNXTensor.Permute(result, new int[] { 0, 3, 1, 2 }); // NHWC -> NCHW
            var onnxShape = onnxData.shape.ToArray().Select(x => (long)x).ToArray();

            return new ONNXTensor(onnxData, onnxShape).SqueezeAll();
        }
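
The bake step converts the executed result back to ONNX layout with the permutation {0, 3, 1, 2}. A minimal sketch of what that index permutation does to a shape, moving channels from last (NHWC) to second (NCHW); the shape values are hypothetical:

    // Minimal sketch (not from the source): applying the same index permutation as the
    // bake step to a shape, e.g. (1, 28, 28, 3) in NHWC becomes (1, 3, 28, 28) in NCHW.
    using System;
    using System.Linq;

    static class LayoutPermuteSketch
    {
        static void Main()
        {
            int[] nhwcShape   = { 1, 28, 28, 3 }; // hypothetical N, H, W, C
            int[] permutation = { 0, 3, 1, 2 };   // same permutation as in the bake step

            int[] nchwShape = permutation.Select(p => nhwcShape[p]).ToArray();
            Console.WriteLine(string.Join(",", nchwShape)); // 1,3,28,28
        }
    }
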
Example #7
        private Model ConvertOnnxModel(ModelProto onnxModel)
        {
            var model        = new Model();
            var modelBuilder = new ModelBuilder(model);

            // Convert graph inputs & outputs
            var initializersByName = onnxModel.Graph.Initializer.ToDictionary(i => i.Name, i => true);

            foreach (ValueInfoProto i in onnxModel.Graph.Input)
            {
                // skip input tensors that have initializer data, they are constant tensors not global inputs
                if (initializersByName.ContainsKey(i.Name))
                {
                    continue;
                }

                if (m_OverrideGlobalInputs.ContainsKey(i.Name))
                {
                    Const(i.Name, m_OverrideGlobalInputs[i.Name]);
                    continue;
                }

                modelBuilder.Input(i.Name, ONNXLayout.ConvertSymbolicShapeToBarracuda(i.Type.TensorType.Shape, onnxLayout: "NCHW"));
                Output(i.Name, onnxShape: i.Type.TensorType.Shape.Dim.Select(d => d.DimValue).ToArray(), onnxLayout: "NCHW");
            }
            foreach (ValueInfoProto o in onnxModel.Graph.Output)
            {
                modelBuilder.Output(o.Name);
            }

            // TODO: process model (recurrent nodes) memories

            // Read constants from initializer list
            foreach (TensorProto initializer in onnxModel.Graph.Initializer)
            {
                Const(initializer.Name, new ONNXTensor(initializer));
            }

            // Convert graph nodes
            foreach (NodeProto onnxNode in onnxModel.Graph.Node)
            {
                var node   = new ONNXNodeWrapper(onnxNode, m_ModelTensors, model.Warnings);
                var nodeId = node.Name;
                var opType = node.OperatorType;

                Output(node);

                bool injectDummy = false;
                if (m_NodeImporters.ContainsKey(opType))
                {
                    try
                    {
                        if (node.AreAllInputsConst && !m_ShouldNotBeBaked.Contains(opType))
                        {
                            Profiler.BeginSample($"Bake {opType} {node.Name}");
                            var bakedTensor = BakeNodeIntoConstant(m_NodeImporters[opType], node);
                            Const(node.Name, bakedTensor);
                            var printTensor = bakedTensor.ToBarracuda("NCHW");
                            D.Log($"Baked node {nodeId} into constant of shape {printTensor.shape} and values: {printTensor.DataToString()}");
                            Profiler.EndSample();
                        }
                        else
                        {
                            Profiler.BeginSample($"Import {opType} {node.Name}");
                            m_NodeImporters[opType](modelBuilder, node);
                            Profiler.EndSample();
                        }
                    }
                    catch (Exception e)
                    {
                        // We support the layer but something went wrong while importing it
                        // We log the problem and insert an identity layer
                        string message = $"Unexpected error while parsing layer {nodeId} of type {opType}.\n{e.Message}\n\nJson: {onnxNode}\n{e.StackTrace}\n";
                        Warn(model, nodeId, message);
                        injectDummy = true;
                    }
                }
                else
                {
                    // We don't support this type of layer
                    // We log the problem and insert an identity layer
                    string message = $"Unknown type encountered while parsing layer {nodeId} of type {opType}. We replace by an identity layer.";
                    Warn(model, nodeId, message);
                    injectDummy = true;
                }

                if (injectDummy)
                {
                    var originalLayerHadInputs = (node.InputCount > 0);
                    if (originalLayerHadInputs)
                    {
                        modelBuilder.Identity(nodeId, node.Input0);
                    }
                    else // if erroneous layer had no inputs, inject dummy constant which does not require any inputs
                    {
                        modelBuilder.Const(nodeId, new Tensor());
                    }
                }

                m_ModelTensors.CompleteUninitializedFields(node);
            }

            // Convert constant tensors
            int insertionIndex = 0;

            foreach (var entry in constantTensors)
            {
                modelBuilder.Const(entry.Key, entry.Value.ToBarracuda(onnxLayout: "CONST"),
                                   insertionIndex++);
            }

            // Model should not contain any broken links in the end
            var unconnectedInputs = ModelAnalyzer.FindBrokenLinks(model);

            Debug.Assert(unconnectedInputs.Length == 0);
            if (unconnectedInputs.Length > 0)
            {
                var message = $"Broken links: {string.Join(", ", unconnectedInputs)}";
                Warn(model, "", message);
            }

            // Parse meta data
            var irVersion = onnxModel.IrVersion; // legacy

            if (onnxModel.OpsetImport?.Count > 0)
            {
                irVersion = onnxModel.OpsetImport[0].Version;
            }
            model.ProducerName = $"{onnxModel.ProducerName} v{onnxModel.ProducerVersion}";
            model.IrSource     = "ONNX";
            model.IrVersion    = $"{irVersion}";

            // strip :0 at the end of string name for TF import
            if (patchRemoveTrailingTFExportCharacters)
            {
                model.inputs = model.inputs.Select(i =>
                {
                    i.name = i.name.EndsWith(":0") ? i.name.Remove(i.name.Length - 2) : i.name;
                    return i;
                }).ToList();
                model.outputs = model.outputs.Select(o =>
                {
                    o = o.EndsWith(":0") ? o.Remove(o.Length - 2) : o;
                    return o;
                }).ToList();
                model.memories = model.memories.Select(m =>
                {
                    m.input  = m.input.EndsWith(":0")  ? m.input.Remove(m.input.Length - 2)   : m.input;
                    m.output = m.output.EndsWith(":0") ? m.output.Remove(m.output.Length - 2) : m.output;
                    return m;
                }).ToList();
                model.layers = model.layers.Select(l =>
                {
                    l.name = l.name.EndsWith(":0") ? l.name.Remove(l.name.Length - 2) : l.name;
                    for (int i = 0; i < l.datasets.Length; i++)
                    {
                        l.datasets[i].name = l.datasets[i].name.EndsWith(":0") ? l.datasets[i].name.Remove(l.datasets[i].name.Length - 2) : l.datasets[i].name;
                    }
                    for (int i = 0; i < l.inputs.Length; i++)
                    {
                        l.inputs[i] = l.inputs[i].EndsWith(":0") ? l.inputs[i].Remove(l.inputs[i].Length - 2) : l.inputs[i];
                    }
                    return l;
                }).ToList();
            }

            return model;
        }
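
The block guarded by patchRemoveTrailingTFExportCharacters strips the ":0" suffix that TensorFlow-exported graphs leave on tensor names. A minimal standalone sketch of that trimming, with hypothetical names:

    // Minimal sketch (not from the source): TensorFlow-exported graphs often name tensors
    // "op_name:0"; the trailing ":0" is removed while other names pass through unchanged.
    using System;

    static class TrimTfSuffixSketch
    {
        static string Strip(string name) =>
            name.EndsWith(":0") ? name.Remove(name.Length - 2) : name;

        static void Main()
        {
            Console.WriteLine(Strip("dense_1/BiasAdd:0")); // dense_1/BiasAdd
            Console.WriteLine(Strip("input"));             // input (unchanged)
        }
    }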