Example #1
        public static Model ValidateModel(Model model)
        {
            // validate that the model contains no broken links
            var brokenLinks = ModelAnalyzer.FindBrokenLinks(model);

            if (brokenLinks.Length > 0)
            {
                D.LogWarning($"Model contains {brokenLinks.Length} broken links: {string.Join(",", brokenLinks)}");
            }

            // validate that all model outputs are unique
            // https://stackoverflow.com/questions/18547354/c-sharp-linq-find-duplicates-in-list
            var duplicateOutputs = model.outputs.GroupBy(x => x)
                                   .Where(g => g.Count() > 1)
                                   .Select(y => y.Key);

            foreach (var o in duplicateOutputs)
            {
                D.LogWarning($"Output is specified more than once in the model: {o}");
            }

            // validate that the model contains no unconnected outputs
            var unconnectedOutputs = ModelAnalyzer.FindUnconnectedOutputs(model);

            foreach (var o in unconnectedOutputs)
            {
                D.LogWarning($"Layer is specified as output, but is missing in the model: {o}");
            }

            return model;
        }
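A minimal usage sketch, assuming ValidateModel is exposed on a static helper class (ModelValidator is a hypothetical name here) and that the model was loaded with ModelLoader as in Example #7:

        // Hypothetical call site: validation only logs warnings and returns the model unchanged.
        var model = ModelLoader.Load(nnModel, verbose: false);
        model = ModelValidator.ValidateModel(model);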
Example #2
        public static Model PatchModel(Model model, string[] additionalOutputs, string[] trimOutputs = null)
        {
            bool trimModel = trimOutputs != null;

            if (trimModel)
            {
                foreach (var o in trimOutputs.Except(model.outputs))
                {
                    if (additionalOutputs == null || !additionalOutputs.Contains(o))
                    {
                        D.LogWarning($"Output specified in trimOutputs was not found in the model: {o}");
                    }
                }

                var newModel = model.ShallowCopy();
                newModel.outputs = trimOutputs.Intersect(model.outputs).ToList();
                model            = newModel;
            }

            if (additionalOutputs != null)
            {
                foreach (var o in additionalOutputs.Except(model.layers.Select(l => l.name)))
                {
                    D.LogWarning($"Layer specified in additionalOutputs was not found in the model: {o}");
                }

                // 'new' means that output name does not yet exist in model.outputs
                // 'valid' means that output name matches one of the existing model.layer names
                var newAndValidAdditionalOutputs =
                    additionalOutputs.Except(model.outputs).Intersect(model.layers.Select(l => l.name));

                var newModel = model.ShallowCopy();
                newModel.outputs.AddRange(newAndValidAdditionalOutputs);
                model = newModel;
            }

            if (trimModel)
            {
                var newModel = model.ShallowCopy();
                var upstream = ModelAnalyzer.FindUpstreamLayers(model, newModel.outputs.ToArray());
                foreach (var l in model.layers)
                {
                    if (!upstream.Contains(l))
                    {
                        newModel.layers.Remove(l);
                    }
                }

                model = newModel;
            }

            model = ModelOptimizer.RemoveNoop(model);

            return model;
        }
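A hedged usage sketch for PatchModel, assuming it is exposed on a static helper class (ModelPatcher is a hypothetical name) and using a placeholder layer name: expose an intermediate layer as an output and trim away every layer that is not upstream of it.

        // Hypothetical call site; "conv2d_1" is a placeholder layer name.
        var patched = ModelPatcher.PatchModel(
            model,
            additionalOutputs: new[] { "conv2d_1" },   // appended to model.outputs if the layer exists
            trimOutputs:       new[] { "conv2d_1" });  // layers not upstream of this output are removed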
Example #3
        public GenericWorker(Model model, IOps ops, IVars vars, bool verbose = false)
        {
            m_Model             = model;
            m_DefaultInputName  = ModelAnalyzer.GetDefaultInputName(model);
            m_DefaultOutputName = ModelAnalyzer.GetDefaultOutputName(model);
            m_Ops           = ops;
            m_Vars          = vars;
            m_ModelCompiler = ops as IModelCompiler;
            m_Verbose       = verbose;

            m_RequestResetAllocator = true;
        }
Example #4
        public override void PrepareStorage(Model model, IOps ops, IDictionary<string, TensorShape> inputShapes)
        {
            base.PrepareStorage(model, ops, inputShapes);

            ReleaseTemporary();

            if (m_CachedModel != model)
            {
                m_LayersWithStorage = ModelAnalyzer.FindLayersThatRequireStorage(model);
            }
            m_CachedModel = model;

            Assert.AreEqual(m_Temporary, null);
        }
Example #5
        public override void PrepareStorage(Model model, IOps ops, IDictionary<string, TensorShape> inputShapes)
        {
            base.PrepareStorage(model, ops, inputShapes);
            if (m_CachedModel != model)
            {
                // pre-allocate 2 buffers that can be cycled for temporaries
                var allocator = m_TemporaryAllocator;

                var maxShape = ModelAnalyzer.FindLargestNecessaryTensorShape(model, inputShapes);
                var alloc1   = allocator.Alloc(maxShape);
                var alloc2   = allocator.Alloc(maxShape);
                alloc1 = ops.Prepare(alloc1);
                alloc2 = ops.Prepare(alloc2);
                allocator.Release(alloc1, false);
                allocator.Release(alloc2, false);
            }
            m_CachedModel = model;
        }
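A note on the intent, inferred from the inline comment rather than documented behavior: allocating two buffers of maxShape and releasing them right away presumably seeds the temporary allocator with two reusable buffers large enough for any layer in the model, while ops.Prepare lets the backend build its representation of each buffer up front instead of during execution.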
Example #6
        static public TensorShape GetShapeByName(this Model model, string name)
        {
            foreach (var i in model.inputs)
            {
                if (i.name == name)
                {
                    return new TensorShape(i.shape);
                }
            }

            TensorShape shape;

            if (ModelAnalyzer.TryGetOutputTensorShape(model, name, out shape))
            {
                return shape;
            }

            foreach (var l in model.layers)
            {
                foreach (var ds in l.datasets)
                {
                    if (ds.name == name)
                    {
                        return ds.shape;
                    }
                }
            }

            foreach (var mem in model.memories)
            {
                if (mem.input == name || mem.output == name)
                {
                    return mem.shape;
                }
            }

            throw new System.Collections.Generic.KeyNotFoundException("Shape " + name + " not found!");
        }
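A short usage sketch for the GetShapeByName extension; the tensor name below is a placeholder, and the method throws KeyNotFoundException when the name matches no input, output, dataset, or memory:

        // Hypothetical call site; "conv2d_1" is a placeholder name.
        TensorShape shape = model.GetShapeByName("conv2d_1");
        D.Log($"conv2d_1 shape: {shape}");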
Example #7
        void OnEnable()
        {
            // TODO: investigate perf -- method takes 1s the first time you click on the model in the UI
            var nnModel = target as NNModel;

            if (nnModel == null)
            {
                return;
            }
            if (nnModel.modelData == null)
            {
                return;
            }

            m_Model = ModelLoader.Load(nnModel, verbose: false);
            if (m_Model == null)
            {
                return;
            }

            m_Inputs     = m_Model.inputs.Select(i => i.name).ToList();
            m_InputsDesc = m_Model.inputs.Select(i => $"shape: ({String.Join(",", i.shape)})").ToList();
            m_Outputs    = m_Model.outputs.ToList();

            bool allKnownShapes = true;
            var  inputShapes    = new Dictionary<string, TensorShape>();

            foreach (var i in m_Model.inputs)
            {
                allKnownShapes = allKnownShapes && !i.shape.Contains(-1) && !i.shape.Contains(0);
                if (!allKnownShapes)
                {
                    break;
                }
                inputShapes.Add(i.name, new TensorShape(i.shape));
            }
            if (allKnownShapes)
            {
                m_OutputsDesc = m_Model.outputs.Select(i => {
                    string output = "(-1,-1,-1,-1)";
                    try
                    {
                        TensorShape shape;
                        if (ModelAnalyzer.TryGetOutputTensorShape(m_Model, inputShapes, i, out shape))
                        {
                            output = shape.ToString();
                        }
                    }
                    catch (Exception e)
                    {
                        Debug.LogError($"Unexpected error while evaluating model output {i}. {e}");
                    }
                    return($"shape: {output}");
                }).ToList();
            }
            else
            {
                m_OutputsDesc = m_Model.outputs.Select(i => "shape: (-1,-1,-1,-1)").ToList();
            }

            m_Memories     = m_Model.memories.Select(i => i.input).ToList();
            m_MemoriesDesc = m_Model.memories.Select(i => $"shape:{i.shape.ToString()} output:{i.output}").ToList();

            var layers    = m_Model.layers.Where(i => i.type != Layer.Type.Load);
            var constants = m_Model.layers.Where(i => i.type == Layer.Type.Load);

            m_Layers        = layers.Select(i => i.type.ToString()).ToList();
            m_LayersDesc    = layers.Select(i => i.ToString()).ToList();
            m_Constants     = constants.Select(i => i.type.ToString()).ToList();
            m_ConstantsDesc = constants.Select(i => i.ToString()).ToList();

            m_NumEmbeddedWeights = layers.Sum(l => (long)l.weights.Length).ToString();
            m_NumConstantWeights = constants.Sum(l => (long)l.weights.Length).ToString();

            m_Warnings     = m_Model.Warnings.Select(i => i.LayerName).ToList();
            m_WarningsDesc = m_Model.Warnings.Select(i => i.Message).ToList();
        }
Example #8
        public virtual void PrepareModel(Model model, IDictionary<string, TensorShape> inputShapes)
        {
            var modelHash = CalcModelWithInputsHashCode(model, inputShapes);

            if (modelHash == m_CachedModelHash)
            {
                return;
            }

            m_CachedModelHash = modelHash;
            m_CompiledLayers.Clear();

            IDictionary<string, TensorShape> shapesByName;

            ModelAnalyzer.ListTemporaryTensorShapes(model, inputShapes, out shapesByName);

            foreach (var l in model.layers)
            {
                if (m_CompiledLayers.ContainsKey(l))
                {
                    continue; // already compiled
                }
                if (l.inputs.Length == 0)
                {
                    continue; // don't need to compile layers without inputs, so far all of them are CPU only
                }
                var X = shapesByName[l.inputs[0]];
                var O = shapesByName[l.name];

                ComputeKernel kernel = new ComputeKernel();
                if (l.type == Layer.Type.Dense)
                {
                    var itemSize = 4; // @TODO: itemSizeInBytes == 2 | float16
                    kernel = BestKernel(
                        ComputeKernelLibrary.Dense(X, l.datasets[0].shape, O, itemSize >> 2));
                }
                else if (
                    l.type == Layer.Type.Conv2D)
                {
                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    kernel = BestKernel(
                        ComputeKernelLibrary.Conv2D(X, l.datasets[0].shape, O, l.stride, l.pad));
                }
                else if (
                    l.type == Layer.Type.DepthwiseConv2D)
                {
                    kernel = BestKernel(
                        ComputeKernelLibrary.DepthwiseConv2D(X, l.datasets[0].shape, O));
                }
                else if (
                    l.type == Layer.Type.Conv2DTrans)
                {
                    kernel = BestKernel(
                        ComputeKernelLibrary.Conv2DTrans(X, l.datasets[0].shape, O));
                }
                else if (
                    l.type == Layer.Type.Upsample2D)
                {
                    kernel = BestKernel(
                        ComputeKernelLibrary.Upsample2D(X, O));
                }
                else if (
                    l.type == Layer.Type.MaxPool2D ||
                    l.type == Layer.Type.AvgPool2D)
                {
                    var kernelName = l.type.ToString();

                    Assert.IsNotNull(l.pool);
                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    var pad = X.AdjustPadToPool(l.pool, l.stride, l.pad);
                    if (pad[0] == 0 && pad[1] == 0 && pad[2] == 0 && pad[3] == 0)
                    {
                        kernelName += "_NoPads";
                    }

                    kernel = BestKernel(
                        ComputeKernelLibrary.Pool2D(X, O, kernelName));
                }
                // @TODO: reimplement GlobalPools, currently require different kernels for each pyramid step
                //else if (
                //    l.type == Layer.Type.GlobalMaxPool2D ||
                //    l.type == Layer.Type.GlobalAvgPool2D)
                //{
                //    var kernelName = l.type.ToString();
                //    kernel = BestKernel(
                //        ComputeKernelLibrary.GlobalPool2D(X, O, kernelName));
                //}
                else if (
                    l.type == Layer.Type.ScaleBias)
                {
                    kernel = BestKernel(
                        ComputeKernelLibrary.ScaleBias(X, O));
                }
                // @TODO: reimplement Normalization, which became a multi-kernel operation after optimizations
                //else if (
                //    l.type == Layer.Type.Normalization)
                //{
                //    kernel = BestKernel(
                //        ComputeKernelLibrary.Normalization(X, O));
                //}
                else if (
                    l.type == Layer.Type.Add ||
                    l.type == Layer.Type.Sub ||
                    l.type == Layer.Type.Mul ||
                    l.type == Layer.Type.Div ||
                    l.type == Layer.Type.Pow ||
                    l.type == Layer.Type.Min ||
                    l.type == Layer.Type.Max
                    // || l.type == Layer.Type.Mean @TODO: implement BroadcastMean
                    )
                {
                    var kernelName = "Broadcast" + l.type;
                    kernel = BestKernel(
                        ComputeKernelLibrary.Broadcast(X, O, kernelName));
                }
                // @TODO: implement Concat, currently might require different kernel for each tensor
                //else if (
                //    l.type == Layer.Type.Concat) {}
                // Activations
                else if (l.type == Layer.Type.Activation)
                {
                    if (l.activation == Layer.Activation.Softmax)
                    {
                        kernel = BestKernel(
                            ComputeKernelLibrary.Softmax(X, O));
                    }
                    else if (l.activation == Layer.Activation.LogSoftmax)
                    {
                        kernel = BestKernel(
                            ComputeKernelLibrary.LogSoftmax(X, O));
                    }
                    else if (l.activation == Layer.Activation.PRelu)
                    {
                        kernel = BestKernel(
                            ComputeKernelLibrary.PRelu(X, O));
                    }
                    else if (l.activation != Layer.Activation.None)
                    {
                        var kernelName = l.activation.ToString();
                        kernel = BestKernel(
                            ComputeKernelLibrary.Activation(X, O, kernelName));
                    }
                }

                m_CompiledLayers.Add(l, new CompiledLayer {
                    kernel = kernel, shape = O
                });
            }
        }
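A hedged usage sketch for PrepareModel, assuming the ops object implements IModelCompiler (as the m_ModelCompiler cast in Example #3 suggests); the input name, shape, and the modelCompiler variable are placeholders:

        // Hypothetical call site: pre-compile kernels for a fixed input shape.
        var inputShapes = new Dictionary<string, TensorShape>
        {
            { "input", new TensorShape(1, 224, 224, 3) }
        };
        modelCompiler.PrepareModel(model, inputShapes);  // modelCompiler is an assumed IModelCompiler instance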
        private Model ConvertOnnxModel(ModelProto onnxModel)
        {
            var model        = new Model();
            var modelBuilder = new ModelBuilder(model);

            // Convert graph inputs & outputs
            var initializersByName = onnxModel.Graph.Initializer.ToDictionary(i => i.Name, i => true);

            foreach (ValueInfoProto i in onnxModel.Graph.Input)
            {
                // skip input tensors that have initializer data; they are constant tensors, not global inputs
                if (initializersByName.ContainsKey(i.Name))
                {
                    continue;
                }

                if (m_OverrideGlobalInputs.ContainsKey(i.Name))
                {
                    Const(i.Name, m_OverrideGlobalInputs[i.Name]);
                    continue;
                }

                modelBuilder.Input(i.Name, ONNXLayout.ConvertSymbolicShapeToBarracuda(i.Type.TensorType.Shape, onnxLayout: "NCHW"));
                Output(i.Name, onnxShape: i.Type.TensorType.Shape.Dim.Select(d => d.DimValue).ToArray(), onnxLayout: "NCHW");
            }
            foreach (ValueInfoProto o in onnxModel.Graph.Output)
            {
                modelBuilder.Output(o.Name);
            }

            // TODO: process model (recurrent nodes) memories

            // Read constants from initializer list
            foreach (TensorProto initializer in onnxModel.Graph.Initializer)
            {
                Const(initializer.Name, new ONNXTensor(initializer));
            }

            // Convert graph nodes
            foreach (NodeProto onnxNode in onnxModel.Graph.Node)
            {
                var node   = new ONNXNodeWrapper(onnxNode, m_ModelTensors, model.Warnings);
                var nodeId = node.Name;
                var opType = node.OperatorType;

                Output(node);

                bool injectDummy = false;
                if (m_NodeImporters.ContainsKey(opType))
                {
                    try
                    {
                        if (node.AreAllInputsConst && !m_ShouldNotBeBaked.Contains(opType))
                        {
                            Profiler.BeginSample($"Bake {opType} {node.Name}");
                            var bakedTensor = BakeNodeIntoConstant(m_NodeImporters[opType], node);
                            Const(node.Name, bakedTensor);
                            var printTensor = bakedTensor.ToBarracuda("NCHW");
                            D.Log($"Baked node {nodeId} into constant of shape {printTensor.shape} and values: {printTensor.DataToString()}");
                            Profiler.EndSample();
                        }
                        else
                        {
                            Profiler.BeginSample($"Import {opType} {node.Name}");
                            m_NodeImporters[opType](modelBuilder, node);
                            Profiler.EndSample();
                        }
                    }
                    catch (Exception e)
                    {
                        // We support the layer but something went wrong while importing it
                        // We log the problem and insert an identity layer
                        string message = $"Unexpected error while parsing layer {nodeId} of type {opType}.\n{e.Message}\n\nJson: {onnxNode}\n{e.StackTrace}\n";
                        Warn(model, nodeId, message);
                        injectDummy = true;
                    }
                }
                else
                {
                    // We don't support this type of layer:
                    // log the problem and insert an identity layer.
                    string message = $"Unknown type encountered while parsing layer {nodeId} of type {opType}. Replacing it with an identity layer.";
                    Warn(model, nodeId, message);
                    injectDummy = true;
                }

                if (injectDummy)
                {
                    var originalLayerHadInputs = (node.InputCount > 0);
                    if (originalLayerHadInputs)
                    {
                        modelBuilder.Identity(nodeId, node.Input0);
                    }
                    else // if the erroneous layer had no inputs, inject a dummy constant which does not require any inputs
                    {
                        modelBuilder.Const(nodeId, new Tensor());
                    }
                }

                m_ModelTensors.CompleteUninitializedFields(node);
            }

            // Convert constant tensors
            int insertionIndex = 0;

            foreach (var entry in constantTensors)
            {
                modelBuilder.Const(entry.Key, entry.Value.ToBarracuda(onnxLayout: "CONST"),
                                   insertionIndex++);
            }

            // Model should not contain any broken links in the end
            var unconnectedInputs = ModelAnalyzer.FindBrokenLinks(model);

            Debug.Assert(unconnectedInputs.Length == 0);
            if (unconnectedInputs.Length > 0)
            {
                var message = $"Broken links: {string.Join(", ", unconnectedInputs)}";
                Warn(model, "", message);
            }

            // Parse meta data
            var irVersion = onnxModel.IrVersion; // legacy

            if (onnxModel.OpsetImport?.Count > 0)
            {
                irVersion = onnxModel.OpsetImport[0].Version;
            }
            model.ProducerName = $"{onnxModel.ProducerName} v{onnxModel.ProducerVersion}";
            model.IrSource     = "ONNX";
            model.IrVersion    = $"{irVersion}";

            // strip :0 at the end of string name for TF import
            if (patchRemoveTrailingTFExportCharacters)
            {
                model.inputs = model.inputs.Select(i => {
                    i.name = i.name.EndsWith(":0") ? i.name.Remove(i.name.Length - 2) : i.name;
                    return i;
                }).ToList();
                model.outputs = model.outputs.Select(o =>
                    o.EndsWith(":0") ? o.Remove(o.Length - 2) : o).ToList();
                model.memories = model.memories.Select(m => {
                    m.input  = m.input.EndsWith(":0")  ? m.input.Remove(m.input.Length - 2)   : m.input;
                    m.output = m.output.EndsWith(":0") ? m.output.Remove(m.output.Length - 2) : m.output;
                    return m;
                }).ToList();
                model.layers = model.layers.Select(l => {
                    l.name = l.name.EndsWith(":0") ? l.name.Remove(l.name.Length - 2) : l.name;
                    for (int i = 0; i < l.datasets.Length; i++)
                        l.datasets[i].name = l.datasets[i].name.EndsWith(":0") ? l.datasets[i].name.Remove(l.datasets[i].name.Length - 2) : l.datasets[i].name;
                    for (int i = 0; i < l.inputs.Length; i++)
                        l.inputs[i] = l.inputs[i].EndsWith(":0") ? l.inputs[i].Remove(l.inputs[i].Length - 2) : l.inputs[i];
                    return l;
                }).ToList();
            }

            return model;
        }
        void OnEnable()
        {
            // TODO: investigate perf -- method takes 1s the first time you click on the model in the UI
            var nnModel = target as NNModel;

            if (nnModel == null)
            {
                return;
            }
            if (nnModel.modelData == null)
            {
                return;
            }

            m_Model = ModelLoader.Load(nnModel, verbose: false);
            if (m_Model == null)
            {
                return;
            }

            m_Inputs     = m_Model.inputs.Select(i => i.name).ToList();
            m_InputsDesc = m_Model.inputs.Select(i => $"shape: ({String.Join(",", i.shape)})").ToList();
            m_Outputs    = m_Model.outputs.ToList();

            bool allKnownShapes = true;
            var  inputShapes    = new Dictionary<string, TensorShape>();

            foreach (var i in m_Model.inputs)
            {
                allKnownShapes = allKnownShapes && !i.shape.Contains(-1) && !i.shape.Contains(0);
                if (!allKnownShapes)
                {
                    break;
                }
                inputShapes.Add(i.name, new TensorShape(i.shape));
            }
            if (allKnownShapes)
            {
                m_OutputsDesc = m_Model.outputs.Select(i => {
                    TensorShape shape;
                    bool success = ModelAnalyzer.TryGetOutputTensorShape(m_Model, inputShapes, i, out shape);
                    return success ? $"shape: {shape}" : "shape: (-1,-1,-1,-1)";
                }).ToList();
            }
            else
            {
                m_OutputsDesc = m_Model.outputs.Select(i => "shape: (-1,-1,-1,-1)").ToList();
            }

            m_Memories     = m_Model.memories.Select(i => i.input).ToList();
            m_MemoriesDesc = m_Model.memories.Select(i => $"shape:{i.shape.ToString()} output:{i.output}").ToList();

            m_Layers     = m_Model.layers.Select(i => i.type.ToString()).ToList();
            m_LayersDesc = m_Model.layers.Select(i => i.ToString()).ToList();
            m_NumWeights = m_Model.layers.Sum(l => (long)l.weights.Length).ToString();

            m_Warnings     = m_Model.Warnings.Select(i => i.LayerName).ToList();
            m_WarningsDesc = m_Model.Warnings.Select(i => i.Message).ToList();
        }