Example #1
0
        /// <summary>
        /// Runs sanity checks on <paramref name="model"/> and logs a warning for
        /// each issue found: broken links, duplicate output names, and outputs that
        /// no layer produces. The model itself is returned unchanged.
        /// </summary>
        internal static Model ValidateModel(Model model)
        {
            // Check 1: the model should not reference layers that do not exist.
            var danglingLinks = ModelAnalyzer.FindBrokenLinks(model);
            if (danglingLinks.Length > 0)
            {
                D.LogWarning($"Model contains {danglingLinks.Length} broken links: {string.Join(",", danglingLinks)}");
            }

            // Check 2: every declared output name should appear only once.
            var repeatedOutputs = from name in model.outputs
                                  group name by name into g
                                  where g.Count() > 1
                                  select g.Key;
            foreach (var name in repeatedOutputs)
            {
                D.LogWarning($"Output is specified more than once in the model: {name}");
            }

            // Check 3: every declared output should be produced by some layer.
            foreach (var name in ModelAnalyzer.FindUnconnectedOutputs(model))
            {
                D.LogWarning($"Layer is specified as output, but is missing in the model: {name}");
            }

            return model;
        }
        /// <summary>
        /// Returns a model adjusted to expose <paramref name="additionalOutputs"/>
        /// and, when <paramref name="trimOutputs"/> is given, restricted to the
        /// layers needed to compute the requested outputs. The incoming model is
        /// shallow-copied before each modification; no-op layers are removed last.
        /// </summary>
        internal static Model PatchModel(Model model, string[] additionalOutputs, string[] trimOutputs = null)
        {
            bool trimModel = trimOutputs != null;

            // Patching below inspects layer/output names, which needs a compiled model.
            if (model.flags.HasFlag(Model.Flags.NeedsCompilation))
            {
                model.Compile();
            }

            if (trimModel)
            {
                foreach (var name in trimOutputs.Except(model.outputs))
                {
                    // A trim target that is neither an existing output nor about to be
                    // added via additionalOutputs is most likely a user mistake.
                    bool willBeAdded = additionalOutputs != null && additionalOutputs.Contains(name);
                    if (!willBeAdded)
                    {
                        D.LogWarning($"Output specified in trimOutputs was not found in the model: {name}");
                    }
                }

                var trimmed = model.ShallowCopy();
                trimmed.outputs = trimOutputs.Intersect(model.outputs).ToList();
                model = trimmed;
            }

            if (additionalOutputs != null)
            {
                var layerNames = model.layers.Select(l => l.name);
                foreach (var name in additionalOutputs.Except(layerNames))
                {
                    D.LogWarning($"Layer specified in additionalOutputs was not found in the model: {name}");
                }

                // 'new' means that output name does not yet exist in model.outputs
                // 'valid' means that output name matches one of the existing model.layer names
                var newAndValidOutputs = additionalOutputs
                                         .Except(model.outputs)
                                         .Intersect(layerNames);

                var extended = model.ShallowCopy();
                extended.outputs.AddRange(newAndValidOutputs);
                model = extended;
            }

            if (trimModel)
            {
                // Drop every layer that is not upstream of the (possibly updated) outputs.
                var stripped = model.ShallowCopy();
                var upstream = ModelAnalyzer.FindUpstreamLayers(model, stripped.outputs.ToArray());
                foreach (var layer in model.layers)
                {
                    if (!upstream.Contains(layer))
                    {
                        stripped.layers.Remove(layer);
                    }
                }

                model = stripped;
            }

            model = ModelOptimizer.RemoveNoop(model);

            return model;
        }
Example #3
0
        /// <summary>
        /// Creates a worker that executes <paramref name="model"/> using the
        /// supplied backend <paramref name="ops"/> and tensor storage
        /// <paramref name="vars"/>.
        /// </summary>
        public GenericWorker(Model model, IOps ops, IVars vars, bool verbose = false)
        {
            m_Model = model;
            m_Ops = ops;
            m_Vars = vars;
            m_Verbose = verbose;

            // Cache default input/output names up front so execution does not have
            // to re-derive them on every call.
            m_DefaultInputName = ModelAnalyzer.GetDefaultInputName(model);
            m_DefaultOutputName = ModelAnalyzer.GetDefaultOutputName(model);

            // Non-null only when the backend supports model pre-compilation.
            m_ModelCompiler = ops as IModelCompiler;

            // Force an allocator reset before the first execution.
            m_RequestResetAllocator = true;
        }
Example #4
0
        /// <summary>
        /// Prepares per-execution storage. The (potentially expensive) analysis of
        /// which layers need dedicated storage is re-run only when a different
        /// model is seen; the model is then cached for the next call.
        /// </summary>
        public override void PrepareStorage(Model model, IOps ops, IDictionary <string, TensorShape> inputShapes)
        {
            base.PrepareStorage(model, ops, inputShapes);

            ReleaseTemporary();

            bool modelChanged = m_CachedModel != model;
            if (modelChanged)
            {
                m_LayersWithStorage = ModelAnalyzer.FindLayersThatRequireStorage(model);
            }
            m_CachedModel = model;

            // ReleaseTemporary() above must have cleared the temporary buffer.
            Assert.AreEqual(m_Temporary, null);
        }
Example #5
0
        /// <summary>
        /// Strips layers that the analyzer reports as unused, except layers listed
        /// in <paramref name="keepLayers"/> and layers backing model memories or
        /// declared outputs. Mutates <paramref name="model"/> in place.
        /// </summary>
        public static void RemoveUnused(Model model, HashSet <string> keepLayers)
        {
            // TODO: strip layers not useful to compute output

            // Names that must survive regardless of the usage analysis:
            // declared model outputs plus memory input/output tensors.
            var preserve = new HashSet <string>(model.outputs);
            foreach (var mem in model.memories)
            {
                preserve.Add(mem.input);
                preserve.Add(mem.output);
            }

            // Strip unused layers
            var unusedLayers = new HashSet <string>(ModelAnalyzer.FindUnusedLayers(model));
            if (keepLayers != null) // Except explicitly specified for keeping
            {
                unusedLayers.ExceptWith(keepLayers);
            }

            model.layers = model.layers.Where(l => !unusedLayers.Contains(l.name) || preserve.Contains(l.name)).ToList();
        }
Example #6
0
        /// <summary>
        /// Prepares temporary storage for execution. When a new model is seen,
        /// pre-allocates two buffers sized for the largest intermediate tensor,
        /// lets the backend prepare them, and immediately releases them back to the
        /// allocator so they can be cycled for temporaries during execution.
        /// </summary>
        public override void PrepareStorage(Model model, IOps ops, IDictionary <string, TensorShape> inputShapes)
        {
            base.PrepareStorage(model, ops, inputShapes);

            if (m_CachedModel != model)
            {
                var allocator = m_TemporaryAllocator;
                var maxShape  = ModelAnalyzer.FindLargestNecessaryTensorShape(model, inputShapes);

                // Warm up two ping-pong buffers, then return them to the pool.
                var pingBuffer = allocator.Alloc(maxShape);
                var pongBuffer = allocator.Alloc(maxShape);
                pingBuffer = ops.Prepare(pingBuffer);
                pongBuffer = ops.Prepare(pongBuffer);
                allocator.Release(pingBuffer, false);
                allocator.Release(pongBuffer, false);
            }
            m_CachedModel = model;
        }
Example #7
0
        /// <summary>
        /// Optimizes <paramref name="model"/> in place: strips layers reported as
        /// unused by the analyzer (unless listed in <paramref name="keepLayers"/>)
        /// and, when <paramref name="allowFusing"/> is set, fuses activations into
        /// preceding layers.
        /// </summary>
        /// <param name="model">Model to optimize; mutated and returned.</param>
        /// <param name="allowFusing">Enables activation fusing.</param>
        /// <param name="keepLayers">Layer names to keep even if reported unused.</param>
        /// <returns>The same model instance, optimized.</returns>
        public static Model Optimize(Model model, bool allowFusing, HashSet <string> keepLayers = null)
        {
            // Strip unused layers
            var unusedLayers = new HashSet <string>(ModelAnalyzer.FindUnusedLayers(model));

            if (keepLayers != null) // Except explicitly specified for keeping
            {
                unusedLayers.ExceptWith(keepLayers);
            }
            model.layers = model.layers.Where(l => !unusedLayers.Contains(l.name)).ToList();

            if (allowFusing)
            {
                FuseActivations(model);
            }

            return model;
        }
Example #8
0
        /// <summary>
        /// Get model tensor shape by name. Lookup order: model inputs, statically
        /// computable layer outputs, layer datasets (constants/weights), then
        /// memory tensors.
        /// </summary>
        /// <param name="model">Model</param>
        /// <param name="name">Tensor name</param>
        /// <returns>Tensor shape</returns>
        /// <exception cref="System.Collections.Generic.KeyNotFoundException">
        /// Thrown when no tensor with the given name exists in the model.
        /// </exception>
        public static TensorShape? GetShapeByName(this Model model, string name)
        {
            // 1) model inputs carry an explicit shape
            foreach (var i in model.inputs)
            {
                if (i.name == name)
                {
                    return new TensorShape(i.shape);
                }
            }

            // 2) layer output shapes, when the analyzer can compute them
            TensorShape shape;
            if (ModelAnalyzer.TryGetOutputTensorShape(model, name, out shape))
            {
                return shape;
            }

            // 3) constant tensors (weights/datasets) embedded in layers
            foreach (var l in model.layers)
            {
                foreach (var ds in l.datasets)
                {
                    if (ds.name == name)
                    {
                        return ds.shape;
                    }
                }
            }

            // 4) recurrent state (memory) tensors
            foreach (var mem in model.memories)
            {
                if (mem.input == name || mem.output == name)
                {
                    return mem.shape;
                }
            }

            throw new System.Collections.Generic.KeyNotFoundException("Shape " + name + " not found!");
        }
Example #9
0
        /// <summary>
        /// Pre-compiles compute kernels for every layer of <paramref name="model"/>
        /// given concrete input shapes. Results are stored in m_CompiledLayers and
        /// guarded by a hash of model + input shapes (m_CachedModelHash), so a
        /// repeated call with identical model/shapes returns immediately.
        /// Layers whose input or output shape is unknown are skipped and fall back
        /// to the uncompiled path at execution time.
        /// </summary>
        /// <param name="model">Model whose layers are to be compiled.</param>
        /// <param name="inputShapes">Concrete shapes for the model inputs, keyed by name.</param>
        public virtual void PrepareModel(Model model, IDictionary <string, TensorShape> inputShapes)
        {
            var modelHash = CalcModelWithInputsHashCode(model, inputShapes);

            // Same model and same input shapes: previous compilation still applies.
            if (modelHash == m_CachedModelHash)
            {
                return;
            }

            m_CachedModelHash = modelHash;
            // Dispose tensors held by previously compiled instructions before
            // discarding the old compilation results.
            // NOTE(review): assumes instruction.tensors is never null here — confirm
            // CompiledInstruction always initializes it (some instructions below are
            // created without setting tensors).
            foreach (var l in m_CompiledLayers)
            {
                foreach (var i in l.Value.instructions)
                {
                    if (i.tensors.Length == 0)
                    {
                        continue;
                    }
                    foreach (var t in i.tensors)
                    {
                        t.Dispose();
                    }
                }
            }
            m_CompiledLayers.Clear();

            IDictionary <string, TensorShape?> shapesByName;

            // Infer the shape of every intermediate tensor from the input shapes;
            // entries are null when a shape cannot be determined statically.
            ModelAnalyzer.ListTemporaryTensorShapes(model, inputShapes, out shapesByName);

            foreach (var l in model.layers)
            {
                if (m_CompiledLayers.ContainsKey(l))
                {
                    continue; // already compiled
                }
                if (l.inputs.Length == 0)
                {
                    continue; // don't need to compile layers without inputs, so far all of them are CPU only
                }
                if (shapesByName[l.inputs[0]] == null || shapesByName[l.name] == null)
                {
                    continue;
                }

                // X = shape of the layer's first input, O = shape of its output.
                var X = shapesByName[l.inputs[0]].Value;
                var O = shapesByName[l.name].Value;

                ComputeKernel kernel = new ComputeKernel();
                // Dense: matmul kernel, plus optional input-flatten and activation kernels.
                if (l.type == Layer.Type.Dense)
                {
                    var instructions = new List <CompiledInstruction>();
                    var itemSize     = 4; // @TODO: itemSizeInBytes == 2 | float16
                    kernel = BestKernel(ComputeKernelLibrary.Dense(X, l.datasets[0].shape, O, itemSize >> 2));
                    instructions.Add(new CompiledInstruction {
                        kernel = kernel, shape = O
                    });

                    if (ShouldFlattenInputForDenseLayer(X))
                    {
                        var flattenedShape = X.Flatten();
                        var flattenKernel  = BestKernel(ComputeKernelLibrary.ReshapeFromNHWCModel(flattenedShape));
                        instructions.Add(new CompiledInstruction {
                            kernel = flattenKernel, shape = flattenedShape
                        });
                    }

                    // FusedActivation
                    // Activations the Dense kernel cannot fuse get their own kernel.
                    var fusedActivation = (Layer.FusedActivation)l.activation;
                    if (!IsFusedActivationSupported(fusedActivation))
                    {
                        var activationKernel = BestKernel(ComputeKernelLibrary.Activation(X, O, fusedActivation.ToString()));
                        instructions.Add(new CompiledInstruction {
                            kernel = activationKernel, shape = O
                        });
                    }

                    m_CompiledLayers.Add(l, new CompiledLayer {
                        instructions = instructions.ToArray(), shape = O
                    });
                    continue;
                }
                else if (
                    l.type == Layer.Type.Conv2D)
                {
                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    var instructions = new List <CompiledInstruction>();

                    // Conv2D
                    // Winograd kernels need pre-transformed weights, prepared here.
                    kernel = BestKernel(ComputeKernelLibrary.Conv2D(X, l.datasets[0].shape, O, l.stride, l.pad));
                    if (kernel.func.kernelName.StartsWith("Conv2DWinograd_2x2_3x3"))
                    {
                        instructions.Add(new CompiledInstruction {
                            kernel = kernel, shape = O, tensors = PrepareConv2dWinograd(model, l)
                        });
                    }
                    else
                    {
                        instructions.Add(new CompiledInstruction {
                            kernel = kernel, shape = O
                        });
                    }

                    // FusedActivation
                    var fusedActivation = (Layer.FusedActivation)l.activation;
                    if (!IsFusedActivationSupported(fusedActivation))
                    {
                        var activationKernel = BestKernel(ComputeKernelLibrary.Activation(X, O, fusedActivation.ToString()));
                        instructions.Add(new CompiledInstruction {
                            kernel = activationKernel, shape = O
                        });
                    }

                    m_CompiledLayers.Add(l, new CompiledLayer {
                        instructions = instructions.ToArray(), shape = O
                    });
                    continue;
                }
                else if (
                    l.type == Layer.Type.DepthwiseConv2D)
                {
                    kernel = BestKernel(
                        ComputeKernelLibrary.DepthwiseConv2D(X, l.datasets[0].shape, O));
                }
                else if (
                    l.type == Layer.Type.Conv2DTrans)
                {
                    // Transposed convolution is implemented as: zero-stuff/pad the
                    // input, transform the kernel weights, then run a regular Conv2D.
                    var outputAdjustment = l.pool;
                    var stride           = l.stride;

                    var K   = l.datasets[0].shape;
                    var B   = l.datasets[1].shape;
                    // Padding for the equivalent direct convolution.
                    var pad = new int[]
                    {
                        K.kernelWidth - l.pad[0] - 1, K.kernelHeight - l.pad[1] - 1,
                        K.kernelWidth - l.pad[2] - 1, K.kernelHeight - l.pad[3] - 1
                    };

                    // NOTE(review): stride[0] is applied to both height and width —
                    // confirm stride[1] isn't intended for the width term.
                    var XpaddedShape = new TensorShape(X.batch, stride[0] * (X.height - 1) + 1 + outputAdjustment[0], stride[0] * (X.width - 1) + 1 + outputAdjustment[1], X.channels);

                    var kernelFill = CompileKernel(new ComputeKernelLibrary.Entry("Conv2DTransPadFill", (X.channels, X.width, X.height), 1.0f, 0));

                    var kernelConv = BestKernel(
                        ComputeKernelLibrary.Conv2D(XpaddedShape, K, O, new int[] { 1, 1 }, pad));
                    bool isConvWinograd = (kernelConv.func.kernelName.StartsWith("Conv2DWinograd_2x2_3x3"));

                    // Three instructions: pad-fill, weight transform, convolution.
                    m_CompiledLayers.Add(l, new CompiledLayer {
                        instructions = new CompiledInstruction[]
                        {
                            new CompiledInstruction {
                                kernel = kernelFill, shape = XpaddedShape
                            },
                            new CompiledInstruction {
                                shape = K, tensors = PrepareConv2DTrans(model, l)
                            },
                            new CompiledInstruction {
                                kernel = kernelConv, shape = O, tensors = isConvWinograd ? PrepareConv2dWinograd(model, l) : null
                            }
                        }, shape = O
                    });

                    continue;
                }
                else if (
                    l.type == Layer.Type.Upsample2D)
                {
                    // axis is treated as upsample point/bilinear flag
                    var bilinear = l.axis > 0;
                    kernel = BestKernel(
                        ComputeKernelLibrary.Upsample2D(X, O, l.pool, bilinear));
                }
                else if (
                    l.type == Layer.Type.MaxPool2D ||
                    l.type == Layer.Type.AvgPool2D)
                {
                    var kernelName = l.type.ToString();

                    Assert.IsNotNull(l.pool);
                    Assert.IsNotNull(l.stride);
                    Assert.IsNotNull(l.pad);
                    kernel = BestKernel(
                        ComputeKernelLibrary.Pool2D(X, O, kernelName));
                }
                else if (
                    l.type == Layer.Type.GlobalMaxPool2D ||
                    l.type == Layer.Type.GlobalAvgPool2D)
                {
                    // Large spatial extents are reduced stepwise with 8x8 pooling
                    // kernels until small enough for a single global-pool kernel.
                    var poolKernelName   = l.type.ToString().Substring(6) + "Reduce";
                    var globalKernelName = l.type.ToString();

                    var instructions = new List <CompiledInstruction>();
                    var Xr           = X;
                    while (Xr.height * Xr.width >= 64)
                    {
                        var lastLength = Xr.length;
                        var pool       = new[] { 8, 8 };
                        var stride     = pool;
                        var pad        = new[] { 0, 0, 0, 0 };

                        var Oshape     = Xr.ApplyPool(pool, stride, pad, ceilMode: true);
                        var Or         = new TensorShape(Oshape.batch, IDivC(Oshape.height, 2), IDivC(Oshape.width, 2), Oshape.channels);
                        var poolKernel = BestKernel(
                            ComputeKernelLibrary.Pool2DReduce(Xr, Or, poolKernelName));

                        instructions.Add(new CompiledInstruction {
                            kernel = poolKernel, shape = Or
                        });

                        Xr = Or;
                        // Guard against a non-shrinking reduction (infinite loop).
                        Assert.IsTrue(Xr.length < lastLength);
                    }

                    var globalKernel = BestKernel(
                        ComputeKernelLibrary.GlobalPool2D(Xr, O, globalKernelName));

                    instructions.Add(new CompiledInstruction {
                        kernel = globalKernel, shape = O
                    });

                    m_CompiledLayers.Add(l, new CompiledLayer {
                        instructions = instructions.ToArray(), shape = O
                    });

                    continue;
                }
                else if (
                    l.type == Layer.Type.ScaleBias)
                {
                    kernel = BestKernel(
                        ComputeKernelLibrary.ScaleBias(X, O));
                }
                else if (
                    l.type == Layer.Type.Normalization)
                {
                    // GlobalAvgVariancePool2D
                    // Same stepwise reduction as global pooling, but accumulating
                    // mean and variance instead of max/avg.
                    var poolKernelName   = "AvgVariancePool2DReduce";
                    var globalKernelName = "GlobalAvgVariancePool2D";

                    var instructions = new List <CompiledInstruction>();
                    var Xr           = X;
                    while (Xr.height * Xr.width >= 64)
                    {
                        var lastLength = Xr.length;
                        var pool       = new[] { 8, 8 };
                        var stride     = pool;
                        var pad        = new[] { 0, 0, 0, 0 };

                        var Oshape     = Xr.ApplyPool(pool, stride, pad, ceilMode: true);
                        var Or         = new TensorShape(Oshape.batch, IDivC(Oshape.height, 2), IDivC(Oshape.width, 2), Oshape.channels);
                        var poolKernel = BestKernel(
                            ComputeKernelLibrary.PoolAvgVar2D(Xr, Or, poolKernelName));

                        instructions.Add(new CompiledInstruction {
                            kernel = poolKernel, shape = Or
                        });

                        Xr = Or;
                        Assert.IsTrue(Xr.length < lastLength);
                    }

                    // Two rows per batch/channel: mean and variance.
                    var meanVariance = new TensorShape(Xr.batch, 2, 1, Xr.channels);
                    var globalKernel = BestKernel(
                        ComputeKernelLibrary.GlobalPool2D(Xr, meanVariance, globalKernelName));
                    instructions.Add(new CompiledInstruction {
                        kernel = globalKernel, shape = meanVariance
                    });

                    // ScaleBias
                    var S = l.datasets[0].shape;
                    var B = l.datasets[1].shape;
                    // Scale and bias must be per-channel vectors matching the input.
                    Assert.AreEqual(X.channels, B.channels); Assert.AreEqual(X.channels, S.channels);
                    Assert.AreEqual(B.length, B.channels); Assert.AreEqual(S.length, S.channels);
                    var normlizationKernel = BestKernel(ComputeKernelLibrary.NormalizationTail(X, O));
                    instructions.Add(new CompiledInstruction {
                        kernel = normlizationKernel, shape = O
                    });

                    // FusedActivation
                    var fusedActivation = (Layer.FusedActivation)l.activation;
                    if (!IsFusedActivationSupported(fusedActivation))
                    {
                        var activationKernel = BestKernel(ComputeKernelLibrary.Activation(X, O, fusedActivation.ToString()));
                        instructions.Add(new CompiledInstruction {
                            kernel = activationKernel, shape = O
                        });
                    }
                    else
                    {
                        // Placeholder so the instruction count stays fixed for this layer type.
                        instructions.Add(new CompiledInstruction {
                            shape = O
                        });
                    }

                    m_CompiledLayers.Add(l, new CompiledLayer {
                        instructions = instructions.ToArray(), shape = O
                    });
                    continue;
                }
                else if (
                    l.type == Layer.Type.Add ||
                    l.type == Layer.Type.Sub ||
                    l.type == Layer.Type.Mul ||
                    l.type == Layer.Type.Div ||
                    l.type == Layer.Type.Pow ||
                    l.type == Layer.Type.Min ||
                    l.type == Layer.Type.Max ||
                    l.type == Layer.Type.Mean
                    )
                {
                    // NOTE(review): kernel is selected from the first input's shape
                    // only — confirm this is valid for broadcast with differing inputs.
                    var kernelName = "Broadcast" + l.type;
                    kernel = BestKernel(
                        ComputeKernelLibrary.Broadcast(X, O, kernelName));
                }
                else if (
                    l.type == Layer.Type.Concat)
                {
                    // One copy kernel per input; inputs with unknown shape get an
                    // empty instruction and are handled at execution time.
                    var instructions = new List <CompiledInstruction>();

                    foreach (var input in l.inputs)
                    {
                        var I = shapesByName[input];

                        if (I == null)
                        {
                            instructions.Add(new CompiledInstruction {
                            });
                            continue;
                        }
                        var kernelI = BestKernel(ComputeKernelLibrary.Copy(I.Value, O));

                        instructions.Add(new CompiledInstruction {
                            kernel = kernelI, shape = I.Value
                        });
                    }

                    m_CompiledLayers.Add(l, new CompiledLayer {
                        instructions = instructions.ToArray(), shape = O
                    });
                    continue;
                }
                // Activations
                else if (l.type == Layer.Type.Activation)
                {
                    if (l.activation == Layer.Activation.Softmax)
                    {
                        kernel = BestKernel(
                            ComputeKernelLibrary.Softmax(X, O));
                    }
                    else if (l.activation == Layer.Activation.LogSoftmax)
                    {
                        kernel = BestKernel(
                            ComputeKernelLibrary.LogSoftmax(X, O));
                    }
                    else if (l.activation == Layer.Activation.PRelu)
                    {
                        kernel = BestKernel(
                            ComputeKernelLibrary.PRelu(X, O));
                    }
                    else if (l.activation != Layer.Activation.None)
                    {
                        // Generic activation kernel resolved by enum name.
                        var kernelName = l.activation.ToString();
                        kernel = BestKernel(
                            ComputeKernelLibrary.Activation(X, O, kernelName));
                    }
                }

                // Fallthrough for all single-kernel layer types handled above
                // (kernel stays default-initialized for unhandled types).
                m_CompiledLayers.Add(l, new CompiledLayer {
                    instructions = new CompiledInstruction[]
                    {
                        new CompiledInstruction {
                            kernel = kernel, shape = O
                        }
                    }, shape = O
                });
            }
        }
        /// <summary>
        /// Caches human-readable model information (inputs, outputs, memories,
        /// layers, constants, warnings) for display in the inspector UI. Weights
        /// are skipped during load; only structure and shape metadata is read.
        /// </summary>
        void OnEnable()
        {
            // TODO: investigate perf -- method takes 1s the first time you click on the model in the UI
            var nnModel = target as NNModel;

            if (nnModel == null)
            {
                return;
            }
            if (nnModel.modelData == null)
            {
                return;
            }

            m_Model = ModelLoader.Load(nnModel, verbose: false, skipWeights: true);
            if (m_Model == null)
            {
                return;
            }

            m_Inputs     = m_Model.inputs.Select(i => i.name).ToList();
            m_InputsDesc = m_Model.inputs.Select(i => $"shape: ({String.Join(",", i.shape)})").ToList();
            m_Outputs    = m_Model.outputs.ToList();

            // Output shapes can only be evaluated when every input shape is fully
            // known (no dynamic -1 or 0 dimensions).
            bool allKnownShapes = true;
            var  inputShapes    = new Dictionary <string, TensorShape>();

            foreach (var i in m_Model.inputs)
            {
                allKnownShapes = allKnownShapes && !i.shape.Contains(-1) && !i.shape.Contains(0);
                if (!allKnownShapes)
                {
                    break;
                }
                inputShapes.Add(i.name, new TensorShape(i.shape));
            }
            if (allKnownShapes)
            {
                m_OutputsDesc = m_Model.outputs.Select(i => {
                    // Default shown when the shape cannot be computed for this output.
                    string output = "(-1,-1,-1,-1)";
                    try
                    {
                        TensorShape shape;
                        if (ModelAnalyzer.TryGetOutputTensorShape(m_Model, inputShapes, i, out shape))
                        {
                            output = shape.ToString();
                        }
                    }
                    catch (Exception e)
                    {
                        Debug.LogError($"Unexpected error while evaluating model output {i}. {e}");
                    }
                    return($"shape: {output}");
                }).ToList();
            }
            else
            {
                m_OutputsDesc = m_Model.outputs.Select(i => "shape: (-1,-1,-1,-1)").ToList();
            }

            m_Memories     = m_Model.memories.Select(i => i.input).ToList();
            m_MemoriesDesc = m_Model.memories.Select(i => $"shape:{i.shape.ToString()} output:{i.output}").ToList();

            // Materialize once: these sequences are enumerated three times each
            // below (two Selects and a Sum); a deferred Where() would re-scan
            // model.layers on every enumeration.
            var layers    = m_Model.layers.Where(i => i.type != Layer.Type.Load).ToList();
            var constants = m_Model.layers.Where(i => i.type == Layer.Type.Load).ToList();

            m_Layers        = layers.Select(i => i.type.ToString()).ToList();
            m_LayersDesc    = layers.Select(i => i.ToString()).ToList();
            m_Constants     = constants.Select(i => i.type.ToString()).ToList();
            m_ConstantsDesc = constants.Select(i => i.ToString()).ToList();

            m_NumEmbeddedWeights = layers.Sum(l => (long)l.datasets.Sum(ds => (long)ds.length));
            m_NumConstantWeights = constants.Sum(l => (long)l.datasets.Sum(ds => (long)ds.length));

            // weights are not loaded for UI, recompute size
            // NOTE(review): this sums element counts (ds.length), not bytes —
            // confirm a per-element size multiplier isn't expected elsewhere.
            m_TotalWeightsSizeInBytes = 0;
            for (var l = 0; l < m_Model.layers.Count; ++l)
            {
                for (var d = 0; d < m_Model.layers[l].datasets.Length; ++d)
                {
                    m_TotalWeightsSizeInBytes += m_Model.layers[l].datasets[d].length;
                }
            }

            m_Warnings     = m_Model.Warnings.Select(i => i.LayerName).ToList();
            m_WarningsDesc = m_Model.Warnings.Select(i => i.Message).ToList();
        }