Example #1
        /// <summary>
        /// Create a worker that will execute `asset` using the best backend that is available for a given `device` type.
        /// This is just a convenience function that internally calls `ModelLoader.Load` followed by `WorkerFactory.CreateWorker`.
        /// `asset` is the associated NNModel asset.
        /// `additionalOutputs` are additional outputs to track that are not directly specified by the model.
        /// `trimOutputs` are the outputs to keep; all other outputs are discarded even if the model specifies them.
        /// `device` is the device type to run the worker on. For example `WorkerFactory.Device.GPU` specifies the fast GPU path.
        /// `verbose` will log the scheduling of layer execution to the console (default == false).
        /// </summary>
        public static IWorker CreateWorker(this NNModel asset,
                                           string[] additionalOutputs, string[] trimOutputs, WorkerFactory.Device device = WorkerFactory.Device.Auto, bool verbose = false)
        {
            var model = ModelLoader.Load(asset);

            return model.CreateWorker(additionalOutputs, trimOutputs, device, verbose);
        }
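A minimal usage sketch for this overload (not part of the original listing): it assumes the `Unity.Barracuda` namespace, a hypothetical serialized `NNModel` field named `modelAsset`, and that `"hidden"` and `"output"` are layer names that actually exist in the imported graph.

using Unity.Barracuda;   // namespace assumed; older releases used `Barracuda`
using UnityEngine;

public class ExtraOutputsExample : MonoBehaviour
{
    public NNModel modelAsset;          // hypothetical serialized field, assigned in the Inspector
    IWorker worker;

    void Start()
    {
        // Track one extra layer ("hidden") and keep only the two listed outputs;
        // everything else the model declares as an output is trimmed away.
        worker = modelAsset.CreateWorker(
            additionalOutputs: new[] { "hidden" },
            trimOutputs:       new[] { "output", "hidden" },
            device:            WorkerFactory.Device.GPU);
    }

    void OnDestroy()
    {
        worker?.Dispose();              // workers own GPU/CPU resources and must be disposed
    }
}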
Example #2
        /// <summary>
        /// Create a worker that will execute `asset` using the best backend that is available for a given `device` type.
        /// This is just a convenience function that internally calls `ModelLoader.Load` followed by `WorkerFactory.CreateWorker`.
        /// `asset` is the associated NNModel asset.
        /// `device` is the preferred device for execution. For example `WorkerFactory.Device.GPU` specifies the fast GPU path.
        /// `verbose` will log the scheduling of layer execution to the console.
        /// </summary>
        public static IWorker CreateWorker(this NNModel asset,
                                           WorkerFactory.Device device = WorkerFactory.Device.Auto, bool verbose = false)
        {
            var model = ModelLoader.Load(asset);

            return model.CreateWorker(device, verbose);
        }
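A sketch of the simpler overload in a full execute/fetch cycle (again an illustration, not from the listing; the input shape and the Tensor calls are assumptions based on the public Barracuda API):

using Unity.Barracuda;   // namespace assumed
using UnityEngine;

public class InferenceExample : MonoBehaviour
{
    public NNModel modelAsset;          // hypothetical serialized field
    IWorker worker;

    void Start()
    {
        // Let Barracuda pick the best available backend for the requested device.
        worker = modelAsset.CreateWorker(WorkerFactory.Device.Auto, verbose: true);
    }

    void Update()
    {
        // Shape (batch:1, channels:4) is a placeholder; it must match the model's input.
        using (var input = new Tensor(1, 4))
        {
            for (int i = 0; i < 4; i++)
                input[i] = i;                      // fill via the flat indexer

            worker.Execute(input);
            Tensor output = worker.PeekOutput();   // borrowed reference, owned by the worker
            Debug.Log(output[0]);
        }
    }

    void OnDestroy()
    {
        worker?.Dispose();
    }
}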
Example #3
        /// <summary>
        /// Scripted importer callback
        /// </summary>
        /// <param name="ctx">Asset import context</param>
        public override void OnImportAsset(AssetImportContext ctx)
        {
            var converter = new ONNXModelConverter(optimizeModel, treatErrorsAsWarnings, forceArbitraryBatchSize, importMode);

            var model = converter.Convert(ctx.assetPath);

            NNModelData assetData = ScriptableObject.CreateInstance<NNModelData>();

            using (var memoryStream = new MemoryStream())
                using (var writer = new BinaryWriter(memoryStream))
                {
                    ModelWriter.Save(writer, model);
                    assetData.Value = memoryStream.ToArray();
                }
            assetData.name      = "Data";
            assetData.hideFlags = HideFlags.HideInHierarchy;

            NNModel asset = ScriptableObject.CreateInstance<NNModel>();

            asset.modelData = assetData;

            ctx.AddObjectToAsset("main obj", asset, LoadIconTexture());
            ctx.AddObjectToAsset("model data", assetData);

            ctx.SetMainObject(asset);
        }
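The callback above serializes the converted `Model` into the raw bytes stored in `NNModelData.Value`. A small round-trip sketch of that serialization step, runnable outside the importer (assuming `ModelWriter.Save` and a `ModelLoader.Load(byte[])` overload are publicly accessible, as the later examples suggest):

using System.IO;
using Unity.Barracuda;   // namespace assumed

public static class ModelRoundTrip
{
    // Serialize a Model to the same byte format the importer writes into NNModelData.Value.
    public static byte[] ToBytes(Model model)
    {
        using (var memoryStream = new MemoryStream())
        using (var writer = new BinaryWriter(memoryStream))
        {
            ModelWriter.Save(writer, model);
            return memoryStream.ToArray();
        }
    }

    // Read the bytes back into an object-oriented Model, as Example #6 does with modelData.Value.
    public static Model FromBytes(byte[] bytes)
    {
        return ModelLoader.Load(bytes);
    }
}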
Example #4
        /// <summary>
        /// Return an object-oriented representation (aka `Model`) of a neural network from a binary representation of type `NNModel`.
        /// By default details are not logged to the console; set `verbose` to true to see loading details.
        /// </summary>
        /// <param name="nnModel">binary representation of the model</param>
        /// <param name="model">object-oriented representation of the model (must be initialized before calling this method)</param>
        /// <param name="verbose">verbose</param>
        /// <param name="skipWeights">skip loading weights (fast loading, metadata only)</param>
        /// <param name="maxTimePerYield">the maximum amount of time to spend in computation before yielding</param>
        /// <returns>IEnumerator (use with StartCoroutine)</returns>
        public static IEnumerator LoadAsync(NNModel nnModel, Model model, bool verbose = false, bool skipWeights = false, float maxTimePerYield = 0.01f)
        {
            Assert.IsNotNull(model);
            var enumerator = LoadAsync(Open(nnModel.modelData.Value), model, verbose, true, skipWeights, maxTimePerYield);

            while (enumerator.MoveNext())
            {
                model = (Model)enumerator.Current;
                if (model != null)
                {
                    yield return null;
                }
            }
        }
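A coroutine usage sketch for the asynchronous loader (illustrative only; it assumes these statics live on `ModelLoader`, as in the synchronous `Load` shown later, and that `Model` has an accessible parameterless constructor, since the caller must create the target instance before the call):

using System.Collections;
using Unity.Barracuda;   // namespace assumed
using UnityEngine;

public class AsyncLoadExample : MonoBehaviour
{
    public NNModel modelAsset;          // hypothetical serialized field
    Model model;

    IEnumerator Start()
    {
        model = new Model();            // must exist before LoadAsync is called (see the Assert above)
        // Spend at most ~5 ms of loading work per slice before yielding back to Unity.
        yield return StartCoroutine(
            ModelLoader.LoadAsync(modelAsset, model, verbose: false, skipWeights: false, maxTimePerYield: 0.005f));

        Debug.Log("Model loaded");
    }
}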
Example #5
        /// <summary>
        /// Scripted importer callback
        /// </summary>
        /// <param name="ctx">Asset import context</param>
        public override void OnImportAsset(AssetImportContext ctx)
        {
            ONNXModelConverter.ModelImported += BarracudaAnalytics.SendBarracudaImportEvent;
            var converter = new ONNXModelConverter(optimizeModel, treatErrorsAsWarnings, forceArbitraryBatchSize, importMode);

            var model = converter.Convert(ctx.assetPath);

            if (weightsTypeMode == ONNXModelConverter.DataTypeMode.ForceHalf)
            {
                model.ConvertWeights(DataType.Half);
            }
            else if (weightsTypeMode == ONNXModelConverter.DataTypeMode.ForceFloat)
            {
                model.ConvertWeights(DataType.Float);
            }

            NNModelData assetData = ScriptableObject.CreateInstance<NNModelData>();

            using (var memoryStream = new MemoryStream())
                using (var writer = new BinaryWriter(memoryStream))
                {
                    ModelWriter.Save(writer, model);
                    assetData.Value = memoryStream.ToArray();
                }
            assetData.name      = "Data";
            assetData.hideFlags = HideFlags.HideInHierarchy;

            NNModel asset = ScriptableObject.CreateInstance<NNModel>();

            asset.modelData = assetData;

            ctx.AddObjectToAsset("main obj", asset, LoadIconTexture());
            ctx.AddObjectToAsset("model data", assetData);

            ctx.SetMainObject(asset);
        }
Example #6
 /// <summary>
 /// Return an object-oriented representation (aka `Model`) of a neural network from a binary representation of type `NNModel`.
 /// By default details are not logged to the console; set `verbose` to true to see loading details.
 /// </summary>
 /// <param name="model">the NNModel asset to load</param>
 /// <param name="verbose">verbose</param>
 /// <param name="skipWeights">skip loading weights (fast loading, metadata only)</param>
 /// <returns>loaded Model</returns>
 public static Model Load(NNModel model, bool verbose = false, bool skipWeights = false)
 {
     return Load(model.modelData.Value, verbose, skipWeights);
 }
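A short sketch of how this overload can be combined with `skipWeights` to inspect a model's structure cheaply (illustrative; the `Model.inputs`/`Model.outputs` members used for inspection are assumptions about the public `Model` type, and the `Model.CreateWorker` extension is the one shown in Example #2):

using Unity.Barracuda;   // namespace assumed
using UnityEngine;

public static class ModelInspection
{
    // Load only the graph description (no weights) to list inputs and outputs.
    // model.inputs / model.outputs are assumed public members of the loaded Model.
    public static void DumpInterface(NNModel nnModel)
    {
        Model model = ModelLoader.Load(nnModel, verbose: false, skipWeights: true);

        foreach (var input in model.inputs)
            Debug.Log("input: " + input.name);
        foreach (var output in model.outputs)
            Debug.Log("output: " + output);
    }

    // For execution, load the full model (weights included) and create a worker from it,
    // mirroring what the CreateWorker convenience extensions in Examples #1 and #2 do.
    public static IWorker CreateCpuWorker(NNModel nnModel)
    {
        Model model = ModelLoader.Load(nnModel);
        return model.CreateWorker(WorkerFactory.Device.CPU, false);
    }
}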