/// <summary>
/// Create a worker that will execute <c>asset</c> using the best backend available for the given device type.
/// Convenience wrapper that calls <c>ModelLoader.Load</c> followed by <c>WorkerFactory.CreateWorker</c>.
/// </summary>
/// <param name="asset">The associated NNModel asset.</param>
/// <param name="device">Preferred device for execution, e.g. <c>WorkerFactory.Device.GPU</c> for the fast GPU path.</param>
/// <param name="verbose">If true, logs scheduling of layer execution to the console.</param>
public static IWorker CreateWorker(this NNModel asset, WorkerFactory.Device device = WorkerFactory.Device.Auto, bool verbose = false)
{
    var loadedModel = ModelLoader.Load(asset);
    return loadedModel.CreateWorker(device, verbose);
}
/// <summary>
/// Pick the best worker backend type for the requested device:
/// GPU (and Auto) map to the precompiled compute path, anything else falls back to Burst on the CPU.
/// </summary>
/// <param name="device">The requested device type.</param>
/// <returns>The backend worker type best suited to <paramref name="device"/>.</returns>
internal static WorkerFactory.Type GetBestTypeForDevice(WorkerFactory.Device device)
{
    if (device == WorkerFactory.Device.Auto || device == WorkerFactory.Device.GPU)
        return WorkerFactory.Type.ComputePrecompiled;

    return WorkerFactory.Type.CSharpBurst;
}
/// <summary>
/// Create a worker that will execute <c>asset</c> using the best backend available for the given device type.
/// Convenience wrapper that calls <c>ModelLoader.Load</c> followed by <c>WorkerFactory.CreateWorker</c>.
/// </summary>
/// <param name="asset">The associated NNModel asset.</param>
/// <param name="additionalOutputs">Additional outputs to track that are not directly specified by the model.</param>
/// <param name="trimOutputs">Outputs to keep (not discard) even when trimming the model's declared outputs.</param>
/// <param name="device">Device type to run the worker on, e.g. <c>WorkerFactory.Device.GPU</c> for the fast GPU path.</param>
/// <param name="verbose">If true, logs scheduling of layer execution to the console (default == false).</param>
public static IWorker CreateWorker(this NNModel asset, string[] additionalOutputs, string[] trimOutputs, WorkerFactory.Device device = WorkerFactory.Device.Auto, bool verbose = false)
{
    var loadedModel = ModelLoader.Load(asset);
    return loadedModel.CreateWorker(additionalOutputs, trimOutputs, device, verbose);
}
/// <summary>
/// Create a worker that will execute <c>model</c> using the best backend available for the given device type.
/// Convenience wrapper around <c>WorkerFactory.CreateWorker</c>.
/// </summary>
/// <param name="model">The associated Model to execute.</param>
/// <param name="additionalOutputs">Additional outputs to track that are not directly specified by the model.</param>
/// <param name="trimOutputs">Outputs to keep (not discard) even when trimming the model's declared outputs.</param>
/// <param name="device">Device type to run the worker on, e.g. <c>WorkerFactory.Device.GPU</c> for the fast GPU path.</param>
/// <param name="verbose">If true, logs scheduling of layer execution to the console (default == false).</param>
public static IWorker CreateWorker(this Model model, string[] additionalOutputs, string[] trimOutputs, WorkerFactory.Device device = WorkerFactory.Device.Auto, bool verbose = false)
{
    return WorkerFactory.CreateWorker(model, additionalOutputs, trimOutputs, device, verbose);
}
/// <summary>
/// Create a worker that will execute <c>model</c> using the best backend available for the given device type.
/// Convenience wrapper around <c>WorkerFactory.CreateWorker</c>.
/// </summary>
/// <param name="model">The associated Model to execute.</param>
/// <param name="device">Preferred device for execution, e.g. <c>WorkerFactory.Device.GPU</c> for the fast GPU path.</param>
/// <param name="verbose">If true, logs scheduling of layer execution to the console.</param>
public static IWorker CreateWorker(this Model model, WorkerFactory.Device device = WorkerFactory.Device.Auto, bool verbose = false)
{
    return WorkerFactory.CreateWorker(model, device, verbose);
}