/// <summary>
/// Create `VerboseOps` for target `ops`
/// </summary>
/// <param name="ops">target `IOps` instance</param>
public VerboseOps(IOps ops) => m_Ops = ops;
/// <summary>
/// Build the array of input tensors for `layer`: dummy placeholder tensors for each
/// declared input (actual shapes are bound later, at execution time) followed by
/// tensors wrapping the layer's constant datasets, backed by a shared ComputeBuffer
/// that holds the model weights on the GPU.
/// </summary>
/// <param name="model">model being executed (unused here; kept for the override contract)</param>
/// <param name="layer">layer whose inputs and datasets are prepared</param>
/// <param name="ops">ops backend (unused in this variant)</param>
/// <returns>tensors for all inputs of `layer` followed by all its datasets</returns>
protected override Tensor[] PrepareLayerInputTensors(Model model, Layer layer, IOps ops)
{
    var tensorIndex = 0;
    var tensors = new Tensor[layer.inputs.Length + layer.datasets.Length];

    // Dummy 1x1x1x1 placeholders: real input shapes are resolved during execution.
    foreach (var name in layer.inputs)
    {
        var tensor = new Tensor(1, 1, 1, 1, m_StringCache.Lookup(layer.name, "_dummy_in", tensorIndex));
        tensors[tensorIndex++] = tensor;
    }

    // Single TryGetValue lookups instead of ContainsKey + indexer (double lookup).
    if (!m_OffsetsIntoModelWeights.TryGetValue(layer.name, out Int64 offsetIntoModelWeights))
        offsetIntoModelWeights = 0;
    m_ModelBuffers.TryGetValue(layer.name, out ComputeBuffer buffer);

    if (buffer == null)
    {
        // Lazily upload this layer's weights to the GPU and cache the buffer for re-use.
        buffer = CreateComputeBufferForModelTensors(layer, out offsetIntoModelWeights);
        if (buffer != null)
        {
            m_ModelBuffers[layer.name] = buffer;
            m_OffsetsIntoModelWeights[layer.name] = offsetIntoModelWeights;
        }
    }

    foreach (var arg in layer.datasets)
    {
        Assert.IsNotNull(buffer);
        // Dataset offsets are absolute into the full model weights; rebase them onto this buffer.
        var tensor = new Tensor(arg.shape,
            new SharedComputeTensorData(buffer, arg.shape, (int)(arg.offset - offsetIntoModelWeights)),
            m_StringCache.Lookup(layer.name, "_arg", tensorIndex));
        tensors[tensorIndex++] = tensor;
        m_ModelTensors.Add(tensor);
    }

    // Every slot must have been filled.
    Assert.AreEqual(tensorIndex, tensors.Length);
    return (tensors);
}
/// <summary>
/// Create `StatsOps` wrapping `ops`, with the ALU and memory counters reset to zero.
/// </summary>
/// <param name="ops">target `IOps` instance to collect statistics for</param>
public StatsOps(IOps ops)
{
    m_Ops = ops;
    m_Mem = 0L;
    m_Alu = 0L;
}
/// <summary>
/// Create `VerboseOps` for target `ops`
/// </summary>
/// <param name="ops">target `IOps` instance</param>
/// <param name="useUnityLogFile">produce log in Unity standard log file; the model execution reporter from `IOps` will always be used if it exists</param>
public VerboseOps(IOps ops, bool useUnityLogFile = true)
{
    m_Ops = ops;
    m_UseUnityLogFile = useUnityLogFile;
}
/// <summary>
/// Build the array of input tensors for `layer`: dummy placeholder tensors for each
/// declared input (actual shapes are bound later, at execution time) followed by
/// tensors wrapping the layer's constant datasets, backed by the layer's shared
/// CPU-side weights array.
/// </summary>
/// <param name="model">model being executed (unused here; kept for the virtual contract)</param>
/// <param name="layer">layer whose inputs and datasets are prepared</param>
/// <param name="ops">optional ops used to prepare (e.g. pre-upload) dataset tensors; may be null</param>
/// <returns>tensors for all inputs of `layer` followed by all its datasets</returns>
protected virtual Tensor[] PrepareLayerInputTensors(Model model, Layer layer, IOps ops)
{
    int tensorIndex = 0;
    var tensors = new Tensor[layer.inputs.Length + layer.datasets.Length];

    // Dummy 1x1x1x1 placeholders: real input shapes are resolved during execution.
    // Create the tensor before incrementing so the debug name carries the
    // pre-increment index, consistent with the ComputeBuffer override.
    // (The original `tensors[tensorIndex++] = new Tensor(..., Lookup(..., tensorIndex))`
    // evaluated the element index first, so the name was off by one.)
    foreach (var name in layer.inputs)
    {
        var tensor = new Tensor(1, 1, 1, 1, m_StringCache.Lookup(layer.name, "_dummy_in", tensorIndex));
        tensors[tensorIndex++] = tensor;
    }

    foreach (var arg in layer.datasets)
    {
        var tensor = new Tensor(arg.shape,
            new SharedArrayTensorData(layer.weights, (int)arg.offset, (int)arg.shape.length),
            m_StringCache.Lookup(layer.name, "_arg", tensorIndex));
        if (ops != null)
            tensor = ops.Prepare(tensor);
        m_ModelTensors.Add(tensor);
        tensors[tensorIndex++] = tensor;
    }

    // Every slot must have been filled (mirrors the check in the ComputeBuffer override).
    Assert.AreEqual(tensorIndex, tensors.Length);
    return (tensors);
}
/// <summary>
/// Create `StatsOps` wrapping `ops`, with fresh zeroed ALU and memory layer statistics.
/// </summary>
/// <param name="ops">target ops</param>
public StatsOps(IOps ops)
{
    m_Ops = ops;
    // Two separate instances: the counters must not alias each other.
    m_Mem = new LayerStat(0L, 0L);
    m_Alu = new LayerStat(0L, 0L);
}
/// <summary>
/// Create a worker for `model`: resolves `Auto` backend types, picks storage
/// (`IVars`) and an allocator matching the backend, wraps the ops in compare /
/// verbose / stats decorators as configured, patches and validates the model,
/// and returns the assembled `GenericWorker`.
/// </summary>
internal static IWorker CreateWorker(WorkerFactory.Type type, Model model, string[] additionalOutputs, string[] trimOutputs, WorkerFactory.WorkerConfiguration workerConfiguration, IModelExecutionsReporter modelExecutionsReporter = null)
{
    type = ResolveAutoType(type);
    var compareAgainstType = ResolveAutoType(workerConfiguration.compareAgainstType);
    Assert.AreNotEqual(type, WorkerFactory.Type.Auto);
    Assert.AreNotEqual(compareAgainstType, WorkerFactory.Type.Auto);
    bool compare = type != compareAgainstType;

    // Fall back to pixel shaders at runtime when compute shaders are unsupported.
    if (WorkerFactory.IsType(type, WorkerFactory.Device.GPU) && !SystemInfo.supportsComputeShaders && !Application.isEditor)
    {
        type = WorkerFactory.Type.PixelShader;
    }

    bool usesPixelShader = (type == WorkerFactory.Type.PixelShader) || (compareAgainstType == WorkerFactory.Type.PixelShader);
    bool usesGpu = WorkerFactory.IsType(type, WorkerFactory.Device.GPU) || WorkerFactory.IsType(compareAgainstType, WorkerFactory.Device.GPU);

    // PixelShader worker uses Blit/Textures and cannot re-use vars unless the
    // dispatch mechanism allows rendering to a sub part of the texture.
    IVars vars;
    if (usesPixelShader)
        vars = new GenericVarsWithReuse();
    else if (usesGpu)
        vars = new ComputeVarsWithSharedModel();
    else
        vars = new DefaultVars();

    ITensorAllocator allocator = vars.GetAllocator();
    if (usesPixelShader)
    {
        allocator = new TensorCachingByShapeAllocator();
    }

    if (workerConfiguration.verbose)
    {
        // NOTE(review): this interpolated string was broken across lines in the
        // extracted source; reconstructed with '\n' — confirm against upstream.
        D.Log($"Storage type: {vars.GetType()}. \nAllocator type: {allocator.GetType()}.");
    }

    IOps ops = CreateOps(type, allocator, workerConfiguration.verbose);
    if (compare)
    {
        ops = new CompareOps(ops,
            CreateOps(compareAgainstType, allocator, workerConfiguration.verbose),
            workerConfiguration.compareLogLevel, workerConfiguration.compareEpsilon);
    }
    if (workerConfiguration.verbose || modelExecutionsReporter != null)
    {
        ops = new VerboseOps(ops, workerConfiguration.verbose);
    }
    if (Application.isEditor || modelExecutionsReporter != null)
    {
        ops = new StatsOps(ops);
    }

    model = ValidateModel(PatchModel(model, additionalOutputs, trimOutputs));
    ops.SetModelExecutionsReporter(modelExecutionsReporter);

    return new GenericWorker(model, ops, vars, workerConfiguration.verbose, workerConfiguration.takeoverWeights);
}
/// <summary>
/// Record a memory snapshot for the current execution step by delegating to
/// `MemorySnapshotsReport`.
/// </summary>
/// <param name="ops">ops backend whose memory state is captured</param>
/// <param name="vars">storage whose memory state is captured</param>
/// <param name="context">label describing when the snapshot was taken</param>
/// <param name="layer">layer associated with the snapshot</param>
public void TakeMemorySnapshot(IOps ops, IVars vars, string context, Layer layer)
    => MemorySnapshotsReport.TakeMemorySnapshot(ops, vars, context, layer);