/// <summary>
/// Initializes the Brain with the Model that it will use when selecting actions
/// for the agents.
/// </summary>
/// <param name="model"> The Barracuda model to load. </param>
/// <param name="brainParameters"> The parameters of the Brain used to generate the
/// placeholder tensors. </param>
/// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
/// option for most of ML Agents models. </param>
/// <param name="seed"> The seed that will be used to initialize the RandomNormal
/// and Multinomial objects used when running inference. </param>
/// <remarks>
/// NOTE(review): the original doc claimed an <c>UnityAgentsException</c> is thrown
/// when the model is null, but this constructor actually tolerates a null model and
/// simply leaves the inference engine unset — confirm which behavior is intended.
/// </remarks>
public ModelRunner(
    NNModel model,
    BrainParameters brainParameters,
    InferenceDevice inferenceDevice = InferenceDevice.CPU,
    int seed = 0)
{
    Model barracudaModel;
    m_Model = model;
    m_InferenceDevice = inferenceDevice;
    m_TensorAllocator = new TensorCachingAllocator();
    if (model != null)
    {
#if BARRACUDA_VERBOSE
        m_Verbose = true;
#endif
        D.logEnabled = m_Verbose;

        barracudaModel = ModelLoader.Load(model);
        // GPU inference runs through precompiled compute shaders; any other
        // device selection falls back to the C# CPU backend.
        var executionDevice = inferenceDevice == InferenceDevice.GPU
            ? WorkerFactory.Type.ComputePrecompiled
            : WorkerFactory.Type.CSharp;
        m_Engine = WorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
    }
    else
    {
        barracudaModel = null;
        m_Engine = null;
    }
    // The tensor plumbing is still constructed with a null model so the runner
    // object is fully initialized either way.
    m_InferenceInputs = BarracudaModelParamLoader.GetInputTensors(barracudaModel);
    m_OutputNames = BarracudaModelParamLoader.GetOutputNames(barracudaModel);
    m_TensorGenerator = new TensorGenerator(
        seed, m_TensorAllocator, m_Memories, barracudaModel);
    m_TensorApplier = new TensorApplier(
        brainParameters, seed, m_TensorAllocator, m_Memories, barracudaModel);
}
// Start is called before the first frame update.
// Sets up the precompiled compute ops, loads the model, and caches the input
// tensors exposed by the TextureLoader singleton.
void Start()
{
    ops = new PrecompiledComputeOps(ComputeShaderSingleton.Instance.kernels, ComputeShaderSingleton.Instance.referenceKernels);

    // Constructed for its side effect only (presumably installs
    // TextureLoader.instance, which is read below — TODO confirm).
    // The unused local handle from the original was removed.
    new TextureLoader();

    print(TextureLoader.instance.semInput.shape);
    inputs[name1] = TextureLoader.instance.semInput;
    inputs[name2] = TextureLoader.instance.embedInput;

    var model = ModelLoader.Load(modelSource);
    worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);

    count = Time.frameCount;
}
/// <summary>
/// Creates the ops implementation matching the requested worker type, after
/// validating (and possibly falling back from) the requested type.
/// Unrecognized types get the reference CPU implementation.
/// </summary>
public static IOps CreateOps(WorkerFactory.Type type, bool verbose = false)
{
    WorkerFactory.ValidateType(type);

    // The singleton is only touched on the compute paths, exactly as before.
    if (type == WorkerFactory.Type.ComputePrecompiled)
    {
        return new PrecompiledComputeOps(ComputeShaderSingleton.Instance.kernels, ComputeShaderSingleton.Instance.referenceKernels, verbose: verbose);
    }
    if (type == WorkerFactory.Type.Compute)
    {
        return new ComputeOps(ComputeShaderSingleton.Instance.kernels, ComputeShaderSingleton.Instance.referenceKernels, verbose: verbose);
    }
    if (type == WorkerFactory.Type.ComputeRef)
    {
        return new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);
    }
    if (type == WorkerFactory.Type.CSharp)
    {
        return new UnsafeArrayCPUOps();
    }
    return new ReferenceCPUOps();
}
/// <summary>
/// Starts an in-memory Rebus bus with the given worker count and max
/// parallelism, optionally receiving messages via the TPL, and returns the
/// activator (registered for disposal via Using).
/// </summary>
BuiltinHandlerActivator StartBus(int workers, int parallelism, WorkerFactory workerFactory)
{
    var activator = new BuiltinHandlerActivator();
    Using(activator);

    Configure.With(activator)
        .Logging(logging => logging.ColoredConsole(minLevel: LogLevel.Info))
        .Transport(transport => transport.UseInMemoryTransport(new InMemNetwork(), "threads"))
        .Options(options =>
        {
            options.SetNumberOfWorkers(workers);
            options.SetMaxParallelism(parallelism);
            if (workerFactory == WorkerFactory.TaskParallelLibrary)
            {
                options.UseTplToReceiveMessages();
            }
        })
        .Start();

    return activator;
}
/// <summary>
/// Wires up the factory/converter/manager graph used to translate the project
/// data set and solver result into shift, job and zone managers.
/// </summary>
public SolverDataManager(ProjectDataSet model, ConstraintSolverResult data)
{
    _model = model;
    _solverResult = data;

    // One factory per entity kind.
    var shiftFactory = new ShiftFactory();
    var skillFactory = new SkillFactory();
    var toolFactory = new ToolFactory();
    var zoneFactory = new ZoneFactory();
    var workerFactory = new WorkerFactory();
    var jobFactory = new JobFactory();

    // Converters are built bottom-up so later ones can reuse earlier ones.
    var shiftConverter = new ShiftConverter(shiftFactory);
    var skillConverter = new SkillConverter(skillFactory);
    var toolConverter = new ToolConverter(toolFactory, shiftConverter);
    var zoneConverter = new ZoneConverter(zoneFactory);
    var laborConverter = new LaborConverter(workerFactory, shiftConverter, skillConverter);
    var jobConverter = new JobConverter(jobFactory, skillConverter, toolConverter, zoneConverter);

    _shiftManager = new ShiftManager(shiftConverter, skillConverter, laborConverter, toolConverter);
    _jobManager = new JobManager(jobConverter);
    _zoneManager = new ZoneManager(zoneConverter);
}
// Use this for initialization.
// Loads the class labels and the model, appends a Softmax head so raw logits
// become probabilities, builds the inference worker, prepares the input
// tensor, then kicks off the inference coroutine.
IEnumerator Start()
{
    Application.targetFrameRate = 60;
    // One label per line in the text asset.
    labels = labelsAsset.text.Split('\n');
    model = ModelLoader.Load(srcModel, false);

    // Append Softmax() layer to the end of the network.
    // ModelBuilder mutates `model` in place, so the worker below sees the new head.
    var modelBuilder = new ModelBuilder(model);
    modelBuilder.Softmax(output_name, model.outputs[0]);
    modelBuilder.Output(output_name);

    engine = WorkerFactory.CreateWorker(model, useGPU ? WorkerFactory.Device.GPU : WorkerFactory.Device.CSharp);

    // The second argument flips CPU-side preprocessing when not on GPU —
    // TODO confirm PrepareTextureForInput semantics.
    var input = new Tensor(PrepareTextureForInput(inputImage, !useGPU), 3);
    inputs["data"] = input;
    // Wait one frame before starting inference.
    yield return(null);
    StartCoroutine(RunInference());
}
/// <summary>
/// Reconstructs the worker GameObject stored in the room's custom properties
/// (serialized as "actorNumber,x,y"), or returns null when the room or the
/// property is absent.
/// </summary>
public static GameObject GetWorker(this RoomInfo room)
{
    // Check for the key BEFORE indexing. The original indexed first and
    // checked ContainsKey afterwards, which only worked because the property
    // table returns null for missing keys — this order is the intended one.
    if (room == null || !room.CustomProperties.ContainsKey(WorkerPropKey) || room.CustomProperties[WorkerPropKey] == null)
    {
        return null;
    }

    string data = (string)room.CustomProperties[WorkerPropKey];
    string[] parts = data.Split(',');

    // Local renamed: the original declared a variable named `WorkerFactory`,
    // shadowing the type of the same name.
    var factory = new WorkerFactory();
    GameObject go = factory.Create();
    Worker worker = go.GetComponent<Worker>();

    // Hoist the parse out of the loop; assign the owner matching the actor number.
    int ownerActorNumber = int.Parse(parts[0]);
    foreach (Player player in PhotonNetwork.PlayerList)
    {
        if (player.ActorNumber == ownerActorNumber)
        {
            worker.Owner = player;
        }
    }
    worker.Pos = new Vector2(int.Parse(parts[1]), int.Parse(parts[2]));
    return go;
}
/// <summary>
/// Updates the output layer names based on the selected model architecture
/// and initializes the Barracuda inference engine with the selected model.
/// </summary>
private void InitializeBarracuda()
{
    // The compiled model used for performing inference
    Model m_RunTimeModel;

    if (modelType == ModelType.MobileNet)
    {
        preProcessFunction = Utils.PreprocessMobileNet;
        // Compile the model asset into an object oriented representation
        m_RunTimeModel = ModelLoader.Load(mobileNetModelAsset);

        // NOTE(review): the two architectures export the displacement outputs
        // in opposite order — MobileNet: FWD at index 2, BWD at index 3.
        displacementFWDLayer = m_RunTimeModel.outputs[2];
        displacementBWDLayer = m_RunTimeModel.outputs[3];
    }
    else
    {
        preProcessFunction = Utils.PreprocessResNet;
        // Compile the model asset into an object oriented representation
        m_RunTimeModel = ModelLoader.Load(resnetModelAsset);

        // ResNet: FWD at index 3, BWD at index 2 (see note above in this method).
        displacementFWDLayer = m_RunTimeModel.outputs[3];
        displacementBWDLayer = m_RunTimeModel.outputs[2];
    }

    // Heatmaps and offsets sit at the same indices for both architectures.
    heatmapLayer = m_RunTimeModel.outputs[0];
    offsetsLayer = m_RunTimeModel.outputs[1];

    // Create a model builder to modify the m_RunTimeModel
    ModelBuilder modelBuilder = new ModelBuilder(m_RunTimeModel);

    // Add a new Sigmoid layer that takes the output of the heatmap layer
    modelBuilder.Sigmoid(predictionLayer, heatmapLayer);

    // Validate if backend is supported, otherwise use fallback type.
    workerType = WorkerFactory.ValidateType(workerType);

    // Create a worker that will execute the model with the selected backend
    engine = new Engine(workerType, modelBuilder.model, modelType);
}
/// <summary>
/// Queries employees with the given SQL and materializes each row as the
/// concrete IEmployee type named in column 4 ("Worker", "Manager", anything
/// else falls back to Driver).
/// </summary>
/// <param name="sql">Complete SQL statement to execute.
/// NOTE(review): executed verbatim — never build this string from user input
/// (SQL injection risk); prefer parameterized queries.</param>
/// <returns>The list of employees built from the result set.</returns>
public static List<IEmployee> GetEmployeeByKeyword(string sql)
{
    using (SqlConnection conn = sqlHelper.GetConnection())
    {
        conn.Open();
        // Dispose the command as well — the original leaked it.
        using (SqlCommand cmd = new SqlCommand(sql, conn))
        using (SqlDataReader reader = cmd.ExecuteReader())
        {
            List<IEmployee> list = new List<IEmployee>();
            while (reader.Read())
            {
                System.Data.IDataRecord record = reader;
                // Column 4 holds the employee kind (ordinal comparison, same
                // as the original Equals calls).
                IEmployee emp;
                switch (record.GetString(4))
                {
                    case "Worker":
                        emp = new WorkerFactory().Get();
                        break;
                    case "Manager":
                        emp = new ManagerFactory().Get();
                        break;
                    default:
                        emp = new DriverFactory().Get();
                        break;
                }
                emp.Id = record.GetInt32(0);
                emp.Name = record.GetString(1);
                emp.Sex = record.GetString(2);
                emp.Age = record.GetInt32(3);
                list.Add(emp);
            }
            return list;
        }
    }
}
/// <summary>
/// Handles the requests carried by a transfer object received from a node:
/// refreshes the monitor UI for non-infrastructure responses, dispatches the
/// request to the matching worker, and — when the worker produces a result —
/// sends it back to the emitting node as a response.
/// </summary>
/// <param name="input">Transfer object carrying the request.</param>
/// <param name="node">Emitting node.</param>
public override void ProcessInput(DataInput input, Node node)
{
    TaskExecutor executor = WorkerFactory.GetWorker(input.Method);

    // Responses for non-infrastructure methods also feed the monitor view.
    if (MethodIsNotInfra(input.Method) && input.MsgType == MessageType.Response)
    {
        ViewModelLocator.VmlMonitorUcStatic.RefreshStateFromTaskResult(input);
    }

    object result = executor.DoWork(input);
    if (result == null)
    {
        return;
    }

    // Echo the request's identifiers back so the caller can correlate the response.
    SendData(node, new DataInput()
    {
        ClientGuid = input.ClientGuid,
        NodeGuid = NodeGuid,
        TaskId = input.TaskId,
        Method = input.Method,
        Data = result,
        MsgType = MessageType.Response
    });
}
/// <summary>
/// Runs the classification model on the drawn texture and writes the
/// best-scoring class index (0-9) to the result label.
/// </summary>
public void ReadBarracudaModel()
{
    resultText.text = "Loading";
    var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
    resultText.text = "Loaded";

    // Dispose the input tensor when done — the original leaked it.
    using (var tensor = new Tensor(drawTexture, 1))
    {
        worker.Execute(tensor);
        var scores = worker.PeekOutput().AsFloats();

        // Track the argmax index directly. The original started bestScore at 0
        // and then used IndexOf(bestScore), which yields -1 (shown as "-1")
        // whenever every score is negative.
        int best = 0;
        for (var i = 1; i < 10; i++)
        {
            if (scores[i] > scores[best])
            {
                best = i;
            }
        }

        worker.Dispose();
        resultText.text = best.ToString();
    }
}
static void Main(string[] args)
{
    // There is a service with handlers that each do some work at a certain
    // periodicity; one of the handlers will create a task for execution.
    var worker = new WorkerFactory(new WorkerRepository()).GetService();
    // NOTE(review): AddTaskAsync is not awaited — the task is queued
    // fire-and-forget and any exception it throws is unobserved; confirm intent.
    worker.AddTaskAsync(new Models.BO.NewTaskToWork("test", 50, false));
    List<ITaskWorkHandler> handlers = new List<ITaskWorkHandler>();
    // Poll every 500 ms until the user presses 'y'.
    while (true)
    {
        // NOTE(review): also not awaited — see note above on AddTaskAsync.
        worker.StartActualTaskHandleAsync(handlers);
        Thread.Sleep(500);
        Console.WriteLine("write 'y' if stop else write anything");
        if (Console.ReadKey().KeyChar == 'y')
        {
            break;
        }
    }
    Console.WriteLine("Hello World!");
}
/// <summary>
/// Builds the benchmark work facades: every combination of job kind
/// (IJob / IJobParallelFor), plain vs Burst-compiled job, default vs
/// <c>(false)</c> work config (presumably a scheduling toggle — TODO confirm),
/// and native-collection allocator (Persistent / Temp / TempJob).
/// All facades read from and write to the shared float buffer.
/// The original hand-expanded all 24 entries; the matrix is generated here
/// in the exact same order.
/// </summary>
public override IWorkFacade[] InitWorkFacades(IInputDataContainer inputDataContainer, int dataSize)
{
    // A fresh pair of collection configs (input + output) per facade,
    // exactly as the original hand-expanded code did.
    IDataConfig[] Configs(Allocator allocator) => new IDataConfig[]
    {
        new DataConfigUnityCollection(allocator),
        new DataConfigUnityCollection(allocator),
    };

    var allocators = new[] { Allocator.Persistent, Allocator.Temp, Allocator.TempJob };
    var facades = new IWorkFacade[24];
    int n = 0;

    // IJob variants: plain / Burst, default and (false) work config.
    foreach (var allocator in allocators)
    {
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseIJob>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(),
            Configs(allocator));
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseIJob>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(false),
            Configs(allocator));
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseBurstIJob>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(),
            Configs(allocator));
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseBurstIJob>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(false),
            Configs(allocator));
    }

    // IJobParallelFor variants: the same matrix with the parallel-for config.
    foreach (var allocator in allocators)
    {
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseIJobParallelFor>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(),
            Configs(allocator));
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseIJobParallelFor>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(false),
            Configs(allocator));
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseBurstIJobParallelFor>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(),
            Configs(allocator));
        facades[n++] = WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BaseBurstIJobParallelFor>(
            TestName(),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(false),
            Configs(allocator));
    }

    return facades;
}
/// <summary>
/// Builds the mesh-processing work facades: the plain managed-array variants,
/// their IJob / IJobParallelFor counterparts over native collections, and the
/// System-threads parallel-for variant. All read the shared Vector3 buffer and
/// write the shared int buffer.
/// </summary>
public override IWorkFacade[] InitWorkFacades(IInputDataContainer inputDataContainer, int dataSize)
{
    // Fresh config pairs per facade, matching the original hand-expanded code.
    IDataConfig[] DefaultConfigs() => new IDataConfig[]
    {
        new DataConfigDefault(),
        new DataConfigDefault(),
    };
    IDataConfig[] PersistentConfigs() => new IDataConfig[]
    {
        new DataConfigUnityCollection(Allocator.Persistent),
        new DataConfigUnityCollection(Allocator.Persistent),
    };

    return new IWorkFacade[]
    {
        // Plain managed-array implementations.
        WorkerFactory<Vector3[], int[]>.Create<SeparateAllPlain>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigDefault(), DefaultConfigs()),
        WorkerFactory<Vector3[], int[]>.Create<SeparateVerticesTrianglesPlain>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigDefault(), DefaultConfigs()),
        WorkerFactory<Vector3[], int[]>.Create<AllJoinPlain>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigDefault(), DefaultConfigs()),
        WorkerFactory<Vector3[], int[]>.Create<AllJoinSingleForPlain>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigDefault(), DefaultConfigs()),

        // IJob implementations over native collections.
        WorkerFactory<NativeArray<Vector3>, NativeArray<int>>.Create<SeparateAllJob>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigIJob(), PersistentConfigs()),
        WorkerFactory<NativeArray<Vector3>, NativeArray<int>>.Create<SeparateVerticesTrianglesJob>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigIJob(), PersistentConfigs()),
        WorkerFactory<NativeArray<Vector3>, NativeArray<int>>.Create<AllJoinJob>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigIJob(), PersistentConfigs()),
        WorkerFactory<NativeArray<Vector3>, NativeArray<int>>.Create<AllJoinSingleForJob>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigIJob(), PersistentConfigs()),

        // IJobParallelFor implementation.
        WorkerFactory<NativeArray<Vector3>, NativeArray<int>>.Create<AllJoinSingleForJobParallelFor>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigIJobParallelFor(), PersistentConfigs()),

        // System-threads parallel-for implementation.
        WorkerFactory<Vector3[], int[]>.Create<AllJoinSingleForSystemParallelFor>(
            TestName(), inputDataContainer.GetData<Vector3>(TypeConfig.DataVector3), inputDataContainer.GetData<int>(TypeConfig.DataInt1),
            new WorkConfigDefault(), DefaultConfigs()),
    };
}
/// <summary>
/// Creates the scheduler: builds a worker factory over the service provider
/// and arms the timers described by the configuration dictionary.
/// </summary>
/// <param name="sp">Service provider handed to the worker factory.</param>
/// <param name="dic">Timer configuration passed straight to SetupTimers
/// (key/interval semantics defined there).</param>
public WorkerScheduler(ServiceProvider sp, IDictionary<string, int> dic)
{
    _wf = new WorkerFactory(sp);
    SetupTimers(dic);
}
// Start is called before the first frame update.
// Compiles the serialized model asset and spins up a precompiled-compute
// worker to run it.
void Start()
{
    m_RuntimeModel = ModelLoader.Load(modelAsset);
    worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, m_RuntimeModel);
}
// TODO: refactor with FuseShapesIntoConstants
/// <summary>
/// Runs shape/rank inference over every layer of the model, constant-folding
/// each layer whose input values are all known by baking it through a
/// throwaway one-layer CPU model, so that downstream shapes become computable.
/// NOTE(review): <c>keepLayers</c> is populated but never read in this method,
/// and <c>toRunnableNCHW.Run</c> is invoked twice in a row on the baked model —
/// both look accidental; confirm before changing.
/// </summary>
public void InferAllShapes(Model model, ref IDictionary<string, TensorShape?> shapesByName, ref IDictionary<string, int?> ranksByName)
{
    var toRunnableNCHW = new IntermediateToRunnableNCHWPass();
    // Values of layers whose outputs are fully known (constants + folded layers).
    var knownLayersValue = new Dictionary<string, Tensor>();
    var newKnownLayers = new HashSet<string>();
    var keepLayers = new HashSet<string>();
    for (int l = 0; l < model.layers.Count; ++l)
    {
        var layer = model.layers[l];
        if (layer.flags == Layer.Flags.Preserve)
        {
            keepLayers.Add(layer.name);
        }
        // NN is a directed graph; if we just fused constants + shapes, update
        // following nodes and re-evaluate shapes.
        FuseInputsIntoLayer(ref layer, knownLayersValue, ranksByName, null); //TODO handle potential folding errors/warnings
        // TODO optimization: pass in index, or add shape
        IRShapeInferenceHelper.RankInference.UpdateKnownTensorRanks(model, ranksByName);
        IRShapeInferenceHelper.ShapeInference.UpdateKnownTensorShapesNCHW(model, ranksByName, ref shapesByName);
        if (ModelOptimizer.IsLayerConstant(layer))
        {
            // Constant layer: its value is simply its stored weights.
            knownLayersValue[layer.name] = new Tensor(layer.datasets[0].shape, layer.weights);
        }
        else if (layer.type == Layer.Type.Shape)
        {
            // assert inputs.Length == 1
            var input = layer.inputs[0];
            // A Shape layer's value is known as soon as its input's shape and rank are.
            if (shapesByName.ContainsKey(input) && shapesByName[input] != null &&
                ranksByName.ContainsKey(input) && ranksByName[input] != null
                )
            {
                var shape = shapesByName[input].Value;
                var rank = ranksByName[input].Value;
                knownLayersValue[layer.name] = ShapeToNCHWTensor(shape, rank);
                newKnownLayers.Add(layer.name);
                continue;
            }
        }
        // A layer can only be folded when every one of its inputs is known.
        bool allInputsAreKnown = layer.inputs.Length > 0 ? knownLayersValue.ContainsKey(layer.inputs[0]) : false;
        for (int i = 1; i < layer.inputs.Length; i++)
        {
            allInputsAreKnown &= knownLayersValue.ContainsKey(layer.inputs[i]);
        }
        // if all inputs are known, execute layer
        if (!allInputsAreKnown)
        {
            continue;
        }
        // Build a throwaway one-layer model around a copy of this layer so it
        // can be executed on its known input values.
        var layerInputs = new Dictionary<string, Tensor>();
        var opsModel = new Model();
        opsModel.layout = "iNCHW";
        for (int i = 0; i < layer.inputs.Length; i++)
        {
            Model.Input input;
            input.name = layer.inputs[i];
            input.shape = shapesByName[input.name].Value.ToArray();
            input.rank = ranksByName[input.name].Value;
            opsModel.inputs.Add(input);
            layerInputs[input.name] = knownLayersValue[input.name];
        }
        // Shallow-copy the layer (datasets/weights shared, array fields cloned).
        Layer newLayer = new Layer(layer.name.ToString(), layer.activation);
        newLayer.type = layer.type;
        newLayer.activation = layer.activation;
        newLayer.pad = layer.pad.ToArray();
        newLayer.stride = layer.stride.ToArray();
        newLayer.pool = layer.pool.ToArray();
        newLayer.axis = layer.axis;
        newLayer.alpha = layer.alpha;
        newLayer.beta = layer.beta;
        newLayer.inputs = layer.inputs.ToArray();
        newLayer.datasets = layer.datasets;
        newLayer.weights = layer.weights;
        if (layer.outputs != null)
        {
            newLayer.outputs = layer.outputs.ToArray();
        }
        if (layer.axes != null)
        {
            newLayer.axes = layer.axes.ToArray();
        }
        opsModel.layers.Add(newLayer);
        opsModel.outputs.Add(newLayer.name);
        // NOTE(review): Run is called twice in a row — looks like an accidental
        // duplicate; confirm whether the pass is idempotent or this is a bug.
        toRunnableNCHW.Run(ref opsModel);
        toRunnableNCHW.Run(ref opsModel);
        // bake
        var useCPUforBaking = WorkerFactory.Device.CPU;
        using (var worker = WorkerFactory.CreateWorker(opsModel, useCPUforBaking))
        {
            // TakeOwnership keeps the baked tensor alive after the worker is disposed.
            var bakedConstant = worker.Execute(layerInputs).PeekOutput();
            bakedConstant.TakeOwnership();
            knownLayersValue[layer.name] = bakedConstant;
            newKnownLayers.Add(layer.name);
        }
    }
    // clear allocated tensors
    foreach (var l in knownLayersValue)
    {
        l.Value.Dispose();
    }
    // remove unused constants
    var removeUnusedLayersPass = new Cleanup.RemoveUnusedLayersPass();
    removeUnusedLayersPass.Run(ref model);
}
/// <summary>
/// Compiles the given model asset and creates a precompiled-compute worker
/// to execute it, logging once the model is ready.
/// </summary>
public void LoadModel(NNModel model)
{
    mRuntimeModel = ModelLoader.Load(model);
    mWorker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, mRuntimeModel);
    Debug.Log("model loaded");
}
/// <summary>
/// Builds the Burst floating-point-mode benchmark facades: each float mode
/// (Default / Fast / Strict / Deterministic) as IJob and as IJobParallelFor,
/// with both the default and the <c>(false)</c> work config (presumably a
/// scheduling toggle — TODO confirm). All use Persistent allocations over the
/// shared float buffer. The original hand-expanded all 16 entries; the shared
/// config-pair construction is factored out here, order unchanged.
/// </summary>
public override IWorkFacade[] InitWorkFacades(IInputDataContainer inputDataContainer, int dataSize)
{
    // Fresh pair of Persistent collection configs (input + output) per facade,
    // exactly as the original hand-expanded code did.
    IDataConfig[] Configs() => new IDataConfig[]
    {
        new DataConfigUnityCollection(Allocator.Persistent),
        new DataConfigUnityCollection(Allocator.Persistent),
    };

    return new IWorkFacade[]
    {
        // IJob variants.
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDefaultJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDefaultJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(false), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeFastJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeFastJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(false), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeStrictJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeStrictJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(false), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDeterministicJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDeterministicJob>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJob(false), Configs()),

        // IJobParallelFor variants.
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDefaultJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDefaultJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(false), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeFastJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeFastJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(false), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeStrictJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeStrictJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(false), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDeterministicJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(), Configs()),
        WorkerFactory<NativeArray<float>, NativeArray<float>>.Create<BurstFloatModeDeterministicJobParallelFor>(
            TestName(), inputDataContainer.GetData<float>(TypeConfig.DataFloat1), inputDataContainer.GetData<float>(TypeConfig.DataFloat1),
            new WorkConfigIJobParallelFor(false), Configs()),
    };
}
/// <summary>
/// Draws the editor window GUI: model/clip pickers plus buttons for loading clips,
/// exporting clip data to CSV, dumping property names, and running inference from
/// the target position.
/// </summary>
public void OnGUI()
{
    m_position = EditorGUILayout.Vector3Field("Position the agent should point to:", m_position);
    kinectModel = EditorGUILayout.ObjectField("Kinect NN model:", kinectModel, typeof(NNModel), false) as NNModel;
    leapModel = EditorGUILayout.ObjectField("Leap NN model:", leapModel, typeof(NNModel), false) as NNModel;
    exampleKinectClip = EditorGUILayout.ObjectField("Example Kinect Animation Clip", exampleKinectClip, typeof(AnimationClip), false) as AnimationClip;
    exampleLeapClip = EditorGUILayout.ObjectField("Example Leap Animation Clip", exampleLeapClip, typeof(AnimationClip), false) as AnimationClip;

    if (GUILayout.Button("Load Animation Clips"))
    {
        string filePath = Application.dataPath + "/Resources/Recordings/";
        animList.Clear();
        LoadAnimationClips(animList, filePath);
        Debug.Log(animList.Count);
    }

    if (GUILayout.Button("Save Clips Data to CSV Files"))
    {
        // Timestamp the export (MM-dd-yyyy_HH-mm-ss) so repeated runs do not overwrite each other.
        DateTime now = DateTime.Now;
        string path = Application.dataPath + "/Resources/clips2csv_" + now.ToString("MM-dd-yyyy_HH-mm-ss");
        // using guarantees the writers are flushed and closed even if recording throws
        // (the original only closed them on the happy path).
        using (TextWriter sw_Kinect = new StreamWriter(path + "_Kinect.csv"))
        using (TextWriter sw_Leap = new StreamWriter(path + "_Leap.csv"))
        {
            StringBuilder line_Kinect = new StringBuilder(); // each line in the csv file
            StringBuilder line_Leap = new StringBuilder();
            foreach (AnimationClip clip in animList)
            {
                line_Kinect.Clear();
                line_Leap.Clear();
                if (clip == null)
                {
                    continue;
                }
                // Clip names start with the device, e.g. "Kinect_..." or "LeapRight_...";
                // route each clip to the matching writer.
                string device = clip.name.Split('_')[0];
                if (device == "Kinect")
                {
                    RecordAnimationClip2CSV(clip, line_Kinect, sw_Kinect, true);
                }
                else if (device == "LeapRight")
                {
                    RecordAnimationClip2CSV(clip, line_Leap, sw_Leap, false);
                }
            }
        }
        // Debug.Log(maxPos);
        // Debug.Log(maxRot);
        // Debug.Log(minScale);
    }

    if (GUILayout.Button("Recorded transform path and property name"))
    {
        DateTime now = DateTime.Now;
        string path = Application.dataPath + "/Resources/propertynames_" + now.ToString("MM-dd-yyyy_HH-mm-ss");
        using (TextWriter sw_Kinect = new StreamWriter(path + "_Kinect.csv"))
        using (TextWriter sw_Leap = new StreamWriter(path + "_Leap.csv"))
        {
            RecordPathPropertyNames(exampleKinectClip, sw_Kinect, true);
            RecordPathPropertyNames(exampleLeapClip, sw_Leap, false);
        }
    }

    if (GUILayout.Button("Inference Kinect Body from Position"))
    {
        RunPositionInference(kinectModel, true);
    }

    if (GUILayout.Button("Inference Leap Hand from Position"))
    {
        RunPositionInference(leapModel, false);
    }
}

/// <summary>
/// Loads the given model, runs it on the current target position, and applies the
/// output via <c>InferenceFromModel</c>. Disposes the Barracuda worker and the
/// input tensor (the original code leaked both on every click).
/// </summary>
/// <param name="nnModel">Model asset to execute.</param>
/// <param name="isKinect">True for the Kinect body model, false for the Leap hand model.</param>
private void RunPositionInference(NNModel nnModel, bool isKinect)
{
    var model = ModelLoader.Load(nnModel, false);
    // IWorker and Tensor are IDisposable; leaking them exhausts GPU memory in the editor.
    using (var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.Compute, model))
    using (Tensor input = new Tensor(1, 10, new float[10] { m_position.x, m_position.y, m_position.z, 0f, 0f, 1.25f, 0f, 0.5f, 0f, 1.8f }))
    {
        var output = worker.Execute(input).PeekOutput();
        // From the example animation clip, find all transforms that the output should
        // match with and assign each output to position, rotation, or scale.
        InferenceFromModel(isKinect, output);
    }
}
/// <summary>
/// Selects the model quality and input pipeline, loads the Barracuda model,
/// allocates the heat-map decode buffers, creates the inference worker, and runs
/// one inference on a dummy texture (apparent model warm-up — confirm intent).
/// </summary>
private void Start()
{
    // Open a file for debug output (left disabled).
    //Encoding enc = Encoding.GetEncoding("Shift_JIS");
    //var csvPath = System.IO.Path.Combine(Application.streamingAssetsPath, "data.csv");
    //writer = new StreamWriter(csvPath, false, enc);
    if (DebugMode)
    {
        // In debug mode the delegate choice follows the serialized User3Input flag directly.
        if (User3Input)
        {
            UpdateVNectModel = new UpdateVNectModelDelegate(UpdateVNectAsync);
        }
        else
        {
            UpdateVNectModel = new UpdateVNectModelDelegate(UpdateVNect);
        }
        /*
         * var streamingPath = System.IO.Path.Combine(Application.streamingAssetsPath, HighQualityModelName);
         * var writer = new BinaryWriter(new FileStream(streamingPath, FileMode.Create));
         * writer.Write(NNModel.modelData.Value);
         * writer.Close();
         */
        _model = ModelLoader.Load(NNModel, Verbose);
    }
    else
    {
        var streamingPath = System.IO.Path.Combine(Application.streamingAssetsPath, HighQualityModelName);
        // Fall back to the low-quality model when the high-quality file is missing.
        if (!File.Exists(streamingPath))
        {
            ModelQuality = 0;
        }
        if (ModelQuality == 0)
        {
            // Low quality: 224px input, 14x14 heat map, single-input synchronous update.
            InputImageSize = 224;
            HeatMapCol = 14;
            User3Input = false;
            UpdateVNectModel = new UpdateVNectModelDelegate(UpdateVNect);
            _model = ModelLoader.Load(NNModel, Verbose);
        }
        else
        {
            // High quality: 448px input, 28x28 heat map, three-input async update,
            // model loaded from StreamingAssets.
            InputImageSize = 448;
            HeatMapCol = 28;
            User3Input = true;
            UpdateVNectModel = new UpdateVNectModelDelegate(UpdateVNectAsync);
            _model = ModelLoader.LoadFromStreamingAssets(streamingPath);
        }
    }
    // Init VideoCapture
    videoCapture.Init(InputImageSize, InputImageSize);
    videoCapture.VideoReady += videoCapture_VideoReady;
    // Precompute heat-map dimensions used by the per-frame decoding loops.
    HeatMapCol_Half = HeatMapCol / 2;
    HeatMapCol_Squared = HeatMapCol * HeatMapCol;
    HeatMapCol_Cube = HeatMapCol * HeatMapCol * HeatMapCol;
    HeatMapCol_JointNum = HeatMapCol * JointNum;
    // Flat buffers for the 2D/3D heat maps and their offset channels
    // (2 offsets per 2D cell, 3 per 3D cell).
    heatMap2D = new float[JointNum * HeatMapCol_Squared];
    offset2D = new float[JointNum * HeatMapCol_Squared * 2];
    heatMap3D = new float[JointNum * HeatMapCol_Cube];
    offset3D = new float[JointNum * HeatMapCol_Cube * 3];
    InputImageSizeF = InputImageSize;
    InputImageSizeHalf = InputImageSizeF / 2f;
    unit = 1f / (float)HeatMapCol;
    cubeOffsetLinear = HeatMapCol * JointNum_Cube;
    cubeOffsetSquared = HeatMapCol_Squared * JointNum_Cube;
    // Disable screen sleep while tracking is running.
    Screen.sleepTimeout = SleepTimeout.NeverSleep;
    _worker = WorkerFactory.CreateWorker(WorkerType, _model, Verbose);
    StartCoroutine("WaitLoad");
    // Dummy render texture used only to feed the first Execute() below.
    var texture = new RenderTexture(InputImageSize, InputImageSize, 0, RenderTextureFormat.RGB565, RenderTextureReadWrite.sRGB)
    {
        useMipMap = false,
        autoGenerateMips = false,
        wrapMode = TextureWrapMode.Clamp,
        filterMode = FilterMode.Point,
    };
    if (User3Input)
    {
        // Three-input model: the same dummy texture feeds all three input slots.
        inputs[inputName_1] = new Tensor(texture, 3);
        inputs[inputName_2] = new Tensor(texture, 3);
        inputs[inputName_3] = new Tensor(texture, 3);
        _worker.Execute(inputs);
        inputs[inputName_1].Dispose();
        inputs[inputName_2].Dispose();
        inputs[inputName_3].Dispose();
    }
    else
    {
        input = new Tensor(texture, 3);
        _worker.Execute(input);
        input.Dispose();
    }
}
/// <summary> /// Constructeur initialisant un TaskExecutor avec la méthode de traitement métier pour le Module 1 /// </summary> /// <param name="name"></param> /// <param name="adress"></param> /// <param name="port"></param> public DnaClient(string name, string adress, int port) : base(name, adress, port) { WorkerFactory.AddWorker(DnaQuantMethod, new TaskExecutor(this, DnaQuantStatDisplay, null, null)); }
public void MLP_Calc() { TensorCachingAllocator tca = new TensorCachingAllocator(); var shape = new MultiLayerPerception.Shape { inputSize = 2, outputSize = 3, hiddenSize = 2 }; MultiLayerPerception mlp = new MultiLayerPerception(shape); int layerCnt = 0; foreach (Layer layer in mlp.model.layers) { layerCnt++; for (int iWB = 0; iWB < layer.weights.Length; iWB++) { layer.weights[iWB] = iWB * layerCnt; } if (layer.datasets.Length == 2) { Debug.Log($"" + $"{layer.name} " + $"({layer.weights.Length}: W{layer.datasets[0].length} + B{layer.datasets[1].length}): " + $"<{string.Join(", ", layer.weights)}>"); } } string HiddenLayer = MultiLayerPerception.LayerNames.Hidden; IWorker worker = WorkerFactory.CreateWorker(mlp.model, new string[] { HiddenLayer }, WorkerFactory.Device.GPU); Tensor inTensor = tca.Alloc(new TensorShape(1, 1, 1, shape.inputSize)); for (int i = 0; i < shape.inputSize; i++) { inTensor[i] = i; Debug.Log($"input[{i}] = {inTensor[i]}"); } IWorker ex = worker.Execute(inTensor); ex.FlushSchedule(true); Tensor hTensor = ex.PeekOutput(HiddenLayer); Debug.Assert(hTensor.length == shape.hiddenSize); for (int i = 0; i < hTensor.length; i++) { Debug.Log($"hidden1[{i}] = {hTensor[i]}"); } Tensor output = ex.PeekOutput(); Debug.Assert(output.length == shape.outputSize); for (int i = 0; i < output.length; i++) { Debug.Log($"output[{i}] = {output[i]}"); } for (int iHNode = 0; iHNode < shape.hiddenSize; iHNode++) { string str = ""; float sum = 0; for (int iINode = 0; iINode < shape.inputSize; iINode++) { float w = mlp.GetWeight(HiddenLayer, iINode, iHNode); str += $"{w} * {inTensor[iINode]} + "; sum += w * inTensor[iINode]; } float b = mlp.GetBias(HiddenLayer, iHNode); str += $"{b}"; sum += b; str += $"= {hTensor[iHNode]} ({sum})"; Debug.Assert(Mathf.Approximately(sum, hTensor[iHNode])); Debug.Log(str); } tca.Dispose(); ex.Dispose(); worker.Dispose(); Debug.Assert(true); }
private void Start() { var model = ModelLoader.Load(ModelFinalOut); worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model); }
/// <summary>
/// Long-running pump that repeatedly picks a client job from the queue and drives it
/// through its lifecycle (Waiting -> Starting -> Running/Completed -> Deleting) on a
/// single worker, until cancellation is requested. Only jobs sharing the current job's
/// SpanId are handed to the same worker.
/// </summary>
/// <param name="cancellationToken">Stops the processing loop when signalled.</param>
private static async Task ProcessJobs(CancellationToken cancellationToken)
{
    IWorker worker = null;
    ClientJob job = null;
    // Timestamp of the last completed job; DateTime.MinValue means "none completed yet".
    var whenLastJobCompleted = DateTime.MinValue;
    // True while a span is active, i.e. more jobs with the same SpanId may still arrive.
    var waitForMoreJobs = false;
    while (!cancellationToken.IsCancellationRequested)
    {
        var allJobs = _jobs.GetAll();
        // Dequeue the first job. We will only pass jobs that have
        // the same SpanId to the current worker.
        job = allJobs.FirstOrDefault(newJob =>
        {
            // If the job is null then we don't have a span id to match against.
            // Otherwise we want to pick jobs with the same span id.
            return (job == null || string.Equals(newJob.SpanId, job.SpanId, StringComparison.OrdinalIgnoreCase));
        });
        if (job != null)
        {
            // A spanId means that a span is defined and we might run
            // multiple jobs.
            if (!string.IsNullOrEmpty(job.SpanId))
            {
                waitForMoreJobs = true;
            }
            if (job.State == ClientState.Waiting)
            {
                Log($"Starting '{job.Client}' worker");
                Log($"Current Job SpanId '{job.SpanId}'");
                job.State = ClientState.Starting;
                try
                {
                    // Reuse the existing worker for same-span jobs; create one otherwise.
                    if (worker == null)
                    {
                        worker = WorkerFactory.CreateWorker(job);
                    }
                    if (worker == null)
                    {
                        // Factory could not produce a worker: mark the job for deletion.
                        Log($"Error while creating the worker");
                        job.State = ClientState.Deleting;
                        whenLastJobCompleted = DateTime.UtcNow;
                    }
                    else
                    {
                        await worker.StartJobAsync(job);
                    }
                }
                catch (Exception e)
                {
                    Log($"An unexpected error occurred while starting the job {job.Id}");
                    Log(e.ToString());
                    job.State = ClientState.Deleting;
                }
            }
            else if (job.State == ClientState.Running || job.State == ClientState.Completed)
            {
                var now = DateTime.UtcNow;
                // Clean the job in case the driver is not running
                // (no driver heartbeat for 30 seconds).
                if (now - job.LastDriverCommunicationUtc > TimeSpan.FromSeconds(30))
                {
                    Log($"Driver didn't communicate for {now - job.LastDriverCommunicationUtc}. Halting job.");
                    job.State = ClientState.Deleting;
                }
            }
            else if (job.State == ClientState.Deleting)
            {
                Log($"Deleting job {worker?.JobLogText ?? "no worker found"}");
                try
                {
                    if (worker != null)
                    {
                        await worker.StopJobAsync();
                        // Reset the last job completed indicator.
                        whenLastJobCompleted = DateTime.UtcNow;
                    }
                }
                finally
                {
                    // Always remove the job from the queue, even if StopJobAsync threw.
                    _jobs.Remove(job.Id);
                    job = null;
                }
            }
        }
        await Task.Delay(100);
        // job will be null if there aren't any more jobs with the same spanId.
        if (job == null)
        {
            // Currently no jobs with the same span id exist so we check if we can
            // clear out the worker to signal to the worker factory to create
            // a new one.
            if (worker != null)
            {
                var now = DateTime.UtcNow;
                // Worker disposal conditions:
                // 1. A span isn't defined, so there won't be any more jobs for this worker; or
                // 2. whenLastJobCompleted has been set (is not its default value)
                //    and 10 seconds have passed since the last job was completed.
                if (!waitForMoreJobs || (whenLastJobCompleted != DateTime.MinValue && now - whenLastJobCompleted > TimeSpan.FromSeconds(10)))
                {
                    Log("Job queuing timeout reached. Disposing worker.");
                    waitForMoreJobs = false;
                    await worker.DisposeAsync();
                    worker = null;
                }
            }
        }
    }
}
public async Task CheckThreads(int messageCount, int workers, int parallelism, WorkerFactory workerFactory) { var(activator, starter) = CreateBus(workers, parallelism, workerFactory); var counter = new SharedCounter(messageCount); activator.Handle <string>(async str => counter.Decrement()); var bus = starter.Start(); await Task.WhenAll(Enumerable.Range(0, messageCount) .Select(n => bus.SendLocal($"THIS IS MESSAGE {n}"))); counter.WaitForResetEvent(timeoutSeconds: 15); }
public NNHandler(NNModel nnmodel) { model = ModelLoader.Load(nnmodel); worker = WorkerFactory.CreateWorker(model); }
/// <summary> /// Initialized a new World. /// </summary> public void initialize(string saveName, NewWorldSettings settings) { this.saveName = saveName; this.seed = settings.getSeed(); this.preInitialization(); // Create land plots this.plotManager.initializeFirstTime(this.seed); // Generate the map. for (int depth = 0; depth < this.storage.layerCount; depth++) { this.mapGenerator.generateLayer(this, depth); } // Unlock the plot that contains the Dumptruck and set the // Worker spawn point. List <CellBehaviorMasterDepositPoint> masters = this.getAllBehaviors <CellBehaviorMasterDepositPoint>(); if (masters.Count > 0) { CellBehaviorMasterDepositPoint m = masters[0]; this.plotManager.getPlot(m.pos.x, m.pos.y).isOwned = true; this.storage.workerSpawnPoint = m.pos.add(-1, -1); } else { Debug.LogWarning("No MasterDepositPoint could be found when generating a new map, there must always be at least one!"); } // Set starting money. this.money.value = this.mapGenerator.startingMoney; // Setup the new Player. CameraController.instance.initNewPlayer(settings); // Spawn the starting Workers. WorkerFactory factory = Main.instance.workerFactory; foreach (WorkerType workerType in this.mapGenerator.startingWorkers) { if (workerType != null) { int xShift = UnityEngine.Random.Range(-1, 2); int yShift = UnityEngine.Random.Range(0, 2); EntityWorker worker = factory.spawnWorker( this, this.storage.workerSpawnPoint.add(xShift, -yShift), factory.generateWorkerInfo(workerType, Main.instance.personalityRegistry.getDefaultPersonality()), workerType); } } this.postInitialization(); CameraController.instance.changeLayer(this.mapGenerator.playerStartLayer); }
/// <summary>
/// Constant-folding pass: executes every layer whose inputs are all known constants
/// on a CPU worker, replaces those layers with baked Load (constant) layers at the
/// top of the model, then removes any now-unused layers. Layers flagged Preserve are
/// never removed or replaced.
/// </summary>
/// <param name="model">Model to optimize in place.</param>
public static void FuseConstants(ref Model model)
{
    // Tensor value for every layer whose output is known at optimization time.
    var knownLayersValue = new Dictionary<string, Tensor>();
    // Layers baked during this pass (replaced by Load constants below).
    var newKnownLayers = new HashSet<string>();
    // Layers that must be kept as-is (Layer.Flags.Preserve).
    var keepLayers = new HashSet<string>();
    for (int l = 0; l < model.layers.Count; ++l)
    {
        var layer = model.layers[l];
        if (layer.flags == Layer.Flags.Preserve)
        {
            keepLayers.Add(layer.name);
        }
        // NN is a directed graph, if we just fused constants + shapes, update following nodes
        // TODO optimization, pass in index, or add shape
        if (ModelOptimizer.IsLayerConstant(layer))
        {
            knownLayersValue[layer.name] = new Tensor(layer.datasets[0].shape, layer.weights);
        }
        // A layer can be folded only when every one of its inputs is already known.
        bool allInputsAreKnown = layer.inputs.Length > 0 ? knownLayersValue.ContainsKey(layer.inputs[0]) : false;
        for (int i = 1; i < layer.inputs.Length; i++)
        {
            allInputsAreKnown &= knownLayersValue.ContainsKey(layer.inputs[i]);
        }
        // if all inputs are known, execute layer
        if (!allInputsAreKnown)
        {
            continue;
        }
        // Build a one-layer model around this layer so a worker can execute it in isolation.
        var layerInputs = new Dictionary<string, Tensor>();
        var opsModel = new Model();
        for (int i = 0; i < layer.inputs.Length; i++)
        {
            Model.Input input;
            input.name = layer.inputs[i];
            input.shape = knownLayersValue[input.name].shape.ToArray();
            input.rank = knownLayersValue[input.name].shape.dimensions;
            opsModel.inputs.Add(input);
            layerInputs[input.name] = knownLayersValue[input.name];
        }
        opsModel.layers.Add(layer);
        opsModel.outputs.Add(layer.name);
        // bake
        var useCPUforBaking = WorkerFactory.Device.CPU;
        using (var worker = WorkerFactory.CreateWorker(opsModel, useCPUforBaking))
        {
            // TODO use ModelIR2RunnableNCHWPass
            var bakedConstant = worker.Execute(layerInputs).PeekOutput();
            // Take ownership so the baked tensor outlives the worker's disposal.
            bakedConstant.TakeOwnership();
            knownLayersValue[layer.name] = bakedConstant;
            newKnownLayers.Add(layer.name);
        }
    }
    // remove new baked layers since we will insert constants for those
    model.layers.RemoveAll(x => newKnownLayers.Contains(x.name) && !keepLayers.Contains(x.name));
    // TODO use ModelBuilder?
    foreach (var l in newKnownLayers)
    {
        if (keepLayers.Contains(l))
        {
            continue;
        }
        var name = l;
        var tensor = knownLayersValue[name];
        // Insert a Load (constant) layer holding the baked value at the top of the model.
        Layer c = new Layer(name, Layer.Type.Load);
        c.datasets = new Layer.DataSet[1];
        c.datasets[0].name = name;
        c.datasets[0].shape = tensor.shape;
        c.datasets[0].itemSizeInBytes = 4; // float32 payload
        c.datasets[0].length = tensor.shape.length;
        c.datasets[0].offset = 0;
        // axis carries the tensor's rank here — mirror of the shape's dimensions.
        c.axis = tensor.shape.dimensions;
        c.weights = new BarracudaArray(tensor.length);
        BarracudaArray.Copy(tensor.ToReadOnlyArray(), c.weights, tensor.length);
        model.layers.Insert(0, c);
    }
    // clear allocated tensors
    foreach (var l in knownLayersValue)
    {
        l.Value.Dispose();
    }
    // remove unused constants
    var removeUnusedLayersPass = new Cleanup.RemoveUnusedLayersPass();
    removeUnusedLayersPass.Run(ref model);
}
/// <summary>
/// Shape/constant folding pass (NCHW intermediate layout): materializes Shape layers
/// whose input shape and rank are already known, then bakes any layer whose inputs are
/// all known by executing a single-layer clone of it on a CPU worker. Baked layers are
/// replaced with Load (constant) layers; Preserve-flagged layers are left untouched.
/// </summary>
/// <param name="model">Model to optimize in place.</param>
/// <param name="shapesByName">Known tensor shapes per layer name (null when unknown); updated as the pass runs.</param>
/// <param name="ranksByName">Known tensor ranks per layer name (null when unknown); updated as the pass runs.</param>
/// <param name="warnings">Importer warnings collected while fusing inputs into layers.</param>
private void FuseShapesIntoConstants(ref Model model, IDictionary<string, TensorShape?> shapesByName, IDictionary<string, int?> ranksByName, ref List<Model.ImporterWarning> warnings)
{
    var toRunnableNCHW = new IntermediateToRunnableNCHWPass();
    // Tensor value for every layer whose output is known at optimization time.
    var knownLayersValue = new Dictionary<string, Tensor>();
    // Layers baked during this pass (replaced by Load constants below).
    var newKnownLayers = new HashSet<string>();
    // Layers that must be kept as-is (Layer.Flags.Preserve).
    var keepLayers = new HashSet<string>();
    for (int l = 0; l < model.layers.Count; ++l)
    {
        var layer = model.layers[l];
        if (layer.flags == Layer.Flags.Preserve)
        {
            keepLayers.Add(layer.name);
        }
        // NN is a directed graph, if we just fused constants + shapes, update following nodes
        // re-evaluate shapes
        FuseInputsIntoLayer(ref layer, knownLayersValue, ranksByName, warnings);
        // TODO optimization, pass in index, or add shape
        IRShapeInferenceHelper.RankInference.UpdateKnownTensorRanks(model, ranksByName);
        IRShapeInferenceHelper.ShapeInference.UpdateKnownTensorShapesNCHW(model, ranksByName, ref shapesByName);
        if (ModelOptimizer.IsLayerConstant(layer))
        {
            knownLayersValue[layer.name] = new Tensor(layer.datasets[0].shape, layer.weights);
        }
        else if (layer.type == Layer.Type.Shape)
        {
            // assert inputs.Lenght == 1
            var input = layer.inputs[0];
            // A Shape layer can be folded directly when its input's shape AND rank are known.
            if (shapesByName.ContainsKey(input) && shapesByName[input] != null &&
                ranksByName.ContainsKey(input) && ranksByName[input] != null
                )
            {
                var shape = shapesByName[input].Value;
                var rank = ranksByName[input].Value;
                knownLayersValue[layer.name] = ShapeToNCHWTensor(shape, rank);
                newKnownLayers.Add(layer.name);
                continue;
            }
        }
        // A layer can be baked only when every one of its inputs is already known.
        bool allInputsAreKnown = layer.inputs.Length > 0 ?
                                 knownLayersValue.ContainsKey(layer.inputs[0]) : false;
        for (int i = 1; i < layer.inputs.Length; i++)
        {
            allInputsAreKnown &= knownLayersValue.ContainsKey(layer.inputs[i]);
        }
        // if all inputs are known, execute layer
        if (!allInputsAreKnown)
        {
            continue;
        }
        // Build a one-layer model around a clone of this layer so a worker can execute it in isolation.
        var layerInputs = new Dictionary<string, Tensor>();
        var opsModel = new Model();
        opsModel.layout = "iNCHW";
        for (int i = 0; i < layer.inputs.Length; i++)
        {
            Model.Input input;
            input.name = layer.inputs[i];
            input.shape = shapesByName[input.name].Value.ToArray();
            input.rank = ranksByName[input.name].Value;
            opsModel.inputs.Add(input);
            layerInputs[input.name] = knownLayersValue[input.name];
        }
        // Deep-ish clone of the layer (arrays copied) so the NCHW pass below cannot
        // mutate the original model's layer.
        Layer newLayer = new Layer(layer.name.ToString(), layer.activation);
        newLayer.type = layer.type;
        newLayer.activation = layer.activation;
        newLayer.pad = layer.pad.ToArray();
        newLayer.stride = layer.stride.ToArray();
        newLayer.pool = layer.pool.ToArray();
        newLayer.axis = layer.axis;
        newLayer.alpha = layer.alpha;
        newLayer.beta = layer.beta;
        newLayer.inputs = layer.inputs.ToArray();
        newLayer.datasets = layer.datasets;
        newLayer.weights = layer.weights;
        if (layer.outputs != null)
        {
            newLayer.outputs = layer.outputs.ToArray();
        }
        if (layer.axes != null)
        {
            newLayer.axes = layer.axes.ToArray();
        }
        opsModel.layers.Add(newLayer);
        opsModel.outputs.Add(newLayer.name);
        // Convert the single-layer model to a runnable NCHW form before executing it.
        toRunnableNCHW.Run(ref opsModel);
        // bake
        var useCPUforBaking = WorkerFactory.Device.CPU;
        using (var worker = WorkerFactory.CreateWorker(opsModel, useCPUforBaking))
        {
            // CopyOutput returns a tensor that survives the worker's disposal.
            var bakedConstant = worker.Execute(layerInputs).CopyOutput();
            knownLayersValue[layer.name] = bakedConstant;
            newKnownLayers.Add(layer.name);
        }
    }
    // remove new baked layers since we will insert constants for those
    model.layers.RemoveAll(x => newKnownLayers.Contains(x.name) && !keepLayers.Contains(x.name));
    // TODO use ModelBuilder?
    foreach (var l in newKnownLayers)
    {
        if (keepLayers.Contains(l))
        {
            continue;
        }
        var name = l;
        var tensor = knownLayersValue[name];
        // Insert a Load (constant) layer holding the baked value at the top of the model.
        Layer c = new Layer(name, Layer.Type.Load);
        c.datasets = new Layer.DataSet[1];
        c.datasets[0].name = name;
        c.datasets[0].shape = tensor.shape;
        c.datasets[0].itemSizeInBytes = 4; // float32 payload
        c.datasets[0].length = tensor.shape.length;
        c.datasets[0].offset = 0;
        // axis carries the constant's rank, taken from the inferred ranks map.
        c.axis = ranksByName[c.name].Value;
        c.weights = new BarracudaArray(tensor.length);
        BarracudaArray.Copy(tensor.ToReadOnlyArray(), c.weights, tensor.length);
        model.layers.Insert(0, c);
    }
    // Dispose every tensor materialized during the pass.
    foreach (var l in knownLayersValue)
    {
        l.Value.Dispose();
    }
    // TODO remove?
    // remove unused constants
    var removeUnusedLayersPass = new Cleanup.RemoveUnusedLayersPass();
    removeUnusedLayersPass.Run(ref model);
}
private void InitializeWorkers() { _workerFactory = new WorkerFactory(); _laborConverter = new LaborConverter(_workerFactory, _shiftConverter, _skillConverter); _workers = new ConcurrentDictionary<int, IWorker>(); _laborsMap = new ConcurrentDictionary<int, ILabor>(); }