// Loads the RollerBall brain, creates the inference worker and caches the
// physics body plus the 2-element action buffer.
void Start()
{
    // BUG FIX: the original passed an absolute machine-specific path
    // ("F:\\2Unity\\RollerBall\\Assets\\RollerBallBrain.nn"), which breaks on
    // any other machine or in a build. LoadFromStreamingAssets expects a path
    // relative to Application.streamingAssetsPath.
    model_ = ModelLoader.LoadFromStreamingAssets("RollerBallBrain.nn");
    worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, model_);
    rBody = GetComponent<Rigidbody>();
    // Two-element buffer; presumably [Fx, Fz] force components — TODO confirm
    // against the code that fills/consumes it.
    FxFz = new float[2];
}
// Pre-computes heat-map indexing constants, allocates the flattened output
// buffers, loads the Barracuda model and initialises the VNect skeleton and
// the video input. Runs once on scene start.
private void Start()
{
    // Derived sizes used to index into the flattened heat-map tensors.
    HeatMapCol_Squared = HeatMapCol * HeatMapCol;
    HeatMapCol_Cube = HeatMapCol * HeatMapCol * HeatMapCol;
    HeatMapCol_JointNum = HeatMapCol * JointNum;
    CubeOffsetLinear = HeatMapCol * JointNum_Cube;
    CubeOffsetSquared = HeatMapCol_Squared * JointNum_Cube;

    // Flat buffers for the network's 2D/3D heat-map and offset outputs
    // (offsets carry 2 resp. 3 coordinate channels per joint cell).
    heatMap2D = new float[JointNum * HeatMapCol_Squared];
    offset2D = new float[JointNum * HeatMapCol_Squared * 2];
    heatMap3D = new float[JointNum * HeatMapCol_Cube];
    offset3D = new float[JointNum * HeatMapCol_Cube * 3];
    unit = 1f / (float)HeatMapCol;

    InputImageSizeF = InputImageSize;
    InputImageSizeHalf = InputImageSizeF / 2f;
    // Ratio between the network input resolution and the heat-map grid.
    ImageScale = InputImageSize / (float)HeatMapCol;

    // Disable screen sleep while tracking.
    Screen.sleepTimeout = SleepTimeout.NeverSleep;

    // Load the model and create the inference worker, then wait for it
    // asynchronously.
    _model = ModelLoader.Load(NNModel, Verbose);
    _worker = BarracudaWorkerFactory.CreateWorker(WorkerType, _model, Verbose);
    StartCoroutine("WaitLoad");

    // Init the VNect skeleton model.
    jointPoints = VNectModel.Init();

    // Init video capture at the network's square input resolution.
    videoCapture.Init(InputImageSize, InputImageSize);
}
// Runs one inference pass over the drawn points: loads the model, builds the
// 28x28 input from the drawing, logs the argmax class, then clears the canvas
// and disposes all native resources.
public void Predict()
{
    loadedModel = ModelLoader.Load(modelSource);
    worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, loadedModel);

    // Resample the 32x32 drawing down to the model's 28x28 input layout.
    float[] points = GetPoints(3);
    float[] newPoints = RePosition(ReSize(points, 32, 32, 28, 28));

    var t = new Tensor(1, 28 * 28, newPoints);
    worker.ExecuteAndWaitForCompletion(t);
    var result = worker.PeekOutput();
    var floats = result.data.Download(10);

    // BUG FIX: seed the argmax with float.MinValue — the original started at 0,
    // so when all 10 scores were <= 0 (possible with raw logits) maxIndex
    // stayed -1 and no class was reported.
    float maxVal = float.MinValue;
    int maxIndex = -1;
    for (int i = 0; i < floats.Length; i++)
    {
        if (floats[i] > maxVal)
        {
            maxVal = floats[i];
            maxIndex = i;
        }
    }
    Debug.Log(maxVal + " occurs at " + maxIndex);
    DrawHandler.instance.Clear();

    // Diagnostic dump of the model's declared input/output names.
    List<string> inputNames = loadedModel.inputs.ConvertAll(new System.Converter<Model.Input, string>(StringMap));
    List<string> outputNames = loadedModel.outputs;
    Debug.Log(inputNames.Count + " " + inputNames[0]);
    Debug.Log(outputNames.Count + " " + outputNames[0]);

    // BUG FIX: the input tensor was never disposed (native memory leak on
    // every prediction).
    t.Dispose();
    result.Dispose();
    worker.Dispose();
}
// Start is called before the first frame update.
// Sample: loads the model, crops the test image to 28x28, runs one inference
// and logs the 10 class outputs.
void Start()
{
    model = ModelLoader.Load(modelSource);
    worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, model);

    // The resource loads as 32x32, so crop the top-left 28x28 region the
    // MNIST model expects (translated from the original Japanese note).
    Texture2D tex = (Texture2D)Resources.Load("img_7");
    var pixel = tex.GetPixels(0, 0, 28, 28);
    var tex2 = new Texture2D(28, 28);
    tex2.SetPixels(pixel);
    tex2.Apply();

    inputs["0"] = new Tensor(tex2, 1); // single-channel input
    worker.Execute(inputs["0"]);

    Tensor output = worker.PeekOutput(); // Barracuda.Tensor
    Debug.Log(output.GetType());
    Debug.Log(output.data.GetMaxCount());
    for (int i = 0; i < 10; i++)
    {
        Debug.Log(output[i]);
    }

    // BUG FIX: the input tensor was never disposed (native memory leak);
    // tensors must be disposed explicitly.
    inputs["0"].Dispose();
    output.Dispose();
    worker.Dispose();
    Debug.Log("end");
}
/// <summary>
/// Initializes the Brain with the Model that it will use when selecting actions for
/// the agents
/// </summary>
/// <param name="seed"> The seed that will be used to initialize the RandomNormal
/// and Multinomial objects used when running inference.</param>
/// <exception cref="UnityAgentsException">Throws an error when the model is null
/// </exception>
public void ReloadModel(int seed = 0)
{
    // Lazily create the shared tensor allocator; it is reused across reloads.
    if (_tensorAllocator == null)
    {
        _tensorAllocator = new TensorCachingAllocator();
    }

#if ENABLE_TENSORFLOW
    // TensorFlowSharp backend: prepare the engine directly from the model bytes.
    if (model != null)
    {
        _engine = new TFSharpInferenceEngine();
        _engine.PrepareModel(model.bytes);
    }
    else
    {
        _engine = null;
    }
    _modelParamLoader = ModelParamLoader.GetLoaderAndCheck(_engine, brainParameters);
    _inferenceInputs = _modelParamLoader.GetInputTensors();
    _inferenceOutputs = _modelParamLoader.GetOutputTensors();
    _tensorGenerator = new TensorGenerator(brainParameters, seed, _tensorAllocator);
    _tensorApplier = new TensorApplier(brainParameters, seed, _tensorAllocator);
#else
    // Barracuda backend.
    if (model != null)
    {
#if BARRACUDA_VERBOSE
        _verbose = true;
#endif
        D.logEnabled = _verbose;

        // Cleanup previous instance before creating a new worker.
        if (_engine != null)
        {
            _engine.Dispose();
        }

        _barracudaModel = ModelLoader.Load(model.Value);
        // GPU runs the precompiled compute path; everything else falls back to
        // the C# CPU backend.
        var executionDevice = inferenceDevice == InferenceDevice.GPU
            ? BarracudaWorkerFactory.Type.ComputePrecompiled
            : BarracudaWorkerFactory.Type.CSharp;

        _engine = BarracudaWorkerFactory.CreateWorker(executionDevice, _barracudaModel, _verbose);
    }
    else
    {
        _barracudaModel = null;
        _engine = null;
    }

    // Validate the model against the brain parameters and wire up the
    // tensor plumbing for inference.
    _modelParamLoader = BarracudaModelParamLoader.GetLoaderAndCheck(_engine, _barracudaModel, brainParameters);
    _inferenceInputs = _modelParamLoader.GetInputTensors();
    _outputNames = _modelParamLoader.GetOutputNames();
    _tensorGenerator = new TensorGenerator(brainParameters, seed, _tensorAllocator, _barracudaModel);
    _tensorApplier = new TensorApplier(brainParameters, seed, _tensorAllocator, _barracudaModel);
#endif
}
// Use this for initialization.
// Caps the frame rate, builds the model for the input image's dimensions,
// creates the GPU worker and kicks off the inference coroutine.
void Start()
{
    // Stable frame pacing while the async inference runs.
    Application.targetFrameRate = 60;

    model = BuildModel(inputImage.height, inputImage.width);

    var workerType = BarracudaWorkerFactory.Type.ComputePrecompiled;
    engine = BarracudaWorkerFactory.CreateWorker(workerType, model, false);

    input = new Tensor(inputImage);
    StartCoroutine(RunInference());
}
/// <summary>
/// Loads the MNIST model, creates a worker for the requested backend type and
/// prepares precompiled compute ops from the singleton's shader kernels.
/// </summary>
public Mnist(NNModel nnModel, BarracudaWorkerFactory.Type type)
{
    const bool verboseLogging = false;

    model = ModelLoader.Load(nnModel, verboseLogging);
    worker = BarracudaWorkerFactory.CreateWorker(type, model, verboseLogging);

    var shaderKernels = ComputeShaderSingleton.Instance.kernels;
    ops = new PrecompiledComputeOps(shaderKernels, shaderKernels[0]);
}
// Start is called before the first frame update.
// Loads the model, creates the inference worker, then prepares the
// texture-paint state from the plane's material texture.
void Start()
{
    // Model and inference worker.
    m_Model = ModelLoader.LoadFromStreamingAssets(m_ModelName + ".nn");
    m_Worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, m_Model);

    // Texture-paint setup: snapshot the plane's current pixels into a buffer
    // and create a point-filtered texture of the same size to draw onto.
    var mainTexture = (Texture2D)m_Plane.GetComponent<Renderer>().material.mainTexture;
    var pixels = mainTexture.GetPixels();
    m_Buffer = new Color[pixels.Length];
    pixels.CopyTo(m_Buffer, 0);

    m_DrawTexture = new Texture2D(mainTexture.width, mainTexture.height, TextureFormat.ARGB32, false);
    m_DrawTexture.filterMode = FilterMode.Point;
}
// Start is called before the first frame update.
// Builds the two model inputs — an 18-dim condition vector and a 100-dim
// noise vector — then loads the model and creates the inference worker.
void Start()
{
    inputs = new Dictionary<string, Tensor>();

    // Condition vector: 18 random values in [0, 0.3).
    cond = new float[18];
    for (int idx = 0; idx < cond.Length; ++idx)
    {
        cond[idx] = Random.Range(0, 0.3f);
    }
    inputs[inputName2] = new Tensor(1, 1, 1, 18, cond);

    // Latent noise vector: 100 random values in [-1, 1].
    noise = new float[100];
    for (int idx = 0; idx < noise.Length; ++idx)
    {
        noise[idx] = Random.Range(-1f, 1f);
    }
    inputs[inputName1] = new Tensor(1, 1, 1, 100, noise);

    var model = ModelLoader.Load(modelSource);
    worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, model);

    print("Update Per Frame: " + pf);
    counter = Time.frameCount;
}
/// <summary>
/// Starts asynchronous ML upscaling of <paramref name="tex"/> and returns the
/// (initially blank) destination texture that the async executor will fill in.
/// Returns null when the user cancels after the video-memory warning.
/// </summary>
public static Texture2D UpscaleTexture(Texture2D tex)
{
    sContext.inputTexture = tex;

    // Budget needed for upscaling in MB. Transparent images are processed as
    // two planes (colour + alpha), hence the 2x factor.
    long budget = 9600L * sContext.inputTexture.width * sContext.inputTexture.height * (sContext.inputTexture.alphaIsTransparency ? 2 : 1) / 1024 / 1024;

    // Check if model will fit into video memory; let the user bail out if not.
    if (budget > SystemInfo.graphicsMemorySize)
    {
        bool proceed = EditorUtility.DisplayDialog("Upscaling large image!",
            $"Upscaling process will use more video memory than your GPU has ({budget}MB > {SystemInfo.graphicsMemorySize}MB)." +
            "\nDo you want to continue?\nNote: transparent images take 2x of memory!",
            "Continue", "Cancel");
        if (!proceed)
        {
            return(null);
        }
    }

    sContext.model = ModelLoader.Load(LocateModel(), false);
    sContext.worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, sContext.model, false);

    // Transparent input feeds colour + extracted alpha as two textures.
    sContext.input = sContext.inputTexture.alphaIsTransparency
        ? new Tensor(new Texture[] { sContext.inputTexture, ExtractAlpha(sContext.inputTexture) })
        : new Tensor(new Texture[] { sContext.inputTexture });

    var target = new Texture2D(tex.width, tex.height);
    sContext.startTime = Time.realtimeSinceStartup;
    sContext.destinationTexture = target;
    sContext.executor = sContext.worker.ExecuteAsync(sContext.input);
    return(target);
}
/// <summary>
/// Initializes the Brain with the Model that it will use when selecting actions for
/// the agents
/// </summary>
/// <param name="seed"> The seed that will be used to initialize the RandomNormal
/// and Multinomial objects used when running inference.</param>
/// <exception cref="UnityAgentsException">Throws an error when the model is null
/// </exception>
public void ReloadModel(int seed = 0)
{
    // Lazily create the shared tensor allocator; it is reused across reloads.
    if (m_TensorAllocator == null)
    {
        m_TensorAllocator = new TensorCachingAllocator();
    }

    if (model != null)
    {
#if BARRACUDA_VERBOSE
        // BUG FIX: this previously assigned `_verbose`, but every read in this
        // method uses the `m_Verbose` field — so with BARRACUDA_VERBOSE defined
        // the flag either failed to compile or never took effect.
        m_Verbose = true;
#endif
        D.logEnabled = m_Verbose;

        // Cleanup previous instance before creating a new worker.
        if (m_Engine != null)
        {
            m_Engine.Dispose();
        }

        m_BarracudaModel = ModelLoader.Load(model.Value);
        // GPU runs the precompiled compute path; everything else falls back to
        // the C# CPU backend.
        var executionDevice = inferenceDevice == InferenceDevice.GPU
            ? BarracudaWorkerFactory.Type.ComputePrecompiled
            : BarracudaWorkerFactory.Type.CSharp;
        m_Engine = BarracudaWorkerFactory.CreateWorker(executionDevice, m_BarracudaModel, m_Verbose);
    }
    else
    {
        m_BarracudaModel = null;
        m_Engine = null;
    }

    // Validate the model against the brain parameters and wire up the tensor
    // plumbing for inference.
    m_ModelParamLoader = BarracudaModelParamLoader.GetLoaderAndCheck(m_Engine, m_BarracudaModel, brainParameters);
    m_InferenceInputs = m_ModelParamLoader.GetInputTensors();
    m_OutputNames = m_ModelParamLoader.GetOutputNames();
    m_TensorGenerator = new TensorGenerator(brainParameters, seed, m_TensorAllocator, m_BarracudaModel);
    m_TensorApplier = new TensorApplier(brainParameters, seed, m_TensorAllocator, m_BarracudaModel);
}
// Start is called before the first frame update.
// Runs PoseNet once over a test image, decodes up to two poses and stores
// them in `poses` for the GL renderer.
void Start()
{
    var model = ModelLoader.LoadFromStreamingAssets(modelName + ".bytes");
    var worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.Compute, model);

    // BUG FIX: layer.inputs is an array; logging it directly printed the array
    // type name instead of the actual input names.
    foreach (var layer in model.layers)
    {
        Debug.Log("Layer " + layer.name + " does: " + string.Join(", ", layer.inputs));
    }

    var inputs = new Dictionary<string, Tensor>();
    Texture2D img = Resources.Load("tennis_in_crowd") as Texture2D;
    var tensor = new Tensor(img, 3); // 3-channel RGB input
    inputs.Add("image", tensor);
    worker.ExecuteAndWaitForCompletion(inputs);

    // PoseNet's four output maps.
    var Heatmap = worker.Fetch("heatmap");
    var Offset = worker.Fetch("offset_2");
    var Dis_fwd = worker.Fetch("displacement_fwd_2");
    var Dis_bwd = worker.Fetch("displacement_bwd_2");

    poses = posenet.DecodeMultiplePosesOG(Heatmap, Offset, Dis_fwd, Dis_bwd,
        outputStride: 16, maxPoseDetections: 2, scoreThreshold: 0.02f, nmsRadius: 20);

    gl = GameObject.Find("GLRenderer").GetComponent<GLRenderer>();
    Debug.Log(poses.Length);

    // BUG FIX: the input tensor also needs disposing, not just the outputs
    // (native memory leak otherwise).
    tensor.Dispose();
    Heatmap.Dispose();
    Offset.Dispose();
    Dis_fwd.Dispose();
    Dis_bwd.Dispose();
    worker.Dispose();
}
/// <summary>
/// Initializes the Brain with the Model that it will use when selecting actions for
/// the agents
/// </summary>
/// <param name="model"> The Barracuda model to load </param>
/// <param name="brainParameters"> The parameters of the Brain used to generate the
/// placeholder tensors </param>
/// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
/// option for most of ML Agents models. </param>
/// <param name="seed"> The seed that will be used to initialize the RandomNormal
/// and Multinomial objects used when running inference.</param>
/// <exception cref="UnityAgentsException">Throws an error when the model is null
/// </exception>
public ModelRunner(
    NNModel model,
    BrainParameters brainParameters,
    InferenceDevice inferenceDevice = InferenceDevice.CPU,
    int seed = 0)
{
    Model barracudaModel;
    m_Model = model;
    m_InferenceDevice = inferenceDevice;
    m_TensorAllocator = new TensorCachingAllocator();
    if (model != null)
    {
#if BARRACUDA_VERBOSE
        m_Verbose = true;
#endif
        D.logEnabled = m_Verbose;
        barracudaModel = ModelLoader.Load(model.Value);
        // GPU runs the precompiled compute path; everything else falls back
        // to the C# CPU backend.
        var executionDevice = inferenceDevice == InferenceDevice.GPU
            ? BarracudaWorkerFactory.Type.ComputePrecompiled
            : BarracudaWorkerFactory.Type.CSharp;
        m_Engine = BarracudaWorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
    }
    else
    {
        // No model supplied: the runner is constructed without an engine.
        barracudaModel = null;
        m_Engine = null;
    }
    // Wire up input/output names and the tensor generation/application
    // plumbing for this model.
    m_InferenceInputs = BarracudaModelParamLoader.GetInputTensors(barracudaModel);
    m_OutputNames = BarracudaModelParamLoader.GetOutputNames(barracudaModel);
    m_TensorGenerator = new TensorGenerator(
        seed, m_TensorAllocator, m_Memories, barracudaModel);
    m_TensorApplier = new TensorApplier(
        brainParameters, seed, m_TensorAllocator, m_Memories, barracudaModel);
}
// Start is called before the first frame update.
// Demo: everything runs inside Start() because this is a smoke-test sample
// (translated from the original Japanese comments).
void Start()
{
    // Load the model and create the inference worker.
    var model = ModelLoader.LoadFromStreamingAssets(m_ModelName + ".nn");
    var worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, model);

    // Build the input; the second argument is the channel count.
    var tensor = new Tensor(m_InputTexture, 1);

    // Run inference and grab the output.
    worker.Execute(tensor);
    var O = worker.Peek();

    // Argmax over the 10 class scores.
    int pred = 0;
    float maxVal = float.MinValue;
    for (int i = 0; i < 10; ++i)
    {
        if (maxVal < O.readonlyArray[i])
        {
            pred = i;
            maxVal = O.readonlyArray[i];
        }
    }
    Debug.Log("Pred: " + pred.ToString());

    // Cleanup (free native memory).
    // BUG FIX: the input tensor was never disposed — tensors hold native
    // resources and leak without an explicit Dispose().
    tensor.Dispose();
    O.Dispose();
    worker.Dispose();
}
/// <summary>
/// Loads the Tiny-YOLO model, creates its inference worker and precomputes the
/// texture-resize options and aspect ratio matching the model's input shape.
/// </summary>
public TinyYolo(NNModel nnModel, BarracudaWorkerFactory.Type type)
{
    const bool verbose = false;

    model = ModelLoader.Load(nnModel, verbose);
    Debug.Log(model);
    Debug.Log(model.inputs[0].name);
    worker = BarracudaWorkerFactory.CreateWorker(type, model, verbose);

    // Resize incoming textures to the network's declared input dimensions,
    // filling (stretching) rather than letterboxing.
    inputShape = new TensorShape(model.inputs[0].shape);
    textureToTensor = new TextureToTensor();
    resizeOptions = new TextureToTensor.ResizeOptions()
    {
        width = inputShape.width,
        height = inputShape.height,
        rotationDegree = 0,
        flipX = false,
        flipY = false,
        aspectMode = TextureToTensor.AspectMode.Fill,
    };
    inputAspect = (float)inputShape.width / (float)inputShape.height;

    results = new Result[25];
}
// Start is called before the first frame update
void Start()
{
    // NOTE(review): RollerBallBrain is presumably a string field naming the
    // model file — confirm. LoadFromStreamingAssets expects a path relative
    // to Application.streamingAssetsPath.
    var model = ModelLoader.LoadFromStreamingAssets(RollerBallBrain + ".nn");
    // NOTE(review): this worker is a local that is never stored, used or
    // disposed — it leaks its native resources and cannot run inference later.
    // It likely should be assigned to a field and disposed in OnDestroy().
    var worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, model);
}
// Coroutine: runs PoseNet over a raw Color32 pixel buffer, decodes the single
// best pose into `poses`, then releases all native resources and clears the
// busy flag. The work is spread across frames via WaitForEndOfFrame yields.
// `secondsToWait` is currently unused (the WaitForSeconds pacing was replaced
// by end-of-frame yields) but kept for interface compatibility.
IEnumerator PoseUpdateNoTex(NativeArray<Color32> buffer, int width, int height, float secondsToWait)
{
    var _model = ModelLoader.LoadFromStreamingAssets(modelName + ".bytes");
    var _worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.Compute, _model);

    // Copy the raw pixel buffer into a texture the Tensor constructor accepts.
    var frame = new Texture2D(width, height, TextureFormat.RGB24, false);
    frame.SetPixels32(buffer.ToArray());
    frame.Apply();

    yield return(new WaitForEndOfFrame());

    // Rescale to the network's expected input resolution.
    posenet.scale(frame, Width, Height, FilterMode.Bilinear);

    var inputs = new Dictionary<string, Tensor>();
    var tensor = new Tensor(frame, 3); // 3-channel RGB input
    inputs.Add("image", tensor);
    _worker.ExecuteAndWaitForCompletion(inputs);

    yield return(new WaitForEndOfFrame());
    var Heatmap = _worker.Fetch("heatmap");
    yield return(new WaitForEndOfFrame());
    var Offset = _worker.Fetch("offset_2");
    yield return(new WaitForEndOfFrame());
    var Dis_fwd = _worker.Fetch("displacement_fwd_2");
    yield return(new WaitForEndOfFrame());
    var Dis_bwd = _worker.Fetch("displacement_bwd_2");
    yield return(new WaitForEndOfFrame());

    poses = posenet.DecodeMultiplePosesOG(Heatmap, Offset, Dis_fwd, Dis_bwd,
        outputStride: 16, maxPoseDetections: 1, scoreThreshold: 0.8f, nmsRadius: 30);

    // BUG FIX: the input tensor was never disposed (native memory leak on
    // every pose update).
    tensor.Dispose();
    Offset.Dispose();
    Dis_fwd.Dispose();
    Dis_bwd.Dispose();
    Heatmap.Dispose();
    _worker.Dispose();

    isPosing = false;
    // NOTE(review): nulling `frame` does not free the Texture2D's native
    // memory — consider UnityEngine.Object.Destroy(frame) here; confirm
    // against the caller's lifetime expectations.
    frame = null;
    inputs = null;
    yield return(null);
}