void Start()
{
    Application.targetFrameRate = 60;
    m_RuntimeModel = ModelLoader.Load(inputModel, false);
    m_Worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, m_RuntimeModel, false);

#if (WEBCAM)
#if UNITY_WSA
    // Pick the highest-resolution camera mode and its fastest frame rate.
    Resolution cameraResolution = VideoCapture.SupportedResolutions.OrderByDescending((res) => res.width * res.height).First();
    Debug.Log(cameraResolution);

    float cameraFramerate = VideoCapture.GetSupportedFrameRatesForResolution(cameraResolution).OrderByDescending((fps) => fps).First();
    Debug.Log(cameraFramerate);

    VideoCapture.CreateAsync(false, delegate(VideoCapture videoCapture)
    {
        if (videoCapture != null)
        {
            m_VideoCapture = videoCapture;

            CameraParameters cameraParameters = new CameraParameters();
            cameraParameters.hologramOpacity = 0.0f;
            cameraParameters.frameRate = cameraFramerate;
            cameraParameters.cameraResolutionWidth = cameraResolution.width;
            cameraParameters.cameraResolutionHeight = cameraResolution.height;
            cameraParameters.pixelFormat = CapturePixelFormat.BGRA32;

            m_VideoCapture.StartVideoModeAsync(cameraParameters,
                VideoCapture.AudioState.ApplicationAndMicAudio,
                OnStartedVideoCaptureMode);
        }
        else
        {
            Debug.LogError("Failed to create VideoCapture Instance!");
        }
    });
#else
    m_WebcamTexture = new WebCamTexture();
    m_WebcamTexture.Play();
#endif
#else
    var targetRT = RenderTexture.GetTemporary(inputResolutionX, inputResolutionY, 0);
    Graphics.Blit(inputImage, targetRT, postprocessMaterial);
    m_Input = new Tensor(targetRT, 3);
#endif
}
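Nothing in Start() above actually runs the model; a minimal companion sketch of the per-frame execution and teardown, assuming the field names from the snippet (m_Worker, m_Input) and the non-webcam path:

// Hedged sketch (not part of the original source): run the worker on the tensor
// built in Start() and release native resources when the component is destroyed.
void Update()
{
    if (m_Input == null) return;
    m_Worker.Execute(m_Input);           // schedule inference
    var output = m_Worker.PeekOutput();  // read-only view, still owned by the worker
    // ... consume output here ...
}

void OnDestroy()
{
    m_Input?.Dispose();   // Barracuda tensors wrap native memory
    m_Worker?.Dispose();
}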
public SegementationFilter(ResourceSet resource, int w = 1920, int h = 1080)
{
    this.resource = resource;
    config = new Config(resource, w, h);

    worker = ModelLoader.Load(resource.model).CreateWorker();

    buffers.preprocess = new ComputeBuffer(config.InputFootPrint, sizeof(float));
    buffers.segment = RTUtil.NewFloat(config.OutputWidth, config.OutputHeight);
    buffers.parts = RTUtil.NewFloat(config.OutputWidth * 24, config.OutputHeight); // 24 body-part channels
    buffers.heatmaps = RTUtil.NewFloat(config.OutputWidth * KeyPointCount, config.OutputHeight);
    buffers.offsets = RTUtil.NewFloat(config.OutputWidth * KeyPointCount * 2, config.OutputHeight); // x/y offset pairs
    buffers.mask = RTUtil.NewUAV(config.OutputWidth, config.OutputHeight);
    buffers.keypoints = new GraphicsBuffer(GraphicsBuffer.Target.Structured, KeyPointCount, sizeof(float) * 4);
}
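Every object the constructor allocates holds native or GPU memory, so a matching disposal method is needed; a hedged sketch, assuming RTUtil.NewFloat/NewUAV return ordinary RenderTextures:

// Hedged sketch (not from the original source): release everything the
// constructor above allocates.
public void Dispose()
{
    worker?.Dispose();
    buffers.preprocess?.Dispose();
    buffers.keypoints?.Dispose();
    UnityEngine.Object.Destroy(buffers.segment);
    UnityEngine.Object.Destroy(buffers.parts);
    UnityEngine.Object.Destroy(buffers.heatmaps);
    UnityEngine.Object.Destroy(buffers.offsets);
    UnityEngine.Object.Destroy(buffers.mask);
}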
public void TestCheckModelThrowsVectorObservation1()
{
    var model = ModelLoader.Load(continuous2vis8vec2actionModel);

    var brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.VectorObservationSize = 9; // Invalid observation
    var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 });
    Assert.Greater(errors.Count(), 0);

    brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.NumStackedVectorObservations = 2; // Invalid stacking
    errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 });
    Assert.Greater(errors.Count(), 0);
}
public void TestCheckModelThrowsAction2()
{
    var model = ModelLoader.Load(discrete1vis0vec_2_3action_recurrModel);

    var brainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();
    brainParameters.VectorActionSize = new int[] { 3, 3 }; // Invalid action
    var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 });
    Assert.Greater(errors.Count(), 0);

    brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.VectorActionSpaceType = SpaceType.Continuous; // Invalid SpaceType
    errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 });
    Assert.Greater(errors.Count(), 0);
}
public void Start()
{
    // yield return Application.RequestUserAuthorization(UserAuthorization.WebCam);
    // if (Application.HasUserAuthorization(UserAuthorization.WebCam))
    // {
    //     webCamTexture = new WebCamTexture(64, 64);
    //     webCamTexture.Play();
    // }
    var model = ModelLoader.Load(Model);
    m_Worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputeRef, model);
    CamTextureRead = new Texture2D(64, 64, TextureFormat.RGBAFloat, false);
}
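The commented-out block above only works from a coroutine, since the authorization request must be awaited; a hypothetical rework under that assumption (webCamTexture is assumed to be a field of the surrounding class):

// Hypothetical sketch (not in the original): Start as a coroutine so the
// WebCam authorization can complete before the texture is created.
IEnumerator Start()
{
    yield return Application.RequestUserAuthorization(UserAuthorization.WebCam);
    if (Application.HasUserAuthorization(UserAuthorization.WebCam))
    {
        webCamTexture = new WebCamTexture(64, 64);
        webCamTexture.Play();
    }

    var model = ModelLoader.Load(Model);
    m_Worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputeRef, model);
    CamTextureRead = new Texture2D(64, 64, TextureFormat.RGBAFloat, false);
}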
public void Start()
{
    // Match \r\n before the single characters so CRLF line ends don't produce empty entries.
    this.labels = Regex.Split(this.labelsFile.text, "\r\n|\r|\n")
        .Where(s => !string.IsNullOrEmpty(s)).ToArray();

    var model = ModelLoader.Load(this.modelFile, false);
    this.worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);

    Debug.Log(this.worker.Summary());
    Debug.Log(string.Join("\n", model.layers.Select(l =>
        $"{l.name} {l.type} " + string.Join(":", l.inputs) + " / " +
        string.Join(":", l.pad.Select(d => d.ToString())))));
    Debug.Log(string.Join("\n", model.inputs.Select(l => $"{l.name} " + string.Join(":", l.shape))));
    Debug.Log(string.Join("\n", model.outputs));
}
// Start is called before the first frame update
void Start()
{
    // Initialize stroke list
    strokes = new List<Stroke>();
    ResetDrawing();

    // Load zeros for the binary texture image
    texture_blank_pixels = texture_blank.texture.GetPixels(0, 0, img_width, img_height);

    // Runtime model for prediction
    runtimeModel = ModelLoader.Load(modelSource);

    // Used to position the thought bubble / camera
    thoughtBubbleCollider = thoughtBubble.GetComponent<BoxCollider2D>();
}
public Item(string[] tokens)
{
    Model m = (Model)loader.Load("", tokens[NAME]);
    m.CastShadows = true;

    Material defaultMaterial = new Material();
    defaultMaterial.Diffuse = Color.White.ToVector4();
    defaultMaterial.Specular = Color.White.ToVector4();
    defaultMaterial.SpecularPower = 10;

    build(m, tokens[NAME], defaultMaterial);
    this.Scale = new Vector3(float.Parse(tokens[Item.SCALE]));
    this.savedTokens = tokens;
}
protected override void LoadFromPrimary()
{
    LoggingService.Info("LoadFromPrimary");
    var xml = designerLoader.SerializeModel();
    var modelLoader = new ModelLoader();
    var reportModel = modelLoader.Load(xml.DocumentElement) as ReportModel;
    var reportingFactory = new ReportingFactory();
    var reportCreator = reportingFactory.ReportCreator(reportModel);
    reportCreator.BuildExportList();
    var previewViewModel = new PreviewViewModel(reportingFactory.ReportModel.ReportSettings, reportCreator.Pages);
    viewer.SetBinding(previewViewModel);
}
public void TestCheckModelValidHybrid()
{
    var model = ModelLoader.Load(hybridONNXModel);
    var validBrainParameters = GetHybridBrainParameters();

    var errors = BarracudaModelParamLoader.CheckModel(
        model, validBrainParameters,
        new ISensor[] { new VectorSensor(validBrainParameters.VectorObservationSize) },
        new ActuatorComponent[0]
    );
    Assert.AreEqual(0, errors.Count()); // There should not be any errors
}
public void TestCheckModelThrowsActionContinuous(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(continuousNNModel) : ModelLoader.Load(continuousONNXModel);

    var brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.VectorActionSize = new[] { 3 }; // Invalid action
    var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);

    brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.VectorActionSpaceType = SpaceType.Discrete; // Invalid SpaceType
    errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);
}
private ModelAsset ToUnity(byte[] bytes)
{
    if (!Glb.TryParse(bytes, out Glb glb, out Exception ex))
    {
        throw ex;
    }

    // Vrm => Model
    var storage = new Vrm10.Vrm10Storage(glb.Json.Bytes, glb.Binary.Bytes);
    var model = ModelLoader.Load(storage, "test");
    model.ConvertCoordinate(Coordinates.Unity);
    model.RemoveSecondary();
    return ToUnity(model);
}
public void TestCheckModelThrowsActionContinuous(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(continuousNNModel) : ModelLoader.Load(continuousONNXModel);

    var brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.ActionSpec = ActionSpec.MakeContinuous(3); // Invalid action
    var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);

    brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.ActionSpec = ActionSpec.MakeDiscrete(3); // Invalid SpaceType
    errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);
}
public PoseDetector(PoseDetectionResource resource)
{
    this.resource = resource;

    var model = ModelLoader.Load(resource.model);
    var shape = model.inputs[0].shape;
    size = (shape[5], shape[6], shape[7]); // (W, H, C)

    worker = model.CreateWorker();

    preBuffer = new ComputeBuffer(size.w * size.h * size.c, sizeof(float));
    countBuffer = new ComputeBuffer(1, sizeof(uint), ComputeBufferType.Raw);
    postBuffer = new ComputeBuffer(MAX_DETECTION, sizeof(float) * DETECTION_DATA_SIZE, ComputeBufferType.Append);
    outputBuffer = new ComputeBuffer(MAX_DETECTION, sizeof(float) * DETECTION_DATA_SIZE, ComputeBufferType.Append);
}
void Start()
{
    Application.targetFrameRate = 60;

    string json = File.ReadAllText(Application.dataPath + "/Images/input0.json");
    InputData inputData = JsonUtility.FromJson<InputData>(json);

    m_RuntimeModel = ModelLoader.Load(inputModel, false);
    m_Worker = WorkerFactory.CreateWorker(WorkerFactory.Type.Compute, m_RuntimeModel, false);
    m_Input = new Tensor(1, inputData.resolutionY, inputData.resolutionX, 1, inputData.data);

    Texture2D tex = CreateInputTexture(inputData);
    outputMaterial.mainTexture = tex;
}
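CreateInputTexture is referenced but not shown in the snippet; a hypothetical shape for it, assuming InputData.data is a single-channel float array in row order matching the resolution fields:

// Hypothetical helper (the real CreateInputTexture is not shown): pack the
// one-channel float data into an RFloat texture for display.
Texture2D CreateInputTexture(InputData inputData)
{
    var tex = new Texture2D(inputData.resolutionX, inputData.resolutionY, TextureFormat.RFloat, false);
    tex.SetPixelData(inputData.data, 0); // one float per pixel
    tex.Apply();
    return tex;
}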
public void TestCheckModelThrowsActionDiscrete(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(discreteNNModel) : ModelLoader.Load(discreteONNXModel);

    var brainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();
    brainParameters.ActionSpec = ActionSpec.MakeDiscrete(3, 3); // Invalid action
    var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);

    brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.ActionSpec = ActionSpec.MakeContinuous(2); // Invalid SpaceType
    errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);
}
public void TestCheckModelThrowsActionHybrid()
{
    var model = ModelLoader.Load(hybridONNXModel);

    var brainParameters = GetHybridBrainParameters();
    brainParameters.ActionSpec = new ActionSpec(3, new[] { 3 }); // Invalid discrete action size
    var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);

    brainParameters = GetContinuous2vis8vec2actionBrainParameters();
    brainParameters.ActionSpec = ActionSpec.MakeDiscrete(2); // Missing continuous action
    errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
    Assert.Greater(errors.Count(), 0);
}
public void TestCheckModelValidDiscrete(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(discreteNNModel) : ModelLoader.Load(discreteONNXModel);
    var validBrainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();

    var errors = BarracudaModelParamLoader.CheckModel(
        model, validBrainParameters,
        new ISensor[] { sensor_21_20_3.CreateSensors()[0] },
        new ActuatorComponent[0]
    );
    foreach (var e in errors)
    {
        Debug.Log(e.Message);
    }
    Assert.Greater(errors.Count(), 0); // Errors are expected: LSTM models from 1.x are not supported
}
// Use this for initialization
IEnumerator Start()
{
    Application.targetFrameRate = 60;
    labels = labelsAsset.text.Split('\n');
    model = ModelLoader.Load(srcModel, false);
    engine = WorkerFactory.CreateWorker(model, useGPU ? WorkerFactory.Device.GPU : WorkerFactory.Device.CSharp);

    var input = new Tensor(PrepareTextureForInput(inputImage, !useGPU), 3);
    inputs["input"] = input;

    yield return null;
    StartCoroutine(RunInference());
}
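The RunInference coroutine started at the end is not shown in the snippet; a hypothetical minimal shape for it, assuming a single classification output that the labels array indexes:

// Hypothetical sketch of the RunInference coroutine referenced above
// (its real body is not shown in the snippet).
IEnumerator RunInference()
{
    engine.Execute(inputs);        // inputs dictionary filled in Start()
    yield return null;             // give the GPU a frame to finish
    var output = engine.PeekOutput();
    var best = output.ArgMax()[0]; // index of the highest-scoring class
    Debug.Log("Predicted: " + labels[best]);
}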
/// <summary>
/// Generate an InferenceEvent for the model.
/// </summary>
/// <param name="nnModel">The Barracuda model asset.</param>
/// <param name="behaviorName">Name of the behavior (hashed before being recorded).</param>
/// <param name="inferenceDevice">Device the inference runs on.</param>
/// <param name="sensors">Sensors attached to the agent.</param>
/// <param name="actionSpec">Action specification of the behavior.</param>
/// <returns>The populated InferenceEvent.</returns>
internal static InferenceEvent GetEventForModel(
    NNModel nnModel,
    string behaviorName,
    InferenceDevice inferenceDevice,
    IList<ISensor> sensors,
    ActionSpec actionSpec
)
{
    var barracudaModel = ModelLoader.Load(nnModel);
    var inferenceEvent = new InferenceEvent();

    // Hash the behavior name so that there's no concern about PII or "secret" data being leaked.
    var behaviorNameHash = Hash128.Compute(behaviorName);
    inferenceEvent.BehaviorName = behaviorNameHash.ToString();

    inferenceEvent.BarracudaModelSource = barracudaModel.IrSource;
    inferenceEvent.BarracudaModelVersion = barracudaModel.IrVersion;
    inferenceEvent.BarracudaModelProducer = barracudaModel.ProducerName;
    inferenceEvent.MemorySize = (int)barracudaModel.GetTensorByName(TensorNames.MemorySize)[0];
    inferenceEvent.InferenceDevice = (int)inferenceDevice;

    if (barracudaModel.ProducerName == "Script")
    {
        // .nn files don't have these fields set correctly. Assign some placeholder values.
        inferenceEvent.BarracudaModelSource = "NN";
        inferenceEvent.BarracudaModelProducer = "tensorflow_to_barracuda.py";
    }

#if UNITY_2019_3_OR_NEWER && UNITY_EDITOR
    var barracudaPackageInfo = UnityEditor.PackageManager.PackageInfo.FindForAssembly(typeof(Tensor).Assembly);
    inferenceEvent.BarracudaPackageVersion = barracudaPackageInfo.version;
#else
    inferenceEvent.BarracudaPackageVersion = null;
#endif

    inferenceEvent.ActionSpec = EventActionSpec.FromActionSpec(actionSpec);
    inferenceEvent.ObservationSpecs = new List<EventObservationSpec>(sensors.Count);
    foreach (var sensor in sensors)
    {
        inferenceEvent.ObservationSpecs.Add(EventObservationSpec.FromSensor(sensor));
    }

    inferenceEvent.TotalWeightSizeBytes = GetModelWeightSize(barracudaModel);
    inferenceEvent.ModelHash = GetModelHash(barracudaModel);
    return inferenceEvent;
}
public void TestCheckModelThrowsVectorObservationDiscrete(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(discreteNNModel) : ModelLoader.Load(discreteONNXModel);

    var brainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();
    brainParameters.VectorObservationSize = 1; // Invalid observation
    var errors = BarracudaModelParamLoader.CheckModel(
        model, brainParameters,
        new ISensor[] { sensor_21_20_3.CreateSensors()[0] },
        new ActuatorComponent[0]
    );
    Assert.Greater(errors.Count(), 0);
}
public FaceDetector(ResourceSet resources)
{
    _resources = resources;

    _preBuffer = new ComputeBuffer(Config.InputSize, sizeof(float));
    _post1Buffer = new ComputeBuffer(Config.MaxDetection, BoundingBox.Size, ComputeBufferType.Append);
    _post2Buffer = new ComputeBuffer(Config.MaxDetection, BoundingBox.Size, ComputeBufferType.Append);
    _countBuffer = new ComputeBuffer(1, sizeof(uint), ComputeBufferType.Raw);

    _worker = ModelLoader.Load(_resources.model).CreateWorker();
}
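Once post-processing fills the append buffer, the detection count has to be copied out before results can be read back on the CPU; a hedged sketch using the buffers declared above:

// Hedged sketch (not from the original source): copy the append-buffer counter
// into the raw count buffer, then read it back. A synchronous GetData stalls
// the pipeline; AsyncGPUReadback is the non-blocking alternative.
ComputeBuffer.CopyCount(_post2Buffer, _countBuffer, 0);
var count = new int[1];
_countBuffer.GetData(count, 0, 0, 1); // count[0] = number of detected boxes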
public void TestGetInputTensors1()
{
    var model = ModelLoader.Load(continuous2vis8vec2actionModel);

    var inputTensors = BarracudaModelParamLoader.GetInputTensors(model);
    var inputNames = inputTensors.Select(x => x.name).ToList();

    // Model should contain 3 inputs: vector, visual 1 and visual 2
    Assert.AreEqual(3, inputNames.Count);
    Assert.Contains(TensorNames.VectorObservationPlaceholder, inputNames);
    Assert.Contains(TensorNames.VisualObservationPlaceholderPrefix + "0", inputNames);
    Assert.Contains(TensorNames.VisualObservationPlaceholderPrefix + "1", inputNames);

    Assert.AreEqual(2, BarracudaModelParamLoader.GetNumVisualInputs(model));

    // A null model should yield no inputs
    Assert.AreEqual(0, BarracudaModelParamLoader.GetInputTensors(null).Count);
    Assert.AreEqual(0, BarracudaModelParamLoader.GetNumVisualInputs(null));
}
void Start()
{
    foreach (string joint in trackedJoints)
    {
        observedteacherKinectTs.Add(TransformDeepChildExtension.FindDeepChild(teacherKinectT, joint));
        observedstudentKinectTs.Add(TransformDeepChildExtension.FindDeepChild(studentKinectT, joint));
    }

    model = ModelLoader.Load(kinectModel, false);
    worker = WorkerFactory.CreateWorker(WorkerFactory.Type.Compute, model);
}
public void TestCheckModelValidContinuous(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(continuousNNModel) : ModelLoader.Load(continuousONNXModel);
    var validBrainParameters = GetContinuous2vis8vec2actionBrainParameters();

    var errors = BarracudaModelParamLoader.CheckModel(
        model, validBrainParameters,
        new ISensor[] { new VectorSensor(8), sensor_21_20_3.CreateSensors()[0], sensor_20_22_3.CreateSensors()[0] },
        new ActuatorComponent[0]
    );
    Assert.AreEqual(0, errors.Count()); // There should not be any errors
}
public void TestGetInputTensorsContinuous(bool useDeprecatedNNModel)
{
    var model = useDeprecatedNNModel ? ModelLoader.Load(continuousNNModel) : ModelLoader.Load(continuousONNXModel);
    var inputNames = model.GetInputNames();

    // Model should contain 3 inputs: vector, visual 1 and visual 2
    Assert.AreEqual(3, inputNames.Count());
    Assert.Contains(TensorNames.VectorObservationPlaceholder, inputNames);
    Assert.Contains(TensorNames.VisualObservationPlaceholderPrefix + "0", inputNames);
    Assert.Contains(TensorNames.VisualObservationPlaceholderPrefix + "1", inputNames);
    Assert.AreEqual(2, model.GetNumVisualInputs());

    // The extension methods should handle a null model gracefully
    model = null;
    Assert.AreEqual(0, model.GetInputTensors().Count);
    Assert.AreEqual(0, model.GetNumVisualInputs());
}
void Start()
{
    Application.targetFrameRate = 60;
    m_RuntimeModel = ModelLoader.Load(inputModel, false);
    m_Worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, m_RuntimeModel, false);

#if (WEBCAM)
    m_WebcamTexture = new WebCamTexture();
    m_WebcamTexture.Play();
#else
    var targetRT = RenderTexture.GetTemporary(inputResolutionX, inputResolutionY, 0);
    Graphics.Blit(inputImage, targetRT, postprocessMaterial);
    // Build the input tensor from the blitted render texture (3 channels).
    m_Input = new Tensor(targetRT, 3);
#endif
}
void AllocateObjects()
{
    var model = ModelLoader.Load(_resources.model);

    _size = model.inputs[0].shape[6]; // Input tensor width

    _preBuffer = new ComputeBuffer(_size * _size * 3, sizeof(float));
    _post1Buffer = new ComputeBuffer(MaxDetection, Detection.Size, ComputeBufferType.Append);
    _post2Buffer = new ComputeBuffer(MaxDetection, Detection.Size, ComputeBufferType.Append);
    _countBuffer = new ComputeBuffer(1, sizeof(uint), ComputeBufferType.Raw);

    _worker = model.CreateWorker();
}
public bool Load()
{
    RuntimeModelLoader.Mesh runtimeMesh = ModelLoader.Load(path);
    if (runtimeMesh == null)
    {
        return false;
    }

    List<Vector3> vertices = Converter.GetVerticesVector3(runtimeMesh);
    List<int> triangles = Converter.GetTriangules(runtimeMesh);

    UnityEngine.Mesh goMesh = new UnityEngine.Mesh();
    goMesh.vertices = vertices.ToArray();
    goMesh.triangles = triangles.ToArray();
    _meshFilter.mesh = goMesh;
    return true;
}
// Start is called before the first frame update
private void Start()
{
    var model = ModelLoader.Load(modelSource);
    var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);

    var inputTensor = new Tensor(1, 2, new float[2] { 0, 0 });
    worker.Execute(inputTensor);
    var output = worker.PeekOutput();
    print("This is the output: " + (output[0] < 0.5 ? 0 : 1));

    inputTensor.Dispose();
    output.Dispose();
    worker.Dispose();
}
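Since the worker above is created and disposed within Start, a variation that probes every input combination before disposal might look like this (a sketch, not from the original source; it assumes the same (1, 2) input shape):

// Hedged extension of the snippet above: evaluate all four input pairs,
// reusing one worker and disposing each tensor as soon as it is consumed.
foreach (var (a, b) in new[] { (0f, 0f), (0f, 1f), (1f, 0f), (1f, 1f) })
{
    using (var t = new Tensor(1, 2, new[] { a, b }))
    {
        worker.Execute(t);
        var o = worker.PeekOutput();
        print($"({a}, {b}) -> {(o[0] < 0.5f ? 0 : 1)}");
    }
}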