Example #1
    void Start()
    {
        if (Application.HasUserAuthorization(UserAuthorization.WebCam))
        {
            Debug.Log("webcam found");//
        }
        else
        {
            Debug.Log("webcam not found");
        }
        Application.targetFrameRate = 30;
        m_RuntimeModel = ModelLoader.Load(inputModel, false);
        m_Worker       = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, m_RuntimeModel, false);
        //resultMask.enableRandomWrite = true;
#if (WEBCAM)
#if !UNITY_EDITOR
//Print this on device
        Debug.Log("Using webcam");
#endif
        m_WebcamTexture = new WebCamTexture(320, 320, 30);
        m_WebcamTexture.Play();
#else
        var targetRT = RenderTexture.GetTemporary(inputResolutionX, inputResolutionY, 0);
        Graphics.Blit(inputImage, targetRT, postprocessMaterial);
        m_Input = new Tensor(targetRT, 3);
        //m_Input = new Tensor(1, inputResolutionY, inputResolutionX, 3); // would overwrite and leak the texture-based tensor above
#endif
        //Test
        //resultMask = new RenderTexture(inputResolutionX, inputResolutionY, 0);
    }
Example #2
    void Start()
    {
        var model = ModelLoader.Load(modelFile);

        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
        LoadLabels();
    }
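Example #2, like most of the snippets in this list, creates its IWorker in Start() but never releases it. Barracuda workers hold native GPU/CPU buffers, so a minimal cleanup sketch could look like the following, assuming the `worker` field from the example (everything else here is illustrative, not part of the original):

    void OnDestroy()
    {
        // Release the Barracuda worker's native resources when the component goes away
        worker?.Dispose();
        worker = null;
    }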
Example #3
    public Sprite sprite1, sprite2; // Images to be classified (assets)

    // Start is called before the first frame update
    void Start()
    {
        model  = ModelLoader.Load(modelSource);                                            // Load ONNX model as runtime binary model
        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model); // Create Worker

        Classify();                                                                        // Calling it once
    }
Example #4
    public void ProcessImage()
    {
        Debug.Log("Process Image Called");
        bool verbose = false;

        var additionalOutputs = new string[] { "StatefulPartitionedCall/sequential/dense/Softmax" };
        var engine            = WorkerFactory.CreateWorker(m_RuntimeModel, additionalOutputs, WorkerFactory.Device.GPU, verbose);

        //var model_tf = ModelLoader.LoadFromStreamingAssets(modelName + ".nn"); //if you did tensorflow_to_barracuda.py instead of onnx
        RenderTexture temp_tex  = tex;
        Texture2D     i_texture = toTexture2D(temp_tex);

        i_texture = Resize(i_texture, 224, 224);

        //texture inputs
        var channelCount = 3; // you can treat input pixels as 1 (grayscale), 3 (color) or 4 (color with alpha) channels
        var input        = new Tensor(i_texture, channelCount);

        engine.Execute(input);
        var prediction = engine.PeekOutput("StatefulPartitionedCall/sequential/dense/Softmax"); // pass the output name when the model has multiple outputs

        float cat_value = prediction[0];

        float[] values = prediction.AsFloats();

        bool isCat = values[0] > 0.99f;
        bool isDog = values[1] > 0.99f;

        Debug.Log("Cat Probability: " + values[0].ToString("F4"));
        Debug.Log("Dog Probability: " + values[1].ToString("F4"));

        prediction.Dispose();
        engine.Dispose();
        input.Dispose();
    }
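Instead of thresholding each class at 0.99 as above, the predicted class can be read directly from the softmax output by taking the largest value. A minimal sketch, reusing the `values` array from `prediction.AsFloats()` (the `classNames` array is a hypothetical label lookup, not part of the original):

    // Pick the index of the highest softmax probability
    int best = 0;
    for (int i = 1; i < values.Length; i++)
    {
        if (values[i] > values[best])
        {
            best = i;
        }
    }
    Debug.Log("Predicted: " + classNames[best] + " (" + values[best].ToString("F4") + ")");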
Example #5
    void Start()
    {
        m_DataSet = LoadDataSet();

        var batch = m_DataSet.Item1[0].batch;

        m_SirenBuilder = new SirenModel(batch, false, 256, 3,
                                        8, 2, 6,
                                        "vector_observation", "continuous_actions", false);
        m_model      = m_SirenBuilder.model;
        m_parameters = m_SirenBuilder.parameters;

        if (loadModelFromOnnx)
        {
            m_model = ModelLoader.Load(model, false);
        }
        m_worker = WorkerFactory.CreateWorker(ms_workerType, m_model, false);

        m_input  = m_DataSet.Item1[9];
        m_target = m_DataSet.Item2[9];

        m_lr = new Tensor(1, 1, new[] { learningRate });

        InitPlot();

        var t = m_target.Reshape(new TensorShape(1, 100, 200, 1));

        t.ToRenderTexture(targetRT, batch: 0, fromChannel: 0);

        m_lastUpdateTime = Time.realtimeSinceStartup;

        StartCoroutine(TrainingLoop());
    }
Example #6
    // Discount factor when computing the Q values
    // Note: Usually it is 0.9-0.99, but here the next state is random and is not relevant, so it is not used
    //private float gamma = 0.0f;
    // Starting epsilon (choose random actions)
    //private float epsilon = 1.0f;
    // Minimum epsilon (after coolingSteps)
    //private float minEpsilon = 0.1f;
    // Factor to reduce epsilon (number of actions it takes
    // to go down to minEpsilon)
    //private int coolingSteps = 10000;



    // Not using Start() so we can be sure this runs before the agent is asked to play
    public void Initialize()
    {
        imgWidth  = renderTextures[0].width;
        imgHeight = renderTextures[0].height;

        // Create the neural network
        model  = ModelLoader.Load(CNN_Model_Asset, false);
        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharp, model, false);
        Debug.Log("started");

        // Gather other game information
        numEnemyCards = renderTextures.Length;

        myCards = new int[NUM_CLASSES];  // counts how many cards of each class the agent has

        enemyCombinations = Mathf.FloorToInt(Mathf.Pow(NUM_CLASSES, numEnemyCards));
        deckCombinations  = Mathf.FloorToInt(Mathf.Pow(DECK_SIZE + 1, myCards.Length - 1));

        // The max number of possible actions is the same as the number of enemy combinations
        // (NUM_CLASSES ^ numCards)
        numActions = enemyCombinations;

        // Generate the Q-table
        qTable = new float[enemyCombinations * deckCombinations, numActions];

        //Debug.LogError(qTable.GetLength(0) + " and " + qTable.GetLength(1));
        LoadLearning();

        //LoadLearning();
        //foreach (var item in qTable)
        //{
        //    Debug.Log(item.ToString());
        //}
    }
Example #7
    private void Start()
    {
        // Initialize
        HeatMapCol_Squared  = HeatMapCol * HeatMapCol;
        HeatMapCol_Cube     = HeatMapCol * HeatMapCol * HeatMapCol;
        HeatMapCol_JointNum = HeatMapCol * JointNum;
        CubeOffsetLinear    = HeatMapCol * JointNum_Cube;
        CubeOffsetSquared   = HeatMapCol_Squared * JointNum_Cube;

        heatMap2D          = new float[JointNum * HeatMapCol_Squared];
        offset2D           = new float[JointNum * HeatMapCol_Squared * 2];
        heatMap3D          = new float[JointNum * HeatMapCol_Cube];
        offset3D           = new float[JointNum * HeatMapCol_Cube * 3];
        unit               = 1f / (float)HeatMapCol;
        InputImageSizeF    = InputImageSize;
        InputImageSizeHalf = InputImageSizeF / 2f;
        ImageScale         = InputImageSize / (float)HeatMapCol;// 224f / (float)InputImageSize;

        // Disable sleep
        Screen.sleepTimeout = SleepTimeout.NeverSleep;

        // Init model
        _model  = ModelLoader.Load(NNModel, Verbose);
        _worker = WorkerFactory.CreateWorker(WorkerType, _model, Verbose);
        StartCoroutine("WaitLoad");

        // Init VNect model
        jointPoints = VNectModel.Init();
        // Init VideoCapture
        videoCapture.Init(InputImageSize, InputImageSize);
    }
Example #8
    private void Start()
    {
        var model = ModelLoader.Load(ModelOpenCVFeatures);

        //worker = BarracudaWorkerFactory.CreateWorker(BarracudaWorkerFactory.Type.ComputePrecompiled, model);
        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
    }
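Example #8 only loads the model and builds the worker. A single inference pass on a Texture2D could then look like the sketch below; the method name and `inputTexture` parameter are assumptions, while the texture-to-Tensor constructor, Execute and PeekOutput calls are the same ones used throughout these examples:

    void RunInference(Texture2D inputTexture)
    {
        // Pack the texture into an NHWC tensor with 3 color channels
        using (var input = new Tensor(inputTexture, channels: 3))
        {
            worker.Execute(input);
            // PeekOutput returns a tensor owned by the worker; it is reused on the next Execute
            var output = worker.PeekOutput();
            Debug.Log("First output value: " + output[0]);
        }
    }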
Example #9
    public void MLP_Shape()
    {
        TensorCachingAllocator tca = new TensorCachingAllocator();
        var shape = new MultiLayerPerception.Shape {
            inputSize  = 2,
            outputSize = 3,
            hiddenSize = 5
        };
        MultiLayerPerception mlp = new MultiLayerPerception(shape);
        IWorker worker           = WorkerFactory.CreateWorker(mlp.model, WorkerFactory.Device.GPU);
        Tensor  input            = tca.Alloc(new TensorShape(1, 1, 1, shape.inputSize));

        for (int i = 0; i < shape.inputSize; i++)
        {
            input[i] = i;
        }
        IWorker ex = worker.Execute(input);

        ex.FlushSchedule(true);
        Tensor output = ex.PeekOutput();

        for (int i = 0; i < shape.outputSize; i++)
        {
            Debug.Log($"output[{i}] = {output[i]}");
        }
        tca.Dispose();
        ex.Dispose();
        worker.Dispose();
        Debug.Assert(true);
    }
Example #10
    // Start is called before the first frame update
    void Start()
    {
        body      = GetComponent <Rigidbody>();
        navigator = GetComponent <PathNavigator>();
        autopilot = GetComponent <Autopilot>();
        vehicle   = GetComponent <Vehicle>();
        workers   = new List <IWorker>();

        int[] inputShape = null;
        for (int i = 0; i < 5; i++)
        {
            var model = ModelLoader.LoadFromStreamingAssets(modelName + i + ".nn");
            workers.Add(WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, model));
            inputShape = model.inputs[0].shape;
        }

        classificationWorker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, ModelLoader.LoadFromStreamingAssets(modelName + "C" + ".nn"));

        numObservations = (inputShape.Last() - 1) / 3;
        inputs          = new Tensor(new TensorShape(inputShape));
        estimations     = new List <float>();

        curvature   = new float[numObservations];
        camber      = new float[numObservations];
        inclination = new float[numObservations];

        rearLeft  = vehicle.wheels.Where(w => w.localAttachmentPosition.z < 0).Where(w => w.localAttachmentPosition.x < 0).ToArray();
        rearRight = vehicle.wheels.Where(w => w.localAttachmentPosition.z < 0).Where(w => w.localAttachmentPosition.x > 0).ToArray();
    }
Example #11
    void TestAndWriteTexture()
    {
        print(test.format);
        print(test.graphicsFormat);
        //Create the Barracuda worker
        var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, m_runtimeModel);
        //Create the tensor from the Texture2D
        var tensor = new Tensor(test, channels: 3);
        //Normalize the tensor into [-1,1]
        var normalizedTensor = NormalizeTensor(tensor);

        //Execute the Worker on the normalized tensor
        worker.Execute(normalizedTensor);
        //Pull the output tensor from the worker
        var output = worker.PeekOutput();
        //Normalize the output tensor
        var outputNormalized = NormalizeTensorUp(output);
        //Assign result tensor to Texture 2D
        var ot = Tensor2Image(outputNormalized);
        //Apply output tensor to RenderTexture
        var rtOutput = new RenderTexture(256, 256, 32);

        outputNormalized.ToRenderTexture(rtOutput);
        RenderTexture.active = rtOutput;
        //Assign result texture to Texture2D
        //testTextureOutput.SetPixels(ot.GetPixels());
        testTextureOutput.ReadPixels(new Rect(0, 0, rtOutput.width, rtOutput.height), 0, 0);
        testTextureOutput.Apply();
        RenderTexture.active = null;
        Destroy(rtOutput);
        worker.Dispose();
    }
Example #12
    void Start()
    {
        runtimeModel = ModelLoader.Load(model);
        worker       = WorkerFactory.CreateWorker(runtimeModel);

#if UNITY_EDITOR
        if (testImages.pixelValueArray.Length == 0)
        {
            testImages.LoadBytesFromPath();
        }
        if (testNetwork)
        {
            int    fails = 0;
            Tensor input = new Tensor(1, 1, 28 * 28, 1);
            for (int i = 0; i < testImages.imageCount; i++)
            {
                for (int j = 0; j < testImages.pixelValueArrayOffset; j++)
                {
                    input[0, 0, j, 0] =
                        testImages.pixelValueArray[i * testImages.pixelValueArrayOffset + j];
                }
                Tensor output       = worker.Execute(input).PeekOutput("Y");
                int    value        = GetResult(output);
                bool   correctGuess = value == testImages.labelValueArray[i];
                fails += correctGuess ? 0 : 1;
            }
            Debug.Log(fails + " fails of " + testImages.imageCount + " images");
            Debug.Log((1 - ((float)fails / testImages.imageCount)) * 100 + "% accuracy");
        }
#endif
        TestRandomImage(true);
    }
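Example #12 relies on a GetResult helper that is not shown. For an MNIST-style classifier it is typically an argmax over the output values; the sketch below is an assumption about what such a helper does, not the author's implementation:

    int GetResult(Tensor output)
    {
        // Return the index of the largest output value (the predicted digit)
        int best = 0;
        for (int i = 1; i < output.length; i++)
        {
            if (output[i] > output[best])
            {
                best = i;
            }
        }
        return best;
    }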
Example #13
 /// <summary>
 /// Constructor for a regular Pix2Pix inference object
 /// </summary>
 public Pix2Pix()
 {
     // Initialise the model
     _modelAsset  = Resources.Load <NNModel>("Models/blobs_run_1");
     _loadedModel = ModelLoader.Load(_modelAsset);
     _worker      = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, _loadedModel);
 }
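The Pix2Pix constructor above only prepares the worker. A matching inference call could follow the same texture-to-tensor and ToRenderTexture pattern as Example #11; the method name, the 256x256 output size and the return type below are assumptions, only the `_worker` field and the Barracuda calls come from the examples in this list:

 public RenderTexture Generate(Texture2D source)
 {
     // Convert the source image into a 3-channel tensor and run the generator
     using (var input = new Tensor(source, channels: 3))
     {
         _worker.Execute(input);
         var output = _worker.PeekOutput();

         // Copy the generated image into a RenderTexture the caller can display
         var result = new RenderTexture(256, 256, 0);
         output.ToRenderTexture(result);
         return result;
     }
 }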
Example #14
        public void BasicNNInferenceTest()
        {
            string[] allCandidates = AssetDatabase.FindAssets(modelFileName);

            Assert.True(allCandidates.Length > 0);

            var nnModel =
                AssetDatabase.LoadAssetAtPath(AssetDatabase.GUIDToAssetPath(allCandidates[0]), typeof(NNModel)) as
                NNModel;
            var model  = ModelLoader.Load(nnModel);
            var engine = WorkerFactory.CreateWorker(model, WorkerFactory.Device.CPU);

            var inputTensor = new Tensor(1, 28, 28, 1, input);

            engine.Execute(inputTensor);

            var outputTensor = engine.PeekOutput();

            Assert.AreEqual(output.Length, outputTensor.length);

            // Check if output matches expected output down to epsilon
            for (var i = 0; i < output.Length; i++)
            {
                Assert.LessOrEqual(Mathf.Abs(outputTensor[i] - output[i]), epsilon);
            }

            inputTensor.Dispose();
            engine.Dispose();
        }
Example #15
    public void kill()
    {
        dead           = true;
        bonkTimer      = 5;
        trail.emitting = false;
        var       model  = ModelLoader.Load(modelSource);
        var       worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
        Tensor    input  = new Tensor(1, 28, 28, 1);
        Texture2D newTex = dataHolder.resizedTexture(28, 28);

        newTex.Apply();
        display = newTex;
        for (int i = 0; i < 28; i++)
        {
            for (int j = 0; j < 28; j++)
            {
                input[0, i, j, 0] = newTex.GetPixel(i, 28 - j - 1).grayscale;
            }
        }
        worker.Execute(input);
        Tensor output = worker.PeekOutput();
        string str    = "";

        for (int i = 0; i < 7; i++)
        {
            str += output[0, 0, 0, i] + ", ";
        }
        print(str);
        input.Dispose();
        output.Dispose();
        worker.Dispose();
    }
Example #16
    public void Start()
    {
        this.labels = Regex.Split(this.labelsFile.text, "\n|\r|\r\n")
                      .Where(s => !String.IsNullOrEmpty(s)).ToArray();
        var model = ModelLoader.Load(this.modelFile);

        this.worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
    }
Example #17
    private void Start()
    {
        normTxt = new Texture2D(W, H, TextureFormat.RGB24, false);
        var model = ModelLoader.Load(modelSource);

        //worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
        worker = WorkerFactory.CreateWorker(model);
    }
Example #18
 public void StartModel()
 {
     if (worker == null)
     {
         model  = ModelLoader.Load(modelSource, verbose: false);
         worker = WorkerFactory.CreateWorker(model, verbose: false);
     }
 }
Example #19
        /// <summary>
        /// Initializes the Brain with the Model that it will use when selecting actions for
        /// the agents
        /// </summary>
        /// <param name="model"> The Barracuda model to load </param>
        /// <param name="actionSpec"> Description of the actions for the Agent.</param>
        /// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
        /// option for most of ML Agents models. </param>
        /// <param name="seed"> The seed that will be used to initialize the RandomNormal
        /// and Multinomial objects used when running inference.</param>
        /// <exception cref="UnityAgentsException">Throws an error when the model is null
        /// </exception>
        public ModelRunner(
            NNModel model,
            ActionSpec actionSpec,
            InferenceDevice inferenceDevice,
            int seed = 0)
        {
            Model barracudaModel;

            m_Model           = model;
            m_ModelName       = model.name;
            m_InferenceDevice = inferenceDevice;
            m_TensorAllocator = new TensorCachingAllocator();
            if (model != null)
            {
#if BARRACUDA_VERBOSE
                m_Verbose = true;
#endif

                D.logEnabled = m_Verbose;

                barracudaModel = ModelLoader.Load(model);
                WorkerFactory.Type executionDevice;
                switch (inferenceDevice)
                {
                case InferenceDevice.CPU:
                    executionDevice = WorkerFactory.Type.CSharp;
                    break;

                case InferenceDevice.GPU:
                    executionDevice = WorkerFactory.Type.ComputePrecompiled;
                    break;

                case InferenceDevice.Burst:
                    executionDevice = WorkerFactory.Type.CSharpBurst;
                    break;

                case InferenceDevice.Default:     // fallthrough
                default:
                    executionDevice = WorkerFactory.Type.CSharpBurst;
                    break;
                }
                m_Engine = WorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
            }
            else
            {
                barracudaModel = null;
                m_Engine       = null;
            }

            m_InferenceInputs = barracudaModel.GetInputTensors();
            m_OutputNames     = barracudaModel.GetOutputNames();
            m_TensorGenerator = new TensorGenerator(
                seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_TensorApplier = new TensorApplier(
                actionSpec, seed, m_TensorAllocator, m_Memories, barracudaModel);
            m_InputsByName     = new Dictionary <string, Tensor>();
            m_InferenceOutputs = new List <TensorProxy>();
        }
Example #20
    void OnPeriodicUpdate()
    {
        if (GeneBankManager.Inst.GenomeCount <= 0)
        {
            return;
        }

        float3 state = MoveContext.GetRandomState(in _simParams);

        ParetoGeneBank.Genome gi = GeneBankManager.Inst.GetGenomeByID(SelectedGeneId);
        Debug.Log(SelectedGeneId);
        if (gi == null)
        {
            return;
        }
        MultiLayerPerception mlp = new MultiLayerPerception(_simParams.mlpShape, Layer.FusedActivation.Relu6);

        mlp.LoadWeights(gi._weights.ToArray());
        IWorker worker   = WorkerFactory.CreateWorker(WorkerFactory.Type.Auto, mlp.model, false);
        Tensor  inTensor = new Tensor(1, _simParams.mlpShape.inputSize);

        for (int i = 0; i < _simParams.iterations; i++)
        {
            float2 obs = AcademyMove.Observe(state);
            for (int iINode = 0; iINode < _simParams.mlpShape.inputSize; iINode++)
            {
                _observeBuffer[i][iINode] = (iINode < 2) ? obs[iINode] : inTensor[runIdx, iINode];
                inTensor[runIdx, iINode]  = _observeBuffer[i][iINode];
            }
            worker.SetInput(inTensor);
            worker.Execute().FlushSchedule(true);
            using (Tensor outTensor = worker.PeekOutput()) {
                float2 act = 0;
                Debug.Assert(0 <= outTensor[runIdx, 0] && outTensor[runIdx, 0] <= 6);
                Debug.Assert(0 <= outTensor[runIdx, 1] && outTensor[runIdx, 1] <= 6);
                act.x = math.remap(0, 6, 0, 1, outTensor[runIdx, 0]);
                act.y = math.remap(0, 6, -1, 1, outTensor[runIdx, 1]);

                for (int iINode = 2; iINode < _simParams.mlpShape.inputSize; iINode++)
                {
                    inTensor[runIdx, iINode] = outTensor[runIdx, iINode];
                }
                float2 dir = new float2(math.cos(state.z), math.sin(state.z));
                act           = math.clamp(act, _simParams.actionSpaceMin, _simParams.actionSpaceMax);
                _actBuffer[i] = act;
                state.z      += act.y * _simParams.dt;
                state.xy     += dir * act.x * _simParams.dt;
            }
            _stateBuffer[i] = state;
        }
        worker.Dispose();
        inTensor.Dispose();
        if (_NetDraw)
        {
            _NetDraw._TestMLP = mlp;
        }
    }
Example #21
 // Start is called before the first frame update
 void Start()
 {
     m_RuntimeModel = ModelLoader.Load(modelAsset);
      foreach (var layer in m_RuntimeModel.layers)
      {
          Debug.Log(layer.name);
     }
     worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, m_RuntimeModel);
 }
Example #22
    void Start()
    {
        model  = ModelLoader.Load(modelfile);
        engine = WorkerFactory.CreateWorker(model, WorkerFactory.Device.GPU);
        int[] shape  = { 10, 1, 10 };
        var   input  = new Tensor(shape);
        var   output = engine.Execute(input).PeekOutput();

        Debug.Log("model running");
    }
Example #23
    // Start is called before the first frame update
    void Start()
    {
        m_RuntimeModel = ModelLoader.Load(modelAsset);

        var workerType = WorkerFactory.Type.Compute; // Use this to run on the GPU

        // var workerType = WorkerFactory.Type.CSharp;  // Use this to run on the CPU

        m_worker = WorkerFactory.CreateWorker(workerType, m_RuntimeModel);
    }
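Example #23 switches between the GPU and CPU backends by editing the code. The same choice can also be made at runtime; the sketch below is one way to do it, mirroring the fields of the example (SystemInfo.supportsComputeShaders is a standard Unity capability check):

    void Start()
    {
        m_RuntimeModel = ModelLoader.Load(modelAsset);

        // Fall back to the Burst CPU backend on devices without compute shader support
        var workerType = SystemInfo.supportsComputeShaders
                         ? WorkerFactory.Type.Compute
                         : WorkerFactory.Type.CSharpBurst;

        m_worker = WorkerFactory.CreateWorker(workerType, m_RuntimeModel);
    }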
Example #24
    public void Start()
    {
        this.labels = Regex.Split(this.labelsFile.text, "\n|\r|\r\n")
                      .Where(s => !String.IsNullOrEmpty(s)).ToArray();
        var model = ModelLoader.Load(this.modelFile);
        // https://docs.unity3d.com/Packages/com.unity.barracuda@latest/manual/Worker.html
        // var workerType = WorkerFactory.Type.ComputePrecompiled; // GPU
        var workerType = WorkerFactory.Type.CSharpBurst;  // CPU

        this.worker = WorkerFactory.CreateWorker(workerType, model);
    }
Example #25
    void Start()
    {
        // Read the file and convert the text inside in a Array separated by lines (Using Linq)
        outputParser.SetLabels(Regex.Split(m_LabelsFile.text, "\n|\r|\r\n").Where(s => !String.IsNullOrEmpty(s)).ToArray());
        outputParser.SetColors(m_Colors);
        // Load the onnx model file
        var model = ModelLoader.Load(m_ModelFile);

        // Create the barracuda inference engine (breaks down the given model into executable tasks and schedules them on the GPU or CPU)
        this.worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
    }
Example #26
        public BarracudaWorker(NNModel nnModel, WorkerFactory.Type type)
        {
            bool verbose = false;

            model  = ModelLoader.Load(nnModel, verbose);
            worker = WorkerFactory.CreateWorker(type, model, verbose);

            var kernels = ComputeShaderSingleton.Instance.kernels;

            ops = new PrecompiledComputeOps(kernels, kernels[0]);
        }
Example #27
        private static Human Parse(string humanInfo)
        {
            var arr = humanInfo.Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);

            if (arr.Length == 3)
            {
                return(StudentFactory.CreateStudent(arr));
            }

            return(WorkerFactory.CreateWorker(arr));
        }
Example #28
    public NNHandler(NNModel nnmodel)
    {
        model = ModelLoader.Load(nnmodel);
#if UNITY_WEBGL && !UNITY_EDITOR
        Debug.Log("Worker:CPU");
        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, model); // CPU
#else
        Debug.Log("Worker:GPU");
        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model); // GPU
#endif
    }
Example #29
    void Start()
    {
        model = ModelLoader.Load(modelAsset);
        IWorker worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharp, model);
        Tensor  input  = new Tensor(1, 3, new float[] { -0.301426f, 0.715417f, 0.214615f });

        worker.Execute(input);
        var res = worker.PeekOutput();

        Debug.Log(res[0]);
        input.Dispose();
    }
Example #30
    private void TensorPredict(Tensor input)
    {
        var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, m_RuntimeModel);

        worker.Execute(input);

        //If the model has a single output, you can use worker.PeekOutput()
        //Tensor O = m_Worker.PeekOutput("output_layer_name");
        var prediction = worker.PeekOutput("Sigmoid");

        input.Dispose();
        worker.Dispose();
    }
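Example #30 builds a new worker on every prediction, which is expensive. A common alternative is to create the worker once and reuse it across calls; a minimal sketch under that assumption (only m_RuntimeModel and the "Sigmoid" output name come from the example, the other names are illustrative):

    IWorker m_Worker;

    void Start()
    {
        m_Worker = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharpBurst, m_RuntimeModel);
    }

    float PredictSigmoid(Tensor input)
    {
        m_Worker.Execute(input);
        // The peeked tensor is owned and reused by the worker on the next Execute
        var prediction = m_Worker.PeekOutput("Sigmoid");
        float value = prediction[0];
        input.Dispose();
        return value;
    }

    void OnDestroy()
    {
        m_Worker?.Dispose();
    }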