Example No. 1
    public void TensorCachingAllocatorTest()
    {
        ReferenceComputeOps gpuOps;

        Debug.Log(ComputeShaderSingleton.Instance);
        gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

        TensorCachingAllocator tca = new TensorCachingAllocator();

        int[]  shape = new[] { 2, 3, 5, 1 };
        Tensor X     = tca.Alloc(new TensorShape(shape));
        Tensor W     = tca.Alloc(new TensorShape(15, 7));

        X[0] = 3;
        W[0] = 5;
        Debug.Log($"X WxH:{X.flatHeight} {X.flatWidth}");
        Debug.Log($"W WxH:{W.flatHeight} {W.flatWidth}");
        Tensor Y = gpuOps.MatMul(X, false, W, false);

        Debug.Log($"Y WxH:{Y.flatHeight} {Y.flatWidth}");
        Debug.Log(X.data.GetType());
        tca.Dispose();
        gpuOps.ResetAllocator(false);
        Debug.Assert(true); // Just getting here is good enough
    }
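In Barracuda's NHWC layout, X above (2, 3, 5, 1) flattens to a 2x15 matrix and W (15, 7) to a 15x7 matrix, so the MatMul result should come out as 2x7. A minimal sketch of asserting that shape, assuming the same gpuOps, X and W as in the test above:

    Tensor Y = gpuOps.MatMul(X, false, W, false);
    Debug.Assert(Y.flatHeight == 2); // rows = flat height of X (the batch dimension)
    Debug.Assert(Y.flatWidth == 7);  // cols = flat width of W
    Y.Dispose();                     // release the result once it is no longer needed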
Example No. 2
 /// <inheritdoc/>
 public virtual float[] Download(TensorShape shape)
 {
     if (ComputeInfo.supportsCompute && SystemInfo.supportsComputeShaders)
     {
         var gpuBackend = new ReferenceComputeOps(null);
         // @TODO: cache compute buffer
         using (var computeTensorData =
                    gpuBackend.TextureToTensorData(this, "__internalDownloadTextureToTensorData"))
         {
              return computeTensorData.Download(shape);
         }
     }
     else
     {
          return TextureToTensorDataCache(shape);
     }
 }
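The method above prefers a GPU read-back through ReferenceComputeOps when compute shaders are available and otherwise falls back to the cached CPU conversion. A hypothetical caller-side sketch; tensorData and tensor are assumed names, not part of the snippet:

     // Assumed: 'tensorData' implements the Download() shown above and 'tensor' describes its shape.
     float[] values = tensorData.Download(tensor.shape);
     Debug.Log($"downloaded {values.Length} values, first = {values[0]}");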
Example No. 3
    public void ReferenceComputeOps_BasicTest()
    {
        ReferenceComputeOps gpuOps;

        Debug.Log(ComputeShaderSingleton.Instance);
        gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);
        int[]  shape = new[] { 2, 3, 5, 1 };
        Tensor X     = new Tensor(shape, "TestX");
        Tensor W     = new Tensor(new TensorShape(15, 7), "TestW");

        X[0] = 3;
        W[0] = 5;
        Debug.Log($"X WxH:{X.flatHeight} {X.flatWidth}");
        Debug.Log($"W WxH:{W.flatHeight} {W.flatWidth}");
        Tensor Y = gpuOps.MatMul(X, false, W, false);

        Debug.Log($"Y WxH:{Y.flatHeight} {Y.flatWidth}");
        X.Dispose();
        W.Dispose();
        Y.Dispose();
        gpuOps.ResetAllocator(false);
        Debug.Assert(true); // Just getting here is good enough
    }
Example No. 4
    private void CustomTensorToRenderTexture(Tensor X, RenderTexture target, int batch, int fromChannel, Vector4 scale, Vector4 bias, Texture3D lut = null)
    {
        if (!internalSetup.shouldUseSRGBTensor)
        {
            X.ToRenderTexture(target, batch, fromChannel, scale, bias, lut);
            return;
        }

        //By default Barracuda works on Tensors containing values in linear color space.
        //Here we handle the custom conversion from tensor to texture when the tensor is in sRGB color space.
        //This is important for this demo as the network was trained with data in sRGB color space.
        //Direct support for this will be added in a later revision of Barracuda.
        if (!target.enableRandomWrite || !target.IsCreated())
        {
            target.Release();
            target.enableRandomWrite = true;
            target.Create();
        }

        var gpuBackend = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);
        var fn         = new CustomComputeKernel(tensorToTextureSRGB, "TensorToTexture" + (lut == null ? "NoLUT" : "3DLUT"));
        var XonDevice  = gpuBackend.Pin(X);

        fn.SetTensor("X", X.shape, XonDevice.buffer, XonDevice.offset);
        fn.shader.SetTexture(fn.kernelIndex, "Otex2D", target);
        fn.shader.SetVector("_Scale", scale);
        fn.shader.SetVector("_Bias", bias);
        fn.shader.SetInts("_Pad", new int[] { batch, 0, 0, fromChannel });
        fn.shader.SetBool("_FlipY", true);
        if (lut != null)
        {
            fn.shader.SetTexture(fn.kernelIndex, "Otex3D", lut);
            fn.shader.SetVector("_LutParams", new Vector2(1f / lut.width, lut.width - 1f));
        }

        fn.Dispatch(target.width, target.height, 1);
    }
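A hypothetical usage sketch for the helper above; the output tensor and the 256x256 texture size are assumptions made for illustration, not names from the snippet:

        // Assumed: 'output' is a Tensor produced by the network; any UAV-enabled texture size works.
        var target = new RenderTexture(256, 256, 0, RenderTextureFormat.ARGBFloat);
        target.enableRandomWrite = true;   // the compute kernel writes straight into the texture
        target.Create();
        CustomTensorToRenderTexture(output, target, batch: 0, fromChannel: 0, Vector4.one, Vector4.zero);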
Example No. 5
    public void TensorFlattenTest()
    {
        ReferenceComputeOps gpuOps;

        Debug.Log(ComputeShaderSingleton.Instance);
        gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

        TensorCachingAllocator tca = new TensorCachingAllocator();

        int[]  shape = new[] { 2, 2, 3, 4 };
        Tensor X     = tca.Alloc(new TensorShape(shape));

        for (int idx = 0; idx < new TensorShape(shape).length; idx++)
        {
            X[idx] = idx;
        }
        Debug.Log($"X WxH:{X.flatHeight} {X.flatWidth}");
        Debug.Log($"{X[0, 0]} {X[1, 0]}");
        Debug.Log($"{X[0, 0, 0, 0]} {X[0, 1, 0, 0]}");
        Debug.Log($"{X[0, 0, 0, 0]} {X[0, 0, 1, 0]}");
        Debug.Log($"{X[0, 0, 0, 0]} {X[0, 0, 0, 1]}");
        tca.Dispose();
        Debug.Assert(true); // Just getting here is good enough
    }
Example No. 6
        /// <summary>
        /// Fill a RenderTexture with a slice/batch of a tensor.
        /// </summary>
        public void ToRenderTexture(UnityEngine.RenderTexture target, int batch, int fromChannel, Vector4 scale, Vector4 bias, Texture3D lut = null)
        {
            var gpuBackend = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

            gpuBackend.TensorToRenderTexture(this, target, batch, fromChannel, scale, bias, lut);
        }
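A hedged usage sketch for this overload, remapping per-channel values from [-1, 1] into the displayable [0, 1] range; the output tensor and preview texture are assumed to exist and are not part of the snippet:

            // Assumed: 'output' is an NHWC tensor, 'preview' is a RenderTexture created by the caller.
            output.ToRenderTexture(preview, batch: 0, fromChannel: 0,
                                   scale: new Vector4(0.5f, 0.5f, 0.5f, 1f),
                                   bias:  new Vector4(0.5f, 0.5f, 0.5f, 0f));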
Example No. 7
        /// <summary>
        /// Fill a RenderTexture with a slice/batch of a tensor.
        /// </summary>
        public void ToRenderTexture(UnityEngine.RenderTexture target, int batch = 0, int fromChannel = 0, float scale = 1.0f, float bias = 0f)
        {
            var gpuBackend = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

            gpuBackend.TensorToRenderTexture(this, target, batch, fromChannel, scale, bias);
        }
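The scalar overload covers the common case where a single scale and bias apply to every channel; a minimal sketch under the same assumptions as above:

            // Assumed: 'output' and 'preview' as above; a single scale/bias remaps [-1, 1] to [0, 1].
            output.ToRenderTexture(preview, scale: 0.5f, bias: 0.5f);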