GetData() private method

private void GetData(Array data)
Parameter: data — the System.Array to fill with the buffer's contents
Returns: void
コード例 #1
1
    /*****/
    // Loads a cached 3D distance-field Texture3D for the hard-coded PDB model
    // "MA_matrix_G1", or — if the asset does not exist yet — bakes it on the
    // GPU (one compute pass fills a volume render texture from atom spheres,
    // a second flattens it into a readable buffer) and saves it as a .asset.
    // NOTE(review): editor-only (uses AssetDatabase); size and pdbName are
    // hard-coded, so this looks like a one-off asset bake — confirm.
    private void CreateDistanceField()
    {
        var size = 128;
        var pdbName = "MA_matrix_G1";
        string path = "Assets/Resources/3D Textures/" + pdbName + ".asset";

        // Reuse the previously baked texture if it is already on disk.
        Texture3D tmp = (Texture3D)AssetDatabase.LoadAssetAtPath(path, typeof(Texture3D));

        if (tmp)
        {
            _volumeTexture = tmp;
        }
        else
        {
            RenderTexture _distanceFieldRT;

            // Volume render target the distance-field kernel writes into.
            // All properties must be configured before Create().
            _distanceFieldRT = new RenderTexture(size, size, 0, RenderTextureFormat.R8);
            _distanceFieldRT.volumeDepth = size;
            _distanceFieldRT.isVolume = true;
            _distanceFieldRT.isPowerOfTwo = true;
            _distanceFieldRT.enableRandomWrite = true;
            _distanceFieldRT.filterMode = FilterMode.Trilinear;
            _distanceFieldRT.name = pdbName;
            _distanceFieldRT.hideFlags = HideFlags.HideAndDontSave;
            _distanceFieldRT.generateMips = true;
            _distanceFieldRT.useMipMap = true;
            _distanceFieldRT.Create();

            // Upload the atom spheres to the GPU, one float4 per sphere.
            var atomSpheres = PdbLoader.LoadAtomSpheres(pdbName);
            var atomSphereGPUBuffer = new ComputeBuffer(atomSpheres.Count, sizeof(float) * 4, ComputeBufferType.Default);
            atomSphereGPUBuffer.SetData(atomSpheres.ToArray());

            // Clear the volume before the compute pass writes distances.
            Graphics.SetRenderTarget(_distanceFieldRT);
            GL.Clear(true, true, new Color(0, 0, 0));

            var createDistanceFieldCS = Resources.Load("Compute Shaders/CreateDistanceField") as ComputeShader;
            createDistanceFieldCS.SetInt("_GridSize", size);
            createDistanceFieldCS.SetInt("_NumAtoms", atomSpheres.Count);
            createDistanceFieldCS.SetBuffer(0, "_SpherePositions", atomSphereGPUBuffer);
            createDistanceFieldCS.SetTexture(0, "_VolumeTexture", _distanceFieldRT);
            // Groups of 10 per axis — assumes [numthreads(10,10,10)] in the
            // kernel; TODO confirm against the compute shader source.
            createDistanceFieldCS.Dispatch(0, Mathf.CeilToInt(size / 10.0f), Mathf.CeilToInt(size / 10.0f), Mathf.CeilToInt(size / 10.0f));

            atomSphereGPUBuffer.Release();

            //****

            // Render textures cannot be read on the CPU directly: flatten
            // the size^3 voxels into a float buffer with a second kernel.
            var flatSize = size * size * size;
            var voxelGPUBuffer = new ComputeBuffer(flatSize, sizeof(float));

            var readVoxelsCS = Resources.Load("Compute Shaders/ReadVoxels") as ComputeShader;
            readVoxelsCS.SetInt("_VolumeSize", size);
            readVoxelsCS.SetBuffer(0, "_VoxelBuffer", voxelGPUBuffer);
            readVoxelsCS.SetTexture(0, "_VolumeTexture", _distanceFieldRT);
            readVoxelsCS.Dispatch(0, size, size, size);

            var voxelCPUBuffer = new float[flatSize];
            voxelGPUBuffer.GetData(voxelCPUBuffer);

            // Pack each voxel value into the alpha channel for Alpha8 storage.
            var volumeColors = new Color[flatSize];
            for (int i = 0; i < flatSize; i++)
            {
                volumeColors[i] = new Color(0, 0, 0, voxelCPUBuffer[i]);
            }

            var texture3D = new Texture3D(size, size, size, TextureFormat.Alpha8, true);
            texture3D.SetPixels(volumeColors);
            texture3D.wrapMode = TextureWrapMode.Clamp;
            texture3D.anisoLevel = 0;
            texture3D.Apply();

            // Persist the baked texture so subsequent runs hit the cache above.
            AssetDatabase.CreateAsset(texture3D, path);
            AssetDatabase.SaveAssets();

            // Print the path of the created asset
            Debug.Log(AssetDatabase.GetAssetPath(texture3D));

            voxelGPUBuffer.Release();

            _distanceFieldRT.Release();
            DestroyImmediate(_distanceFieldRT);

            _volumeTexture = texture3D;
        }
    }
コード例 #2
0
ファイル: KernelExample.cs プロジェクト: Appms/GPGPU-Tests
    // Runs the "CSMain2" kernel over 2x2 thread groups and prints the
    // resulting 64 integers as an 8x8 grid, one row per log line.
    void Start()
    {
        // 4*4 threads per group, 2*2 groups -> 64 ints total.
        var resultBuffer = new ComputeBuffer(4 * 4 * 2 * 2, sizeof(int));

        int kernelIndex = shader.FindKernel("CSMain2");
        shader.SetBuffer(kernelIndex, "buffer2", resultBuffer);
        shader.Dispatch(kernelIndex, 2, 2, 1);

        var results = new int[4 * 4 * 2 * 2];
        resultBuffer.GetData(results);

        for (int row = 0; row < 8; row++)
        {
            var line = "";
            for (int col = 0; col < 8; col++)
            {
                line += " " + results[col + row * 8];
            }
            Debug.Log(line);
        }

        resultBuffer.Release();
    }
コード例 #3
0
ファイル: GpuHelper.cs プロジェクト: Micah1/Pix2Pix
        /// <summary>
        /// Runs a 1-D element-wise compute kernel over <paramref name="input"/>
        /// and returns the result as a new tensor with the same shape.
        /// </summary>
        public static Tensor InvokeFunctionKernel(string name, Tensor input)
        {
            var compute = Pix2PixResources.Compute;
            var kernel  = compute.FindKernel(name);

            uint groupsX, groupsY, groupsZ;
            compute.GetKernelThreadGroupSizes(kernel, out groupsX, out groupsY, out groupsZ);
            // The kernel is expected to be 1-D (y/z group sizes of 1).
            Debug.Assert(groupsY == 1 && groupsZ == 1);

            var length = input.Data.Length;
            // Integer-division dispatch below requires an exact multiple.
            Debug.Assert(length % groupsX == 0);

            var inputBuffer  = new UnityEngine.ComputeBuffer(length, sizeof(float));
            var outputBuffer = new UnityEngine.ComputeBuffer(length, sizeof(float));

            inputBuffer.SetData(input.Data);
            compute.SetBuffer(kernel, "Input", inputBuffer);
            compute.SetBuffer(kernel, "Output", outputBuffer);
            compute.Dispatch(kernel, length / (int)groupsX, 1, 1);

            var result = new Tensor(input.Shape);
            outputBuffer.GetData(result.Data);

            inputBuffer.Dispose();
            outputBuffer.Dispose();

            return result;
        }
コード例 #4
0
    // Fills the population buffer with random genomes on the GPU via the
    // "InitializePopulation" kernel, then reads the individuals back and
    // logs their genome and fitness.
    void Start()
    {
        Debug.Log("Population size: " + populationSize);
        // Square grid dimensions derived from the population size.
        // NOTE(review): width/height are computed but never used below.
        int width = (int)Mathf.Round(Mathf.Sqrt(populationSize));
        int height = (int)Mathf.Round(Mathf.Sqrt(populationSize));

        // Buffer holds only 10 individuals, yet the dispatch below launches
        // 32x32 thread groups — NOTE(review): the kernel may index past the
        // buffer unless it bounds-checks; confirm against the shader source.
        testing = new ComputeBuffer(10, Marshal.SizeOf(typeof(Individual)));

        Debug.Log("Seed " + DateTime.Now.Millisecond);

        // Fill with random genome, and run first fitness test.
        int kernel = shader.FindKernel("InitializePopulation");
        DebugAux.Assert(kernel >= 0, "Couldn't find kernel: " + "InitializePopulation " + kernel);
        shader.SetBuffer(kernel, "Population", testing);
        shader.SetFloat("seed", DateTime.Now.Millisecond);
        shader.Dispatch(kernel, 32, 32, 1);

        // Read back and log the initialized individuals.
        Individual[] tes = new Individual[10];
        testing.GetData(tes);
        for (int i = 0; i < tes.Length; i++)
            Debug.Log(tes[i].genome + " " + tes[i].fitness);

        // Selection..
        /*kernel = shader.FindKernel("AllOnesFitness");
        DebugAux.Assert(kernel >= 0, "Couldn't find kernel: " + "AllOnesFitness " + kernel);
        shader.SetBuffer(kernel, "Population", testing);
        shader.Dispatch(kernel, 32, 32, 1);*/

        testing.Dispose();
    }
コード例 #5
0
 // Lua binding for UnityEngine.ComputeBuffer.GetData(Array).
 // Reads the buffer contents into the array at Lua stack slot 2 and pushes
 // true (1 Lua return value); on failure the exception is reported via
 // error(). In DEBUG builds the call is wrapped in a profiler sample; note
 // the finally block only exists when DEBUG is defined.
 static public int GetData(IntPtr l)
 {
     try {
                     #if DEBUG
         var    method     = System.Reflection.MethodBase.GetCurrentMethod();
         string methodName = GetMethodName(method);
                     #if UNITY_5_5_OR_NEWER
         UnityEngine.Profiling.Profiler.BeginSample(methodName);
                     #else
         Profiler.BeginSample(methodName);
                     #endif
                     #endif
         UnityEngine.ComputeBuffer self = (UnityEngine.ComputeBuffer)checkSelf(l);
         System.Array a1;
         checkType(l, 2, out a1);
         self.GetData(a1);
         pushValue(l, true);
         return(1);
     }
     catch (Exception e) {
         return(error(l, e));
     }
             #if DEBUG
     finally {
                     #if UNITY_5_5_OR_NEWER
         UnityEngine.Profiling.Profiler.EndSample();
                     #else
         Profiler.EndSample();
                     #endif
     }
             #endif
 }
コード例 #6
0
ファイル: GpuHelper.cs プロジェクト: Micah1/Pix2Pix
        /// <summary>
        /// Runs a (de)convolution kernel: forward mode halves the spatial
        /// dimensions, backward mode doubles them. Returns a new tensor.
        /// </summary>
        public static Tensor InvokeConvolutionKernel(
            ConvolutionMode mode, string name, Tensor input, Tensor filter, Tensor bias
            )
        {
            var compute = Pix2PixResources.Compute;
            var kernel  = compute.FindKernel(name);

            uint groupsX, groupsY, groupsZ;
            compute.GetKernelThreadGroupSizes(kernel, out groupsX, out groupsY, out groupsZ);

            // Backward ("deconvolution") upsamples 2x; forward downsamples 2x.
            var backward    = mode == ConvolutionMode.Backward;
            var outHeight   = backward ? input.Shape[0] * 2 : input.Shape[0] / 2;
            var outWidth    = backward ? input.Shape[1] * 2 : input.Shape[1] / 2;
            var outChannels = filter.Shape[backward ? 2 : 3];

            // Integer-division dispatch below requires exact multiples.
            Debug.Assert(outHeight % groupsZ == 0);
            Debug.Assert(outWidth % groupsY == 0);
            Debug.Assert(outChannels % groupsX == 0);

            var output = new Tensor(new [] { outHeight, outWidth, outChannels });

            var inputBuffer  = new UnityEngine.ComputeBuffer(input.Data.Length, sizeof(float));
            var filterBuffer = new UnityEngine.ComputeBuffer(filter.Data.Length, sizeof(float));
            var biasBuffer   = new UnityEngine.ComputeBuffer(bias.Data.Length, sizeof(float));
            var outputBuffer = new UnityEngine.ComputeBuffer(output.Data.Length, sizeof(float));

            inputBuffer.SetData(input.Data);
            filterBuffer.SetData(filter.Data);
            biasBuffer.SetData(bias.Data);

            compute.SetInts("InputShape", input.Shape);
            compute.SetInts("FilterShape", filter.Shape);
            compute.SetInts("OutputShape", output.Shape);

            compute.SetBuffer(kernel, "Input", inputBuffer);
            compute.SetBuffer(kernel, "Filter", filterBuffer);
            compute.SetBuffer(kernel, "Bias", biasBuffer);
            compute.SetBuffer(kernel, "Output", outputBuffer);

            compute.Dispatch(
                kernel,
                outChannels / (int)groupsX,
                outWidth / (int)groupsY,
                outHeight / (int)groupsZ);

            outputBuffer.GetData(output.Data);

            inputBuffer.Dispose();
            filterBuffer.Dispose();
            biasBuffer.Dispose();
            outputBuffer.Dispose();

            return output;
        }
コード例 #7
0
 // Copies GPU-computed vertex positions from the buffer back into the mesh
 // and recomputes its normals.
 public void updateVertices(ComputeBuffer verticesBuff)
 {
     Vector3[] updated = mesh.vertices;
     verticesBuff.GetData(updated);
     mesh.vertices = updated;
     mesh.RecalculateNormals();
 }
コード例 #8
0
 // Lua binding for UnityEngine.ComputeBuffer.GetData, dispatching on the
 // Lua argument count:
 //   2 args -> GetData(Array)
 //   5 args -> GetData(Array, int, int, int)
 // Pushes true on success (1 Lua return value); on an unmatched arity
 // pushes false plus an error message (2 return values). In DEBUG builds
 // the call is wrapped in a profiler sample; note the finally block only
 // exists when DEBUG is defined.
 static public int GetData(IntPtr l)
 {
     try {
                     #if DEBUG
         var    method     = System.Reflection.MethodBase.GetCurrentMethod();
         string methodName = GetMethodName(method);
                     #if UNITY_5_5_OR_NEWER
         UnityEngine.Profiling.Profiler.BeginSample(methodName);
                     #else
         Profiler.BeginSample(methodName);
                     #endif
                     #endif
         int argc = LuaDLL.lua_gettop(l);
         if (argc == 2)
         {
             UnityEngine.ComputeBuffer self = (UnityEngine.ComputeBuffer)checkSelf(l);
             System.Array a1;
             checkType(l, 2, out a1);
             self.GetData(a1);
             pushValue(l, true);
             return(1);
         }
         else if (argc == 5)
         {
             // Partial-read overload: array, managed start, buffer start, count.
             UnityEngine.ComputeBuffer self = (UnityEngine.ComputeBuffer)checkSelf(l);
             System.Array a1;
             checkType(l, 2, out a1);
             System.Int32 a2;
             checkType(l, 3, out a2);
             System.Int32 a3;
             checkType(l, 4, out a3);
             System.Int32 a4;
             checkType(l, 5, out a4);
             self.GetData(a1, a2, a3, a4);
             pushValue(l, true);
             return(1);
         }
         pushValue(l, false);
         LuaDLL.lua_pushstring(l, "No matched override function GetData to call");
         return(2);
     }
     catch (Exception e) {
         return(error(l, e));
     }
             #if DEBUG
     finally {
                     #if UNITY_5_5_OR_NEWER
         UnityEngine.Profiling.Profiler.EndSample();
                     #else
         Profiler.EndSample();
                     #endif
     }
             #endif
 }
コード例 #9
0
ファイル: KMeansClustering.cs プロジェクト: matmuze/VIZZIES
    /// <summary>
    /// Runs a fixed 5 iterations of GPU k-means over the given points and
    /// returns the resulting centroid positions.
    /// </summary>
    public static List<Vector4> GetClusters(List<Vector4> atoms, int numCentroids)
    {
        if (ComputeShaderManager.Instance.KMeansCS == null) throw new Exception("KMeans compute shader not assigned");
        if (numCentroids <= 0) throw new Exception("Num centroids too low");

        // Seed centroids by sampling the input at a regular stride; if the
        // stride runs past the end, fall back to random picks.
        var centroidStep = Mathf.CeilToInt(atoms.Count / (float)numCentroids);
        var seeds = new List<Vector4>();
        for (var c = 0; c < numCentroids; c++)
        {
            var index = c * centroidStep;
            seeds.Add(index < atoms.Count
                ? atoms[index]
                : atoms[UnityEngine.Random.Range(0, atoms.Count)]);
        }

        var centroidBuffer = new ComputeBuffer(numCentroids, 4 * sizeof(float));
        centroidBuffer.SetData(seeds.ToArray());

        var pointBuffer = new ComputeBuffer(atoms.Count, 4 * sizeof(float));
        pointBuffer.SetData(atoms.ToArray());

        var membershipBuffer = new ComputeBuffer(atoms.Count, sizeof(int));

        ComputeShaderManager.Instance.KMeansCS.SetInt("_NumPoints", atoms.Count);
        ComputeShaderManager.Instance.KMeansCS.SetInt("_NumCentroids", numCentroids);

        // Kernel 0 assigns each point to its nearest centroid; kernel 1
        // recomputes the centroids from their assigned members.
        for (var iteration = 0; iteration < 5; iteration++)
        {
            ComputeShaderManager.Instance.KMeansCS.SetBuffer(0, "_PointBuffer", pointBuffer);
            ComputeShaderManager.Instance.KMeansCS.SetBuffer(0, "_CentroidBuffer", centroidBuffer);
            ComputeShaderManager.Instance.KMeansCS.SetBuffer(0, "_MembershipBuffer", membershipBuffer);
            ComputeShaderManager.Instance.KMeansCS.Dispatch(0, Mathf.CeilToInt(atoms.Count / 1), 1, 1);

            ComputeShaderManager.Instance.KMeansCS.SetBuffer(1, "_PointBuffer", pointBuffer);
            ComputeShaderManager.Instance.KMeansCS.SetBuffer(1, "_NewCentroidBuffer", centroidBuffer);
            ComputeShaderManager.Instance.KMeansCS.SetBuffer(1, "_NewMembershipBuffer", membershipBuffer);
            ComputeShaderManager.Instance.KMeansCS.Dispatch(1, Mathf.CeilToInt(numCentroids / 64.0f), 1, 1);
        }

        var updatedCentroids = new Vector4[numCentroids];
        centroidBuffer.GetData(updatedCentroids);

        pointBuffer.Release();
        centroidBuffer.Release();
        membershipBuffer.Release();

        return updatedCentroids.ToList();
    }
コード例 #10
0
	// Runs kernel 0 over a single thread group, reads back the 4 ints it
	// produced, logs them, and releases the buffer.
	void Start () {
        ComputeBuffer buffer = new ComputeBuffer(4, sizeof(int));
        shader.SetBuffer(0, "buffer1", buffer);
        shader.Dispatch(0, 1, 1, 1);
        int[] data = new int[4];
        buffer.GetData(data);

        // BUG FIX: Release() was previously called inside this loop (four
        // times, with an otherwise empty body). Log each value here and
        // release the buffer exactly once, after the loop.
        for (int i = 0; i < 4; i++)
        {
            Debug.Log(data[i]);
        }

        buffer.Release();
    }
コード例 #11
0
 // Lua binding for ComputeBuffer.GetData(Array). Reads the buffer into the
 // array at Lua stack slot 2 and returns 0 values to Lua; failures are
 // reported through luaL_error rather than propagated as exceptions.
 static public int GetData(IntPtr l)
 {
     try
     {
         var buffer = (UnityEngine.ComputeBuffer)checkSelf(l);
         System.Array target;
         checkType(l, 2, out target);
         buffer.GetData(target);
         return 0;
     }
     catch (Exception e)
     {
         LuaDLL.luaL_error(l, e.ToString());
         return 0;
     }
 }
コード例 #12
0
 // Lua binding for ComputeBuffer.GetData(Array). Fills the array at Lua
 // stack slot 2 from the buffer and pushes true (1 return value); on
 // failure delegates to error().
 static public int GetData(IntPtr l)
 {
     try
     {
         var buffer = (UnityEngine.ComputeBuffer)checkSelf(l);
         System.Array target;
         checkType(l, 2, out target);
         buffer.GetData(target);
         pushValue(l, true);
         return 1;
     }
     catch (Exception e)
     {
         return error(l, e);
     }
 }
コード例 #13
0
ファイル: GpuHelper.cs プロジェクト: Micah1/Pix2Pix
        /// <summary>
        /// Runs a per-channel normalization kernel over <paramref name="input"/>
        /// using the given scale and offset tensors, reusing the kernel's
        /// "Filter"/"Bias" buffer slots for scale/offset. Returns a new tensor.
        /// </summary>
        public static Tensor InvokeNormalizationKernel(
            string name, Tensor input, Tensor scale, Tensor offset
            )
        {
            var compute = Pix2PixResources.Compute;
            var kernel  = compute.FindKernel(name);

            uint groupsX, groupsY, groupsZ;
            compute.GetKernelThreadGroupSizes(kernel, out groupsX, out groupsY, out groupsZ);

            var length   = input.Data.Length;
            var channels = input.Shape[2];

            // One scale/offset entry per channel; the integer-division
            // dispatch below requires channels to be a multiple of groupsX.
            Debug.Assert(channels % groupsX == 0);
            Debug.Assert(channels == scale.Data.Length);
            Debug.Assert(channels == offset.Data.Length);

            var inputBuffer  = new UnityEngine.ComputeBuffer(length, sizeof(float));
            var scaleBuffer  = new UnityEngine.ComputeBuffer(channels, sizeof(float));
            var offsetBuffer = new UnityEngine.ComputeBuffer(channels, sizeof(float));
            var outputBuffer = new UnityEngine.ComputeBuffer(length, sizeof(float));

            inputBuffer.SetData(input.Data);
            scaleBuffer.SetData(scale.Data);
            offsetBuffer.SetData(offset.Data);

            compute.SetInts("InputShape", input.Shape);
            compute.SetInts("OutputShape", input.Shape);

            compute.SetBuffer(kernel, "Input", inputBuffer);
            compute.SetBuffer(kernel, "Filter", scaleBuffer);
            compute.SetBuffer(kernel, "Bias", offsetBuffer);
            compute.SetBuffer(kernel, "Output", outputBuffer);

            compute.Dispatch(kernel, channels / (int)groupsX, 1, 1);

            var result = new Tensor(input.Shape);
            outputBuffer.GetData(result.Data);

            inputBuffer.Dispose();
            scaleBuffer.Dispose();
            offsetBuffer.Dispose();
            outputBuffer.Dispose();

            return result;
        }
コード例 #14
0
    // Transforms 5 random points by 5 random TRS matrices on the GPU via the
    // "Multiply" kernel and logs each pair before and after the dispatch.
    public void RunMultiplyShader() {
        var input = new VecMatPair[5];
        var results = new VecMatPair[5];

        // Fill the input with random point/matrix pairs.
        for (int i = 0; i < input.Length; i++) {
            input[i].point = UnityEngine.Random.onUnitSphere;
            input[i].matrix = Matrix4x4.TRS(UnityEngine.Random.onUnitSphere, UnityEngine.Random.rotation, UnityEngine.Random.onUnitSphere);
            Debug.Log("PreShader! Pos: " + input[i].point.ToString() + ", Matrix: " + input[i].matrix.ToString());
        }

        // Stride 76 = Vector3 (12 bytes) + Matrix4x4 (64 bytes).
        ComputeBuffer pairBuffer = new ComputeBuffer(input.Length, 76);
        int kernelHandle = shader.FindKernel("Multiply");
        pairBuffer.SetData(input);
        shader.SetBuffer(kernelHandle, "dataBuffer", pairBuffer);
        shader.Dispatch(kernelHandle, input.Length, 1, 1);
        pairBuffer.GetData(results);

        for (int i = 0; i < results.Length; i++) {
            Debug.Log("PostShader! Pos: " + results[i].point.ToString() + ", Matrix: " + results[i].matrix.ToString());
        }

        pairBuffer.Dispose();
    }
コード例 #15
0
    // One-time setup for the GPU grass system: loads shader/material/noise
    // resources, allocates the grass and obstacle buffers, pushes shared
    // parameters to both the compute shader and the material, then runs the
    // init kernel once to populate the grass buffer.
    public void Init()
    {
        _grassShader = Resources.Load<Shader>("Shaders/GrassGeneratorShader");
        _grassMaterial = Resources.Load<Material>("GrassGeneratorMat");
        _noiseTex = Resources.Load<Texture>("Noise");
        if(_noiseTex == null)
        {
            Debug.LogError("Not found noise");
        }

        _grassComputeShader = Resources.Load<ComputeShader>("ComputeShaders/GrassComputeShader");
        _initGrassKernelId = _grassComputeShader.FindKernel(kInitGrassKernel);
        _updateGrassKernelId = _grassComputeShader.FindKernel(kUpdateGrassKernel);

        // One grass item per thread: groups * threads-per-group on each axis.
        _numGrassItems = _numGroupGrassX*_numGroupGrassY*kThreadsX*kThreadsY;
        _grassBuffer = new ComputeBuffer(_numGrassItems, System.Runtime.InteropServices.Marshal.SizeOf(typeof(GrassData)));
        _obstaclesBuffer = new ComputeBuffer(kMaxObstacles, System.Runtime.InteropServices.Marshal.SizeOf(typeof(ObstacleData)));

        // Grid dimensions shared by the compute shader and the material.
        _grassComputeShader.SetFloat("_Width", _numGroupGrassX*kThreadsX);
        _grassComputeShader.SetFloat("_Height", _numGroupGrassY*kThreadsY);
        _grassComputeShader.SetTexture(_initGrassKernelId, "_NoiseTex", _noiseTex);

        _grassMaterial.SetTexture("_NoiseTex", _noiseTex);
        _grassMaterial.SetFloat("_Width", _numGroupGrassX*kThreadsX);
        _grassMaterial.SetFloat("_Height", _numGroupGrassY*kThreadsY);

        // Both kernels share the grass buffer; only update needs obstacles.
        _grassComputeShader.SetBuffer(_initGrassKernelId, "_GrassBuffer", _grassBuffer);
        _grassComputeShader.SetBuffer(_updateGrassKernelId, "_GrassBuffer", _grassBuffer);
        _grassComputeShader.SetBuffer(_updateGrassKernelId, "_ObstaclesBuffer", _obstaclesBuffer);
        _grassComputeShader.SetInt("_NumObstacles", 0);
        _grassMaterial.SetBuffer("_GrassBuffer", _grassBuffer);

        // Populate the grass buffer once on the GPU.
        _grassComputeShader.Dispatch(_initGrassKernelId, _numGroupGrassX, _numGroupGrassY, 1);
        #if GRASS_CPU
        // Debug readback of the generated grass data onto the CPU.
        _grassDataTestCPU = new GrassData[_numGrassItems];
        _grassBuffer.GetData(_grassDataTestCPU);
        #endif
        _isInit = true;
    }
コード例 #16
0
 // Lua binding for ComputeBuffer.GetData, dispatching on the Lua argument
 // count:
 //   2 args -> GetData(Array)
 //   5 args -> GetData(Array, int, int, int)
 // Pushes true on success (1 return value); on an unmatched arity pushes
 // false plus an error string (2 return values).
 static public int GetData(IntPtr l)
 {
     try
     {
         int argc = LuaDLL.lua_gettop(l);
         if (argc == 2)
         {
             var buffer = (UnityEngine.ComputeBuffer)checkSelf(l);
             System.Array target;
             checkType(l, 2, out target);
             buffer.GetData(target);
             pushValue(l, true);
             return 1;
         }
         if (argc == 5)
         {
             var buffer = (UnityEngine.ComputeBuffer)checkSelf(l);
             System.Array target;
             checkType(l, 2, out target);
             System.Int32 managedStart;
             checkType(l, 3, out managedStart);
             System.Int32 bufferStart;
             checkType(l, 4, out bufferStart);
             System.Int32 count;
             checkType(l, 5, out count);
             buffer.GetData(target, managedStart, bufferStart, count);
             pushValue(l, true);
             return 1;
         }
         pushValue(l, false);
         LuaDLL.lua_pushstring(l, "No matched override function to call");
         return 2;
     }
     catch (Exception e)
     {
         return error(l, e);
     }
 }
コード例 #17
0
    /// <summary>
    /// Uses the GPU to process an array of 4D coordinates for noise and return an array with the noise at the specified coordinates.
    /// </summary>
    /// <returns>Float array, or null if the compute shader could not be loaded.</returns>
    /// <param name="positions"> Array of coordinates to process. </param>
    /// <param name="noiseScale"> Value to scale the noise coordinates by. </param>
    /// <param name="normalize"> Whether or not to remap the noise from (-1, 1) to (0, 1). </param>
    public static float[] NoiseArrayGPU(UnityEngine.Vector4[] positions, float noiseScale = 0.01f, bool normalize = true)
    {
        UnityEngine.ComputeShader shader = UnityEngine.Resources.Load(shaderPath) as UnityEngine.ComputeShader;
        if (shader == null)
        {
            UnityEngine.Debug.LogError(noShaderMsg);
            return(null);
        }

        int kernel = shader.FindKernel("ComputeNoiseArray");

        SetShaderVars(shader, UnityEngine.Vector2.zero, normalize, noiseScale);
        shader.SetInt("dimension", 4);

        // Permutation table (ints, stride 4 bytes).
        UnityEngine.ComputeBuffer permBuffer = new UnityEngine.ComputeBuffer(perm.Length, 4);
        permBuffer.SetData(perm);
        shader.SetBuffer(kernel, "perm", permBuffer);

        // Input coordinates (float4, stride 16 bytes).
        UnityEngine.ComputeBuffer posBuffer = new UnityEngine.ComputeBuffer(positions.Length, 16);
        posBuffer.SetData(positions);
        shader.SetBuffer(kernel, "float4Array", posBuffer);

        // One float of output per input coordinate (stride 4 bytes).
        UnityEngine.ComputeBuffer outputBuffer = new UnityEngine.ComputeBuffer(positions.Length, 4);
        shader.SetBuffer(kernel, "outputArray", outputBuffer);

        // BUG FIX: the previous integer division (positions.Length / 14)
        // truncated, leaving up to 13 trailing coordinates unprocessed when
        // the length was not a multiple of 14 — and dispatched zero groups
        // for fewer than 14 items. Round up instead. Assumes the kernel
        // bounds-checks its thread index against the output length —
        // TODO confirm against ComputeNoiseArray's thread-group size of 14.
        shader.Dispatch(kernel, UnityEngine.Mathf.CeilToInt(positions.Length / 14f), 1, 1);

        float[] outputData = new float[positions.Length];
        outputBuffer.GetData(outputData);

        permBuffer.Release();
        posBuffer.Release();
        outputBuffer.Release();

        return(outputData);
    }
コード例 #18
0
ファイル: CapturePanorama.cs プロジェクト: EliCDavis/ped-sim
        public IEnumerator CaptureScreenshotAsyncHelper(string filenameBase, bool async)
        {
            if (async)
                while (Capturing)
                    yield return null; // If CaptureScreenshot() was called programmatically multiple times, serialize the coroutines
            Capturing = true;

            if (!OnCaptureStart())
            {
                audioSource.PlayOneShot(failSound);
                Capturing = false;
                yield break;
            }

            // Have to refresh cameras each frame during video in case cameras or image effects change - consider an option for this.
            Camera[] cameras = GetCaptureCameras();
            Array.Sort(cameras, (x, y) => x.depth.CompareTo(y.depth));

            if (cameras.Length == 0)
            {
                Debug.LogWarning("No cameras found to capture");
                audioSource.PlayOneShot(failSound);
                Capturing = false;
                yield break;
            }

            // Need to do this first in case we need to reinitialize
            if (antiAliasing != AntiAliasing._1)
            {
                foreach (Camera c in cameras)
                {
                    if (c.actualRenderingPath == RenderingPath.DeferredLighting ||
                        c.actualRenderingPath == RenderingPath.DeferredShading)
                    {
                        Debug.LogWarning("CapturePanorama: Setting Anti Aliasing=1 because at least one camera in deferred mode. Use SSAA setting or Antialiasing image effect if needed.");
                        antiAliasing = AntiAliasing._1;
                        Reinitialize();
                        break;
                    }
                }
            }

            Log("Starting panorama capture");
            if (!captureEveryFrame && startSound != null && Camera.main != null)
            {
                audioSource.PlayOneShot(startSound);
            }

            List<ScreenFadeControl> fadeControls = new List<ScreenFadeControl>();
            foreach (Camera c in Camera.allCameras)
            {
                if (c.isActiveAndEnabled && c.targetTexture == null) // Is a camera visible to the player
                {
                    var fadeControl = c.gameObject.AddComponent<ScreenFadeControl>();
                    fadeControl.fadeMaterial = fadeMaterial;
                    fadeControls.Add(fadeControl);
                }
            }
            SetFadersEnabled(fadeControls, false);

            if (fadeDuringCapture && async)
                yield return StartCoroutine(FadeOut(fadeControls));

            // Make sure black is shown before we start - sometimes two frames are needed
            for (int i = 0; i < 2; i++)
                yield return new WaitForEndOfFrame();

            // Initialize compute buffers - do here instead of in Reinitialize() to work around error on Destroy()
            ComputeBuffer convertPanoramaResultBuffer = null;
            ComputeBuffer forceWaitResultConvertPanoramaStereoBuffer = null;
            if (usingGpuTransform)
            {
                if (captureStereoscopic)
                {
                    convertPanoramaResultBuffer =
                        new ComputeBuffer(/*count*/panoramaWidth * panoramaHeight * 2 + 1, /*stride*/4); // + 1 for sentinel
                    convertPanoramaStereoShader.SetBuffer(renderStereoIdx, "result", convertPanoramaResultBuffer);

                    forceWaitResultConvertPanoramaStereoBuffer = new ComputeBuffer(/*count*/1, /*stride*/4);
                    convertPanoramaStereoShader.SetBuffer(renderStereoIdx, "forceWaitResultBuffer", forceWaitResultConvertPanoramaStereoBuffer);
                }
                else
                {
                    int sliceHeight = (panoramaHeight + ResultBufferSlices - 1) / ResultBufferSlices;
                    convertPanoramaResultBuffer =
                        new ComputeBuffer(/*count*/panoramaWidth * sliceHeight + 1, /*stride*/4); // + 1 for sentinel
                    foreach (int kernelIdx in convertPanoramaKernelIdxs)
                        convertPanoramaShader.SetBuffer(kernelIdx, "result", convertPanoramaResultBuffer);
                }
            }
            int cameraPixelsBufferNumTextures = numCameras;
            overlapTextures = 0;
            int circlePointCircularBufferSize = 0;
            if (captureStereoscopic && usingGpuTransform)
            {
                overlapTextures = ssaaFactor == 1 ? 1 : 2;  // Overlap of 1 supports blending between circle points, overlap of 2 supports it even with SSAA at boundaries
                circlePointCircularBufferSize = 1 + overlapTextures;
                // 2 + for top/bottom, and divide by 2 because we're doing left/right and up/down separately
                cameraPixelsBufferNumTextures = Math.Min(numCameras, 2 + (CamerasPerCirclePoint / 2) * circlePointCircularBufferSize);
            }
            ComputeBuffer cameraPixelsBuffer = new ComputeBuffer(/*count*/cameraPixelsBufferNumTextures * cameraWidth * cameraHeight + 1, /*stride*/4);
            textureToBufferShader.SetBuffer(textureToBufferIdx, "result", cameraPixelsBuffer);

            // Set up sentinels to detect out of graphics memory
            textureToBufferShader.SetInt("sentinelIdx", cameraPixelsBuffer.count - 1);
            if (usingGpuTransform && !captureStereoscopic)
            {
                convertPanoramaShader.SetInt("cameraPixelsSentinelIdx", cameraPixelsBuffer.count - 1);
                convertPanoramaShader.SetInt("sentinelIdx", convertPanoramaResultBuffer.count - 1);
                foreach (int kernelIdx in convertPanoramaKernelIdxs)
                    convertPanoramaShader.SetBuffer(kernelIdx, "cameraPixels", cameraPixelsBuffer);
            }
            if (usingGpuTransform && captureStereoscopic)
            {
                convertPanoramaStereoShader.SetInt("cameraPixelsSentinelIdx", cameraPixelsBuffer.count - 1);
                convertPanoramaStereoShader.SetBuffer(renderStereoIdx, "cameraPixels", cameraPixelsBuffer);
            }

            ComputeBuffer forceWaitResultTextureToBufferBuffer = new ComputeBuffer(/*count*/1, /*stride*/4);
            textureToBufferShader.SetBuffer(textureToBufferIdx, "forceWaitResultBuffer", forceWaitResultTextureToBufferBuffer);

            float startTime = Time.realtimeSinceStartup;

            Quaternion headOrientation = Quaternion.identity;
#if OVR_SUPPORT
            if (OVRManager.display != null)
            {
                headOrientation = OVRManager.display.GetHeadPose(0.0).orientation;
            }
#endif
#if UNITY_5_1
            if (VRSettings.enabled && VRSettings.loadedDevice != VRDeviceType.None)
            {
                headOrientation = InputTracking.GetLocalRotation(0);
            }
#endif

            Log("Rendering camera views");
            foreach (Camera c in cameras)
                Log("Camera name: " + c.gameObject.name);

            var methodMap = new Dictionary<Camera, List<ImageEffectCopyCamera.InstanceMethodPair>>();
            foreach (Camera c in cameras)
                methodMap[c] = ImageEffectCopyCamera.GenerateMethodList(c);

            // Need to extract each cubemap into a Texture2D so we can read the pixels, but Unity bug
            // prevents this with antiAliasing: http://issuetracker.unity3d.com/issues/texture2d-dot-readpixels-fails-if-rendertexture-has-anti-aliasing-set
            // We copy the cubemap textures using a shader as a workaround.

            string suffix = "." + FormatToExtension(imageFormat);
            string filePath = "";
            // Save in separate thread to avoid hiccups
            string imagePath = saveImagePath;
            if (imagePath == null || imagePath == "")
            {
                imagePath = Application.dataPath + "/..";
            }

            convertPanoramaStereoShader.SetInt("circlePointCircularBufferSize", circlePointCircularBufferSize);
            int nextCirclePointCircularBufferStart = 0, nextCirclePointStart = 0, writeIdx = 0;
            int ilimit = usingGpuTransform ? numCameras + overlapTextures * CamerasPerCirclePoint : numCameras;
            int leftRightPhaseEnd = (ilimit - 2) / 2 + 2;
            int circlePointsRendered = 0;
            int saveCubemapImageNum = 0;

            Log("Changing quality level");
            int saveQualityLevel = QualitySettings.GetQualityLevel();
            bool qualitySettingWasChanged = false;
            string[] qualitySettingNames = QualitySettings.names;
            if (qualitySetting != qualitySettingNames[saveQualityLevel]) // Don't change if already set to it
            {
                for (int i = 0; i < qualitySettingNames.Length; i++)
                {
                    string name = qualitySettingNames[i];
                    if (name == qualitySetting)
                    {
                        QualitySettings.SetQualityLevel(i, /*applyExpensiveChanges*/false); // applyExpensiveChanges causes trouble
                        qualitySettingWasChanged = true;
                    }
                }
                if (qualitySetting != "" && !qualitySettingWasChanged)
                {
                    Debug.LogError("Quality setting specified for CapturePanorama is invalid, ignoring.", this);
                }
            }

            BeforeRenderPanorama();

            RenderTexture.active = null;
            for (int i = 0; i < ilimit; i++)
            {
                // Don't use RenderToCubemap - it causes problems with compositing multiple cameras, and requires
                // more temporary VRAM. Just render cube map manually.
                if (captureStereoscopic)
                {
                    if (i < 2)
                    {
                        // 0, 1 are top/bottom caps
                        camGos[1].transform.localPosition = Vector3.zero;
                        camGos[1].transform.localRotation = Quaternion.Euler((i == 0) ? 90.0f : -90.0f, 0.0f, 0.0f);
                    }
                    else
                    {
                        // Do all left/right textures first then all up/down textures
                        int iAdjusted, numInGroupBias;
                        if (i < leftRightPhaseEnd)
                        {
                            iAdjusted = i - 2;
                            numInGroupBias = 0;
                        }
                        else
                        {
                            iAdjusted = i - leftRightPhaseEnd;
                            numInGroupBias = 2;
                        }

                        int circlePointNum = (iAdjusted / (CamerasPerCirclePoint / 2)) % numCirclePoints;
                        int numInGroup = iAdjusted % (CamerasPerCirclePoint / 2) + numInGroupBias;

                        float circleAngle = 360.0f * circlePointNum / numCirclePoints;
                        camGos[1].transform.localPosition = Quaternion.Euler(0.0f, circleAngle, 0.0f) * Vector3.forward * circleRadius;

                        if (numInGroup < 2)
                            camGos[1].transform.localRotation = Quaternion.Euler(0.0f, circleAngle + (numInGroup == 0 ? -hFovAdjustDegrees : hFovAdjustDegrees), 0.0f);
                        else
                            camGos[1].transform.localRotation = Quaternion.Euler((numInGroup == 2 ? -vFovAdjustDegrees : vFovAdjustDegrees), circleAngle, 0.0f);

                        if (numInGroup == 1 || numInGroup == 3) circlePointsRendered++;
                    }
                }
                else
                {
                    switch ((CubemapFace)i)
                    {
                        case CubemapFace.PositiveX: camGos[1].transform.localRotation = Quaternion.Euler(  0.0f,  90.0f, 0.0f); break;
                        case CubemapFace.NegativeX: camGos[1].transform.localRotation = Quaternion.Euler(  0.0f, -90.0f, 0.0f); break;
                        case CubemapFace.PositiveY: camGos[1].transform.localRotation = Quaternion.Euler( 90.0f,   0.0f, 0.0f); break;
                        case CubemapFace.NegativeY: camGos[1].transform.localRotation = Quaternion.Euler(-90.0f,   0.0f, 0.0f); break;
                        case CubemapFace.PositiveZ: camGos[1].transform.localRotation = Quaternion.Euler(  0.0f,   0.0f, 0.0f); break;
                        case CubemapFace.NegativeZ: camGos[1].transform.localRotation = Quaternion.Euler(  0.0f, 180.0f, 0.0f); break;
                    }
                }

                foreach (Camera c in cameras)
                {
                    // To get the camera in the right eye position, migrate the camera transform to camGos[0]
                    camGos[2].transform.parent = null;
                    
                    cam.CopyFrom(c);

                    // TODO: Determine if we should reset matrices of the camera in case it's using custom transform matrices
                    
                    camGos[0].transform.localPosition = cam.transform.localPosition;
                    camGos[0].transform.localRotation = cam.transform.localRotation;
                    camGos[2].transform.parent = camGos[1].transform;
                    cam.transform.localPosition = Vector3.zero;
                    cam.transform.localRotation = Quaternion.identity;
                    copyCameraScript.enabled = methodMap[c].Count > 0;
                    copyCameraScript.onRenderImageMethods = methodMap[c];
                    cam.fieldOfView = vFov; // hFov inferred from aspect ratio of target

                    // Question: Should we adjust near clip in stereoscopic mode based on circleRadius?
                    // (avoids clipping that doesn't occur in normal camera view but might lead to unexpected bad effects)

                    camGos[0].transform.rotation *= Quaternion.Inverse(headOrientation);
                    if (useDefaultOrientation)
                        camGos[0].transform.rotation = Quaternion.identity;

                    cam.targetTexture = cubemapRenderTexture;
                    // Aspect ratio must be determined by size of render target. This is critical when Unity native VR is enabled.
                    cam.ResetAspect();

                    // Temporarily set original camera to same position/rotation/field of view as
                    // rendering camera during render. If any image effects examine camera
                    // orientation/FOV this will ensure they behave correctly.

                    Vector3 savePosition = c.transform.position;
                    Quaternion saveRotation = c.transform.rotation;
                    float saveFieldOfView = c.fieldOfView;
                    RenderTexture saveRenderTexture = c.targetTexture;

                    c.transform.position = cam.transform.position;
                    c.transform.rotation = cam.transform.rotation;
                    c.fieldOfView = cam.fieldOfView;

                    cam.Render();

                    c.transform.position = savePosition;
                    c.transform.rotation = saveRotation;
                    c.fieldOfView = saveFieldOfView;
                    c.targetTexture = saveRenderTexture;
                }
                
                // Read one pixel from texture to force render to complete before continuing
                RenderTexture.active = cubemapRenderTexture;
                forceWaitTexture.ReadPixels(new Rect(cameraWidth - 1, cameraHeight - 1, 1, 1), 0, 0);

                int forceWaitValue = 1000000 + i;
                textureToBufferShader.SetInt("forceWaitValue", forceWaitValue);
                textureToBufferShader.SetTexture(textureToBufferIdx, "source", cubemapRenderTexture);
                textureToBufferShader.SetInt("startIdx", writeIdx * cameraWidth * cameraHeight);
                textureToBufferShader.Dispatch(textureToBufferIdx, (cameraWidth + threadsX - 1) / threadsX, (cameraHeight + threadsY - 1) / threadsY, 1);

                uint[] forceWaitResult = new uint[1];
                forceWaitResultTextureToBufferBuffer.GetData(forceWaitResult);
                if (forceWaitResult[0] != forceWaitValue)
                    Debug.LogError("TextureToBufferShader: Unexpected forceWaitResult value " + forceWaitResult[0] + ", should be " + forceWaitValue);

                if (saveCubemap &&
                    ((i < 2) ||
                     (i >= 2 && i < 2 + numCirclePoints * 2) ||
                     (i >= leftRightPhaseEnd && i < leftRightPhaseEnd + numCirclePoints * 2)))
                {
                    // This is really slow - retrieving all cameraPixels data for just a portion of it. But this is mainly useful for debugging anyway.
                    cameraPixelsBuffer.GetData(cameraPixels);
                    if (cameraPixels[cameraPixelsBuffer.count - 1] != BufferSentinelValue)
                        ReportOutOfGraphicsMemory();

                    SaveCubemapImage(cameraPixels, filenameBase, suffix, imagePath, saveCubemapImageNum, writeIdx);
                    saveCubemapImageNum++;
                }

                writeIdx++;
                if (writeIdx >= cameraPixelsBufferNumTextures) writeIdx = 2; // Leave top/bottom in indexes 0/1

                // For stereoscopic GPU transform, interleave capture and rendering to decrease VRAM consumption
                if (captureStereoscopic && usingGpuTransform &&
                    ((i - 2) + 1) % (CamerasPerCirclePoint / 2) == 0 &&
                    (circlePointsRendered - nextCirclePointStart >= circlePointCircularBufferSize || i + 1 == 2 + (ilimit - 2) / 2 || i + 1 == ilimit))
                {
                    forceWaitValue = 2000000 + i;
                    convertPanoramaStereoShader.SetInt("forceWaitValue", forceWaitValue);
                    convertPanoramaStereoShader.SetInt("leftRightPass", i < leftRightPhaseEnd ? 1 : 0);
                    convertPanoramaStereoShader.SetInt("circlePointStart", nextCirclePointStart);
                    convertPanoramaStereoShader.SetInt("circlePointEnd", cameraPixelsBufferNumTextures < numCameras ? circlePointsRendered : circlePointsRendered + 1);
                    convertPanoramaStereoShader.SetInt("circlePointCircularBufferStart", nextCirclePointCircularBufferStart);
                    convertPanoramaStereoShader.Dispatch(renderStereoIdx, (panoramaWidth + threadsX - 1) / threadsX, (panoramaHeight + threadsY - 1) / threadsY, 2);

                    forceWaitResultConvertPanoramaStereoBuffer.GetData(forceWaitResult);
                    if (forceWaitResult[0] != forceWaitValue)
                        Debug.LogError("ConvertPanoramaStereoShader: Unexpected forceWaitResult value " + forceWaitResult[0] + ", should be " + forceWaitValue);

                    if (i + 1 == leftRightPhaseEnd)
                    {
                        nextCirclePointCircularBufferStart = (nextCirclePointCircularBufferStart + circlePointCircularBufferSize) % circlePointCircularBufferSize;
                        nextCirclePointStart = 0;
                        circlePointsRendered = 0;
                    }
                    else
                    {
                        nextCirclePointStart = circlePointsRendered - overlapTextures;
                        nextCirclePointCircularBufferStart = (nextCirclePointCircularBufferStart + circlePointCircularBufferSize - overlapTextures) % circlePointCircularBufferSize;
                    }
                }

                RenderTexture.active = null;
            }

            AfterRenderPanorama();

            Log("Resetting quality level");
            if (qualitySettingWasChanged)
                QualitySettings.SetQualityLevel(saveQualityLevel, /*applyExpensiveChanges*/false);

            // If we need to access the cubemap pixels on the CPU, retrieve them now
            if (saveCubemap || !usingGpuTransform)
            {
                cameraPixelsBuffer.GetData(cameraPixels);
                if (cameraPixels[cameraPixelsBuffer.count - 1] != BufferSentinelValue)
                    ReportOutOfGraphicsMemory();
            }

            RenderTexture.active = null;

            if (saveCubemap &&
                !(captureStereoscopic && usingGpuTransform)) // In this mode images are saved during capture
            {
                // Save cubemap while still faded, as fast as possible - should be pretty quick
                for (int i = 0; i < numCameras; i++)
                {
                    int bufferIdx = i;
                    SaveCubemapImage(cameraPixels, filenameBase, suffix, imagePath, i, bufferIdx);
                }
            }

            // If this is not here, the fade-in will drop frames.
            for (int i = 0; i < 2; i++)
                yield return new WaitForEndOfFrame();

            if (async && !usingGpuTransform && fadeDuringCapture)
                yield return StartCoroutine(FadeIn(fadeControls));

            filePath = imagePath + "/" + filenameBase + suffix;

            bool producedImageSuccess = false;

            {
                // Write pixels directly to .NET Bitmap for saving out
                // Based on https://msdn.microsoft.com/en-us/library/5ey6h79d%28v=vs.110%29.aspx
                Bitmap bitmap = new Bitmap(panoramaWidth, panoramaHeight * (captureStereoscopic ? 2 : 1), PixelFormat.Format32bppArgb);
                var bmpData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.WriteOnly, bitmap.PixelFormat);
                IntPtr ptr = bmpData.Scan0;
                byte[] pixelValues = new byte[Math.Abs(bmpData.Stride) * bitmap.Height];

                // Convert to equirectangular projection - use compute shader for better performance if supported by platform

                if (async)
                    yield return StartCoroutine(CubemapToEquirectangular(cameraPixelsBuffer, cameraPixels, convertPanoramaResultBuffer, cameraWidth, cameraHeight, pixelValues, bmpData.Stride, panoramaWidth, panoramaHeight, ssaaFactor, async));
                else
                {
                    var enumerator = CubemapToEquirectangular(cameraPixelsBuffer, cameraPixels, convertPanoramaResultBuffer, cameraWidth, cameraHeight, pixelValues, bmpData.Stride, panoramaWidth, panoramaHeight, ssaaFactor, async);
                    while (enumerator.MoveNext()) { }
                }

                producedImageSuccess = (pixelValues[3] == 255);

                yield return null;
                System.Runtime.InteropServices.Marshal.Copy(pixelValues, 0, ptr, pixelValues.Length);
                bitmap.UnlockBits(bmpData);
                yield return null;

                Log("Time to take panorama screenshot: " + (Time.realtimeSinceStartup - startTime) + " sec");

                if (producedImageSuccess)
                {
                    var thread = new Thread(() =>
                    {
                        Log("Saving equirectangular image");
                        // TODO: Use better image processing library to get decent JPEG quality out.
                        bitmap.Save(filePath, FormatToDrawingFormat(imageFormat));
                    });
                    thread.Start();
                    while (thread.ThreadState == ThreadState.Running)
                        if (async)
                            yield return null;
                        else
                            Thread.Sleep(0);
                }

                bitmap.Dispose();
            }

            // Release ComputeBuffers - all done with these
            foreach (var buffer in new ComputeBuffer[] {
                convertPanoramaResultBuffer,
                cameraPixelsBuffer,
                forceWaitResultConvertPanoramaStereoBuffer,
                forceWaitResultTextureToBufferBuffer })
                if (buffer != null)
                    buffer.Release();
            convertPanoramaResultBuffer = cameraPixelsBuffer = null;

            if (async && usingGpuTransform && fadeDuringCapture)
                yield return StartCoroutine(FadeIn(fadeControls));

            foreach (ScreenFadeControl fadeControl in fadeControls)
            {
                Destroy(fadeControl);
            }
            fadeControls.Clear();

            if (producedImageSuccess && uploadImages && !captureEveryFrame)
            {
                Log("Uploading image");
                imageFileBytes = File.ReadAllBytes(filePath);
                string mimeType = FormatMimeType(imageFormat);
                if (async)
                    yield return StartCoroutine(UploadImage(imageFileBytes, filenameBase + suffix, mimeType, async));
                else
                {
                    var enumerator = UploadImage(imageFileBytes, filenameBase + suffix, mimeType, async);
                    while (enumerator.MoveNext()) { }
                }
            }
            else
            {
                if (!producedImageSuccess)
                {
                    if (failSound != null && Camera.main != null)
                        audioSource.PlayOneShot(failSound);
                }
                else if (!captureEveryFrame && doneSound != null && Camera.main != null)
                {
                    audioSource.PlayOneShot(doneSound);
                }
                Capturing = false;
            }
        }
コード例 #19
0
ファイル: CapturePanorama.cs プロジェクト: EliCDavis/ped-sim
        /// <summary>
        /// Converts the captured per-camera pixel data into an equirectangular panorama,
        /// writing BGRA bytes into <paramref name="pixelValues"/> (row stride
        /// <paramref name="stride"/>). Chooses one of four paths from the
        /// captureStereoscopic / usingGpuTransform flags:
        ///  - stereo + GPU: the convert shader already ran incrementally during capture;
        ///    just read its result buffer back and validate the sentinel.
        ///  - stereo + CPU: full per-pixel reprojection done here (the long loop below),
        ///    producing left and right eye images stacked vertically.
        ///  - mono + GPU: dispatch the convert shader in vertical slices and read each
        ///    slice back.
        ///  - mono + CPU: delegate to CubemapToEquirectangularCpu.
        /// Implemented as a coroutine so the CPU path can yield between frames and stay
        /// within a per-frame time budget (cpuMillisecondsPerFrame).
        /// </summary>
        /// <param name="cameraPixelsBuffer">GPU-side camera pixel buffer. NOTE(review): not referenced
        /// directly in this body — presumably already bound to the shaders earlier; verify.</param>
        /// <param name="cameraPixels">CPU copy of the captured camera pixels, sampled via GetCameraPixelBilinear.</param>
        /// <param name="convertPanoramaResultBuffer">GPU result buffer; last element holds BufferSentinelValue on success.</param>
        /// <param name="pixelValues">Output byte array in BGRA order (matches .NET Bitmap Format32bppArgb memory layout).</param>
        /// <param name="async">When true, the caller runs this as a real coroutine; when false it is drained synchronously.</param>
        IEnumerator CubemapToEquirectangular(ComputeBuffer cameraPixelsBuffer, uint[] cameraPixels, ComputeBuffer convertPanoramaResultBuffer, int cameraWidth, int cameraHeight, byte[] pixelValues,
            int stride, int panoramaWidth, int panoramaHeight, int ssaaFactor, bool async)
        {
            if (captureStereoscopic && usingGpuTransform)
            {
                // Was already done earlier, just grab the result
                convertPanoramaResultBuffer.GetData(resultPixels);
                // Sentinel overwritten => the GPU ran out of memory for the result buffer.
                if (resultPixels[convertPanoramaResultBuffer.count - 1] != BufferSentinelValue)
                    ReportOutOfGraphicsMemory();

                // Stereo output is two panoramas stacked vertically, hence panoramaHeight * 2.
                writeOutputPixels(pixelValues, stride, panoramaWidth, panoramaHeight * 2, panoramaHeight * 2, /*yStart*/0);
            }
            else if (captureStereoscopic && !usingGpuTransform)
            {
                // TODO: Factor out into separate method
                float startTime = Time.realtimeSinceStartup;
                float processingTimePerFrame = cpuMillisecondsPerFrame / 1000.0f;

                for (int y = 0; y < panoramaHeight; y++)
                for (int x = 0; x < panoramaWidth;  x++)
                {
                    // Map output pixel to spherical coordinates:
                    // ycoord in [0,1) -> latitude in [-pi/2, pi/2), xcoord in [0,1) -> longitude in [-pi, pi).
                    float xcoord = (float)x / panoramaWidth;
                    float ycoord = (float)y / panoramaHeight;

                    float latitude = (ycoord - 0.5f) * Mathf.PI;
                    float sinLat = Mathf.Sin(latitude);
                    float cosLat = Mathf.Cos(latitude);
                    float longitude = (xcoord * 2.0f - 1.0f) * Mathf.PI;
                    float sinLong = Mathf.Sin(longitude);
                    float cosLong = Mathf.Cos(longitude);

                    // Scale IPD down as latitude moves toward poles to avoid discontinuities
                    float latitudeNormalized = latitude / (Mathf.PI / 2.0f); // Map to [-1, 1]
                    float ipdScale = IpdScaleFunction(latitudeNormalized);
                    float scaledEyeRadius = ipdScale * interpupillaryDistance / 2.0f;

                    int cameraNum;
                    float u, v;

                    float ipdScaleLerp = 1.0f - ipdScale * 5.0f; // Scale [0, 0.2] to [0, 1] and reverse
                    // Top/bottom cap
                    // Near the poles (ipdScaleLerp > 0) blend in a sample from the monoscopic
                    // top/bottom cap cameras (indexes 0 and 1) to hide stereo seams.
                    Color colorCap = new Color(0.0f, 0.0f, 0.0f, 0.0f);
                    if (ipdScaleLerp > 0.0f)
                    {
                        // Intersect the view ray with the y = +/-1 cap plane; inside the unit
                        // square means the cap camera covers this direction.
                        Vector3 equirectRayDirection = new Vector3(cosLat * sinLong, sinLat, cosLat * cosLong);
                        float distance = 1.0f / equirectRayDirection.y;
                        u = equirectRayDirection.x * distance; v = equirectRayDirection.z * distance;
                        if (u * u <= 1 && v * v <= 1)
                        {
                            if (equirectRayDirection.y > 0.0f)
                            {
                                cameraNum = 0;
                            }
                            else
                            {
                                // Bottom cap is mirrored horizontally relative to the top.
                                u = -u;
                                cameraNum = 1;
                            }

                            // Map [-1,1] plane coordinates to [0,1] texture coordinates.
                            u = (u + 1.0f) * 0.5f;
                            v = (v + 1.0f) * 0.5f;

                            colorCap = GetCameraPixelBilinear(cameraPixels, cameraNum, u, v);
                        }
                    }

                    // i == 0: left eye, i == 1: right eye (written at yStart 0 and panoramaHeight).
                    for (int i = 0; i < 2; i++)
                    {
                        // The following is equivalent to:
                        // Quaternion eyesRotation = Quaternion.Euler(0.0f, longitude * 360.0f / (2 * Mathf.PI), 0.0f);
                        // Vector3 initialEyePosition = (i == 0 ? Vector3.left : Vector3.right) * scaledEyeRadius;
                        // Vector3 pos = eyesRotation * initialEyePosition; // eye position
                        // Vector3 dir = eyesRotation * Vector3.forward; // gaze direction

                        Vector3 dir = new Vector3(sinLong, 0.0f, cosLong);

                        // Angle from the eye position on the camera circle to the gaze direction;
                        // sign flips between left and right eye.
                        float angle = (Mathf.PI / 2.0f - Mathf.Acos(scaledEyeRadius / circleRadius));
                        if (i == 0) angle = -angle;
                        float circlePointAngle = longitude + angle;
                        // Wrap into [0, 2*pi).
                        if (circlePointAngle < 0.0f) circlePointAngle += 2 * Mathf.PI;
                        if (circlePointAngle >= 2 * Mathf.PI) circlePointAngle -= 2 * Mathf.PI;
                        // Debug.Assert(circlePointAngle >= 0.0f && circlePointAngle < 2 * Mathf.PI);

                        // Fractional circle point index; the fraction is used below to blend
                        // between the two nearest circle points.
                        float circlePointNumber = circlePointAngle / (2 * Mathf.PI) * numCirclePoints;
                        int circlePoint0 = (int)Mathf.Floor(circlePointNumber) % numCirclePoints;

                        // Get color from each adjacent circle point
                        Color color0 = new Color(), color1 = new Color();
                        for (int j=0; j < 2; j++)
                        {
                            int circlePointIdx = (j == 0 ? circlePoint0 : (circlePoint0 + 1) % numCirclePoints);
                            float cameraPointAngle = 2 * Mathf.PI * circlePointIdx / numCirclePoints;
                            float sinCameraPointAngle = Mathf.Sin(cameraPointAngle);
                            float cosCameraPointAngle = Mathf.Cos(cameraPointAngle);

                            // Equivalent to (using fact that both dir and circlePointNorm are unit vectors):
                            // Quaternion circlePointRotation = Quaternion.Euler(0.0f, cameraPointAngle * 360.0f / (2 * Mathf.PI), 0.0f);
                            // Vector3 circlePointNormal = circlePointRotation * Vector3.forward;
                            // float newLongitude = Mathf.Sign(Vector3.Cross(circlePointNormal, dir).y) * Vector3.Angle(circlePointNormal, dir) * (2 * Mathf.PI) / 360.0f;

                            float newLongitude = Mathf.Sign(dir.x * cosCameraPointAngle - dir.z * sinCameraPointAngle) *
                                                 Mathf.Acos(dir.z * cosCameraPointAngle + dir.x * sinCameraPointAngle);
                            float cosNewLong = Mathf.Cos(newLongitude);
                            float sinNewLong = Mathf.Sin(newLongitude);

                            // Select which of the two cameras for this point to use and adjust ray to make camera plane perpendicular to axes
                            // Camera layout assumption: indexes 0/1 are the caps, then left/right
                            // pairs per circle point, then up/down pairs — matches the capture
                            // ordering used when rendering (left/right phase first, then up/down).
                            cameraNum = 2 + circlePointIdx * (CamerasPerCirclePoint / 2) + (newLongitude >= 0.0f ? 1 : 0);
                        
                            float longitudeAdjust = (newLongitude >= 0.0f ? -hFovAdjust : hFovAdjust);
                            float longSum = newLongitude + longitudeAdjust;

                            // Equivalent to:
                            // Vector3 textureRayDir = Quaternion.Euler(-latitude * 360.0f / (2 * Mathf.PI), newLongitude * 360.0f / (2 * Mathf.PI), 0.0f) * Vector3.forward;
                            // Vector3 textureRayDirAdjusted = Quaternion.Euler(0.0f, longitudeAdjust * 360.0f / (2 * Mathf.PI), 0.0f) * textureRayDir;
                            Vector3 textureRayDirAdjusted = new Vector3(cosLat * Mathf.Sin(longSum), sinLat, cosLat * Mathf.Cos(longSum)); 

                            // Perspective-project onto the camera plane, normalized by FOV half-tangents.
                            u =  textureRayDirAdjusted.x / textureRayDirAdjusted.z / tanHalfHFov;
                            v = -textureRayDirAdjusted.y / textureRayDirAdjusted.z / tanHalfVFov;

                            // There's a lot of vertical overlap so don't accept v near the edge of the left/right cameras, to avoid artifact pixels
                            if (! (textureRayDirAdjusted.z > 0.0f && u * u <= 1.0f && v * v <= 1.0f - 0.1f) )
                            {
                                // Ray falls outside the left/right camera frustum — fall back to the
                                // up/down camera pair for this circle point.
                                cameraNum = 2 + numCirclePoints * (CamerasPerCirclePoint / 2) + circlePointIdx * (CamerasPerCirclePoint / 2) + (latitude >= 0.0f ? 1 : 0);
                                float latitudeAdjust = (latitude >= 0.0f ? vFovAdjust : -vFovAdjust);
                                float cosLatAdjust = Mathf.Cos(latitudeAdjust);
                                float sinLatAdjust = Mathf.Sin(latitudeAdjust);
                                // Equivalent to:
                                // textureRayDirAdjusted = Quaternion.Euler(latitudeAdjust * 360.0f / (2 * Mathf.PI), 0.0f, 0.0f) * textureRayDir;
                                textureRayDirAdjusted = new Vector3(cosLat * sinNewLong,
                                                                    cosLatAdjust * sinLat - cosLat * cosNewLong * sinLatAdjust,
                                                                    sinLatAdjust * sinLat + cosLat * cosNewLong * cosLatAdjust);

                                u =  textureRayDirAdjusted.x / textureRayDirAdjusted.z / tanHalfHFov;
                                v = -textureRayDirAdjusted.y / textureRayDirAdjusted.z / tanHalfVFov;

                                // Debug.Assert(ipdScaleLerp >= 1.0 || (textureRayDirAdjusted.z > 0.0f && u * u <= 1.0f && v * v <= 1.0f));
                            }

                            // Map [-1,1] to [0,1] texture coordinates.
                            u = (u + 1.0f) * 0.5f;
        				    v = (v + 1.0f) * 0.5f;

                            Color col = GetCameraPixelBilinear(cameraPixels, cameraNum, u, v);
                            if (j == 0) color0 = col; else color1 = col;
                        }

                        // Blend the two adjacent circle-point samples by the fractional index,
                        // then blend toward the cap sample near the poles.
                        Color32 c = Color.Lerp(color0, color1, circlePointNumber - Mathf.Floor(circlePointNumber));
                        if (colorCap.a > 0.0f && ipdScaleLerp > 0.0f)
                            c = Color.Lerp(c, colorCap, ipdScaleLerp);

                        // BGRA byte order (matches .NET Bitmap Format32bppArgb little-endian layout).
                        int outputIdx = stride * (y + panoramaHeight * i) + x * 4;
                        pixelValues[outputIdx + 0] = c.b;
                        pixelValues[outputIdx + 1] = c.g;
                        pixelValues[outputIdx + 2] = c.r;
                        pixelValues[outputIdx + 3] = 255;
                    }

                    // Yield to the next frame when this frame's CPU budget is used up
                    // (checked only every 256 pixels to keep the check cheap).
                    if ((x & 0xFF) == 0 && Time.realtimeSinceStartup - startTime > processingTimePerFrame)
                    {
                        yield return null; // Wait until next frame
                        startTime = Time.realtimeSinceStartup;
                    }
                }
            }
            else if (!captureStereoscopic && usingGpuTransform)
            {
                // Process the panorama in ResultBufferSlices horizontal slices to bound
                // result-buffer size; each slice is dispatched then read back.
                int sliceHeight = (panoramaHeight + ResultBufferSlices - 1) / ResultBufferSlices;

                Log("Invoking GPU shader for equirectangular reprojection");
                // Slices entirely in the top/bottom quarters of the panorama can use the
                // cheaper Y-negative/Y-positive specialized kernels.
                int endYNegative   = (int)Mathf.Floor(panoramaHeight * 0.25f);
                int startYPositive = (int)Mathf.Ceil(panoramaHeight * 0.75f);
                for (int sliceNum = 0; sliceNum < ResultBufferSlices; sliceNum++)
                {
                    int startSlice = sliceNum * sliceHeight;
                    int endSlice = Math.Min(startSlice + sliceHeight, panoramaHeight);
                    convertPanoramaShader.SetInt("startY", sliceNum * sliceHeight);
                    convertPanoramaShader.SetInt("sliceHeight", endSlice - startSlice);
                    if (endSlice <= endYNegative)
                        convertPanoramaShader.Dispatch(convertPanoramaYNegativeKernelIdx, (panoramaWidth + threadsX - 1) / threadsX, (sliceHeight + threadsY - 1) / threadsY, 1);
                    else if (startSlice >= startYPositive)
                        convertPanoramaShader.Dispatch(convertPanoramaYPositiveKernelIdx, (panoramaWidth + threadsX - 1) / threadsX, (sliceHeight + threadsY - 1) / threadsY, 1);
                    else
                        // NOTE(review): this branch sizes the dispatch by panoramaHeight while the
                        // two branches above use sliceHeight — verify this asymmetry is intentional
                        // (extra thread groups may be benign if the kernel bounds-checks, but it
                        // looks like a copy/paste inconsistency).
                        convertPanoramaShader.Dispatch(convertPanoramaKernelIdx, (panoramaWidth + threadsX - 1) / threadsX, (panoramaHeight + threadsY - 1) / threadsY, 1);

                    convertPanoramaResultBuffer.GetData(resultPixels);
                    // Sentinel overwritten => the GPU ran out of memory for the result buffer.
                    if (resultPixels[convertPanoramaResultBuffer.count - 1] != BufferSentinelValue)
                        ReportOutOfGraphicsMemory();

                    writeOutputPixels(pixelValues, stride, panoramaWidth, sliceHeight, panoramaHeight, startSlice);
                }
            }
            else // if (!captureStereoscopic && !usingGpuTransform)
            {
                // Monoscopic CPU path: delegate. When async, run as a nested coroutine;
                // otherwise drain the enumerator synchronously.
                if (async)
                    yield return StartCoroutine(CubemapToEquirectangularCpu(cameraPixels, cameraWidth, cameraHeight, pixelValues,
                        stride, panoramaWidth, panoramaHeight, ssaaFactor, async));
                else
                {
                    var enumerator = CubemapToEquirectangularCpu(cameraPixels, cameraWidth, cameraHeight, pixelValues,
                        stride, panoramaWidth, panoramaHeight, ssaaFactor, async);
                    while (enumerator.MoveNext()) { }
                }
            }
        }
コード例 #20
0
    /// <summary>
    /// Builds a skinned marching-cubes mesh for the critter on the GPU.
    /// Fills a 16^3 noise volume with simplex noise, then for each spatial chunk
    /// dispatches the marching-cubes compute shader twice (first pass counts
    /// triangles so the poly buffer can be sized, second pass emits them),
    /// and finally assembles vertices/normals/colors/bone weights into a Mesh
    /// assigned to this GameObject's SkinnedMeshRenderer.
    /// Side effects: replaces the SkinnedMeshRenderer's sharedMesh/bones and
    /// calls critterDecorationsTest.TurnOn with one decoration point per poly.
    /// </summary>
    public void BuildMesh() {
        float startTime = Time.realtimeSinceStartup;
        
        // NOISE VOLUME! 16x16x16 RFloat volume filled on the GPU.
        RenderTexture DensityVolume = new RenderTexture(16, 16, 0, RenderTextureFormat.RFloat, RenderTextureReadWrite.sRGB);
        DensityVolume.volumeDepth = 16;
        DensityVolume.isVolume = true;
        DensityVolume.enableRandomWrite = true;
        DensityVolume.filterMode = FilterMode.Bilinear;
        DensityVolume.wrapMode = TextureWrapMode.Repeat;
        DensityVolume.Create();
        int mgen_id = CShaderSimplex.FindKernel("FillEmpty");
        // Links the RenderTexture to the "Result" RWTexture in the compute shader.
        CShaderSimplex.SetTexture(mgen_id, "Result", DensityVolume);
        CShaderSimplex.Dispatch(mgen_id, 1, 1, 16);  // clear pass: 1 x 1 x 16 thread groups
        mgen_id = CShaderSimplex.FindKernel("Simplex3d");
        CShaderSimplex.SetTexture(mgen_id, "Result", DensityVolume);
        CShaderSimplex.Dispatch(mgen_id, 1, 1, 16);  // Fill shared RenderTexture with GPU simplex Noise
        

        // Per-segment transforms (pos 3 + scale 3 + rot quat 4 = 10 floats).
        ComputeBuffer cBufferSegmentTransform = new ComputeBuffer(critterSegmentTransforms.Length, sizeof(float) * (3 + 3 + 4));
        cBufferSegmentTransform.SetData(critterSegmentTransforms);
        int kernelID = CShaderBuildMC.FindKernel("CSMain");
        CShaderBuildMC.SetBuffer(kernelID, "segmentTransformBuffer", cBufferSegmentTransform);
        CShaderBuildMC.SetTexture(kernelID, "noise_volume", DensityVolume);  // Noise 3D texture

        // Figure out how many chunks are needed (each chunk covers cellResolution * 8 world units per axis):
        int numChunksX = Mathf.CeilToInt(GlobalBoundingBoxDimensions.x / (cellResolution * 8f));
        int numChunksY = Mathf.CeilToInt(GlobalBoundingBoxDimensions.y / (cellResolution * 8f));
        int numChunksZ = Mathf.CeilToInt(GlobalBoundingBoxDimensions.z / (cellResolution * 8f));

        int totalNumChunks = numChunksX * numChunksY * numChunksZ;
        Poly[][] PolyArrayArray = new Poly[totalNumChunks][];  // This will hold the mesh data from the chunks calculated on the GPU
        int[] numPolysArray = new int[totalNumChunks];
        int totalNumPolys = 0;

        // Get each chunk!
        int chunkIndex = 0;
        for(int x = 0; x < numChunksX; x++) {
            for(int y = 0; y < numChunksY; y++) {
                for(int z = 0; z < numChunksZ; z++) {
                    // Figure out chunk offset amount (bounding box is centered on GlobalBoundingBoxOffset):
                    Vector3 chunkOffset = new Vector3(cellResolution * 8f * x, cellResolution * 8f * y, cellResolution * 8f * z) + GlobalBoundingBoxOffset - (GlobalBoundingBoxDimensions / 2f);

                    int[] numPolys = new int[1];
                    ComputeBuffer cBufferNumPoly = new ComputeBuffer(1, sizeof(int));
                    cBufferNumPoly.SetData(numPolys);

                    int id = CShaderBuildMC.FindKernel("CSMain");
                    CShaderBuildMC.SetInt("_CalcNumPolys", 1); // only calculate how many tris so I can correctly size the poly buffer
                    CShaderBuildMC.SetFloat("_GlobalOffsetX", chunkOffset.x);
                    CShaderBuildMC.SetFloat("_GlobalOffsetY", chunkOffset.y);
                    CShaderBuildMC.SetFloat("_GlobalOffsetZ", chunkOffset.z);
                    CShaderBuildMC.SetFloat("_CellSize", cellResolution);
                    CShaderBuildMC.SetVector("_ColorPrimary", colorPrimary);
                    CShaderBuildMC.SetVector("_ColorSecondary", colorSecondary);
                    CShaderBuildMC.SetFloat("_ColorNoiseScale", colorNoiseScale);
                    CShaderBuildMC.SetFloat("_ColorSmlAmplitude", colorSmlAmplitude);
                    CShaderBuildMC.SetFloat("_ColorMedAmplitude", colorMedAmplitude);
                    CShaderBuildMC.SetFloat("_ColorLrgAmplitude", colorLrgAmplitude);
                    CShaderBuildMC.SetFloat("_ColorContrast", colorContrast);
                    CShaderBuildMC.SetFloat("_ColorThreshold", colorThreshold);
                    CShaderBuildMC.SetVector("_SkinNoiseScale", skinNoiseScale);
                    CShaderBuildMC.SetFloat("_SkinNoiseAmplitude", skinNoiseAmplitude);
                    CShaderBuildMC.SetVector("_SkinLocalTaper", skinLocalTaper);
                    CShaderBuildMC.SetVector("_SkinLocalSinFreq", skinLocalSinFreq);
                    CShaderBuildMC.SetVector("_SkinLocalSinAmp", skinLocalSinAmp);
                    // Local Segment-space modifications, sin, taper, etc.

                    CShaderBuildMC.SetBuffer(id, "numPolyBuffer", cBufferNumPoly);
                    CShaderBuildMC.Dispatch(id, 1, 1, 1);  // calc num polys
                    cBufferNumPoly.GetData(numPolys);  // get numPolys
                    totalNumPolys += numPolys[0];
                    numPolysArray[chunkIndex] = numPolys[0];
                                        
                    if(numPolys[0] > 0) {   // only do this if there was at least 1 triangle in the test pass
                        Poly[] polyArray = new Poly[numPolys[0]];
                        // 18 floats (verts) + 9 floats (colors) + 6 floats (bone weights) + 6 ints (bone indices) per poly.
                        int cBufferStride = sizeof(float) * (18 + 9 + 6) + sizeof(int) * (6);
                        ComputeBuffer cBuffer = new ComputeBuffer(numPolys[0], cBufferStride);
                        cBuffer.SetData(polyArray);
                        
                        CShaderBuildMC.SetBuffer(id, "buffer", cBuffer);
                        CShaderBuildMC.SetInt("_CalcNumPolys", 0);  // Actually calc tris
                        CShaderBuildMC.Dispatch(id, 1, 1, 1);
                        cBuffer.GetData(polyArray);  // return data from GPU

                        PolyArrayArray[chunkIndex] = polyArray;
                        cBuffer.Dispose();
                    }

                    cBufferNumPoly.Dispose();

                    chunkIndex++;
                }
            }
        }

        
        CritterDecorationsTest.decorationStruct[] points = new CritterDecorationsTest.decorationStruct[totalNumPolys];
        
        //Construct mesh using received data 
        int vindex = 0;
        int decindex = 0;
                
        // Same number of tris as vertices because all triangles have duplicate verts - no shared vertices.
        Vector3[] vertices = new Vector3[totalNumPolys * 3];
        Color[] colors = new Color[totalNumPolys * 3];
        int[] tris = new int[totalNumPolys * 3];
        Vector2[] uvs = new Vector2[totalNumPolys * 3];
        Vector3[] normals = new Vector3[totalNumPolys * 3];
        BoneWeight[] weights = new BoneWeight[totalNumPolys * 3];

        //Parse triangles (each poly carries three corners: A, B, C)
        for(int i = 0; i < PolyArrayArray.Length; i++) {
            if(numPolysArray[i] > 0) {  // only do this if there was at least 1 triangle in the test pass
                for (int ix = 0; ix < numPolysArray[i]; ix++) {

                    Vector3 vPos;
                    Vector3 vOffset = new Vector3(0, 0, 0);   // uniform offset applied to all vertices (currently zero)
                                                              //A1,A2,A3
                    vPos = new Vector3(PolyArrayArray[i][ix].A1, PolyArrayArray[i][ix].A2, PolyArrayArray[i][ix].A3) + vOffset;
                    vertices[vindex] = vPos * _Scale;
                    normals[vindex] = new Vector3(PolyArrayArray[i][ix].NA1, PolyArrayArray[i][ix].NA2, PolyArrayArray[i][ix].NA3);
                    tris[vindex] = vindex;
                    uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);
                    colors[vindex] = new Color(PolyArrayArray[i][ix].CAR, PolyArrayArray[i][ix].CAG, PolyArrayArray[i][ix].CAB, 1.0f);
                    weights[vindex].boneIndex0 = PolyArrayArray[i][ix].BoneIndexA0;
                    weights[vindex].boneIndex1 = PolyArrayArray[i][ix].BoneIndexA1;
                    weights[vindex].weight0 = PolyArrayArray[i][ix].BoneWeightA0;
                    weights[vindex].weight1 = PolyArrayArray[i][ix].BoneWeightA1;

                    // One decoration point per poly, taken from corner A (un-scaled position).
                    points[decindex].pos = vPos;
                    points[decindex].normal = normals[vindex];
                    points[decindex].color = new Vector3(colors[vindex].r, colors[vindex].g, colors[vindex].b);
                    
                    decindex++;
                    vindex++;

                    //B1,B2,B3
                    vPos = new Vector3(PolyArrayArray[i][ix].B1, PolyArrayArray[i][ix].B2, PolyArrayArray[i][ix].B3) + vOffset;
                    vertices[vindex] = vPos * _Scale;
                    normals[vindex] = new Vector3(PolyArrayArray[i][ix].NB1, PolyArrayArray[i][ix].NB2, PolyArrayArray[i][ix].NB3);
                    tris[vindex] = vindex;
                    uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);
                    colors[vindex] = new Color(PolyArrayArray[i][ix].CBR, PolyArrayArray[i][ix].CBG, PolyArrayArray[i][ix].CBB, 1.0f);
                    weights[vindex].boneIndex0 = PolyArrayArray[i][ix].BoneIndexB0;
                    weights[vindex].boneIndex1 = PolyArrayArray[i][ix].BoneIndexB1;
                    weights[vindex].weight0 = PolyArrayArray[i][ix].BoneWeightB0;
                    weights[vindex].weight1 = PolyArrayArray[i][ix].BoneWeightB1;

                    vindex++;

                    //C1,C2,C3
                    vPos = new Vector3(PolyArrayArray[i][ix].C1, PolyArrayArray[i][ix].C2, PolyArrayArray[i][ix].C3) + vOffset;
                    vertices[vindex] = vPos * _Scale;
                    normals[vindex] = new Vector3(PolyArrayArray[i][ix].NC1, PolyArrayArray[i][ix].NC2, PolyArrayArray[i][ix].NC3);
                    tris[vindex] = vindex;
                    uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);
                    colors[vindex] = new Color(PolyArrayArray[i][ix].CCR, PolyArrayArray[i][ix].CCG, PolyArrayArray[i][ix].CCB, 1.0f);
                    weights[vindex].boneIndex0 = PolyArrayArray[i][ix].BoneIndexC0;
                    weights[vindex].boneIndex1 = PolyArrayArray[i][ix].BoneIndexC1;
                    weights[vindex].weight0 = PolyArrayArray[i][ix].BoneWeightC0;
                    weights[vindex].weight1 = PolyArrayArray[i][ix].BoneWeightC1;
                    
                    vindex++;
                }
            }            
        }
        
        //We have got all data and are ready to setup a new mesh!
        Mesh newMesh = new Mesh();
        newMesh.vertices = vertices;
        newMesh.uv = uvs;
        newMesh.triangles = tris;
        newMesh.normals = normals;  // use the GPU-computed normals as-is
        newMesh.colors = colors;
        newMesh.Optimize();        

        // Set up SKINNING: reuse the existing critter's segment Transforms as bones.
        Transform[] bones = new Transform[critter.critterSegmentList.Count];
        Matrix4x4[] bindPoses = new Matrix4x4[critter.critterSegmentList.Count];
        for(int seg = 0; seg < critter.critterSegmentList.Count; seg++) {
            bones[seg] = critter.critterSegmentList[seg].transform;
            // The bind pose maps mesh space to bone-local space at bind time.
            bindPoses[seg] = bones[seg].worldToLocalMatrix * transform.localToWorldMatrix;
        }
        newMesh.boneWeights = weights;
        newMesh.bindposes = bindPoses;
        SkinnedMeshRenderer skinnedMeshRenderer = this.GetComponent<SkinnedMeshRenderer>();
        skinnedMeshRenderer.bones = bones;
        skinnedMeshRenderer.sharedMesh = newMesh;
        skinnedMeshRenderer.enabled = true;
                
        cBufferSegmentTransform.Release();
        // FIX: release the temporary noise volume; it was previously leaked every call.
        DensityVolume.Release();

        critterDecorationsTest.TurnOn(points);

        float calcTime = Time.realtimeSinceStartup - startTime;
        Debug.Log("MeshCreated! " + calcTime.ToString());
    }
Code example #21
0
    ///<summary>
    /// Input a cubemap, and then prefilter this cubemap for image-based lighting equation.
    /// Each mip level i is convolved on the GPU with roughness i / log2(width) and the
    /// result is written back into that mip level of the same cubemap.
    ///</summary>
    ///<param name="cubemap">Cubemap whose mip chain is overwritten in place; no-op when null.</param>
    public static void PreFilterEnviromentMap(Cubemap cubemap)
    {
        if (cubemap)
        {
            int cube_width = cubemap.width;

            Vector3 vec3 = new Vector3();
            // Create a read buffer to store cubemap direction data.
            ComputeBuffer cubeMatrix = new ComputeBuffer(sgFaceInput.Length, Marshal.SizeOf(vec3));
            cubeMatrix.SetData(sgFaceInput);

            Vector4 vec4 = new Vector4();
            // Create a output buffer (sized for the full-resolution level; reused for the smaller mips).
            ComputeBuffer dstData = new ComputeBuffer(cube_width * cube_width * 6, Marshal.SizeOf(vec4));

            // NOTE(review): editor-only — loads the compute shader via AssetDatabase.
            ComputeShader CSEnvFilter;
            CSEnvFilter = (ComputeShader)AssetDatabase.LoadAssetAtPath("Assets/EnvironmentMapTool/ComputeShader/FilterCubeMap.compute", typeof(ComputeShader));
            // Set cubemap to shader.
            CSEnvFilter.SetTexture(0, "gCubemap", cubemap);
            // Set read write buffer for data output.
            CSEnvFilter.SetBuffer(0, "gOutput", dstData);
            // Set cubemap direction data.
            CSEnvFilter.SetBuffer(0, "sgFace2DMapping", cubeMatrix);

            // Also allocated once at full resolution and reused for every mip level.
            Color[] outputData = new Color[cube_width * cube_width * 6];

            // How many mipmap level?
            float mipLevelNum = Mathf.Log(cube_width, 2);

            // Loop each mipmap level with different roughness (0 at base up to 1 at the 1x1 level).
            for (int i = 0; i < mipLevelNum + 1; i++)
            {
                // The texel number of a face.
                int image_size = cube_width * cube_width;
                // The texel number of a cubemap.
                int num_threads = image_size * 6;
                // Set roughness value (between 0~1).
                CSEnvFilter.SetFloat("gRoughness", (i / mipLevelNum));
                // The width of a mipmap level of a cube map.
                CSEnvFilter.SetInt("gWidth", cube_width);
                // The total number of thread groups (the number of my thread group : 64).
                num_threads = (int)Mathf.Ceil((float)num_threads / 64.0f);
                // Run compute shader.
                CSEnvFilter.Dispatch(0, num_threads, 1, 1);
                // Get data from the read & write buffer.
                dstData.GetData(outputData);
                // Copy data to the original cubemap.
                SetCubeMipMap(cubemap, outputData, image_size, i);
                // Half the size for the next mipmap level.
                cube_width = cube_width / 2;
            }

            // Set false to disable auto-generating mipmap.
            cubemap.Apply(false);
            // Use trilinear mode to interpolate different mipmap levels.
            cubemap.filterMode = FilterMode.Trilinear;
            cubemap.wrapMode = TextureWrapMode.Clamp;
            cubemap.name = cubemap.name + "(PreFilter)";

            // Release data.
            dstData.Release();
            cubeMatrix.Release();
        }
    }
Code example #22
0
File: Helper.cs  Project: illvisation/cellVIEW
    /// <summary>
    /// Reads the integer id stored at a screen coordinate of an id texture by
    /// running the ReadPixel compute shader and fetching its single output value.
    /// </summary>
    /// <param name="texture">Id texture sampled by the compute shader.</param>
    /// <param name="coord">Screen-space coordinate; y is flipped against Screen.height.</param>
    /// <returns>The id value written by the shader for that pixel.</returns>
    public static int ReadPixelId(RenderTexture texture, Vector2 coord)
    {
        // One-element output buffer that receives the picked id.
        var resultBuffer = new ComputeBuffer(1, sizeof(int));

        var readPixelCS = ComputeShaderManager.Instance.ReadPixelCS;
        readPixelCS.SetInts("_Coord", (int)coord.x, Screen.height - (int)coord.y);
        readPixelCS.SetTexture(0, "_IdTexture", texture);
        readPixelCS.SetBuffer(0, "_OutputBuffer", resultBuffer);
        readPixelCS.Dispatch(0, 1, 1, 1);

        var result = new[] { 0 };
        resultBuffer.GetData(result);
        resultBuffer.Release();

        return result[0];
    }
Code example #23
0
    /// <summary>
    /// Test/demo variant of the GPU marching-cubes mesh builder: hard-codes two
    /// segment transforms, counts then generates triangles per chunk on the GPU,
    /// and assigns the resulting (unskinned) mesh to this GameObject's MeshFilter.
    /// </summary>
    public void BuildMesh() {
        float startTime = Time.realtimeSinceStartup;

        // CritterSegmentTransforms: two hard-coded test segments (pos, rot quat, scale).
        SegmentTransform[] critterSegmentTransforms = new SegmentTransform[2];
        Quaternion rot = Quaternion.Euler(55f, 12f, -230f);
        Quaternion rot2 = Quaternion.Euler(-35f, 112f, -67f);
        SegmentTransform segmentTransform;
        segmentTransform.PX = 10f;
        segmentTransform.PY = 8f;
        segmentTransform.PZ = 13f;
        segmentTransform.RX = rot.x;
        segmentTransform.RY = rot.y;
        segmentTransform.RZ = rot.z;
        segmentTransform.RW = rot.w;
        segmentTransform.SX = 6f;
        segmentTransform.SY = 7f;
        segmentTransform.SZ = 2.5f;
        SegmentTransform segmentTransform2;
        segmentTransform2.PX = 22f;
        segmentTransform2.PY = 11f;
        segmentTransform2.PZ = 15f;
        segmentTransform2.RX = rot2.x;
        segmentTransform2.RY = rot2.y;
        segmentTransform2.RZ = rot2.z;
        segmentTransform2.RW = rot2.w;
        segmentTransform2.SX = 9f;
        segmentTransform2.SY = 3f;
        segmentTransform2.SZ = 7f;
        critterSegmentTransforms[0] = segmentTransform;
        critterSegmentTransforms[1] = segmentTransform2;

        // 10 floats per segment: position 3 + rotation 4 + scale 3.
        ComputeBuffer cBufferSegmentTransform = new ComputeBuffer(critterSegmentTransforms.Length, sizeof(float) * (3 + 3 + 4));
        cBufferSegmentTransform.SetData(critterSegmentTransforms);
        int kernelID = CShaderBuildMC.FindKernel("CSMain");
        CShaderBuildMC.SetBuffer(kernelID, "segmentTransformBuffer", cBufferSegmentTransform);

        // Figure out how many chunks are needed:
        // NOTE(review): chunk count here multiplies by cellResolution while the chunk offset
        // below also multiplies by cellResolution — confirm against the other BuildMesh
        // variant, which divides instead.
        int numChunksX = Mathf.CeilToInt(GlobalBoundingBoxDimensions.x * cellResolution / 8f);
        int numChunksY = Mathf.CeilToInt(GlobalBoundingBoxDimensions.y * cellResolution / 8f);
        int numChunksZ = Mathf.CeilToInt(GlobalBoundingBoxDimensions.z * cellResolution / 8f);
        Debug.Log("numChunks: (" + numChunksX.ToString() + ", " + numChunksY.ToString() + ", " + numChunksZ.ToString() + ")");

        int totalNumChunks = numChunksX * numChunksY * numChunksZ;
        Poly[][] PolyArrayArray = new Poly[totalNumChunks][];  // This will hold the mesh data from the chunks calculated on the GPU
        int[] numPolysArray = new int[totalNumChunks];
        int totalNumPolys = 0;

        // Get each chunk! Two GPU passes per chunk: count polys, then emit them.
        int chunkIndex = 0;
        for(int x = 0; x < numChunksX; x++) {
            for(int y = 0; y < numChunksY; y++) {
                for(int z = 0; z < numChunksZ; z++) {
                    // Figure out chunk offset amount:
                    Vector3 chunkOffset = new Vector3(cellResolution * 8f * x, cellResolution * 8f * y, cellResolution * 8f * z);

                    int[] numPolys = new int[1];
                    ComputeBuffer cBufferNumPoly = new ComputeBuffer(1, sizeof(int));
                    cBufferNumPoly.SetData(numPolys);

                    int id = CShaderBuildMC.FindKernel("CSMain");
                    CShaderBuildMC.SetInt("_CalcNumPolys", 1); // only calculate how many tris so I can correctly size the poly buffer
                    CShaderBuildMC.SetFloat("_GlobalOffsetX", chunkOffset.x);
                    CShaderBuildMC.SetFloat("_GlobalOffsetY", chunkOffset.y);
                    CShaderBuildMC.SetFloat("_GlobalOffsetZ", chunkOffset.z);
                    CShaderBuildMC.SetBuffer(id, "numPolyBuffer", cBufferNumPoly);
                    CShaderBuildMC.Dispatch(id, 1, 1, 1);  // calc num polys      
                    cBufferNumPoly.GetData(numPolys);  // get numPolys
                    Debug.Log("Chunk: " + (z + (numChunksZ * y) + (numChunksZ * numChunksY * x)).ToString() + ", cBufferNumPoly.GetData(numPolys): " + numPolys[0].ToString() + ", chunkIndex: " + chunkIndex.ToString());
                    totalNumPolys += numPolys[0];
                    numPolysArray[chunkIndex] = numPolys[0];

                    //_MaxBufferSize = numPolys[0];
                    if(numPolys[0] > 0) {   // only do this if there was at least 1 triangle in the test pass
                        Poly[] polyArray = new Poly[numPolys[0]];
                        ComputeBuffer cBuffer = new ComputeBuffer(numPolys[0], 72);  // 18 floats x 4 bytes/float = 72
                        cBuffer.SetData(polyArray);

                        CShaderBuildMC.SetBuffer(id, "buffer", cBuffer);
                        CShaderBuildMC.SetInt("_CalcNumPolys", 0);  // Actually calc tris        
                        CShaderBuildMC.Dispatch(id, 1, 1, 1);
                        cBuffer.GetData(polyArray);  // return data from GPU

                        PolyArrayArray[chunkIndex] = polyArray;
                        cBuffer.Dispose();
                    }

                    cBufferNumPoly.Dispose();

                    chunkIndex++;
                }
            }
        }

        //Construct mesh using received data        
        Mesh newMesh = new Mesh();

        int vindex = 0;
                
        // Same number of tris as vertices because all triangles have duplicate verts - no shared vertices.
        Vector3[] vertices = new Vector3[totalNumPolys * 3];
        int[] tris = new int[totalNumPolys * 3];
        Vector2[] uvs = new Vector2[totalNumPolys * 3];
        Vector3[] normals = new Vector3[totalNumPolys * 3];

        //Parse triangles (three corners A, B, C per poly)
        for(int i = 0; i < PolyArrayArray.Length; i++) {
            if(numPolysArray[i] > 0) {  // only do this if there was at least 1 triangle in the test pass
                for (int ix = 0; ix < numPolysArray[i]; ix++) {

                    Vector3 vPos;
                    Vector3 vOffset = new Vector3(0, 0, 0);   // uniform offset applied to all vertices (currently zero)
                                                              //A1,A2,A3
                    vPos = new Vector3(PolyArrayArray[i][ix].A1, PolyArrayArray[i][ix].A2, PolyArrayArray[i][ix].A3) + vOffset;
                    vertices[vindex] = vPos * _Scale;
                    normals[vindex] = new Vector3(PolyArrayArray[i][ix].NA1, PolyArrayArray[i][ix].NA2, PolyArrayArray[i][ix].NA3);
                    tris[vindex] = vindex;
                    uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);

                    vindex++;

                    //B1,B2,B3
                    vPos = new Vector3(PolyArrayArray[i][ix].B1, PolyArrayArray[i][ix].B2, PolyArrayArray[i][ix].B3) + vOffset;
                    vertices[vindex] = vPos * _Scale;
                    normals[vindex] = new Vector3(PolyArrayArray[i][ix].NB1, PolyArrayArray[i][ix].NB2, PolyArrayArray[i][ix].NB3);
                    tris[vindex] = vindex;
                    uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);

                    vindex++;

                    //C1,C2,C3
                    vPos = new Vector3(PolyArrayArray[i][ix].C1, PolyArrayArray[i][ix].C2, PolyArrayArray[i][ix].C3) + vOffset;
                    vertices[vindex] = vPos * _Scale;
                    normals[vindex] = new Vector3(PolyArrayArray[i][ix].NC1, PolyArrayArray[i][ix].NC2, PolyArrayArray[i][ix].NC3);
                    tris[vindex] = vindex;
                    uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);

                    vindex++;
                }
            }            
        }
        
        //We have got all data and are ready to setup a new mesh!

        //newMesh.Clear();

        newMesh.vertices = vertices;
        newMesh.uv = uvs; //Unwrapping.GeneratePerTriangleUV(NewMesh);
        newMesh.triangles = tris;
        newMesh.normals = normals; //NewMesh.RecalculateNormals();
        // NOTE(review): RecalculateNormals() overwrites the GPU-computed normals
        // assigned on the previous line — confirm whether that is intended.
        newMesh.RecalculateNormals();
        newMesh.Optimize();

        //cBuffer.Dispose();
        //cBufferNumPoly.Dispose();
        //cBuffer.Release();
        cBufferSegmentTransform.Release();

        this.GetComponent<MeshFilter>().sharedMesh = newMesh;
        float calcTime = Time.realtimeSinceStartup - startTime;
        Debug.Log("MeshCreated! " + calcTime.ToString());
    }
Code example #24
0
    /// <summary>
    /// Generates the trainer terrain on the GPU: dispatches the terrain compute
    /// shader to fill a 16x16 vertex-grid mesh buffer (plus a brush-stroke buffer),
    /// reads the mesh data back, triangulates the grid on the CPU, and assigns the
    /// resulting mesh to the terrain manager's MeshFilter and MeshCollider.
    /// </summary>
    private void InitTerrain() {
        int meshGridSize = 16;
        int numTerrainMeshVertices = meshGridSize * meshGridSize;
        // Per-vertex: pos 3 + normal 3 + uv 2 + color 4 floats.
        terrainMeshBuffer = new ComputeBuffer(numTerrainMeshVertices, sizeof(float) * (3 + 3 + 2 + 4));
        int numStrokesPerVertX = 16;
        int numStrokesPerVertZ = 16;
        int numTerrainStrokes = meshGridSize * meshGridSize * numStrokesPerVertX * numStrokesPerVertZ;
        // Per-stroke: 14 floats + 1 int (layout defined by the compute shader's stroke struct).
        terrainStrokesBuffer = new ComputeBuffer(numTerrainStrokes, sizeof(float) * (3 + 3 + 3 + 3 + 3 + 2) + sizeof(int) * 1);

        //terrainGeneratorCompute = new ComputeShader();
        int kernel_id = terrainGeneratorCompute.FindKernel("CSMain");
        terrainGeneratorCompute.SetFloat("_GridSideLength", terrainSize);
        terrainGeneratorCompute.SetFloat("_NoiseFrequency", terrainNoiseFrequency);
        terrainGeneratorCompute.SetFloat("_NoiseAmplitude", terrainNoiseAmplitude);
        terrainGeneratorCompute.SetFloat("_GroundHeight", terrainAltitude);
        terrainGeneratorCompute.SetInt("_NumGroupsX", numStrokesPerVertX);
        terrainGeneratorCompute.SetInt("_NumGroupsZ", numStrokesPerVertZ);
        terrainGeneratorCompute.SetBuffer(kernel_id, "buf_StrokeData", terrainStrokesBuffer);
        terrainGeneratorCompute.SetBuffer(kernel_id, "buf_MeshData", terrainMeshBuffer);

        meshData[] meshDataArray = new meshData[numTerrainMeshVertices];  // memory to receive data from computeshader
        terrainGeneratorCompute.Dispatch(kernel_id, numStrokesPerVertX, 1, numStrokesPerVertZ);  // fill buffers

        terrainMeshBuffer.GetData(meshDataArray);  // download mesh Data

        // generate Mesh from data:
        // Vertices are shared across grid squares here (unlike the marching-cubes meshes).
        Vector3[] vertices = new Vector3[numTerrainMeshVertices];
        Color[] colors = new Color[numTerrainMeshVertices];
        int[] tris = new int[2 * (meshGridSize - 1) * (meshGridSize - 1) * 3];
        Vector2[] uvs = new Vector2[numTerrainMeshVertices];
        Vector3[] normals = new Vector3[numTerrainMeshVertices];

        // Copy per-vertex attributes straight out of the GPU struct array.
        for(int i = 0; i < numTerrainMeshVertices; i++) {
            vertices[i] = meshDataArray[i].pos;
            normals[i] = meshDataArray[i].normal;
            uvs[i] = meshDataArray[i].uv;
            colors[i] = meshDataArray[i].color;            
        }
        // Figure out triangles: two per grid square.
        int index = 0;
        int numSquares = meshGridSize - 1;
        for (int y = 0; y < numSquares; y++) {
            for(int x = 0; x < numSquares; x++) {
                // counterclockwise winding order:
                tris[index] = ((y + 1) * meshGridSize) + x;
                tris[index + 1] = (y * meshGridSize) + x + 1;
                tris[index + 2] = (y * meshGridSize) + x;

                tris[index + 3] = ((y + 1) * meshGridSize) + x;
                tris[index + 4] = ((y + 1) * meshGridSize) + x + 1;
                tris[index + 5] = (y * meshGridSize) + x + 1;

                index = index + 6;
            }
        }

        Mesh terrainMesh = new Mesh();
        terrainMesh.vertices = vertices;
        terrainMesh.uv = uvs; //Unwrapping.GeneratePerTriangleUV(NewMesh);
        terrainMesh.triangles = tris;
        terrainMesh.normals = normals; //NewMesh.RecalculateNormals();        
        terrainMesh.colors = colors;
        terrainMesh.RecalculateNormals();
        terrainMesh.RecalculateBounds();

        trainerTerrainManager.GetComponent<MeshFilter>().sharedMesh = terrainMesh;
        trainerTerrainManager.GetComponent<MeshCollider>().sharedMesh = terrainMesh;

        // Only the mesh buffer is released here; terrainStrokesBuffer is a field and
        // is presumably consumed elsewhere (e.g. for rendering strokes) — verify it
        // is released when no longer needed.
        terrainMeshBuffer.Release();
        terrainMeshBuffer.Dispose();
    }
Code example #25
0
File: PreProcessAtmo.cs  Project: Climberfx/Scatterer
        /// <summary>
        /// Dumps the contents of a render texture to disk as raw binary floats.
        /// </summary>
        /// <param name="size">Number of texels to read.</param>
        /// <param name="channels">Float channels per texel.</param>
        /// <param name="fileName">Output file name (".raw" is appended).</param>
        /// <param name="rtex">Source render texture.</param>
        void SaveAsRaw(int size, int channels, string fileName, RenderTexture rtex)
        {
            // Pull the texture contents into a compute buffer, then read back to the CPU.
            var buffer = new ComputeBuffer(size, sizeof(float) * channels);
            CBUtility.ReadFromRenderTexture(rtex, channels, buffer, m_readData);

            var data = new float[size * channels];
            buffer.GetData(data);
            buffer.Release();

            // Reinterpret the floats as bytes (4 bytes per float) and write them out.
            var byteArray = new byte[size * 4 * channels];
            System.Buffer.BlockCopy(data, 0, byteArray, 0, byteArray.Length);
            System.IO.File.WriteAllBytes(Application.dataPath + m_filePath + fileName + ".raw", byteArray);
        }
Code example #26
0
    /// <summary>
    /// Single-chunk variant of the GPU marching-cubes mesh builder: one counting
    /// dispatch sizes the poly buffer, a second dispatch emits the triangles, and
    /// the result is assembled into a mesh assigned to this GameObject's MeshFilter.
    /// </summary>
    public void BuildMesh() {
        float startTime = Time.realtimeSinceStartup;
        
        int[] numPolys = new int[1];        
        ComputeBuffer cBufferNumPoly = new ComputeBuffer(1, sizeof(int));        
        cBufferNumPoly.SetData(numPolys);

        int id = CShaderBuildMC.FindKernel("CSMain");        
        CShaderBuildMC.SetInt("_CalcNumPolys", 1); // only calculate how many tris so I can correctly size the poly buffer
        CShaderBuildMC.SetBuffer(id, "numPolyBuffer", cBufferNumPoly);        
        CShaderBuildMC.Dispatch(id, 1, 1, 1);  // calc num polys      
        cBufferNumPoly.GetData(numPolys);  // get numPolys
        Debug.Log("cBufferNumPoly.GetData(numPolys): " + numPolys[0].ToString());

        // NOTE(review): if the counting pass reports 0 polys, a zero-count
        // ComputeBuffer is created below — confirm the shader always emits
        // at least one triangle, or guard this case.
        _MaxBufferSize = numPolys[0];
        Poly[] polyArray = new Poly[_MaxBufferSize];
        ComputeBuffer cBuffer = new ComputeBuffer(_MaxBufferSize, 72);  // 18 floats x 4 bytes/float = 72
        cBuffer.SetData(polyArray);

        CShaderBuildMC.SetBuffer(id, "buffer", cBuffer);
        CShaderBuildMC.SetInt("_CalcNumPolys", 0);  // Actually calc tris        
        CShaderBuildMC.Dispatch(id, 1, 1, 1);
        cBuffer.GetData(polyArray);  // return data from GPU
        
        //Construct mesh using received data
        Mesh newMesh = new Mesh();

        int vindex = 0;
        //int count = 0;

        //Count real data length   --- Looks like there might be wasted data??? -- investigate how to Optimize
        /*for (count = 0; count < _MaxBufferSize; count++) {
            if (polyArray[count].A1 == 0.0f && polyArray[count].B1 == 0.0f && polyArray[count].C1 == 0.0 &&
                polyArray[count].A2 == 0.0f && polyArray[count].B2 == 0.0f && polyArray[count].C2 == 0.0 &&
                polyArray[count].A3 == 0.0f && polyArray[count].B3 == 0.0f && polyArray[count].C3 == 0.0) {

                break;
            }
        }*/
        //Debug.Log(count+" triangles got");
        // Same number of tris as vertices because all triangles have duplicate verts - no shared vertices.
        Vector3[] vertices = new Vector3[_MaxBufferSize * 3];
        int[] tris = new int[_MaxBufferSize * 3];
        Vector2[] uvs = new Vector2[_MaxBufferSize * 3];
        Vector3[] normals = new Vector3[_MaxBufferSize * 3];

        //Parse triangles (three corners A, B, C per poly)
        for (int ix = 0; ix < _MaxBufferSize; ix++) {

            Vector3 vPos;
            Vector3 vOffset = new Vector3(0, 0, 0);   // uniform offset applied to all vertices (currently zero)
                                                            //A1,A2,A3
            vPos = new Vector3(polyArray[ix].A1, polyArray[ix].A2, polyArray[ix].A3) + vOffset;
            vertices[vindex] = vPos * _Scale;
            normals[vindex] = new Vector3(polyArray[ix].NA1, polyArray[ix].NA2, polyArray[ix].NA3);
            tris[vindex] = vindex;
            uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);

            vindex++;

            //B1,B2,B3
            vPos = new Vector3(polyArray[ix].B1, polyArray[ix].B2, polyArray[ix].B3) + vOffset;
            vertices[vindex] = vPos * _Scale;
            normals[vindex] = new Vector3(polyArray[ix].NB1, polyArray[ix].NB2, polyArray[ix].NB3);
            tris[vindex] = vindex;
            uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);

            vindex++;

            //C1,C2,C3
            vPos = new Vector3(polyArray[ix].C1, polyArray[ix].C2, polyArray[ix].C3) + vOffset;
            vertices[vindex] = vPos * _Scale;
            normals[vindex] = new Vector3(polyArray[ix].NC1, polyArray[ix].NC2, polyArray[ix].NC3);
            tris[vindex] = vindex;
            uvs[vindex] = new Vector2(vertices[vindex].z, vertices[vindex].x);

            vindex++;
        }

        //We have got all data and are ready to setup a new mesh!

        //newMesh.Clear();

        newMesh.vertices = vertices;
        newMesh.uv = uvs; //Unwrapping.GeneratePerTriangleUV(NewMesh);
        newMesh.triangles = tris;
        newMesh.normals = normals; //NewMesh.RecalculateNormals();
        // NOTE(review): RecalculateNormals() overwrites the GPU-computed normals
        // assigned on the previous line — confirm whether that is intended.
        newMesh.RecalculateNormals();
        newMesh.Optimize();

        cBuffer.Dispose();
        cBufferNumPoly.Dispose();
        //cBuffer.Release();

        this.GetComponent<MeshFilter>().sharedMesh = newMesh;
    }
コード例 #27
0
ファイル: PreProcessAtmo.cs プロジェクト: Climberfx/Scatterer
        /// <summary>
        /// Debug helper: reads a float RenderTexture back to the CPU and writes it out
        /// as an 8-bit PNG so its contents can be inspected visually.
        /// </summary>
        /// <param name="width">Texture width in texels.</param>
        /// <param name="height">Texture height in texels.</param>
        /// <param name="channels">Float components per texel stored in <paramref name="rtex"/> (1..4 expected).</param>
        /// <param name="fileName">Output file name, without extension; written under Application.dataPath + m_filePath.</param>
        /// <param name="rtex">Source render texture to read back.</param>
        /// <param name="scale">Multiplier applied to each colour before 8-bit quantisation.</param>
        void SaveAs8bit(int width, int height, int channels, string fileName, RenderTexture rtex, float scale = 1.0f)
        {
            //Only used to get a visible image for debugging.

            ComputeBuffer buffer = new ComputeBuffer(width*height, sizeof(float)*channels);

            try
            {
                CBUtility.ReadFromRenderTexture(rtex, channels, buffer, m_readData);

                float[] data = new float[width*height* channels];

                buffer.GetData(data);

                Texture2D tex = new Texture2D(width, height);

                // Build the whole pixel array once and upload with SetPixels:
                // far cheaper than width*height individual SetPixel calls.
                Color[] pixels = new Color[width * height];

                for(int y = 0; y < height; y++)
                {
                    for(int x = 0; x < width; x++)
                    {
                        int idx = (x + y * width) * channels;
                        Color col = new Color(0,0,0,1);

                        col.r = data[idx + 0];

                        if(channels > 1)
                            col.g = data[idx + 1];

                        if(channels > 2)
                            col.b = data[idx + 2];

                        // Note: col * scale also scales alpha, matching the original behaviour.
                        pixels[x + y * width] = col * scale;
                    }
                }

                tex.SetPixels(pixels);
                tex.Apply();

                byte[] bytes = tex.EncodeToPNG();

                System.IO.File.WriteAllBytes(Application.dataPath + m_filePath + fileName + ".png", bytes);

                // Destroy the temporary texture so repeated debug dumps don't leak memory.
                UnityEngine.Object.DestroyImmediate(tex);
            }
            finally
            {
                // ComputeBuffers wrap native GPU memory; release even if the
                // readback or the file write throws.
                buffer.Release();
            }
        }
コード例 #28
0
ファイル: PEParticles.cs プロジェクト: MrJoy/SIMDExample
    /// <summary>
    /// Benchmarks the particle compute shader: repeatedly runs both kernels plus a
    /// CPU readback until at least BenchmarkTimeout seconds have accumulated, then
    /// appends the mean iteration time in milliseconds to m_output.
    /// </summary>
    /// <param name="text">Label prefixed to the result line written to m_output.</param>
    /// <returns>Always 0 (also when compute shaders are unsupported).</returns>
    int RunGPUBenchmark(string text)
    {
        if (!SystemInfo.supportsComputeShaders)
        {
            m_output.text = m_output.text + text + "not available\n";
            return 0;
        }

        ComputeBuffer cb_params = new ComputeBuffer(1, CSParams.size);
        ComputeBuffer cb_particles = new ComputeBuffer(BenchmarkParticleCount, peParticle.size);
        var particles = new peParticle[BenchmarkParticleCount];

        try
        {
            {
                // Fixed seed so every benchmark run measures the identical workload.
                UnityEngine.Random.seed = 0;
                for (int i = 0; i < particles.Length; ++i)
                {
                    particles[i].position = new Vector3(
                        UnityEngine.Random.Range(-5.0f, 5.0f),
                        UnityEngine.Random.Range(-5.0f, 5.0f) + 5.0f,
                        UnityEngine.Random.Range(-5.0f, 5.0f) );
                }
                cb_particles.SetData(particles);
            }
            {
                CSParams[] csparams = new CSParams[1];
                csparams[0].particle_count = BenchmarkParticleCount;
                csparams[0].particle_size = m_particle_size;
                csparams[0].rcp_particle_size2 = 1.0f / (m_particle_size * 2.0f);
                csparams[0].pressure_stiffness = m_pressure_stiffness;
                csparams[0].wall_stiffness = m_wall_stiffness;
                csparams[0].timestep = BenchmarkDeltaTime;
                cb_params.SetData(csparams);
            }
            // Bind both buffers to both kernels (0 and 1).
            for (int i = 0; i < 2; ++i )
            {
                m_cs_particle_core.SetBuffer(i, "g_params", cb_params);
                m_cs_particle_core.SetBuffer(i, "g_particles", cb_particles);
            }

            // Round up so every particle is covered even when the count is not an
            // exact multiple of KernelBlockSize (integer division would silently
            // skip the remainder). Assumes the shader guards out-of-range threads
            // with g_params.particle_count — TODO confirm against the .compute file.
            int groupCount = (BenchmarkParticleCount + KernelBlockSize - 1) / KernelBlockSize;

            float elapsed_total = 0.0f;
            int num_try = 0;
            while (elapsed_total < BenchmarkTimeout)
            {
                float t = Time.realtimeSinceStartup;
                m_cs_particle_core.Dispatch(0, groupCount, 1, 1);
                m_cs_particle_core.Dispatch(1, groupCount, 1, 1);
                // GetData blocks until the GPU finishes, so the timing includes
                // the full kernel execution, not just the dispatch call.
                cb_particles.GetData(particles);
                elapsed_total += Time.realtimeSinceStartup - t;
                ++num_try;
            }

            m_output.text = m_output.text + text + (elapsed_total / num_try * 1000.0f).ToString("0.00") + "ms\n";
        }
        finally
        {
            // Native GPU buffers must be released even if SetData/Dispatch throws.
            cb_params.Release();
            cb_particles.Release();
        }
        return 0;
    }