Example 1
0
        /// <summary>
        /// Creates a KNN container over the given point cloud.
        /// </summary>
        /// <param name="points">Points to index; the container keeps a reference, it does not copy.</param>
        /// <param name="buildNow">When true, the kd-tree is built immediately (blocking) via a rebuild job.</param>
        /// <param name="allocator">Allocator used for all internal native containers.</param>
        /// <exception cref="ArgumentException">Thrown when <paramref name="allocator"/> is not a valid allocator (checks builds only).</exception>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="points"/> is empty (checks builds only).</exception>
        public KnnContainer(NativeArray <float3> points, bool buildNow, Allocator allocator)
        {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            // Validate arguments BEFORE allocating any native memory. Previously these
            // checks ran after the allocations below, so throwing here leaked
            // m_nodes / m_permutation / m_rootNodeIndex / m_buildQueue.
            if (allocator <= Allocator.None)
            {
                throw new ArgumentException("Allocator must be Temp, TempJob or Persistent", nameof(allocator));
            }

            if (points.Length <= 0)
            {
                // Message fixed: zero-length input is rejected, so the length must be at least 1.
                throw new ArgumentOutOfRangeException(nameof(points), "Input points length must be >= 1");
            }
#endif

            // Upper-bound estimate of kd-tree nodes: roughly 4 nodes per leaf bucket, plus slack.
            int nodeCountEstimate = 4 * (int)math.ceil(points.Length / (float)c_maxPointsPerLeafNode + 1) + 1;

            Points = points;

            // Both arrays are filled in as we go, so start with uninitialized mem
            m_nodes = new NativeList <KdNode>(nodeCountEstimate, allocator);

            // Dumb way to create an int* essentially..
            m_permutation   = new NativeArray <int>(points.Length, allocator, NativeArrayOptions.UninitializedMemory);
            m_rootNodeIndex = new NativeArray <int>(new[] { -1 }, allocator);
            m_buildQueue    = new NativeQueue <int>(allocator);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            DisposeSentinel.Create(out m_Safety, out m_DisposeSentinel, 0, allocator);
#endif

            if (buildNow)
            {
                // Build synchronously: schedule the rebuild job and block until it finishes.
                var rebuild = new KnnRebuildJob(this);
                rebuild.Schedule().Complete();
            }
        }
Example 2
0
    // After particle job
    void LateUpdate()
    {
        // Rebuild our datastructure
        var rebuild       = new KnnRebuildJob(m_container);
        var rebuildHandle = rebuild.Schedule();

        // Get all probe positions / colors
        if (!m_queryPositions.IsCreated || m_queryPositions.Length != QueryProbe.All.Count)
        {
            if (m_queryPositions.IsCreated)
            {
                m_queryPositions.Dispose();
                m_results.Dispose();
                m_queryColors.Dispose();
            }

            m_queryPositions = new NativeArray <float3>(QueryProbe.All.Count, Allocator.Persistent);
            m_results        = new NativeArray <int>(QueryK * QueryProbe.All.Count, Allocator.Persistent);
            m_queryColors    = new NativeArray <Color32>(QueryProbe.All.Count, Allocator.Persistent);
        }

        for (int i = 0; i < QueryProbe.All.Count; i++)
        {
            var p = QueryProbe.All[i];
            m_queryPositions[i] = p.transform.position;
            m_queryColors[i]    = p.Color;
        }

        // Now do the KNN query
        var query = new KNearestBatchQueryJob(m_container, m_queryPositions, m_results);

        // Schedule query, dependent on the rebuild
        // We're only doing a very limited number of points - so allow each query to have it's own job
        query.ScheduleBatch(m_queryPositions.Length, 1, rebuildHandle).Complete();
    }
Example 3
0
        /// <summary>
        /// Builds every per-frame job struct once, wiring each job's fields to the
        /// persistent native buffers owned by this class. DeltaTime fields are baked
        /// in as 0 here — presumably refreshed before the jobs are scheduled each
        /// frame (TODO confirm against the scheduling code).
        /// </summary>
        private void InitJobData()
        {
            // Placeholder timestep copied into the friction/integration jobs below;
            // 0 until a real frame delta is supplied (see note in summary).
            float deltaTime = 0f;

            #region JobDataSetup
            //
            //Init all job data here. Declare roughly inline. Optional brackets for things that can be parallel
            //
            #region ResetBeginningOfSimFrame

            // Zero out the per-blob acceleration accumulator at the start of each sim frame.
            _jobDataResetBlobAccelerations = new MemsetNativeArray <float2> {
                Source = _blobAccelerations, Value = float2.zero
            };

            // Zero out the per-cursor acceleration accumulator.
            _jobDataResetCursorAccelerations = new MemsetNativeArray <float2> {
                Source = _cursorAccelerations, Value = float2.zero
            };

            // Reset all goo group IDs to -1 (ungrouped) before flood filling.
            _jobDataResetGooGroups = new MemsetNativeArray <int> {
                Source = _blobGroupIDs, Value = -1
            };
            //   _jobDataCopyBlobRadii = new MemsetNativeArray<float> {Source = _blobRadii, Value = GooPhysics.MaxSpringDistance};

            // Widen 2D blob positions (plus team ID) into the float3 buffer the KNN container indexes.
            _jobDataCopyBlobInfoToFloat3 = new JobCopyBlobInfoToFloat3
            {
                BlobPos       = _blobPositions,
                BlobTeams     = _blobTeamIDs,
                BlobPosFloat3 = _blobPositionsV3
            };


            // Rebuilds the kd-tree over _knnContainer's points each frame.
            _jobBuildKnnTree = new KnnRebuildJob(_knnContainer);

            // Initialize all the range query results
            _blobKNNNearestNeighbourQueryResults = new NativeArray <RangeQueryResult>(_blobPositions.Length, Allocator.Persistent);

            // Capacity of 40 edges per blob — NOTE(review): heuristic, not derived; confirm it can't overflow.
            _uniqueBlobEdges        = new NativeMultiHashMap <int, int>(_blobPositions.Length * 40, Allocator.Persistent);
            _uniqueBlobEdgesHashSet = new NativeHashSet <long>(_blobPositions.Length * 40, Allocator.Persistent);
            // Each range query result object needs to declare upfront what the maximum number of points in range is


            for (int i = 0; i < _blobKNNNearestNeighbourQueryResults.Length; ++i)
            {
                _blobKNNNearestNeighbourQueryResults[i] = new RangeQueryResult(GooPhysics.MaxNearestNeighbours, Allocator.Persistent);
            }


            // Range query: for every blob position, find all neighbours within the spring distance.
            _jobDataQueryNearestNeighboursKNN = new QueryRangesBatchJob {
                m_container      = _knnContainer,
                m_queryPositions = _blobPositionsV3,
                m_SearchRadius   = GooPhysics.MaxSpringDistance,

                Results = _blobKNNNearestNeighbourQueryResults
            };
            #endregion //ResetBeginningOfSimFrame

            #region Updates
            //build edges with existing positions


            // Flood-fill connected-group IDs using the KNN neighbour results directly.
            _jobDataFloodFillGroupIDsKnn = new JobFloodFillIDsKnn()
            {
                BlobNearestNeighbours = _blobKNNNearestNeighbourQueryResults,
                GroupIDs   = _blobGroupIDs,
                FloodQueue = _floodQueue,
                NumGroups  = _numGroups //for safety.don't want divide by zero
            };

            // Alternative flood fill that walks the unique-edge multi hash map instead of raw KNN results.
            _jobDataFloodFillGroupIDsMultiHashMap = new JobFloodFillIDsUniqueEdges()
            {
                Springs    = _uniqueBlobEdges,
                GroupIDs   = _blobGroupIDs,
                FloodQueue = _floodQueue,
                NumGroups  = _numGroups //for safety.don't want divide by zero
            };

            // Spring forces computed straight from KNN neighbour lists, accumulated into blob accelerations.
            _jobDataSpringForcesUsingKnn = new JobSpringForceUsingKNNResults
            {
                AccelerationAccumulator = _blobAccelerations,
                BlobNearestNeighbours   = _blobKNNNearestNeighbourQueryResults,
                MaxEdgeDistanceRaw      = GooPhysics.MaxSpringDistance * 2.0f,
                SpringConstant          = GooPhysics.SpringForce,
                DampeningConstant       = GooPhysics.DampeningConstant,
                Positions = _blobPositions,
                Velocity  = _blobVelocities,
            };

            // Deduplicates neighbour pairs into _uniqueBlobEdges via parallel writers.
            _jobCompileDataUniqueEdges = new JobCompileUniqueEdges
            {
                BlobNearestNeighbours = _blobKNNNearestNeighbourQueryResults,
                Edges       = _uniqueBlobEdges.AsParallelWriter(),
                UniqueEdges = _uniqueBlobEdgesHashSet.AsParallelWriter()
            };

            // Spring forces computed from the deduplicated edge set (alternative to the KNN variant above).
            _jobDataSpringForcesUniqueEdges = new JobUniqueSpringForce
            {
                AccelerationAccumulator = _blobAccelerations,
                Springs            = _uniqueBlobEdges,
                MaxEdgeDistanceRaw = GooPhysics.MaxSpringDistance * 2.0f,
                SpringConstant     = GooPhysics.SpringForce,
                DampeningConstant  = GooPhysics.DampeningConstant,
                Positions          = _blobPositions,
                Velocity           = _blobVelocities,
            };

            // Neighbouring blobs influence each other's velocity with a falloff over _blobRadii.
            _jobDataFluidInfluence = new JobVelocityInfluenceFalloff
            {
                BlobPositions         = _blobPositions,
                BlobVelocities        = _blobVelocities,
                BlobNearestNeighbours = _blobKNNNearestNeighbourQueryResults,
                InfluenceRadius       = _blobRadii,
                InfluenceModulator    = GooPhysics.FluidInfluenceModulator,
                BlobAccelAccumulator  = _blobAccelerations
            };

            //update cursor accel based on inputs
            //todo: could be CopyTo?

            //_cursorInputDeltas.CopyTo(_cursorAccelerations);
            _jobDataSetCursorAcceleration = new JobSetAcceleration
            {
                ValueToSet = _cursorInputDeltas,
                AccumulatedAcceleration = _cursorAccelerations
            };

            //update cursor friction
            _jobDataApplyCursorFriction = new JobApplyLinearAndConstantFriction
            {
                DeltaTime               = deltaTime,
                LinearFriction          = CursorLinearFriction,
                ConstantFriction        = CursorConstantFriction,
                AccumulatedAcceleration = _cursorAccelerations,
                Velocity = _cursorVelocities
            };


            // Integrate cursor acceleration -> velocity -> position.
            _jobDataUpdateCursorPositions = new JobApplyAcceelrationAndVelocity
            {
                DeltaTime = deltaTime,
                AccumulatedAcceleration = _cursorAccelerations,
                VelocityInAndOut        = _cursorVelocities,
                PositionInAndOut        = _cursorPositions
            };

            //Now we can update the blobs with the new state of the cursors
            _jobDataCursorsInfluenceBlobs = new JobCursorsInfluenceBlobs
            {
                CursorPositions      = _cursorPositions,
                CursorVelocities     = _cursorVelocities,
                CursorRadius         = _cursorRadii,
                BlobPositions        = _blobPositions,
                BlobAccelAccumulator = _blobAccelerations
            };

            _jobDataApplyFrictionToBlobs = new JobApplyLinearAndConstantFriction
            {
                DeltaTime = deltaTime,
                //TODO: maybe I want friction based on acceleration (t*t) since that's the freshest part of this.
                //So, constant + linear(t) + accelerative (t*t)
                LinearFriction          = GooPhysics.LinearFriction,
                ConstantFriction        = GooPhysics.ConstantFriction,
                AccumulatedAcceleration = _blobAccelerations,
                Velocity = _blobVelocities
            };

            //Blob sim gets updated
            _jobDataUpdateBlobPositions = new JobApplyAcceelrationAndVelocity
            {
                DeltaTime = deltaTime,
                AccumulatedAcceleration = _blobAccelerations,
                VelocityInAndOut        = _blobVelocities,
                PositionInAndOut        = _blobPositions
            };

            //Output

            // Debug visualisation: map group IDs (0..10) onto blob colors.
            _jobDataDebugColorisationInt = new JobDebugColorisationInt()
            {
                minVal = 0,
                maxVal = 10,
                values = _blobGroupIDs,
                colors = _blobColors,
            };

            // Debug visualisation: map KNN result counts (0..10) onto blob colors.
            _jobDataDebugColorisationKNNLength = new JobDebugColorisationKNNRangeQuery()
            {
                minVal = 0,
                maxVal = 10,
                values = _blobKNNNearestNeighbourQueryResults,
                colors = _blobColors,
            };

            /*  _jobDataDebugColorisationFloat = new JobDebugColorisationFloat
             * {
             *   minVal = 0,
             *   maxVal = 10,
             *   values = _blobEdgeCount,
             *   colors =_blobColors,
             * }*/

            // Debug visualisation: color blobs by velocity magnitude (capped at 10).
            _jobDataDebugColorisationFloat2Magnitude = new JobDebugColorisationFloat2XY
            {
                maxVal = 10,
                values = _blobVelocities,
                colors = _blobColors
            };

            // Copy the simulated blob state out to the renderer's particle system.
            _jobDataCopyBlobsToParticleSystem = new JopCopyBlobsToParticleSystem
            {
                colors     = _blobColors,
                positions  = _blobPositions,
                velocities = _blobVelocities
            };

            // Mirror cursor positions onto their scene transforms.
            _jobDataCopyCursorsToTransforms = new JobCopyBlobsToTransforms
            {
                BlobPos = _cursorPositions
            };


            #region BoundsForCamera
            // Compute an AABB over all blobs so the camera can frame the goo.
            _jobDataCalculateAABB = new JobCalculateAABB()
            {
                Positions = _blobPositions,
                Bounds    = _overallGooBounds
            };
            #endregion // BoundsForCamera
            #endregion // Updates
            #endregion // JobDataSetup
        }
Example 4
0
    // Runs after the particle job each frame: rebuilds the KNN tree,
    // refreshes the probe buffers, then runs either a k-nearest or a range query.
    void LateUpdate()
    {
        // Rebuild our datastructure
        var rebuild       = new KnnRebuildJob(m_container);
        var rebuildHandle = rebuild.Schedule();

        // Get all probe positions / colors
        if (!m_queryPositions.IsCreated || m_queryPositions.Length != QueryProbe.All.Count)
        {
            if (m_queryPositions.IsCreated)
            {
                m_queryPositions.Dispose();
                m_results.Dispose();
                m_queryColors.Dispose();

                // Bug fix: m_rangeResults was previously leaked on resize. Each
                // RangeQueryResult owns its own persistent allocation, so dispose
                // every element before releasing the containing array.
                if (m_rangeResults.IsCreated)
                {
                    for (int i = 0; i < m_rangeResults.Length; ++i)
                    {
                        m_rangeResults[i].Dispose();
                    }

                    m_rangeResults.Dispose();
                }
            }

            m_queryPositions = new NativeArray <float3>(QueryProbe.All.Count, Allocator.Persistent);
            m_results        = new NativeArray <int>(QueryK * QueryProbe.All.Count, Allocator.Persistent);
            m_queryColors    = new NativeArray <Color32>(QueryProbe.All.Count, Allocator.Persistent);

            // Initialize all the range query results
            m_rangeResults = new NativeArray <RangeQueryResult>(QueryProbe.All.Count, Allocator.Persistent);

            // Each range query result object needs to declare upfront what the maximum number of points in range is
            for (int i = 0; i < m_rangeResults.Length; ++i)
            {
                // Allow for a maximum of 1024 results
                m_rangeResults[i] = new RangeQueryResult(1024, Allocator.Persistent);
            }
        }

        // Snapshot probe positions and colors into the native buffers.
        for (int i = 0; i < QueryProbe.All.Count; i++)
        {
            var p = QueryProbe.All[i];
            m_queryPositions[i] = p.transform.position;
            m_queryColors[i]    = p.Color;
        }

        switch (Mode)
        {
        case QueryMode.KNearest: {
            // Do a KNN query
            var query = new QueryKNearestBatchJob(m_container, m_queryPositions, m_results);

            // Schedule query, dependent on the rebuild
            // We're only doing a very limited number of points - so allow each query to have its own job
            query.ScheduleBatch(m_queryPositions.Length, 1, rebuildHandle).Complete();
            break;
        }

        case QueryMode.Range: {
            // Do a range query
            var query = new QueryRangeBatchJob(m_container, m_queryPositions, QueryRange, m_rangeResults);

            // Schedule query, dependent on the rebuild
            // We're only doing a very limited number of points - so allow each query to have its own job
            query.Schedule(m_queryPositions.Length, 1, rebuildHandle).Complete();
            break;
        }
        }
    }
Example 5
0
    // Walkthrough of the KNN container API: build, single query, batch query, range queries.
    public static void Demo()
    {
        Profiler.BeginSample("Test Query");

        // Build a random cloud of 100k points to query against.
        var cloud = new NativeArray <float3>(100000, Allocator.Persistent);
        var rng   = new Random(123456);

        for (int i = 0; i < cloud.Length; ++i)
        {
            cloud[i] = rng.NextFloat3();
        }

        // How many neighbours each query should return.
        const int kNeighbours = 10;
        float3 queryPosition  = float3.zero;

        Profiler.BeginSample("Build");
        // The container accelerates neighbour queries. The second argument controls
        // whether the tree is built immediately; pass false to defer building.
        var container = new KnnContainer(cloud, false, Allocator.TempJob);

        Profiler.EndSample();

        // Whenever the point cloud changes, a rebuild job refreshes the structure.
        var rebuild = new KnnRebuildJob(container);

        rebuild.Schedule().Complete();

        // Simplest usage: get the 10 nearest neighbour indices on the main thread.
        // This path is NOT Burst accelerated yet - Unity needs to support compiling delegates with Burst.
        var nearest = new NativeArray <int>(kNeighbours, Allocator.TempJob);

        container.QueryKNearest(queryPosition, nearest);

        // nearest now holds indices into the cloud array.

        Profiler.BeginSample("Simple Query");
        // The same query expressed as a job - this one does run through Burst.
        var singleQuery = new QueryKNearestJob(container, queryPosition, nearest);

        singleQuery.Schedule().Complete();
        Profiler.EndSample();

        // Next: query neighbours for many positions at once.
        const int queryPoints = 100000;

        // One flat array holding kNeighbours result indices per query position.
        var batchResults = new NativeArray <int>(queryPoints * kNeighbours, Allocator.TempJob);

        // Random query positions clustered near the origin.
        var batchPositions = new NativeArray <float3>(queryPoints, Allocator.TempJob);

        for (int i = 0; i < queryPoints; ++i)
        {
            batchPositions[i] = rng.NextFloat3() * 0.1f;
        }

        Profiler.BeginSample("Batch Query");
        // This batch job spreads all the queries over multiple threads.
        var batchQuery = new QueryKNearestBatchJob(container, batchPositions, batchResults);

        batchQuery.ScheduleBatch(batchPositions.Length, batchPositions.Length / 32).Complete();
        Profiler.EndSample();

        // Range queries return every point within a radius of each query position.
        var singleRangeResult = new NativeList <int>(Allocator.TempJob);
        var rangeQuery        = new QueryRangeJob(container, queryPosition, 2.0f, singleRangeResult);

        // Per-query result holders for the batch range query below.
        var batchRangeResults = new NativeArray <RangeQueryResult>(queryPoints, Allocator.TempJob);

        // Run the single range query on the main thread for now (Burst compiled).
        rangeQuery.Schedule().Complete();

        // Batch range queries must declare an upper bound on neighbours per query,
        // because jobs cannot allocate on demand.
        for (int i = 0; i < batchRangeResults.Length; ++i)
        {
            batchRangeResults[i] = new RangeQueryResult(128, Allocator.TempJob);
        }

        Profiler.BeginSample("Batch Range Query");
        // Run the range query for every position, spread over multiple threads.
        var batchRangeQuery = new QueryRangeBatchJob(container, batchPositions, 2.0f, batchRangeResults);

        batchRangeQuery.Schedule(batchPositions.Length, batchPositions.Length / 32).Complete();
        Profiler.EndSample();

        // All results are in - release every native allocation.
        singleRangeResult.Dispose();
        foreach (var r in batchRangeResults)
        {
            r.Dispose();
        }
        batchRangeResults.Dispose();
        container.Dispose();
        batchPositions.Dispose();
        batchResults.Dispose();
        cloud.Dispose();
        nearest.Dispose();
        Profiler.EndSample();
    }