public void Dispose()
{
    // Hand every allocated per-type cache buffer back to the pool, then release the
    // type lists themselves through the typed pool view.
    var listPool = pool.SpecializeFor<UntypedList>();
    for (int typeIndex = 0; typeIndex < constraintCaches.Length; ++typeIndex)
    {
        if (constraintCaches[typeIndex].Buffer.Allocated)
        {
            pool.Return(ref constraintCaches[typeIndex].Buffer);
        }
    }
    listPool.Return(ref constraintCaches);
    for (int typeIndex = 0; typeIndex < collisionCaches.Length; ++typeIndex)
    {
        if (collisionCaches[typeIndex].Buffer.Allocated)
        {
            pool.Return(ref collisionCaches[typeIndex].Buffer);
        }
    }
    listPool.Return(ref collisionCaches);
    // Wipe all fields so stale references cannot be observed after disposal.
    this = new WorkerPairCache();
    // Note that the pending collections are not disposed here; they are disposed upon
    // flushing immediately after the narrow phase completes.
}
/// <summary>
/// Prepares the next frame's worker pair caches, sizing each per-type cache to at least the
/// sizes observed across the previous frame's worker caches, and allocates/clears the pair
/// freshness flags for the existing overlap mapping.
/// </summary>
/// <param name="threadDispatcher">Dispatcher whose per-thread memory pools back the worker caches;
/// null to prepare a single worker cache backed by the main pool.</param>
public void Prepare(IThreadDispatcher threadDispatcher = null)
{
    // Find the widest type index ranges used by any worker last frame so the minimum-size
    // accumulators can cover every type.
    int maximumConstraintTypeCount = 0, maximumCollisionTypeCount = 0;
    for (int i = 0; i < workerCaches.Count; ++i)
    {
        workerCaches[i].GetMaximumCacheTypeCounts(out var collision, out var constraint);
        if (collision > maximumCollisionTypeCount)
            maximumCollisionTypeCount = collision;
        if (constraint > maximumConstraintTypeCount)
            maximumConstraintTypeCount = constraint;
    }
    QuickList<PreallocationSizes, Buffer<PreallocationSizes>>.Create(
        pool.SpecializeFor<PreallocationSizes>(), maximumConstraintTypeCount, out var minimumSizesPerConstraintType);
    QuickList<PreallocationSizes, Buffer<PreallocationSizes>>.Create(
        pool.SpecializeFor<PreallocationSizes>(), maximumCollisionTypeCount, out var minimumSizesPerCollisionType);
    //Since the minimum size accumulation builds the minimum size incrementally, bad data within the array can corrupt the result- we must clear it.
    minimumSizesPerConstraintType.Span.Clear(0, minimumSizesPerConstraintType.Span.Length);
    minimumSizesPerCollisionType.Span.Clear(0, minimumSizesPerCollisionType.Span.Length);
    for (int i = 0; i < workerCaches.Count; ++i)
    {
        workerCaches[i].AccumulateMinimumSizes(ref minimumSizesPerConstraintType, ref minimumSizesPerCollisionType);
    }

    var threadCount = threadDispatcher != null ? threadDispatcher.ThreadCount : 1;
    //Ensure that the new worker pair caches can hold all workers.
    if (!NextWorkerCaches.Span.Allocated || NextWorkerCaches.Span.Length < threadCount)
    {
        //The next worker caches should never need to be disposed here. The flush should have taken care of it.
#if DEBUG
        for (int i = 0; i < NextWorkerCaches.Count; ++i)
            Debug.Assert(NextWorkerCaches[i].Equals(default(WorkerPairCache)));
#endif
        QuickList<WorkerPairCache, Array<WorkerPairCache>>.Create(
            new PassthroughArrayPool<WorkerPairCache>(), threadCount, out NextWorkerCaches);
    }
    //Note that we have not initialized the workerCaches from the previous frame. In the event that this is the first frame and there are no previous worker caches,
    //there will be no pointers into the caches, and removal analysis loops over the count which defaults to zero- so it's safe.
    NextWorkerCaches.Count = threadCount;

    var pendingSize = Math.Max(minimumPendingSize, previousPendingSize);
    if (threadDispatcher != null)
    {
        // Each worker cache draws from its own thread's pool to avoid cross-thread contention.
        for (int i = 0; i < threadCount; ++i)
        {
            NextWorkerCaches[i] = new WorkerPairCache(i, threadDispatcher.GetThreadMemoryPool(i),
                ref minimumSizesPerConstraintType, ref minimumSizesPerCollisionType, pendingSize, minimumPerTypeCapacity);
        }
    }
    else
    {
        NextWorkerCaches[0] = new WorkerPairCache(0, pool,
            ref minimumSizesPerConstraintType, ref minimumSizesPerCollisionType, pendingSize, minimumPerTypeCapacity);
    }
    minimumSizesPerConstraintType.Dispose(pool.SpecializeFor<PreallocationSizes>());
    minimumSizesPerCollisionType.Dispose(pool.SpecializeFor<PreallocationSizes>());

    //Create the pair freshness array for the existing overlaps.
    pool.Take(Mapping.Count, out PairFreshness);
    //This clears 1 byte per pair. 32768 pairs with 10GBps assumed single core bandwidth means about 3 microseconds.
    //There is a small chance that multithreading this would be useful in larger simulations- but it would be very, very close.
    PairFreshness.Clear(0, Mapping.Count);
}