public static SupportsDeallocateOnJobCompletion Create() => new SupportsDeallocateOnJobCompletion { Handle = AtomicSafetyHandle.Create() };
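// Usage sketch (hypothetical consumer, not part of the source above): a container built by
// Create() is meant to sit on a job field tagged [DeallocateOnJobCompletion], so the
// AtomicSafetyHandle created here is released automatically when the job finishes.
struct ConsumeAndFreeJob : IJob
{
    [DeallocateOnJobCompletion]
    public SupportsDeallocateOnJobCompletion Container;

    public void Execute()
    {
        // Work with Container; the job system deallocates it (and releases its
        // safety handle) once the job completes.
    }
}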
unsafe NativeList(int capacity, Allocator i_label, int stackDepth)
{
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    var guardian = new NativeBufferSentinel(stackDepth, i_label);
    m_Safety = (i_label == Allocator.Temp)
        ? AtomicSafetyHandle.GetTempMemoryHandle()
        : AtomicSafetyHandle.Create();
    m_Impl = new NativeListImpl<T, DefaultMemoryManager, NativeBufferSentinel>(capacity, i_label, guardian);
#else
    m_Impl = new NativeListImpl<T, DefaultMemoryManager>(capacity, i_label);
#endif
}
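// Sketch of how the allocator label reaches this constructor (assuming the public
// NativeList<T>(int, Allocator) constructor forwards here): Temp lists share the frame's
// temp-memory safety handle, while every other allocator gets a freshly created handle
// that must be released via Dispose().
var tempList = new NativeList<int>(16, Allocator.Temp);    // m_Safety = GetTempMemoryHandle()
var jobList = new NativeList<int>(16, Allocator.TempJob);  // m_Safety = AtomicSafetyHandle.Create()
jobList.Dispose();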
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true,
    NativeArray<Entity> limitToEntityArray = default(NativeArray<Entity>))
    where T : struct, IJobEntityBatchWithIndex
{
    var queryImpl = query._GetImpl();
    var queryData = queryImpl->_QueryData;

    var batchCount = 0;
    var filteredChunkCount = 0;
    var useEntityArray = limitToEntityArray.IsCreated;

    var prebuiltBatchList = new UnsafeList(Allocator.TempJob);
    var perBatchMatchingArchetypeIndex = new UnsafeIntList(0, Allocator.TempJob);

    if (useEntityArray)
    {
        // Forces the creation of an EntityQueryMask, which is necessary to filter batches.
        var access = queryImpl->_Access;
        access->EntityQueryManager->GetEntityQueryMask(queryData, access->EntityComponentStore);

        ChunkIterationUtility.FindBatchesForEntityArrayWithQuery(
            queryImpl->_Access->EntityComponentStore,
            queryData,
            ref queryImpl->_Filter,
            (Entity*)limitToEntityArray.GetUnsafePtr(),
            limitToEntityArray.Length,
            ref prebuiltBatchList,
            ref perBatchMatchingArchetypeIndex);

        batchCount = prebuiltBatchList.Length;
    }
    else
    {
        filteredChunkCount = query.CalculateChunkCount();
        batchCount = filteredChunkCount * batchesPerChunk;
    }

    // Allocate one buffer for all prefilter data and distribute it.
    // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion].
    var sizeofBatchArray = sizeof(ArchetypeChunk) * batchCount;
    var sizeofIndexArray = sizeof(int) * batchCount;
    var prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

    var prefilterData = (byte*)Memory.Unmanaged.Allocate(prefilterDataSize, 64, Allocator.TempJob);
    var prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

    var prefilterHandle = dependsOn;
    if (useEntityArray)
    {
        var prefilterJob = new PrefilterForJobEntityBatchWithIndex_EntityArray
        {
            MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
            Filter = queryImpl->_Filter,
            EntityComponentStore = queryImpl->_Access->EntityComponentStore,
            PrefilterData = prefilterData,
            PrebuiltBatches = prebuiltBatchList,
            PerBatchMatchingArchetypeIndex = perBatchMatchingArchetypeIndex
        };

        if (mode != ScheduleMode.Run)
            prefilterHandle = prefilterJob.Schedule(dependsOn);
        else
            prefilterJob.Run();

        prefilterHandle = prebuiltBatchList.Dispose(prefilterHandle);
        prefilterHandle = perBatchMatchingArchetypeIndex.Dispose(prefilterHandle);
    }
    else
    {
        var prefilterJob = new PrefilterForJobEntityBatchWithIndex
        {
            MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
            Filter = queryImpl->_Filter,
            BatchesPerChunk = batchesPerChunk,
            EntityComponentStore = queryImpl->_Access->EntityComponentStore,
            PrefilterData = prefilterData,
            FilteredChunkCount = filteredChunkCount
        };

        if (mode != ScheduleMode.Run)
            prefilterHandle = prefilterJob.Schedule(dependsOn);
        else
            prefilterJob.Run();
    }

    JobEntityBatchIndexWrapper<T> jobEntityBatchIndexWrapper = new JobEntityBatchIndexWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatchWithIndex jobs have an EntityManager safety handle to ensure that
        // BeforeStructuralChange throws an error if jobs without any other safety handles are
        // still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        JobData = jobData,
        PrefilterData = prefilterDataArray,
        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchIndexWrapper),
        isParallel
            ? JobEntityBatchIndexProducer<T>.InitializeParallel()
            : JobEntityBatchIndexProducer<T>.InitializeSingle(),
        prefilterHandle,
        mode);

#if UNITY_DOTSRUNTIME
    // This should just be a call to FinalizeScheduleChecked, but DOTS Runtime requires the
    // JobsUtility calls to be in this specific function.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (!isParallel)
            return JobsUtility.Schedule(ref scheduleParams);
        else
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1);
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException e)
    {
        prefilterHandle.Complete();
        prefilterDataArray.Dispose();
        throw e;
    }
#endif
#else
    // We can't use try/catch with 2020.2 because this schedule code will be Burst-compiled,
    // and Burst doesn't support exception handling.
    bool executedManaged = false;
    JobHandle result = default;
    FinalizeScheduleChecked(isParallel, batchCount, prefilterHandle, prefilterDataArray, ref scheduleParams, ref executedManaged, ref result);

    if (executedManaged)
        return result;

    return FinalizeScheduleNoExceptions(isParallel, batchCount, ref scheduleParams);
#endif
}
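// Usage sketch (hypothetical job; the extension-method call follows the public
// IJobEntityBatchWithIndex API, which is expected to funnel into ScheduleInternal above):
struct CountEntitiesJob : IJobEntityBatchWithIndex
{
    public void Execute(ArchetypeChunk batchInChunk, int batchIndex, int indexOfFirstEntityInQuery)
    {
        // indexOfFirstEntityInQuery is the per-batch base index that the prefilter pass computes.
    }
}

// In a SystemBase.OnUpdate, assuming m_Query matches the job's component requirements:
// Dependency = new CountEntitiesJob().ScheduleParallel(m_Query, Dependency);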
internal static JobHandle PreparePrefilteredChunkLists(int unfilteredChunkCount, MatchingArchetypeList archetypes, ComponentGroupFilter filter, JobHandle dependsOn, ScheduleMode mode, out NativeArray<byte> prefilterDataArray, out void* deferredCountData)
{
    // Allocate one buffer for all prefilter data and distribute it.
    // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion].
    var sizeofChunkArray = sizeof(ArchetypeChunk) * unfilteredChunkCount;
    var sizeofIndexArray = sizeof(int) * unfilteredChunkCount;
    var prefilterDataSize = sizeofChunkArray + sizeofIndexArray + sizeof(int);

    var prefilterData = (byte*)UnsafeUtility.Malloc(prefilterDataSize, 64, Allocator.TempJob);
    prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

    JobHandle prefilterHandle = default(JobHandle);

    if (filter.RequiresMatchesFilter)
    {
        var prefilteringJob = new GatherChunksAndOffsetsWithFilteringJob
        {
            Archetypes = archetypes,
            Filter = filter,
            PrefilterData = prefilterData,
            UnfilteredChunkCount = unfilteredChunkCount
        };
        if (mode == ScheduleMode.Batched)
            prefilterHandle = prefilteringJob.Schedule(dependsOn);
        else
            prefilteringJob.Run();
    }
    else
    {
        var gatherJob = new GatherChunksAndOffsetsJob
        {
            Archetypes = archetypes,
            PrefilterData = prefilterData,
            UnfilteredChunkCount = unfilteredChunkCount
        };
        if (mode == ScheduleMode.Batched)
            prefilterHandle = gatherJob.Schedule(dependsOn);
        else
            gatherJob.Run();
    }

    // ScheduleParallelForDeferArraySize expects a ptr to a structure with a void* and a count.
    // It only uses the count, so this is safe to fudge.
    deferredCountData = prefilterData + sizeofChunkArray + sizeofIndexArray;
    deferredCountData = (byte*)deferredCountData - sizeof(void*);

    return prefilterHandle;
}
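// Caller-side sketch (assumed, not shown above): the fudged deferredCountData pointer is meant
// for JobsUtility.ScheduleParallelForDeferArraySize, which reads only the int count stored
// sizeof(void*) past the pointer -- exactly where the gather jobs wrote the filtered chunk count.
var prefilterHandle = PreparePrefilteredChunkLists(
    unfilteredChunkCount, archetypes, filter, dependsOn, mode,
    out var prefilterDataArray, out var deferredCountData);
// scheduleParams is assumed to be built elsewhere from the wrapped job struct.
var handle = JobsUtility.ScheduleParallelForDeferArraySize(ref scheduleParams, 1, deferredCountData, null);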
protected override void OnUpdate()
{
    Entities.ForEach((MeshFilter meshFilter, VertexClothGarment garment) =>
    {
        var mesh = meshFilter.mesh;
        var vertexCount = mesh.vertexCount;

        var archetype = DstEntityManager.CreateArchetype(
            typeof(ClothProjectedPosition),
            typeof(ClothCurrentPosition),
            typeof(ClothPreviousPosition),
            typeof(ClothDistanceConstraint),
            typeof(ClothPositionOrigin),
            typeof(ClothPinWeight),
            typeof(ClothTotalTime),
            typeof(ClothTimestepData),
            typeof(ClothSourceMeshData));
        var entity = DstEntityManager.CreateEntity(archetype);

        // Add a reference to the source mesh data and mark the mesh dynamic for read/write access.
        mesh.MarkDynamic();
        var meshHandle = GCHandle.Alloc(mesh, GCHandleType.Pinned);
        var srcMeshData = new ClothSourceMeshData { SrcMeshHandle = meshHandle };
        DstEntityManager.SetComponentData(entity, srcMeshData);
        DstEntityManager.SetComponentData(entity, new ClothTotalTime { TotalTime = 0.0f });
        DstEntityManager.SetComponentData(entity, new ClothTimestepData { FixedTimestep = 1.0f / 60.0f, IterationCount = 0 });

        // Copy the initial vertex data into the buffers.
        var projectedPositionBuffer = DstEntityManager.GetBuffer<ClothProjectedPosition>(entity);
        projectedPositionBuffer.Reserve(vertexCount);
        var currentPositionBuffer = DstEntityManager.GetBuffer<ClothCurrentPosition>(entity);
        currentPositionBuffer.Reserve(vertexCount);
        var previousPositionBuffer = DstEntityManager.GetBuffer<ClothPreviousPosition>(entity);
        previousPositionBuffer.Reserve(vertexCount);
        var originPositionBuffer = DstEntityManager.GetBuffer<ClothPositionOrigin>(entity);
        originPositionBuffer.Reserve(vertexCount);

        fixed (Vector3* positions = mesh.vertices)
        {
            var currentPositionsAsNativeArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<ClothCurrentPosition>((float3*)positions, vertexCount, Allocator.Invalid);
            var projectedPositionsAsNativeArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<ClothProjectedPosition>((float3*)positions, vertexCount, Allocator.Invalid);
            var previousPositionsAsNativeArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<ClothPreviousPosition>((float3*)positions, vertexCount, Allocator.Invalid);
            var originPositionsAsNativeArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<ClothPositionOrigin>((float3*)positions, vertexCount, Allocator.Invalid);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref currentPositionsAsNativeArray, AtomicSafetyHandle.Create());
            NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref projectedPositionsAsNativeArray, AtomicSafetyHandle.Create());
            NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref previousPositionsAsNativeArray, AtomicSafetyHandle.Create());
            NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref originPositionsAsNativeArray, AtomicSafetyHandle.Create());
#endif

            currentPositionBuffer.CopyFrom(currentPositionsAsNativeArray);
            projectedPositionBuffer.CopyFrom(projectedPositionsAsNativeArray);
            previousPositionBuffer.CopyFrom(previousPositionsAsNativeArray);
            originPositionBuffer.CopyFrom(originPositionsAsNativeArray);
        }

        // Add distance constraints to the entity: one per unique mesh edge, deduplicated
        // by storing each edge with its vertex indices in sorted order.
        // todo: no garbage
        var barLookup = new HashSet<Vector2Int>();
        var triangles = mesh.triangles;
        for (int i = 0; i < triangles.Length; i += 3)
        {
            for (int j = 0; j < 3; j++)
            {
                Vector2Int pair = new Vector2Int
                {
                    x = triangles[i + j],
                    y = triangles[i + (j + 1) % 3]
                };
                if (pair.x > pair.y)
                {
                    var newY = pair.x;
                    pair.x = pair.y;
                    pair.y = newY;
                }
                if (barLookup.Contains(pair) == false)
                    barLookup.Add(pair);
            }
        }

        // todo: no garbage
        var barList = new List<Vector2Int>(barLookup);
        var constraintCount = barList.Count;

        var constraintsBuffer = DstEntityManager.GetBuffer<ClothDistanceConstraint>(entity);
        constraintsBuffer.Reserve(constraintCount);

        var vertices = mesh.vertices;
        for (int i = 0; i < constraintCount; ++i)
        {
            Vector3 p1 = vertices[barList[i].x];
            Vector3 p2 = vertices[barList[i].y];
            constraintsBuffer.Add(new ClothDistanceConstraint
            {
                RestLengthSqr = (p2 - p1).sqrMagnitude,
                VertexA = barList[i].x,
                VertexB = barList[i].y
            });
        }

        // Add pin weights: vertices whose normals point mostly up and which sit near the top
        // of the mesh are fully pinned (inverse weight 0), everything else moves freely.
        var pinWeightBuffer = DstEntityManager.GetBuffer<ClothPinWeight>(entity);
        pinWeightBuffer.Reserve(vertexCount);
        var normals = mesh.normals;
        for (int i = 0; i < vertexCount; ++i)
        {
            if (normals[i].y > .9f && vertices[i].y > .3f)
                pinWeightBuffer.Add(new ClothPinWeight { InvPinWeight = 0.0f });
            else
                pinWeightBuffer.Add(new ClothPinWeight { InvPinWeight = 1.0f });
        }
    });
}
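// Cleanup sketch (assumption; the teardown path is not shown in the source): the mesh is pinned
// with GCHandle.Alloc(..., GCHandleType.Pinned), so a matching Free() is needed when the cloth
// entity goes away, e.g. in a destruction/cleanup system:
var srcMeshData = EntityManager.GetComponentData<ClothSourceMeshData>(entity);
srcMeshData.SrcMeshHandle.Free(); // unpins the Mesh so the GC can manage it again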