public static unsafe void RunJobChunk<T>(ref T jobData, EntityQuery query, JobChunkRunWithoutJobSystemDelegate functionPointer)
    where T : unmanaged, IJobChunk, IJobBase
{
    var myIterator = query.GetArchetypeChunkIterator();

    try
    {
        query._GetImpl()->_Access->DependencyManager->IsInForEachDisallowStructuralChange++;

        var managedJobDataPtr = UnsafeUtility.AddressOf(ref jobData);
        var unmanagedSize = jobData.GetUnmanagedJobSize_Gen();
        if (unmanagedSize != -1)
        {
            const int kAlignment = 16;
            int alignedSize = (unmanagedSize + kAlignment - 1) & ~(kAlignment - 1);
            byte* unmanagedJobData = stackalloc byte[alignedSize];
            byte* alignedUnmanagedJobData = (byte*)((UInt64)(unmanagedJobData + kAlignment - 1) & ~(UInt64)(kAlignment - 1));

            // DOTS Runtime job marshalling code assumes the job is wrapped, so create the wrapper and assign the jobData.
            JobChunkExtensions.JobChunkWrapper<T> jobChunkWrapper = default;
            jobChunkWrapper.JobData = jobData;
            byte* jobChunkDataPtr = (byte*)UnsafeUtility.AddressOf(ref jobChunkWrapper);

            byte* dst = alignedUnmanagedJobData;
            byte* src = jobChunkDataPtr;
            var marshalToBurstFnPtr = JobMarshalFnLookup<T>.GetMarshalToBurstFn();

            UnsafeUtility.EnterTempScope();
            UnsafeUtility.CallFunctionPtr_pp(marshalToBurstFnPtr.ToPointer(), dst, src);

            // Since we are running inline, the outer job scheduling code would normally reference
            // jobWrapper.Data. We can't do that here: reaching this code means the job/jobwrapper
            // is Burst compiled and non-blittable, so any type-safe offset we calculate would be
            // based on the managed data layout, which is not useful. Instead, the layout must be
            // sequential (a requirement for Burst compilation), and the JobChunkWrapper contains a
            // safety field as its first member; skipping over it yields the offset to the job data.
            var dataOffset = UnsafeUtility.SizeOf<JobChunkExtensions.EntitySafetyHandle>();
            Assertions.Assert.AreEqual(jobChunkWrapper.safety.GetType(), typeof(JobChunkExtensions.EntitySafetyHandle));
            functionPointer(&myIterator, alignedUnmanagedJobData + dataOffset);

            // Since Run can capture locals for write-back, we must write back the marshalled jobData after the job executes.
            var marshalFromBurstFnPtr = JobMarshalFnLookup<T>.GetMarshalFromBurstFn();
            UnsafeUtility.CallFunctionPtr_pp(marshalFromBurstFnPtr.ToPointer(), src, dst);
            UnsafeUtility.ExitTempScope();

            jobData = jobChunkWrapper.JobData;
        }
        else
        {
            functionPointer(&myIterator, managedJobDataPtr);
        }
    }
    finally
    {
        query._GetImpl()->_Access->DependencyManager->IsInForEachDisallowStructuralChange--;
    }
}
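// A minimal, self-contained check of the align-up idiom used in RunJobChunk above.
// (x + kAlignment - 1) & ~(kAlignment - 1) rounds x up to the next multiple of
// kAlignment when kAlignment is a power of two; the class and method names here are
// illustrative only and not part of the package.
static class AlignUpExample
{
    const int kAlignment = 16;

    static int AlignUp(int value) => (value + kAlignment - 1) & ~(kAlignment - 1);

    static void Check()
    {
        UnityEngine.Debug.Assert(AlignUp(1) == 16);  // rounds up to the first multiple
        UnityEngine.Debug.Assert(AlignUp(20) == 32); // 20 -> next multiple of 16
        UnityEngine.Debug.Assert(AlignUp(32) == 32); // already aligned, unchanged
    }
}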
public static unsafe TransformAccessArray GetTransformAccessArray(this EntityQuery group)
{
    var state = (TransformAccessArrayState)group._CachedState;
    if (state == null)
        state = new TransformAccessArrayState();

    var orderVersion = group._GetImpl()->_Access->EntityComponentStore->GetComponentTypeOrderVersion(TypeManager.GetTypeIndex<Transform>());
    if (state.Data.isCreated && orderVersion == state.OrderVersion)
        return state.Data;

    state.OrderVersion = orderVersion;

    UnityEngine.Profiling.Profiler.BeginSample("DirtyTransformAccessArrayUpdate");
    var trans = group.ToComponentArray<Transform>();
    if (!state.Data.isCreated)
        state.Data = new TransformAccessArray(trans);
    else
        state.Data.SetTransforms(trans);
    UnityEngine.Profiling.Profiler.EndSample();

    group._CachedState = state;
    return state.Data;
}
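// A hedged usage sketch for GetTransformAccessArray: the cached array is intended to be
// fed to an IJobParallelForTransform job. The system and job names are hypothetical
// (assumes using Unity.Entities, UnityEngine, and UnityEngine.Jobs); the cache above is
// only rebuilt when the Transform component order version changes.
class BobTransformsSystem : SystemBase
{
    struct BobJob : IJobParallelForTransform
    {
        public void Execute(int index, TransformAccess transform)
        {
            transform.position += Vector3.up * 0.01f; // illustrative write
        }
    }

    EntityQuery m_Query;

    protected override void OnCreate()
    {
        m_Query = GetEntityQuery(typeof(Transform));
    }

    protected override void OnUpdate()
    {
        // Cheap when nothing changed: returns the cached TransformAccessArray.
        var transforms = m_Query.GetTransformAccessArray();
        Dependency = new BobJob().Schedule(transforms, Dependency);
    }
}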
/// <summary>
/// Gets an <see cref="EntityQueryMask"/> that can be used to quickly check whether an entity matches an EntityQuery.
/// At most 1024 EntityQueryMasks can be created. EntityQueryMasks cannot be created from EntityQueries with filters.
/// </summary>
/// <remarks>Note that an EntityQueryMask filters only by archetype; it does not support the shared-component or change filters of an EntityQuery.</remarks>
/// <param name="query">The EntityQuery that describes the EntityQueryMask.</param>
/// <returns>The EntityQueryMask corresponding to the EntityQuery.</returns>
public EntityQueryMask GetEntityQueryMask(EntityQuery query)
{
    var access = GetCheckedEntityDataAccess();
    var queryImpl = query._GetImpl();

    if (queryImpl->_QueryData->EntityQueryMask.IsCreated())
        return queryImpl->_QueryData->EntityQueryMask;

    if (access->EntityQueryManager->m_EntityQueryMasksAllocated >= 1024)
        throw new Exception("You have reached the limit of 1024 unique EntityQueryMasks, and cannot generate any more.");

    var mask = new EntityQueryMask(
        (byte)(access->EntityQueryManager->m_EntityQueryMasksAllocated / 8),
        (byte)(1 << (access->EntityQueryManager->m_EntityQueryMasksAllocated % 8)),
        access->EntityComponentStore);

    access->EntityQueryManager->m_EntityQueryMasksAllocated++;

    for (var i = 0; i < queryImpl->_QueryData->MatchingArchetypes.Length; ++i)
    {
        queryImpl->_QueryData->MatchingArchetypes.Ptr[i]->Archetype->QueryMaskArray[mask.Index] |= mask.Mask;
    }

    queryImpl->_QueryData->EntityQueryMask = mask;
    return mask;
}
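// A hedged usage sketch for GetEntityQueryMask. EntityQueryMask.Matches performs the
// archetype-only test described above without evaluating shared-component or change
// filters. MyTag and the surrounding method are hypothetical.
static bool ArchetypeMatches(EntityManager entityManager, Entity candidateEntity)
{
    var maskQuery = entityManager.CreateEntityQuery(ComponentType.ReadOnly<MyTag>());
    var mask = entityManager.GetEntityQueryMask(maskQuery);
    return mask.Matches(candidateEntity); // cheap bit test against the entity's archetype
}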
public static void AssertValidEntityQuery(EntityQuery query, EntityComponentStore* store)
{
    var e = query._GetImpl()->_Access->EntityComponentStore;
    if (e != store)
        AssertValidEntityQuery(e, store); // the two-store overload, expected to report the World mismatch
}
public void DestroyEntity(EntityQuery entityQuery)
{
    var access = GetCheckedEntityDataAccess();
    var ecs = access->EntityComponentStore;
    var queryImpl = entityQuery._GetImpl();

    Unity.Entities.EntityComponentStore.AssertValidEntityQuery(entityQuery, ecs);
    DestroyEntity(queryImpl->_QueryData->MatchingArchetypes, queryImpl->_Filter);
}
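// Usage sketch: destroy every entity matched by a query in one structural change.
// PendingDestroyTag is a hypothetical tag component.
static void DestroyPending(EntityManager entityManager)
{
    var doomed = entityManager.CreateEntityQuery(ComponentType.ReadOnly<PendingDestroyTag>());
    entityManager.DestroyEntity(doomed); // validates the query's store, then destroys per matching archetype
}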
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true)
    where T : struct, IJobEntityBatch
{
    var queryImpl = query._GetImpl();
    var queryData = queryImpl->_QueryData;

    var cachedChunks = queryData->GetMatchingChunkCache();
    var chunkCount = cachedChunks.Length;

    JobEntityBatchWrapper<T> jobEntityBatchWrapper = new JobEntityBatchWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatch jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        MatchingArchetypes = queryData->MatchingArchetypes,
        CachedChunks = cachedChunks,
        Filter = queryImpl->_Filter,

        JobData = jobData,
        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchWrapper),
        isParallel
            ? JobEntityBatchProducer<T>.InitializeParallel()
            : JobEntityBatchProducer<T>.InitializeSingle(),
        dependsOn,
        mode);

    if (!isParallel)
        return JobsUtility.Schedule(ref scheduleParams);
    else
        return JobsUtility.ScheduleParallelFor(ref scheduleParams, chunkCount * batchesPerChunk, 1);
}
public static unsafe void UnsafeRunJobEntityBatch(void* jobPtr, EntityQuery query, NativeArray<Entity> limitToEntityArray, JobEntityBatchRunWithoutJobSystemDelegateLimitEntities functionPointer)
{
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    var access = query._GetImpl()->_Access;
    try
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange++;
        functionPointer(&query, (Entity*)limitToEntityArray.GetUnsafeReadOnlyPtr(), limitToEntityArray.Length, jobPtr);
    }
    finally
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange--;
    }
#else
    functionPointer(&query, (Entity*)limitToEntityArray.GetUnsafeReadOnlyPtr(), limitToEntityArray.Length, jobPtr);
#endif
}
/// <summary>
/// Runs the job without using the jobs API.
/// </summary>
/// <param name="jobData">The job to execute.</param>
/// <param name="query">The EntityQuery to run over.</param>
/// <param name="limitToEntityArray">A list of entities to limit execution to. Only entities in the list will be processed.</param>
/// <typeparam name="T">The specific IJobEntityBatch implementation type.</typeparam>
public static unsafe void RunWithoutJobs<T>(ref T jobData, EntityQuery query, NativeArray<Entity> limitToEntityArray)
    where T : struct, IJobEntityBatch
{
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    var access = query._GetImpl()->_Access;
    try
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange++;
        RunWithoutJobsInternal(ref jobData, ref query, (Entity*)limitToEntityArray.GetUnsafeReadOnlyPtr(), limitToEntityArray.Length);
    }
    finally
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange--;
    }
#else
    RunWithoutJobsInternal(ref jobData, ref query, (Entity*)limitToEntityArray.GetUnsafeReadOnlyPtr(), limitToEntityArray.Length);
#endif
}
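// A hedged sketch of the entity-array overload above: the job runs inline on the main
// thread, visiting only batches built from the entities in `subset`. The job, method,
// and entity names are hypothetical; the static class is assumed to be
// JobEntityBatchExtensions, as in the package source.
struct TouchSubsetJob : IJobEntityBatch
{
    public void Execute(ArchetypeChunk batchInChunk, int batchIndex)
    {
        // process only the prefiltered batches
    }
}

static void RunOnSubset(EntityQuery query, Entity a, Entity b)
{
    var subset = new NativeArray<Entity>(2, Allocator.Temp);
    subset[0] = a;
    subset[1] = b;

    var job = new TouchSubsetJob();
    JobEntityBatchExtensions.RunWithoutJobs(ref job, query, subset);
    subset.Dispose();
}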
public static unsafe void UnsafeRunJobEntityBatch(void* jobPtr, EntityQuery query, JobChunkRunWithoutJobSystemDelegate functionPointer)
{
    var myIterator = query.GetArchetypeChunkIterator();

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    var access = query._GetImpl()->_Access;
    try
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange++;
        functionPointer(&myIterator, jobPtr);
    }
    finally
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange--;
    }
#else
    functionPointer(&myIterator, jobPtr);
#endif
}
public static unsafe void RunJobChunk<T>(ref T jobData, EntityQuery query, JobChunkRunWithoutJobSystemDelegate functionPointer)
    where T : unmanaged, IJobChunk
{
    var myIterator = query.GetArchetypeChunkIterator();

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    var access = query._GetImpl()->_Access;
    try
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange++;
        functionPointer(&myIterator, UnsafeUtility.AddressOf(ref jobData));
    }
    finally
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange--;
    }
#else
    functionPointer(&myIterator, UnsafeUtility.AddressOf(ref jobData));
#endif
}
/// <summary>
/// Runs the job without using the jobs API.
/// </summary>
/// <param name="jobData">The job to execute.</param>
/// <param name="query">The EntityQuery to run over.</param>
/// <typeparam name="T">The specific IJobEntityBatch implementation type.</typeparam>
public static unsafe void RunWithoutJobs<T>(ref T jobData, EntityQuery query)
    where T : struct, IJobEntityBatch
{
    var myIterator = query.GetArchetypeChunkIterator();

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    var access = query._GetImpl()->_Access;
    try
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange++;
        RunWithoutJobsInternal(ref jobData, ref myIterator);
    }
    finally
    {
        access->DependencyManager->IsInForEachDisallowStructuralChange--;
    }
#else
    RunWithoutJobsInternal(ref jobData, ref myIterator);
#endif
}
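// A hedged sketch of the overload above: executes an IJobEntityBatch inline over every
// chunk the query matches, one batch per chunk, with structural changes disallowed for
// the duration. CountJob and CountEntities are hypothetical; a single-element
// NativeArray stands in as an accumulator so the struct stays unmanaged.
struct CountJob : IJobEntityBatch
{
    public NativeArray<int> Total; // single-element accumulator

    public void Execute(ArchetypeChunk batchInChunk, int batchIndex)
    {
        Total[0] += batchInChunk.Count; // accumulate entity count per batch
    }
}

static void CountEntities(EntityQuery query)
{
    var total = new NativeArray<int>(1, Allocator.Temp);
    var job = new CountJob { Total = total };
    JobEntityBatchExtensions.RunWithoutJobs(ref job, query);
    UnityEngine.Debug.Log($"Matched {total[0]} entities");
    total.Dispose();
}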
// ----------------------------------------------------------------------------------------------------------
// INTERNAL
// ----------------------------------------------------------------------------------------------------------

void MoveEntitiesFromInternalQuery(EntityManager srcEntities, EntityQuery filter, NativeArray<EntityRemapUtility.EntityRemapInfo> entityRemapping)
{
    var srcAccess = srcEntities.GetCheckedEntityDataAccess();
    var selfAccess = GetCheckedEntityDataAccess();

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    if (filter._GetImpl()->_Access != srcAccess)
        throw new ArgumentException("EntityManager.MoveEntitiesFrom failed - srcEntities and filter must belong to the same World.");

    if (srcEntities.m_EntityDataAccess == m_EntityDataAccess)
        throw new ArgumentException("srcEntities must not be the same as this EntityManager.");
#endif

    BeforeStructuralChange();
    srcEntities.BeforeStructuralChange();

    using (var chunks = filter.CreateArchetypeChunkArray(Allocator.TempJob))
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        for (int i = 0; i < chunks.Length; ++i)
        {
            if (chunks[i].m_Chunk->Archetype->HasChunkHeader)
                throw new ArgumentException("MoveEntitiesFrom can not move chunks that contain ChunkHeader components.");
        }
#endif

        var archetypeChanges = selfAccess->EntityComponentStore->BeginArchetypeChangeTracking();

        MoveChunksFromFiltered(chunks, entityRemapping, srcAccess->EntityComponentStore, srcAccess->ManagedComponentStore);

        selfAccess->EntityComponentStore->EndArchetypeChangeTracking(archetypeChanges, selfAccess->EntityQueryManager);
        selfAccess->EntityComponentStore->InvalidateChunkListCacheForChangedArchetypes();
        srcAccess->EntityComponentStore->InvalidateChunkListCacheForChangedArchetypes();
    }
}
public static unsafe T[] ToComponentArray<T>(this EntityQuery group) where T : Component
{
    int entityCount = group.CalculateEntityCount();
    var arr = new T[entityCount];

    var iterator = group.GetArchetypeChunkIterator();
    var indexInEntityQuery = group.GetIndexInEntityQuery(TypeManager.GetTypeIndex<T>());

    var entityCounter = 0;
    var mcs = group._GetImpl()->_Access->ManagedComponentStore;
    while (iterator.MoveNext())
    {
        var chunk = iterator.CurrentArchetypeChunk;
        for (int entityIndex = 0; entityIndex < chunk.Count; ++entityIndex)
        {
            arr[entityCounter++] = (T)iterator.GetManagedObject(mcs, indexInEntityQuery, entityIndex);
        }
    }

    return arr;
}
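// Usage sketch for ToComponentArray: materialize the managed components matched by a
// query into a plain C# array (handy for APIs that want a Transform[]). Note it walks
// every matching chunk synchronously; the surrounding method is hypothetical.
static Transform[] CollectTransforms(EntityManager entityManager)
{
    var withTransforms = entityManager.CreateEntityQuery(typeof(Transform));
    return withTransforms.ToComponentArray<Transform>();
}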
public static unsafe void RunWithoutJobsInternal<T>(ref T jobData, ref EntityQuery query, Entity* limitToEntityArray, int limitToEntityArrayLength)
    where T : struct, IJobEntityBatch
{
    var prebuiltBatchList = new UnsafeList(Allocator.TempJob);

    try
    {
        ChunkIterationUtility.FindFilteredBatchesForEntityArrayWithQuery(
            query._GetImpl(),
            limitToEntityArray, limitToEntityArrayLength,
            ref prebuiltBatchList);

        ArchetypeChunk* batches = (ArchetypeChunk*)prebuiltBatchList.Ptr;
        int batchCount = prebuiltBatchList.Length;

        for (int i = 0; i != batchCount; i++)
        {
            jobData.Execute(batches[i], i);
        }
    }
    finally
    {
        prebuiltBatchList.Dispose();
    }
}
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true)
    where T : struct, IJobEntityBatch
{
    var queryImpl = query._GetImpl();
    var filteredChunkCount = queryImpl->CalculateChunkCount();
    var batches = new NativeArray<ArchetypeChunk>(filteredChunkCount * batchesPerChunk, Allocator.TempJob);

    var prefilterHandle = new PrefilterForJobEntityBatch
    {
        MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
        Filter = queryImpl->_Filter,
        BatchesPerChunk = batchesPerChunk,
        EntityComponentStore = queryImpl->_Access->EntityComponentStore,
        Batches = batches
    }.Schedule(dependsOn);

    JobEntityBatchWrapper<T> jobEntityBatchWrapper = new JobEntityBatchWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatch jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        JobData = jobData,
        Batches = batches,

        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchWrapper),
        isParallel
            ? JobEntityBatchProducer<T>.InitializeParallel()
            : JobEntityBatchProducer<T>.InitializeSingle(),
        prefilterHandle,
        mode);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (!isParallel)
            return JobsUtility.Schedule(ref scheduleParams);
        else
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, filteredChunkCount * batchesPerChunk, 1);
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        prefilterHandle.Complete();
        batches.Dispose();
        throw; // rethrow without resetting the stack trace
    }
#endif
}
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true,
    NativeArray<Entity> limitToEntityArray = default(NativeArray<Entity>))
    where T : struct, IJobEntityBatchWithIndex
{
    var queryImpl = query._GetImpl();
    var queryData = queryImpl->_QueryData;

    var batchCount = 0;
    var filteredChunkCount = 0;
    var useEntityArray = limitToEntityArray.IsCreated;
    var prebuiltBatchList = new UnsafeList(Allocator.TempJob);
    var perBatchMatchingArchetypeIndex = new UnsafeIntList(0, Allocator.TempJob);

    if (useEntityArray)
    {
        // Forces the creation of an EntityQueryMask, which is necessary to filter batches.
        var access = queryImpl->_Access;
        access->EntityQueryManager->GetEntityQueryMask(queryData, access->EntityComponentStore);

        ChunkIterationUtility.FindBatchesForEntityArrayWithQuery(
            queryImpl->_Access->EntityComponentStore,
            queryData,
            ref queryImpl->_Filter,
            (Entity*)limitToEntityArray.GetUnsafePtr(),
            limitToEntityArray.Length,
            ref prebuiltBatchList,
            ref perBatchMatchingArchetypeIndex);

        batchCount = prebuiltBatchList.Length;
    }
    else
    {
        filteredChunkCount = query.CalculateChunkCount();
        batchCount = filteredChunkCount * batchesPerChunk;
    }

    // Allocate one buffer for all prefilter data and distribute it.
    // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion].
    var sizeofBatchArray = sizeof(ArchetypeChunk) * batchCount;
    var sizeofIndexArray = sizeof(int) * batchCount;
    var prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

    var prefilterData = (byte*)Memory.Unmanaged.Allocate(prefilterDataSize, 64, Allocator.TempJob);
    var prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

    var prefilterHandle = dependsOn;
    if (useEntityArray)
    {
        var prefilterJob = new PrefilterForJobEntityBatchWithIndex_EntityArray
        {
            MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
            Filter = queryImpl->_Filter,
            EntityComponentStore = queryImpl->_Access->EntityComponentStore,
            PrefilterData = prefilterData,
            PrebuiltBatches = prebuiltBatchList,
            PerBatchMatchingArchetypeIndex = perBatchMatchingArchetypeIndex
        };

        if (mode != ScheduleMode.Run)
            prefilterHandle = prefilterJob.Schedule(dependsOn);
        else
            prefilterJob.Run();

        prefilterHandle = prebuiltBatchList.Dispose(prefilterHandle);
        prefilterHandle = perBatchMatchingArchetypeIndex.Dispose(prefilterHandle);
    }
    else
    {
        var prefilterJob = new PrefilterForJobEntityBatchWithIndex
        {
            MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
            Filter = queryImpl->_Filter,
            BatchesPerChunk = batchesPerChunk,
            EntityComponentStore = queryImpl->_Access->EntityComponentStore,
            PrefilterData = prefilterData,
            FilteredChunkCount = filteredChunkCount
        };

        if (mode != ScheduleMode.Run)
            prefilterHandle = prefilterJob.Schedule(dependsOn);
        else
            prefilterJob.Run();
    }

    JobEntityBatchIndexWrapper<T> jobEntityBatchIndexWrapper = new JobEntityBatchIndexWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatchWithIndex jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        JobData = jobData,
        PrefilterData = prefilterDataArray,

        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchIndexWrapper),
        isParallel
            ? JobEntityBatchIndexProducer<T>.InitializeParallel()
            : JobEntityBatchIndexProducer<T>.InitializeSingle(),
        prefilterHandle,
        mode);

#if UNITY_DOTSRUNTIME
    // This should just be a call to FinalizeScheduleChecked, but DOTS Runtime requires
    // the JobsUtility calls to be made from this specific function.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (!isParallel)
            return JobsUtility.Schedule(ref scheduleParams);
        else
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1);
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        prefilterHandle.Complete();
        prefilterDataArray.Dispose();
        throw; // rethrow without resetting the stack trace
    }
#endif
#else
    // We can't use try/catch on 2020.2, as this schedule code will be Burst compiled and
    // Burst doesn't support exception handling.
    bool executedManaged = false;
    JobHandle result = default;
    FinalizeScheduleChecked(isParallel, batchCount, prefilterHandle, prefilterDataArray, ref scheduleParams, ref executedManaged, ref result);

    if (executedManaged)
        return result;

    return FinalizeScheduleNoExceptions(isParallel, batchCount, ref scheduleParams);
#endif
}
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true)
    where T : struct, IJobEntityBatchWithIndex
{
    var queryImpl = query._GetImpl();
    var filteredChunkCount = query.CalculateChunkCount();

    // Allocate one buffer for all prefilter data and distribute it.
    // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion].
    var sizeofBatchArray = sizeof(ArchetypeChunk) * filteredChunkCount * batchesPerChunk;
    var sizeofIndexArray = sizeof(int) * filteredChunkCount * batchesPerChunk;
    var prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

    var prefilterData = (byte*)UnsafeUtility.Malloc(prefilterDataSize, 64, Allocator.TempJob);
    var prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

    var prefilterHandle = new PrefilterForJobEntityBatchWithIndex()
    {
        MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
        Filter = queryImpl->_Filter,
        BatchesPerChunk = batchesPerChunk,
        EntityComponentStore = queryImpl->_Access->EntityComponentStore,
        PrefilterData = prefilterData,
        FilteredChunkCount = filteredChunkCount
    }.Schedule(dependsOn);

    JobEntityBatchIndexWrapper<T> jobEntityBatchIndexWrapper = new JobEntityBatchIndexWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatchWithIndex jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        JobData = jobData,
        PrefilterData = prefilterDataArray,

        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchIndexWrapper),
        isParallel
            ? JobEntityBatchIndexProducer<T>.InitializeParallel()
            : JobEntityBatchIndexProducer<T>.InitializeSingle(),
        prefilterHandle,
        mode);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (!isParallel)
            return JobsUtility.Schedule(ref scheduleParams);
        else
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, filteredChunkCount * batchesPerChunk, 1);
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        prefilterHandle.Complete();
        prefilterDataArray.Dispose();
        throw; // rethrow without resetting the stack trace
    }
#endif
}
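// A hedged sketch of the packed prefilter buffer built above. The single allocation is
// laid out as [ArchetypeChunk x batchCount][int x batchCount][int]: the batches, one
// base entity index per batch (feeding indexOfFirstEntityInQuery), and a trailing batch
// count. The real consumer lives in JobEntityBatchIndexProducer and may differ; this
// helper and its names are illustrative only.
static unsafe void UnpackPrefilterData(
    byte* prefilterData, int maxBatchCount,
    out ArchetypeChunk* batches, out int* baseEntityIndices, out int batchCount)
{
    batches = (ArchetypeChunk*)prefilterData;
    baseEntityIndices = (int*)(prefilterData + sizeof(ArchetypeChunk) * maxBatchCount);
    batchCount = *(int*)(prefilterData + sizeof(ArchetypeChunk) * maxBatchCount + sizeof(int) * maxBatchCount);
}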
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true,
    NativeArray<Entity> limitToEntityArray = default(NativeArray<Entity>))
    where T : struct, IJobEntityBatch
{
    var queryImpl = query._GetImpl();
    var queryData = queryImpl->_QueryData;

    var cachedChunks = queryData->GetMatchingChunkCache();
    var chunkCount = cachedChunks.Length;

    var useEntityArray = limitToEntityArray.IsCreated;
    var prebuiltBatchList = default(UnsafeList);
    var perBatchMatchingArchetypeIndex = default(UnsafeIntList);
    var batchCount = chunkCount * batchesPerChunk;

    if (useEntityArray)
    {
        prebuiltBatchList = new UnsafeList(Allocator.TempJob);
        perBatchMatchingArchetypeIndex = new UnsafeIntList(0, Allocator.TempJob);

        // Forces the creation of an EntityQueryMask, which is necessary to filter batches.
        var access = queryImpl->_Access;
        access->EntityQueryManager->GetEntityQueryMask(queryData, access->EntityComponentStore);

        ChunkIterationUtility.FindBatchesForEntityArrayWithQuery(
            queryImpl->_Access->EntityComponentStore,
            queryData,
            ref queryImpl->_Filter,
            (Entity*)limitToEntityArray.GetUnsafePtr(),
            limitToEntityArray.Length,
            ref prebuiltBatchList,
            ref perBatchMatchingArchetypeIndex);

        batchCount = prebuiltBatchList.Length;
    }

    JobEntityBatchWrapper<T> jobEntityBatchWrapper = new JobEntityBatchWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatch jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        MatchingArchetypes = queryData->MatchingArchetypes,
        CachedChunks = cachedChunks,
        Filter = queryImpl->_Filter,

        JobData = jobData,
        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0,

        UsePrebuiltBatchList = useEntityArray ? 1 : 0,
        PrebuiltBatchList = prebuiltBatchList,
        PrebuiltBatchListMatchingArchetypeIndices = perBatchMatchingArchetypeIndex
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchWrapper),
        isParallel
            ? JobEntityBatchProducer<T>.InitializeParallel()
            : JobEntityBatchProducer<T>.InitializeSingle(),
        dependsOn,
        mode);

    var result = default(JobHandle);
    if (!isParallel)
        result = JobsUtility.Schedule(ref scheduleParams);
    else
        result = JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1);

    if (useEntityArray)
    {
        result = prebuiltBatchList.Dispose(result);
        result = perBatchMatchingArchetypeIndex.Dispose(result);
    }

    return result;
}