internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true)
    where T : struct, IJobEntityBatchWithIndex
{
    var queryImpl = query._GetImpl();
    var filteredChunkCount = query.CalculateChunkCount();

    // Allocate one buffer for all prefilter data and distribute it.
    // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion].
    var sizeofBatchArray = sizeof(ArchetypeChunk) * filteredChunkCount * batchesPerChunk;
    var sizeofIndexArray = sizeof(int) * filteredChunkCount * batchesPerChunk;
    var prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

    var prefilterData = (byte*)UnsafeUtility.Malloc(prefilterDataSize, 64, Allocator.TempJob);
    var prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

    var prefilterHandle = new PrefilterForJobEntityBatchWithIndex
    {
        MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
        Filter = queryImpl->_Filter,
        BatchesPerChunk = batchesPerChunk,
        EntityComponentStore = queryImpl->_Access->EntityComponentStore,
        PrefilterData = prefilterData,
        FilteredChunkCount = filteredChunkCount
    }.Schedule(dependsOn);

    JobEntityBatchIndexWrapper<T> jobEntityBatchIndexWrapper = new JobEntityBatchIndexWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatchWithIndex jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        JobData = jobData,
        PrefilterData = prefilterDataArray,
        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchIndexWrapper),
        isParallel
            ? JobEntityBatchIndexProducer<T>.InitializeParallel()
            : JobEntityBatchIndexProducer<T>.InitializeSingle(),
        prefilterHandle,
        mode);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (!isParallel)
        {
            return JobsUtility.Schedule(ref scheduleParams);
        }
        else
        {
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, filteredChunkCount * batchesPerChunk, 1);
        }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        // If scheduling fails, the prefilter job still owns the buffer: wait for it, then free the
        // TempJob allocation manually before rethrowing so it does not leak.
        prefilterHandle.Complete();
        prefilterDataArray.Dispose();
        throw;
    }
#endif
}
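// The prefilter job fills the single buffer allocated above, and the producer later carves it back
// into typed views. A minimal sketch of that carving, assuming the layout implied by the size
// computation (batch array first, then per-batch base entity indices, then one trailing int for the
// number of batches actually written). UnpackPrefilterData is a hypothetical name used here purely
// for illustration; the real producer performs the equivalent pointer arithmetic internally.
internal static unsafe void UnpackPrefilterData(
    byte* prefilterData,
    int batchCapacity, // filteredChunkCount * batchesPerChunk in the method above
    out ArchetypeChunk* batches,
    out int* baseEntityIndices,
    out int filledBatchCount)
{
    // Batches occupy the front of the buffer.
    batches = (ArchetypeChunk*)prefilterData;
    // The per-batch base entity index array follows immediately after the batch array.
    baseEntityIndices = (int*)(prefilterData + sizeof(ArchetypeChunk) * batchCapacity);
    // The final int records how many batches survived filtering.
    filledBatchCount = *(int*)((byte*)baseEntityIndices + sizeof(int) * batchCapacity);
}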
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true,
    NativeArray<Entity> limitToEntityArray = default(NativeArray<Entity>))
    where T : struct, IJobEntityBatchWithIndex
{
    var queryImpl = query._GetImpl();
    var queryData = queryImpl->_QueryData;

    var batchCount = 0;
    var filteredChunkCount = 0;
    var useEntityArray = limitToEntityArray.IsCreated;

    var prebuiltBatchList = new UnsafeList(Allocator.TempJob);
    var perBatchMatchingArchetypeIndex = new UnsafeIntList(0, Allocator.TempJob);

    if (useEntityArray)
    {
        // Forces the creation of an EntityQueryMask, which is necessary to filter batches.
        var access = queryImpl->_Access;
        access->EntityQueryManager->GetEntityQueryMask(queryData, access->EntityComponentStore);

        ChunkIterationUtility.FindBatchesForEntityArrayWithQuery(
            queryImpl->_Access->EntityComponentStore,
            queryData,
            ref queryImpl->_Filter,
            (Entity*)limitToEntityArray.GetUnsafePtr(),
            limitToEntityArray.Length,
            ref prebuiltBatchList,
            ref perBatchMatchingArchetypeIndex);

        batchCount = prebuiltBatchList.Length;
    }
    else
    {
        filteredChunkCount = query.CalculateChunkCount();
        batchCount = filteredChunkCount * batchesPerChunk;
    }

    // Allocate one buffer for all prefilter data and distribute it.
    // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion].
    var sizeofBatchArray = sizeof(ArchetypeChunk) * batchCount;
    var sizeofIndexArray = sizeof(int) * batchCount;
    var prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

    var prefilterData = (byte*)Memory.Unmanaged.Allocate(prefilterDataSize, 64, Allocator.TempJob);
    var prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

    var prefilterHandle = dependsOn;
    if (useEntityArray)
    {
        var prefilterJob = new PrefilterForJobEntityBatchWithIndex_EntityArray
        {
            MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
            Filter = queryImpl->_Filter,
            EntityComponentStore = queryImpl->_Access->EntityComponentStore,
            PrefilterData = prefilterData,
            PrebuiltBatches = prebuiltBatchList,
            PerBatchMatchingArchetypeIndex = perBatchMatchingArchetypeIndex
        };

        if (mode != ScheduleMode.Run)
        {
            prefilterHandle = prefilterJob.Schedule(dependsOn);
        }
        else
        {
            prefilterJob.Run();
        }

        prefilterHandle = prebuiltBatchList.Dispose(prefilterHandle);
        prefilterHandle = perBatchMatchingArchetypeIndex.Dispose(prefilterHandle);
    }
    else
    {
        var prefilterJob = new PrefilterForJobEntityBatchWithIndex
        {
            MatchingArchetypes = queryImpl->_QueryData->MatchingArchetypes,
            Filter = queryImpl->_Filter,
            BatchesPerChunk = batchesPerChunk,
            EntityComponentStore = queryImpl->_Access->EntityComponentStore,
            PrefilterData = prefilterData,
            FilteredChunkCount = filteredChunkCount
        };

        if (mode != ScheduleMode.Run)
        {
            prefilterHandle = prefilterJob.Schedule(dependsOn);
        }
        else
        {
            prefilterJob.Run();
        }
    }

    JobEntityBatchIndexWrapper<T> jobEntityBatchIndexWrapper = new JobEntityBatchIndexWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatchWithIndex jobs have an EntityManager safety handle to ensure that BeforeStructuralChange
        // throws an error if jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        JobData = jobData,
        PrefilterData = prefilterDataArray,
        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchIndexWrapper),
        isParallel
            ? JobEntityBatchIndexProducer<T>.InitializeParallel()
            : JobEntityBatchIndexProducer<T>.InitializeSingle(),
        prefilterHandle,
        mode);

#if UNITY_DOTSRUNTIME
    // This should just be a call to FinalizeScheduleChecked, but DOTS Runtime requires the
    // JobsUtility calls to be in this specific function.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (!isParallel)
        {
            return JobsUtility.Schedule(ref scheduleParams);
        }
        else
        {
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1);
        }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        prefilterHandle.Complete();
        prefilterDataArray.Dispose();
        throw;
    }
#endif
#else
    // We can't use try/catch with 2020.2, as we will be Burst-compiling this schedule code and
    // Burst doesn't support exception handling.
    bool executedManaged = false;
    JobHandle result = default;
    FinalizeScheduleChecked(isParallel, batchCount, prefilterHandle, prefilterDataArray, ref scheduleParams, ref executedManaged, ref result);

    if (executedManaged)
    {
        return result;
    }

    return FinalizeScheduleNoExceptions(isParallel, batchCount, ref scheduleParams);
#endif
}
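// A sketch of a typical caller, assuming the public JobEntityBatchIndexExtensions entry points
// (Schedule/ScheduleParallel/Run) that forward to the ScheduleInternal overloads above; the exact
// extension overloads vary by package version, and CountYJob below is illustrative only, not part
// of this file.
//
//     [BurstCompile]
//     struct CountYJob : IJobEntityBatchWithIndex
//     {
//         [ReadOnly] public ComponentTypeHandle<Translation> TranslationHandle;
//         [NativeDisableParallelForRestriction] public NativeArray<float> Results;
//
//         public void Execute(ArchetypeChunk batchInChunk, int batchIndex, int indexOfFirstEntityInQuery)
//         {
//             var translations = batchInChunk.GetNativeArray(TranslationHandle);
//             for (int i = 0; i < batchInChunk.Count; i++)
//             {
//                 // indexOfFirstEntityInQuery is the per-batch base index computed up front by the
//                 // prefilter job, so each entity can be addressed by its position within the
//                 // filtered query results (here, writing into a query-sized output array).
//                 Results[indexOfFirstEntityInQuery + i] = translations[i].Value.y;
//             }
//         }
//     }
//
//     // In a system update:
//     // Dependency = new CountYJob { /* ... */ }.ScheduleParallel(m_Query, Dependency);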