internal static unsafe JobHandle ScheduleInternal <T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    // Wrap the user job and a live-filtering chunk iterator into the payload
    // the job system will hand to JobChunkLiveFilter_Process<T>.
    ComponentChunkIterator chunkIterator = group.GetComponentChunkIterator();
    var wrappedData = new JobDataLiveFilter <T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobChunk jobs have a safety handle for the Entity type to ensure that BeforeStructuralChange throws an error if
        // jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = group.SafetyManager.GetSafetyHandle(TypeManager.GetTypeIndex <Entity>(), true) },
#endif
        data = jobData,
        iterator = chunkIterator,
    };

    // One parallel-for work item per unfiltered chunk; the filter is applied
    // live inside the job rather than during scheduling.
    var chunkCount = group.CalculateNumberOfChunksWithoutFiltering();
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref wrappedData),
        JobChunkLiveFilter_Process <T> .Initialize(),
        dependsOn,
        mode);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, chunkCount, 1);
}
// Schedules an IJobChunk against a prefiltered chunk list.
// Fix: the catch block used `throw e;`, which resets the exception's stack trace;
// `throw;` rethrows the original exception with its trace intact.
internal static unsafe JobHandle ScheduleInternal <T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    ComponentChunkIterator iterator = group.GetComponentChunkIterator();
    var unfilteredChunkCount = group.CalculateNumberOfChunksWithoutFiltering();

    // Kick off a prefilter job that builds the list of chunks matching the group's
    // filter; the chunk job is scheduled against its output (prefilterData holds the
    // list, deferredCountData the eventual chunk count).
    var prefilterHandle = ComponentChunkIterator.PreparePrefilteredChunkLists(unfilteredChunkCount, iterator.m_MatchingArchetypeList, iterator.m_Filter, dependsOn, mode, out var prefilterData, out var deferredCountData);

    JobChunkData <T> fullData = new JobChunkData <T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobChunk jobs have a EntityManager safety handle to ensure that BeforeStructuralChange throws an error if
        // jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = group.SafetyManager.GetEntityManagerSafetyHandle() },
#endif
        Data = jobData,
        PrefilterData = prefilterData,
    };

    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref fullData),
        JobChunk_Process <T> .Initialize(),
        prefilterHandle,
        mode);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
        if (mode == ScheduleMode.Batched)
        {
            // Batched mode: the chunk count is produced by the prefilter job,
            // so the parallel-for size must be deferred.
            return JobsUtility.ScheduleParallelForDeferArraySize(ref scheduleParams, 1, deferredCountData, null);
        }
        else
        {
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, unfilteredChunkCount, 1);
        }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        // Scheduling failed, so the job will never run and never dispose this buffer.
        prefilterData.Dispose();
        throw; // was `throw e;` — that would have discarded the original stack trace
    }
#endif
}
internal static unsafe JobHandle ScheduleInternal <T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    // Bundle the user job with a live-filtering iterator over the group's chunks.
    ComponentChunkIterator chunkIterator;
    group.GetComponentChunkIterator(out chunkIterator);
    var wrapped = new JobDataLiveFilter <T>
    {
        data = jobData,
        iterator = chunkIterator
    };

    // One work item per unfiltered chunk; filtering happens inside the job.
    var chunkCount = group.CalculateNumberOfChunksWithoutFiltering();
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref wrapped),
        JobChunkLiveFilter_Process <T> .Initialize(),
        dependsOn,
        mode);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, chunkCount, 1);
}
internal static unsafe JobHandle ScheduleInternal <T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    // Pair the user job with a live-filtering iterator over the group's chunks.
    ComponentChunkIterator chunkIterator = group.GetComponentChunkIterator();
    var wrapped = new JobDataLiveFilter <T>
    {
        data = jobData,
        iterator = chunkIterator,
    };

    // Schedule one parallel-for work item per unfiltered chunk; the filter
    // is evaluated inside the job itself.
    var chunkCount = group.CalculateNumberOfChunksWithoutFiltering();
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref wrapped),
        JobChunkLiveFilter_Process <T> .Initialize(),
        dependsOn,
        mode);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, chunkCount, 1);
}
// Resolves (and caches) the job-reflection data and ComponentGroup for an
// IJobProcessComponentData job, then fills the ProcessIterationData iterator:
// per-type read-only flags (IsReadOnly0..3), per-type indices in the group
// (IndexInGroup0..3), the chunk count / min-max range, and the safety-handle
// array (m_Safety0..3) split into a read-only prefix and read-write suffix.
// NOTE(review): this is decompiler output — the unrolled `while (true)` loops,
// `fixed (int* x = null)` pinning stubs, and `(int *)ref` casts are lowered
// compiler artifacts, not hand-written C#. The statement order is load-bearing,
// so the code below is left byte-identical; only comments were added.
internal static unsafe void Initialize(ComponentSystemBase system, Type jobType, Type wrapperJobType, bool isParallelFor, ref JobProcessComponentDataCache cache, out ProcessIterationData iterator) { int num; int num1; if (!isParallelFor || !(cache.JobReflectionDataParallelFor == IntPtr.Zero)) { num1 = isParallelFor ? 0 : ((int)(cache.JobReflectionData == IntPtr.Zero)); } else { num1 = 1; } if (num1 != 0) { Type iJobProcessComponentDataInterface = GetIJobProcessComponentDataInterface(jobType); if (cache.Types == null) { cache.Types = GetComponentTypes(jobType, iJobProcessComponentDataInterface, out cache.ProcessTypesCount, out cache.FilterChanged); } IntPtr ptr = GetJobReflection(jobType, wrapperJobType, iJobProcessComponentDataInterface, isParallelFor); if (isParallelFor) { cache.JobReflectionDataParallelFor = ptr; } else { cache.JobReflectionData = ptr; } } if (cache.ComponentSystem != system) { cache.ComponentGroup = system.GetComponentGroupInternal(cache.Types); if (cache.FilterChanged.Length != 0) { cache.ComponentGroup.SetFilterChanged(cache.FilterChanged); } else { cache.ComponentGroup.ResetFilter(); } cache.ComponentSystem = system; } ComponentGroup componentGroup = cache.ComponentGroup; iterator.IsReadOnly3 = num = 0; iterator.IsReadOnly2 = num = num; iterator.IsReadOnly1 = num = num; iterator.IsReadOnly0 = num; int *numPtr = &iterator.IsReadOnly0; int index = 0; while (true) { if (index == cache.ProcessTypesCount) { fixed(int *numRef = null) { componentGroup.GetComponentChunkIterator(out iterator.Iterator); iterator.IndexInGroup3 = num = -1; iterator.IndexInGroup2 = num = num; iterator.IndexInGroup0 = iterator.IndexInGroup1 = num; int *numPtr2 = &iterator.IndexInGroup0; int num3 = 0; while (true) { if (num3 == cache.ProcessTypesCount) { fixed(int *numRef2 = null) { iterator.m_IsParallelFor = isParallelFor; iterator.m_Length = componentGroup.CalculateNumberOfChunksWithoutFiltering(); iterator.m_MaxIndex = iterator.m_Length - 1; iterator.m_MinIndex = 0; 
// Continuation of Initialize: zero-initializes m_Safety0..3, then walks the cached
// component types twice — first collecting read-only safety handles into the front
// of the handle array (incrementing m_SafetyReadOnlyCount), then appending
// read-write handles after them (incrementing m_SafetyReadWriteCount) — and finally
// asserts the two counts together cover every processed component type.
AtomicSafetyHandle handle = new AtomicSafetyHandle(); iterator.m_Safety3 = handle = handle; iterator.m_Safety2 = handle = handle; iterator.m_Safety0 = iterator.m_Safety1 = handle; iterator.m_SafetyReadOnlyCount = 0; AtomicSafetyHandle *handlePtr = &iterator.m_Safety0; int num4 = 0; while (true) { if (num4 == cache.ProcessTypesCount) { fixed(AtomicSafetyHandle *handleRef = null) { iterator.m_SafetyReadWriteCount = 0; AtomicSafetyHandle *handlePtr2 = &iterator.m_Safety0; int num5 = 0; while (true) { if (num5 == cache.ProcessTypesCount) { fixed(AtomicSafetyHandle *handleRef2 = null) { Assert.AreEqual(cache.ProcessTypesCount, iterator.m_SafetyReadWriteCount + iterator.m_SafetyReadOnlyCount); return; } } if (cache.Types[num5].AccessModeType == ComponentType.AccessMode.ReadWrite) { handlePtr2[iterator.m_SafetyReadOnlyCount + iterator.m_SafetyReadWriteCount] = componentGroup.GetSafetyHandle(componentGroup.GetIndexInComponentGroup(cache.Types[num5].TypeIndex)); int *numPtr1 = (int *)ref iterator.m_SafetyReadWriteCount; numPtr1[0]++; } num5++; } } } if (cache.Types[num4].AccessModeType == ComponentType.AccessMode.ReadOnly) { handlePtr[iterator.m_SafetyReadOnlyCount] = componentGroup.GetSafetyHandle(componentGroup.GetIndexInComponentGroup(cache.Types[num4].TypeIndex)); int *numPtr3 = (int *)ref iterator.m_SafetyReadOnlyCount; numPtr3[0]++; } num4++; } } } numPtr2[num3] = componentGroup.GetIndexInComponentGroup(cache.Types[num3].TypeIndex); num3++; } } } numPtr[index] = (cache.Types[index].AccessModeType == ComponentType.AccessMode.ReadOnly) ? 1 : 0; index++; } }