// Schedules an IJobParallelForDefer job whose iteration count is not known at
// schedule time: the length is read through arrayLengthPtr when the job runs.
// NOTE(review): the Malloc'ed copy of the wrapper is presumably freed by the
// job system after completion — confirm ownership before changing allocator.
static unsafe JobHandle Schedule<T>(ref T jobData, IntPtr arrayLengthPtr, int innerloopBatchCount, JobHandle dependsOn) where T : struct, IJobParallelForDefer
{
#if UNITY_SINGLETHREADED_JOBS
    // Single-threaded build: resolve the deferred length now and execute every
    // index inline on the calling thread.
    var arrayLength = UnsafeUtility.AsRef<int>(arrayLengthPtr.ToPointer());
    for (var i = 0; i < arrayLength; ++i)
    {
        jobData.Execute(i);
    }
    // Mimic the job system's end-of-job cleanup for deallocate-on-completion fields.
    DoDeallocateOnJobCompletion(jobData);
    return (new JobHandle());
#else
    var jobStruct = new JobStructDefer<T>()
    {
        JobData = jobData,
        ArrayLengthPtr = arrayLengthPtr,
    };
    // Copy the wrapper into unmanaged memory so its lifetime is independent of
    // this stack frame.
    var jobDataPtr = UnsafeUtility.Malloc(UnsafeUtility.SizeOf<JobStructDefer<T>>(),
        UnsafeUtility.AlignOf<JobStructDefer<T>>(), Allocator.TempJob);
    UnsafeUtility.CopyStructureToPtr(ref jobStruct, jobDataPtr);
    var scheduleParams = new JobsUtility.JobScheduleParameters(jobDataPtr, JobStructDefer<T>.Initialize(), dependsOn, ScheduleMode.Batched);
    // NOTE(review): the length passed here is JobQueueThreadCount, not the
    // deferred array length — presumably the producer reads ArrayLengthPtr at
    // execution time; confirm against JobStructDefer<T>.
    return (JobsUtility.ScheduleParallelFor(ref scheduleParams, JobsUtility.JobQueueThreadCount, innerloopBatchCount));
#endif
}
/// <summary>Executes an IJobTimeSlice job synchronously on the calling thread.</summary>
/// <param name="jobData">Job instance to execute.</param>
/// <param name="dependency">Handle that must complete before the job runs.</param>
public static unsafe void Run<T>(this T jobData, JobHandle dependency = default) where T : struct, IJobTimeSlice
{
    var total = Amount(jobData);
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        JobTimeSliceStruct<T>.Initialize(),
        dependency,
        ScheduleMode.Run);
    // A single batch covering the whole range executes immediately.
    JobsUtility.ScheduleParallelFor(ref runParams, total, total);
}
// Schedules an IJobParallelForBatch over arrayLength items, handing ranges of
// at least minIndicesPerJobCount indices to each Execute(start, count) call.
public static unsafe JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
#if UNITY_SINGLETHREADED_JOBS
    // Single-threaded build: run the whole range as one batch, inline.
    jobData.Execute(0, arrayLength);
    // Mimic the job system's end-of-job cleanup for deallocate-on-completion fields.
    DoDeallocateOnJobCompletion(jobData);
    return (new JobHandle());
#elif UNITY_DOTSPLAYER
    // DOTS player: copy the job into unmanaged memory and schedule through the
    // portable job system.
    var jobStruct = new ParallelForBatchJobStruct<T>()
    {
        JobData = jobData,
        Ranges = new JobRanges()
        {
            ArrayLength = arrayLength,
            IndicesPerPhase = JobsUtility.GetDefaultIndicesPerPhase(arrayLength)
        },
    };
    // NOTE(review): presumably freed by the job system after completion.
    var jobDataPtr = UnsafeUtility.Malloc(UnsafeUtility.SizeOf<ParallelForBatchJobStruct<T>>(),
        UnsafeUtility.AlignOf<ParallelForBatchJobStruct<T>>(), Allocator.TempJob);
    UnsafeUtility.CopyStructureToPtr(ref jobStruct, jobDataPtr);
    var scheduleParams = new JobsUtility.JobScheduleParameters(jobDataPtr, ParallelForBatchJobStruct<T>.Initialize(), dependsOn, ScheduleMode.Batched);
    return (JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, minIndicesPerJobCount));
#else
    // Standard path: the job system copies jobData from this stack address.
    var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobData), ParallelForBatchJobStruct<T>.Initialize(), dependsOn, ScheduleMode.Batched);
    return (JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, minIndicesPerJobCount));
#endif
}
/// <summary>Schedules an IJobTimeSlice to run across worker threads.</summary>
/// <param name="jobData">Job instance to schedule.</param>
/// <param name="batchSize">Number of items handled per work-stealing batch.</param>
/// <param name="dependency">Handle that must complete before the job runs.</param>
/// <returns>Handle for the scheduled job.</returns>
public static unsafe JobHandle ScheduleParallel<T>(this T jobData, int batchSize = 1, JobHandle dependency = default) where T : struct, IJobTimeSlice
{
    var total = Amount(jobData);
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        JobTimeSliceStruct<T>.Initialize(),
        dependency,
        ScheduleMode.Parallel);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, total, batchSize);
}
// Schedules an IJobChunk over every chunk matched by the group, one chunk per
// parallel-for index (inner batch count of 1).
internal static unsafe JobHandle ScheduleInternal<T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    ComponentChunkIterator iterator = group.GetComponentChunkIterator();
    JobDataLiveFilter<T> fullData = new JobDataLiveFilter<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobChunk jobs have a safety handle for the Entity type to ensure that BeforeStructuralChange throws an error if
        // jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = group.SafetyManager.GetSafetyHandle(TypeManager.GetTypeIndex<Entity>(), true) },
#endif
        data = jobData,
        iterator = iterator,
    };
    // Chunk count deliberately ignores the group's filter; live filtering
    // happens inside the job (hence "LiveFilter" in the producer name).
    var totalChunks = group.CalculateNumberOfChunksWithoutFiltering();
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref fullData),
        JobChunkLiveFilter_Process<T>.Initialize(),
        dependsOn,
        mode);
    return (JobsUtility.ScheduleParallelFor(ref scheduleParams, totalChunks, 1));
}
// Shared scheduling back-end for the IJobForEach paths: picks between the
// single-job, parallel-for, and deferred-array-size entry points.
// Fix: rethrow with `throw;` instead of `throw e;` so the original stack
// trace is preserved when safety checks reject the schedule (CA2200).
static unsafe JobHandle Schedule(void *fullData, NativeArray<byte> prefilterData, int unfilteredLength, int innerloopBatchCount, bool isParallelFor, bool isFiltered, ref JobForEachCache cache, void *deferredCountData, JobHandle dependsOn, ScheduleMode mode)
{
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
    if (isParallelFor)
    {
        var scheduleParams = new JobsUtility.JobScheduleParameters(fullData, cache.JobReflectionDataParallelFor, dependsOn, mode);
        if (isFiltered)
        {
            // Filtered: the element count is produced by the prefilter job and
            // is only known when this job actually runs.
            return JobsUtility.ScheduleParallelForDeferArraySize(ref scheduleParams, innerloopBatchCount, deferredCountData, null);
        }
        else
        {
            return JobsUtility.ScheduleParallelFor(ref scheduleParams, unfilteredLength, innerloopBatchCount);
        }
    }
    else
    {
        var scheduleParams = new JobsUtility.JobScheduleParameters(fullData, cache.JobReflectionData, dependsOn, mode);
        return JobsUtility.Schedule(ref scheduleParams);
    }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        // Scheduling was rejected; free the prefilter buffer we would
        // otherwise leak, then rethrow without resetting the stack trace.
        prefilterData.Dispose();
        throw;
    }
#endif
}
/// <summary>Runs an IJobParallelForBatch synchronously as one whole-range batch.</summary>
/// <param name="jobData">Job instance to execute.</param>
/// <param name="arrayLength">Number of items to process.</param>
public static unsafe void RunBatch<T>(this T jobData, int arrayLength) where T : struct, IJobParallelForBatch
{
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        JobParallelForBatchProducer<T>.Initialize(),
        new JobHandle(),
        ScheduleMode.Run);
    // A single batch spanning the entire range executes on this thread.
    JobsUtility.ScheduleParallelFor(ref runParams, arrayLength, arrayLength);
}
/// <summary>Runs an IJobParallelForBatch synchronously on the calling thread.</summary>
/// <param name="jobData">Job instance to execute.</param>
/// <param name="arrayLength">Number of items to process.</param>
public static unsafe void RunBatch<T>(this T jobData, int arrayLength) where T : struct, IJobParallelForBatch
{
#if UNITY_DOTSPLAYER
    // DOTS player path: schedule one whole-range batch and block until done.
    ScheduleBatch(jobData, arrayLength, arrayLength).Complete();
#else
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        ParallelForBatchJobStruct<T>.Initialize(),
        new JobHandle(),
        ScheduleMode.Run);
    // One batch covering the full range runs immediately.
    JobsUtility.ScheduleParallelFor(ref runParams, arrayLength, arrayLength);
#endif
}
/// <summary>Runs an IJobParallelForBurstSchedulable job synchronously.</summary>
/// <param name="jobData">Job instance to execute.</param>
/// <param name="arrayLength">Number of indices to process.</param>
internal static unsafe void Run<T>(this T jobData, int arrayLength) where T : struct, IJobParallelForBurstSchedulable
{
    // Reflection data is produced by codegen; validate before scheduling.
    var reflectionData = ParallelForJobStructBurstSchedulable<T>.jobReflectionData.Data;
    CheckReflectionDataCorrect(reflectionData);
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        reflectionData,
        new JobHandle(),
        ScheduleMode.Run);
    // One batch spanning the whole range executes inline.
    JobsUtility.ScheduleParallelFor(ref runParams, arrayLength, arrayLength);
}
/// <summary>
/// Schedules an IJobFor. True single-threaded scheduling (ScheduleMode.Single)
/// is not implemented here — see https://unity3d.atlassian.net/browse/DOTSR-1888.
/// Instead the job is scheduled as a parallel-for whose one batch spans the
/// whole range. IJobChunk uses both JobsUtility.ScheduleParallelFor and
/// JobsUtility.Schedule, so Single could be implemented, but that would bring
/// this rarely-used class further out of sync with the Unity.Runtime version,
/// where the better fix is to implement ScheduleMode.Single.
/// </summary>
public static unsafe JobHandle Schedule<T>(this T jobData, int arrayLength, JobHandle dependency) where T : struct, IJobFor
{
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        JobProducer<T>.Initialize(),
        dependency,
        ScheduleMode.Parallel);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, arrayLength);
}
/// <summary>Schedules an IJobParallelForBatch across worker threads.</summary>
/// <param name="jobData">Job instance to schedule.</param>
/// <param name="arrayLength">Total number of indices.</param>
/// <param name="minIndicesPerJobCount">Minimum indices handed to each batch.</param>
/// <param name="dependsOn">Handle that must complete before this job runs.</param>
/// <returns>Handle for the scheduled job.</returns>
public static unsafe JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
    var batchParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        JobParallelForBatchProducer<T>.Initialize(),
        dependsOn,
        ScheduleMode.Batched);
    return JobsUtility.ScheduleParallelFor(ref batchParams, arrayLength, minIndicesPerJobCount);
}
// Final dispatch step: parallel jobs go through ScheduleParallelFor with an
// inner batch count of 1; single jobs use the plain Schedule path.
private static unsafe JobHandle FinalizeScheduleNoExceptions(bool isParallel, int batchCount, ref JobsUtility.JobScheduleParameters scheduleParams)
{
    return isParallel
        ? JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1)
        : JobsUtility.Schedule(ref scheduleParams);
}
/// <summary>Schedules an IJobParallelForBurstSchedulable across worker threads.</summary>
/// <param name="jobData">Job instance to schedule.</param>
/// <param name="arrayLength">Total number of indices.</param>
/// <param name="innerloopBatchCount">Indices handled per work-stealing batch.</param>
/// <param name="dependsOn">Handle that must complete before this job runs.</param>
/// <returns>Handle for the scheduled job.</returns>
internal static unsafe JobHandle Schedule<T>(this T jobData, int arrayLength, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBurstSchedulable
{
    // Reflection data is produced by codegen; validate before scheduling.
    var reflectionData = ParallelForJobStructBurstSchedulable<T>.jobReflectionData.Data;
    CheckReflectionDataCorrect(reflectionData);
#if UNITY_2020_2_OR_NEWER || UNITY_DOTSRUNTIME
    // ScheduleMode.Batched was renamed to Parallel in Unity 2020.2.
    const ScheduleMode scheduleMode = ScheduleMode.Parallel;
#else
    const ScheduleMode scheduleMode = ScheduleMode.Batched;
#endif
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData), reflectionData, dependsOn, scheduleMode);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, innerloopBatchCount);
}
/// <summary>Schedules an IJobParallelForBatched job across worker threads.</summary>
/// <param name="jobData">Job instance to schedule.</param>
/// <param name="arrayLength">Total number of indices.</param>
/// <param name="minIndicesPerJobCount">Minimum indices handed to each batch.</param>
/// <param name="dependsOn">Handle that must complete before this job runs.</param>
/// <returns>Handle for the scheduled job.</returns>
public static unsafe JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatched
{
#if UNITY_2020_2_OR_NEWER
    // ScheduleMode.Batched was renamed to Parallel in Unity 2020.2.
    const ScheduleMode scheduleMode = ScheduleMode.Parallel;
#else
    const ScheduleMode scheduleMode = ScheduleMode.Batched;
#endif
    var batchParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        ParallelForBatchJobStruct<T>.Initialize(),
        dependsOn,
        scheduleMode);
    return JobsUtility.ScheduleParallelFor(ref batchParams, arrayLength, minIndicesPerJobCount);
}
/// <summary>Schedules an IJobParallelForBurstScheduable job.</summary>
/// <exception cref="InvalidOperationException">
/// Thrown when code generation did not initialize the job's reflection data.
/// </exception>
public static unsafe JobHandle Schedule<T>(this T jobData, int arrayLength, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBurstScheduable
{
    // Guard: reflection data must have been filled in by codegen.
    var jobReflection = ParallelForJobStructBurstScheduable<T>.jobReflectionData.Data;
    if (jobReflection == IntPtr.Zero)
    {
        throw new InvalidOperationException("Reflection data was not set up by code generation");
    }
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        jobReflection,
        dependsOn,
        ScheduleMode.Batched);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, innerloopBatchCount);
}
/// <summary>Runs an IJobParallelForBurstScheduable job synchronously.</summary>
/// <exception cref="InvalidOperationException">
/// Thrown when code generation did not initialize the job's reflection data.
/// </exception>
public static unsafe void Run<T>(this T jobData, int arrayLength) where T : struct, IJobParallelForBurstScheduable
{
    // Guard: reflection data must have been filled in by codegen.
    var jobReflection = ParallelForJobStructBurstScheduable<T>.jobReflectionData.Data;
    if (jobReflection == IntPtr.Zero)
    {
        throw new InvalidOperationException("Reflection data was not set up by code generation");
    }
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        jobReflection,
        new JobHandle(),
        ScheduleMode.Run);
    // One batch spanning the whole range executes inline.
    JobsUtility.ScheduleParallelFor(ref runParams, arrayLength, arrayLength);
}
// Schedules an IJobChunk over the chunks matched by the query. In Batched
// mode the real chunk count comes from the prefilter job at run time via
// ScheduleParallelForDeferArraySize; otherwise the unfiltered count is used.
// Fix: rethrow with `throw;` instead of `throw e;` so the original stack
// trace is preserved when safety checks reject the schedule (CA2200).
internal static unsafe JobHandle ScheduleInternal<T>(ref T jobData, EntityQuery query, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    ComponentChunkIterator iterator = query.GetComponentChunkIterator();
    var unfilteredChunkCount = query.CalculateChunkCountWithoutFiltering();
    // Kick off (or run inline) the prefilter pass that builds the filtered
    // chunk list and deferred chunk count.
    var prefilterHandle = ComponentChunkIterator.PreparePrefilteredChunkLists(unfilteredChunkCount, iterator.m_MatchingArchetypeList, iterator.m_Filter, dependsOn, mode, out var prefilterData, out var deferredCountData);
    JobChunkData<T> fullData = new JobChunkData<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobChunk jobs have a EntityManager safety handle to ensure that BeforeStructuralChange throws an error if
        // jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = query.SafetyManager->GetEntityManagerSafetyHandle() },
#endif
        Data = jobData,
        PrefilterData = prefilterData,
    };
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref fullData),
        JobChunk_Process<T>.Initialize(),
        prefilterHandle,
        mode);
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    try
    {
#endif
    if (mode == ScheduleMode.Batched)
    {
        // Deferred length: the prefilter job writes the real chunk count.
        return JobsUtility.ScheduleParallelForDeferArraySize(ref scheduleParams, 1, deferredCountData, null);
    }
    else
    {
        var count = unfilteredChunkCount;
        return JobsUtility.ScheduleParallelFor(ref scheduleParams, count, 1);
    }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
    }
    catch (InvalidOperationException)
    {
        // Scheduling failed; free the prefilter buffer and rethrow without
        // resetting the original stack trace.
        prefilterData.Dispose();
        throw;
    }
#endif
}
// Schedules an IJobEntityBatch over the query's cached chunk list, splitting
// each chunk into batchesPerChunk batches. isParallel selects between the
// parallel-for and single-job scheduling paths.
// NOTE(review): despite the comment below, there is no actual early-out when
// chunkCount == 0 — the job is scheduled regardless; confirm intent.
internal static unsafe JobHandle ScheduleInternal<T>(
    ref T jobData,
    EntityQuery query,
    JobHandle dependsOn,
    ScheduleMode mode,
    int batchesPerChunk,
    bool isParallel = true)
    where T : struct, IJobEntityBatch
{
    var queryImpl = query._GetImpl();
    var queryData = queryImpl->_QueryData;
    var cachedChunks = queryData->GetMatchingChunkCache();

    // Don't schedule the job if there are no chunks to work on
    var chunkCount = cachedChunks.Length;

    JobEntityBatchWrapper<T> jobEntityBatchWrapper = new JobEntityBatchWrapper<T>
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // All IJobEntityBatch jobs have a EntityManager safety handle to ensure that BeforeStructuralChange throws an error if
        // jobs without any other safety handles are still running (haven't been synced).
        safety = new EntitySafetyHandle { m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle() },
#endif
        MatchingArchetypes = queryData->MatchingArchetypes,
        CachedChunks = cachedChunks,
        Filter = queryImpl->_Filter,
        JobData = jobData,
        JobsPerChunk = batchesPerChunk,
        IsParallel = isParallel ? 1 : 0
    };
    // Each scheduling path has its own producer reflection data.
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobEntityBatchWrapper),
        isParallel
        ? JobEntityBatchProducer<T>.InitializeParallel()
        : JobEntityBatchProducer<T>.InitializeSingle(),
        dependsOn,
        mode);
    if (!isParallel)
    {
        return (JobsUtility.Schedule(ref scheduleParams));
    }
    else
    {
        // One parallel-for index per batch across all chunks.
        return (JobsUtility.ScheduleParallelFor(ref scheduleParams, chunkCount * batchesPerChunk, 1));
    }
}
/// <summary>Schedules an IJobProcessEntities job over the entities of the array.</summary>
/// <param name="jobData">Job instance to schedule.</param>
/// <param name="array">Entity array providing the iteration data.</param>
/// <param name="innerloopBatchCount">Entities handled per work-stealing batch.</param>
/// <param name="dependsOn">Handle that must complete before this job runs.</param>
/// <returns>Handle for the scheduled job.</returns>
public static unsafe JobHandle Schedule<T, U0>(this T jobData, ComponentGroupArray<U0> array, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobProcessEntities<U0> where U0 : struct
{
    var fullData = new JobStruct<T, U0>
    {
        Data = jobData,
        Array = array.m_Data
    };
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref fullData),
        JobStruct<T, U0>.Initialize(),
        dependsOn,
        ScheduleMode.Batched);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, array.Length, innerloopBatchCount);
}
// Schedules an IJobChunk over every chunk of the group (one chunk per
// parallel-for index). Variant using the out-parameter iterator API; unlike
// the checked variant, no entity safety handle is attached here.
internal static unsafe JobHandle ScheduleInternal<T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    ComponentChunkIterator iterator;
    group.GetComponentChunkIterator(out iterator);
    JobDataLiveFilter<T> output = new JobDataLiveFilter<T>
    {
        data = jobData,
        iterator = iterator
    };
    JobsUtility.JobScheduleParameters parameters = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf<JobDataLiveFilter<T>>(ref output), JobChunkLiveFilter_Process<T>.Initialize(), dependsOn, mode);
    // Chunk count ignores the group's filter; filtering happens inside the job.
    return (JobsUtility.ScheduleParallelFor(ref parameters, group.CalculateNumberOfChunksWithoutFiltering(), 1));
}
/// <summary>Runs an IJobProcessEntities job synchronously over the array.</summary>
/// <param name="jobData">Job instance to execute.</param>
/// <param name="array">Entity array providing the iteration data.</param>
public static unsafe void Run<T, U0>(this T jobData, ComponentGroupArray<U0> array) where T : struct, IJobProcessEntities<U0> where U0 : struct
{
    var fullData = new JobStruct<T, U0>
    {
        Data = jobData,
        Array = array.m_Data
    };
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref fullData),
        JobStruct<T, U0>.Initialize(),
        new JobHandle(),
        ScheduleMode.Run);
    // A single batch spanning every entity executes inline.
    var count = array.Length;
    JobsUtility.ScheduleParallelFor(ref runParams, count, count);
}
/// <summary>
/// Schedules a merged-shared-key job over the hash map, parallelizing over
/// its hash buckets.
/// </summary>
public static unsafe JobHandle Schedule<TJob, TKey>(this TJob jobData, NativeMultiHashMap<TKey, int> hashMap, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where TJob : struct, IJobNativeMultiHashMapMergedSharedKeyIndices where TKey : struct, IEquatable<TKey>
{
    var wrapper = new NativeMultiHashMapUniqueHashJobStruct<TJob, TKey>.JobMultiHashMap
    {
        HashMap = hashMap,
        JobData = jobData
    };
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref wrapper),
        NativeMultiHashMapUniqueHashJobStruct<TJob, TKey>.Initialize(),
        dependsOn,
        ScheduleMode.Batched);
    // One index per hash bucket: capacity mask + 1 == bucket count.
    var bucketCount = hashMap.m_Buffer->bucketCapacityMask + 1;
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, bucketCount, minIndicesPerJobCount);
}
/// <summary>
/// Run a job synchronously
/// </summary>
///
/// <param name="jobData">
/// Job to run
/// </param>
///
/// <param name="valuesLength">
/// Length of the values to execute on.
/// </param>
///
/// <typeparam name="T">
/// Type of job to run
/// </typeparam>
public static unsafe void RunRanged<T>(this T jobData, int valuesLength) where T : struct, IJobParallelForRanged
{
    var runParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        ParallelForJobStruct<T>.Initialize(),
        new JobHandle(),
        ScheduleMode.Run);
    // A single batch covering the whole range executes immediately.
    JobsUtility.ScheduleParallelFor(ref runParams, valuesLength, valuesLength);
}
// Dispatches to the parallel-for or single-job scheduler, using the cached
// reflection data that matches the chosen execution path.
private static unsafe JobHandle Schedule(void *fullData, int length, int innerloopBatchCount, bool isParallelFor, ref JobProcessComponentDataCache cache, JobHandle dependsOn, ScheduleMode mode)
{
    if (!isParallelFor)
    {
        var singleParams = new JobsUtility.JobScheduleParameters(fullData, cache.JobReflectionData, dependsOn, mode);
        return JobsUtility.Schedule(ref singleParams);
    }
    var parallelParams = new JobsUtility.JobScheduleParameters(fullData, cache.JobReflectionDataParallelFor, dependsOn, mode);
    return JobsUtility.ScheduleParallelFor(ref parallelParams, length, innerloopBatchCount);
}
/// <summary>
/// Run a job asynchronously
/// </summary>
///
/// <param name="jobData">
/// Job to run
/// </param>
///
/// <param name="valuesLength">
/// Length of the values to execute on.
/// </param>
///
/// <param name="innerloopBatchCount">
/// Number of job executions per batch
/// </param>
///
/// <param name="dependsOn">
/// Handle of the job that must be run before this job
/// </param>
///
/// <returns>
/// A handle to the created job
/// </returns>
///
/// <typeparam name="T">
/// Type of job to run
/// </typeparam>
public static unsafe JobHandle ScheduleRanged<T>(this T jobData, int valuesLength, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForRanged
{
    var rangedParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        ParallelForJobStruct<T>.Initialize(),
        dependsOn,
        ScheduleMode.Batched);
    return JobsUtility.ScheduleParallelFor(ref rangedParams, valuesLength, innerloopBatchCount);
}
/// <summary>
/// Schedules a merged-shared-key job over a NativeMultiHashMap&lt;int, int&gt;,
/// parallelizing over its hash buckets.
/// </summary>
public static unsafe JobHandle Schedule<TJob>(this TJob jobData, NativeMultiHashMap<int, int> hashMap, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where TJob : struct, IJobNativeMultiHashMapMergedSharedKeyIndices
{
    var producer = new JobNativeMultiHashMapMergedSharedKeyIndicesProducer<TJob>
    {
        HashMap = hashMap,
        JobData = jobData
    };
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref producer),
        JobNativeMultiHashMapMergedSharedKeyIndicesProducer<TJob>.Initialize(),
        dependsOn,
        ScheduleMode.Parallel);
    // One index per hash bucket: capacity mask + 1 == bucket count.
    var bucketCount = hashMap.GetUnsafeBucketData().bucketCapacityMask + 1;
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, bucketCount, minIndicesPerJobCount);
}
// Schedules an IJobParallelFor through the producer wrapper.
public static unsafe JobHandle Schedule<T>(this T jobData, int arrayLength, int innerloopBatchCount, JobHandle dependsOn = default(JobHandle)) where T : struct, IJobParallelFor
{
    var parallelForJobProducer = new JobParallelForProducer<T>()
    {
        JobData = jobData,
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        // A known value derived from the schedule arguments; the code patcher
        // is expected to rewrite it. NOTE(review): presumably validated
        // elsewhere to detect unpatched code — confirm against the producer.
        Sentinel = 37 + arrayLength // check that code is patched as expected
#endif
    };
    var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref parallelForJobProducer), JobParallelForProducer<T>.Initialize(), dependsOn, ScheduleMode.Batched);
    return (JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, innerloopBatchCount));
}
// Schedules an IJobParallelForBatch. Under UNITY_AVOID_REFLECTION the Execute
// and Cleanup delegates are pinned via GCHandle and passed to the job system
// as raw function pointers; otherwise the reflection-based producer is used.
public static unsafe JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
#if UNITY_AVOID_REFLECTION
    // Protect against garbage collection
    if (!ParallelForBatchJobStruct<T>.ExecuteHandle.IsAllocated)
    {
        ParallelForBatchJobStruct<T>.ExecuteDelegate = ParallelForBatchJobStruct<T>.Execute;
        ParallelForBatchJobStruct<T>.ExecuteHandle = GCHandle.Alloc(ParallelForBatchJobStruct<T>.ExecuteDelegate);
        ParallelForBatchJobStruct<T>.ExecuteFunctionPtr = Marshal.GetFunctionPointerForDelegate(ParallelForBatchJobStruct<T>.ExecuteDelegate);
    }

    // Protect against garbage collection
    if (!ParallelForBatchJobStruct<T>.CleanupHandle.IsAllocated)
    {
        ParallelForBatchJobStruct<T>.CleanupDelegate = ParallelForBatchJobStruct<T>.Cleanup;
        ParallelForBatchJobStruct<T>.CleanupHandle = GCHandle.Alloc(ParallelForBatchJobStruct<T>.CleanupDelegate);
        ParallelForBatchJobStruct<T>.CleanupFunctionPtr = Marshal.GetFunctionPointerForDelegate(ParallelForBatchJobStruct<T>.CleanupDelegate);
    }

    var jobFunctionPtr = ParallelForBatchJobStruct<T>.ExecuteFunctionPtr;
    var completionFuncPtr = ParallelForBatchJobStruct<T>.CleanupFunctionPtr;

    var jobStruct = new ParallelForBatchJobStruct<T>()
    {
        JobData = jobData,
        Ranges = new JobRanges()
        {
            ArrayLength = arrayLength,
            IndicesPerPhase = JobsUtility.GetDefaultIndicesPerPhase(arrayLength)
        },
    };
    // Copy the wrapper into unmanaged memory. NOTE(review): presumably freed
    // by the job system after completion — confirm before changing allocator.
    var jobDataPtr = UnsafeUtility.Malloc(UnsafeUtility.SizeOf<ParallelForBatchJobStruct<T>>(),
        UnsafeUtility.AlignOf<ParallelForBatchJobStruct<T>>(), Allocator.TempJob);
    UnsafeUtility.CopyStructureToPtr(ref jobStruct, jobDataPtr);

    return (JobsUtility.ScheduleJobForEach(jobFunctionPtr, completionFuncPtr,
        new IntPtr(jobDataPtr), arrayLength, minIndicesPerJobCount, dependsOn));
#else
    // Reflection-based path: the job system copies jobData from this stack address.
    var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobData), ParallelForBatchJobStruct<T>.Initialize(), dependsOn,
        ScheduleMode.Batched);
    return (JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, minIndicesPerJobCount));
#endif
}
// Schedules jobData once per event reader for T, chaining each schedule onto
// the previous handle, then hands the final handle back to the event system.
private static unsafe JobHandle ScheduleInternal<TJob, T>(
    this TJob jobData, EventSystemBase eventSystem, JobHandle dependsOn, bool isParallel)
    where TJob : struct, IJobEvent<T>
    where T : struct
{
    // Fetch the readers for T; the returned handle folds in the event system's
    // own dependencies. NOTE(review): exact semantics per EventSystemBase.
    dependsOn = eventSystem.GetEventReaders<T>(dependsOn, out var events);
    for (var i = 0; i < events.Count; i++)
    {
        var reader = events[i];
        var fullData = new JobEventProducer<TJob, T>
        {
            Reader = reader,
            JobData = jobData,
            IsParallel = isParallel,
        };
#if UNITY_2020_2_OR_NEWER
        // ScheduleMode.Batched was renamed to Parallel in Unity 2020.2.
        const ScheduleMode scheduleMode = ScheduleMode.Parallel;
#else
        const ScheduleMode scheduleMode = ScheduleMode.Batched;
#endif
        // Each scheduling path has its own producer reflection data.
        var scheduleParams = new JobsUtility.JobScheduleParameters(
            UnsafeUtility.AddressOf(ref fullData),
            isParallel
            ? JobEventProducer<TJob, T>.InitializeParallel()
            : JobEventProducer<TJob, T>.InitializeSingle(),
            dependsOn,
            scheduleMode);
        // Readers are processed sequentially: each schedule depends on the
        // previous one via the updated dependsOn.
        dependsOn = isParallel
            ? JobsUtility.ScheduleParallelFor(ref scheduleParams, reader.ForEachCount, 1)
            : JobsUtility.Schedule(ref scheduleParams);
    }
    // Register the final handle so the event system can sync on this consumer.
    eventSystem.AddJobHandleForConsumer<T>(dependsOn);
    return (dependsOn);
}
// Schedules an IJobChunk over every chunk matched by the group, one chunk per
// parallel-for index (inner batch count of 1).
internal static unsafe JobHandle ScheduleInternal<T>(ref T jobData, ComponentGroup group, JobHandle dependsOn, ScheduleMode mode) where T : struct, IJobChunk
{
    var iterator = group.GetComponentChunkIterator();
    var totalChunks = group.CalculateNumberOfChunksWithoutFiltering();
    var fullData = new JobDataLiveFilter<T>
    {
        data = jobData,
        iterator = iterator,
    };
    var scheduleParams = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref fullData),
        JobChunkLiveFilter_Process<T>.Initialize(),
        dependsOn,
        mode);
    return JobsUtility.ScheduleParallelFor(ref scheduleParams, totalChunks, 1);
}