Code example #1
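A test verifying that each thread gets its own Temp allocator: 16 threads each enter a Temp scope, allocate a random-sized block, wait on a shared event, and then assert that GetTempUsed grew only by (roughly) their own allocation.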
        public unsafe void TempAllocatorMultithreadUnique()
        {
            Thread[]         threads = new Thread[16];
            System.Random    rand    = new System.Random();
            ManualResetEvent mre     = new ManualResetEvent(false);

            for (int i = 0; i < threads.Length; i++)
            {
                threads[i] = new Thread((object ra) =>
                {
                    int r = (int)ra;
                    UnsafeUtility.EnterTempScope();
                    int oldSize = UnsafeUtility.GetTempUsed();
                    void *mem   = UnsafeUtility.Malloc(r, 0, Collections.Allocator.Temp);
                    mre.WaitOne();

                    Assert.GreaterOrEqual(UnsafeUtility.GetTempUsed(), oldSize + r);
                    Assert.Less(UnsafeUtility.GetTempUsed(), oldSize + r + 128);

                    UnsafeUtility.Free(mem, Collections.Allocator.Temp);
                    UnsafeUtility.ExitTempScope();
                });
                threads[i].Start(rand.Next(1, 32768));
            }

            Thread.Sleep(200);
            mre.Set();

            for (int i = 0; i < threads.Length; i++)
            {
                threads[i].Join();
            }
        }
Code example #2
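A test verifying that the Temp allocator's capacity grows under many nested 4 KB allocations and shrinks back to its original value once every scope has been freed and exited in reverse order.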
        public unsafe void TempAllocatorCapacityGrowsAndResetsManyAllocs()
        {
            void *[] mem = new void *[4096];

            ExitTempScopesLocally();

            int oldCap = UnsafeUtility.GetTempCapacity();

            for (int i = 0; i < mem.Length; i++)
            {
                UnsafeUtility.EnterTempScope();
                mem[i] = UnsafeUtility.Malloc(4096, 0, Collections.Allocator.Temp);
            }

            Assert.Greater(UnsafeUtility.GetTempCapacity(), oldCap);

            for (int i = mem.Length - 1; i >= 0; i--)
            {
                UnsafeUtility.Free(mem[i], Collections.Allocator.Temp);
                UnsafeUtility.ExitTempScope();
            }

            Assert.AreEqual(UnsafeUtility.GetTempCapacity(), oldCap);

            EnterTempScopesLocally();
        }
Code example #3
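A test that repeats the nest-allocate-unwind cycle 20 times and asserts that the used and capacity high-water marks stay constant across iterations, and that usage returns to its baseline after each unwind, i.e. repeated re-nesting does not leak.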
        public unsafe void TempAllocatorRenestingDoesntLeak()
        {
            void *[] mem          = new void *[4096];
            int      tempUsedBase = UnsafeUtility.GetTempUsed();
            int      tempUsedTop  = -1;
            int      tempCapTop   = -1;

            for (int j = 0; j < 20; j++)
            {
                for (int i = 0; i < mem.Length; i++)
                {
                    UnsafeUtility.EnterTempScope();
                    mem[i] = UnsafeUtility.Malloc(4096, 0, Collections.Allocator.Temp);
                }

                if (tempUsedTop == -1)
                {
                    tempUsedTop = UnsafeUtility.GetTempUsed();
                    tempCapTop  = UnsafeUtility.GetTempCapacity();
                }
                Assert.AreEqual(tempUsedTop, UnsafeUtility.GetTempUsed());
                Assert.AreEqual(tempCapTop, UnsafeUtility.GetTempCapacity());

                for (int i = mem.Length - 1; i >= 0; i--)
                {
                    UnsafeUtility.Free(mem[i], Collections.Allocator.Temp);
                    UnsafeUtility.ExitTempScope();
                }

                Assert.AreEqual(tempUsedBase, UnsafeUtility.GetTempUsed());
            }
        }
Code example #4
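A test that sets a user pointer on every nested scope during the first pass and asserts that the pointer reads back as null when the same scopes are re-entered on the second pass.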
        public unsafe void TempAllocatorRenestingUserNull()
        {
            void *[] mem = new void *[4096];
            for (int j = 0; j < 2; j++)
            {
                for (int i = 0; i < mem.Length; i++)
                {
                    UnsafeUtility.EnterTempScope();
                    if (j == 0)
                    {
                        UnsafeUtility.SetTempScopeUser((void *)(i + 1));
                    }
                    else
                    {
                        Assert.IsTrue(UnsafeUtility.GetTempScopeUser() == null);
                    }
                    mem[i] = UnsafeUtility.Malloc(4096, 0, Collections.Allocator.Temp);
                }

                for (int i = mem.Length - 1; i >= 0; i--)
                {
                    UnsafeUtility.Free(mem[i], Collections.Allocator.Temp);
                    UnsafeUtility.ExitTempScope();
                }
            }
        }
Code example #5
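A small wrapper that exits the current Temp scope, first releasing the scope's safety handle when collections checks are enabled.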
        public static unsafe void ExitScope()
        {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            ReleaseScopeSafetyHandle();
#endif
            UnsafeUtility.ExitTempScope();
        }
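Code example #6

A test that takes profiler stats snapshots around a Persistent and a Temp allocation: reserved-heap kilobytes rise after the allocations, fall after the Persistent Free, do not change when the Temp block is freed individually, and only return to the baseline after ExitTempScope.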
        public static unsafe void CheckNativeHeapStatsInternal()
        {
            ProfilerStats.CalculateStatsSnapshot();
            int oldKb = ProfilerStats.Stats.memoryStats.kbReservedTotal;

            UnsafeUtility.EnterTempScope();

            // Taking a stats snapshot should
            // a) Fill memory stats automatically since they are part of DOTS Runtime core
            // b) Convert all AccumStats (many of which are represented as longs) into Stats (required by current profiler stats protocol)

            void *mem  = UnsafeUtility.Malloc(2048, 16, Collections.Allocator.Persistent);
            void *mem2 = UnsafeUtility.Malloc(1024, 16, Collections.Allocator.Temp);

            ProfilerStats.CalculateStatsSnapshot();
            Assert.IsTrue((ProfilerStats.GatheredStats & ProfilerModes.ProfileMemory) != 0);
            // debug build padding could make it 3 or 4 kb
            AssertHeapKbRange(ProfilerStats.Stats.memoryStats.kbReservedTotal, 3 + oldKb);

            UnsafeUtility.Free(mem, Collections.Allocator.Persistent);
            ProfilerStats.CalculateStatsSnapshot();
            // debug build padding could make it 1 or 2 kb
            AssertHeapKbRange(ProfilerStats.Stats.memoryStats.kbReservedTotal, 1 + oldKb);

            // Freeing a temp allocation should not adjust stats (you don't technically need to free
            // Temp allocations individually, and tracking them can cause fake leaks)
            UnsafeUtility.Free(mem2, Collections.Allocator.Temp);
            ProfilerStats.CalculateStatsSnapshot();
            AssertHeapKbRange(ProfilerStats.Stats.memoryStats.kbReservedTotal, 1 + oldKb);

            // This should adjust stats - it's meant to be fast and release all temp allocations in scope
            UnsafeUtility.ExitTempScope();
            ProfilerStats.CalculateStatsSnapshot();
            Assert.IsTrue(ProfilerStats.Stats.memoryStats.kbReservedTotal == oldKb);
        }
Code example #7
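Runs an IJobChunk inline. If the job has a non-blittable Burst layout (GetUnmanagedJobSize_Gen() != -1), the managed wrapper is marshalled into aligned stack memory inside a Temp scope, the function pointer is invoked at the offset past the wrapper's safety handle, and the results are marshalled back before the scope is exited.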
        public static unsafe void RunJobChunk <T>(ref T jobData, EntityQuery query, JobChunkRunWithoutJobSystemDelegate functionPointer) where T : unmanaged, IJobChunk, IJobBase
        {
            var myIterator = query.GetArchetypeChunkIterator();

            try
            {
                query._GetImpl()->_Access->DependencyManager->IsInForEachDisallowStructuralChange++;

                var managedJobDataPtr = UnsafeUtility.AddressOf(ref jobData);
                var unmanagedSize     = jobData.GetUnmanagedJobSize_Gen();
                if (unmanagedSize != -1)
                {
                    const int kAlignment              = 16;
                    int       alignedSize             = (unmanagedSize + kAlignment - 1) & ~(kAlignment - 1);
                    byte *    unmanagedJobData        = stackalloc byte[alignedSize];
                    byte *    alignedUnmanagedJobData = (byte *)((UInt64)(unmanagedJobData + kAlignment - 1) & ~(UInt64)(kAlignment - 1));

                    // DOTS Runtime job marshalling code assumes the job is wrapped, so create the wrapper and assign the jobData
                    JobChunkExtensions.JobChunkWrapper <T> jobChunkWrapper = default;
                    jobChunkWrapper.JobData = jobData;
                    byte *jobChunkDataPtr = (byte *)UnsafeUtility.AddressOf(ref jobChunkWrapper);

                    byte *dst = (byte *)alignedUnmanagedJobData;
                    byte *src = (byte *)jobChunkDataPtr;
                    var   marshalToBurstFnPtr = JobMarshalFnLookup <T> .GetMarshalToBurstFn();

                    UnsafeUtility.EnterTempScope();
                    UnsafeUtility.CallFunctionPtr_pp(marshalToBurstFnPtr.ToPointer(), dst, src);

                    // Since we are running inline, the outer job scheduling code would normally
                    // reference jobWrapper.Data. We can't do that here: reaching this code means the
                    // job/jobwrapper is Burst compiled and non-blittable, so any type-safe offset we
                    // calculate would be based on the managed data layout, which is not useful.
                    // We do know that for a sequential layout (which we must be using, since we are
                    // Burst compiled) our JobChunkData contains a safety field as its first member.
                    // Skipping over it provides the necessary offset to jobChunkData.Data
                    var DataOffset = UnsafeUtility.SizeOf <JobChunkExtensions.EntitySafetyHandle>();
                    Assertions.Assert.AreEqual(jobChunkWrapper.safety.GetType(), typeof(JobChunkExtensions.EntitySafetyHandle));
                    functionPointer(&myIterator, alignedUnmanagedJobData + DataOffset);

                    // Since Run can capture locals for write back, we must write back the marshalled jobData after the job executes
                    var marshalFromBurstFnPtr = JobMarshalFnLookup <T> .GetMarshalFromBurstFn();

                    UnsafeUtility.CallFunctionPtr_pp(marshalFromBurstFnPtr.ToPointer(), src, dst);
                    UnsafeUtility.ExitTempScope();

                    jobData = jobChunkWrapper.JobData;
                }
                else
                {
                    functionPointer(&myIterator, managedJobDataPtr);
                }
            }
            finally
            {
                query._GetImpl()->_Access->DependencyManager->IsInForEachDisallowStructuralChange--;
            }
        }
Code example #8
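The IJob counterpart of the previous example: the wrapped job data is marshalled into an aligned stack buffer inside a Temp scope (guarded by try/finally), the Burst function pointer is called on it directly, and any writes are marshalled back into jobData afterwards.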
        public static unsafe void RunIJob <T>(ref T jobData, JobRunWithoutJobSystemDelegate functionPointer) where T : unmanaged, IJob, IJobBase
        {
            var managedJobDataPtr = UnsafeUtility.AddressOf(ref jobData);
            var unmanagedSize     = jobData.GetUnmanagedJobSize_Gen();

            if (unmanagedSize != -1)
            {
                const int kAlignment              = 16;
                int       alignedSize             = (unmanagedSize + kAlignment - 1) & ~(kAlignment - 1);
                byte *    unmanagedJobData        = stackalloc byte[alignedSize];
                byte *    alignedUnmanagedJobData = (byte *)((UInt64)(unmanagedJobData + kAlignment - 1) & ~(UInt64)(kAlignment - 1));

                // DOTS Runtime job marshalling code assumes the job is wrapped, so create the wrapper and assign the jobData

                IJobExtensions.JobProducer <T> jobStructData = default;
                jobStructData.JobData = jobData;
                byte *jobStructDataPtr = (byte *)UnsafeUtility.AddressOf(ref jobStructData);

                byte *dst = (byte *)alignedUnmanagedJobData;
                byte *src = (byte *)jobStructDataPtr;
                var   marshalToBurstFnPtr = JobMarshalFnLookup <T> .GetMarshalToBurstFn();

                UnsafeUtility.EnterTempScope();
                try
                {
                    UnsafeUtility.CallFunctionPtr_pp(marshalToBurstFnPtr.ToPointer(), dst, src);

                    // In the case of JobStruct we know the jobwrapper doesn't add anything to the
                    // jobData, so just pass it along; unlike JobChunk, no offset is required
                    functionPointer(alignedUnmanagedJobData);

                    // Since Run can capture locals for write back, we must write back the marshalled jobData after the job executes
                    var marshalFromBurstFnPtr = JobMarshalFnLookup <T> .GetMarshalFromBurstFn();

                    UnsafeUtility.CallFunctionPtr_pp(marshalFromBurstFnPtr.ToPointer(), src, dst);
                }
                finally
                {
                    UnsafeUtility.ExitTempScope();
                }

                jobData = jobStructData.JobData;
            }
            else
            {
                functionPointer(managedJobDataPtr);
            }
        }
Code example #9
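A test that pushes 4096 nested Temp scopes, tagging each with a distinct user pointer, and asserts while unwinding that GetTempScopeUser returns the pointer stored for that nesting level.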
        public unsafe void TempAllocatorRewindSavesUserData()
        {
            void *[] mem = new void *[4096];
            for (int i = 0; i < mem.Length; i++)
            {
                UnsafeUtility.EnterTempScope();
                UnsafeUtility.SetTempScopeUser((void *)(i + 1));
                mem[i] = UnsafeUtility.Malloc(4096, 0, Collections.Allocator.Temp);
            }

            for (int i = mem.Length - 1; i >= 0; i--)
            {
                Assert.IsTrue((void *)(i + 1) == UnsafeUtility.GetTempScopeUser());
                UnsafeUtility.Free(mem[i], Collections.Allocator.Temp);
                UnsafeUtility.ExitTempScope();
            }
        }
Code example #10
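A test that allocates a 4 MB Temp block, larger than the default Temp capacity, asserts that the capacity grew to accommodate it, and asserts that it shrinks back to the previous value once the scope is exited.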
        public unsafe void TempAllocatorLargerThanDefaultSize()
        {
            const int kSize = 1024 * 1024 * 4;

            ExitTempScopesLocally();

            UnsafeUtility.EnterTempScope();
            int   oldCap    = UnsafeUtility.GetTempCapacity();
            void *largeTemp = UnsafeUtility.Malloc(kSize, 0, Collections.Allocator.Temp);

            Assert.Greater(UnsafeUtility.GetTempCapacity(), oldCap);
            Assert.GreaterOrEqual(UnsafeUtility.GetTempUsed(), kSize);

            UnsafeUtility.Free(largeTemp, Collections.Allocator.Temp);
            UnsafeUtility.ExitTempScope();

            Assert.AreEqual(UnsafeUtility.GetTempCapacity(), oldCap);

            EnterTempScopesLocally();
        }
Code example #11
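The job scheduler entry point. For synchronous schedule modes it completes the dependency, marshals non-blittable job data when required, and executes the job's function pointer on the main thread inside a Temp scope; otherwise it hands the (possibly marshalled) job data to ScheduleJob.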
        public static unsafe JobHandle Schedule(ref JobScheduleParameters parameters)
        {
            // Ensure the user has not set the schedule mode to a currently unsupported type
            Assert.IsTrue(parameters.ScheduleMode != ScheduleMode.Single);

            // Heap memory must be passed to schedule, so that Cleanup can free() it.
            UnsafeUtility.AssertHeap(parameters.JobDataPtr);
            UnsafeUtility.AssertHeap(parameters.ReflectionData);
            ReflectionDataProxy jobReflectionData = UnsafeUtility.AsRef <ReflectionDataProxy>(parameters.ReflectionData);

            Assert.IsTrue(jobReflectionData.ExecuteFunctionPtr.ToPointer() != null);
            Assert.IsTrue(jobReflectionData.CleanupFunctionPtr.ToPointer() != null);

#if ENABLE_UNITY_COLLECTIONS_CHECKS && !UNITY_DOTSRUNTIME_IL2CPP
            Assert.IsTrue((jobReflectionData.UnmanagedSize != -1 && jobReflectionData.MarshalToBurstFunctionPtr != IntPtr.Zero) ||
                          (jobReflectionData.UnmanagedSize == -1 && jobReflectionData.MarshalToBurstFunctionPtr == IntPtr.Zero));
#endif

            JobMetaData *managedJobDataPtr = parameters.JobDataPtr;
            JobMetaData  jobMetaData;

            Assert.IsTrue(sizeof(JobRanges) <= JobMetaData.kJobMetaDataIsParallelOffset);
            UnsafeUtility.CopyPtrToStructure(managedJobDataPtr, out jobMetaData);
            Assert.IsTrue(jobMetaData.jobDataSize > 0); // set by JobScheduleParameters
            jobMetaData.managedPtr    = managedJobDataPtr;
            jobMetaData.isParallelFor = 0;
            UnsafeUtility.CopyStructureToPtr(ref jobMetaData, managedJobDataPtr);

            JobHandle jobHandle = default;
#if !UNITY_SINGLETHREADED_JOBS
            bool runSingleThreadSynchronous =
                parameters.ScheduleMode == ScheduleMode.RunOnMainThread ||
                parameters.ScheduleMode == ScheduleMode.Run ||
                parameters.ScheduleMode == ScheduleMode.ScheduleOnMainThread;
#else
            bool runSingleThreadSynchronous = true;
#endif

            if (runSingleThreadSynchronous)
            {
                bool syncNow = parameters.ScheduleMode == ScheduleMode.Run || parameters.ScheduleMode == ScheduleMode.RunOnMainThread;

#if UNITY_SINGLETHREADED_JOBS
                if (!syncNow)
                {
                    jobHandle.JobGroup = GetFakeJobGroupId();
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                    DebugDidScheduleJob(ref jobHandle, (JobHandle *)UnsafeUtility.AddressOf(ref parameters.Dependency), 1);
#endif
                }
#endif

                parameters.Dependency.Complete();
                UnsafeUtility.SetInJob(1);
                try
                {
                    // We assume there are no non-blittable fields (e.g. DisposeSentinel) in a bursted job if
                    // collections checks are not enabled
#if ENABLE_UNITY_COLLECTIONS_CHECKS && !UNITY_DOTSRUNTIME_IL2CPP
                    // If the job was bursted, and the job structure contained non-blittable fields, the UnmanagedSize will
                    // be something other than -1 meaning we need to marshal the managed representation before calling the ExecuteFn
                    if (jobReflectionData.UnmanagedSize != -1)
                    {
                        JobMetaData *unmanagedJobData = AllocateJobHeapMemory(jobReflectionData.UnmanagedSize, 1);

                        void *dst = (byte *)unmanagedJobData + sizeof(JobMetaData);
                        void *src = (byte *)managedJobDataPtr + sizeof(JobMetaData);

                        UnsafeUtility.EnterTempScope();
                        try
                        {
                            UnsafeUtility.CallFunctionPtr_pp(jobReflectionData.MarshalToBurstFunctionPtr.ToPointer(), dst, src);

                            // In the single threaded case, this is synchronous execution.
                            // The cleanup *is* bursted, so pass in unmanagedJobData
                            CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, unmanagedJobData);

                            UnsafeUtility.CallFunctionPtr_pi(jobReflectionData.ExecuteFunctionPtr.ToPointer(), unmanagedJobData, k_MainThreadWorkerIndex);
                        }
                        finally
                        {
                            UnsafeUtility.ExitTempScope();
                        }
                    }
                    else
#endif
                    {
                        CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, null);

                        // In the single threaded case, this is synchronous execution.
                        UnsafeUtility.EnterTempScope();
                        try
                        {
                            UnsafeUtility.CallFunctionPtr_pi(jobReflectionData.ExecuteFunctionPtr.ToPointer(), managedJobDataPtr, k_MainThreadWorkerIndex);
                        }
                        finally
                        {
                            UnsafeUtility.ExitTempScope();
                        }
                    }
                }
                finally
                {
                    UnsafeUtility.SetInJob(0);
                }

                return(jobHandle);
            }
#if !UNITY_SINGLETHREADED_JOBS
#if ENABLE_UNITY_COLLECTIONS_CHECKS && !UNITY_DOTSRUNTIME_IL2CPP
            // If the job was bursted, and the job structure contained non-blittable fields, the UnmanagedSize will
            // be something other than -1 meaning we need to marshal the managed representation before calling the ExecuteFn.
            // This time though, we have a whole bunch of jobs that need to be processed.
            if (jobReflectionData.UnmanagedSize != -1)
            {
                JobMetaData *unmanagedJobData = AllocateJobHeapMemory(jobReflectionData.UnmanagedSize, 1);

                void *dst = (byte *)unmanagedJobData + sizeof(JobMetaData);
                void *src = (byte *)managedJobDataPtr + sizeof(JobMetaData);
                UnsafeUtility.CallFunctionPtr_pp(jobReflectionData.MarshalToBurstFunctionPtr.ToPointer(), dst, src);

                CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, unmanagedJobData);
                jobHandle = ScheduleJob(jobReflectionData.ExecuteFunctionPtr, unmanagedJobData, parameters.Dependency);
            }
            else
#endif
            {
                CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, null);
                jobHandle = ScheduleJob(jobReflectionData.ExecuteFunctionPtr, parameters.JobDataPtr, parameters.Dependency);
            }
#endif
            return(jobHandle);
        }
Code example #12
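The parallel-for variant of the previous example: it validates the arrayLength/deferredDataPtr arguments, fills in JobRanges, and either runs the job (and its cleanup) synchronously inside a Temp scope on the main thread or schedules it via ScheduleJobParallelFor, marshalling one copy of the job data per worker when the job's Burst layout differs from the managed layout.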
        static unsafe JobHandle ScheduleParallelForInternal(ref JobScheduleParameters parameters, int arrayLength, void *deferredDataPtr, int innerloopBatchCount)
        {
            // Ensure the user has not set the schedule mode to a currently unsupported type
            Assert.IsTrue(parameters.ScheduleMode != ScheduleMode.Single);

            // May provide an arrayLength (>=0) OR a deferredDataPtr, but not both.
            Assert.IsTrue((arrayLength >= 0 && deferredDataPtr == null) || (arrayLength < 0 && deferredDataPtr != null));

            UnsafeUtility.AssertHeap(parameters.JobDataPtr);
            UnsafeUtility.AssertHeap(parameters.ReflectionData);
            ReflectionDataProxy jobReflectionData = UnsafeUtility.AsRef <ReflectionDataProxy>(parameters.ReflectionData);

            Assert.IsFalse(jobReflectionData.ExecuteFunctionPtr.ToPointer() == null);
            Assert.IsFalse(jobReflectionData.CleanupFunctionPtr.ToPointer() == null);
#if ENABLE_UNITY_COLLECTIONS_CHECKS && !UNITY_DOTSRUNTIME_IL2CPP
            Assert.IsTrue((jobReflectionData.UnmanagedSize != -1 && jobReflectionData.MarshalToBurstFunctionPtr != IntPtr.Zero) ||
                          (jobReflectionData.UnmanagedSize == -1 && jobReflectionData.MarshalToBurstFunctionPtr == IntPtr.Zero));
#endif
            JobMetaData *managedJobDataPtr = parameters.JobDataPtr;
            JobMetaData  jobMetaData;

            UnsafeUtility.CopyPtrToStructure(parameters.JobDataPtr, out jobMetaData);
            Assert.IsTrue(jobMetaData.jobDataSize > 0); // set by JobScheduleParameters
            Assert.IsTrue(sizeof(JobRanges) <= JobMetaData.kJobMetaDataIsParallelOffset);
            jobMetaData.JobRanges.ArrayLength     = (arrayLength >= 0) ? arrayLength : 0;
            jobMetaData.JobRanges.IndicesPerPhase = (arrayLength >= 0) ? GetDefaultIndicesPerPhase(arrayLength) : 1; // TODO indicesPerPhase isn't actually used, except as a flag.
            // If codegen set this to -1, scheduling the job as a parallel-for is an error because
            // the job potentially contains write operations that are not parallel compatible
            if (jobMetaData.isParallelFor == -1)
            {
                throw new InvalidOperationException("Parallel writing not supported in this job. Parallel scheduling invalid.");
            }
            jobMetaData.isParallelFor   = 1;
            jobMetaData.deferredDataPtr = deferredDataPtr;

            JobHandle jobHandle = default;
#if !UNITY_SINGLETHREADED_JOBS
            bool runSingleThreadSynchronous =
                parameters.ScheduleMode == ScheduleMode.RunOnMainThread ||
                parameters.ScheduleMode == ScheduleMode.ScheduleOnMainThread;
#else
            bool runSingleThreadSynchronous = true;
#endif

            jobMetaData.JobRanges.runOnMainThread = runSingleThreadSynchronous ? 1 : 0;

            if (runSingleThreadSynchronous)
            {
                bool syncNow = parameters.ScheduleMode == ScheduleMode.Run || parameters.ScheduleMode == ScheduleMode.RunOnMainThread;
#if UNITY_SINGLETHREADED_JOBS
                // Nativejobs needs further support for creating a JobHandle that is not linked to an actual job
                // before this can be supported correctly in multithreaded builds
                if (!syncNow)
                {
                    jobHandle.JobGroup = GetFakeJobGroupId();
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                    DebugDidScheduleJob(ref jobHandle, (JobHandle *)UnsafeUtility.AddressOf(ref parameters.Dependency), 1);
#endif
                }
#endif

                parameters.Dependency.Complete();
                UnsafeUtility.SetInJob(1);
                try
                {
                    // We assume there are no non-blittable fields (e.g. DisposeSentinel) in a bursted job if
                    // collections checks are not enabled
#if ENABLE_UNITY_COLLECTIONS_CHECKS && !UNITY_DOTSRUNTIME_IL2CPP
                    // If the job was bursted, and the job structure contained non-blittable fields, the UnmanagedSize will
                    // be something other than -1 meaning we need to marshal the managed representation before calling the ExecuteFn
                    if (jobReflectionData.UnmanagedSize != -1)
                    {
                        JobMetaData *unmanagedJobData = AllocateJobHeapMemory(jobReflectionData.UnmanagedSize, 1);

                        void *dst = (byte *)unmanagedJobData + sizeof(JobMetaData);
                        void *src = (byte *)managedJobDataPtr + sizeof(JobMetaData);

                        // In the single threaded case, this is synchronous execution.
                        UnsafeUtility.EnterTempScope();
                        try
                        {
                            UnsafeUtility.CallFunctionPtr_pp(jobReflectionData.MarshalToBurstFunctionPtr.ToPointer(), dst, src);

                            CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, unmanagedJobData);

                            UnsafeUtility.CallFunctionPtr_pi(jobReflectionData.ExecuteFunctionPtr.ToPointer(), unmanagedJobData, k_MainThreadWorkerIndex);
                            UnsafeUtility.CallFunctionPtr_p(jobReflectionData.CleanupFunctionPtr.ToPointer(), unmanagedJobData);
                        }
                        finally
                        {
                            UnsafeUtility.ExitTempScope();
                        }
                    }
                    else
#endif
                    {
                        CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, null);

                        // In the single threaded case, this is synchronous execution.
                        UnsafeUtility.EnterTempScope();
                        try
                        {
                            UnsafeUtility.CallFunctionPtr_pi(jobReflectionData.ExecuteFunctionPtr.ToPointer(), managedJobDataPtr, k_MainThreadWorkerIndex);
                            UnsafeUtility.CallFunctionPtr_p(jobReflectionData.CleanupFunctionPtr.ToPointer(), managedJobDataPtr);
                        }
                        finally
                        {
                            UnsafeUtility.ExitTempScope();
                        }
                    }
                }
                finally
                {
                    UnsafeUtility.SetInJob(0);
                }

                return(jobHandle);
            }
#if !UNITY_SINGLETHREADED_JOBS
#if ENABLE_UNITY_COLLECTIONS_CHECKS && !UNITY_DOTSRUNTIME_IL2CPP
            // If the job was bursted, and the job structure contained non-blittable fields, the UnmanagedSize will
            // be something other than -1 meaning we need to marshal the managed representation before calling the ExecuteFn
            if (jobReflectionData.UnmanagedSize != -1)
            {
                int          nWorker          = JobWorkerCount > 1 ? JobWorkerCount : 1;
                JobMetaData *unmanagedJobData = AllocateJobHeapMemory(jobReflectionData.UnmanagedSize, nWorker);

                for (int i = 0; i < nWorker; i++)
                {
                    void *dst = (byte *)unmanagedJobData + sizeof(JobMetaData) + i * jobReflectionData.UnmanagedSize;
                    void *src = (byte *)managedJobDataPtr + sizeof(JobMetaData) + i * jobMetaData.jobDataSize;
                    UnsafeUtility.CallFunctionPtr_pp(jobReflectionData.MarshalToBurstFunctionPtr.ToPointer(), dst, src);
                }

                // Need to change the jobDataSize so the job will have the correct stride when finding
                // the correct jobData for a thread.
                JobMetaData unmanagedJobMetaData = jobMetaData;
                unmanagedJobMetaData.jobDataSize = jobReflectionData.UnmanagedSize;
                CopyMetaDataToJobData(ref unmanagedJobMetaData, managedJobDataPtr, unmanagedJobData);

                jobHandle = ScheduleJobParallelFor(jobReflectionData.ExecuteFunctionPtr,
                                                   jobReflectionData.CleanupFunctionPtr, unmanagedJobData, arrayLength,
                                                   innerloopBatchCount, parameters.Dependency);
            }
            else
#endif
            {
                CopyMetaDataToJobData(ref jobMetaData, managedJobDataPtr, null);
                jobHandle = ScheduleJobParallelFor(jobReflectionData.ExecuteFunctionPtr,
                                                   jobReflectionData.CleanupFunctionPtr, parameters.JobDataPtr, arrayLength,
                                                   innerloopBatchCount, parameters.Dependency);
            }

            if (parameters.ScheduleMode == ScheduleMode.Run)
            {
                jobHandle.Complete();
            }
#endif
            return(jobHandle);
        }