        internal static string CheckSafetyAfterUpdate(object system, ref UnsafeIntList readingSystems, ref UnsafeIntList writingSystems, ComponentDependencyManager *dependencyManager)
        {
            // Check that all reading and writing jobs are a dependency of the output job, to
            // catch systems that forget to add one of their jobs to the dependency graph.
            //
            // Note that this check is not strictly needed as we would catch the mistake anyway later,
            // but checking it here means we can flag the system that has the mistake, rather than some
            // other (innocent) system that is doing things correctly.

            //@TODO: It is not ideal that we call m_SafetyManager.GetDependency,
            //       as it can result in JobHandle.CombineDependencies calls,
            //       which means this debug-only check can have side effects.

            string dependencyError = null;

            for (var index = 0; index < readingSystems.Length && dependencyError == null; index++)
            {
                var type = readingSystems.Ptr[index];
                dependencyError = CheckJobDependencies(system, type, dependencyManager);
            }

            for (var index = 0; index < writingSystems.Length && dependencyError == null; index++)
            {
                var type = writingSystems.Ptr[index];
                dependencyError = CheckJobDependencies(system, type, dependencyManager);
            }

            if (dependencyError != null)
            {
                EmergencySyncAllJobs(ref readingSystems, ref writingSystems, dependencyManager);
            }

            return(dependencyError);
        }
Example No. 2
            /// <summary>
            /// Constructs an allocator.
            /// </summary>
            /// <param name="budgetInBytes">Budget of the allocator in bytes.</param>
            /// <param name="bufferSizeInBytes">Size of each buffer to be allocated in bytes.</param>
            /// <param name="handle">An AllocatorHandle to use for internal bookkeeping structures.</param>
            /// <exception cref="InvalidOperationException">Thrown if the allocator cannot reserve the address range required for the given budget.</exception>
            public BufferAllocator(int budgetInBytes, int bufferSizeInBytes, AllocatorManager.AllocatorHandle handle)
            {
                BufferSizeInBytes = bufferSizeInBytes;

                // Reserve the entire budget's worth of address space. The reserved space may be larger than the budget
                // due to page sizes.
                var pageCount = VirtualMemoryUtility.BytesToPageCount((uint)budgetInBytes, VirtualMemoryUtility.DefaultPageSizeInBytes);
                BaselibErrorState errorState;

                ReservedRange = VirtualMemoryUtility.ReserveAddressSpace(pageCount, VirtualMemoryUtility.DefaultPageSizeInBytes, out errorState);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
                if (!errorState.Success)
                {
                    throw new InvalidOperationException($"Failed to reserve address range for {budgetInBytes} bytes");
                }
#endif

                // Init a free list of blocks.
                MaxBufferCount = (int)VirtualMemoryUtility.BytesToPageCount((uint)budgetInBytes, (uint)bufferSizeInBytes);
                FreeList       = new UnsafeIntList(MaxBufferCount, handle);

                for (int i = MaxBufferCount - 1; i >= 0; --i)
                {
                    FreeList.Add(i);
                }
            }
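
The loop at the end of the constructor fills the free list in reverse, from MaxBufferCount - 1 down to 0. The standalone sketch below shows why that ordering matters; it assumes allocation pops from the tail of the free list (that part is not shown in the snippet), so buffer indices come out in ascending order. This is a plain C# mock, not the Unity BufferAllocator API.

using System;
using System.Collections.Generic;

static class FreeListSketch
{
    static void Main()
    {
        const int maxBufferCount = 4;
        var freeList = new List<int>(maxBufferCount);

        // Mirror of the constructor's loop: push maxBufferCount - 1 .. 0.
        for (int i = maxBufferCount - 1; i >= 0; --i)
            freeList.Add(i);

        // Popping from the tail (the cheapest removal) yields 0, 1, 2, 3.
        while (freeList.Count > 0)
        {
            int index = freeList[freeList.Count - 1];
            freeList.RemoveAt(freeList.Count - 1);
            Console.WriteLine($"allocated buffer index {index}");
        }
    }
}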
Example No. 3
        internal virtual void OnBeforeCreateInternal(World world)
        {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            m_SystemID = World.AllocateSystemID();
#endif
            m_World             = world;
            m_EntityManager     = world.EntityManager;
            m_DependencyManager = m_EntityManager.DependencyManager;

            m_JobDependencyForReadingSystems = new UnsafeIntList(0, Allocator.Persistent);
            m_JobDependencyForWritingSystems = new UnsafeIntList(0, Allocator.Persistent);

            m_EntityQueries = new EntityQuery[0];
#if !NET_DOTS
            m_AlwaysUpdateSystem = GetType().GetCustomAttributes(typeof(AlwaysUpdateSystemAttribute), true).Length != 0;
#else
            m_AlwaysUpdateSystem = false;
            var attrs = TypeManager.GetSystemAttributes(GetType());
            foreach (var attr in attrs)
            {
                if (attr.GetType() == typeof(AlwaysUpdateSystemAttribute))
                {
                    m_AlwaysUpdateSystem = true;
                }
            }
#endif
        }
        public BlockAllocator(AllocatorManager.AllocatorHandle handle, int budgetInBytes)
        {
            m_handle         = handle;
            m_nextByteOffset = 0;
            var blocks = (budgetInBytes + ms_BlockSize - 1) >> ms_Log2BlockSize;

            m_blocks      = new UnsafePtrList(blocks, handle);
            m_allocations = new UnsafeIntList(blocks, handle);
        }
        public static bool AddReaderTypeIndex(int typeIndex, ref UnsafeIntList reading, ref UnsafeIntList writing)
        {
            if (reading.Contains(typeIndex))
            {
                return(false);
            }
            if (writing.Contains(typeIndex))
            {
                return(false);
            }

            reading.Add(typeIndex);
            return(true);
        }
        internal static void EmergencySyncAllJobs(ref UnsafeIntList readingSystems, ref UnsafeIntList writingSystems, ComponentDependencyManager *dependencyManager)
        {
            for (int i = 0; i != readingSystems.Length; i++)
            {
                int type = readingSystems.Ptr[i];
                AtomicSafetyHandle.EnforceAllBufferJobsHaveCompleted(dependencyManager->Safety.GetSafetyHandle(type, true));
            }

            for (int i = 0; i != writingSystems.Length; i++)
            {
                int type = writingSystems.Ptr[i];
                AtomicSafetyHandle.EnforceAllBufferJobsHaveCompleted(dependencyManager->Safety.GetSafetyHandle(type, true));
            }
        }
Example No. 7
        internal bool AddReaderWritersToLists(ref UnsafeIntList reading, ref UnsafeIntList writing)
        {
            bool anyAdded = false;

            for (int i = 0; i < m_QueryData->ReaderTypesCount; ++i)
            {
                anyAdded |= CalculateReaderWriterDependency.AddReaderTypeIndex(m_QueryData->ReaderTypes[i], ref reading, ref writing);
            }

            for (int i = 0; i < m_QueryData->WriterTypesCount; ++i)
            {
                anyAdded |= CalculateReaderWriterDependency.AddWriterTypeIndex(m_QueryData->WriterTypes[i], ref reading, ref writing);
            }
            return(anyAdded);
        }
Example No. 8
        public BlockAllocator(AllocatorManager.AllocatorHandle handle, int budgetInBytes)
        {
            m_bufferAllocator = new BufferAllocator(budgetInBytes, ms_BlockSize, handle);
            m_nextPtr         = 0;
            var blocks = (budgetInBytes + ms_BlockSize - 1) >> ms_Log2BlockSize;

            m_allocations = new UnsafeIntList(blocks, handle);

            for (int i = 0; i < blocks; ++i)
            {
                m_allocations.Add(0);
            }

            m_currentBlockIndex = -1;
        }
        public static bool AddWriterTypeIndex(int typeIndex, ref UnsafeIntList reading, ref UnsafeIntList writing)
        {
            if (writing.Contains(typeIndex))
            {
                return(false);
            }

            var readingIndex = reading.IndexOf(typeIndex);

            if (readingIndex != -1)
            {
                reading.RemoveAtSwapBack(readingIndex);
            }

            writing.Add(typeIndex);
            return(true);
        }
        public static bool Add(ComponentType type, ref UnsafeIntList reading, ref UnsafeIntList writing)
        {
            Assert.IsFalse(type == ComponentType.ReadWrite <Entity>());

            if (type.IsZeroSized)
            {
                return(false);
            }

            if (type.AccessModeType == ComponentType.AccessMode.ReadOnly)
            {
                return(AddReaderTypeIndex(type.TypeIndex, ref reading, ref writing));
            }
            else
            {
                return(AddWriterTypeIndex(type.TypeIndex, ref reading, ref writing));
            }
        }
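
Taken together, AddReaderTypeIndex, AddWriterTypeIndex and Add keep the reading and writing lists disjoint, with write access subsuming read access. The fragment below sketches the call pattern; it is not a compilable standalone program, since these types are internal to Unity.Entities, and the typeIndex value is a placeholder rather than a real registered component type.

// Call-pattern sketch using only the APIs shown above; typeIndex is hypothetical.
var reading = new UnsafeIntList(0, Allocator.Persistent);
var writing = new UnsafeIntList(0, Allocator.Persistent);

const int typeIndex = 42; // placeholder component type index

// First seen as a reader: lands in the reading list.
CalculateReaderWriterDependency.AddReaderTypeIndex(typeIndex, ref reading, ref writing);

// Later seen as a writer: removed from reading and promoted to writing.
CalculateReaderWriterDependency.AddWriterTypeIndex(typeIndex, ref reading, ref writing);

// Seen as a reader again: already covered by the writing list, so nothing is added.
bool added = CalculateReaderWriterDependency.AddReaderTypeIndex(typeIndex, ref reading, ref writing);
// added == false; reading.Length == 0, writing.Length == 1

A read-write dependency already orders a job against every reader and writer of that type, which is why a writer entry makes a separate reader entry redundant.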
        internal static bool CheckSafetyAfterUpdate(ref UnsafeIntList readingSystems, ref UnsafeIntList writingSystems,
                                                    ComponentDependencyManager *dependencyManager, out SafetyErrorDetails details)
        {
            details = default;

            // Check that all reading and writing jobs are a dependency of the output job, to
            // catch systems that forget to add one of their jobs to the dependency graph.
            //
            // Note that this check is not strictly needed as we would catch the mistake anyway later,
            // but checking it here means we can flag the system that has the mistake, rather than some
            // other (innocent) system that is doing things correctly.

            //@TODO: It is not ideal that we call m_SafetyManager.GetDependency,
            //       as it can result in JobHandle.CombineDependencies calls,
            //       which means this debug-only check can have side effects.

            for (var index = 0; index < readingSystems.Length; index++)
            {
                var type = readingSystems.Ptr[index];
                if (CheckJobDependencies(ref details, type, dependencyManager))
                {
                    return(true);
                }
            }

            for (var index = 0; index < writingSystems.Length; index++)
            {
                var type = writingSystems.Ptr[index];
                if (CheckJobDependencies(ref details, type, dependencyManager))
                {
                    return(true);
                }
            }

            // EmergencySyncAllJobs(ref readingSystems, ref writingSystems, dependencyManager);

            return(false);
        }
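
Unlike the string-returning overload at the top of this page, this version leaves the emergency sync to the caller (note the commented-out EmergencySyncAllJobs line). A hedged caller sketch follows; it assumes it runs inside the type that declares these methods and the m_JobDependencyFor* fields from the OnBeforeCreateInternal snippet, and it does not show how SafetyErrorDetails is formatted into a message.

// Hedged caller sketch; assumes access to the internal members shown elsewhere on this page.
if (CheckSafetyAfterUpdate(ref m_JobDependencyForReadingSystems,
        ref m_JobDependencyForWritingSystems, m_DependencyManager, out var details))
{
    // A job was left out of the dependency chain: force-complete all jobs touching the
    // affected component types so later systems still see a consistent state.
    EmergencySyncAllJobs(ref m_JobDependencyForReadingSystems,
        ref m_JobDependencyForWritingSystems, m_DependencyManager);
}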
Example No. 12
        internal static unsafe JobHandle ScheduleInternal <T>(
            ref T jobData,
            EntityQuery query,
            JobHandle dependsOn,
            ScheduleMode mode,
            int batchesPerChunk,
            bool isParallel = true,
            NativeArray <Entity> limitToEntityArray = default(NativeArray <Entity>))
            where T : struct, IJobEntityBatchWithIndex
        {
            var queryImpl = query._GetImpl();
            var queryData = queryImpl->_QueryData;

            var batchCount                     = 0;
            var filteredChunkCount             = 0;
            var useEntityArray                 = limitToEntityArray.IsCreated;
            var prebuiltBatchList              = new UnsafeList(Allocator.TempJob);
            var perBatchMatchingArchetypeIndex = new UnsafeIntList(0, Allocator.TempJob);

            if (useEntityArray)
            {
                // Forces the creation of an EntityQueryMask, which is necessary to filter batches.
                var access = queryImpl->_Access;
                access->EntityQueryManager->GetEntityQueryMask(queryData, access->EntityComponentStore);

                ChunkIterationUtility.FindBatchesForEntityArrayWithQuery(
                    queryImpl->_Access->EntityComponentStore,
                    queryData,
                    ref queryImpl->_Filter,
                    (Entity *)limitToEntityArray.GetUnsafePtr(),
                    limitToEntityArray.Length,
                    ref prebuiltBatchList,
                    ref perBatchMatchingArchetypeIndex);

                batchCount = prebuiltBatchList.Length;
            }
            else
            {
                filteredChunkCount = query.CalculateChunkCount();
                batchCount         = filteredChunkCount * batchesPerChunk;
            }

            // Allocate one buffer for all prefilter data and distribute it
            // We keep the full buffer as a "dummy array" so we can deallocate it later with [DeallocateOnJobCompletion]
            var sizeofBatchArray  = sizeof(ArchetypeChunk) * batchCount;
            var sizeofIndexArray  = sizeof(int) * batchCount;
            var prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

            var prefilterData      = (byte *)Memory.Unmanaged.Allocate(prefilterDataSize, 64, Allocator.TempJob);
            var prefilterDataArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray <byte>(prefilterData, prefilterDataSize, Allocator.TempJob);

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref prefilterDataArray, AtomicSafetyHandle.Create());
#endif

            var prefilterHandle = dependsOn;
            if (useEntityArray)
            {
                var prefilterJob = new PrefilterForJobEntityBatchWithIndex_EntityArray
                {
                    MatchingArchetypes             = queryImpl->_QueryData->MatchingArchetypes,
                    Filter                         = queryImpl->_Filter,
                    EntityComponentStore           = queryImpl->_Access->EntityComponentStore,
                    PrefilterData                  = prefilterData,
                    PrebuiltBatches                = prebuiltBatchList,
                    PerBatchMatchingArchetypeIndex = perBatchMatchingArchetypeIndex
                };

                if (mode != ScheduleMode.Run)
                {
                    prefilterHandle = prefilterJob.Schedule(dependsOn);
                }
                else
                {
                    prefilterJob.Run();
                }

                prefilterHandle = prebuiltBatchList.Dispose(prefilterHandle);
                prefilterHandle = perBatchMatchingArchetypeIndex.Dispose(prefilterHandle);
            }
            else
            {
                var prefilterJob = new PrefilterForJobEntityBatchWithIndex
                {
                    MatchingArchetypes   = queryImpl->_QueryData->MatchingArchetypes,
                    Filter               = queryImpl->_Filter,
                    BatchesPerChunk      = batchesPerChunk,
                    EntityComponentStore = queryImpl->_Access->EntityComponentStore,
                    PrefilterData        = prefilterData,
                    FilteredChunkCount   = filteredChunkCount
                };

                if (mode != ScheduleMode.Run)
                {
                    prefilterHandle = prefilterJob.Schedule(dependsOn);
                }
                else
                {
                    prefilterJob.Run();
                }
            }


            JobEntityBatchIndexWrapper <T> jobEntityBatchIndexWrapper = new JobEntityBatchIndexWrapper <T>
            {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                // All IJobEntityBatchWithIndex jobs have an EntityManager safety handle to ensure that BeforeStructuralChange throws an error if
                // jobs without any other safety handles are still running (haven't been synced).
                safety = new EntitySafetyHandle {
                    m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle()
                },
#endif

                JobData       = jobData,
                PrefilterData = prefilterDataArray,

                JobsPerChunk = batchesPerChunk,
                IsParallel   = isParallel ? 1 : 0
            };

            var scheduleParams = new JobsUtility.JobScheduleParameters(
                UnsafeUtility.AddressOf(ref jobEntityBatchIndexWrapper),
                isParallel
                ? JobEntityBatchIndexProducer <T> .InitializeParallel()
                : JobEntityBatchIndexProducer <T> .InitializeSingle(),
                prefilterHandle,
                mode);

#if UNITY_DOTSRUNTIME
            // This should just be a call to FinalizeScheduleChecked, but DOTSR requires the JobsUtility calls to be
            // in this specific function.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            try
            {
#endif
            if (!isParallel)
            {
                return(JobsUtility.Schedule(ref scheduleParams));
            }
            else
            {
                return(JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1));
            }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        }

        catch (InvalidOperationException e)
        {
            prefilterHandle.Complete();
            prefilterDataArray.Dispose();
            throw;
        }
#endif
#else
            // We can't use try {} catch {} with 2020.2 as we will be burst compiling the schedule code.
            // Burst doesn't support exception handling.
            bool executedManaged = false;
            JobHandle result     = default;
            FinalizeScheduleChecked(isParallel, batchCount, prefilterHandle, prefilterDataArray, ref scheduleParams, ref executedManaged, ref result);

            if (executedManaged)
            {
                return(result);
            }

            return(FinalizeScheduleNoExceptions(isParallel, batchCount, ref scheduleParams));
#endif
        }
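
The method above carves a single TempJob allocation into three sections, as the "Allocate one buffer for all prefilter data" comment describes. The standalone sketch below reproduces that layout arithmetic with a stand-in struct; FakeBatch is not Unity's ArchetypeChunk, and what the real prefilter jobs write into each section (batches, then one int per batch, then a trailing count) is inferred from the size computation rather than shown here.

// Layout sketch: [batchCount batches][batchCount ints][one trailing int].
unsafe static class PrefilterLayoutSketch
{
    struct FakeBatch { public int ChunkIndex; public int Count; } // stand-in, not ArchetypeChunk

    static void Main()
    {
        int batchCount = 4;
        int sizeofBatchArray  = sizeof(FakeBatch) * batchCount;
        int sizeofIndexArray  = sizeof(int) * batchCount;
        int prefilterDataSize = sizeofBatchArray + sizeofIndexArray + sizeof(int);

        byte *prefilterData = stackalloc byte[prefilterDataSize];

        var batches    = (FakeBatch *)prefilterData;                                    // section 1: batch descriptions
        var perBatch   = (int *)(prefilterData + sizeofBatchArray);                     // section 2: one int per batch
        var finalCount = (int *)(prefilterData + sizeofBatchArray + sizeofIndexArray);  // trailing count

        *finalCount = batchCount;
        for (int i = 0; i < batchCount; ++i)
        {
            batches[i]  = new FakeBatch { ChunkIndex = i, Count = 8 };
            perBatch[i] = i * 8;
        }
    }
}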
        internal static unsafe JobHandle ScheduleInternal <T>(
            ref T jobData,
            EntityQuery query,
            JobHandle dependsOn,
            ScheduleMode mode,
            int batchesPerChunk,
            bool isParallel = true,
            NativeArray <Entity> limitToEntityArray = default(NativeArray <Entity>))
            where T : struct, IJobEntityBatch
        {
            var queryImpl = query._GetImpl();
            var queryData = queryImpl->_QueryData;

            var cachedChunks = queryData->GetMatchingChunkCache();

            // Don't schedule the job if there are no chunks to work on
            var chunkCount = cachedChunks.Length;

            var useEntityArray    = limitToEntityArray.IsCreated;
            var prebuiltBatchList = default(UnsafeList);
            var perBatchMatchingArchetypeIndex = default(UnsafeIntList);

            var batchCount = chunkCount * batchesPerChunk;

            if (useEntityArray)
            {
                prebuiltBatchList = new UnsafeList(Allocator.TempJob);
                perBatchMatchingArchetypeIndex = new UnsafeIntList(0, Allocator.TempJob);

                // Forces the creation of an EntityQueryMask, which is necessary to filter batches.
                var access = queryImpl->_Access;
                access->EntityQueryManager->GetEntityQueryMask(queryData, access->EntityComponentStore);

                ChunkIterationUtility.FindBatchesForEntityArrayWithQuery(
                    queryImpl->_Access->EntityComponentStore,
                    queryData,
                    ref queryImpl->_Filter,
                    (Entity *)limitToEntityArray.GetUnsafePtr(),
                    limitToEntityArray.Length,
                    ref prebuiltBatchList,
                    ref perBatchMatchingArchetypeIndex);

                batchCount = prebuiltBatchList.Length;
            }

            JobEntityBatchWrapper <T> jobEntityBatchWrapper = new JobEntityBatchWrapper <T>
            {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                // All IJobEntityBatch jobs have an EntityManager safety handle to ensure that BeforeStructuralChange throws an error if
                // jobs without any other safety handles are still running (haven't been synced).
                safety = new EntitySafetyHandle {
                    m_Safety = queryImpl->SafetyHandles->GetEntityManagerSafetyHandle()
                },
#endif

                MatchingArchetypes = queryData->MatchingArchetypes,
                CachedChunks       = cachedChunks,
                Filter             = queryImpl->_Filter,

                JobData      = jobData,
                JobsPerChunk = batchesPerChunk,
                IsParallel   = isParallel ? 1 : 0,

                UsePrebuiltBatchList = useEntityArray ? 1 : 0,
                PrebuiltBatchList    = prebuiltBatchList,
                PrebuiltBatchListMatchingArchetypeIndices = perBatchMatchingArchetypeIndex
            };

            var scheduleParams = new JobsUtility.JobScheduleParameters(
                UnsafeUtility.AddressOf(ref jobEntityBatchWrapper),
                isParallel
                    ? JobEntityBatchProducer <T> .InitializeParallel()
                    : JobEntityBatchProducer <T> .InitializeSingle(),
                dependsOn,
                mode);

            var result = default(JobHandle);

            if (!isParallel)
            {
                result = JobsUtility.Schedule(ref scheduleParams);
            }
            else
            {
                result = JobsUtility.ScheduleParallelFor(ref scheduleParams, batchCount, 1);
            }

            if (useEntityArray)
            {
                result = prebuiltBatchList.Dispose(result);
                result = perBatchMatchingArchetypeIndex.Dispose(result);
            }

            return(result);
        }
Example No. 14
 public void AddRange(UnsafeIntList src)
 {
     ListData.AddRange <int>(src.ListData);
 }
 public void AddRange(UnsafeIntList src)
 {
     this.ListData().AddRange <int>(src.ListData());
 }
 public UnsafeIntListDebugView(UnsafeIntList listData)
 {
     m_ListData = listData;
 }
 public void AddRangeNoResize(UnsafeIntList list)
 {
     Writer.AddRangeNoResize <int>(UnsafeIntListExtensions.ListData(ref list));
 }