Code example #1
        /// <summary>
        /// Schedule 'callback' to be called in the next GC.  If the callback returns true it is
        /// rescheduled for the next Gen 2 GC.  Otherwise the callbacks stop.
        ///
        /// NOTE: This callback will be kept alive until either the callback function returns false,
        /// or the target object dies.
        /// </summary>
        public static void Register(Func<object, bool> callback, object targetObj)
        {
            // Create an unreachable object that remembers the callback function and target object.
            Gen2GcCallback gcCallback = new Gen2GcCallback();

            gcCallback.Setup(callback, targetObj);
        }
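
A minimal usage sketch of this overload (SampleCache and its trimming policy are hypothetical, and Gen2GcCallback is internal to the runtime, so code like this only compiles inside a runtime fork or against an equivalent helper):

    using System;
    using System.Collections.Generic;

    internal sealed class SampleCache
    {
        private readonly List<byte[]> _buffers = new List<byte[]>();

        public SampleCache()
        {
            // Ask for TrimCallback to be invoked on every Gen 2 GC.
            // Passing 'this' as targetObj lets the registration die with the cache.
            Gen2GcCallback.Register(TrimCallback, this);
        }

        // Static so the delegate does not itself root the cache instance.
        private static bool TrimCallback(object targetObj)
        {
            var cache = (SampleCache)targetObj;
            lock (cache._buffers)
            {
                cache._buffers.Clear();   // hypothetical policy: drop everything on a Gen 2 GC
            }
            return true;                  // true = keep calling on future Gen 2 GCs
        }
    }
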
Code example #2
 private void Restock(out object returnBuffer)
 {
     lock (this)
     {
         if (!this.m_FreeList.TryPop(out returnBuffer))
         {
             if (this.m_restockSize == 0)
             {
                 Gen2GcCallback.Register(new Func<object, bool>(PinnableBufferCache.Gen2GcCallbackFunc), this);
             }
             this.m_moreThanFreeListNeeded = true;
             PinnableBufferCacheEventSource.Log.AllocateBufferFreeListEmpty(this.m_CacheName, this.m_NotGen2.Count);
             if (this.m_NotGen2.Count == 0)
             {
                 this.CreateNewBuffers();
             }
             int index = this.m_NotGen2.Count - 1;
             if (GC.GetGeneration(this.m_NotGen2[index]) < GC.MaxGeneration && GC.GetGeneration(this.m_NotGen2[0]) == GC.MaxGeneration)
             {
                 index = 0;
             }
             returnBuffer = this.m_NotGen2[index];
             this.m_NotGen2.RemoveAt(index);
             if (PinnableBufferCacheEventSource.Log.IsEnabled() && GC.GetGeneration(returnBuffer) < GC.MaxGeneration)
             {
                 PinnableBufferCacheEventSource.Log.AllocateBufferFromNotGen2(this.m_CacheName, this.m_NotGen2.Count);
             }
             if (!this.AgePendingBuffers() && this.m_NotGen2.Count == this.m_restockSize / 2)
             {
                 PinnableBufferCacheEventSource.Log.DebugMessage("Proactively adding more buffers to aging pool");
                 this.CreateNewBuffers();
             }
         }
     }
 }
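
The index selection above prefers a buffer that has already reached the oldest generation: if the newest entry (at the end of m_NotGen2) is still young but the oldest entry (index 0) has made it to Gen 2, index 0 is handed out instead. A small standalone sketch of that same generation test, using only the public GC.GetGeneration and GC.MaxGeneration APIs (the buffers and sizes are illustrative):

    using System;

    class GenerationCheckDemo
    {
        static void Main()
        {
            byte[] oldBuffer = new byte[4096];

            // Force a couple of collections so oldBuffer gets promoted.
            GC.Collect();
            GC.Collect();

            byte[] newBuffer = new byte[4096];

            // GC.MaxGeneration is the oldest generation (2 on current GCs).
            Console.WriteLine($"old buffer: gen {GC.GetGeneration(oldBuffer)} / max {GC.MaxGeneration}");
            Console.WriteLine($"new buffer: gen {GC.GetGeneration(newBuffer)}");

            // The same test Restock uses: has this buffer aged into the oldest generation yet?
            bool oldIsAged = GC.GetGeneration(oldBuffer) == GC.MaxGeneration;
            bool newIsAged = GC.GetGeneration(newBuffer) == GC.MaxGeneration;
            Console.WriteLine($"oldIsAged={oldIsAged}, newIsAged={newIsAged}");
        }
    }
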
Code example #3
File: PinnableBufferCache.cs  Project: xsustek/corert
        /// <summary>
        /// Called when we don't have any buffers in our free list to give out.
        /// </summary>
        /// <returns></returns>
        private void Restock(out object returnBuffer)
        {
            lock (this)
            {
                // Try again after getting the lock as another thread could have just filled the free list.  If we don't check
                // then we unnecessarily grab a new set of buffers because we think we are out.
                if (m_FreeList.TryPop(out returnBuffer))
                {
                    return;
                }

                // Lazy init: ask that TrimFreeListIfNeeded be called on every Gen 2 GC.
                if (m_restockSize == 0)
                {
                    Gen2GcCallback.Register(Gen2GcCallbackFunc, this);
                }

                // Indicate to the trimming policy that the free list is insufficient.
                m_moreThanFreeListNeeded = true;
                PinnableBufferCacheEventSource.Log.AllocateBufferFreeListEmpty(m_CacheName, m_NotGen2.Count);

                // Get more buffers if needed.
                if (m_NotGen2.Count == 0)
                {
                    CreateNewBuffers();
                }

                // We have no buffers in the aged freelist, so get one from the newer list.   Try to pick the best one.
                // Debug.Assert(m_NotGen2.Count != 0);
                int idx = m_NotGen2.Count - 1;
                if (GC.GetGeneration(m_NotGen2[idx]) < GC.MaxGeneration && GC.GetGeneration(m_NotGen2[0]) == GC.MaxGeneration)
                {
                    idx = 0;
                }
                returnBuffer = m_NotGen2[idx];
                m_NotGen2.RemoveAt(idx);

                // Remember any sub-optimal buffer so we don't put it on the free list when it gets freed.
                if (PinnableBufferCacheEventSource.Log.IsEnabled() && GC.GetGeneration(returnBuffer) < GC.MaxGeneration)
                {
                    PinnableBufferCacheEventSource.Log.AllocateBufferFromNotGen2(m_CacheName, m_NotGen2.Count);
                }

                // If we have a Gen1 collection, then everything on m_NotGen2 should have aged.  Move them to the m_FreeList.
                if (!AgePendingBuffers())
                {
                    // Before we could age a set of buffers, we have handed out half of them.
                    // This implies we should be proactive about allocating more (since we will trim them if we over-allocate).
                    if (m_NotGen2.Count == m_restockSize / 2)
                    {
                        PinnableBufferCacheEventSource.Log.DebugMessage("Proactively adding more buffers to aging pool");
                        CreateNewBuffers();
                    }
                }
            }
        }
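
Restock is the slow path of the cache's allocate call and runs only when the aged free list is empty. A hedged sketch of how the surrounding cache is used (the factory constructor and the Allocate/Free names follow the reference-source shape of this internal class and should be treated as assumptions; they may differ in forks such as the CoreRT copy referenced above):

    using System;

    internal static class PinnableBufferCacheUsageSketch
    {
        internal static void Demo()
        {
            // Factory producing the objects the cache hands out; plain 4 KB arrays here.
            var cache = new PinnableBufferCache("DemoCache", () => new byte[4096]);

            // Allocate() pops from the aged free list and falls back to Restock()
            // (shown above) when that list is empty.
            byte[] buffer = (byte[])cache.Allocate();

            // ... pin and use the buffer for native I/O ...

            // Free() hands the buffer back; it only returns to the free list once it
            // has aged into Gen 2, which is what the Gen2GcCallback registration tracks.
            cache.Free(buffer);
        }
    }
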
Code example #4
        // Token: 0x06003BB7 RID: 15287 RVA: 0x000E1148 File Offset: 0x000DF348
        public static void Register(Func<object, bool> callback, object targetObj)
        {
            Gen2GcCallback gen2GcCallback = new Gen2GcCallback();

            gen2GcCallback.Setup(callback, targetObj);
        }
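
This overload relies on the freshly created Gen2GcCallback instance being immediately unreachable, so each Gen 2 GC finalizes it and gives it a chance to run the callback. A simplified sketch of that finalizer-based mechanism (the real class additionally derives from CriticalFinalizerObject and has a no-target overload; this is an illustration, not the runtime's code):

    using System;
    using System.Runtime.InteropServices;

    internal sealed class Gen2GcCallbackSketch
    {
        private readonly Func<object, bool> _callback;
        private GCHandle _weakTarget;

        public static void Register(Func<object, bool> callback, object targetObj)
        {
            // The new object is never stored anywhere, so the next GC collects it
            // and queues its finalizer.
            new Gen2GcCallbackSketch(callback, targetObj);
        }

        private Gen2GcCallbackSketch(Func<object, bool> callback, object targetObj)
        {
            _callback = callback;
            // A weak handle so this helper does not keep the target alive.
            _weakTarget = GCHandle.Alloc(targetObj, GCHandleType.Weak);
        }

        ~Gen2GcCallbackSketch()
        {
            object target = _weakTarget.Target;
            if (target != null && _callback(target))
            {
                // Callback wants to stay registered: run the finalizer again after the
                // next GC that collects this object (by now it lives in Gen 2).
                GC.ReRegisterForFinalize(this);
            }
            else
            {
                // Target died or the callback returned false: stop for good.
                _weakTarget.Free();
            }
        }
    }
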
Code example #5
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading is over a time range other than what we are focusing on, so we do not use the read other
                // than to send it to any runtime-specific implementation that may also use the CPU utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance   = ThreadPoolInstance;
                LowLevelLock       threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                DelayHelper        delayHelper          = default;

                if (BlockingConfig.IsCooperativeBlockingEnabled)
                {
                    // Initialize memory usage and limits, and register to update them on gen 2 GCs
                    threadPoolInstance.OnGen2GCCallback();
                    Gen2GcCallback.Register(threadPoolInstance.OnGen2GCCallback);
                }

                while (true)
                {
                    RunGateThreadEvent.WaitOne();
                    int currentTimeMs = Environment.TickCount;
                    delayHelper.SetGateActivitiesTime(currentTimeMs);

                    while (true)
                    {
                        bool wasSignaledToWake = DelayEvent.WaitOne((int)delayHelper.GetNextDelay(currentTimeMs));
                        currentTimeMs = Environment.TickCount;

                        // Thread count adjustment for cooperative blocking
                        do
                        {
                            PendingBlockingAdjustment pendingBlockingAdjustment = threadPoolInstance._pendingBlockingAdjustment;
                            if (pendingBlockingAdjustment == PendingBlockingAdjustment.None)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                                break;
                            }

                            bool previousDelayElapsed = false;
                            if (delayHelper.HasBlockingAdjustmentDelay)
                            {
                                previousDelayElapsed =
                                    delayHelper.HasBlockingAdjustmentDelayElapsed(currentTimeMs, wasSignaledToWake);
                                if (pendingBlockingAdjustment == PendingBlockingAdjustment.WithDelayIfNecessary &&
                                    !previousDelayElapsed)
                                {
                                    break;
                                }
                            }

                            uint nextDelayMs = threadPoolInstance.PerformBlockingAdjustment(previousDelayElapsed);
                            if (nextDelayMs <= 0)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                            }
                            else
                            {
                                delayHelper.SetBlockingAdjustmentTimeAndDelay(currentTimeMs, nextDelayMs);
                            }
                        } while (false);

                        //
                        // Periodic gate activities
                        //

                        if (!delayHelper.ShouldPerformGateActivities(currentTimeMs, wasSignaledToWake))
                        {
                            continue;
                        }

                        if (ThreadPool.EnableWorkerTracking && NativeRuntimeEventSource.Log.IsEnabled())
                        {
                            NativeRuntimeEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        bool needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            bool addWorker = false;
                            threadAdjustmentLock.Acquire();
                            try
                            {
                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions, for now this is only to maintain consistency in behavior.
                                ThreadCounts counts = threadPoolInstance._separated.counts;
                                if (counts.NumProcessingWork < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= threadPoolInstance._separated.numThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    short newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    threadPoolInstance._separated.numThreadsGoal = newNumThreadsGoal;
                                    HillClimbing.ThreadPoolHillClimber.ForceChange(
                                        newNumThreadsGoal,
                                        HillClimbing.StateOrTransition.Starvation);
                                    addWorker = true;
                                }
                            }
                            finally
                            {
                                threadAdjustmentLock.Release();
                            }

                            if (addWorker)
                            {
                                WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                            }
                        }

                        if (!needGateThreadForRuntime &&
                            threadPoolInstance._separated.numRequestedWorkers <= 0 &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) <= GetRunningStateForNumRuns(0))
                        {
                            break;
                        }
                    }
                }
            }
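
The cooperative-blocking branch near the top of GateThreadStart primes the thread pool's memory numbers once and then registers OnGen2GCCallback so they stay fresh on every Gen 2 GC. A hedged sketch of the same pattern outside the thread pool (MemoryWatcherSketch and its members are hypothetical; Gen2GcCallback is internal to the runtime, and the delegate-only Register overload is assumed from the call site above):

    using System;

    internal static class MemoryWatcherSketch
    {
        private static long s_memoryLimitBytes;
        private static long s_memoryUsageBytes;

        public static void Start()
        {
            // Same pattern as the gate thread: take one reading eagerly,
            // then keep it up to date on every Gen 2 GC.
            OnGen2GC();
            Gen2GcCallback.Register(OnGen2GC);
        }

        // Hypothetical consumer: treat >= 80% of the high-memory-load threshold as pressure.
        public static bool IsUnderMemoryPressure =>
            s_memoryUsageBytes >= s_memoryLimitBytes * 8 / 10;

        private static bool OnGen2GC()
        {
            GCMemoryInfo info = GC.GetGCMemoryInfo();
            s_memoryLimitBytes = info.HighMemoryLoadThresholdBytes;
            s_memoryUsageBytes = Math.Min(info.MemoryLoadBytes, info.HighMemoryLoadThresholdBytes);
            return true; // keep receiving Gen 2 GC callbacks
        }
    }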