private static void GateThreadStart()
{
    bool disableStarvationDetection =
        AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
    bool debuggerBreakOnWorkStarvation =
        AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

    // The first reading is over a time range other than what we are focusing on, so we do not use the read other
    // than to send it to any runtime-specific implementation that may also use the CPU utilization.
    CpuUtilizationReader cpuUtilizationReader = default;
    _ = cpuUtilizationReader.CurrentUtilization;

    PortableThreadPool threadPoolInstance = ThreadPoolInstance;
    LowLevelLock threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
    DelayHelper delayHelper = default;

    if (BlockingConfig.IsCooperativeBlockingEnabled)
    {
        // Initialize memory usage and limits, and register to update them on gen 2 GCs
        threadPoolInstance.OnGen2GCCallback();
        Gen2GcCallback.Register(threadPoolInstance.OnGen2GCCallback);
    }

    while (true)
    {
        RunGateThreadEvent.WaitOne();
        int currentTimeMs = Environment.TickCount;
        delayHelper.SetGateActivitiesTime(currentTimeMs);

        while (true)
        {
            bool wasSignaledToWake = DelayEvent.WaitOne((int)delayHelper.GetNextDelay(currentTimeMs));
            currentTimeMs = Environment.TickCount;

            // Thread count adjustment for cooperative blocking
            do
            {
                PendingBlockingAdjustment pendingBlockingAdjustment = threadPoolInstance._pendingBlockingAdjustment;
                if (pendingBlockingAdjustment == PendingBlockingAdjustment.None)
                {
                    delayHelper.ClearBlockingAdjustmentDelay();
                    break;
                }

                bool previousDelayElapsed = false;
                if (delayHelper.HasBlockingAdjustmentDelay)
                {
                    previousDelayElapsed =
                        delayHelper.HasBlockingAdjustmentDelayElapsed(currentTimeMs, wasSignaledToWake);
                    if (pendingBlockingAdjustment == PendingBlockingAdjustment.WithDelayIfNecessary &&
                        !previousDelayElapsed)
                    {
                        break;
                    }
                }

                uint nextDelayMs = threadPoolInstance.PerformBlockingAdjustment(previousDelayElapsed);
                if (nextDelayMs <= 0)
                {
                    delayHelper.ClearBlockingAdjustmentDelay();
                }
                else
                {
                    delayHelper.SetBlockingAdjustmentTimeAndDelay(currentTimeMs, nextDelayMs);
                }
            } while (false);

            //
            // Periodic gate activities
            //

            if (!delayHelper.ShouldPerformGateActivities(currentTimeMs, wasSignaledToWake))
            {
                continue;
            }

            if (ThreadPool.EnableWorkerTracking && NativeRuntimeEventSource.Log.IsEnabled())
            {
                NativeRuntimeEventSource.Log.ThreadPoolWorkingThreadCount(
                    (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
            }

            int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
            threadPoolInstance._cpuUtilization = cpuUtilization;

            bool needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

            if (!disableStarvationDetection &&
                threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                threadPoolInstance._separated.numRequestedWorkers > 0 &&
                SufficientDelaySinceLastDequeue(threadPoolInstance))
            {
                bool addWorker = false;
                threadAdjustmentLock.Acquire();
                try
                {
                    // Don't add a thread if we're at max or if we are already in the process of adding threads.
                    // This logic is slightly different from the native implementation in CoreCLR because there are
                    // no retired threads. In the native implementation, when hill climbing reduces the thread count
                    // goal, threads that are stopped from processing work are switched to "retired" state, and they
                    // don't count towards the equivalent existing thread count. In this implementation, the
                    // existing thread count includes any worker thread that has not yet exited, including those
                    // stopped from working by hill climbing, so here the number of threads processing work, instead
                    // of the number of existing threads, is compared with the goal. There may be alternative
                    // solutions, for now this is only to maintain consistency in behavior.
                    ThreadCounts counts = threadPoolInstance._separated.counts;
                    if (counts.NumProcessingWork < threadPoolInstance._maxThreads &&
                        counts.NumProcessingWork >= threadPoolInstance._separated.numThreadsGoal)
                    {
                        if (debuggerBreakOnWorkStarvation)
                        {
                            Debugger.Break();
                        }

                        short newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                        threadPoolInstance._separated.numThreadsGoal = newNumThreadsGoal;
                        HillClimbing.ThreadPoolHillClimber.ForceChange(
                            newNumThreadsGoal,
                            HillClimbing.StateOrTransition.Starvation);
                        addWorker = true;
                    }
                }
                finally
                {
                    threadAdjustmentLock.Release();
                }

                if (addWorker)
                {
                    WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                }
            }

            if (!needGateThreadForRuntime &&
                threadPoolInstance._separated.numRequestedWorkers <= 0 &&
                threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) <=
                    GetRunningStateForNumRuns(0))
            {
                break;
            }
        }
    }
}
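// A minimal usage sketch (an assumption, not established by this file): the two configuration values read at
// the top of GateThreadStart are looked up by name through AppContextConfigHelper, so a host or test that can
// set the corresponding AppContext switches (e.g. via AppContext.SetSwitch or runtimeconfig.json
// configProperties) could toggle them, for example:
//
//     AppContext.SetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", true);
//     AppContext.SetSwitch("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", true);
//
// The switch names are taken verbatim from the code above; how they are surfaced to applications is an
// assumption here. Since GateThreadStart reads these values only once, on entry, any such setting would have
// to be applied before the gate thread starts.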