/// <summary>
/// Reduce the number of working workers by one, but maybe add back a worker (possibily this
/// thread) if a thread request comes in while we are marking this thread as not working.
/// </summary>
private static void RemoveWorkingWorker()
{
    ThreadCounts currentCounts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
    // CAS retry loop: decrement numProcessingWork against an unchanged snapshot of the counts.
    while (true)
    {
        ThreadCounts newCounts = currentCounts;
        newCounts.numProcessingWork--;
        ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, currentCounts);
        if (oldCounts == currentCounts)
        {
            // The decrement was published; stop retrying.
            break;
        }
        // Another thread changed the counts concurrently; retry with the latest value.
        currentCounts = oldCounts;
    }

    // It's possible that we decided we had thread requests just before a request came in,
    // but reduced the worker count *after* the request came in. In this case, we might
    // miss the notification of a thread request. So we wake up a thread (maybe this one!)
    // if there is work to do.
    if (ThreadPoolInstance._numRequestedWorkers > 0)
    {
        MaybeAddWorkingWorker();
    }
}
/// <summary>
/// Ensures an additional worker is processing work when the counts allow it: bumps the
/// processing-work count (capped at the thread count goal), then either releases a waiting
/// thread or creates a new one to cover the increase. If thread creation fails, the
/// reserved counts are rolled back.
/// </summary>
internal static void MaybeAddWorkingWorker()
{
    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
    ThreadCounts newCounts;
    while (true)
    {
        newCounts = counts;
        // Add at most one processing worker, never exceeding the goal and never reducing the current value.
        newCounts.numProcessingWork = Math.Max(counts.numProcessingWork, Math.Min((short)(counts.numProcessingWork + 1), counts.numThreadsGoal));
        // Make sure enough threads exist to back every worker that is processing work.
        newCounts.numExistingThreads = Math.Max(counts.numExistingThreads, newCounts.numProcessingWork);

        if (newCounts == counts)
        {
            // Already at (or above) the goal; nothing to add.
            return;
        }

        ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
        if (oldCounts == counts)
        {
            break;
        }
        // Counts changed concurrently; retry with the latest snapshot.
        counts = oldCounts;
    }

    // How many new threads must be created vs. existing waiters that can simply be woken.
    int toCreate = newCounts.numExistingThreads - counts.numExistingThreads;
    int toRelease = newCounts.numProcessingWork - counts.numProcessingWork;

    if (toRelease > 0)
    {
        // Wake already-existing threads blocked in WaitForRequest.
        s_semaphore.Release(toRelease);
    }

    while (toCreate > 0)
    {
        if (TryCreateWorkerThread())
        {
            toCreate--;
        }
        else
        {
            // Thread creation failed; back out the counts reserved for threads that will
            // never start, then give up on creating the remainder.
            counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
            while (true)
            {
                newCounts = counts;
                newCounts.numProcessingWork -= (short)toCreate;
                newCounts.numExistingThreads -= (short)toCreate;
                ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                if (oldCounts == counts)
                {
                    break;
                }
                counts = oldCounts;
            }
            toCreate = 0;
        }
    }
}
/// <summary>
/// Determines whether this thread should stop processing work even though work may remain
/// in the queue. That is the case when more threads are processing work than the current
/// thread count goal, meaning hill climbing decided to shrink the pool. Surplus existing
/// (idle) threads are fine — they time out on their own if the goal stays low. Unlike the
/// original CoreCLR code this was ported from (which retires threads and re-checks pending
/// requests like RemoveWorkingWorker), this implementation has no retired threads, so only
/// the count of threads processing work is considered.
/// </summary>
/// <returns>Whether or not this thread should stop processing work even if there is still work in the queue.</returns>
internal static bool ShouldStopProcessingWorkNow()
{
    ThreadCounts observed = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
    while (observed.numProcessingWork > observed.numThreadsGoal)
    {
        // Too many active workers: try to retire this one by publishing a decremented count.
        ThreadCounts desired = observed;
        desired.numProcessingWork--;
        ThreadCounts witnessed = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, desired, observed);
        if (witnessed == observed)
        {
            return true;
        }
        // Lost the race; re-evaluate against the freshly observed counts.
        observed = witnessed;
    }
    return false;
}
/// <summary>
/// Entry point for a worker thread: repeatedly waits for work requests and dispatches work.
/// When the wait times out, the thread retires itself — decrementing the existing-thread
/// count, lowering the thread count goal accordingly, and notifying hill climbing — unless
/// enough work arrived that it should go back to work instead.
/// </summary>
private static void WorkerThreadStart()
{
    ClrThreadPoolEventSource.Log.WorkerThreadStart(ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts).numExistingThreads);
    RuntimeThread currentThread = RuntimeThread.CurrentThread;
    while (true)
    {
        while (WaitForRequest())
        {
            if (TakeActiveRequest())
            {
                Volatile.Write(ref ThreadPoolInstance._separated.lastDequeueTime, Environment.TickCount);
                if (ThreadPoolWorkQueue.Dispatch())
                {
                    // If the queue runs out of work for us, we need to update the number of working workers to reflect that we are done working for now
                    RemoveWorkingWorker();
                }
            }
            else
            {
                // If we woke up but couldn't find a request, we need to update the number of working workers to reflect that we are done working for now
                RemoveWorkingWorker();
            }
        }

        ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Acquire();
        try
        {
            // At this point, the thread's wait timed out. We are shutting down this thread.
            // We are going to decrement the number of existing threads to no longer include this one
            // and then change the max number of threads in the thread pool to reflect that we don't need as many
            // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
            ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
            while (true)
            {
                if (counts.numExistingThreads == counts.numProcessingWork)
                {
                    // In this case, enough work came in that this thread should not time out and should go back to work.
                    break;
                }

                ThreadCounts newCounts = counts;
                newCounts.numExistingThreads--;
                newCounts.numThreadsGoal = Math.Max(ThreadPoolInstance._minThreads, Math.Min(newCounts.numExistingThreads, newCounts.numThreadsGoal));
                ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                if (oldCounts == counts)
                {
                    HillClimbing.ThreadPoolHillClimber.ForceChange(newCounts.numThreadsGoal, HillClimbing.StateOrTransition.ThreadTimedOut);
                    ClrThreadPoolEventSource.Log.WorkerThreadStop(newCounts.numExistingThreads);
                    return;
                }

                // BUGFIX: on a failed CompareExchange, retry with the freshly observed counts.
                // Previously 'counts' was left stale here, so the loop could spin forever
                // re-attempting the CAS against a value that no longer matches.
                counts = oldCounts;
            }
        }
        finally
        {
            ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Release();
        }
    }
}
// TODO: CoreCLR: Worker Tracking in CoreCLR? (Config name: ThreadPool_EnableWorkerTracking)
/// <summary>
/// Main loop of the gate thread: wakes periodically to nudge a worker awake when requests
/// are pending and, when starvation is detected (requests pending but nothing dequeued for
/// too long), raises the thread count goal past the existing-thread count and injects a worker.
/// </summary>
private static void GateThreadStart()
{
    AppContext.TryGetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", out bool disableStarvationDetection);
    AppContext.TryGetSwitch("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", out bool debuggerBreakOnWorkStarvation);

    while (true)
    {
        RuntimeThread.Sleep(GateThreadDelayMs);

        if (ThreadPoolInstance._numRequestedWorkers > 0)
        {
            WorkerThread.MaybeAddWorkingWorker();
        }

        if (!s_requested)
        {
            continue;
        }
        s_requested = false;

        ThreadPoolInstance._cpuUtilization = s_cpu.CurrentUtilization;

        if (!disableStarvationDetection)
        {
            if (ThreadPoolInstance._numRequestedWorkers > 0 && SufficientDelaySinceLastDequeue())
            {
                // BUGFIX: acquire the lock *before* entering the try block. With Acquire
                // inside the try, a throwing/failed acquisition would still run the finally
                // and release a lock this thread never took. This also matches the
                // acquire-then-try pattern used elsewhere in this file.
                ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Acquire();
                try
                {
                    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                    // don't add a thread if we're at max or if we are already in the process of adding threads
                    while (counts.numExistingThreads < ThreadPoolInstance._maxThreads && counts.numExistingThreads >= counts.numThreadsGoal)
                    {
                        if (debuggerBreakOnWorkStarvation)
                        {
                            Debug.WriteLine("The CLR ThreadPool detected work starvation!");
                            Debugger.Break();
                        }

                        ThreadCounts newCounts = counts;
                        newCounts.numThreadsGoal = (short)(newCounts.numExistingThreads + 1);
                        ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                        if (oldCounts == counts)
                        {
                            HillClimbing.ThreadPoolHillClimber.ForceChange(newCounts.numThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                            WorkerThread.MaybeAddWorkingWorker();
                            break;
                        }
                        // Counts changed concurrently; retry with the latest snapshot.
                        counts = oldCounts;
                    }
                }
                finally
                {
                    ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Release();
                }
            }
        }
    }
}
//
// This method must only be called if ShouldAdjustMaxWorkersActive has returned true, *and*
// _hillClimbingThreadAdjustmentLock is held.
//
private void AdjustMaxWorkersActive()
{
    _hillClimbingThreadAdjustmentLock.VerifyIsLocked();
    int currentTicks = Environment.TickCount;
    int totalNumCompletions = (int)_completionCounter.Count;
    // Completions observed since the previous hill-climbing sample.
    int numCompletions = totalNumCompletions - _separated.priorCompletionCount;
    long startTime = _currentSampleStartTime;
    long endTime = Stopwatch.GetTimestamp();
    long freq = Stopwatch.Frequency;

    double elapsedSeconds = (double)(endTime - startTime) / freq;

    // Only feed hill climbing when at least half the adjustment interval has elapsed,
    // so samples are long enough to be meaningful.
    if (elapsedSeconds * 1000 >= _threadAdjustmentIntervalMs / 2)
    {
        ThreadCounts currentCounts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
        int newMax;
        (newMax, _threadAdjustmentIntervalMs) = HillClimbing.ThreadPoolHillClimber.Update(currentCounts.numThreadsGoal, elapsedSeconds, numCompletions);

        // CAS loop: publish the new goal unless another thread changes the counts first.
        while (newMax != currentCounts.numThreadsGoal)
        {
            ThreadCounts newCounts = currentCounts;
            newCounts.numThreadsGoal = (short)newMax;

            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, currentCounts);
            if (oldCounts == currentCounts)
            {
                //
                // If we're increasing the max, inject a thread.  If that thread finds work, it will inject
                // another thread, etc., until nobody finds work or we reach the new maximum.
                //
                // If we're reducing the max, whichever threads notice this first will sleep and timeout themselves.
                //
                if (newMax > oldCounts.numThreadsGoal)
                {
                    WorkerThread.MaybeAddWorkingWorker();
                }
                break;
            }
            else
            {
                if (oldCounts.numThreadsGoal > currentCounts.numThreadsGoal && oldCounts.numThreadsGoal >= newMax)
                {
                    // someone (probably the gate thread) increased the thread count more than
                    // we are about to do.  Don't interfere.
                    break;
                }

                currentCounts = oldCounts;
            }
        }

        // Record this sample as the new baseline for the next adjustment decision.
        _separated.priorCompletionCount = totalNumCompletions;
        _separated.nextCompletedWorkRequestsTime = currentTicks + _threadAdjustmentIntervalMs;
        Volatile.Write(ref _separated.priorCompletedWorkRequestsTime, currentTicks);
        _currentSampleStartTime = endTime;
    }
}
/// <summary>
/// Blocks until a work request arrives or the thread timeout elapses.
/// </summary>
/// <returns>If this thread was woken up before it timed out.</returns>
private static bool WaitForRequest()
{
    PortableThreadPoolEventSource eventSource = PortableThreadPoolEventSource.Log;
    if (eventSource.IsEnabled())
    {
        // Only read the counts when the event is actually being traced.
        short existingThreads = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts).numExistingThreads;
        eventSource.WorkerThreadWait(existingThreads);
    }
    return s_semaphore.Wait(ThreadPoolThreadTimeoutMs);
}
/// <summary>
/// Gets the number of additional worker threads that could currently start processing
/// work: the configured maximum minus the threads already processing work, floored at zero.
/// </summary>
public int GetAvailableThreads()
{
    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
    int available = _maxThreads - counts.numProcessingWork;
    return available > 0 ? available : 0;
}
// TODO: CoreCLR: Worker Tracking in CoreCLR? (Config name: ThreadPool_EnableWorkerTracking)
/// <summary>
/// Main loop of the gate thread: waits until it is requested to run, then periodically
/// samples CPU utilization and, when starvation is detected (requests pending but nothing
/// dequeued for too long), raises the thread count goal past the existing-thread count and
/// injects a worker. Keeps running while workers are requested or the run count is positive.
/// </summary>
private static void GateThreadStart()
{
    _ = s_cpu.CurrentUtilization; // The first reading is over a time range other than what we are focusing on, so we do not use the read.

    AppContext.TryGetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", out bool disableStarvationDetection);
    AppContext.TryGetSwitch("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", out bool debuggerBreakOnWorkStarvation);

    while (true)
    {
        s_runGateThreadEvent.WaitOne();
        do
        {
            Thread.Sleep(GateThreadDelayMs);

            ThreadPoolInstance._cpuUtilization = s_cpu.CurrentUtilization;

            if (!disableStarvationDetection)
            {
                if (ThreadPoolInstance._numRequestedWorkers > 0 && SufficientDelaySinceLastDequeue())
                {
                    // BUGFIX: acquire the lock *before* entering the try block. With Acquire
                    // inside the try, a throwing/failed acquisition would still run the
                    // finally and release a lock this thread never took. This also matches
                    // the acquire-then-try pattern used elsewhere in this file.
                    ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Acquire();
                    try
                    {
                        ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                        // don't add a thread if we're at max or if we are already in the process of adding threads
                        while (counts.numExistingThreads < ThreadPoolInstance._maxThreads && counts.numExistingThreads >= counts.numThreadsGoal)
                        {
                            if (debuggerBreakOnWorkStarvation)
                            {
                                Debugger.Break();
                            }

                            ThreadCounts newCounts = counts;
                            newCounts.numThreadsGoal = (short)(newCounts.numExistingThreads + 1);
                            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(newCounts.numThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                                WorkerThread.MaybeAddWorkingWorker();
                                break;
                            }
                            // Counts changed concurrently; retry with the latest snapshot.
                            counts = oldCounts;
                        }
                    }
                    finally
                    {
                        ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Release();
                    }
                }
            }
        } while (ThreadPoolInstance._numRequestedWorkers > 0 || Interlocked.Decrement(ref s_runningState) > GetRunningStateForNumRuns(0));
    }
}
/// <summary>
/// Decides whether it is time to run hill climbing: the adjustment interval must have
/// elapsed since the prior completed-work-requests sample, and at least as many threads
/// must exist as the current thread count goal.
/// </summary>
private bool ShouldAdjustMaxWorkersActive()
{
    // Environment.TickCount wraps around, so compare elapsed deltas relative to the prior
    // sample time rather than absolute tick values.
    int priorTime = Volatile.Read(ref _separated.priorCompletedWorkRequestsTime);
    int requiredInterval = _separated.nextCompletedWorkRequestsTime - priorTime;
    if (Environment.TickCount - priorTime < requiredInterval)
    {
        return false;
    }

    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
    return counts.numExistingThreads >= counts.numThreadsGoal;
}
/// <summary>
/// Sets the minimum number of worker threads. Rejects negative values and values above the
/// current maximum. When accepted (and no forced minimum is configured), raises the thread
/// count goal up to the new minimum and, if work is requested, wakes a worker.
/// </summary>
/// <param name="minThreads">Requested minimum worker thread count.</param>
/// <returns>true if the minimum was valid and applied; otherwise, false.</returns>
public bool SetMinThreads(int minThreads)
{
    _maxMinThreadLock.Acquire();
    try
    {
        if (minThreads < 0 || minThreads > _maxThreads)
        {
            return(false);
        }
        else
        {
            // Clamp to the representable thread count (counts are stored as shorts).
            short threads = (short)Math.Min(minThreads, MaxPossibleThreadCount);
            // A forced minimum (config override) takes precedence over API calls.
            if (s_forcedMinWorkerThreads == 0)
            {
                _minThreads = threads;

                ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                // CAS loop: raise the goal until it is at least the new minimum.
                while (counts.numThreadsGoal < _minThreads)
                {
                    ThreadCounts newCounts = counts;
                    newCounts.numThreadsGoal = _minThreads;

                    ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, counts);
                    if (oldCounts == counts)
                    {
                        counts = newCounts;

                        // The goal went up and work is pending, so make sure a worker runs.
                        if (newCounts.numThreadsGoal > oldCounts.numThreadsGoal && _numRequestedWorkers > 0)
                        {
                            WorkerThread.MaybeAddWorkingWorker();
                        }
                    }
                    else
                    {
                        // Counts changed concurrently; retry with the latest snapshot.
                        counts = oldCounts;
                    }
                }
            }
            return(true);
        }
    }
    finally
    {
        _maxMinThreadLock.Release();
    }
}
// called by logic to spawn new worker threads, return true if it's been too long
// since the last dequeue operation - takes number of worker threads into account
// in deciding "too long"
private static bool SufficientDelaySinceLastDequeue()
{
    int elapsedSinceDequeue = Environment.TickCount - Volatile.Read(ref ThreadPoolInstance._separated.lastDequeueTime);

    int threshold;
    if (ThreadPoolInstance._cpuUtilization < CpuUtilizationLow)
    {
        // CPU is mostly idle: tolerate only a short gate-delay-sized gap.
        threshold = GateThreadDelayMs;
    }
    else
    {
        // CPU is busy: scale the tolerated gap with the thread count goal.
        threshold = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts).numThreadsGoal * DequeueDelayThresholdMs;
    }

    return elapsedSinceDequeue > threshold;
}
/// <summary>
/// Sets the maximum number of worker threads. Rejects zero and values below the current
/// minimum. When accepted (and no forced maximum is configured), lowers the thread count
/// goal down to the new maximum if it currently exceeds it.
/// </summary>
/// <param name="maxThreads">Requested maximum worker thread count.</param>
/// <returns>true if the maximum was valid and applied; otherwise, false.</returns>
public bool SetMaxThreads(int maxThreads)
{
    _maxMinThreadLock.Acquire();
    try
    {
        if (maxThreads < _minThreads || maxThreads == 0)
        {
            return false;
        }

        // Clamp to the representable thread count (counts are stored as shorts).
        short threads = (short)Math.Min(maxThreads, MaxPossibleThreadCount);
        // A forced maximum (config override) takes precedence over API calls.
        if (s_forcedMaxWorkerThreads == 0)
        {
            _maxThreads = threads;

            ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
            // CAS loop: lower the goal until it no longer exceeds the new maximum.
            // FIX: compare against the clamped stored value (_maxThreads) — the value the
            // goal is actually set to below — not the raw 'maxThreads' argument, matching
            // the pattern used by SetMinThreads.
            while (counts.numThreadsGoal > _maxThreads)
            {
                ThreadCounts newCounts = counts;
                newCounts.numThreadsGoal = _maxThreads;

                ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, counts);
                if (oldCounts == counts)
                {
                    counts = newCounts;
                }
                else
                {
                    // Counts changed concurrently; retry with the latest snapshot.
                    counts = oldCounts;
                }
            }
        }
        return true;
    }
    finally
    {
        _maxMinThreadLock.Release();
    }
}
/// <summary>
/// Decides whether it is time to run hill climbing again.
/// </summary>
private bool ShouldAdjustMaxWorkersActive()
{
    // Environment.TickCount wraps around, so compare elapsed deltas relative to the prior
    // sample time rather than absolute tick values.
    int priorTime = Volatile.Read(ref _separated.priorCompletedWorkRequestsTime);
    int requiredInterval = _separated.nextCompletedWorkRequestsTime - priorTime;
    if (Environment.TickCount - priorTime < requiredInterval)
    {
        return false;
    }

    // Avoid adjusting while more threads are processing work than the current goal: hill
    // climbing must have previously decided to shrink the pool, so wait for the system to
    // respond to that change before calling into hill climbing again. This condition is
    // intended to be the opposite of the one in WorkerThread.ShouldStopProcessingWorkNow
    // that makes processing threads stop after the goal is lowered. The logic differs from
    // the original CoreCLR code this was ported from because this implementation has no
    // retired threads, so only the count of threads processing work is considered.
    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
    return counts.numProcessingWork <= counts.numThreadsGoal;
}
/// <summary>
/// Returns if the current thread should stop processing work on the thread pool.
/// A thread should stop processing work on the thread pool when work remains only when
/// there are more threads processing work than we currently want.
/// </summary>
/// <returns>Whether or not this thread should stop processing work even if there is still work in the queue.</returns>
internal static bool ShouldStopProcessingWorkNow()
{
    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
    while (true)
    {
        // FIX: guard on numProcessingWork — the field decremented below — instead of
        // numExistingThreads. With surplus idle threads (existing > goal), the old guard
        // allowed decrementing numProcessingWork even when it was already at or below the
        // goal, which can drive it below the goal (or negative). It also restores the
        // invariant ShouldAdjustMaxWorkersActive documents: its numProcessingWork <=
        // numThreadsGoal check is meant to be the exact opposite of this condition.
        if (counts.numProcessingWork <= counts.numThreadsGoal)
        {
            return false;
        }
        ThreadCounts newCounts = counts;
        newCounts.numProcessingWork--;

        ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
        if (oldCounts == counts)
        {
            return true;
        }
        // Counts changed concurrently; retry with the latest snapshot.
        counts = oldCounts;
    }
}
/// <summary>
/// Blocks until a work request arrives or the thread timeout elapses.
/// </summary>
/// <returns>If this thread was woken up before it timed out.</returns>
private static bool WaitForRequest()
{
    short existingThreads = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts).numExistingThreads;
    ClrThreadPoolEventSource.Log.WorkerThreadWait(existingThreads);
    return s_semaphore.Wait(ThreadPoolThreadTimeoutMs);
}