/// <summary>
/// Determines whether the current thread should stop processing work on the thread pool.
/// A thread should stop when work remains only if there are currently more worker threads
/// processing work than the thread pool wants.
/// </summary>
/// <param name="threadPoolInstance">The thread pool instance whose counts are examined and updated.</param>
/// <returns>Whether or not this thread should stop processing work even if there is still work in the queue.</returns>
internal static bool ShouldStopProcessingWorkNow(PortableThreadPool threadPoolInstance)
{
    ThreadCounts currentCounts = threadPoolInstance._separated.counts;
    while (true)
    {
        // When more threads are processing work than the thread count goal, a decision may have been made
        // to decrease the number of threads. Stop processing if the counts can be updated accordingly. It is
        // fine for more threads to exist than the goal; the cold ones eventually time out if the goal is not
        // raised again. This differs from the original CoreCLR code this was ported from, which turned a
        // processing thread into a retired thread and checked for pending requests like RemoveWorkingWorker.
        // There are no retired threads here, so only the count of threads processing work matters.
        if (currentCounts.NumProcessingWork <= currentCounts.NumThreadsGoal)
        {
            return false;
        }

        ThreadCounts updatedCounts = currentCounts;
        updatedCounts.SubtractNumProcessingWork(1);

        ThreadCounts countsBeforeUpdate =
            threadPoolInstance._separated.counts.InterlockedCompareExchange(updatedCounts, currentCounts);
        if (countsBeforeUpdate == currentCounts)
        {
            return true;
        }

        // Lost the race; retry against the counts observed by the failed CAS.
        currentCounts = countsBeforeUpdate;
    }
}
/// <summary>
/// Reduces the number of working workers by one, but possibly adds a worker back
/// (maybe even this thread) if a thread request arrived while this thread was being
/// marked as not working.
/// </summary>
/// <param name="threadPoolInstance">The thread pool instance whose counts are updated.</param>
private static void RemoveWorkingWorker(PortableThreadPool threadPoolInstance)
{
    ThreadCounts observedCounts = threadPoolInstance._separated.counts.VolatileRead();
    ThreadCounts countsBeforeUpdate;
    do
    {
        ThreadCounts decrementedCounts = observedCounts;
        decrementedCounts.SubtractNumProcessingWork(1);

        countsBeforeUpdate =
            threadPoolInstance._separated.counts.InterlockedCompareExchange(decrementedCounts, observedCounts);
        if (countsBeforeUpdate == observedCounts)
        {
            break;
        }

        // CAS lost a race; retry with the counts it observed.
        observedCounts = countsBeforeUpdate;
    } while (true);

    // It's possible that we decided we had thread requests just before a request came in,
    // but reduced the worker count *after* the request came in. In that case the notification
    // of the thread request may have been missed, so wake up a thread (maybe this one!)
    // if there is work to do.
    if (threadPoolInstance._separated.numRequestedWorkers > 0)
    {
        MaybeAddWorkingWorker(threadPoolInstance);
    }
}
/// <summary>
/// Reduces the number of working workers by one, but possibly adds a worker back
/// (maybe even this thread) if a thread request arrived while this thread was being
/// marked as not working.
/// </summary>
/// <param name="threadPoolInstance">The thread pool instance whose counts are updated.</param>
private static void RemoveWorkingWorker(PortableThreadPool threadPoolInstance)
{
    ThreadCounts observedCounts = threadPoolInstance._separated.counts.VolatileRead();
    ThreadCounts countsBeforeUpdate;
    do
    {
        ThreadCounts decrementedCounts = observedCounts;
        decrementedCounts.SubtractNumProcessingWork(1);

        countsBeforeUpdate =
            threadPoolInstance._separated.counts.InterlockedCompareExchange(decrementedCounts, observedCounts);
        if (countsBeforeUpdate == observedCounts)
        {
            break;
        }

        // CAS lost a race; retry with the counts it observed.
        observedCounts = countsBeforeUpdate;
    } while (true);

    if (observedCounts.NumProcessingWork > 1)
    {
        // In highly bursty cases with short bursts of work, especially in the portable thread pool
        // implementation, worker threads get released and enter Dispatch very quickly, find little work
        // there, and soon return to Dispatch, causing extra thrashing on data and some interlocked
        // operations. When this is not the last thread to stop processing work, a slight delay helps
        // other threads make more efficient progress. The spin-wait mainly covers the case where the
        // sleep is ineffective because there are no other threads to schedule.
        Thread.UninterruptibleSleep0();
        if (!Environment.IsSingleProcessor)
        {
            Thread.SpinWait(1);
        }
    }

    // It's possible that we decided we had thread requests just before a request came in,
    // but reduced the worker count *after* the request came in. In that case the notification
    // of the thread request may have been missed, so wake up a thread (maybe this one!)
    // if there is work to do.
    if (threadPoolInstance._separated.numRequestedWorkers > 0)
    {
        MaybeAddWorkingWorker(threadPoolInstance);
    }
}
/// <summary>
/// Attempts to increase the number of threads processing work toward the thread count goal,
/// releasing waiting workers via the semaphore and creating new worker threads as needed.
/// If a new thread cannot be created, the counts reserved for the remaining threads are
/// rolled back.
/// </summary>
/// <param name="threadPoolInstance">The thread pool instance whose counts are updated.</param>
internal static void MaybeAddWorkingWorker(PortableThreadPool threadPoolInstance)
{
    ThreadCounts observedCounts = threadPoolInstance._separated.counts;
    short existingBefore, processingBefore, existingAfter, processingAfter;
    while (true)
    {
        processingBefore = observedCounts.NumProcessingWork;
        if (processingBefore >= observedCounts.NumThreadsGoal)
        {
            // Already at (or beyond) the goal; nothing to add.
            return;
        }

        processingAfter = (short)(processingBefore + 1);
        existingBefore = observedCounts.NumExistingThreads;
        // Ensure the existing-thread count covers the new processing count.
        existingAfter = Math.Max(existingBefore, processingAfter);

        ThreadCounts proposedCounts = observedCounts;
        proposedCounts.NumProcessingWork = processingAfter;
        proposedCounts.NumExistingThreads = existingAfter;

        ThreadCounts countsBeforeUpdate =
            threadPoolInstance._separated.counts.InterlockedCompareExchange(proposedCounts, observedCounts);
        if (countsBeforeUpdate == observedCounts)
        {
            break;
        }

        // CAS lost a race; retry with the counts it observed.
        observedCounts = countsBeforeUpdate;
    }

    int toCreate = existingAfter - existingBefore;
    int toRelease = processingAfter - processingBefore;

    // Wake up already-existing workers waiting on the semaphore.
    if (toRelease > 0)
    {
        s_semaphore.Release(toRelease);
    }

    // Create any additional threads needed to reach the new existing-thread count.
    while (toCreate > 0)
    {
        if (TryCreateWorkerThread())
        {
            toCreate--;
            continue;
        }

        // Thread creation failed: roll back the counts reserved for the threads that
        // were not created, then stop trying.
        observedCounts = threadPoolInstance._separated.counts;
        while (true)
        {
            ThreadCounts rolledBackCounts = observedCounts;
            rolledBackCounts.SubtractNumProcessingWork((short)toCreate);
            rolledBackCounts.SubtractNumExistingThreads((short)toCreate);

            ThreadCounts countsBeforeUpdate =
                threadPoolInstance._separated.counts.InterlockedCompareExchange(rolledBackCounts, observedCounts);
            if (countsBeforeUpdate == observedCounts)
            {
                break;
            }

            observedCounts = countsBeforeUpdate;
        }

        break;
    }
}