private static void WorkerThreadStart()
            {
                Thread.CurrentThread.SetThreadPoolWorkerThreadName();

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;

                if (NativeRuntimeEventSource.Log.IsEnabled())
                {
                    NativeRuntimeEventSource.Log.ThreadPoolWorkerThreadStart(
                        (uint)threadPoolInstance._separated.counts.VolatileRead().NumExistingThreads);
                }

                LowLevelLock          threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                LowLevelLifoSemaphore semaphore            = s_semaphore;

                while (true)
                {
                    bool spinWait = true;
                    while (semaphore.Wait(ThreadPoolThreadTimeoutMs, spinWait))
                    {
                        bool alreadyRemovedWorkingWorker = false;
                        while (TakeActiveRequest(threadPoolInstance))
                        {
                            threadPoolInstance._separated.lastDequeueTime = Environment.TickCount;
                            if (!ThreadPoolWorkQueue.Dispatch())
                            {
                                // ShouldStopProcessingWorkNow() caused the thread to stop processing work, and it would have
                                // already removed this working worker in the counts. This typically happens when hill climbing
                                // decreases the worker thread count goal.
                                alreadyRemovedWorkingWorker = true;
                                break;
                            }

                            if (threadPoolInstance._separated.numRequestedWorkers <= 0)
                            {
                                break;
                            }

                            // In highly bursty cases with short bursts of work, especially in the portable thread pool
                            // implementation, worker threads are released and enter Dispatch very quickly, find little
                            // work there, and soon afterwards re-enter Dispatch, causing extra thrashing on data and on
                            // some interlocked operations; something similar happens when the thread pool runs out of
                            // work. Since there is a pending request for work, introduce a slight delay before serving
                            // the next request. The spin-wait is mainly for when the sleep is not effective because
                            // there are no other threads to schedule.
                            Thread.UninterruptibleSleep0();
                            if (!Environment.IsSingleProcessor)
                            {
                                Thread.SpinWait(1);
                            }
                        }

                        // Don't spin-wait on the semaphore next time if the thread was actively stopped from processing work,
                        // as it's unlikely that the worker thread count goal would be increased again so soon afterwards that
                        // the semaphore would be released within the spin-wait window
                        spinWait = !alreadyRemovedWorkingWorker;

                        if (!alreadyRemovedWorkingWorker)
                        {
                            // If we woke up but couldn't find a request, or ran out of work items to process, we need to update
                            // the number of working workers to reflect that we are done working for now
                            RemoveWorkingWorker(threadPoolInstance);
                        }
                    }

                    threadAdjustmentLock.Acquire();
                    try
                    {
                        // At this point, the thread's wait timed out. We are shutting down this thread.
                        // We are going to decrement the number of existing threads to no longer include this one
                        // and then change the max number of threads in the thread pool to reflect that we don't need as many
                        // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
                        ThreadCounts counts = threadPoolInstance._separated.counts;
                        while (true)
                        {
                            // Since this thread is currently registered as an existing thread, if more work comes in meanwhile,
                            // this thread would be expected to satisfy the new work. Ensure that NumExistingThreads is not
                            // decreased below NumProcessingWork, as that would be indicative of such a case.
                            if (counts.NumExistingThreads <= counts.NumProcessingWork)
                            {
                                // In this case, enough work came in that this thread should not time out and should go back to work.
                                break;
                            }

                            ThreadCounts newCounts             = counts;
                            short        newNumExistingThreads = --newCounts.NumExistingThreads;
                            short        newNumThreadsGoal     =
                                Math.Max(
                                    threadPoolInstance.MinThreadsGoal,
                                    Math.Min(newNumExistingThreads, counts.NumThreadsGoal));
                            newCounts.NumThreadsGoal = newNumThreadsGoal;

                            ThreadCounts oldCounts =
                                threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(
                                    newNumThreadsGoal,
                                    HillClimbing.StateOrTransition.ThreadTimedOut);
                                if (NativeRuntimeEventSource.Log.IsEnabled())
                                {
                                    NativeRuntimeEventSource.Log.ThreadPoolWorkerThreadStop((uint)newNumExistingThreads);
                                }
                                return;
                            }

                            counts = oldCounts;
                        }
                    }
                    finally
                    {
                        threadAdjustmentLock.Release();
                    }
                }
            }
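
The retirement path above updates the packed thread counts with an optimistic compare-exchange loop: read the counts, compute the new values, attempt the swap, and retry from the freshly observed counts if another thread raced in. Below is a minimal sketch of that retry shape on a plain int field; the class, field, and floor parameter are hypothetical and only illustrate the pattern, not the real ThreadCounts type.

using System.Threading;

// Hypothetical illustration: a lock-free "decrement, but never below a floor" update, retried
// until CompareExchange observes an unchanged value, mirroring the retry loop above.
internal static class CasLoopSketch
{
    private static int s_existingThreads = 4;

    public static bool TryRetireOneThread(int minThreads)
    {
        int observed = Volatile.Read(ref s_existingThreads);
        while (true)
        {
            if (observed <= minThreads)
            {
                return false; // the count is already at the floor; don't retire this thread
            }

            int desired = observed - 1;
            int original = Interlocked.CompareExchange(ref s_existingThreads, desired, observed);
            if (original == observed)
            {
                return true; // our update won the race
            }

            observed = original; // another thread changed the counts; re-evaluate and retry
        }
    }
}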
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading covers a time range other than the one we are focusing on, so the value is not
                // used other than to send it to any runtime-specific implementation that may also use the CPU
                // utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;
                LowLevelLock       hillClimbingThreadAdjustmentLock = threadPoolInstance._hillClimbingThreadAdjustmentLock;

                while (true)
                {
                    s_runGateThreadEvent.WaitOne();

                    bool needGateThreadForRuntime;
                    do
                    {
                        Thread.Sleep(GateThreadDelayMs);

                        if (ThreadPool.EnableWorkerTracking && PortableThreadPoolEventSource.Log.IsEnabled())
                        {
                            PortableThreadPoolEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            try
                            {
                                hillClimbingThreadAdjustmentLock.Acquire();
                                ThreadCounts counts = threadPoolInstance._separated.counts.VolatileRead();

                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions; for now this is only to maintain consistency in behavior.
                                while (
                                    counts.NumExistingThreads < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= counts.NumThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    ThreadCounts newCounts         = counts;
                                    short        newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    newCounts.NumThreadsGoal = newNumThreadsGoal;

                                    ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                                    if (oldCounts == counts)
                                    {
                                        HillClimbing.ThreadPoolHillClimber.ForceChange(newNumThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                                        WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                                        break;
                                    }

                                    counts = oldCounts;
                                }
                            }
                            finally
                            {
                                hillClimbingThreadAdjustmentLock.Release();
                            }
                        }
                    } while (
                        needGateThreadForRuntime ||
                        threadPoolInstance._separated.numRequestedWorkers > 0 ||
                        Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) > GetRunningStateForNumRuns(0));
                }
            }
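
The two configuration names read at the top of GateThreadStart can, in principle, be supplied by the host before the thread pool spins up. A hedged usage sketch follows; whether AppContextConfigHelper honors AppContext.SetSwitch at runtime or only picks up values from runtimeconfig.json is an implementation detail, so treat this purely as an illustration of where the switch names come from.

using System;

// Hedged usage sketch: the two config names come straight from the code above; how they are
// resolved (AppContext switch vs. runtimeconfig.json "configProperties") is not shown here.
internal static class GateThreadConfigSketch
{
    public static void ConfigureBeforeThreadPoolUse()
    {
        // Opt out of starvation detection (assumption: set very early, before the pool starts).
        AppContext.SetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", true);

        // Break into an attached debugger when worker starvation is detected.
        AppContext.SetSwitch("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", true);
    }
}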
Example #3
        //
        // This method must only be called if ShouldAdjustMaxWorkersActive has returned true. It tries to
        // acquire _threadAdjustmentLock itself and returns without adjusting if another thread already holds it.
        //
        private void AdjustMaxWorkersActive()
        {
            LowLevelLock threadAdjustmentLock = _threadAdjustmentLock;

            if (!threadAdjustmentLock.TryAcquire())
            {
                // The lock is held by someone else; they will take care of this for us
                return;
            }

            bool addWorker = false;

            try
            {
                // Repeated checks from ShouldAdjustMaxWorkersActive() inside the lock
                ThreadCounts counts = _separated.counts;
                if (counts.NumProcessingWork > counts.NumThreadsGoal ||
                    _pendingBlockingAdjustment != PendingBlockingAdjustment.None)
                {
                    return;
                }

                long   endTime        = Stopwatch.GetTimestamp();
                double elapsedSeconds = Stopwatch.GetElapsedTime(_currentSampleStartTime, endTime).TotalSeconds;

                if (elapsedSeconds * 1000 >= _threadAdjustmentIntervalMs / 2)
                {
                    int currentTicks        = Environment.TickCount;
                    int totalNumCompletions = (int)_completionCounter.Count;
                    int numCompletions      = totalNumCompletions - _separated.priorCompletionCount;

                    short oldNumThreadsGoal = counts.NumThreadsGoal;
                    int   newNumThreadsGoal;
                    (newNumThreadsGoal, _threadAdjustmentIntervalMs) =
                        HillClimbing.ThreadPoolHillClimber.Update(oldNumThreadsGoal, elapsedSeconds, numCompletions);
                    if (oldNumThreadsGoal != (short)newNumThreadsGoal)
                    {
                        _separated.counts.InterlockedSetNumThreadsGoal((short)newNumThreadsGoal);

                        //
                        // If we're increasing the goal, inject a thread.  If that thread finds work, it will inject
                        // another thread, etc., until nobody finds work or we reach the new goal.
                        //
                        // If we're reducing the goal, whichever threads notice this first will sleep and timeout themselves.
                        //
                        if (newNumThreadsGoal > oldNumThreadsGoal)
                        {
                            addWorker = true;
                        }
                    }

                    _separated.priorCompletionCount          = totalNumCompletions;
                    _separated.nextCompletedWorkRequestsTime = currentTicks + _threadAdjustmentIntervalMs;
                    Volatile.Write(ref _separated.priorCompletedWorkRequestsTime, currentTicks);
                    _currentSampleStartTime = endTime;
                }
            }
            finally
            {
                threadAdjustmentLock.Release();
            }

            if (addWorker)
            {
                WorkerThread.MaybeAddWorkingWorker(this);
            }
        }
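
This variant measures the sample window with Stopwatch.GetTimestamp and Stopwatch.GetElapsedTime (available in .NET 7 and later) and only runs hill climbing once at least half of _threadAdjustmentIntervalMs has elapsed. A standalone sketch of that half-interval gate, with hypothetical field names:

using System.Diagnostics;

// Minimal sketch of the "half the adjustment interval has passed" gate used above. The sample
// state here is hypothetical; only the timing APIs and the comparison match the original.
internal sealed class SampleWindowSketch
{
    private long _sampleStartTimestamp = Stopwatch.GetTimestamp();
    private int _adjustmentIntervalMs = 10; // placeholder; hill climbing recomputes this value

    public bool TryCompleteSample(out double elapsedSeconds)
    {
        long now = Stopwatch.GetTimestamp();
        elapsedSeconds = Stopwatch.GetElapsedTime(_sampleStartTimestamp, now).TotalSeconds;

        if (elapsedSeconds * 1000 < _adjustmentIntervalMs / 2)
        {
            return false; // not enough of the interval has elapsed; keep accumulating
        }

        _sampleStartTimestamp = now; // start the next sample window
        return true;
    }
}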
Example #4
 public LockHolder(LowLevelLock l)
 {
     l.Acquire();
     _lock = l;
 }
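
The fragment above shows only the constructor; the usual shape of such a holder is to also release the lock in Dispose so callers can rely on a using statement. Here is a sketch of that acquire-in-constructor, release-in-Dispose pattern built on Monitor, since LowLevelLock is runtime-internal; all names in the sketch are assumptions.

using System;
using System.Threading;

// Sketch of the acquire-in-constructor / release-in-Dispose pattern, built on Monitor instead
// of the runtime-internal LowLevelLock.
internal readonly struct MonitorLockHolder : IDisposable
{
    private readonly object _lock;

    public MonitorLockHolder(object lockObject)
    {
        Monitor.Enter(lockObject); // acquire in the constructor, like LockHolder above
        _lock = lockObject;
    }

    public void Dispose() => Monitor.Exit(_lock); // release when the holder goes out of scope
}

// Usage:
//     using (new MonitorLockHolder(gate)) { /* critical section */ }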
Example #5
        //
        // This method must only be called if ShouldAdjustMaxWorkersActive has returned true. It tries to
        // acquire _hillClimbingThreadAdjustmentLock itself and returns without adjusting if another thread already holds it.
        //
        private void AdjustMaxWorkersActive()
        {
            LowLevelLock hillClimbingThreadAdjustmentLock = _hillClimbingThreadAdjustmentLock;

            if (!hillClimbingThreadAdjustmentLock.TryAcquire())
            {
                // The lock is held by someone else; they will take care of this for us
                return;
            }

            try
            {
                long startTime = _currentSampleStartTime;
                long endTime   = Stopwatch.GetTimestamp();
                long freq      = Stopwatch.Frequency;

                double elapsedSeconds = (double)(endTime - startTime) / freq;

                if (elapsedSeconds * 1000 >= _threadAdjustmentIntervalMs / 2)
                {
                    int currentTicks        = Environment.TickCount;
                    int totalNumCompletions = (int)_completionCounter.Count;
                    int numCompletions      = totalNumCompletions - _separated.priorCompletionCount;

                    ThreadCounts currentCounts = _separated.counts.VolatileRead();
                    int          newMax;
                    (newMax, _threadAdjustmentIntervalMs) = HillClimbing.ThreadPoolHillClimber.Update(currentCounts.NumThreadsGoal, elapsedSeconds, numCompletions);

                    while (newMax != currentCounts.NumThreadsGoal)
                    {
                        ThreadCounts newCounts = currentCounts;
                        newCounts.NumThreadsGoal = (short)newMax;

                        ThreadCounts oldCounts = _separated.counts.InterlockedCompareExchange(newCounts, currentCounts);
                        if (oldCounts == currentCounts)
                        {
                            //
                            // If we're increasing the max, inject a thread.  If that thread finds work, it will inject
                            // another thread, etc., until nobody finds work or we reach the new maximum.
                            //
                            // If we're reducing the max, whichever threads notice this first will sleep and timeout themselves.
                            //
                            if (newMax > oldCounts.NumThreadsGoal)
                            {
                                WorkerThread.MaybeAddWorkingWorker(this);
                            }
                            break;
                        }

                        if (oldCounts.NumThreadsGoal > currentCounts.NumThreadsGoal && oldCounts.NumThreadsGoal >= newMax)
                        {
                            // someone (probably the gate thread) increased the thread count more than
                            // we are about to do.  Don't interfere.
                            break;
                        }

                        currentCounts = oldCounts;
                    }

                    _separated.priorCompletionCount          = totalNumCompletions;
                    _separated.nextCompletedWorkRequestsTime = currentTicks + _threadAdjustmentIntervalMs;
                    Volatile.Write(ref _separated.priorCompletedWorkRequestsTime, currentTicks);
                    _currentSampleStartTime = endTime;
                }
            }
            finally
            {
                hillClimbingThreadAdjustmentLock.Release();
            }
        }
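
This variant converts raw Stopwatch timestamps to seconds by dividing the tick delta by Stopwatch.Frequency, which is the manual equivalent of the Stopwatch.GetElapsedTime call used in the earlier variant. A small sketch of that conversion; the helper name is an assumption:

using System.Diagnostics;

// Sketch: converting a raw Stopwatch timestamp delta to seconds by dividing by
// Stopwatch.Frequency (ticks per second).
internal static class ElapsedSecondsSketch
{
    public static double ElapsedSecondsSince(long startTimestamp)
    {
        long endTimestamp = Stopwatch.GetTimestamp();
        return (double)(endTimestamp - startTimestamp) / Stopwatch.Frequency;
    }
}

// Usage:
//     long sampleStart = Stopwatch.GetTimestamp();
//     /* ... work happens ... */
//     double elapsedSeconds = ElapsedSecondsSketch.ElapsedSecondsSince(sampleStart);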
Example #6
        //
        // This method must only be called if ShouldAdjustMaxWorkersActive has returned true. It tries to
        // acquire _threadAdjustmentLock itself and returns without adjusting if another thread already holds it.
        //
        private void AdjustMaxWorkersActive()
        {
            LowLevelLock threadAdjustmentLock = _threadAdjustmentLock;

            if (!threadAdjustmentLock.TryAcquire())
            {
                // The lock is held by someone else; they will take care of this for us
                return;
            }

            bool addWorker = false;

            try
            {
                // Skip hill climbing when there is a pending blocking adjustment. Hill climbing may otherwise bypass the
                // blocking adjustment heuristics and increase the thread count too quickly.
                if (_pendingBlockingAdjustment != PendingBlockingAdjustment.None)
                {
                    return;
                }

                long startTime = _currentSampleStartTime;
                long endTime   = Stopwatch.GetTimestamp();
                long freq      = Stopwatch.Frequency;

                double elapsedSeconds = (double)(endTime - startTime) / freq;

                if (elapsedSeconds * 1000 >= _threadAdjustmentIntervalMs / 2)
                {
                    int currentTicks        = Environment.TickCount;
                    int totalNumCompletions = (int)_completionCounter.Count;
                    int numCompletions      = totalNumCompletions - _separated.priorCompletionCount;

                    int newNumThreadsGoal;
                    (newNumThreadsGoal, _threadAdjustmentIntervalMs) =
                        HillClimbing.ThreadPoolHillClimber.Update(_separated.numThreadsGoal, elapsedSeconds, numCompletions);
                    short oldNumThreadsGoal = _separated.numThreadsGoal;
                    if (oldNumThreadsGoal != (short)newNumThreadsGoal)
                    {
                        _separated.numThreadsGoal = (short)newNumThreadsGoal;

                        //
                        // If we're increasing the goal, inject a thread.  If that thread finds work, it will inject
                        // another thread, etc., until nobody finds work or we reach the new goal.
                        //
                        // If we're reducing the goal, whichever threads notice this first will sleep and timeout themselves.
                        //
                        if (newNumThreadsGoal > oldNumThreadsGoal)
                        {
                            addWorker = true;
                        }
                    }

                    _separated.priorCompletionCount          = totalNumCompletions;
                    _separated.nextCompletedWorkRequestsTime = currentTicks + _threadAdjustmentIntervalMs;
                    Volatile.Write(ref _separated.priorCompletedWorkRequestsTime, currentTicks);
                    _currentSampleStartTime = endTime;
                }
            }
            finally
            {
                threadAdjustmentLock.Release();
            }

            if (addWorker)
            {
                WorkerThread.MaybeAddWorkingWorker(this);
            }
        }
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading covers a time range other than the one we are focusing on, so the value is not
                // used other than to send it to any runtime-specific implementation that may also use the CPU
                // utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance   = ThreadPoolInstance;
                LowLevelLock       threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                DelayHelper        delayHelper          = default;

                if (BlockingConfig.IsCooperativeBlockingEnabled)
                {
                    // Initialize memory usage and limits, and register to update them on gen 2 GCs
                    threadPoolInstance.OnGen2GCCallback();
                    Gen2GcCallback.Register(threadPoolInstance.OnGen2GCCallback);
                }

                while (true)
                {
                    RunGateThreadEvent.WaitOne();
                    int currentTimeMs = Environment.TickCount;
                    delayHelper.SetGateActivitiesTime(currentTimeMs);

                    while (true)
                    {
                        bool wasSignaledToWake = DelayEvent.WaitOne((int)delayHelper.GetNextDelay(currentTimeMs));
                        currentTimeMs = Environment.TickCount;

                        // Thread count adjustment for cooperative blocking
                        do
                        {
                            PendingBlockingAdjustment pendingBlockingAdjustment = threadPoolInstance._pendingBlockingAdjustment;
                            if (pendingBlockingAdjustment == PendingBlockingAdjustment.None)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                                break;
                            }

                            bool previousDelayElapsed = false;
                            if (delayHelper.HasBlockingAdjustmentDelay)
                            {
                                previousDelayElapsed =
                                    delayHelper.HasBlockingAdjustmentDelayElapsed(currentTimeMs, wasSignaledToWake);
                                if (pendingBlockingAdjustment == PendingBlockingAdjustment.WithDelayIfNecessary &&
                                    !previousDelayElapsed)
                                {
                                    break;
                                }
                            }

                            uint nextDelayMs = threadPoolInstance.PerformBlockingAdjustment(previousDelayElapsed);
                            if (nextDelayMs <= 0)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                            }
                            else
                            {
                                delayHelper.SetBlockingAdjustmentTimeAndDelay(currentTimeMs, nextDelayMs);
                            }
                        } while (false);

                        //
                        // Periodic gate activities
                        //

                        if (!delayHelper.ShouldPerformGateActivities(currentTimeMs, wasSignaledToWake))
                        {
                            continue;
                        }

                        if (ThreadPool.EnableWorkerTracking && NativeRuntimeEventSource.Log.IsEnabled())
                        {
                            NativeRuntimeEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        bool needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            bool addWorker = false;
                            threadAdjustmentLock.Acquire();
                            try
                            {
                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions; for now this is only to maintain consistency in behavior.
                                ThreadCounts counts = threadPoolInstance._separated.counts;
                                if (counts.NumProcessingWork < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= threadPoolInstance._separated.numThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    short newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    threadPoolInstance._separated.numThreadsGoal = newNumThreadsGoal;
                                    HillClimbing.ThreadPoolHillClimber.ForceChange(
                                        newNumThreadsGoal,
                                        HillClimbing.StateOrTransition.Starvation);
                                    addWorker = true;
                                }
                            }
                            finally
                            {
                                threadAdjustmentLock.Release();
                            }

                            if (addWorker)
                            {
                                WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                            }
                        }

                        if (!needGateThreadForRuntime &&
                            threadPoolInstance._separated.numRequestedWorkers <= 0 &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) <= GetRunningStateForNumRuns(0))
                        {
                            break;
                        }
                    }
                }
            }
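
The outer structure of this gate thread is a wait-with-timeout loop: DelayEvent.WaitOne reports whether the thread was explicitly woken or the delay simply elapsed, and Environment.TickCount is re-read after every wait before deciding what to do. A minimal sketch of that loop shape using a standard AutoResetEvent; the event, delay, and callback are assumptions, not the pool's own types:

using System;
using System.Threading;

// Sketch of the wake-or-timeout loop shape used by the gate thread above: WaitOne returns true
// when the event was signaled (an early wake-up was requested) and false when the delay elapsed.
internal static class GateLoopSketch
{
    private static readonly AutoResetEvent s_delayEvent = new AutoResetEvent(initialState: false);

    public static void RunGateLoop(Action<int, bool> performGateActivities, int delayMs)
    {
        while (true)
        {
            bool wasSignaledToWake = s_delayEvent.WaitOne(delayMs);
            int currentTimeMs = Environment.TickCount;

            // A signaled wake-up typically means some state changed (for example, a pending
            // blocking adjustment); a timeout means it is simply time for periodic activities.
            performGateActivities(currentTimeMs, wasSignaledToWake);
        }
    }

    public static void RequestEarlyWake() => s_delayEvent.Set();
}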
            private static void WorkerThreadStart()
            {
                Thread.CurrentThread.SetThreadPoolWorkerThreadName();

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;

                if (PortableThreadPoolEventSource.Log.IsEnabled(EventLevel.Informational, PortableThreadPoolEventSource.Keywords.ThreadingKeyword))
                {
                    PortableThreadPoolEventSource.Log.ThreadPoolWorkerThreadStart(
                        (uint)threadPoolInstance._separated.counts.VolatileRead().NumExistingThreads);
                }

                LowLevelLock          hillClimbingThreadAdjustmentLock = threadPoolInstance._hillClimbingThreadAdjustmentLock;
                LowLevelLifoSemaphore semaphore = s_semaphore;

                while (true)
                {
                    bool spinWait = true;
                    while (semaphore.Wait(ThreadPoolThreadTimeoutMs, spinWait))
                    {
                        bool alreadyRemovedWorkingWorker = false;
                        while (TakeActiveRequest(threadPoolInstance))
                        {
                            Volatile.Write(ref threadPoolInstance._separated.lastDequeueTime, Environment.TickCount);
                            if (!ThreadPoolWorkQueue.Dispatch())
                            {
                                // ShouldStopProcessingWorkNow() caused the thread to stop processing work, and it would have
                                // already removed this working worker in the counts. This typically happens when hill climbing
                                // decreases the worker thread count goal.
                                alreadyRemovedWorkingWorker = true;
                                break;
                            }
                        }

                        // Don't spin-wait on the semaphore next time if the thread was actively stopped from processing work,
                        // as it's unlikely that the worker thread count goal would be increased again so soon afterwards that
                        // the semaphore would be released within the spin-wait window
                        spinWait = !alreadyRemovedWorkingWorker;

                        if (!alreadyRemovedWorkingWorker)
                        {
                            // If we woke up but couldn't find a request, or ran out of work items to process, we need to update
                            // the number of working workers to reflect that we are done working for now
                            RemoveWorkingWorker(threadPoolInstance);
                        }
                    }

                    hillClimbingThreadAdjustmentLock.Acquire();
                    try
                    {
                        // At this point, the thread's wait timed out. We are shutting down this thread.
                        // We are going to decrement the number of existing threads to no longer include this one
                        // and then change the max number of threads in the thread pool to reflect that we don't need as many
                        // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
                        ThreadCounts counts = threadPoolInstance._separated.counts.VolatileRead();
                        while (true)
                        {
                            // Since this thread is currently registered as an existing thread, if more work comes in meanwhile,
                            // this thread would be expected to satisfy the new work. Ensure that NumExistingThreads is not
                            // decreased below NumProcessingWork, as that would be indicative of such a case.
                            short numExistingThreads = counts.NumExistingThreads;
                            if (numExistingThreads <= counts.NumProcessingWork)
                            {
                                // In this case, enough work came in that this thread should not time out and should go back to work.
                                break;
                            }

                            ThreadCounts newCounts = counts;
                            newCounts.SubtractNumExistingThreads(1);
                            short newNumExistingThreads = (short)(numExistingThreads - 1);
                            short newNumThreadsGoal     = Math.Max(threadPoolInstance._minThreads, Math.Min(newNumExistingThreads, newCounts.NumThreadsGoal));
                            newCounts.NumThreadsGoal = newNumThreadsGoal;

                            ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(newNumThreadsGoal, HillClimbing.StateOrTransition.ThreadTimedOut);

                                if (PortableThreadPoolEventSource.Log.IsEnabled(EventLevel.Informational, PortableThreadPoolEventSource.Keywords.ThreadingKeyword))
                                {
                                    PortableThreadPoolEventSource.Log.ThreadPoolWorkerThreadStop((uint)newNumExistingThreads);
                                }
                                return;
                            }

                            counts = oldCounts;
                        }
                    }
                    finally
                    {
                        hillClimbingThreadAdjustmentLock.Release();
                    }
                }
            }
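
Worker shutdown in both versions of WorkerThreadStart hinges on the semaphore wait: as long as Wait returns true there may be work to dispatch, and a false return (timeout) sends the thread into the retirement path. A sketch of that wait-or-retire shape using SemaphoreSlim in place of the runtime-internal LowLevelLifoSemaphore; the names and the 20-second timeout are assumptions for illustration:

using System;
using System.Threading;

// Sketch of the "wait with timeout, retire on false" shape that drives worker shutdown above.
internal static class WorkerWaitSketch
{
    private static readonly SemaphoreSlim s_workAvailable = new SemaphoreSlim(initialCount: 0);
    private const int ThreadTimeoutMs = 20_000; // assumed timeout for the sketch

    public static void WorkerLoop(Action processAvailableWork)
    {
        // Keep serving releases; a timed-out Wait (returning false) means no work arrived during
        // the whole timeout window, so the worker exits instead of parking forever.
        while (s_workAvailable.Wait(ThreadTimeoutMs))
        {
            processAvailableWork();
        }
    }

    public static void RequestWorker() => s_workAvailable.Release();
}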