public bool SetMinThreads(int workerThreads, int ioCompletionThreads)
        {
            if (workerThreads < 0 || ioCompletionThreads < 0)
            {
                return(false);
            }

            bool addWorker      = false;
            bool wakeGateThread = false;

            _threadAdjustmentLock.Acquire();
            try
            {
                if (workerThreads > _maxThreads || !ThreadPool.CanSetMinIOCompletionThreads(ioCompletionThreads))
                {
                    return(false);
                }

                ThreadPool.SetMinIOCompletionThreads(ioCompletionThreads);

                if (ForcedMinWorkerThreads != 0)
                {
                    return(true);
                }

                short newMinThreads = (short)Math.Max(1, Math.Min(workerThreads, MaxPossibleThreadCount));
                _minThreads = newMinThreads;
                if (_numBlockedThreads > 0)
                {
                    // Blocking adjustment will adjust the goal according to its heuristics
                    if (_pendingBlockingAdjustment != PendingBlockingAdjustment.Immediately)
                    {
                        _pendingBlockingAdjustment = PendingBlockingAdjustment.Immediately;
                        wakeGateThread             = true;
                    }
                }
                else if (_separated.counts.NumThreadsGoal < newMinThreads)
                {
                    _separated.counts.InterlockedSetNumThreadsGoal(newMinThreads);
                    if (_separated.numRequestedWorkers > 0)
                    {
                        addWorker = true;
                    }
                }
            }
            finally
            {
                _threadAdjustmentLock.Release();
            }

            if (addWorker)
            {
                WorkerThread.MaybeAddWorkingWorker(this);
            }
            else if (wakeGateThread)
            {
                GateThread.Wake(this);
            }
            return(true);
        }
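
A hedged usage sketch of the public API this internal method backs. ThreadPool.GetMinThreads and ThreadPool.SetMinThreads are the real public entry points; the specific values below are arbitrary examples.

using System;
using System.Threading;

class SetMinThreadsExample
{
    static void Main()
    {
        // Query the current minimums, then try to raise the worker minimum.
        ThreadPool.GetMinThreads(out int workerMin, out int ioMin);

        // SetMinThreads returns false when the request is rejected,
        // for example if a value is negative or above the configured maximum.
        bool accepted = ThreadPool.SetMinThreads(workerMin + 2, ioMin);
        Console.WriteLine($"Requested worker minimum {workerMin + 2}: accepted = {accepted}");
    }
}
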
Example #2
        public bool SetMinThreads(int workerThreads, int ioCompletionThreads)
        {
            if (workerThreads < 0 || ioCompletionThreads < 0)
            {
                return(false);
            }

            _maxMinThreadLock.Acquire();
            try
            {
                if (workerThreads > _maxThreads || !ThreadPool.CanSetMinIOCompletionThreads(ioCompletionThreads))
                {
                    return(false);
                }

                ThreadPool.SetMinIOCompletionThreads(ioCompletionThreads);

                if (s_forcedMinWorkerThreads != 0)
                {
                    return(true);
                }

                short newMinThreads = (short)Math.Max(1, Math.Min(workerThreads, MaxPossibleThreadCount));
                _minThreads = newMinThreads;

                ThreadCounts counts = _separated.counts.VolatileRead();
                while (counts.NumThreadsGoal < newMinThreads)
                {
                    ThreadCounts newCounts = counts;
                    newCounts.NumThreadsGoal = newMinThreads;

                    ThreadCounts oldCounts = _separated.counts.InterlockedCompareExchange(newCounts, counts);
                    if (oldCounts == counts)
                    {
                        if (_separated.numRequestedWorkers > 0)
                        {
                            WorkerThread.MaybeAddWorkingWorker(this);
                        }
                        break;
                    }

                    counts = oldCounts;
                }

                return(true);
            }
            finally
            {
                _maxMinThreadLock.Release();
            }
        }
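
The loop above is an interlocked compare-exchange retry over a packed counts value. Below is a minimal, self-contained sketch of the same pattern; the bit layout, type, and member names are illustrative and are not the runtime's actual ThreadCounts.

using System.Threading;

struct PackedCounts
{
    // Low 16 bits hold the thread count goal; upper bits are unused here (illustrative layout).
    private int _data;

    public short Goal => (short)(Volatile.Read(ref _data) & 0xFFFF);

    public void RaiseGoalTo(short newGoal)
    {
        int counts = Volatile.Read(ref _data);
        while ((short)(counts & 0xFFFF) < newGoal)
        {
            int newCounts = (counts & ~0xFFFF) | (ushort)newGoal;
            int old = Interlocked.CompareExchange(ref _data, newCounts, counts);
            if (old == counts)
            {
                break; // our update won the race
            }
            counts = old; // another thread changed the counts; retry with the value it installed
        }
    }
}
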
Example #3
        public bool SetMinThreads(int minThreads)
        {
            _maxMinThreadLock.Acquire();
            try
            {
                if (minThreads < 0 || minThreads > _maxThreads)
                {
                    return(false);
                }
                else
                {
                    short threads = (short)Math.Min(minThreads, MaxPossibleThreadCount);
                    if (s_forcedMinWorkerThreads == 0)
                    {
                        _minThreads = threads;

                        ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                        while (counts.numThreadsGoal < _minThreads)
                        {
                            ThreadCounts newCounts = counts;
                            newCounts.numThreadsGoal = _minThreads;

                            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, counts);
                            if (oldCounts == counts)
                            {
                                counts = newCounts;

                                if (newCounts.numThreadsGoal > oldCounts.numThreadsGoal && _numRequestedWorkers > 0)
                                {
                                    WorkerThread.MaybeAddWorkingWorker();
                                }
                            }
                            else
                            {
                                counts = oldCounts;
                            }
                        }
                    }
                    return(true);
                }
            }
            finally
            {
                _maxMinThreadLock.Release();
            }
        }
        public static void Interrupt(Thread thread)
        {
            Debug.Assert(thread != null);

            s_lock.Acquire();
            try
            {
                thread.WaitInfo.TrySignalToInterruptWaitOrRecordPendingInterrupt();
            }
            finally
            {
                s_lock.Release();
            }
        }
 // This is called by a worker thread
 internal static void EnsureRunning()
 {
     s_requested = true;
     if (!s_created)
     {
         try
         {
             s_createdLock.Acquire();
             if (!s_created)
             {
                 CreateGateThread();
             }
         }
         finally
         {
             s_createdLock.Release();
         }
     }
 }
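
EnsureRunning uses a double-checked creation pattern: an unsynchronized fast-path read of s_created, then a re-check under the lock before creating the gate thread. A minimal sketch of the same idea using a plain monitor; the names and thread body are illustrative.

using System.Threading;

static class LazyGateThread
{
    private static readonly object s_createdLock = new object();
    private static volatile bool s_created;

    public static void EnsureRunning()
    {
        if (s_created)
        {
            return; // fast path: already created, no lock needed
        }

        lock (s_createdLock)
        {
            if (!s_created) // re-check: another thread may have won the race
            {
                var thread = new Thread(() => { /* gate loop would go here */ });
                thread.IsBackground = true;
                thread.Start();
                s_created = true;
            }
        }
    }
}
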
Example #6
        public static void SetEvent(WaitableObject waitableObject)
        {
            Debug.Assert(waitableObject != null);

            s_lock.Acquire();
            try
            {
                waitableObject.SignalEvent();
            }
            finally
            {
                s_lock.Release();
            }
        }
Example #7
        public static void SetEvent(IntPtr handle)
        {
            WaitableObject waitableObject = HandleManager.FromHandle(handle);

            s_lock.Acquire();
            try
            {
                waitableObject.SignalEvent();
            }
            finally
            {
                s_lock.Release();
            }
        }
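
HandleManager.FromHandle maps a raw IntPtr back to a managed WaitableObject. One common way to implement such a mapping is with GCHandle; the sketch below shows that general technique and is only an assumption, not the runtime's actual HandleManager.

using System;
using System.Runtime.InteropServices;

static class SimpleHandleManager
{
    public static IntPtr NewHandle(object target)
    {
        // Allocate a normal GCHandle and hand out its IntPtr representation.
        return GCHandle.ToIntPtr(GCHandle.Alloc(target));
    }

    public static T FromHandle<T>(IntPtr handle) where T : class
    {
        // Recover the managed object from the raw IntPtr.
        return (T)GCHandle.FromIntPtr(handle).Target!;
    }

    public static void DeleteHandle(IntPtr handle)
    {
        GCHandle.FromIntPtr(handle).Free();
    }
}
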
Example #8
        public static SafeWaitHandle? CreateNamedMutex(bool initiallyOwned, string name, out bool createdNew)
        {
            // For initially owned, newly created named mutexes, there is a potential race
            // between adding the mutex to the named object table and initially acquiring it.
            // To avoid the possibility of another thread retrieving the mutex via its name
            // before we managed to acquire it, we perform both steps while holding s_lock.
            s_lock.Acquire();
            bool holdingLock = true;

            try
            {
                WaitableObject? waitableObject = WaitableObject.CreateNamedMutex_Locked(name, out createdNew);
                if (waitableObject == null)
                {
                    return(null);
                }
                SafeWaitHandle safeWaitHandle = NewHandle(waitableObject);
                if (!initiallyOwned || !createdNew)
                {
                    return(safeWaitHandle);
                }

                // Acquire the mutex. A thread's <see cref="ThreadWaitInfo"/> has a reference to all <see cref="Mutex"/>es locked
                // by the thread. See <see cref="ThreadWaitInfo.LockedMutexesHead"/>. So, acquire the lock only after all
                // possibilities for exceptions have been exhausted.
                ThreadWaitInfo waitInfo = Thread.CurrentThread.WaitInfo;
                int            status   = waitableObject.Wait_Locked(waitInfo, timeoutMilliseconds: 0, interruptible: false, prioritize: false);
                Debug.Assert(status == 0);
                // Wait_Locked has already released s_lock, so we no longer hold it here.
                holdingLock = false;
                return(safeWaitHandle);
            }
            finally
            {
                if (holdingLock)
                {
                    s_lock.Release();
                }
            }
        }
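
This internal helper backs the public named-mutex constructor. A hedged sketch of the corresponding public usage; the mutex name is an arbitrary example.

using System;
using System.Threading;

class NamedMutexExample
{
    static void Main()
    {
        // Create (or open) a named mutex, taking ownership only if we created it.
        using var mutex = new Mutex(initiallyOwned: true, "Global\\ExampleMutex", out bool createdNew);

        if (!createdNew)
        {
            // The mutex already existed, so initial ownership was not granted; wait for it.
            mutex.WaitOne();
        }

        Console.WriteLine(createdNew ? "Created the mutex and own it." : "Opened an existing mutex and acquired it.");

        // ... critical section ...
        mutex.ReleaseMutex();
    }
}
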
            private static void WorkerThreadStart()
            {
                Thread.CurrentThread.SetThreadPoolWorkerThreadName();

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;

                if (NativeRuntimeEventSource.Log.IsEnabled())
                {
                    NativeRuntimeEventSource.Log.ThreadPoolWorkerThreadStart(
                        (uint)threadPoolInstance._separated.counts.VolatileRead().NumExistingThreads);
                }

                LowLevelLock          threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                LowLevelLifoSemaphore semaphore            = s_semaphore;

                while (true)
                {
                    bool spinWait = true;
                    while (semaphore.Wait(ThreadPoolThreadTimeoutMs, spinWait))
                    {
                        bool alreadyRemovedWorkingWorker = false;
                        while (TakeActiveRequest(threadPoolInstance))
                        {
                            threadPoolInstance._separated.lastDequeueTime = Environment.TickCount;
                            if (!ThreadPoolWorkQueue.Dispatch())
                            {
                                // ShouldStopProcessingWorkNow() caused the thread to stop processing work, and it would have
                                // already removed this working worker in the counts. This typically happens when hill climbing
                                // decreases the worker thread count goal.
                                alreadyRemovedWorkingWorker = true;
                                break;
                            }

                            if (threadPoolInstance._separated.numRequestedWorkers <= 0)
                            {
                                break;
                            }

                            // In highly bursty cases with short bursts of work, especially in the portable thread pool
                            // implementation, worker threads are being released and entering Dispatch very quickly, not finding
                            // much work in Dispatch, and soon afterwards going back to Dispatch, causing extra thrashing on
                            // data and some interlocked operations, and similarly when the thread pool runs out of work. Since
                            // there is a pending request for work, introduce a slight delay before serving the next request.
                            // The spin-wait is mainly for when the sleep is not effective due to there being no other threads
                            // to schedule.
                            Thread.UninterruptibleSleep0();
                            if (!Environment.IsSingleProcessor)
                            {
                                Thread.SpinWait(1);
                            }
                        }

                        // Don't spin-wait on the semaphore next time if the thread was actively stopped from processing work,
                        // as it's unlikely that the worker thread count goal would be increased again so soon afterwards that
                        // the semaphore would be released within the spin-wait window
                        spinWait = !alreadyRemovedWorkingWorker;

                        if (!alreadyRemovedWorkingWorker)
                        {
                            // If we woke up but couldn't find a request, or ran out of work items to process, we need to update
                            // the number of working workers to reflect that we are done working for now
                            RemoveWorkingWorker(threadPoolInstance);
                        }
                    }

                    threadAdjustmentLock.Acquire();
                    try
                    {
                        // At this point, the thread's wait timed out. We are shutting down this thread.
                        // We are going to decrement the number of existing threads to no longer include this one
                        // and then change the max number of threads in the thread pool to reflect that we don't need as many
                        // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
                        ThreadCounts counts = threadPoolInstance._separated.counts;
                        while (true)
                        {
                            // Since this thread is currently registered as an existing thread, if more work comes in meanwhile,
                            // this thread would be expected to satisfy the new work. Ensure that NumExistingThreads is not
                            // decreased below NumProcessingWork, as that would be indicative of such a case.
                            if (counts.NumExistingThreads <= counts.NumProcessingWork)
                            {
                                // In this case, enough work came in that this thread should not time out and should go back to work.
                                break;
                            }

                            ThreadCounts newCounts             = counts;
                            short        newNumExistingThreads = --newCounts.NumExistingThreads;
                            short        newNumThreadsGoal     =
                                Math.Max(
                                    threadPoolInstance.MinThreadsGoal,
                                    Math.Min(newNumExistingThreads, counts.NumThreadsGoal));
                            newCounts.NumThreadsGoal = newNumThreadsGoal;

                            ThreadCounts oldCounts =
                                threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(
                                    newNumThreadsGoal,
                                    HillClimbing.StateOrTransition.ThreadTimedOut);
                                if (NativeRuntimeEventSource.Log.IsEnabled())
                                {
                                    NativeRuntimeEventSource.Log.ThreadPoolWorkerThreadStop((uint)newNumExistingThreads);
                                }
                                return;
                            }

                            counts = oldCounts;
                        }
                    }
                    finally
                    {
                        threadAdjustmentLock.Release();
                    }
                }
            }
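
The worker loop above is a semaphore-gated dispatch loop with an idle timeout that retires the thread. A simplified, self-contained sketch of the same shape using SemaphoreSlim and ConcurrentQueue; the timeout and types are illustrative, not the thread pool's actual implementation.

using System;
using System.Collections.Concurrent;
using System.Threading;

class MiniWorkerPool
{
    private readonly SemaphoreSlim _semaphore = new SemaphoreSlim(0);
    private readonly ConcurrentQueue<Action> _work = new ConcurrentQueue<Action>();
    private const int IdleTimeoutMs = 20_000;

    public void Enqueue(Action action)
    {
        _work.Enqueue(action);
        _semaphore.Release(); // wake one worker
    }

    public void StartWorker()
    {
        new Thread(() =>
        {
            // Wait for a release; if nothing arrives within the timeout, let the thread exit.
            while (_semaphore.Wait(IdleTimeoutMs))
            {
                while (_work.TryDequeue(out var action))
                {
                    action();
                }
            }
        })
        { IsBackground = true }.Start();
    }
}
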
Example #10
        /// <summary>
        /// Unregisters this wait handle registration from the wait threads.
        /// </summary>
        /// <param name="waitObject">The event to signal when the handle is unregistered.</param>
        /// <returns>If the handle was successfully marked to be removed and the provided wait handle was set as the user provided event.</returns>
        /// <remarks>
        /// This method will only return true on the first call.
        /// Passing in a wait handle with a value of -1 will result in a blocking wait, where Unregister will not return until the full unregistration is completed.
        /// </remarks>
        public bool Unregister(WaitHandle? waitObject)
        {
            GC.SuppressFinalize(this);
            _callbackLock.Acquire();
            bool needToRollBackRefCountOnException = false;

            try
            {
                if (_unregisterCalled)
                {
                    return(false);
                }

                UserUnregisterWaitHandle = waitObject?.SafeWaitHandle;
                UserUnregisterWaitHandle?.DangerousAddRef(ref needToRollBackRefCountOnException);

                UserUnregisterWaitHandleValue = UserUnregisterWaitHandle?.DangerousGetHandle() ?? IntPtr.Zero;

                if (_unregistered)
                {
                    SignalUserWaitHandle();
                    return(true);
                }

                if (IsBlocking)
                {
                    _callbacksComplete = RentEvent();
                }
                else
                {
                    _removed = RentEvent();
                }
                _unregisterCalled = true;
            }
            catch (Exception) // Rollback state on exception
            {
                if (_removed != null)
                {
                    ReturnEvent(_removed);
                    _removed = null;
                }
                else if (_callbacksComplete != null)
                {
                    ReturnEvent(_callbacksComplete);
                    _callbacksComplete = null;
                }

                UserUnregisterWaitHandleValue = IntPtr.Zero;

                if (needToRollBackRefCountOnException)
                {
                    UserUnregisterWaitHandle?.DangerousRelease();
                }

                UserUnregisterWaitHandle = null;
                throw;
            }
            finally
            {
                _callbackLock.Release();
            }

            WaitThread!.UnregisterWait(this);
            return(true);
        }
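
The catch block above rolls back a DangerousAddRef if anything later throws. A minimal sketch of that ref-count rollback pattern on a SafeHandle; the helper name is hypothetical.

using System;
using System.Runtime.InteropServices;

static class SafeHandlePinning
{
    // Returns the raw handle while keeping a ref-count pin on it.
    // The caller is responsible for calling DangerousRelease when done.
    public static IntPtr GetRawHandle(SafeHandle handle)
    {
        bool addedRef = false;
        try
        {
            handle.DangerousAddRef(ref addedRef);
            return handle.DangerousGetHandle();
        }
        catch
        {
            // Roll back the ref count only if DangerousAddRef actually succeeded.
            if (addedRef)
            {
                handle.DangerousRelease();
            }
            throw;
        }
    }
}
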
Example #11
        public bool SetMinThreads(int workerThreads, int ioCompletionThreads)
        {
            if (workerThreads < 0 || ioCompletionThreads < 0)
            {
                return(false);
            }

            bool addWorker      = false;
            bool wakeGateThread = false;

            _threadAdjustmentLock.Acquire();
            try
            {
                if (workerThreads > _maxThreads)
                {
                    return(false);
                }

                if (ThreadPool.UsePortableThreadPoolForIO
                        ? ioCompletionThreads > _legacy_maxIOCompletionThreads
                        : !ThreadPool.CanSetMinIOCompletionThreads(ioCompletionThreads))
                {
                    return(false);
                }

                if (HasForcedMinThreads && workerThreads != ForcedMinWorkerThreads)
                {
                    return(false);
                }

                if (ThreadPool.UsePortableThreadPoolForIO)
                {
                    _legacy_minIOCompletionThreads = (short)Math.Max(1, ioCompletionThreads);
                }
                else
                {
                    ThreadPool.SetMinIOCompletionThreads(ioCompletionThreads);
                }

                short newMinThreads = (short)Math.Max(1, workerThreads);
                if (newMinThreads == _minThreads)
                {
                    return(true);
                }

                _minThreads = newMinThreads;
                if (_numBlockedThreads > 0)
                {
                    // Blocking adjustment will adjust the goal according to its heuristics
                    if (_pendingBlockingAdjustment != PendingBlockingAdjustment.Immediately)
                    {
                        _pendingBlockingAdjustment = PendingBlockingAdjustment.Immediately;
                        wakeGateThread             = true;
                    }
                }
                else if (_separated.counts.NumThreadsGoal < newMinThreads)
                {
                    _separated.counts.InterlockedSetNumThreadsGoal(newMinThreads);
                    if (_separated.numRequestedWorkers > 0)
                    {
                        addWorker = true;
                    }
                }

                if (NativeRuntimeEventSource.Log.IsEnabled())
                {
                    NativeRuntimeEventSource.Log.ThreadPoolMinMaxThreads(
                        (ushort)_minThreads,
                        (ushort)_maxThreads,
                        (ushort)_legacy_minIOCompletionThreads,
                        (ushort)_legacy_maxIOCompletionThreads);
                }
            }
            finally
            {
                _threadAdjustmentLock.Release();
            }

            if (addWorker)
            {
                WorkerThread.MaybeAddWorkingWorker(this);
            }
            else if (wakeGateThread)
            {
                GateThread.Wake(this);
            }
            return(true);
        }
        public bool Unregister(WaitHandle waitObject)
        {
            // The registered wait handle must have been registered by this time, otherwise the instance is not handed out to
            // the caller of the public variants of RegisterWaitForSingleObject
            Debug.Assert(WaitThread != null);

            s_callbackLock.Acquire();
            bool needToRollBackRefCountOnException = false;

            try
            {
                if (_unregisterCalled)
                {
                    return(false);
                }

                UserUnregisterWaitHandle = waitObject?.SafeWaitHandle;
                UserUnregisterWaitHandle?.DangerousAddRef(ref needToRollBackRefCountOnException);

                UserUnregisterWaitHandleValue = UserUnregisterWaitHandle?.DangerousGetHandle() ?? IntPtr.Zero;

                if (_unregistered)
                {
                    SignalUserWaitHandle();
                    return(true);
                }

                if (IsBlocking)
                {
                    _callbacksComplete = RentEvent();
                }
                else
                {
                    _removed = RentEvent();
                }
            }
            catch (Exception) // Rollback state on exception
            {
                if (_removed != null)
                {
                    ReturnEvent(_removed);
                    _removed = null;
                }
                else if (_callbacksComplete != null)
                {
                    ReturnEvent(_callbacksComplete);
                    _callbacksComplete = null;
                }

                UserUnregisterWaitHandleValue = IntPtr.Zero;

                if (needToRollBackRefCountOnException)
                {
                    UserUnregisterWaitHandle?.DangerousRelease();
                }

                UserUnregisterWaitHandle = null;
                throw;
            }
            finally
            {
                _unregisterCalled = true;
                s_callbackLock.Release();
            }

            WaitThread!.UnregisterWait(this);
            return(true);
        }
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading is over a time range other than what we are focusing on, so we do not use the read other
                // than to send it to any runtime-specific implementation that may also use the CPU utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;
                LowLevelLock       hillClimbingThreadAdjustmentLock = threadPoolInstance._hillClimbingThreadAdjustmentLock;

                while (true)
                {
                    s_runGateThreadEvent.WaitOne();

                    bool needGateThreadForRuntime;
                    do
                    {
                        Thread.Sleep(GateThreadDelayMs);

                        if (ThreadPool.EnableWorkerTracking && PortableThreadPoolEventSource.Log.IsEnabled())
                        {
                            PortableThreadPoolEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            try
                            {
                                hillClimbingThreadAdjustmentLock.Acquire();
                                ThreadCounts counts = threadPoolInstance._separated.counts.VolatileRead();

                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions, for now this is only to maintain consistency in behavior.
                                while (
                                    counts.NumExistingThreads < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= counts.NumThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    ThreadCounts newCounts         = counts;
                                    short        newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    newCounts.NumThreadsGoal = newNumThreadsGoal;

                                    ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                                    if (oldCounts == counts)
                                    {
                                        HillClimbing.ThreadPoolHillClimber.ForceChange(newNumThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                                        WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                                        break;
                                    }

                                    counts = oldCounts;
                                }
                            }
                            finally
                            {
                                hillClimbingThreadAdjustmentLock.Release();
                            }
                        }
                    } while (
                        needGateThreadForRuntime ||
                        threadPoolInstance._separated.numRequestedWorkers > 0 ||
                        Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) > GetRunningStateForNumRuns(0));
                }
            }
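
Starvation detection above raises the thread count goal when work is queued but nothing has been dequeued recently. A simplified sketch of that check; the threshold and field names are illustrative.

using System;
using System.Threading;

class StarvationMonitor
{
    private const int DequeueDelayThresholdMs = 500;

    private int _lastDequeueTimeMs = Environment.TickCount;
    private int _pendingRequests;

    public void OnDequeue() => Volatile.Write(ref _lastDequeueTimeMs, Environment.TickCount);

    public void OnRequest() => Interlocked.Increment(ref _pendingRequests);

    // Called periodically from a gate/monitor thread.
    public bool ShouldAddWorker()
    {
        int sinceLastDequeue = Environment.TickCount - Volatile.Read(ref _lastDequeueTimeMs);
        return Volatile.Read(ref _pendingRequests) > 0 &&
               sinceLastDequeue >= DequeueDelayThresholdMs;
    }
}
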
 public LockHolder(LowLevelLock l)
 {
     l.Acquire();
     _lock = l;
 }
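
LockHolder pairs Acquire in the constructor with Release in Dispose so callers can scope the lock with a using block. A minimal sketch of the full pattern, assuming a lock type with Acquire/Release; SimpleLock and ScopedLockHolder are illustrative stand-ins for LowLevelLock and the holder type.

using System;
using System.Threading;

// A tiny lock with Acquire/Release semantics, standing in for LowLevelLock.
sealed class SimpleLock
{
    private readonly object _monitor = new object();
    public void Acquire() => Monitor.Enter(_monitor);
    public void Release() => Monitor.Exit(_monitor);
}

// Disposable holder so callers can write: using (new ScopedLockHolder(theLock)) { ... }
readonly struct ScopedLockHolder : IDisposable
{
    private readonly SimpleLock _lock;

    public ScopedLockHolder(SimpleLock l)
    {
        l.Acquire();
        _lock = l;
    }

    public void Dispose() => _lock.Release();
}
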
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading is over a time range other than what we are focusing on, so we do not use the read other
                // than to send it to any runtime-specific implementation that may also use the CPU utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance   = ThreadPoolInstance;
                LowLevelLock       threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                DelayHelper        delayHelper          = default;

                if (BlockingConfig.IsCooperativeBlockingEnabled)
                {
                    // Initialize memory usage and limits, and register to update them on gen 2 GCs
                    threadPoolInstance.OnGen2GCCallback();
                    Gen2GcCallback.Register(threadPoolInstance.OnGen2GCCallback);
                }

                while (true)
                {
                    RunGateThreadEvent.WaitOne();
                    int currentTimeMs = Environment.TickCount;
                    delayHelper.SetGateActivitiesTime(currentTimeMs);

                    while (true)
                    {
                        bool wasSignaledToWake = DelayEvent.WaitOne((int)delayHelper.GetNextDelay(currentTimeMs));
                        currentTimeMs = Environment.TickCount;

                        // Thread count adjustment for cooperative blocking
                        do
                        {
                            PendingBlockingAdjustment pendingBlockingAdjustment = threadPoolInstance._pendingBlockingAdjustment;
                            if (pendingBlockingAdjustment == PendingBlockingAdjustment.None)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                                break;
                            }

                            bool previousDelayElapsed = false;
                            if (delayHelper.HasBlockingAdjustmentDelay)
                            {
                                previousDelayElapsed =
                                    delayHelper.HasBlockingAdjustmentDelayElapsed(currentTimeMs, wasSignaledToWake);
                                if (pendingBlockingAdjustment == PendingBlockingAdjustment.WithDelayIfNecessary &&
                                    !previousDelayElapsed)
                                {
                                    break;
                                }
                            }

                            uint nextDelayMs = threadPoolInstance.PerformBlockingAdjustment(previousDelayElapsed);
                            if (nextDelayMs <= 0)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                            }
                            else
                            {
                                delayHelper.SetBlockingAdjustmentTimeAndDelay(currentTimeMs, nextDelayMs);
                            }
                        } while (false);

                        //
                        // Periodic gate activities
                        //

                        if (!delayHelper.ShouldPerformGateActivities(currentTimeMs, wasSignaledToWake))
                        {
                            continue;
                        }

                        if (ThreadPool.EnableWorkerTracking && NativeRuntimeEventSource.Log.IsEnabled())
                        {
                            NativeRuntimeEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        bool needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            bool addWorker = false;
                            threadAdjustmentLock.Acquire();
                            try
                            {
                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions, for now this is only to maintain consistency in behavior.
                                ThreadCounts counts = threadPoolInstance._separated.counts;
                                if (counts.NumProcessingWork < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= threadPoolInstance._separated.numThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    short newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    threadPoolInstance._separated.numThreadsGoal = newNumThreadsGoal;
                                    HillClimbing.ThreadPoolHillClimber.ForceChange(
                                        newNumThreadsGoal,
                                        HillClimbing.StateOrTransition.Starvation);
                                    addWorker = true;
                                }
                            }
                            finally
                            {
                                threadAdjustmentLock.Release();
                            }

                            if (addWorker)
                            {
                                WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                            }
                        }

                        if (!needGateThreadForRuntime &&
                            threadPoolInstance._separated.numRequestedWorkers <= 0 &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) <= GetRunningStateForNumRuns(0))
                        {
                            break;
                        }
                    }
                }
            }
            private static void WorkerThreadStart()
            {
                Thread.CurrentThread.SetThreadPoolWorkerThreadName();

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;

                if (PortableThreadPoolEventSource.Log.IsEnabled(EventLevel.Informational, PortableThreadPoolEventSource.Keywords.ThreadingKeyword))
                {
                    PortableThreadPoolEventSource.Log.ThreadPoolWorkerThreadStart(
                        (uint)threadPoolInstance._separated.counts.VolatileRead().NumExistingThreads);
                }

                LowLevelLock          hillClimbingThreadAdjustmentLock = threadPoolInstance._hillClimbingThreadAdjustmentLock;
                LowLevelLifoSemaphore semaphore = s_semaphore;

                while (true)
                {
                    bool spinWait = true;
                    while (semaphore.Wait(ThreadPoolThreadTimeoutMs, spinWait))
                    {
                        bool alreadyRemovedWorkingWorker = false;
                        while (TakeActiveRequest(threadPoolInstance))
                        {
                            Volatile.Write(ref threadPoolInstance._separated.lastDequeueTime, Environment.TickCount);
                            if (!ThreadPoolWorkQueue.Dispatch())
                            {
                                // ShouldStopProcessingWorkNow() caused the thread to stop processing work, and it would have
                                // already removed this working worker in the counts. This typically happens when hill climbing
                                // decreases the worker thread count goal.
                                alreadyRemovedWorkingWorker = true;
                                break;
                            }
                        }

                        // Don't spin-wait on the semaphore next time if the thread was actively stopped from processing work,
                        // as it's unlikely that the worker thread count goal would be increased again so soon afterwards that
                        // the semaphore would be released within the spin-wait window
                        spinWait = !alreadyRemovedWorkingWorker;

                        if (!alreadyRemovedWorkingWorker)
                        {
                            // If we woke up but couldn't find a request, or ran out of work items to process, we need to update
                            // the number of working workers to reflect that we are done working for now
                            RemoveWorkingWorker(threadPoolInstance);
                        }
                    }

                    hillClimbingThreadAdjustmentLock.Acquire();
                    try
                    {
                        // At this point, the thread's wait timed out. We are shutting down this thread.
                        // We are going to decrement the number of existing threads to no longer include this one
                        // and then change the max number of threads in the thread pool to reflect that we don't need as many
                        // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
                        ThreadCounts counts = threadPoolInstance._separated.counts.VolatileRead();
                        while (true)
                        {
                            // Since this thread is currently registered as an existing thread, if more work comes in meanwhile,
                            // this thread would be expected to satisfy the new work. Ensure that NumExistingThreads is not
                            // decreased below NumProcessingWork, as that would be indicative of such a case.
                            short numExistingThreads = counts.NumExistingThreads;
                            if (numExistingThreads <= counts.NumProcessingWork)
                            {
                                // In this case, enough work came in that this thread should not time out and should go back to work.
                                break;
                            }

                            ThreadCounts newCounts = counts;
                            newCounts.SubtractNumExistingThreads(1);
                            short newNumExistingThreads = (short)(numExistingThreads - 1);
                            short newNumThreadsGoal     = Math.Max(threadPoolInstance._minThreads, Math.Min(newNumExistingThreads, newCounts.NumThreadsGoal));
                            newCounts.NumThreadsGoal = newNumThreadsGoal;

                            ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(newNumThreadsGoal, HillClimbing.StateOrTransition.ThreadTimedOut);

                                if (PortableThreadPoolEventSource.Log.IsEnabled(EventLevel.Informational, PortableThreadPoolEventSource.Keywords.ThreadingKeyword))
                                {
                                    PortableThreadPoolEventSource.Log.ThreadPoolWorkerThreadStop((uint)newNumExistingThreads);
                                }
                                return;
                            }

                            counts = oldCounts;
                        }
                    }
                    finally
                    {
                        hillClimbingThreadAdjustmentLock.Release();
                    }
                }
            }