/// <summary>
            /// Reduce the number of working workers by one, but maybe add back a worker (possibly this thread) if a thread request comes in while we are marking this thread as not working.
            /// </summary>
            private static void RemoveWorkingWorker(PortableThreadPool threadPoolInstance)
            {
                // A compare-exchange loop is used instead of Interlocked.Decrement or Interlocked.Add to defensively prevent
                // NumProcessingWork from underflowing. See the setter for NumProcessingWork.
                ThreadCounts counts = threadPoolInstance._separated.counts;

                while (true)
                {
                    ThreadCounts newCounts = counts;
                    newCounts.NumProcessingWork--;

                    ThreadCounts countsBeforeUpdate =
                        threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                    if (countsBeforeUpdate == counts)
                    {
                        break;
                    }

                    counts = countsBeforeUpdate;
                }

                // It's possible that we decided we had thread requests just before a request came in,
                // but reduced the worker count *after* the request came in.  In this case, we might
                // miss the notification of a thread request.  So we wake up a thread (maybe this one!)
                // if there is work to do.
                if (threadPoolInstance._separated.numRequestedWorkers > 0)
                {
                    MaybeAddWorkingWorker(threadPoolInstance);
                }
            }
            /// <summary>
            /// Returns whether the current thread should stop processing work on the thread pool.
            /// A thread should stop processing work, even when work remains, only when there are
            /// more worker threads in the thread pool than we currently want.
            /// </summary>
            /// <returns>Whether or not this thread should stop processing work even if there is still work in the queue.</returns>
            internal static bool ShouldStopProcessingWorkNow(PortableThreadPool threadPoolInstance)
            {
                ThreadCounts counts = threadPoolInstance._separated.counts;

                while (true)
                {
                    // When there are more threads processing work than the thread count goal, it may have been decided
                    // to decrease the number of threads. Stop processing if the counts can be updated. We may have more
                    // threads existing than the thread count goal and that is ok, the cold ones will eventually time out if
                    // the thread count goal is not increased again. This logic is a bit different from the original CoreCLR
                    // code from which this implementation was ported, which turns a processing thread into a retired thread
                    // and checks for pending requests like RemoveWorkingWorker. In this implementation there are
                    // no retired threads, so only the count of threads processing work is considered.
                    if (counts.NumProcessingWork <= counts.NumThreadsGoal)
                    {
                        return(false);
                    }

                    ThreadCounts newCounts = counts;
                    newCounts.NumProcessingWork--;

                    ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);

                    if (oldCounts == counts)
                    {
                        return(true);
                    }
                    counts = oldCounts;
                }
            }
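
Both loops above depend on ThreadCounts packing its counters into a single atomically exchangeable value, so one compare-exchange either publishes all of the updated counts or fails and the loop retries. A minimal sketch of such a packed layout, assuming a 64-bit backing field like the `_asLong` seen in the examples below (the bit layout and member names here are illustrative, not the exact runtime layout):

using System.Threading;

// Sketch of a packed counts struct; the bit layout is an assumption for illustration.
internal struct PackedThreadCounts
{
    internal long _asLong; // assumed: bits 0-15 processing, 16-31 existing, 32-47 goal

    internal short NumProcessingWork
    {
        get => (short)_asLong;
        set => _asLong = (_asLong & ~0xFFFFL) | (ushort)value;
    }

    // NumExistingThreads and NumThreadsGoal would use the next two 16-bit lanes the same way.

    // A single CompareExchange replaces every counter at once, which is what makes the
    // snapshot/modify/compare-exchange retry loops above race-free.
    internal static PackedThreadCounts CompareExchangeCounts(
        ref PackedThreadCounts location, PackedThreadCounts newCounts, PackedThreadCounts oldCounts)
    {
        return new PackedThreadCounts
        {
            _asLong = Interlocked.CompareExchange(ref location._asLong, newCounts._asLong, oldCounts._asLong)
        };
    }
}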
Example #3
        public void GetAvailableThreads(out int workerThreads, out int ioCompletionThreads)
        {
            ThreadCounts counts = _separated.counts.VolatileRead();

            workerThreads       = Math.Max(0, _maxThreads - counts.NumProcessingWork);
            ioCompletionThreads = _legacy_maxIOCompletionThreads;
        }
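
This is the internal side of what callers observe through the public ThreadPool.GetAvailableThreads API; a quick usage sketch of that public surface:

// Query how many more worker / IO completion threads could still be put to work.
ThreadPool.GetAvailableThreads(out int workerThreads, out int ioCompletionThreads);
Console.WriteLine($"Available workers: {workerThreads}, IO completion: {ioCompletionThreads}");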
Example #4
 public static ThreadCounts VolatileReadCounts(ref ThreadCounts counts)
 {
     return(new ThreadCounts
     {
         _asLong = Volatile.Read(ref counts._asLong)
     });
 }
Example #5
            /// <summary>
            /// Reduce the number of working workers by one, but maybe add back a worker (possibly this thread) if a thread request comes in while we are marking this thread as not working.
            /// </summary>
            private static void RemoveWorkingWorker()
            {
                ThreadCounts currentCounts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);

                while (true)
                {
                    ThreadCounts newCounts = currentCounts;
                    newCounts.numProcessingWork--;
                    ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, currentCounts);

                    if (oldCounts == currentCounts)
                    {
                        break;
                    }
                    currentCounts = oldCounts;
                }

                // It's possible that we decided we had thread requests just before a request came in,
                // but reduced the worker count *after* the request came in.  In this case, we might
                // miss the notification of a thread request.  So we wake up a thread (maybe this one!)
                // if there is work to do.
                if (ThreadPoolInstance._numRequestedWorkers > 0)
                {
                    MaybeAddWorkingWorker();
                }
            }
Example #6
            internal static void MaybeAddWorkingWorker()
            {
                ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                ThreadCounts newCounts;

                while (true)
                {
                    newCounts = counts;
                    newCounts.numProcessingWork  = Math.Max(counts.numProcessingWork, Math.Min((short)(counts.numProcessingWork + 1), counts.numThreadsGoal));
                    newCounts.numExistingThreads = Math.Max(counts.numExistingThreads, newCounts.numProcessingWork);

                    if (newCounts == counts)
                    {
                        return;
                    }

                    ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);

                    if (oldCounts == counts)
                    {
                        break;
                    }

                    counts = oldCounts;
                }

                int toCreate  = newCounts.numExistingThreads - counts.numExistingThreads;
                int toRelease = newCounts.numProcessingWork - counts.numProcessingWork;

                if (toRelease > 0)
                {
                    s_semaphore.Release(toRelease);
                }

                while (toCreate > 0)
                {
                    if (TryCreateWorkerThread())
                    {
                        toCreate--;
                    }
                    else
                    {
                        counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                        while (true)
                        {
                            newCounts = counts;
                            newCounts.numProcessingWork  -= (short)toCreate;
                            newCounts.numExistingThreads -= (short)toCreate;

                            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                            if (oldCounts == counts)
                            {
                                break;
                            }
                            counts = oldCounts;
                        }
                        toCreate = 0;
                    }
                }
            }
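
TryCreateWorkerThread is referenced but not shown in these examples; a hypothetical minimal sketch of what the roll-back path above is guarding against (the thread setup details here are assumptions for illustration, not the actual runtime code):

// Hypothetical sketch: start a background worker thread, reporting failure
// (e.g. resource exhaustion) so the caller can roll the counts back.
private static bool TryCreateWorkerThread()
{
    try
    {
        var thread = new Thread(WorkerThreadStart)
        {
            IsBackground = true
        };
        thread.Start();
        return true;
    }
    catch (ThreadStartException)
    {
        return false;
    }
    catch (OutOfMemoryException)
    {
        return false;
    }
}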
Example #7
            private static void WorkerThreadStart()
            {
                ClrThreadPoolEventSource.Log.WorkerThreadStart(ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts).numExistingThreads);
                RuntimeThread currentThread = RuntimeThread.CurrentThread;

                while (true)
                {
                    while (WaitForRequest())
                    {
                        if (TakeActiveRequest())
                        {
                            Volatile.Write(ref ThreadPoolInstance._separated.lastDequeueTime, Environment.TickCount);
                            if (ThreadPoolWorkQueue.Dispatch())
                            {
                                // If the queue runs out of work for us, we need to update the number of working workers to reflect that we are done working for now
                                RemoveWorkingWorker();
                            }
                        }
                        else
                        {
                            // If we woke up but couldn't find a request, we need to update the number of working workers to reflect that we are done working for now
                            RemoveWorkingWorker();
                        }
                    }

                    ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Acquire();
                    try
                    {
                        // At this point, the thread's wait timed out. We are shutting down this thread.
                        // We are going to decrement the number of existing threads to no longer include this one
                        // and then change the max number of threads in the thread pool to reflect that we don't need as many
                        // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
                        ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                        while (true)
                        {
                            if (counts.numExistingThreads == counts.numProcessingWork)
                            {
                                // In this case, enough work came in that this thread should not time out and should go back to work.
                                break;
                            }

                            ThreadCounts newCounts = counts;
                            newCounts.numExistingThreads--;
                            newCounts.numThreadsGoal = Math.Max(ThreadPoolInstance._minThreads, Math.Min(newCounts.numExistingThreads, newCounts.numThreadsGoal));
                            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(newCounts.numThreadsGoal, HillClimbing.StateOrTransition.ThreadTimedOut);
                                ClrThreadPoolEventSource.Log.WorkerThreadStop(newCounts.numExistingThreads);
                                return;
                            }
                        }
                    }
                    finally
                    {
                        ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Release();
                    }
                }
            }
Example #8
 public TestTheadingServices()
 {
     MaxThreads       = new ThreadCounts(short.MaxValue, 1000);
     MinThreads       = new ThreadCounts(8, 4);
     AvailableThreads = MaxThreads;
     Callbacks        = new List <Tuple <WaitCallback, object> >();
     Timers           = new List <Tuple <TimeSpan, Action> >();
 }
Example #9
            // TODO: CoreCLR: Worker Tracking in CoreCLR? (Config name: ThreadPool_EnableWorkerTracking)
            private static void GateThreadStart()
            {
                AppContext.TryGetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", out bool disableStarvationDetection);
                AppContext.TryGetSwitch("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", out bool debuggerBreakOnWorkStarvation);
                while (true)
                {
                    RuntimeThread.Sleep(GateThreadDelayMs);

                    if (ThreadPoolInstance._numRequestedWorkers > 0)
                    {
                        WorkerThread.MaybeAddWorkingWorker();
                    }

                    if (!s_requested)
                    {
                        continue;
                    }
                    s_requested = false;

                    ThreadPoolInstance._cpuUtilization = s_cpu.CurrentUtilization;

                    if (!disableStarvationDetection)
                    {
                        if (ThreadPoolInstance._numRequestedWorkers > 0 && SufficientDelaySinceLastDequeue())
                        {
                            try
                            {
                                ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Acquire();
                                ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                                // don't add a thread if we're at max or if we are already in the process of adding threads
                                while (counts.numExistingThreads < ThreadPoolInstance._maxThreads && counts.numExistingThreads >= counts.numThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debug.WriteLine("The CLR ThreadPool detected work starvation!");
                                        Debugger.Break();
                                    }

                                    ThreadCounts newCounts = counts;
                                    newCounts.numThreadsGoal = (short)(newCounts.numExistingThreads + 1);
                                    ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                                    if (oldCounts == counts)
                                    {
                                        HillClimbing.ThreadPoolHillClimber.ForceChange(newCounts.numThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                                        WorkerThread.MaybeAddWorkingWorker();
                                        break;
                                    }
                                    counts = oldCounts;
                                }
                            }
                            finally
                            {
                                ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Release();
                            }
                        }
                    }
                }
            }
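
The two switches read at the top of GateThreadStart are ordinary AppContext switches, so a host could set them early during startup, before any thread pool work is queued; for example:

// Disable the gate thread's starvation heuristic (read via AppContext.TryGetSwitch above).
AppContext.SetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", true);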
Example #10
        /// <summary>
        /// Formats a one-line summary for the given running sample.
        /// </summary>
        /// <param name="name">Label of the sampler being reported.</param>
        /// <param name="s">The running sample to summarize.</param>
        /// <param name="type">Summary type marker; "+" lines additionally report thread counts.</param>
        /// <returns>The formatted summary line.</returns>
        private static String Format(String name, RunningSample s, String type)
        {
            StringBuilder tmp = new StringBuilder(20);  // for intermediate use
            StringBuilder sb  = new StringBuilder(100); // output line buffer

            sb.Append(name);
            sb.Append(" ");
            sb.Append(type);
            sb.Append(" ");
            sb.Append(s.getNumSamples());
            sb.Append(" in ");
            long elapsed    = s.getElapsed();
            long elapsedSec = (elapsed + 500) / 1000; // rounded seconds

            if (elapsedSec > 100 ||                   // No point displaying decimals (less than 1% error)
                (elapsed - elapsedSec * 1000) < 50    // decimal would be zero
                )
            {
                sb.Append(elapsedSec);
            }
            else
            {
                double elapsedSecf = elapsed / 1000.0d; // fractional seconds
                sb.Append(elapsedSecf);                 // This will round
            }
            sb.Append("s = ");
            if (elapsed > 0)
            {
                sb.Append(s.getRate());
            }
            else
            {
                sb.Append("******");// Rate is effectively infinite
            }
            sb.Append("/s Avg: ");
            sb.Append(s.getAverage());
            sb.Append(" Min: ");
            sb.Append(s.getMin());
            sb.Append(" Max: ");
            sb.Append(s.getMax());
            sb.Append(" Err: ");
            sb.Append(s.getErrorCount());
            sb.Append(" (");
            sb.Append(s.getErrorPercentageString());
            sb.Append(")");
            if ("+".Equals(type))
            {
                ThreadCounts tc = NetMeterContextManager.GetThreadCounts();
                sb.Append(" Active: ");
                sb.Append(tc.activeThreads);
                sb.Append(" Started: ");
                sb.Append(tc.startedThreads);
                sb.Append(" Finished: ");
                sb.Append(tc.finishedThreads);
            }
            return(sb.ToString());
        }
Example #11
        //
        // This method must only be called if ShouldAdjustMaxWorkersActive has returned true, *and*
        // _hillClimbingThreadAdjustmentLock is held.
        //
        private void AdjustMaxWorkersActive()
        {
            _hillClimbingThreadAdjustmentLock.VerifyIsLocked();
            int  currentTicks        = Environment.TickCount;
            int  totalNumCompletions = (int)_completionCounter.Count;
            int  numCompletions      = totalNumCompletions - _separated.priorCompletionCount;
            long startTime           = _currentSampleStartTime;
            long endTime             = Stopwatch.GetTimestamp();
            long freq = Stopwatch.Frequency;

            double elapsedSeconds = (double)(endTime - startTime) / freq;

            if (elapsedSeconds * 1000 >= _threadAdjustmentIntervalMs / 2)
            {
                ThreadCounts currentCounts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                int          newMax;
                (newMax, _threadAdjustmentIntervalMs) = HillClimbing.ThreadPoolHillClimber.Update(currentCounts.numThreadsGoal, elapsedSeconds, numCompletions);

                while (newMax != currentCounts.numThreadsGoal)
                {
                    ThreadCounts newCounts = currentCounts;
                    newCounts.numThreadsGoal = (short)newMax;

                    ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, currentCounts);
                    if (oldCounts == currentCounts)
                    {
                        //
                        // If we're increasing the max, inject a thread.  If that thread finds work, it will inject
                        // another thread, etc., until nobody finds work or we reach the new maximum.
                        //
                        // If we're reducing the max, whichever threads notice this first will sleep and timeout themselves.
                        //
                        if (newMax > oldCounts.numThreadsGoal)
                        {
                            WorkerThread.MaybeAddWorkingWorker();
                        }
                        break;
                    }
                    else
                    {
                        if (oldCounts.numThreadsGoal > currentCounts.numThreadsGoal && oldCounts.numThreadsGoal >= newMax)
                        {
                            // someone (probably the gate thread) increased the thread count more than
                            // we are about to do.  Don't interfere.
                            break;
                        }

                        currentCounts = oldCounts;
                    }
                }
                _separated.priorCompletionCount          = totalNumCompletions;
                _separated.nextCompletedWorkRequestsTime = currentTicks + _threadAdjustmentIntervalMs;
                Volatile.Write(ref _separated.priorCompletedWorkRequestsTime, currentTicks);
                _currentSampleStartTime = endTime;
            }
        }
Example #12
            /// <summary>
            /// Waits for a request to work.
            /// </summary>
            /// <returns>Whether this thread was woken up before it timed out.</returns>
            private static bool WaitForRequest()
            {
                PortableThreadPoolEventSource log = PortableThreadPoolEventSource.Log;

                if (log.IsEnabled())
                {
                    log.WorkerThreadWait(ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts).numExistingThreads);
                }
                return(s_semaphore.Wait(ThreadPoolThreadTimeoutMs));
            }
            public ThreadCounts InterlockedCompareExchange(ThreadCounts newCounts, ThreadCounts oldCounts)
            {
#if DEBUG
                if (newCounts.NumThreadsGoal != oldCounts.NumThreadsGoal)
                {
                    ThreadPoolInstance._threadAdjustmentLock.VerifyIsLocked();
                }
#endif

                return(new ThreadCounts(Interlocked.CompareExchange(ref _data, newCounts._data, oldCounts._data)));
            }
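
Every example in this listing wraps InterlockedCompareExchange in the same snapshot/modify/retry loop; a generic sketch of that pattern (the helper name and delegate are illustrative, not part of the runtime):

// Illustrative helper: keep applying the transform until no other thread has
// changed the counts between our read and our compare-exchange.
private static ThreadCounts InterlockedTransformCounts(
    PortableThreadPool threadPoolInstance,
    Func<ThreadCounts, ThreadCounts> transform)
{
    ThreadCounts counts = threadPoolInstance._separated.counts;
    while (true)
    {
        ThreadCounts newCounts = transform(counts);
        ThreadCounts countsBeforeUpdate =
            threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
        if (countsBeforeUpdate == counts)
        {
            return newCounts; // our update won the race
        }
        counts = countsBeforeUpdate; // another thread updated the counts; retry against the fresh value
    }
}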
Example #14
        public int GetAvailableThreads()
        {
            ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
            int          count  = _maxThreads - counts.numProcessingWork;

            if (count < 0)
            {
                return(0);
            }
            return(count);
        }
Example #15
            // TODO: CoreCLR: Worker Tracking in CoreCLR? (Config name: ThreadPool_EnableWorkerTracking)
            private static void GateThreadStart()
            {
                _ = s_cpu.CurrentUtilization; // The first reading is over a time range other than what we are focusing on, so we do not use the read.

                AppContext.TryGetSwitch("System.Threading.ThreadPool.DisableStarvationDetection", out bool disableStarvationDetection);
                AppContext.TryGetSwitch("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", out bool debuggerBreakOnWorkStarvation);

                while (true)
                {
                    s_runGateThreadEvent.WaitOne();
                    do
                    {
                        Thread.Sleep(GateThreadDelayMs);

                        ThreadPoolInstance._cpuUtilization = s_cpu.CurrentUtilization;

                        if (!disableStarvationDetection)
                        {
                            if (ThreadPoolInstance._numRequestedWorkers > 0 && SufficientDelaySinceLastDequeue())
                            {
                                try
                                {
                                    ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Acquire();
                                    ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                                    // don't add a thread if we're at max or if we are already in the process of adding threads
                                    while (counts.numExistingThreads < ThreadPoolInstance._maxThreads && counts.numExistingThreads >= counts.numThreadsGoal)
                                    {
                                        if (debuggerBreakOnWorkStarvation)
                                        {
                                            Debugger.Break();
                                        }

                                        ThreadCounts newCounts = counts;
                                        newCounts.numThreadsGoal = (short)(newCounts.numExistingThreads + 1);
                                        ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);
                                        if (oldCounts == counts)
                                        {
                                            HillClimbing.ThreadPoolHillClimber.ForceChange(newCounts.numThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                                            WorkerThread.MaybeAddWorkingWorker();
                                            break;
                                        }
                                        counts = oldCounts;
                                    }
                                }
                                finally
                                {
                                    ThreadPoolInstance._hillClimbingThreadAdjustmentLock.Release();
                                }
                            }
                        }
                    } while (ThreadPoolInstance._numRequestedWorkers > 0 || Interlocked.Decrement(ref s_runningState) > GetRunningStateForNumRuns(0));
                }
            }
Example #16
        public bool SetMinThreads(int workerThreads, int ioCompletionThreads)
        {
            if (workerThreads < 0 || ioCompletionThreads < 0)
            {
                return(false);
            }

            _maxMinThreadLock.Acquire();
            try
            {
                if (workerThreads > _maxThreads || !ThreadPool.CanSetMinIOCompletionThreads(ioCompletionThreads))
                {
                    return(false);
                }

                ThreadPool.SetMinIOCompletionThreads(ioCompletionThreads);

                if (s_forcedMinWorkerThreads != 0)
                {
                    return(true);
                }

                short newMinThreads = (short)Math.Max(1, Math.Min(workerThreads, MaxPossibleThreadCount));
                _minThreads = newMinThreads;

                ThreadCounts counts = _separated.counts.VolatileRead();
                while (counts.NumThreadsGoal < newMinThreads)
                {
                    ThreadCounts newCounts = counts;
                    newCounts.NumThreadsGoal = newMinThreads;

                    ThreadCounts oldCounts = _separated.counts.InterlockedCompareExchange(newCounts, counts);
                    if (oldCounts == counts)
                    {
                        if (_separated.numRequestedWorkers > 0)
                        {
                            WorkerThread.MaybeAddWorkingWorker(this);
                        }
                        break;
                    }

                    counts = oldCounts;
                }

                return(true);
            }
            finally
            {
                _maxMinThreadLock.Release();
            }
        }
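
Seen from user code, this path is reached through the public ThreadPool.SetMinThreads; a small usage sketch:

// Ask the pool to keep at least 16 worker threads and 4 IO completion threads ready;
// returns false if the values are negative or exceed the current maximums.
bool accepted = ThreadPool.SetMinThreads(16, 4);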
Example #17
        private bool ShouldAdjustMaxWorkersActive()
        {
            // We need to subtract by prior time because Environment.TickCount can wrap around, making a comparison of absolute times unreliable.
            int priorTime        = Volatile.Read(ref _separated.priorCompletedWorkRequestsTime);
            int requiredInterval = _separated.nextCompletedWorkRequestsTime - priorTime;
            int elapsedInterval  = Environment.TickCount - priorTime;

            if (elapsedInterval >= requiredInterval)
            {
                ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                return(counts.numExistingThreads >= counts.numThreadsGoal);
            }
            return(false);
        }
Example #18
            public static ThreadCounts CompareExchangeCounts(ref ThreadCounts location, ThreadCounts newCounts, ThreadCounts oldCounts)
            {
                ThreadCounts result = new ThreadCounts
                {
                    _asLong = Interlocked.CompareExchange(ref location._asLong, newCounts._asLong, oldCounts._asLong)
                };

                if (result == oldCounts)
                {
                    result.Validate();
                    newCounts.Validate();
                }
                return(result);
            }
Example #19
        public bool SetMaxThreads(int workerThreads, int ioCompletionThreads)
        {
            if (workerThreads <= 0 || ioCompletionThreads <= 0)
            {
                return(false);
            }

            _maxMinThreadLock.Acquire();
            try
            {
                if (workerThreads < _minThreads || !ThreadPool.CanSetMaxIOCompletionThreads(ioCompletionThreads))
                {
                    return(false);
                }

                ThreadPool.SetMaxIOCompletionThreads(ioCompletionThreads);

                if (s_forcedMaxWorkerThreads != 0)
                {
                    return(true);
                }

                short newMaxThreads = (short)Math.Min(workerThreads, MaxPossibleThreadCount);
                _maxThreads = newMaxThreads;

                ThreadCounts counts = _separated.counts.VolatileRead();
                while (counts.NumThreadsGoal > newMaxThreads)
                {
                    ThreadCounts newCounts = counts;
                    newCounts.NumThreadsGoal = newMaxThreads;

                    ThreadCounts oldCounts = _separated.counts.InterlockedCompareExchange(newCounts, counts);
                    if (oldCounts == counts)
                    {
                        break;
                    }

                    counts = oldCounts;
                }

                return(true);
            }
            finally
            {
                _maxMinThreadLock.Release();
            }
        }
Example #20
        public bool SetMinThreads(int minThreads)
        {
            _maxMinThreadLock.Acquire();
            try
            {
                if (minThreads < 0 || minThreads > _maxThreads)
                {
                    return(false);
                }
                else
                {
                    short threads = (short)Math.Min(minThreads, MaxPossibleThreadCount);
                    if (s_forcedMinWorkerThreads == 0)
                    {
                        _minThreads = threads;

                        ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                        while (counts.numThreadsGoal < _minThreads)
                        {
                            ThreadCounts newCounts = counts;
                            newCounts.numThreadsGoal = _minThreads;

                            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, counts);
                            if (oldCounts == counts)
                            {
                                counts = newCounts;

                                if (newCounts.numThreadsGoal > oldCounts.numThreadsGoal && _numRequestedWorkers > 0)
                                {
                                    WorkerThread.MaybeAddWorkingWorker();
                                }
                            }
                            else
                            {
                                counts = oldCounts;
                            }
                        }
                    }
                    return(true);
                }
            }
            finally
            {
                _maxMinThreadLock.Release();
            }
        }
Example #21
            // called by logic to spawn new worker threads, return true if it's been too long
            // since the last dequeue operation - takes number of worker threads into account
            // in deciding "too long"
            private static bool SufficientDelaySinceLastDequeue()
            {
                int delay = Environment.TickCount - Volatile.Read(ref ThreadPoolInstance._separated.lastDequeueTime);

                int minimumDelay;

                if (ThreadPoolInstance._cpuUtilization < CpuUtilizationLow)
                {
                    minimumDelay = GateThreadDelayMs;
                }
                else
                {
                    ThreadCounts counts     = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                    int          numThreads = counts.numThreadsGoal;
                    minimumDelay = numThreads * DequeueDelayThresholdMs;
                }
                return(delay > minimumDelay);
            }
            public ThreadCounts InterlockedSetNumThreadsGoal(short value)
            {
                ThreadPoolInstance._threadAdjustmentLock.VerifyIsLocked();

                ThreadCounts counts = this;

                while (true)
                {
                    ThreadCounts newCounts = counts;
                    newCounts.NumThreadsGoal = value;

                    ThreadCounts countsBeforeUpdate = InterlockedCompareExchange(newCounts, counts);
                    if (countsBeforeUpdate == counts)
                    {
                        return(newCounts);
                    }

                    counts = countsBeforeUpdate;
                }
            }
            /// <summary>
            /// Reduce the number of working workers by one, but maybe add back a worker (possibly this thread) if a thread request comes in while we are marking this thread as not working.
            /// </summary>
            private static void RemoveWorkingWorker(PortableThreadPool threadPoolInstance)
            {
                ThreadCounts currentCounts = threadPoolInstance._separated.counts.VolatileRead();

                while (true)
                {
                    ThreadCounts newCounts = currentCounts;
                    newCounts.SubtractNumProcessingWork(1);
                    ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, currentCounts);

                    if (oldCounts == currentCounts)
                    {
                        break;
                    }
                    currentCounts = oldCounts;
                }

                if (currentCounts.NumProcessingWork > 1)
                {
                    // In highly bursty cases with short bursts of work, especially in the portable thread pool implementation,
                    // worker threads are being released and entering Dispatch very quickly, not finding much work in Dispatch,
                    // and soon afterwards going back to Dispatch, causing extra thrashing on data and some interlocked
                    // operations. If this is not the last thread to stop processing work, introduce a slight delay to help
                    // other threads make more efficient progress. The spin-wait is mainly for when the sleep is not effective
                    // due to there being no other threads to schedule.
                    Thread.UninterruptibleSleep0();
                    if (!Environment.IsSingleProcessor)
                    {
                        Thread.SpinWait(1);
                    }
                }

                // It's possible that we decided we had thread requests just before a request came in,
                // but reduced the worker count *after* the request came in.  In this case, we might
                // miss the notification of a thread request.  So we wake up a thread (maybe this one!)
                // if there is work to do.
                if (threadPoolInstance._separated.numRequestedWorkers > 0)
                {
                    MaybeAddWorkingWorker(threadPoolInstance);
                }
            }
Example #24
        public bool SetMaxThreads(int maxThreads)
        {
            _maxMinThreadLock.Acquire();
            try
            {
                if (maxThreads < _minThreads || maxThreads == 0)
                {
                    return(false);
                }
                else
                {
                    short threads = (short)Math.Min(maxThreads, MaxPossibleThreadCount);
                    if (s_forcedMaxWorkerThreads == 0)
                    {
                        _maxThreads = threads;

                        ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                        while (counts.numThreadsGoal > maxThreads)
                        {
                            ThreadCounts newCounts = counts;
                            newCounts.numThreadsGoal = _maxThreads;

                            ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref _separated.counts, newCounts, counts);
                            if (oldCounts == counts)
                            {
                                counts = newCounts;
                            }
                            else
                            {
                                counts = oldCounts;
                            }
                        }
                    }
                    return(true);
                }
            }
            finally
            {
                _maxMinThreadLock.Release();
            }
        }
Example #25
        private bool ShouldAdjustMaxWorkersActive()
        {
            // We need to subtract by prior time because Environment.TickCount can wrap around, making a comparison of absolute times unreliable.
            int priorTime        = Volatile.Read(ref _separated.priorCompletedWorkRequestsTime);
            int requiredInterval = _separated.nextCompletedWorkRequestsTime - priorTime;
            int elapsedInterval  = Environment.TickCount - priorTime;

            if (elapsedInterval >= requiredInterval)
            {
                // Avoid trying to adjust the thread count goal if there are already more threads than the thread count goal.
                // In that situation, hill climbing must have previously decided to decrease the thread count goal, so let's
                // wait until the system responds to that change before calling into hill climbing again. This condition should
                // be the opposite of the condition in WorkerThread.ShouldStopProcessingWorkNow that causes
                // threads processing work to stop in response to a decreased thread count goal. The logic here is a bit
                // different from the original CoreCLR code from which this implementation was ported because in this
                // implementation there are no retired threads, so only the count of threads processing work is considered.
                ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref _separated.counts);
                return(counts.numProcessingWork <= counts.numThreadsGoal);
            }
            return(false);
        }
Example #26
            /// <summary>
            /// Returns whether the current thread should stop processing work on the thread pool.
            /// A thread should stop processing work, even when work remains, only when there are
            /// more worker threads in the thread pool than we currently want.
            /// </summary>
            /// <returns>Whether or not this thread should stop processing work even if there is still work in the queue.</returns>
            internal static bool ShouldStopProcessingWorkNow()
            {
                ThreadCounts counts = ThreadCounts.VolatileReadCounts(ref ThreadPoolInstance._separated.counts);
                while (true)
                {
                    if (counts.numExistingThreads <= counts.numThreadsGoal)
                    {
                        return false;
                    }

                    ThreadCounts newCounts = counts;
                    newCounts.numProcessingWork--;

                    ThreadCounts oldCounts = ThreadCounts.CompareExchangeCounts(ref ThreadPoolInstance._separated.counts, newCounts, counts);

                    if (oldCounts == counts)
                    {
                        return true;
                    }
                    counts = oldCounts;
                }
            }
Example #27
        private bool ShouldAdjustMaxWorkersActive(int currentTimeMs)
        {
            if (HillClimbing.IsDisabled)
            {
                return(false);
            }

            // We need to subtract by prior time because Environment.TickCount can wrap around, making a comparison of absolute
            // times unreliable. Intervals are unsigned to avoid wrapping around on the subtract after enough time elapses, and
            // this also prevents the initial elapsed interval from being negative due to the prior and next times being
            // initialized to zero.
            int  priorTime        = Volatile.Read(ref _separated.priorCompletedWorkRequestsTime);
            uint requiredInterval = (uint)(_separated.nextCompletedWorkRequestsTime - priorTime);
            uint elapsedInterval  = (uint)(currentTimeMs - priorTime);

            if (elapsedInterval < requiredInterval)
            {
                return(false);
            }

            // Avoid trying to adjust the thread count goal if there are already more threads than the thread count goal.
            // In that situation, hill climbing must have previously decided to decrease the thread count goal, so let's
            // wait until the system responds to that change before calling into hill climbing again. This condition should
            // be the opposite of the condition in WorkerThread.ShouldStopProcessingWorkNow that causes
            // threads processing work to stop in response to a decreased thread count goal. The logic here is a bit
            // different from the original CoreCLR code from which this implementation was ported because in this
            // implementation there are no retired threads, so only the count of threads processing work is considered.
            ThreadCounts counts = _separated.counts;

            if (counts.NumProcessingWork > counts.NumThreadsGoal)
            {
                return(false);
            }

            // Skip hill climbing when there is a pending blocking adjustment. Hill climbing may otherwise bypass the
            // blocking adjustment heuristics and increase the thread count too quickly.
            return(_pendingBlockingAdjustment == PendingBlockingAdjustment.None);
        }
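
The point about unsigned intervals can be made concrete: Environment.TickCount wraps from int.MaxValue to int.MinValue after roughly 24.9 days of uptime, yet the unsigned difference still gives the true elapsed interval. A small illustration:

// Illustration: TickCount wraps, but the unchecked subtraction cast to uint
// still yields the elapsed milliseconds across the wrap boundary.
int priorTime = int.MaxValue - 100;      // shortly before the wrap
int currentTimeMs = int.MinValue + 400;  // shortly after the wrap
uint elapsedInterval = (uint)(currentTimeMs - priorTime);
Console.WriteLine(elapsedInterval);      // prints 501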
            private static void WorkerThreadStart()
            {
                Thread.CurrentThread.SetThreadPoolWorkerThreadName();

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;

                if (NativeRuntimeEventSource.Log.IsEnabled())
                {
                    NativeRuntimeEventSource.Log.ThreadPoolWorkerThreadStart(
                        (uint)threadPoolInstance._separated.counts.VolatileRead().NumExistingThreads);
                }

                LowLevelLock          threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                LowLevelLifoSemaphore semaphore            = s_semaphore;

                while (true)
                {
                    bool spinWait = true;
                    while (semaphore.Wait(ThreadPoolThreadTimeoutMs, spinWait))
                    {
                        bool alreadyRemovedWorkingWorker = false;
                        while (TakeActiveRequest(threadPoolInstance))
                        {
                            threadPoolInstance._separated.lastDequeueTime = Environment.TickCount;
                            if (!ThreadPoolWorkQueue.Dispatch())
                            {
                                // ShouldStopProcessingWorkNow() caused the thread to stop processing work, and it would have
                                // already removed this working worker in the counts. This typically happens when hill climbing
                                // decreases the worker thread count goal.
                                alreadyRemovedWorkingWorker = true;
                                break;
                            }

                            if (threadPoolInstance._separated.numRequestedWorkers <= 0)
                            {
                                break;
                            }

                            // In highly bursty cases with short bursts of work, especially in the portable thread pool
                            // implementation, worker threads are being released and entering Dispatch very quickly, not finding
                            // much work in Dispatch, and soon afterwards going back to Dispatch, causing extra thrashing on
                            // data and some interlocked operations, and similarly when the thread pool runs out of work. Since
                            // there is a pending request for work, introduce a slight delay before serving the next request.
                            // The spin-wait is mainly for when the sleep is not effective due to there being no other threads
                            // to schedule.
                            Thread.UninterruptibleSleep0();
                            if (!Environment.IsSingleProcessor)
                            {
                                Thread.SpinWait(1);
                            }
                        }

                        // Don't spin-wait on the semaphore next time if the thread was actively stopped from processing work,
                        // as it's unlikely that the worker thread count goal would be increased again so soon afterwards that
                        // the semaphore would be released within the spin-wait window
                        spinWait = !alreadyRemovedWorkingWorker;

                        if (!alreadyRemovedWorkingWorker)
                        {
                            // If we woke up but couldn't find a request, or ran out of work items to process, we need to update
                            // the number of working workers to reflect that we are done working for now
                            RemoveWorkingWorker(threadPoolInstance);
                        }
                    }

                    threadAdjustmentLock.Acquire();
                    try
                    {
                        // At this point, the thread's wait timed out. We are shutting down this thread.
                        // We are going to decrement the number of existing threads to no longer include this one
                        // and then change the max number of threads in the thread pool to reflect that we don't need as many
                        // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
                        ThreadCounts counts = threadPoolInstance._separated.counts;
                        while (true)
                        {
                            // Since this thread is currently registered as an existing thread, if more work comes in meanwhile,
                            // this thread would be expected to satisfy the new work. Ensure that NumExistingThreads is not
                            // decreased below NumProcessingWork, as that would be indicative of such a case.
                            if (counts.NumExistingThreads <= counts.NumProcessingWork)
                            {
                                // In this case, enough work came in that this thread should not time out and should go back to work.
                                break;
                            }

                            ThreadCounts newCounts             = counts;
                            short        newNumExistingThreads = --newCounts.NumExistingThreads;
                            short        newNumThreadsGoal     =
                                Math.Max(
                                    threadPoolInstance.MinThreadsGoal,
                                    Math.Min(newNumExistingThreads, counts.NumThreadsGoal));
                            newCounts.NumThreadsGoal = newNumThreadsGoal;

                            ThreadCounts oldCounts =
                                threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                            if (oldCounts == counts)
                            {
                                HillClimbing.ThreadPoolHillClimber.ForceChange(
                                    newNumThreadsGoal,
                                    HillClimbing.StateOrTransition.ThreadTimedOut);
                                if (NativeRuntimeEventSource.Log.IsEnabled())
                                {
                                    NativeRuntimeEventSource.Log.ThreadPoolWorkerThreadStop((uint)newNumExistingThreads);
                                }
                                return;
                            }

                            counts = oldCounts;
                        }
                    }
                    finally
                    {
                        threadAdjustmentLock.Release();
                    }
                }
            }
            internal static void MaybeAddWorkingWorker(PortableThreadPool threadPoolInstance)
            {
                ThreadCounts counts = threadPoolInstance._separated.counts;
                short        numExistingThreads, numProcessingWork, newNumExistingThreads, newNumProcessingWork;

                while (true)
                {
                    numProcessingWork = counts.NumProcessingWork;
                    if (numProcessingWork >= counts.NumThreadsGoal)
                    {
                        return;
                    }

                    newNumProcessingWork  = (short)(numProcessingWork + 1);
                    numExistingThreads    = counts.NumExistingThreads;
                    newNumExistingThreads = Math.Max(numExistingThreads, newNumProcessingWork);

                    ThreadCounts newCounts = counts;
                    newCounts.NumProcessingWork  = newNumProcessingWork;
                    newCounts.NumExistingThreads = newNumExistingThreads;

                    ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);

                    if (oldCounts == counts)
                    {
                        break;
                    }

                    counts = oldCounts;
                }

                int toCreate  = newNumExistingThreads - numExistingThreads;
                int toRelease = newNumProcessingWork - numProcessingWork;

                if (toRelease > 0)
                {
                    s_semaphore.Release(toRelease);
                }

                while (toCreate > 0)
                {
                    if (TryCreateWorkerThread())
                    {
                        toCreate--;
                        continue;
                    }

                    counts = threadPoolInstance._separated.counts;
                    while (true)
                    {
                        ThreadCounts newCounts = counts;
                        newCounts.NumProcessingWork  -= (short)toCreate;
                        newCounts.NumExistingThreads -= (short)toCreate;

                        ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                        if (oldCounts == counts)
                        {
                            break;
                        }
                        counts = oldCounts;
                    }
                    break;
                }
            }
Example #30
 public ThreadCounts Subtract(ThreadCounts counts)
 {
     return new ThreadCounts(WorkerThreads - counts.WorkerThreads, CompletionPortThreads - counts.CompletionPortThreads);
 }
Example #31
        //
        // This method must only be called if ShouldAdjustMaxWorkersActive has returned true, *and*
        // _hillClimbingThreadAdjustmentLock is held.
        //
        private void AdjustMaxWorkersActive()
        {
            LowLevelLock threadAdjustmentLock = _threadAdjustmentLock;

            if (!threadAdjustmentLock.TryAcquire())
            {
                // The lock is held by someone else, they will take care of this for us
                return;
            }

            bool addWorker = false;

            try
            {
                // Repeated checks from ShouldAdjustMaxWorkersActive() inside the lock
                ThreadCounts counts = _separated.counts;
                if (counts.NumProcessingWork > counts.NumThreadsGoal ||
                    _pendingBlockingAdjustment != PendingBlockingAdjustment.None)
                {
                    return;
                }

                long   endTime        = Stopwatch.GetTimestamp();
                double elapsedSeconds = Stopwatch.GetElapsedTime(_currentSampleStartTime, endTime).TotalSeconds;

                if (elapsedSeconds * 1000 >= _threadAdjustmentIntervalMs / 2)
                {
                    int currentTicks        = Environment.TickCount;
                    int totalNumCompletions = (int)_completionCounter.Count;
                    int numCompletions      = totalNumCompletions - _separated.priorCompletionCount;

                    short oldNumThreadsGoal = counts.NumThreadsGoal;
                    int   newNumThreadsGoal;
                    (newNumThreadsGoal, _threadAdjustmentIntervalMs) =
                        HillClimbing.ThreadPoolHillClimber.Update(oldNumThreadsGoal, elapsedSeconds, numCompletions);
                    if (oldNumThreadsGoal != (short)newNumThreadsGoal)
                    {
                        _separated.counts.InterlockedSetNumThreadsGoal((short)newNumThreadsGoal);

                        //
                        // If we're increasing the goal, inject a thread.  If that thread finds work, it will inject
                        // another thread, etc., until nobody finds work or we reach the new goal.
                        //
                        // If we're reducing the goal, whichever threads notice this first will sleep and timeout themselves.
                        //
                        if (newNumThreadsGoal > oldNumThreadsGoal)
                        {
                            addWorker = true;
                        }
                    }

                    _separated.priorCompletionCount          = totalNumCompletions;
                    _separated.nextCompletedWorkRequestsTime = currentTicks + _threadAdjustmentIntervalMs;
                    Volatile.Write(ref _separated.priorCompletedWorkRequestsTime, currentTicks);
                    _currentSampleStartTime = endTime;
                }
            }
            finally
            {
                threadAdjustmentLock.Release();
            }

            if (addWorker)
            {
                WorkerThread.MaybeAddWorkingWorker(this);
            }
        }