Example #1
        private static bool GetEnableWorkerTracking() =>
            UsePortableThreadPool
                ? AppContextConfigHelper.GetBooleanConfig(
                    "System.Threading.ThreadPool.EnableWorkerTracking",
                    false)
                : GetEnableWorkerTrackingNative();
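For context, AppContextConfigHelper is an internal runtime helper, so its implementation is not part of this example. A minimal sketch of what a boolean lookup of this shape plausibly does (consult the named AppContext switch, otherwise fall back to the supplied default) is shown below; the sketch class is hypothetical and only mirrors the call site above.

using System;

internal static class AppContextConfigHelperSketch
{
    // Hypothetical stand-in: check the named AppContext switch and fall back to the
    // default. The real helper may also consult environment variables or
    // runtimeconfig.json properties.
    internal static bool GetBooleanConfig(string switchName, bool defaultValue) =>
        AppContext.TryGetSwitch(switchName, out bool isEnabled) ? isEnabled : defaultValue;
}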
Example #2
            private int _logSize;                                         // SOS's ThreadPool command depends on this name

            public HillClimbing()
            {
                _wavePeriod                = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.WavePeriod", 4, false);
                _maxThreadWaveMagnitude    = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.MaxWaveMagnitude", 20, false);
                _threadMagnitudeMultiplier = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.WaveMagnitudeMultiplier", 100, false) / 100.0;
                _samplesToMeasure          = _wavePeriod * AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.WaveHistorySize", 8, false);
                _targetThroughputRatio     = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.Bias", 15, false) / 100.0;
                _targetSignalToNoiseRatio  = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.TargetSignalToNoiseRatio", 300, false) / 100.0;
                _maxChangePerSecond        = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.MaxChangePerSecond", 4, false);
                _maxChangePerSample        = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.MaxChangePerSample", 20, false);
                int sampleIntervalMsLow  = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.SampleIntervalLow", DefaultSampleIntervalMsLow, false);
                int sampleIntervalMsHigh = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.SampleIntervalHigh", DefaultSampleIntervalMsHigh, false);

                if (sampleIntervalMsLow <= sampleIntervalMsHigh)
                {
                    _sampleIntervalMsLow  = sampleIntervalMsLow;
                    _sampleIntervalMsHigh = sampleIntervalMsHigh;
                }
                else
                {
                    _sampleIntervalMsLow  = DefaultSampleIntervalMsLow;
                    _sampleIntervalMsHigh = DefaultSampleIntervalMsHigh;
                }
                _throughputErrorSmoothingFactor = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.ErrorSmoothingFactor", 1, false) / 100.0;
                _gainExponent   = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.GainExponent", 200, false) / 100.0;
                _maxSampleError = AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.HillClimbing.MaxSampleErrorPercent", 15, false) / 100.0;

                _samples      = new double[_samplesToMeasure];
                _threadCounts = new double[_samplesToMeasure];

                _currentSampleMs = _randomIntervalGenerator.Next(_sampleIntervalMsLow, _sampleIntervalMsHigh + 1);
            }
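As a usage note only: these keys are regular AppContext configuration properties, so a host would normally set them through runtimeconfig.json configProperties or the corresponding environment variables. The snippet below is a hypothetical programmatic variant using AppContext.SetData; it assumes the helper reads AppContext data and that the call runs before the thread pool first constructs its hill climber, which the sketch itself cannot guarantee.

using System;

internal static class HillClimbingConfigSketch
{
    // Hypothetical startup hook; the keys are the ones read by the constructor above.
    // Integer "percent"-style values are divided by 100 there, so 25 corresponds to 0.25.
    internal static void ConfigureHillClimbing()
    {
        AppContext.SetData("System.Threading.ThreadPool.HillClimbing.WavePeriod", 4);
        AppContext.SetData("System.Threading.ThreadPool.HillClimbing.Bias", 25);
    }
}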
Example #3
        private static bool CheckEnableAutoreleasePool()
        {
            const string feature = "System.Threading.Thread.EnableAutoreleasePool";
#if !CORECLR
            return AppContextConfigHelper.GetBooleanConfig(feature, false);
#else
            bool isEnabled = CLRConfig.GetBoolValue(feature, out bool isSet);
            if (!isSet)
                return false;

            return isEnabled;
#endif
        }
Example #4
        private static HillClimbing CreateHillClimber()
        {
            // Default values pulled from CoreCLR
            return new HillClimbing(
                wavePeriod: AppContextConfigHelper.GetInt32Config("HillClimbing_WavePeriod", 4, false),
                maxWaveMagnitude: AppContextConfigHelper.GetInt32Config("HillClimbing_MaxWaveMagnitude", 20, false),
                waveMagnitudeMultiplier: AppContextConfigHelper.GetInt32Config("HillClimbing_WaveMagnitudeMultiplier", 100, false) / 100.0,
                waveHistorySize: AppContextConfigHelper.GetInt32Config("HillClimbing_WaveHistorySize", 8, false),
                targetThroughputRatio: AppContextConfigHelper.GetInt32Config("HillClimbing_Bias", 15, false) / 100.0,
                targetSignalToNoiseRatio: AppContextConfigHelper.GetInt32Config("HillClimbing_TargetSignalToNoiseRatio", 300, false) / 100.0,
                maxChangePerSecond: AppContextConfigHelper.GetInt32Config("HillClimbing_MaxChangePerSecond", 4, false),
                maxChangePerSample: AppContextConfigHelper.GetInt32Config("HillClimbing_MaxChangePerSample", 20, false),
                sampleIntervalMsLow: AppContextConfigHelper.GetInt32Config("HillClimbing_SampleIntervalLow", DefaultSampleIntervalMsLow, false),
                sampleIntervalMsHigh: AppContextConfigHelper.GetInt32Config("HillClimbing_SampleIntervalHigh", DefaultSampleIntervalMsHigh, false),
                errorSmoothingFactor: AppContextConfigHelper.GetInt32Config("HillClimbing_ErrorSmoothingFactor", 1, false) / 100.0,
                gainExponent: AppContextConfigHelper.GetInt32Config("HillClimbing_GainExponent", 200, false) / 100.0,
                maxSampleError: AppContextConfigHelper.GetInt32Config("HillClimbing_MaxSampleErrorPercent", 15, false) / 100.0);
        }
Example #5
        private static bool CheckEnableAutoreleasePool()
        {
            const string feature = "System.Threading.Thread.EnableAutoreleasePool";

#if CORECLR
            // In coreclr_initialize, we call ICLRRuntimeHost4->Start() which, among other things,
            // starts a finalizer thread for Objective-C's NSAutoreleasePool interop on macOS.
            // Although AppContext.Setup() is done during the CreateAppDomainWithManager() call, which
            // is made in coreclr_initialize right after the host has started, there is a chance of a
            // race between the call to CreateAppDomainWithManager in coreclr_initialize and the start
            // of the finalizer thread, which would call into this managed code.
            //
            // Therefore, we use the CLR configuration via QCall here instead of AppContext.

            return CLRConfig.GetBoolValue(feature, out bool isSet) && isSet;
#else
            return AppContextConfigHelper.GetBooleanConfig(feature, false);
#endif
        }
Example #6
        private static int GetIOCompletionPollerCount()
        {
            // Named for consistency with SocketAsyncEngine.Unix.cs, this environment variable is checked to override the exact
            // number of IO completion poller threads to use. See the comment in SocketAsyncEngine.Unix.cs about its potential
            // uses. For this implementation, the ProcessorsPerIOPollerThread config option below may be preferable as it may be
            // less machine-specific.
            if (uint.TryParse(Environment.GetEnvironmentVariable("DOTNET_SYSTEM_NET_SOCKETS_THREAD_COUNT"), out uint count))
            {
                return Math.Min((int)count, MaxPossibleThreadCount);
            }

            if (UnsafeInlineIOCompletionCallbacks)
            {
                // In this mode, default to ProcessorCount pollers to ensure that all processors can be utilized if more work
                // happens on the poller threads
                return Environment.ProcessorCount;
            }

            int processorsPerPoller =
                AppContextConfigHelper.GetInt32Config("System.Threading.ThreadPool.ProcessorsPerIOPollerThread", 12, false);

            return (Environment.ProcessorCount - 1) / processorsPerPoller + 1;
        }
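The final expression is an integer ceiling division: with the default of 12 processors per IO poller thread, 1 to 12 processors yield one poller, 13 to 24 yield two, and so on. A small standalone check of that arithmetic (illustrative program, not runtime code):

using System;

internal static class PollerCountExample
{
    // Same arithmetic as the return statement above:
    // ceiling(processorCount / processorsPerPoller) computed with integer math.
    private static int PollerCount(int processorCount, int processorsPerPoller) =>
        (processorCount - 1) / processorsPerPoller + 1;

    private static void Main()
    {
        foreach (int procs in new[] { 1, 12, 13, 24, 25 })
        {
            Console.WriteLine($"{procs} processors -> {PollerCount(procs, 12)} poller thread(s)");
        }
        // Prints: 1 -> 1, 12 -> 1, 13 -> 2, 24 -> 2, 25 -> 3
    }
}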
Example #7
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading is over a time range other than what we are focusing on, so we do not use the read other
                // than to send it to any runtime-specific implementation that may also use the CPU utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance = ThreadPoolInstance;
                LowLevelLock       hillClimbingThreadAdjustmentLock = threadPoolInstance._hillClimbingThreadAdjustmentLock;

                while (true)
                {
                    s_runGateThreadEvent.WaitOne();

                    bool needGateThreadForRuntime;
                    do
                    {
                        Thread.Sleep(GateThreadDelayMs);

                        if (ThreadPool.EnableWorkerTracking && PortableThreadPoolEventSource.Log.IsEnabled())
                        {
                            PortableThreadPoolEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            try
                            {
                                hillClimbingThreadAdjustmentLock.Acquire();
                                ThreadCounts counts = threadPoolInstance._separated.counts.VolatileRead();

                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions, for now this is only to maintain consistency in behavior.
                                while (
                                    counts.NumExistingThreads < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= counts.NumThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    ThreadCounts newCounts         = counts;
                                    short        newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    newCounts.NumThreadsGoal = newNumThreadsGoal;

                                    ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                                    if (oldCounts == counts)
                                    {
                                        HillClimbing.ThreadPoolHillClimber.ForceChange(newNumThreadsGoal, HillClimbing.StateOrTransition.Starvation);
                                        WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                                        break;
                                    }

                                    counts = oldCounts;
                                }
                            }
                            finally
                            {
                                hillClimbingThreadAdjustmentLock.Release();
                            }
                        }
                    } while (
                        needGateThreadForRuntime ||
                        threadPoolInstance._separated.numRequestedWorkers > 0 ||
                        Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) > GetRunningStateForNumRuns(0));
                }
            }
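The goal increase at the end of that loop is a standard compare-exchange retry: read the packed counts, compute the desired update, and publish it only if no other thread changed the counts in the meantime, otherwise retry with the freshly observed value. Stripped of the ThreadCounts packing, the pattern looks roughly like this simplified sketch (the names are placeholders, not runtime APIs):

using System.Threading;

internal static class CasRetrySketch
{
    private static int _threadsGoal;

    // Retry until our increment wins or the observed state no longer calls for one.
    internal static bool TryRaiseGoal(int maxThreads)
    {
        int observed = Volatile.Read(ref _threadsGoal);
        while (observed < maxThreads)
        {
            int desired = observed + 1;
            int actual = Interlocked.CompareExchange(ref _threadsGoal, desired, observed);
            if (actual == observed)
            {
                return true;       // our compare-exchange won; the goal was raised
            }
            observed = actual;     // another thread changed it first; re-evaluate and retry
        }
        return false;
    }
}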
Example #8
        private static bool GetEnableWorkerTracking() =>
            AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.EnableWorkerTracking", false);
Example #9
        private static bool GetInvariantSwitchValue() =>
            AppContextConfigHelper.GetBooleanConfig("System.Globalization.Invariant", "DOTNET_SYSTEM_GLOBALIZATION_INVARIANT");
Example #10
#pragma warning disable CA1810 // remove the explicit static constructor
            static BlockingConfig()
            {
                // Summary description of how blocking compensation works and how the config settings below are used:
                // - After the thread count based on MinThreads is reached, up to ThreadsToAddWithoutDelay additional threads
                //   may be created without a delay
                // - After that, before each additional thread is created, a delay is induced, starting with DelayStepMs
                // - For every ThreadsPerDelayStep threads that are added with a delay, an additional DelayStepMs is added to
                //   the delay
                // - The delay may not exceed MaxDelayMs
                // - Delays are only induced before creating threads. If threads are already available, they would be released
                //   without delay to compensate for cooperative blocking.
                // - Physical memory usage and limits are also used and beyond a threshold, the system switches to fallback mode
                //   where threads would be created if starvation is detected, typically with higher delays

                // After the thread count based on MinThreads is reached, this value (after it is multiplied by the processor
                // count) specifies how many additional threads may be created without a delay
                int blocking_threadsToAddWithoutDelay_procCountFactor =
                    AppContextConfigHelper.GetInt32Config(
                        "System.Threading.ThreadPool.Blocking.ThreadsToAddWithoutDelay_ProcCountFactor",
                        1,
                        false);

                // After the thread count based on ThreadsToAddWithoutDelay is reached, this value (after it is multiplied by
                // the processor count) specifies after how many threads an additional DelayStepMs would be added to the delay
                // before each new thread is created
                int blocking_threadsPerDelayStep_procCountFactor =
                    AppContextConfigHelper.GetInt32Config(
                        "System.Threading.ThreadPool.Blocking.ThreadsPerDelayStep_ProcCountFactor",
                        1,
                        false);

                // After the thread count based on ThreadsToAddWithoutDelay is reached, this value specifies how much additional
                // delay to add per ThreadsPerDelayStep threads, which would be applied before each new thread is created
                DelayStepMs =
                    (uint)AppContextConfigHelper.GetInt32Config(
                        "System.Threading.ThreadPool.Blocking.DelayStepMs",
                        25,
                        false);

                // After the thread count based on ThreadsToAddWithoutDelay is reached, this value specifies the max delay to
                // use before each new thread is created
                MaxDelayMs =
                    (uint)AppContextConfigHelper.GetInt32Config(
                        "System.Threading.ThreadPool.Blocking.MaxDelayMs",
                        250,
                        false);

                int processorCount = Environment.ProcessorCount;

                ThreadsToAddWithoutDelay = (short)(processorCount * blocking_threadsToAddWithoutDelay_procCountFactor);
                if (ThreadsToAddWithoutDelay > MaxPossibleThreadCount ||
                    ThreadsToAddWithoutDelay / processorCount != blocking_threadsToAddWithoutDelay_procCountFactor)
                {
                    ThreadsToAddWithoutDelay = MaxPossibleThreadCount;
                }

                blocking_threadsPerDelayStep_procCountFactor = Math.Max(1, blocking_threadsPerDelayStep_procCountFactor);
                short maxThreadsPerDelayStep = (short)(MaxPossibleThreadCount - ThreadsToAddWithoutDelay);

                ThreadsPerDelayStep =
                    (short)(processorCount * blocking_threadsPerDelayStep_procCountFactor);
                if (ThreadsPerDelayStep > maxThreadsPerDelayStep ||
                    ThreadsPerDelayStep / processorCount != blocking_threadsPerDelayStep_procCountFactor)
                {
                    ThreadsPerDelayStep = maxThreadsPerDelayStep;
                }

                MaxDelayMs  = Math.Max(1, Math.Min(MaxDelayMs, GateThread.GateActivitiesPeriodMs));
                DelayStepMs = Math.Max(1, Math.Min(DelayStepMs, MaxDelayMs));
            }
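Going only by the summary comment at the top of this constructor, the induced delay grows by DelayStepMs for every ThreadsPerDelayStep threads added beyond ThreadsToAddWithoutDelay, and is capped at MaxDelayMs. The sketch below works through that progression with the defaults (DelayStepMs = 25, MaxDelayMs = 250) on a hypothetical 8-processor machine; it is derived from the comment, not from the actual PerformBlockingAdjustment implementation.

using System;

internal static class BlockingDelaySketch
{
    // Delay before creating the Nth thread past the no-delay range, per the summary comment.
    private static uint DelayForThread(int threadsAddedWithDelay, int threadsPerDelayStep,
                                       uint delayStepMs, uint maxDelayMs)
    {
        uint steps = (uint)(threadsAddedWithDelay / threadsPerDelayStep) + 1;
        return Math.Min(steps * delayStepMs, maxDelayMs);
    }

    private static void Main()
    {
        const int threadsPerDelayStep = 8; // 1 * processorCount on an 8-processor machine
        for (int i = 0; i <= 16; i += 8)
        {
            Console.WriteLine($"thread {i} past the no-delay range -> {DelayForThread(i, threadsPerDelayStep, 25, 250)} ms");
        }
        // Prints: 0 -> 25 ms, 8 -> 50 ms, 16 -> 75 ms
    }
}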
Example #11
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading is over a time range other than what we are focusing on, so we do not use the read other
                // than to send it to any runtime-specific implementation that may also use the CPU utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance   = ThreadPoolInstance;
                LowLevelLock       threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                DelayHelper        delayHelper          = default;

                if (BlockingConfig.IsCooperativeBlockingEnabled)
                {
                    // Initialize memory usage and limits, and register to update them on gen 2 GCs
                    threadPoolInstance.OnGen2GCCallback();
                    Gen2GcCallback.Register(threadPoolInstance.OnGen2GCCallback);
                }

                while (true)
                {
                    RunGateThreadEvent.WaitOne();
                    int currentTimeMs = Environment.TickCount;
                    delayHelper.SetGateActivitiesTime(currentTimeMs);

                    while (true)
                    {
                        bool wasSignaledToWake = DelayEvent.WaitOne((int)delayHelper.GetNextDelay(currentTimeMs));
                        currentTimeMs = Environment.TickCount;

                        // Thread count adjustment for cooperative blocking
                        do
                        {
                            PendingBlockingAdjustment pendingBlockingAdjustment = threadPoolInstance._pendingBlockingAdjustment;
                            if (pendingBlockingAdjustment == PendingBlockingAdjustment.None)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                                break;
                            }

                            bool previousDelayElapsed = false;
                            if (delayHelper.HasBlockingAdjustmentDelay)
                            {
                                previousDelayElapsed =
                                    delayHelper.HasBlockingAdjustmentDelayElapsed(currentTimeMs, wasSignaledToWake);
                                if (pendingBlockingAdjustment == PendingBlockingAdjustment.WithDelayIfNecessary &&
                                    !previousDelayElapsed)
                                {
                                    break;
                                }
                            }

                            uint nextDelayMs = threadPoolInstance.PerformBlockingAdjustment(previousDelayElapsed);
                            if (nextDelayMs <= 0)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                            }
                            else
                            {
                                delayHelper.SetBlockingAdjustmentTimeAndDelay(currentTimeMs, nextDelayMs);
                            }
                        } while (false);

                        //
                        // Periodic gate activities
                        //

                        if (!delayHelper.ShouldPerformGateActivities(currentTimeMs, wasSignaledToWake))
                        {
                            continue;
                        }

                        if (ThreadPool.EnableWorkerTracking && NativeRuntimeEventSource.Log.IsEnabled())
                        {
                            NativeRuntimeEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        bool needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            bool addWorker = false;
                            threadAdjustmentLock.Acquire();
                            try
                            {
                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions, for now this is only to maintain consistency in behavior.
                                ThreadCounts counts = threadPoolInstance._separated.counts;
                                if (counts.NumProcessingWork < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= threadPoolInstance._separated.numThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    short newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    threadPoolInstance._separated.numThreadsGoal = newNumThreadsGoal;
                                    HillClimbing.ThreadPoolHillClimber.ForceChange(
                                        newNumThreadsGoal,
                                        HillClimbing.StateOrTransition.Starvation);
                                    addWorker = true;
                                }
                            }
                            finally
                            {
                                threadAdjustmentLock.Release();
                            }

                            if (addWorker)
                            {
                                WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                            }
                        }

                        if (!needGateThreadForRuntime &&
                            threadPoolInstance._separated.numRequestedWorkers <= 0 &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) <= GetRunningStateForNumRuns(0))
                        {
                            break;
                        }
                    }
                }
            }
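One structural detail worth noting: the blocking-adjustment section above wraps its logic in do { ... } while (false) purely so that break can exit the block early, without a goto or a separate helper method. In isolation the idiom looks like the following sketch (the condition names are placeholders):

internal static class BreakableBlockSketch
{
    internal static void RunOnce(bool nothingPending, bool stillDelayed)
    {
        // The body runs at most once; 'break' skips the remainder of the block,
        // much like an early return from a helper method, while keeping the code inline.
        do
        {
            if (nothingPending)
            {
                break; // nothing to adjust on this iteration
            }

            if (stillDelayed)
            {
                break; // adjustment postponed; revisit on the next wake-up
            }

            // ... perform the adjustment here ...
        } while (false);

        // Execution continues here after any break.
    }
}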