Example No. 1
        protected override void Dispose(bool disposing)
        {
            // Unadvise solution listeners.
            try
            {
                if (disposing)
                {
                    // only decrement the reference count once, regardless of the number of times Dispose is called.
                    // Ignore if Initialize was never called.
                    if (_initialized && !_disposed && Interlocked.Decrement(ref _singleFileGeneratorNodeExtenderReferenceCount) == 0)
                    {
                        ObjectExtenders objectExtenders = (ObjectExtenders)GetService(typeof(ObjectExtenders));
                        objectExtenders.UnregisterExtenderProvider(_singleFileGeneratorNodeExtenderCookie);
                    }

                    foreach (SolutionListener solutionListener in this.solutionListeners)
                    {
                        solutionListener.Dispose();
                    }
                }
            }
            finally
            {
                _disposed = true;
                base.Dispose(disposing);
            }
        }
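
Examples No. 2 and No. 4 below show the matching Interlocked.Increment side of this shared reference count. A minimal self-contained sketch of the register-once/unregister-once idea, assuming a single process-wide count; RefCountedRegistration and the Register/UnregisterProvider placeholders are hypothetical stand-ins for the ObjectExtenders calls:

    using System.Threading;

    internal sealed class RefCountedRegistration
    {
        // Shared across all instances (assumption: one registration per process).
        private static int s_referenceCount;

        public void Acquire()
        {
            // Only the first acquirer performs the one-time registration.
            if (Interlocked.Increment(ref s_referenceCount) == 1)
            {
                RegisterProvider();    // hypothetical placeholder
            }
        }

        public void Release()
        {
            // Only the last releaser performs the one-time unregistration.
            if (Interlocked.Decrement(ref s_referenceCount) == 0)
            {
                UnregisterProvider();  // hypothetical placeholder
            }
        }

        private static void RegisterProvider() { }
        private static void UnregisterProvider() { }
    }
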
Example No. 2
        protected override async System.Threading.Tasks.Task InitializeAsync(CancellationToken cancellationToken, IProgress <ServiceProgressData> progress)
        {
            await this.JoinableTaskFactory.SwitchToMainThreadAsync(cancellationToken);

            await base.InitializeAsync(cancellationToken, progress);

            try
            {
                // this block assumes that the ProjectPackage instances will all be initialized on the same thread,
                // but doesn't assume that only one ProjectPackage instance exists at a time
                if (Interlocked.Increment(ref _singleFileGeneratorNodeExtenderReferenceCount) == 1)
                {
                    ObjectExtenders objectExtenders = (ObjectExtenders)GetService(typeof(ObjectExtenders));
                    _singleFileGeneratorNodeExtenderProvider = new SingleFileGeneratorNodeExtenderProvider();
                    string extenderCatId = typeof(FileNodeProperties).GUID.ToString("B");
                    string extenderName  = SingleFileGeneratorNodeExtenderProvider.Name;
                    string localizedName = extenderName;
                    _singleFileGeneratorNodeExtenderCookie = objectExtenders.RegisterExtenderProvider(extenderCatId, extenderName, _singleFileGeneratorNodeExtenderProvider, localizedName);
                }
            }
            finally
            {
                _initialized = true;
            }
        }
Example No. 3
        public override void Add(T element)
        {
            int index         = Interlocked.Increment(ref _index) - 1;
            int adjustedIndex = index;

            int arrayIndex = GetArrayIndex(index + 1);

            if (arrayIndex > 0)
            {
                adjustedIndex -= Counts[arrayIndex - 1];
            }

            if (_array[arrayIndex] == null)
            {
                int arrayLength = Sizes[arrayIndex];
                Interlocked.CompareExchange(ref _array[arrayIndex], new T[arrayLength], null);
            }

            _array[arrayIndex][adjustedIndex] = element;

            int count      = _count;
            int fuzzyCount = Interlocked.Increment(ref _fuzzyCount);

            if (fuzzyCount == index + 1)
            {
                Interlocked.CompareExchange(ref _count, fuzzyCount, count);
            }
            ItemAddedEvent?.Invoke(element, index);
        }
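
A self-contained sketch of the same slot-reservation idea on a single fixed-size array. FixedLockFreeList is a hypothetical name, and the segmented Sizes/Counts bookkeeping, GetArrayIndex, and ItemAddedEvent of the example are deliberately left out:

    using System.Threading;

    internal sealed class FixedLockFreeList<T>
    {
        private readonly T[] _items;
        private int _nextIndex;    // next slot to hand out
        private int _fuzzyCount;   // number of completed writes
        private int _count;        // published count

        public FixedLockFreeList(int capacity) => _items = new T[capacity];

        public int Count => Volatile.Read(ref _count);

        public bool TryAdd(T item)
        {
            // Reserve a slot; no two writers ever receive the same index.
            int index = Interlocked.Increment(ref _nextIndex) - 1;
            if (index >= _items.Length)
            {
                return false;   // full (the example grows into a new segment instead)
            }

            _items[index] = item;

            // Bump the completed-write count after the write, and opportunistically
            // advance the published count once it appears to have caught up.
            int count = _count;
            int fuzzyCount = Interlocked.Increment(ref _fuzzyCount);
            if (fuzzyCount == index + 1)
            {
                Interlocked.CompareExchange(ref _count, fuzzyCount, count);
            }

            return true;
        }
    }
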
Example No. 4
        protected override void Initialize()
        {
            base.Initialize();

            // Subscribe to the solution events
            this.solutionListeners.Add(new SolutionListenerForProjectReferenceUpdate(this));
            this.solutionListeners.Add(new SolutionListenerForProjectOpen(this));
            this.solutionListeners.Add(new SolutionListenerForBuildDependencyUpdate(this));
            this.solutionListeners.Add(new SolutionListenerForProjectEvents(this));

            foreach (SolutionListener solutionListener in this.solutionListeners)
            {
                solutionListener.Init();
            }

            try
            {
                // this block assumes that the ProjectPackage instances will all be initialized on the same thread,
                // but doesn't assume that only one ProjectPackage instance exists at a time
                if (Interlocked.Increment(ref _singleFileGeneratorNodeExtenderReferenceCount) == 1)
                {
                    ObjectExtenders objectExtenders = (ObjectExtenders)GetService(typeof(ObjectExtenders));
                    _singleFileGeneratorNodeExtenderProvider = new SingleFileGeneratorNodeExtenderProvider();
                    string extenderCatId = typeof(FileNodeProperties).GUID.ToString("B");
                    string extenderName  = SingleFileGeneratorNodeExtenderProvider.Name;
                    string localizedName = extenderName;
                    _singleFileGeneratorNodeExtenderCookie = objectExtenders.RegisterExtenderProvider(extenderCatId, extenderName, _singleFileGeneratorNodeExtenderProvider, localizedName);
                }
            }
            finally
            {
                _initialized = true;
            }
        }
Example No. 5
        public override T this[int key]
        {
            get
            {
                if (key < minIndex || key > maxIndex)
                {
                    return(null);
                }

#if NET45PLUS
                return(Volatile.Read(ref arrayData[key - minIndex]));
#else
                return(Interlocked.CompareExchange(ref arrayData[key - minIndex], null, null));
#endif
            }
        }
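
The #else branch works because Interlocked.CompareExchange with a null comparand and a null replacement never actually changes the field, yet still performs a full-fence (volatile) read. A small sketch making that equivalence explicit; AtomicRead is a hypothetical helper, and NET45PLUS is the same conditional compilation symbol used in the example:

    using System.Threading;

    internal static class AtomicRead
    {
        // Reads a reference field with volatile semantics on any target framework.
        public static T Read<T>(ref T location) where T : class
        {
#if NET45PLUS
            return Volatile.Read(ref location);
#else
            // Replaces the value with null only if it is already null, so the
            // field is never modified; the interlocked operation supplies the fence.
            return Interlocked.CompareExchange<T>(ref location, null, null);
#endif
        }
    }
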
Example No. 6
        /// <summary>
        /// Executes a foreach loop in which iterations may run in parallel
        /// </summary>
        /// <typeparam name="T">Object type that the collection wraps</typeparam>
        /// <param name="threadCount">The number of concurrent execution threads to run</param>
        /// <param name="enumerable">An enumerable collection to iterate over</param>
        /// <param name="body">Method body to run for each object in the collection</param>
        public static void ForEach <T>(int threadCount, IEnumerable <T> enumerable, Action <T> body)
        {
            int             counter           = threadCount;
            AutoResetEvent  threadFinishEvent = new AutoResetEvent(false);
            IEnumerator <T> enumerator        = enumerable.GetEnumerator();
            Exception       exception         = null;

            for (int i = 0; i < threadCount; i++)
            {
                ThreadPool.QueueUserWorkItem(
                    delegate(object o)
                {
                    int threadIndex = (int)o;

                    while (exception == null)
                    {
                        T entry;

                        lock (enumerator)
                        {
                            if (!enumerator.MoveNext())
                            {
                                break;
                            }
                            entry = (T)enumerator.Current;     // Explicit typecast for Mono's sake
                        }

                        try { body(entry); }
                        catch (Exception ex) { exception = ex; break; }
                    }

                    if (Interlocked.Decrement(ref counter) == 0)
                    {
                        threadFinishEvent.Set();
                    }
                }, i
                    );
            }

            threadFinishEvent.WaitOne();

            if (exception != null)
            {
                throw exception;
            }
        }
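
A minimal usage sketch of this helper. The examples do not show the declaring type, so SimpleParallel is used here as a hypothetical static class name:

    using System;
    using System.Collections.Generic;

    internal static class ForEachDemo
    {
        private static void Main()
        {
            IEnumerable<string> files = new[] { "a.txt", "b.txt", "c.txt" };

            // Four pool threads pull items from the shared enumerator (under the
            // lock inside ForEach) and run the body outside that lock.
            SimpleParallel.ForEach(4, files, file => Console.WriteLine($"processing {file}"));
        }
    }

After all workers signal completion, the helper rethrows a single captured exception if any body threw; note that it never disposes the enumerator, which matters when the source is an iterator holding resources.
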
Example No. 7
 public override AbstractEdgeMap <T> Put(int key, T value)
 {
     if (key >= minIndex && key <= maxIndex)
     {
         T existing = Interlocked.Exchange(ref arrayData[key - minIndex], value);
         if (existing == null && value != null)
         {
             Interlocked.Increment(ref size);
         }
         else
         {
             if (existing != null && value == null)
             {
                 Interlocked.Decrement(ref size);
             }
         }
     }
     return(this);
 }
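
A self-contained sketch of the same idiom: the previous value returned by Interlocked.Exchange decides whether the element count changes, so concurrent Put calls never double-count a slot. SlotMap is a hypothetical type, and the min/max range check of the example is omitted for brevity:

    using System.Threading;

    internal sealed class SlotMap<T> where T : class
    {
        private readonly T[] _slots;
        private int _size;

        public SlotMap(int capacity) => _slots = new T[capacity];

        public int Size => Volatile.Read(ref _size);

        public void Put(int key, T value)
        {
            T existing = Interlocked.Exchange(ref _slots[key], value);
            if (existing == null && value != null)
            {
                Interlocked.Increment(ref _size);   // an empty slot became occupied
            }
            else if (existing != null && value == null)
            {
                Interlocked.Decrement(ref _size);   // an occupied slot was cleared
            }
        }
    }
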
Example No. 8
 private void SetCallbacks()
 {
     for (int i = 0; i < _pkg.Connections.Count; i++)
     {
         int ind = i;
         _pkg.Connections[i].On(_pkg.Job.CallbackName, (string uid, string time) =>
         {
             var receiveTimestamp = Util.Timestamp();
             var sendTimestamp    = Convert.ToInt64(time);
             //Util.Log($"diff time: {receiveTimestamp - sendTimestamp}");
             Counters.CountLatency(sendTimestamp, receiveTimestamp);
             Interlocked.Increment(ref totalReceivedMsg);
             if (ind == 0)
             {
                 Util.Log($"#### echocallback");
             }
         });
     }
 }
Example No. 9
        /// <summary>
        /// Executes a for loop in which iterations may run in parallel
        /// </summary>
        /// <param name="threadCount">The number of concurrent execution threads to run</param>
        /// <param name="fromInclusive">The loop will be started at this index</param>
        /// <param name="toExclusive">The loop will be terminated before this index is reached</param>
        /// <param name="body">Method body to run for each iteration of the loop</param>
        public static void For(int threadCount, int fromInclusive, int toExclusive, Action <int> body)
        {
            int            counter           = threadCount;
            AutoResetEvent threadFinishEvent = new AutoResetEvent(false);
            Exception      exception         = null;

            --fromInclusive;

            for (int i = 0; i < threadCount; i++)
            {
                ThreadPool.QueueUserWorkItem(
                    delegate(object o)
                {
                    int threadIndex = (int)o;

                    while (exception == null)
                    {
                        int currentIndex = Interlocked.Increment(ref fromInclusive);

                        if (currentIndex >= toExclusive)
                        {
                            break;
                        }

                        try { body(currentIndex); }
                        catch (Exception ex) { exception = ex; break; }
                    }

                    if (Interlocked.Decrement(ref counter) == 0)
                    {
                        threadFinishEvent.Set();
                    }
                }, i
                    );
            }

            threadFinishEvent.WaitOne();

            if (exception != null)
            {
                throw exception;
            }
        }
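
A usage sketch, reusing the hypothetical SimpleParallel class name from the sketch after Example No. 6. The `--fromInclusive` above exists so that the first Interlocked.Increment hands out the original starting index:

    internal static class ForDemo
    {
        private static void Main()
        {
            int[] squares = new int[100];

            // Each of the four workers repeatedly claims the next index with
            // Interlocked.Increment until the exclusive upper bound is reached.
            SimpleParallel.For(4, 0, 100, i => squares[i] = i * i);

            System.Console.WriteLine(squares[99]);   // 9801
        }
    }
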
Example No. 10
            private SynchronizedBlock EnterLock()
            {
                this.IsLockTaken = true;
                SystemInterlocked.Increment(ref this.UseCount);

                if (this.Owner is null)
                {
                    // If this operation is trying to acquire this lock while it is free, then inject a scheduling
                    // point to give another enabled operation the chance to race and acquire this lock.
                    this.Resource.Runtime.ScheduleNextOperation(SchedulingPointType.Acquire);
                }

                if (this.Owner != null)
                {
                    var op = this.Resource.Runtime.GetExecutingOperation();
                    if (this.Owner == op)
                    {
                        // The owner is re-entering the lock.
                        this.LockCountMap[op]++;
                        return(this);
                    }
                    else
                    {
                        // Another op has the lock right now, so add the executing op
                        // to the ready queue and block it.
                        this.WaitQueue.Remove(op);
                        if (!this.ReadyQueue.Contains(op))
                        {
                            this.ReadyQueue.Add(op);
                        }

                        this.Resource.Wait();
                        this.LockCountMap.Add(op, 1);
                        return(this);
                    }
                }

                // The executing op acquired the lock and can proceed.
                this.Owner = this.Resource.Runtime.GetExecutingOperation();
                this.LockCountMap.Add(this.Owner, 1);
                return(this);
            }
Example No. 11
        /// <summary>
        /// Executes a series of tasks in parallel
        /// </summary>
        /// <param name="threadCount">The number of concurrent execution threads to run</param>
        /// <param name="actions">A series of method bodies to execute</param>
        public static void Invoke(int threadCount, params Action[] actions)
        {
            int            counter           = threadCount;
            AutoResetEvent threadFinishEvent = new AutoResetEvent(false);
            int            index             = -1;
            Exception      exception         = null;

            for (int i = 0; i < threadCount; i++)
            {
                ThreadPool.QueueUserWorkItem(
                    delegate(object o)
                {
                    int threadIndex = (int)o;

                    while (exception == null)
                    {
                        int currentIndex = Interlocked.Increment(ref index);

                        if (currentIndex >= actions.Length)
                        {
                            break;
                        }

                        try { actions[currentIndex](); }
                        catch (Exception ex) { exception = ex; break; }
                    }

                    if (Interlocked.Decrement(ref counter) == 0)
                    {
                        threadFinishEvent.Set();
                    }
                }, i
                    );
            }

            threadFinishEvent.WaitOne();

            if (exception != null)
            {
                throw exception;
            }
        }
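
A usage sketch, again with the hypothetical SimpleParallel name. The shared index starts at -1, so the first Interlocked.Increment yields action 0 and each action runs exactly once even when there are fewer threads than actions:

    internal static class InvokeDemo
    {
        private static void Main()
        {
            // Two pool threads drain three actions; whichever thread increments
            // the shared index to a given value runs the corresponding action.
            SimpleParallel.Invoke(2,
                () => System.Console.WriteLine("task A"),
                () => System.Console.WriteLine("task B"),
                () => System.Console.WriteLine("task C"));
        }
    }
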
Example No. 12
        //protected void SetTimers()
        //{
        //    TimerPerConnection = new List<System.Timers.Timer>(_pkg.Job.Connections);
        //    DelayPerConnection = new List<TimeSpan>(_pkg.Job.Connections);

        //    for (int i = 0; i < _pkg.Connections.Count; i++)
        //    {
        //        var delay = StartTimeOffsetGenerator.Delay(TimeSpan.FromSeconds(_pkg.Job.Interval));
        //        DelayPerConnection.Add(delay);

        //        TimerPerConnection.Add(new System.Timers.Timer());

        //        var ind = i;
        //        var startTime = Util.Timestamp();
        //        TimerPerConnection[i].AutoReset = true;
        //        TimerPerConnection[i].Elapsed += (sender, e) =>
        //        {
        //            // set new interval
        //            TimerPerConnection[ind].Stop();
        //            TimerPerConnection[ind].Interval = _pkg.Job.Interval * 1000;
        //            TimerPerConnection[ind].Start();

        //            if (_pkg.SentMassage[ind] >= _pkg.Job.Duration * _pkg.Job.Interval)
        //            {
        //                TimerPerConnection[ind].Stop();
        //                return;
        //            }

        //            if (ind == 0)
        //            {
        //                Util.Log($"Sending Message");
        //            }

        //            try
        //            {
        //                _pkg.Connections[ind].SendAsync("echo", $"{GuidEncoder.Encode(Guid.NewGuid())}", $"{Util.Timestamp()}").Wait();
        //            }
        //            catch (Exception ex)
        //            {
        //                Console.WriteLine($"Failed to send massage: {ex} \n");
        //            }
        //            Interlocked.Increment(ref totalSentMsg);
        //            _pkg.SentMassage[ind]++;
        //            Counters.IncreseSentMsg();

        //        };
        //    }


        //}

        private async Task StartSendingMessageAsync(HubConnection connection)
        {
            await Task.Delay(StartTimeOffsetGenerator.Delay(TimeSpan.FromSeconds(_pkg.Job.Interval)));

            using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(_pkg.Job.Duration)))
            {
                while (!cts.IsCancellationRequested)
                {
                    try
                    {
                        await connection.SendAsync("echo", "id", $"{Util.Timestamp()}");

                        Interlocked.Increment(ref totalSentMsg);
                    }
                    catch
                    {
                        Interlocked.Increment(ref totalErrMsg);
                    }

                    await Task.Delay(TimeSpan.FromSeconds(_pkg.Job.Interval));
                }
            }
        }
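
The pattern above (a timed CancellationTokenSource bounding the loop, Interlocked counters shared across many concurrent connection loops) can be isolated into a small reusable sketch. TimedSender and its members are hypothetical names, not part of the benchmark code:

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    internal sealed class TimedSender
    {
        private int _sent;
        private int _errors;

        public int Sent => Volatile.Read(ref _sent);
        public int Errors => Volatile.Read(ref _errors);

        // Invokes send() once per interval until the duration elapses, counting
        // successes and failures with interlocked increments so that many
        // concurrent loops can safely share one TimedSender instance.
        public async Task RunAsync(Func<Task> send, TimeSpan interval, TimeSpan duration)
        {
            using (var cts = new CancellationTokenSource(duration))
            {
                while (!cts.IsCancellationRequested)
                {
                    try
                    {
                        await send();
                        Interlocked.Increment(ref _sent);
                    }
                    catch
                    {
                        Interlocked.Increment(ref _errors);
                    }

                    await Task.Delay(interval);
                }
            }
        }
    }
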
Example No. 13
            internal void Exit()
            {
                var op = this.Resource.Runtime.GetExecutingOperation <AsyncOperation>();

                this.Resource.Runtime.Assert(this.LockCountMap.ContainsKey(op), "Cannot invoke Dispose without acquiring the lock.");

                this.LockCountMap[op]--;
                if (this.LockCountMap[op] is 0)
                {
                    // Only release the lock if the invocation is not reentrant.
                    this.LockCountMap.Remove(op);
                    this.UnlockNextReady();
                    this.Resource.Runtime.ScheduleNextOperation(AsyncOperationType.Release);
                }

                int useCount = SystemInterlocked.Decrement(ref this.UseCount);

                if (useCount is 0 && Cache[this.SyncObject].Value == this)
                {
                    // It is safe to remove this instance from the cache.
                    Cache.TryRemove(this.SyncObject, out _);
                }
            }
Example No. 14
 /// <summary>
 /// Ensure the lock object is initialized.
 /// </summary>
 /// <param name="syncLock">A reference to a location containing a mutual exclusive lock. If <paramref name="syncLock"/> is null,
 /// a new object will be instantiated.</param>
 /// <returns>Initialized lock object.</returns>
 private static object EnsureLockInitialized(ref object syncLock) =>
 syncLock ??
 Interlocked.CompareExchange(ref syncLock, new object(), null) ??
 syncLock;
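
A usage sketch: a lazily created lock object guarding a lazily computed value. The helper body is copied verbatim from the example so the sketch compiles on its own; LazilyLockedCache and the placeholder computation are hypothetical:

    using System.Threading;

    internal sealed class LazilyLockedCache
    {
        private object _syncLock;   // deliberately left null until first use
        private int _value;

        public int GetOrCompute()
        {
            // Every caller observes the same lock object: either the one already
            // stored, the one this thread installed, or the one installed by the
            // thread that won the CompareExchange race.
            lock (EnsureLockInitialized(ref _syncLock))
            {
                if (_value == 0)
                {
                    _value = 42;   // placeholder for an expensive computation
                }

                return _value;
            }
        }

        private static object EnsureLockInitialized(ref object syncLock) =>
            syncLock ??
            Interlocked.CompareExchange(ref syncLock, new object(), null) ??
            syncLock;
    }
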
Example No. 15
            /// <summary>
            /// If this table is not full, then just return "this".  Otherwise, create and return a new table with
            /// additional capacity, and rehash all values in the table.
            /// </summary>
            public XHashtableState Resize()
            {
                // No need to resize if there are open entries
                if (_numEntries < _buckets.Length)
                {
                    return(this);
                }

                int newSize = 0;

                // Determine capacity of resized hash table by first counting number of valid, non-orphaned entries
                // As this count proceeds, close all linked lists so that no additional entries can be added to them
                for (int bucketIdx = 0; bucketIdx < _buckets.Length; bucketIdx++)
                {
                    int entryIdx = _buckets[bucketIdx];

                    if (entryIdx == EndOfList)
                    {
                        // Replace EndOfList with FullList, so that any threads still attempting to add will be forced to resize
                        entryIdx = Interlocked.CompareExchange(ref _buckets[bucketIdx], FullList, EndOfList);
                    }

                    // Loop until we've guaranteed that the list has been counted and closed to further adds
                    while (entryIdx > EndOfList)
                    {
                        // Count each valid entry
                        if (_extractKey(_entries[entryIdx].Value) != null)
                        {
                            newSize++;
                        }

                        if (_entries[entryIdx].Next == EndOfList)
                        {
                            // Replace EndOfList with FullList, so that any threads still attempting to add will be forced to resize
                            entryIdx = Interlocked.CompareExchange(ref _entries[entryIdx].Next, FullList, EndOfList);
                        }
                        else
                        {
                            // Move to next entry in the list
                            entryIdx = _entries[entryIdx].Next;
                        }
                    }
                    Debug.Assert(entryIdx == EndOfList, "Resize() should only be called by one thread");
                }

                // Double number of valid entries; if result is less than current capacity, then use current capacity
                if (newSize < _buckets.Length / 2)
                {
                    newSize = _buckets.Length;
                }
                else
                {
                    newSize = _buckets.Length * 2;

                    if (newSize < 0)
                    {
                        throw new OverflowException();
                    }
                }

                // Create new hash table with additional capacity
                XHashtableState newHashtable = new XHashtableState(_extractKey, newSize);

                // Rehash names (TryAdd will always succeed, since we won't fill the new table)
                // Do not simply walk over entries and add them to table, as that would add orphaned
                // entries.  Instead, walk the linked lists and add each name.
                for (int bucketIdx = 0; bucketIdx < _buckets.Length; bucketIdx++)
                {
                    int    entryIdx = _buckets[bucketIdx];
                    TValue newValue;

                    while (entryIdx > EndOfList)
                    {
                        newHashtable.TryAdd(_entries[entryIdx].Value, out newValue);

                        entryIdx = _entries[entryIdx].Next;
                    }
                    Debug.Assert(entryIdx == FullList, "Linked list should have been closed when it was counted");
                }

                return(newHashtable);
            }
Example No. 16
            /// <summary>
            /// Attempt to add "value" to the table, hashed by an embedded string key.  If a value having the same key already exists,
            /// then return the existing value in "newValue".  Otherwise, return the newly added value in "newValue".
            ///
            /// If the hash table is full, return false.  Otherwise, return true.
            /// </summary>
            public bool TryAdd(TValue value, out TValue newValue)
            {
                int    newEntry, entryIndex;
                string key;
                int    hashCode;

                // Assume "value" will be added and returned as "newValue"
                newValue = value;

                // Extract the key from the value.  If it's null, then value is invalid and does not need to be added to table.
                key = _extractKey(value);
                if (key == null)
                {
                    return(true);
                }

                // Compute hash code over entire length of key
                hashCode = ComputeHashCode(key, 0, key.Length);

                // Assume value is not yet in the hash table, and prepare to add it (if table is full, return false).
                // Use the entry index returned from Increment, which will never be zero, as zero conflicts with EndOfList.
                // Although this means that the first entry will never be used, it avoids the need to initialize all
                // starting buckets to the EndOfList value.
                newEntry = Interlocked.Increment(ref _numEntries);
                if (newEntry < 0 || newEntry >= _buckets.Length)
                {
                    return(false);
                }

                _entries[newEntry].Value    = value;
                _entries[newEntry].HashCode = hashCode;

                // Ensure that all writes to the entry can't be reordered past this barrier (or other threads might see new entry
                // in list before entry has been initialized!).
                Thread.MemoryBarrier();

                // Loop until a matching entry is found, a new entry is added, or linked list is found to be full
                entryIndex = 0;
                while (!FindEntry(hashCode, key, 0, key.Length, ref entryIndex))
                {
                    // PUBLISH (buckets slot)
                    // No matching entry found, so add the new entry to the end of the list ("entryIndex" is index of last entry)
                    if (entryIndex == 0)
                    {
                        entryIndex = Interlocked.CompareExchange(ref _buckets[hashCode & (_buckets.Length - 1)], newEntry, EndOfList);
                    }
                    else
                    {
                        entryIndex = Interlocked.CompareExchange(ref _entries[entryIndex].Next, newEntry, EndOfList);
                    }

                    // Return true only if the CompareExchange succeeded (happens when replaced value is EndOfList).
                    // Return false if the linked list turned out to be full because another thread is currently resizing
                    // the hash table.  In this case, entries[newEntry] is orphaned (not part of any linked list) and the
                    // Add needs to be performed on the new hash table.  Otherwise, keep looping, looking for new end of list.
                    if (entryIndex <= EndOfList)
                    {
                        return(entryIndex == EndOfList);
                    }
                }

                // Another thread already added the value while this thread was trying to add, so return that instance instead.
                // Note that entries[newEntry] will be orphaned (not part of any linked list) in this case
                newValue = _entries[entryIndex].Value;

                return(true);
            }
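
The CompareExchange calls above publish a fully initialized entry by swinging a bucket slot or Next link away from the EndOfList sentinel. A minimal sketch of the same publish-by-CAS step in the simpler setting of a lock-free stack; LockFreeStack is a hypothetical type, not part of the hashtable:

    using System.Threading;

    internal sealed class LockFreeStack<T>
    {
        private sealed class Node
        {
            public readonly T Value;
            public Node Next;
            public Node(T value) => Value = value;
        }

        private Node _head;

        public void Push(T value)
        {
            var node = new Node(value);
            while (true)
            {
                Node head = Volatile.Read(ref _head);
                node.Next = head;   // fully initialize the node before publishing

                // PUBLISH: the node becomes visible to other threads only if the
                // head has not changed since it was read.
                if (Interlocked.CompareExchange(ref _head, node, head) == head)
                {
                    return;
                }
            }
        }

        public bool TryPeek(out T value)
        {
            Node head = Volatile.Read(ref _head);
            if (head == null)
            {
                value = default(T);
                return false;
            }

            value = head.Value;
            return true;
        }
    }
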
Example No. 17
            private static void GateThreadStart()
            {
                bool disableStarvationDetection =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DisableStarvationDetection", false);
                bool debuggerBreakOnWorkStarvation =
                    AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.DebugBreakOnWorkerStarvation", false);

                // The first reading is over a time range other than what we are focusing on, so we do not use the read other
                // than to send it to any runtime-specific implementation that may also use the CPU utilization.
                CpuUtilizationReader cpuUtilizationReader = default;

                _ = cpuUtilizationReader.CurrentUtilization;

                PortableThreadPool threadPoolInstance   = ThreadPoolInstance;
                LowLevelLock       threadAdjustmentLock = threadPoolInstance._threadAdjustmentLock;
                DelayHelper        delayHelper          = default;

                if (BlockingConfig.IsCooperativeBlockingEnabled)
                {
                    // Initialize memory usage and limits, and register to update them on gen 2 GCs
                    threadPoolInstance.OnGen2GCCallback();
                    Gen2GcCallback.Register(threadPoolInstance.OnGen2GCCallback);
                }

                while (true)
                {
                    RunGateThreadEvent.WaitOne();
                    int currentTimeMs = Environment.TickCount;
                    delayHelper.SetGateActivitiesTime(currentTimeMs);

                    while (true)
                    {
                        bool wasSignaledToWake = DelayEvent.WaitOne((int)delayHelper.GetNextDelay(currentTimeMs));
                        currentTimeMs = Environment.TickCount;

                        // Thread count adjustment for cooperative blocking
                        do
                        {
                            PendingBlockingAdjustment pendingBlockingAdjustment = threadPoolInstance._pendingBlockingAdjustment;
                            if (pendingBlockingAdjustment == PendingBlockingAdjustment.None)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                                break;
                            }

                            bool previousDelayElapsed = false;
                            if (delayHelper.HasBlockingAdjustmentDelay)
                            {
                                previousDelayElapsed =
                                    delayHelper.HasBlockingAdjustmentDelayElapsed(currentTimeMs, wasSignaledToWake);
                                if (pendingBlockingAdjustment == PendingBlockingAdjustment.WithDelayIfNecessary &&
                                    !previousDelayElapsed)
                                {
                                    break;
                                }
                            }

                            uint nextDelayMs = threadPoolInstance.PerformBlockingAdjustment(previousDelayElapsed);
                            if (nextDelayMs <= 0)
                            {
                                delayHelper.ClearBlockingAdjustmentDelay();
                            }
                            else
                            {
                                delayHelper.SetBlockingAdjustmentTimeAndDelay(currentTimeMs, nextDelayMs);
                            }
                        } while (false);

                        //
                        // Periodic gate activities
                        //

                        if (!delayHelper.ShouldPerformGateActivities(currentTimeMs, wasSignaledToWake))
                        {
                            continue;
                        }

                        if (ThreadPool.EnableWorkerTracking && NativeRuntimeEventSource.Log.IsEnabled())
                        {
                            NativeRuntimeEventSource.Log.ThreadPoolWorkingThreadCount(
                                (uint)threadPoolInstance.GetAndResetHighWatermarkCountOfThreadsProcessingUserCallbacks());
                        }

                        int cpuUtilization = cpuUtilizationReader.CurrentUtilization;
                        threadPoolInstance._cpuUtilization = cpuUtilization;

                        bool needGateThreadForRuntime = ThreadPool.PerformRuntimeSpecificGateActivities(cpuUtilization);

                        if (!disableStarvationDetection &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            threadPoolInstance._separated.numRequestedWorkers > 0 &&
                            SufficientDelaySinceLastDequeue(threadPoolInstance))
                        {
                            bool addWorker = false;
                            threadAdjustmentLock.Acquire();
                            try
                            {
                                // Don't add a thread if we're at max or if we are already in the process of adding threads.
                                // This logic is slightly different from the native implementation in CoreCLR because there are
                                // no retired threads. In the native implementation, when hill climbing reduces the thread count
                                // goal, threads that are stopped from processing work are switched to "retired" state, and they
                                // don't count towards the equivalent existing thread count. In this implementation, the
                                // existing thread count includes any worker thread that has not yet exited, including those
                                // stopped from working by hill climbing, so here the number of threads processing work, instead
                                // of the number of existing threads, is compared with the goal. There may be alternative
                                // solutions, for now this is only to maintain consistency in behavior.
                                ThreadCounts counts = threadPoolInstance._separated.counts;
                                while (
                                    counts.NumProcessingWork < threadPoolInstance._maxThreads &&
                                    counts.NumProcessingWork >= counts.NumThreadsGoal)
                                {
                                    if (debuggerBreakOnWorkStarvation)
                                    {
                                        Debugger.Break();
                                    }

                                    ThreadCounts newCounts         = counts;
                                    short        newNumThreadsGoal = (short)(counts.NumProcessingWork + 1);
                                    newCounts.NumThreadsGoal = newNumThreadsGoal;

                                    ThreadCounts countsBeforeUpdate =
                                        threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                                    if (countsBeforeUpdate == counts)
                                    {
                                        HillClimbing.ThreadPoolHillClimber.ForceChange(
                                            newNumThreadsGoal,
                                            HillClimbing.StateOrTransition.Starvation);
                                        addWorker = true;
                                        break;
                                    }

                                    counts = countsBeforeUpdate;
                                }
                            }
                            finally
                            {
                                threadAdjustmentLock.Release();
                            }

                            if (addWorker)
                            {
                                WorkerThread.MaybeAddWorkingWorker(threadPoolInstance);
                            }
                        }

                        if (!needGateThreadForRuntime &&
                            threadPoolInstance._separated.numRequestedWorkers <= 0 &&
                            threadPoolInstance._pendingBlockingAdjustment == PendingBlockingAdjustment.None &&
                            Interlocked.Decrement(ref threadPoolInstance._separated.gateThreadRunningState) <= GetRunningStateForNumRuns(0))
                        {
                            break;
                        }
                    }
                }
            }