public static unsafe void StoreReferenceTypeField(IntPtr address, object fieldValue)
{
    // Stores an object reference into the field located at the given raw address,
    // using a volatile (release-semantics) write so that earlier stores are not
    // reordered past the publication of the reference.
    // The IntPtr slot is reinterpreted as an object reference via Unsafe.As.
    // NOTE(review): assumes 'address' points at a location the GC already tracks
    // as a reference field (no explicit write barrier is visible here beyond
    // what Volatile.Write<object> emits) — confirm against callers.
    Volatile.Write<Object>(ref Unsafe.As<IntPtr, object>(ref *(IntPtr*)address), fieldValue);
}
void OnApplicationQuit()
{
    // Publish the shutdown flag with a volatile store, then flush pending log
    // output. Order matters: the flag must be visible to the logging thread
    // before/while the flush runs.
    // NOTE(review): presumably the logger thread polls _quitThread to exit its
    // loop and OtherThreadFlushLogger drains what remains — confirm against the
    // logger implementation (not visible in this file chunk).
    Volatile.Write(ref _quitThread, true);
    OtherThreadFlushLogger();
}
private ManualResetEvent GetOrCreateWaitHandle()
{
    // Lazily creates (or retrieves) the underlying ManualResetEvent, racing
    // safely against other creators and Dispose via a CAS-driven state machine
    // on _status. At the end of this method: _status is one of the
    // Status.HandleReady* values and the handle is returned, or
    // ObjectDisposedException is thrown.
    var spinWait = new SpinWait();
    while (true)
    {
        var status = (Status)Volatile.Read(ref _status);
        switch (status)
        {
            case Status.Disposed:
                // Disposed
                throw new ObjectDisposedException(nameof(ManualResetEventSlim));

            case Status.NotSet:
                // Indicate we will be creating the handle.
                // CompareExchange returns the previous value; equality with the
                // expected value means we won the race.
                status = (Status)Interlocked.CompareExchange(ref _status, (int)Status.HandleRequestedNotSet, (int)status);
                if (status == Status.NotSet)
                {
                    // Create the handle (unsignaled, matching the NotSet state)
                    var created = new ManualResetEvent(false);
                    // Publish the handle
                    Volatile.Write(ref _handle, created);
                    // Notify that the handle is ready
                    Volatile.Write(ref _status, (int)Status.HandleReadyNotSet);
                    // Return the handle we created
                    return (created);
                }
                // Must have been disposed, or another thread is creating the handle
                break;

            case Status.Set:
                // Indicate we will be creating the handle
                status = (Status)Interlocked.CompareExchange(ref _status, (int)Status.HandleRequestedSet, (int)status);
                if (status == Status.Set)
                {
                    // Create the handle (signaled, matching the Set state)
                    var created = new ManualResetEvent(true);
                    // Publish the handle
                    Volatile.Write(ref _handle, created);
                    // Notify that the handle is ready
                    Volatile.Write(ref _status, (int)Status.HandleReadySet);
                    // Return the handle we created
                    return (created);
                }
                // Must have been disposed, or another thread is creating the handle
                break;

            case Status.HandleRequestedNotSet:
            case Status.HandleRequestedSet:
                // Another thread is creating the wait handle; spin and re-read.
                break;

            case Status.HandleReadyNotSet:
            case Status.HandleReadySet:
                // The handle already exists.
                // Get the handle that is already created.
                var handle = Volatile.Read(ref _handle);
                if (handle != null)
                {
                    // Return it
                    return (handle);
                }
                // Probably Disposed — loop to observe the Disposed status.
                break;

            default:
                // Should not happen
                break;
        }
        spinWait.SpinOnce();
    }
}
private void ExitMyLock()
{
    // Releases the internal spin lock. The volatile (release) store guarantees
    // that all writes made inside the critical section are visible before the
    // lock is observed as free by other threads.
    Debug.Assert(_myLock != 0, "Exiting spin lock that is not held");
    Volatile.Write(ref _myLock, 0);
}
/// <summary>
/// Writes a <see cref="ulong"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref ulong address, ulong value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes a <see cref="UIntPtr"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref UIntPtr address, UIntPtr value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes a <see cref="float"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref float address, float value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes a <see cref="uint"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref uint address, uint value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes an object reference to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref object address, object value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes an <see cref="sbyte"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref sbyte address, sbyte value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes a <see cref="short"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref short address, short value)
{
    Volatile.Write(ref address, value);
}
/// <summary>
/// Writes a <see cref="double"/> to <paramref name="address"/> with release
/// semantics: earlier memory operations cannot be reordered past this store.
/// </summary>
public static void VolatileWrite(ref double address, double value)
{
    Volatile.Write(ref address, value);
}
public bool LocalFindAndPop(IThreadPoolWorkItem obj)
{
    // Removes the specific work item 'obj' from this thread's local queue.
    // Returns true if it was found and removed; false if it was not present
    // (e.g. already stolen by another thread).

    // Fast path: check the tail. If equal, we can skip the lock.
    if (m_array[(m_tailIndex - 1) & m_mask] == obj)
    {
        IThreadPoolWorkItem unused;
        if (LocalPop(out unused))
        {
            Debug.Assert(unused == obj);
            return (true);
        }
        return (false);
    }

    // Else, do an O(N) search for the work item. The theory of work stealing and our
    // inlining logic is that most waits will happen on recently queued work. And
    // since recently queued work will be close to the tail end (which is where we
    // begin our search), we will likely find it quickly. In the worst case, we
    // will traverse the whole local queue; this is typically not going to be a
    // problem (although degenerate cases are clearly an issue) because local work
    // queues tend to be somewhat shallow in length, and because if we fail to find
    // the work item, we are about to block anyway (which is very expensive).
    for (int i = m_tailIndex - 2; i >= m_headIndex; i--)
    {
        if (m_array[i & m_mask] == obj)
        {
            // If we found the element, block out steals to avoid interference.
            // @TODO: optimize away the lock?
            bool lockTaken = false;
            try
            {
                m_foreignLock.Enter(ref lockTaken);

                // If we lost the race, bail.
                if (m_array[i & m_mask] == null)
                {
                    return (false);
                }

                // Otherwise, null out the element.
                Volatile.Write(ref m_array[i & m_mask], null);

                // And then check to see if we can fix up the indexes (if we're at
                // the edge). If we can't, we just leave nulls in the array and they'll
                // get filtered out eventually (but may lead to superfluous resizing).
                // NOTE(review): 'i' starts at m_tailIndex - 2 and only decreases, so
                // 'i == m_tailIndex' appears unreachable here — confirm whether this
                // branch is intentional dead code kept for symmetry.
                if (i == m_tailIndex)
                {
                    m_tailIndex -= 1;
                }
                else if (i == m_headIndex)
                {
                    m_headIndex += 1;
                }
                return (true);
            }
            finally
            {
                if (lockTaken)
                {
                    m_foreignLock.Exit(false);
                }
            }
        }
    }
    return (false);
}
public void LocalPush(IThreadPoolWorkItem obj)
{
    // Pushes a work item onto the tail of this thread's local queue.
    // Fast path avoids the lock when there is clearly room; otherwise the
    // foreign lock is taken to coordinate with stealing threads, growing the
    // circular array if it is full.
    int tail = m_tailIndex;

    // We're going to increment the tail; if we'll overflow, then we need to reset our counts
    if (tail == int.MaxValue)
    {
        bool lockTaken = false;
        try
        {
            m_foreignLock.Enter(ref lockTaken);

            if (m_tailIndex == int.MaxValue)
            {
                //
                // Rather than resetting to zero, we'll just mask off the bits we don't care about.
                // This way we don't need to rearrange the items already in the queue; they'll be found
                // correctly exactly where they are. One subtlety here is that we need to make sure that
                // if head is currently < tail, it remains that way. This happens to just fall out from
                // the bit-masking, because we only do this if tail == int.MaxValue, meaning that all
                // bits are set, so all of the bits we're keeping will also be set. Thus it's impossible
                // for the head to end up > than the tail, since you can't set any more bits than all of
                // them.
                //
                m_headIndex = m_headIndex & m_mask;
                m_tailIndex = tail = m_tailIndex & m_mask;
                Debug.Assert(m_headIndex <= m_tailIndex);
            }
        }
        finally
        {
            if (lockTaken)
            {
                m_foreignLock.Exit(true);
            }
        }
    }

    // When there are at least 2 elements' worth of space, we can take the fast path.
    if (tail < m_headIndex + m_mask)
    {
        // Publish the item before advancing the tail so stealers never observe
        // a slot within bounds that has not been written yet.
        Volatile.Write(ref m_array[tail & m_mask], obj);
        m_tailIndex = tail + 1;
    }
    else
    {
        // We need to contend with foreign pops, so we lock.
        bool lockTaken = false;
        try
        {
            m_foreignLock.Enter(ref lockTaken);

            int head = m_headIndex;
            int count = m_tailIndex - m_headIndex;

            // If the queue is full (only one slot of slack left), grow it;
            // otherwise there is still space and we just add the element below.
            if (count >= m_mask)
            {
                // We're full; expand the queue by doubling its size.
                IThreadPoolWorkItem[] newArray = new IThreadPoolWorkItem[m_array.Length << 1];
                for (int i = 0; i < m_array.Length; i++)
                {
                    newArray[i] = m_array[(i + head) & m_mask];
                }

                // Reset the field values, incl. the mask.
                m_array = newArray;
                m_headIndex = 0;
                m_tailIndex = tail = count;
                m_mask = (m_mask << 1) | 1;
            }

            Volatile.Write(ref m_array[tail & m_mask], obj);
            m_tailIndex = tail + 1;
        }
        finally
        {
            if (lockTaken)
            {
                m_foreignLock.Exit(false);
            }
        }
    }
}
private static void WorkerThreadStart()
{
    // Entry point for a thread-pool worker thread: repeatedly wait on the LIFO
    // semaphore for work requests and dispatch work items; when the wait times
    // out, retire this thread from the pool's thread counts (via a CAS retry
    // loop) and exit.
    Thread.CurrentThread.SetThreadPoolWorkerThreadName();
    PortableThreadPool threadPoolInstance = ThreadPoolInstance;

    if (PortableThreadPoolEventSource.Log.IsEnabled(EventLevel.Informational, PortableThreadPoolEventSource.Keywords.ThreadingKeyword))
    {
        PortableThreadPoolEventSource.Log.ThreadPoolWorkerThreadStart(
            (uint)threadPoolInstance._separated.counts.VolatileRead().NumExistingThreads);
    }

    LowLevelLock hillClimbingThreadAdjustmentLock = threadPoolInstance._hillClimbingThreadAdjustmentLock;
    LowLevelLifoSemaphore semaphore = s_semaphore;

    while (true)
    {
        bool spinWait = true;
        while (semaphore.Wait(ThreadPoolThreadTimeoutMs, spinWait))
        {
            bool alreadyRemovedWorkingWorker = false;
            while (TakeActiveRequest(threadPoolInstance))
            {
                // Record when work was last dequeued (used by the pool to detect stalls).
                Volatile.Write(ref threadPoolInstance._separated.lastDequeueTime, Environment.TickCount);
                if (!ThreadPoolWorkQueue.Dispatch())
                {
                    // ShouldStopProcessingWorkNow() caused the thread to stop processing work, and it would have
                    // already removed this working worker in the counts. This typically happens when hill climbing
                    // decreases the worker thread count goal.
                    alreadyRemovedWorkingWorker = true;
                    break;
                }
            }

            // Don't spin-wait on the semaphore next time if the thread was actively stopped from processing work,
            // as it's unlikely that the worker thread count goal would be increased again so soon afterwards that
            // the semaphore would be released within the spin-wait window
            spinWait = !alreadyRemovedWorkingWorker;

            if (!alreadyRemovedWorkingWorker)
            {
                // If we woke up but couldn't find a request, or ran out of work items to process, we need to update
                // the number of working workers to reflect that we are done working for now
                RemoveWorkingWorker(threadPoolInstance);
            }
        }

        hillClimbingThreadAdjustmentLock.Acquire();
        try
        {
            // At this point, the thread's wait timed out. We are shutting down this thread.
            // We are going to decrement the number of existing threads to no longer include this one
            // and then change the max number of threads in the thread pool to reflect that we don't need as many
            // as we had. Finally, we are going to tell hill climbing that we changed the max number of threads.
            ThreadCounts counts = threadPoolInstance._separated.counts.VolatileRead();
            while (true)
            {
                // Since this thread is currently registered as an existing thread, if more work comes in meanwhile,
                // this thread would be expected to satisfy the new work. Ensure that NumExistingThreads is not
                // decreased below NumProcessingWork, as that would be indicative of such a case.
                short numExistingThreads = counts.NumExistingThreads;
                if (numExistingThreads <= counts.NumProcessingWork)
                {
                    // In this case, enough work came in that this thread should not time out and should go back to work.
                    break;
                }

                ThreadCounts newCounts = counts;
                newCounts.SubtractNumExistingThreads(1);
                short newNumExistingThreads = (short)(numExistingThreads - 1);
                // Clamp the goal between the configured minimum and the new number of existing threads.
                short newNumThreadsGoal = Math.Max(threadPoolInstance._minThreads, Math.Min(newNumExistingThreads, newCounts.NumThreadsGoal));
                newCounts.NumThreadsGoal = newNumThreadsGoal;

                ThreadCounts oldCounts = threadPoolInstance._separated.counts.InterlockedCompareExchange(newCounts, counts);
                if (oldCounts == counts)
                {
                    // CAS succeeded: the counts now exclude this thread; report and exit.
                    HillClimbing.ThreadPoolHillClimber.ForceChange(newNumThreadsGoal, HillClimbing.StateOrTransition.ThreadTimedOut);
                    if (PortableThreadPoolEventSource.Log.IsEnabled(EventLevel.Informational, PortableThreadPoolEventSource.Keywords.ThreadingKeyword))
                    {
                        PortableThreadPoolEventSource.Log.ThreadPoolWorkerThreadStop((uint)newNumExistingThreads);
                    }
                    return;
                }

                // CAS lost a race; retry with the freshly observed counts.
                counts = oldCounts;
            }
        }
        finally
        {
            hillClimbingThreadAdjustmentLock.Release();
        }
    }
}