// Get all work items. Called by TaskScheduler in its debugger hooks.
internal IEnumerable<ISpreadsThreadPoolWorkItem> GetQueuedWorkItems()
{
    // Enumerate the global queue.
    foreach (var workItem in workQueue.workItems)
    {
        yield return workItem;
    }

    // Enumerate each local work-stealing queue.
    foreach (ThreadPoolWorkQueue.WorkStealingQueue wsq in ThreadPoolWorkQueue.WorkStealingQueueList.Queues)
    {
        if (wsq != null && wsq.m_array != null)
        {
            ISpreadsThreadPoolWorkItem[] items = wsq.m_array;
            for (int i = 0; i < items.Length; i++)
            {
                ISpreadsThreadPoolWorkItem item = items[i];
                if (item != null)
                {
                    yield return item;
                }
            }
        }
    }
}
public void QueueCompletableItem(ISpreadsThreadPoolWorkItem workItem, bool preferLocal)
{
    if (workItem == null)
    {
        ThrowWorkItemIsNull();
    }

    workQueue.Enqueue(workItem, forceGlobal: !preferLocal);
}
public void UnsafeQueueCompletableItem(ISpreadsThreadPoolWorkItem workItem, bool preferLocal)
{
    if (workItem == null)
    {
        ThrowHelper.ThrowArgumentNullException(nameof(workItem));
    }

    workQueue.Enqueue(workItem, forceGlobal: !preferLocal);
}
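// Usage sketch (an assumption, not from the source): how a caller might implement
// ISpreadsThreadPoolWorkItem and queue it. The Execute() member, the PrintWorkItem type,
// and the `pool` instance are hypothetical illustration names; only
// QueueCompletableItem/UnsafeQueueCompletableItem above are from the source. The two
// overloads differ only in how a null workItem is reported; both map preferLocal onto the
// queue's forceGlobal flag, so preferLocal: true targets the calling thread's local
// work-stealing queue when one exists.
internal sealed class PrintWorkItem : ISpreadsThreadPoolWorkItem
{
    private readonly string _message;

    public PrintWorkItem(string message)
    {
        _message = message;
    }

    // Assumed here to be the interface's single callback, invoked when the pool runs the item.
    public void Execute()
    {
        System.Console.WriteLine(_message);
    }
}

// pool.QueueCompletableItem(new PrintWorkItem("hello"), preferLocal: true);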
public ISpreadsThreadPoolWorkItem TrySteal(ref bool missedSteal)
{
    while (true)
    {
        if (CanSteal)
        {
            bool taken = false;
            try
            {
                m_foreignLock.TryEnter(ref taken);
                if (taken)
                {
                    // Increment head, and ensure read of tail doesn't move before it (fence).
                    int head = m_headIndex;
                    Interlocked.Exchange(ref m_headIndex, head + 1);

                    if (head < m_tailIndex)
                    {
                        int idx = head & m_mask;
                        ISpreadsThreadPoolWorkItem obj = Volatile.Read(ref m_array[idx]);

                        // Check for nulls in the array.
                        if (obj == null)
                        {
                            continue;
                        }

                        m_array[idx] = null;
                        return obj;
                    }
                    else
                    {
                        // Failed, restore head.
                        m_headIndex = head;
                    }
                }
            }
            finally
            {
                if (taken)
                {
                    m_foreignLock.Exit(useMemoryBarrier: false);
                }
            }

            missedSteal = true;
        }

        return null;
    }
}
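// Illustrative sketch (not from the source): why `head & m_mask` addresses the array in
// TrySteal and the pop paths below. Head and tail are monotonically growing counters mapped
// into a power-of-two array by masking, so wrap-around costs a single AND and no modulo.
// The capacity of 8 is a value invented for this demo.
internal static class MaskIndexingDemo
{
    internal static void Run()
    {
        const int capacity = 8;          // must be a power of two
        const int mask = capacity - 1;   // plays the role of m_mask

        for (int head = 5; head < 12; head++)
        {
            // Prints 5, 6, 7, 0, 1, 2, 3: the counter wraps into the array automatically.
            System.Console.WriteLine(head & mask);
        }
    }
}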
internal IEnumerable<ISpreadsThreadPoolWorkItem> GetLocallyQueuedWorkItems()
{
    ThreadPoolWorkQueue.WorkStealingQueue wsq = ThreadPoolWorkQueue.ThreadPoolWorkQueueThreadLocals.threadLocals.workStealingQueue;
    if (wsq != null && wsq.m_array != null)
    {
        ISpreadsThreadPoolWorkItem[] items = wsq.m_array;
        for (int i = 0; i < items.Length; i++)
        {
            ISpreadsThreadPoolWorkItem item = items[i];
            if (item != null)
            {
                yield return item;
            }
        }
    }
}
public void Enqueue(ISpreadsThreadPoolWorkItem callback, bool forceGlobal)
{
    ThreadPoolWorkQueueThreadLocals tl = null;
    if (!forceGlobal)
    {
        tl = ThreadPoolWorkQueueThreadLocals.threadLocals;
    }

    if (null != tl)
    {
        tl.workStealingQueue.LocalPush(callback);
    }
    else
    {
        workItems.Enqueue(callback);
    }

    EnsureThreadRequested();
}
internal bool LocalFindAndPop(ISpreadsThreadPoolWorkItem callback)
{
    ThreadPoolWorkQueueThreadLocals tl = ThreadPoolWorkQueueThreadLocals.threadLocals;
    return tl != null && tl.workStealingQueue.LocalFindAndPop(callback);
}
private ISpreadsThreadPoolWorkItem LocalPopCore()
{
    while (true)
    {
        int tail = m_tailIndex;
        if (m_headIndex >= tail)
        {
            return null;
        }

        // Decrement the tail using a fence to ensure the subsequent read doesn't come before it.
        tail -= 1;
        Interlocked.Exchange(ref m_tailIndex, tail);

        // If there is no interaction with a take, we can head down the fast path.
        if (m_headIndex <= tail)
        {
            int idx = tail & m_mask;
            ISpreadsThreadPoolWorkItem obj = Volatile.Read(ref m_array[idx]);

            // Check for nulls in the array.
            if (obj == null)
            {
                continue;
            }

            m_array[idx] = null;
            return obj;
        }
        else
        {
            // Interaction with takes: 0 or 1 elements left.
            bool lockTaken = false;
            try
            {
                m_foreignLock.Enter(ref lockTaken);
                if (m_headIndex <= tail)
                {
                    // Element still available. Take it.
                    int idx = tail & m_mask;
                    ISpreadsThreadPoolWorkItem obj = Volatile.Read(ref m_array[idx]);

                    // Check for nulls in the array.
                    if (obj == null)
                    {
                        continue;
                    }

                    m_array[idx] = null;
                    return obj;
                }
                else
                {
                    // We lost the race and the element was stolen; restore the tail.
                    m_tailIndex = tail + 1;
                    return null;
                }
            }
            finally
            {
                if (lockTaken)
                {
                    m_foreignLock.Exit(useMemoryBarrier: false);
                }
            }
        }
    }
}
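// Illustrative sketch (not from the source): which branch LocalPopCore takes after the
// speculative tail decrement. head <= tail means the slot at tail is still the owner's;
// head > tail means a stealer's speculative head bump crossed us, so the owner re-checks
// under m_foreignLock (the stealer may still restore the head if its own check fails).
// The index values below are invented for the demo.
internal static class PopBranchDemo
{
    private static string Branch(int head, int tailAfterDecrement)
    {
        return head <= tailAfterDecrement
            ? "fast path: take the element"
            : "locked path: re-check under m_foreignLock";
    }

    internal static void Run()
    {
        System.Console.WriteLine(Branch(head: 4, tailAfterDecrement: 6)); // several items left
        System.Console.WriteLine(Branch(head: 4, tailAfterDecrement: 4)); // exactly one item, no crossing
        System.Console.WriteLine(Branch(head: 4, tailAfterDecrement: 3)); // crossed by a concurrent steal
    }
}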
public void LocalPush(ISpreadsThreadPoolWorkItem obj)
{
    int tail = m_tailIndex;

    // We're going to increment the tail; if we'll overflow, then we need to reset our counts.
    if (tail == int.MaxValue)
    {
        bool lockTaken = false;
        try
        {
            m_foreignLock.Enter(ref lockTaken);

            if (m_tailIndex == int.MaxValue)
            {
                // Rather than resetting to zero, we'll just mask off the bits we don't care about.
                // This way we don't need to rearrange the items already in the queue; they'll be found
                // correctly exactly where they are. One subtlety here is that we need to make sure that
                // if head is currently < tail, it remains that way. This happens to just fall out from
                // the bit-masking, because we only do this if tail == int.MaxValue, meaning that all
                // bits are set, so all of the bits we're keeping will also be set. Thus it's impossible
                // for the head to end up greater than the tail, since you can't set any more bits than
                // all of them.
                m_headIndex = m_headIndex & m_mask;
                m_tailIndex = tail = m_tailIndex & m_mask;
                Debug.Assert(m_headIndex <= m_tailIndex);
            }
        }
        finally
        {
            if (lockTaken)
            {
                m_foreignLock.Exit(useMemoryBarrier: true);
            }
        }
    }

    // When there are at least 2 elements' worth of space, we can take the fast path.
    if (tail < m_headIndex + m_mask)
    {
        Volatile.Write(ref m_array[tail & m_mask], obj);
        m_tailIndex = tail + 1;
    }
    else
    {
        // We need to contend with foreign pops, so we lock.
        bool lockTaken = false;
        try
        {
            m_foreignLock.Enter(ref lockTaken);

            int head = m_headIndex;
            int count = m_tailIndex - m_headIndex;

            // If there is still space (one left), just add the element.
            if (count >= m_mask)
            {
                // We're full; expand the queue by doubling its size.
                var newArray = new ISpreadsThreadPoolWorkItem[m_array.Length << 1];
                for (int i = 0; i < m_array.Length; i++)
                {
                    newArray[i] = m_array[(i + head) & m_mask];
                }

                // Reset the field values, incl. the mask.
                m_array = newArray;
                m_headIndex = 0;
                m_tailIndex = tail = count;
                m_mask = (m_mask << 1) | 1;
            }

            Volatile.Write(ref m_array[tail & m_mask], obj);
            m_tailIndex = tail + 1;
        }
        finally
        {
            if (lockTaken)
            {
                m_foreignLock.Exit(useMemoryBarrier: false);
            }
        }
    }
}
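// Illustrative sketch (not from the source): why masking at tail == int.MaxValue preserves
// both ordering and count. int.MaxValue has every low bit set, so tail & m_mask equals
// m_mask, the largest value any masked head can take; head <= tail therefore survives the
// reset. The mask of 63 (a 64-slot queue) is a value invented for this demo.
internal static class OverflowResetDemo
{
    internal static void Run()
    {
        const int mask = 63;
        int tail = int.MaxValue;          // about to overflow
        int head = int.MaxValue - 10;     // queue currently holds 10 items

        int newHead = head & mask;        // 53
        int newTail = tail & mask;        // 63 == mask, the maximum masked value

        // Prints "53 <= 63, count = 10": relative order and item count are unchanged.
        System.Console.WriteLine($"{newHead} <= {newTail}, count = {newTail - newHead}");
    }
}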