// Dispatches a callback onto the I/O thread scheduler without flowing
// ExecutionContext, routing to the low-priority queue when requested.
private static void ScheduleCallback(Action<object> callback, object state, bool lowPriority)
{
    Fx.Assert(callback != null, "Cannot schedule a null callback");

    // Guard-clause shape: handle the common (normal-priority) path first.
    if (!lowPriority)
    {
        IoThreadScheduler.ScheduleCallbackNoFlow(callback, state);
        return;
    }

    IoThreadScheduler.ScheduleCallbackLowPriNoFlow(callback, state);
}
// Claims the next slot in the low-priority ring buffer and tries to enqueue the
// work item there. Returns true if the item was queued into the claimed slot.
// Also un-idles the scheduler (and takes responsibility for posting the
// overlapped) when this is the first pending low-priority item.
private bool ScheduleCallbackLowPriHelper(Action<object> callback, object state)
{
    // See if there's a free slot. Fortunately the overflow bit is simply lost.
    int slot = Interlocked.Add(ref _headTailLowPri, Bits.HiOne);

    // If this is the first low-priority work item, make sure we're not idle.
    bool wasIdle = false;
    if (Bits.CountNoIdle(slot) == 1)
    {
        // Since Interlocked calls create a full thread barrier, this will read the value of headTail
        // at the time of the Interlocked.Add or later. The invariant is that the IOTS is unidle at some
        // point after the Add.
        int ht = _headTail;

        if (Bits.Count(ht) == -1)
        {
            // The main (high-priority) queue reads as idle; try to bump it out of the
            // idle state. Only the thread whose CompareExchange succeeds owns the Post.
            // Use a temporary local here to store the result of the Interlocked.CompareExchange. This
            // works around a codegen bug in the 32-bit JIT (TFS 749182).
            int interlockedResult = Interlocked.CompareExchange(ref _headTail, ht + Bits.HiOne, ht);
            if (ht == interlockedResult)
            {
                wasIdle = true;
            }
        }
    }

    // Check if we wrapped *around* to empty.
    if (Bits.CountNoIdle(slot) == 0)
    {
        // Since the capacity is limited to 32k, this means we wrapped the array at least twice. That's bad
        // because headTail no longer knows how many work items we have - it looks like zero. This can
        // only happen if 32k threads come through here while one is swapped out.
        throw Fx.AssertAndThrowFatal("Low-priority Head/Tail overflow!");
    }

    // Enqueue into the claimed slot; 'wrapped' signals the ring filled up and
    // a larger scheduler should replace the current one.
    bool queued = _slotsLowPri[slot >> Bits.HiShift & SlotMaskLowPri].TryEnqueueWorkItem(callback, state, out bool wrapped);

    if (wrapped)
    {
        // Grow only the low-priority ring (capped at MaximumCapacity); the
        // high-priority ring keeps its current size.
        var next = new IoThreadScheduler(_slots.Length, Math.Min(_slotsLowPri.Length * 2, MaximumCapacity));
        Interlocked.CompareExchange(ref current, next, this);
    }

    if (wasIdle)
    {
        // It's our responsibility to kick off the overlapped.
        overlapped.Post(this);
    }

    return(queued);
}
// Claims the next slot in the (high-priority) ring buffer and tries to enqueue
// the work item there. Returns true if the item was queued into the claimed
// slot. If the scheduler was idle, this thread takes responsibility for
// posting the overlapped to restart dispatching.
private bool ScheduleCallbackHelper(Action<object> callback, object state)
{
    // See if there's a free slot. Fortunately the overflow bit is simply lost.
    int slot = Interlocked.Add(ref _headTail, Bits.HiOne);

    // If this brings us to 'empty', then the IOTS used to be 'idle'. Remember that, and increment
    // again. This doesn't need to be in a loop, because until we call Post(), we can't go back to idle.
    bool wasIdle = Bits.Count(slot) == 0;
    if (wasIdle)
    {
        slot = Interlocked.Add(ref _headTail, Bits.HiOne);

        Fx.Assert(Bits.Count(slot) != 0, "IOTS went idle when it shouldn't have.");
    }

    // Check if we wrapped *around* to idle.
    if (Bits.Count(slot) == -1)
    {
        // Since the capacity is limited to 32k, this means we wrapped the array at least twice. That's bad
        // because headTail no longer knows how many work items we have - it looks like zero. This can
        // only happen if 32k threads come through here while one is swapped out.
        throw Fx.AssertAndThrowFatal("Head/Tail overflow!");
    }

    // Enqueue into the claimed slot; 'wrapped' signals the ring filled up and
    // a larger scheduler should replace the current one.
    bool queued = _slots[slot >> Bits.HiShift & SlotMask].TryEnqueueWorkItem(callback, state, out bool wrapped);

    if (wrapped)
    {
        // Wrapped around the circular buffer. Create a new, bigger IoThreadScheduler.
        // Only the high-priority ring grows (capped at MaximumCapacity); the
        // low-priority ring keeps its current size.
        var next = new IoThreadScheduler(Math.Min(_slots.Length * 2, MaximumCapacity), _slotsLowPri.Length);
        Interlocked.CompareExchange(ref current, next, this);
    }

    if (wasIdle)
    {
        // It's our responsibility to kick off the overlapped.
        overlapped.Post(this);
    }

    return(queued);
}