/// <summary>
/// Enqueues a low-priority work item. Returns true if the item was queued into the
/// low-priority ring; false means the caller must handle the miss (slot was unusable).
/// </summary>
/// <param name="callback">Work to run on an I/O thread (must not be null).</param>
/// <param name="state">Opaque state passed to <paramref name="callback"/>.</param>
private bool ScheduleCallbackLowPriHelper(Action<object> callback, object state)
{
    // Claim the next low-priority slot by bumping the head word (65536 == one unit of the
    // high 16 bits). Overflow of the add is simply lost, which is fine.
    int slot = Interlocked.Add(ref this.headTailLowPri, 65536);

    // If this is the first low-priority item, make sure the scheduler isn't idle.
    bool wasIdle = false;
    if (IOThreadScheduler.Bits.CountNoIdle(slot) == 1)
    {
        // The Interlocked.Add above is a full barrier, so this reads headTail at or after
        // the Add. The invariant is that the IOTS is un-idle at some point after the Add.
        int ht = this.headTail;
        if (IOThreadScheduler.Bits.Count(ht) == -1)
        {
            // Store the CompareExchange result in a local before comparing. The canonical
            // implementation does this to work around a 32-bit JIT codegen bug (TFS 749182);
            // restored here for consistency with that version.
            int interlockedResult = Interlocked.CompareExchange(ref this.headTail, ht + 65536, ht);
            if (ht == interlockedResult)
            {
                wasIdle = true;
            }
        }
    }

    // Check whether we wrapped *around* to empty.
    if (IOThreadScheduler.Bits.CountNoIdle(slot) == 0)
    {
        // We lapped the ring: headTailLowPri no longer knows how many items exist.
        throw Fx.AssertAndThrowFatal("Low-priority Head/Tail overflow!");
    }

    bool wrapped;
    bool queued = this.slotsLowPri[slot >> 16 & this.SlotMaskLowPri].TryEnqueueWorkItem(callback, state, out wrapped);

    if (wrapped)
    {
        // Ring wrapped; publish a scheduler with a doubled low-priority ring (capped at 32768).
        IOThreadScheduler next = new IOThreadScheduler(this.slots.Length, Math.Min(this.slotsLowPri.Length * 2, 32768));
        Interlocked.CompareExchange<IOThreadScheduler>(ref IOThreadScheduler.current, next, this);
    }

    if (wasIdle)
    {
        // We transitioned the scheduler out of idle, so it's our job to kick off the overlapped.
        this.overlapped.Post(this);
    }

    return queued;
}
/// <summary>
/// Overlapped completion callback: drains and runs queued work items for the scheduler
/// that posted this overlapped. errorCode/numBytes/nativeOverlappedCallback are unused here.
/// </summary>
void IOCallback(uint errorCode, uint numBytes, NativeOverlapped *nativeOverlappedCallback)
{
    // Unhook the IOThreadScheduler ASAP to prevent it from leaking.
    IOThreadScheduler iots = this.scheduler;
    this.scheduler = null;
    Fx.Assert(iots != null, "Overlapped completed without a scheduler.");

    Action <object> callback;
    object state;
    try { }
    finally
    {
        // Called in a finally because it needs to run uninterrupted in order to maintain consistency.
        // (An empty try with the work in finally shields the call from thread aborts mid-operation.)
        iots.CompletionCallback(out callback, out state);
    }

    bool found = true;
    while (found)
    {
        // The callback can be null if synchronization misses result in unsuable slots. Keep going onto
        // the next slot in such cases until there are no more slots.
        if (callback != null)
        {
            callback(state);
        }
        try { }
        finally
        {
            // Called in a finally because it needs to run uninterrupted in order to maintain consistency.
            found = iots.TryCoalesce(out callback, out state);
        }
    }
}
/// <summary>
/// Overlapped completion callback: drains and runs queued work items for the scheduler
/// that posted this overlapped. errorCode/numBytes/nativeOverlappedCallback are unused here.
/// </summary>
private unsafe void IOCallback(uint errorCode, uint numBytes, NativeOverlapped* nativeOverlappedCallback)
{
    // Unhook the IOThreadScheduler ASAP to prevent it from leaking.
    IOThreadScheduler iots = this.scheduler;
    this.scheduler = null;
    // Restored from the canonical version: a null scheduler here means Post was never called.
    Fx.Assert(iots != null, "Overlapped completed without a scheduler.");

    Action<object> callback = null;
    object state = null;
    try { }
    finally
    {
        // Empty try + work in finally: the call runs uninterrupted (shielded from thread
        // aborts) so the scheduler's internal state stays consistent.
        iots.CompletionCallback(out callback, out state);
    }

    bool found = true;
    while (found)
    {
        // The callback can be null when synchronization misses leave unusable slots;
        // keep draining subsequent slots until none remain.
        if (callback != null)
        {
            callback(state);
        }
        try { }
        finally
        {
            // Same finally pattern: must run uninterrupted to maintain consistency.
            found = iots.TryCoalesce(out callback, out state);
        }
    }
}
/// <summary>
/// Enqueues a normal-priority work item. Returns true if the item was queued into the
/// ring; false means the caller must handle the miss (slot was unusable).
/// </summary>
/// <param name="callback">Work to run on an I/O thread (must not be null).</param>
/// <param name="state">Opaque state passed to <paramref name="callback"/>.</param>
private bool ScheduleCallbackHelper(Action<object> callback, object state)
{
    // Claim the next slot by bumping the head word (65536 == one unit of the high 16 bits).
    // Overflow of the add is simply lost, which is fine.
    int slot = Interlocked.Add(ref this.headTail, 65536);

    // Count == 0 means the scheduler used to be idle: remember that and increment again.
    // No loop is needed because until Post() runs we cannot go back to idle.
    bool wasIdle = IOThreadScheduler.Bits.Count(slot) == 0;
    if (wasIdle)
    {
        slot = Interlocked.Add(ref this.headTail, 65536);
        // Restored from the canonical version: catches the impossible idle transition in debug.
        Fx.Assert(IOThreadScheduler.Bits.Count(slot) != 0, "IOTS went idle when it shouldn't have.");
    }

    // Check whether we wrapped *around* to idle.
    if (IOThreadScheduler.Bits.Count(slot) == -1)
    {
        // We lapped the ring: headTail no longer knows how many items exist.
        throw Fx.AssertAndThrowFatal("Head/Tail overflow!");
    }

    bool wrapped;
    bool queued = this.slots[slot >> 16 & this.SlotMask].TryEnqueueWorkItem(callback, state, out wrapped);

    if (wrapped)
    {
        // Ring wrapped; publish a scheduler with a doubled ring (capped at 32768).
        IOThreadScheduler next = new IOThreadScheduler(Math.Min(this.slots.Length * 2, 32768), this.slotsLowPri.Length);
        Interlocked.CompareExchange<IOThreadScheduler>(ref IOThreadScheduler.current, next, this);
    }

    if (wasIdle)
    {
        // We transitioned the scheduler out of idle, so it's our job to kick off the overlapped.
        this.overlapped.Post(this);
    }

    return queued;
}
/// <summary>
/// Associates this overlapped with a scheduler and queues the native overlapped
/// to the I/O thread pool; IOCallback will run when it completes.
/// </summary>
/// <param name="iots">The scheduler whose work this overlapped will drain (must not be null).</param>
public void Post(IOThreadScheduler iots)
{
    // Debug-only sanity checks: an overlapped must not be double-posted, and must carry a scheduler.
    Fx.Assert(this.scheduler == null, "Post called on an overlapped that is already posted.");
    Fx.Assert(iots != null, "Post called with a null scheduler.");

    this.scheduler = iots;
    ThreadPool.UnsafeQueueNativeOverlapped(this.nativeOverlapped);
}
/// <summary>
/// Dispatches a callback to the priority-appropriate no-flow scheduling path.
/// </summary>
/// <param name="callback">Work to schedule (must not be null).</param>
/// <param name="state">Opaque state passed to <paramref name="callback"/>.</param>
/// <param name="lowPriority">True to use the low-priority queue.</param>
private static void ScheduleCallback(Action<object> callback, object state, bool lowPriority)
{
    // Restored from the canonical version: catch null callbacks at the entry point in debug.
    Fx.Assert(callback != null, "Cannot schedule a null callback");
    if (lowPriority)
    {
        IOThreadScheduler.ScheduleCallbackLowPriNoFlow(callback, state);
    }
    else
    {
        IOThreadScheduler.ScheduleCallbackNoFlow(callback, state);
    }
}
/// <summary>
/// Records the wait outcome and completes this waiter, either inline on the caller's
/// thread or hopped onto an I/O thread via the scheduler.
/// </summary>
/// <param name="completedSynchronously">True to complete inline; false to complete asynchronously.</param>
/// <param name="result">The outcome to store before completing.</param>
public void Signal(bool completedSynchronously, bool result)
{
    // Capture the outcome first so it is visible to whoever observes completion.
    this.result = result;

    if (completedSynchronously)
    {
        base.Complete(true);
    }
    else
    {
        // Schedule completion so it doesn't run on the signaler's stack.
        IOThreadScheduler.ScheduleCallbackNoFlow(
            (object o) => ((AsyncSemaphore.SemaphoreWaiter)o).Complete(false), this);
    }
}
/// <summary>
/// Enqueues a low-priority work item. Returns true if the item was queued; false means
/// the slot was unusable and the caller must retry/handle the miss.
/// </summary>
bool ScheduleCallbackLowPriHelper(Action <object> callback, object state)
{
    // See if there's a free slot. Fortunately the overflow bit is simply lost.
    int slot = Interlocked.Add(ref this.headTailLowPri, Bits.HiOne);

    // If this is the first low-priority work item, make sure we're not idle.
    bool wasIdle = false;
    if (Bits.CountNoIdle(slot) == 1)
    {
        // Since Interlocked calls create a full thread barrier, this will read the value of headTail
        // at the time of the Interlocked.Add or later. The invariant is that the IOTS is unidle at some
        // point after the Add.
        int ht = this.headTail;

        if (Bits.Count(ht) == -1)
        {
            // Use a temporary local here to store the result of the Interlocked.CompareExchange. This
            // works around a codegen bug in the 32-bit JIT (TFS 749182).
            int interlockedResult = Interlocked.CompareExchange(ref this.headTail, ht + Bits.HiOne, ht);
            if (ht == interlockedResult)
            {
                wasIdle = true;
            }
        }
    }

    // Check if we wrapped *around* to empty.
    if (Bits.CountNoIdle(slot) == 0)
    {
        // Since the capacity is limited to 32k, this means we wrapped the array at least twice. That's bad
        // because headTail no longer knows how many work items we have - it looks like zero. This can
        // only happen if 32k threads come through here while one is swapped out.
        throw Fx.AssertAndThrowFatal("Low-priority Head/Tail overflow!");
    }

    bool wrapped;
    bool queued = this.slotsLowPri[slot >> Bits.HiShift & SlotMaskLowPri].TryEnqueueWorkItem(
        callback, state, out wrapped);

    if (wrapped)
    {
        // Ring wrapped; publish a scheduler with a doubled low-priority ring (capped at MaximumCapacity).
        IOThreadScheduler next =
            new IOThreadScheduler(this.slots.Length, Math.Min(this.slotsLowPri.Length * 2, MaximumCapacity));
        Interlocked.CompareExchange <IOThreadScheduler>(ref IOThreadScheduler.current, next, this);
    }

    if (wasIdle)
    {
        // It's our responsibility to kick off the overlapped.
        this.overlapped.Post(this);
    }

    return(queued);
}
/// <summary>
/// Dispatches a callback to the priority-appropriate no-flow scheduling path.
/// </summary>
/// <param name="callback">Work to schedule (must not be null).</param>
/// <param name="state">Opaque state passed to <paramref name="callback"/>.</param>
/// <param name="lowPriority">True to use the low-priority queue.</param>
static void ScheduleCallback(Action<object> callback, object state, bool lowPriority)
{
    Fx.Assert(callback != null, "Cannot schedule a null callback");

    if (lowPriority)
    {
        IOThreadScheduler.ScheduleCallbackLowPriNoFlow(callback, state);
        return;
    }

    IOThreadScheduler.ScheduleCallbackNoFlow(callback, state);
}
/// <summary>
/// Enqueues a normal-priority work item. Returns true if the item was queued; false means
/// the slot was unusable and the caller must retry/handle the miss.
/// </summary>
bool ScheduleCallbackHelper(Action <object> callback, object state)
{
    // See if there's a free slot. Fortunately the overflow bit is simply lost.
    int slot = Interlocked.Add(ref this.headTail, Bits.HiOne);

    // If this brings us to 'empty', then the IOTS used to be 'idle'. Remember that, and increment
    // again. This doesn't need to be in a loop, because until we call Post(), we can't go back to idle.
    bool wasIdle = Bits.Count(slot) == 0;
    if (wasIdle)
    {
        slot = Interlocked.Add(ref this.headTail, Bits.HiOne);
        Fx.Assert(Bits.Count(slot) != 0, "IOTS went idle when it shouldn't have.");
    }

    // Check if we wrapped *around* to idle.
    if (Bits.Count(slot) == -1)
    {
        // Since the capacity is limited to 32k, this means we wrapped the array at least twice. That's bad
        // because headTail no longer knows how many work items we have - it looks like zero. This can
        // only happen if 32k threads come through here while one is swapped out.
        throw Fx.AssertAndThrowFatal("Head/Tail overflow!");
    }

    bool wrapped;
    bool queued = this.slots[slot >> Bits.HiShift & SlotMask].TryEnqueueWorkItem(callback, state, out wrapped);

    if (wrapped)
    {
        // Wrapped around the circular buffer. Create a new, bigger IOThreadScheduler.
        IOThreadScheduler next =
            new IOThreadScheduler(Math.Min(this.slots.Length * 2, MaximumCapacity), this.slotsLowPri.Length);
        Interlocked.CompareExchange <IOThreadScheduler>(ref IOThreadScheduler.current, next, this);
    }

    if (wasIdle)
    {
        // It's our responsibility to kick off the overlapped.
        this.overlapped.Post(this);
    }

    return(queued);
}
static IOThreadScheduler()
{
    // Seed the process-wide scheduler with 32 regular and 32 low-priority slots;
    // the schedule helpers publish a doubled replacement when a ring wraps.
    current = new IOThreadScheduler(32, 32);
}
/// <summary>
/// Associates this overlapped with a scheduler and queues the native overlapped
/// to the I/O thread pool; IOCallback will run when it completes.
/// </summary>
/// <param name="iots">The scheduler whose work this overlapped will drain (must not be null).</param>
public void Post(IOThreadScheduler iots)
{
    // Restored from the canonical version: debug checks against double-posting and a null scheduler.
    Fx.Assert(this.scheduler == null, "Post called on an overlapped that is already posted.");
    Fx.Assert(iots != null, "Post called with a null scheduler.");

    this.scheduler = iots;
    ThreadPool.UnsafeQueueNativeOverlapped(this.nativeOverlapped);
}
/// <summary>
/// Overlapped completion callback: drains and runs queued work items for the scheduler
/// that posted this overlapped. errorCode/numBytes/nativeOverlappedCallback are unused here.
/// </summary>
void IOCallback(uint errorCode, uint numBytes, NativeOverlapped* nativeOverlappedCallback)
{
    // Unhook the IOThreadScheduler ASAP to prevent it from leaking.
    IOThreadScheduler iots = this.scheduler;
    this.scheduler = null;
    Fx.Assert(iots != null, "Overlapped completed without a scheduler.");

    Action<object> callback;
    object state;
    try { }
    finally
    {
        // Called in a finally because it needs to run uninterrupted in order to maintain consistency.
        // (An empty try with the work in finally shields the call from thread aborts mid-operation.)
        iots.CompletionCallback(out callback, out state);
    }

    bool found = true;
    while (found)
    {
        // The callback can be null if synchronization misses result in unsuable slots. Keep going onto
        // the next slot in such cases until there are no more slots.
        if (callback != null)
        {
            callback(state);
        }
        try { }
        finally
        {
            // Called in a finally because it needs to run uninterrupted in order to maintain consistency.
            found = iots.TryCoalesce(out callback, out state);
        }
    }
}
/// <summary>
/// Enqueues a low-priority work item. Returns true if the item was queued; false means
/// the slot was unusable and the caller must retry/handle the miss.
/// </summary>
bool ScheduleCallbackLowPriHelper(Action<object> callback, object state)
{
    // See if there's a free slot. Fortunately the overflow bit is simply lost.
    int slot = Interlocked.Add(ref this.headTailLowPri, Bits.HiOne);

    // If this is the first low-priority work item, make sure we're not idle.
    bool wasIdle = false;
    if (Bits.CountNoIdle(slot) == 1)
    {
        // Since Interlocked calls create a full thread barrier, this will read the value of headTail
        // at the time of the Interlocked.Add or later. The invariant is that the IOTS is unidle at some
        // point after the Add.
        int ht = this.headTail;

        if (Bits.Count(ht) == -1)
        {
            // Use a temporary local here to store the result of the Interlocked.CompareExchange. This
            // works around a codegen bug in the 32-bit JIT (TFS 749182).
            int interlockedResult = Interlocked.CompareExchange(ref this.headTail, ht + Bits.HiOne, ht);
            if (ht == interlockedResult)
            {
                wasIdle = true;
            }
        }
    }

    // Check if we wrapped *around* to empty.
    if (Bits.CountNoIdle(slot) == 0)
    {
        // Since the capacity is limited to 32k, this means we wrapped the array at least twice. That's bad
        // because headTail no longer knows how many work items we have - it looks like zero. This can
        // only happen if 32k threads come through here while one is swapped out.
        throw Fx.AssertAndThrowFatal("Low-priority Head/Tail overflow!");
    }

    bool wrapped;
    bool queued = this.slotsLowPri[slot >> Bits.HiShift & SlotMaskLowPri].TryEnqueueWorkItem(
        callback, state, out wrapped);

    if (wrapped)
    {
        // Ring wrapped; publish a scheduler with a doubled low-priority ring (capped at MaximumCapacity).
        IOThreadScheduler next =
            new IOThreadScheduler(this.slots.Length, Math.Min(this.slotsLowPri.Length * 2, MaximumCapacity));
        Interlocked.CompareExchange<IOThreadScheduler>(ref IOThreadScheduler.current, next, this);
    }

    if (wasIdle)
    {
        // It's our responsibility to kick off the overlapped.
        this.overlapped.Post(this);
    }

    return queued;
}
/// <summary>
/// Enqueues a normal-priority work item. Returns true if the item was queued; false means
/// the slot was unusable and the caller must retry/handle the miss.
/// </summary>
bool ScheduleCallbackHelper(Action<object> callback, object state)
{
    // See if there's a free slot. Fortunately the overflow bit is simply lost.
    int slot = Interlocked.Add(ref this.headTail, Bits.HiOne);

    // If this brings us to 'empty', then the IOTS used to be 'idle'. Remember that, and increment
    // again. This doesn't need to be in a loop, because until we call Post(), we can't go back to idle.
    bool wasIdle = Bits.Count(slot) == 0;
    if (wasIdle)
    {
        slot = Interlocked.Add(ref this.headTail, Bits.HiOne);
        Fx.Assert(Bits.Count(slot) != 0, "IOTS went idle when it shouldn't have.");
    }

    // Check if we wrapped *around* to idle.
    if (Bits.Count(slot) == -1)
    {
        // Since the capacity is limited to 32k, this means we wrapped the array at least twice. That's bad
        // because headTail no longer knows how many work items we have - it looks like zero. This can
        // only happen if 32k threads come through here while one is swapped out.
        throw Fx.AssertAndThrowFatal("Head/Tail overflow!");
    }

    bool wrapped;
    bool queued = this.slots[slot >> Bits.HiShift & SlotMask].TryEnqueueWorkItem(callback, state, out wrapped);

    if (wrapped)
    {
        // Wrapped around the circular buffer. Create a new, bigger IOThreadScheduler.
        IOThreadScheduler next =
            new IOThreadScheduler(Math.Min(this.slots.Length * 2, MaximumCapacity), this.slotsLowPri.Length);
        Interlocked.CompareExchange<IOThreadScheduler>(ref IOThreadScheduler.current, next, this);
    }

    if (wasIdle)
    {
        // It's our responsibility to kick off the overlapped.
        this.overlapped.Post(this);
    }

    return queued;
}