internal static void Start<TStateMachine>(ref TStateMachine stateMachine)
    where TStateMachine : IAsyncStateMachine
{
    Thread currentThread = Thread.CurrentThread;
    ExecutionContext previousExecutionCtx = currentThread.ExecutionContext;
    SynchronizationContext previousSyncCtx = currentThread.SynchronizationContext;

    // Async state machines are required not to throw, so no need for try/finally here.
    stateMachine.MoveNext();

    // The common case is that these have not changed, so avoid the cost of a write barrier if not needed.
    if (previousSyncCtx != currentThread.SynchronizationContext)
    {
        // Restore changed SynchronizationContext back to previous
        currentThread.SynchronizationContext = previousSyncCtx;
    }

    ExecutionContext currentExecutionCtx = currentThread.ExecutionContext;
    if (previousExecutionCtx != currentExecutionCtx)
    {
        // Restore changed ExecutionContext back to previous
        currentThread.ExecutionContext = previousExecutionCtx;

        if ((currentExecutionCtx != null && currentExecutionCtx.HasChangeNotifications) ||
            (previousExecutionCtx != null && previousExecutionCtx.HasChangeNotifications))
        {
            // There are change notifications; trigger any affected
            ExecutionContext.OnValuesChanged(currentExecutionCtx, previousExecutionCtx);
        }
    }
}
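For context, this Start method is what the compiler-generated stub for an async method ends up invoking. The following is a minimal, hand-rolled sketch of that pattern (the type and method names are illustrative, not the exact compiler output); it shows that MoveNext runs synchronously on the caller's thread inside Start, with the contexts captured and restored around it exactly as above.

using System;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;

// Hand-written stand-in for a compiler-generated async state machine (illustrative only).
struct PrintStateMachine : IAsyncStateMachine
{
    public AsyncTaskMethodBuilder Builder;

    public void MoveNext()
    {
        // Runs synchronously on the calling thread, inside Builder.Start,
        // which captures and restores ExecutionContext/SynchronizationContext around it.
        Console.WriteLine($"MoveNext on thread {Environment.CurrentManagedThreadId}");
        Builder.SetResult();
    }

    public void SetStateMachine(IAsyncStateMachine stateMachine) => Builder.SetStateMachine(stateMachine);
}

class Demo
{
    static Task RunAsync()
    {
        PrintStateMachine sm = default;
        sm.Builder = AsyncTaskMethodBuilder.Create();
        sm.Builder.Start(ref sm);   // ends up in the Start method shown above
        return sm.Builder.Task;
    }

    static void Main() => RunAsync().Wait();
}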
public FastRandom random = new FastRandom(Environment.CurrentManagedThreadId); // mutable struct, do not copy or make readonly

public ThreadPoolWorkQueueThreadLocals(ThreadPoolWorkQueue tpq)
{
    workQueue = tpq;
    workStealingQueue = new ThreadPoolWorkQueue.WorkStealingQueue();
    ThreadPoolWorkQueue.WorkStealingQueueList.Add(workStealingQueue);
    currentThread = Thread.CurrentThread;
}
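The "do not copy or make readonly" warning matters because the generator is a mutable struct: declaring the field readonly would make every call operate on a defensive copy, so Next would never advance the state. Below is a rough sketch of such a generator, a simple xorshift variant that approximates (but is not necessarily identical to) the runtime's internal FastRandom.

using System.Diagnostics;

// Approximation of the runtime's internal FastRandom: a tiny xorshift generator
// kept as a mutable struct field so Next() can update its state in place.
internal struct FastRandom
{
    private uint _w, _x, _y, _z;

    public FastRandom(int seed)
    {
        _x = (uint)seed;
        _w = 88675123;
        _y = 362436069;
        _z = 521288629;
    }

    public int Next(int maxValue)
    {
        Debug.Assert(maxValue > 0);

        // Marsaglia xorshift step; mutates all four state words.
        uint t = _x ^ (_x << 11);
        _x = _y; _y = _z; _z = _w;
        _w = _w ^ (_w >> 19) ^ (t ^ (t >> 8));

        return (int)(_w % (uint)maxValue);
    }
}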
internal static void Start<TStateMachine>(ref TStateMachine stateMachine)
    where TStateMachine : IAsyncStateMachine
{
    // Async state machines are required not to throw, so no need for try/finally here.
    Thread currentThread = Thread.CurrentThread;
    ExecutionContextSwitcher ecs = default(ExecutionContextSwitcher);
    ExecutionContext.EstablishCopyOnWriteScope(currentThread, ref ecs);
    stateMachine.MoveNext();
    ecs.Undo(currentThread);
}
/// <summary>
/// Dispatches work items to this thread.
/// </summary>
/// <returns>
/// <c>true</c> if this thread did as much work as was available or its quantum expired.
/// <c>false</c> if this thread stopped working early.
/// </returns>
internal static bool Dispatch()
{
    var workQueue = ThreadPoolGlobals.workQueue;

    //
    // Save the start time
    //
    int startTickCount = Environment.TickCount;

    //
    // Update our records to indicate that an outstanding request for a thread has now been fulfilled.
    // From this point on, we are responsible for requesting another thread if we stop working for any
    // reason, and we believe there might still be work in the queue.
    //
    workQueue.MarkThreadRequestSatisfied();

    Interlocked.Increment(ref workQueue.numWorkingThreads);

    //
    // Assume that we're going to need another thread if this one returns to the VM. We'll set this to
    // false later, but only if we're absolutely certain that the queue is empty.
    //
    bool needAnotherThread = true;
    object workItem = null;
    try
    {
        //
        // Set up our thread-local data
        //
        ThreadPoolWorkQueueThreadLocals tl = workQueue.EnsureCurrentThreadHasQueue();
        Thread currentThread = tl.currentThread;

        //
        // Loop until our quantum expires or there is no work.
        //
        while (ThreadPool.KeepDispatching(startTickCount))
        {
            bool missedSteal = false;
            workItem = workQueue.Dequeue(tl, ref missedSteal);

            if (workItem == null)
            {
                //
                // No work.
                // If we missed a steal, though, there may be more work in the queue.
                // Instead of looping around and trying again, we'll just request another thread. Hopefully the thread
                // that owns the contended work-stealing queue will pick up its own workitems in the meantime,
                // which will be more efficient than this thread doing it anyway.
                //
                needAnotherThread = missedSteal;

                // Tell the VM we're returning normally, not because Hill Climbing asked us to return.
                return true;
            }

            //
            // If we found work, there may be more work. Ask for another thread so that the other work can be processed
            // in parallel. Note that this will only ask for a max of #procs threads, so it's safe to call it for every dequeue.
            //
            workQueue.EnsureThreadRequested();

            try
            {
                SynchronizationContext.SetSynchronizationContext(null);

                if (workItem is Task task)
                {
                    task.ExecuteFromThreadPool(currentThread);
                }
                else
                {
                    Debug.Assert(workItem is IThreadPoolWorkItem);
                    Unsafe.As<IThreadPoolWorkItem>(workItem).Execute();
                }
            }
            finally
            {
                workItem = null;
                SynchronizationContext.SetSynchronizationContext(null);
            }

            RuntimeThread.CurrentThread.ResetThreadPoolThread();

            if (!ThreadPool.NotifyWorkItemComplete())
            {
                return false;
            }
        }

        // If we get here, it's because our quantum expired.
        return true;
    }
    catch (Exception e)
    {
        // Work items should not allow exceptions to escape. For example, Task catches and stores any exceptions.
        Environment.FailFast("Unhandled exception in ThreadPool dispatch loop", e);
        return true; // Will never actually be executed because Environment.FailFast doesn't return
    }
    finally
    {
        int numWorkers = Interlocked.Decrement(ref workQueue.numWorkingThreads);
        Debug.Assert(numWorkers >= 0);

        //
        // If we are exiting for any reason other than that the queue is definitely empty, ask for another
        // thread to pick up where we left off.
        //
        if (needAnotherThread)
        {
            workQueue.EnsureThreadRequested();
        }
    }
}
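To exercise the non-Task branch of the loop above (the Unsafe.As<IThreadPoolWorkItem> path), a work item can implement IThreadPoolWorkItem directly and be queued with ThreadPool.UnsafeQueueUserWorkItem, which accepts that interface publicly since .NET Core 3.0. The class below is purely an illustration of that usage.

using System;
using System.Threading;

// Illustrative only: a reusable work item that the dispatch loop executes directly
// via its IThreadPoolWorkItem branch, with no Task or ExecutionContext wrapper.
sealed class PingWorkItem : IThreadPoolWorkItem
{
    private readonly ManualResetEventSlim _done = new ManualResetEventSlim();

    public void Execute()
    {
        Console.WriteLine($"Executed on pool thread {Environment.CurrentManagedThreadId}");
        _done.Set();
    }

    public void Wait() => _done.Wait();
}

class Program
{
    static void Main()
    {
        var item = new PingWorkItem();
        // preferLocal: true targets the current pool thread's local work-stealing queue;
        // from a non-pool thread the item goes to the global queue.
        ThreadPool.UnsafeQueueUserWorkItem(item, preferLocal: false);
        item.Wait();
    }
}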
internal override void ExecuteFromThreadPool(Thread threadPoolThread)
{
    bool setSuccessfully = TrySetResult(true);
    Debug.Assert(setSuccessfully, "Should have been able to complete task");
}