/// <summary>
/// Stores <paramref name="value"/> at <paramref name="key"/> when the key falls inside the
/// map's index window; keys outside [minIndex, maxIndex] are silently ignored.
/// Maintains the element count as slots transition between null and non-null.
/// </summary>
/// <param name="key">Edge key to set.</param>
/// <param name="value">Value to store; null clears the slot.</param>
/// <returns>This map, to allow call chaining.</returns>
public override AbstractEdgeMap<T> Put(int key, T value)
{
    // Out-of-window keys are a no-op by contract.
    if (key < minIndex || key > maxIndex)
    {
        return this;
    }

    // Atomically swap in the new value and observe what was there before.
    T previous = Interlocked.Exchange(ref arrayData[key - minIndex], value);

    // Adjust the count only on null <-> non-null transitions.
    if (previous == null && value != null)
    {
        Interlocked.Increment(ref size);
    }
    else if (previous != null && value == null)
    {
        Interlocked.Decrement(ref size);
    }

    return this;
}
/// <summary>
/// Invoke the Canceled event.
/// </summary>
/// <remarks>
/// The handlers are invoked synchronously in LIFO order.
/// </remarks>
/// <param name="throwOnFirstException">
/// If true, the first callback exception is rethrown immediately; otherwise exceptions are
/// collected and thrown together as an AggregateException after all callbacks have run.
/// </param>
private void ExecuteCallbackHandlers(bool throwOnFirstException)
{
    Contract.Assert(IsCancellationRequested, "ExecuteCallbackHandlers should only be called after setting IsCancellationRequested->true");
    Contract.Assert(ThreadIDExecutingCallbacks != -1, "ThreadIDExecutingCallbacks should have been set.");

    // Design decision: call the delegates in LIFO order so that callbacks fire 'deepest first'.
    // This is intended to help with nesting scenarios so that child enlisters cancel before their parents.
    List <Exception> exceptionList = null;
    SparselyPopulatedArray <CancellationCallbackInfo>[] callbackLists = m_registeredCallbacksLists;

    // If there are no callbacks to run, we can safely exit.  Any races to lazy initialize it
    // will see IsCancellationRequested and will then run the callback themselves.
    if (callbackLists == null)
    {
        Interlocked.Exchange(ref m_state, NOTIFYINGCOMPLETE);
        return;
    }

    try
    {
        for (int index = 0; index < callbackLists.Length; index++)
        {
            SparselyPopulatedArray <CancellationCallbackInfo> list = callbackLists[index];
            if (list != null)
            {
                // Walk fragments from Tail backwards, and each fragment from its last slot
                // backwards, to achieve the LIFO ordering described above.
                SparselyPopulatedArrayFragment <CancellationCallbackInfo> currArrayFragment = list.Tail;
                while (currArrayFragment != null)
                {
                    for (int i = currArrayFragment.Length - 1; i >= 0; i--)
                    {
                        // 1a. publish the intended callback, to ensure ctr.Dispose can tell if a wait is necessary.
                        // 1b. transition to the target syncContext and continue there..
                        //  On the target SyncContext:
                        //   2. actually remove the callback
                        //   3. execute the callback
                        // re:#2 we do the remove on the syncCtx so that we can be sure we have control of the syncCtx before
                        //       grabbing the callback.  This prevents a deadlock if ctr.Dispose() might run on the syncCtx too.
                        m_executingCallback = currArrayFragment[i];
                        if (m_executingCallback != null)
                        {
                            // Transition to the target sync context (if necessary), and continue our work there.
                            CancellationCallbackCoreWorkArguments args = new CancellationCallbackCoreWorkArguments(currArrayFragment, i);

                            // marshal exceptions: either aggregate or perform an immediate rethrow
                            // We assume that syncCtx.Send() has forwarded on user exceptions when appropriate.
                            try
                            {
                                if (m_executingCallback.TargetSyncContext != null)
                                {
#pragma warning disable 0618
                                    // This API isn't available in Metro, but we never run in metro.
                                    m_executingCallback.TargetSyncContext.Send(CancellationCallbackCoreWork_OnSyncContext, args);
#pragma warning restore 0618
                                    // CancellationCallbackCoreWork_OnSyncContext may have altered ThreadIDExecutingCallbacks, so reset it.
                                    ThreadIDExecutingCallbacks = Thread.CurrentThread.ManagedThreadId;
                                }
                                else
                                {
                                    CancellationCallbackCoreWork_OnSyncContext(args);
                                }
                            }
                            catch (Exception ex)
                            {
                                if (throwOnFirstException)
                                {
                                    throw;
                                }

                                // Otherwise, log it and proceed.
                                if (exceptionList == null)
                                {
                                    exceptionList = new List <Exception>();
                                }
                                exceptionList.Add(ex);
                            }
                        }
                    }

                    currArrayFragment = currArrayFragment.Prev;
                }
            }
        }
    }
    finally
    {
        // Transition to the completed state and clear the in-flight callback even if a callback threw.
        m_state = NOTIFYINGCOMPLETE;
        m_executingCallback = null;
        Thread.MemoryBarrier(); // for safety, prevent reorderings crossing this point and seeing inconsistent state.
    }

    if (exceptionList != null)
    {
        Contract.Assert(exceptionList.Count > 0, "Expected exception count > 0");
        throw new AggregateException(exceptionList);
    }
}
/// <summary>
/// Allocates a managed thread ID for the current OS thread, preferring to reuse the ID of a
/// thread that has exited; caches the result in the thread-static t_currentManagedThreadId.
/// </summary>
/// <returns>The managed thread ID now associated with the current thread.</returns>
private static int MakeForCurrentThread()
{
    //
    // Get the current thread handle. We need to use DuplicateHandle, because GetCurrentThread returns a pseudo-handle
    // that cannot be used outside of this thread.
    //
    IntPtr thisNativeThreadHandle;
    Interop.mincore.DuplicateHandle(
        Interop.mincore.GetCurrentProcess(),
        Interop.mincore.GetCurrentThread(),
        Interop.mincore.GetCurrentProcess(),
        out thisNativeThreadHandle,
        0,
        false,
        (uint)Interop.Constants.DuplicateSameAccess);

    //
    // First, search for a dead thread, so we can reuse its thread ID
    //
    for (ManagedThreadId current = s_list; current != null; current = current._next)
    {
        //
        // Try to take the lock on this ID. If another thread already has it, just move on to the next ID.
        //
        if (Interlocked.Exchange(ref current._lock, 1) != 0)
        {
            continue;
        }

        try
        {
            //
            // Does the ID currently belong to a dead thread?
            // (A zero-timeout wait on a thread handle is signaled only once the thread has exited.)
            //
            if (LowLevelThread.WaitForSingleObject(current._nativeThreadHandle, 0) == (uint)Interop.Constants.WaitObject0)
            {
                //
                // The thread is dead. We can claim this ID by swapping in our own thread handle.
                //
                Interop.mincore.CloseHandle(current._nativeThreadHandle);
                current._nativeThreadHandle = thisNativeThreadHandle;
                t_currentManagedThreadId = current._managedThreadId;
                return(current._managedThreadId);
            }
        }
        finally
        {
            //
            // Release the lock.
            //
            current._lock = 0;
        }
    }

    //
    // We couldn't find a dead thread, so we can't reuse a thread ID. Create a new one.
    //
    ManagedThreadId newManagedThreadId = new ManagedThreadId(Interlocked.Increment(ref s_nextThreadId));
    newManagedThreadId._nativeThreadHandle = thisNativeThreadHandle;

    // Lock-free prepend to the global list: retry the CAS until our node is published as the head.
    while (true)
    {
        ManagedThreadId oldList = s_list;
        newManagedThreadId._next = oldList;
        if (Interlocked.CompareExchange(ref s_list, newManagedThreadId, oldList) == oldList)
        {
            t_currentManagedThreadId = newManagedThreadId._managedThreadId;
            return(newManagedThreadId._managedThreadId);
        }
    }
}
/// <summary>
/// Takes the cached <see cref="AutoResetEvent"/> out of the shared slot if one is available,
/// or creates a fresh unsignaled event otherwise.
/// </summary>
/// <returns>An unsignaled event ready for use.</returns>
private static AutoResetEvent RentEvent()
{
    // Atomically claim the cached event, leaving null behind so no other thread can reuse it.
    AutoResetEvent cached = Interlocked.Exchange(ref s_cachedEvent, null);
    return cached ?? new AutoResetEvent(false);
}
/// <summary>
/// Returns a native overlapped structure: pops one from the lock-free free list when possible,
/// otherwise allocates fresh native memory.  Also allocates a matching OverlappedData slot in
/// s_dataArray and records its index on the returned structure.
/// </summary>
private static unsafe Win32ThreadPoolNativeOverlapped *AllocateNew()
{
    IntPtr freePtr;
    Win32ThreadPoolNativeOverlapped *overlapped;
    OverlappedData data;

    // Find a free Overlapped (lock-free stack pop from the free list).
    while ((freePtr = Volatile.Read(ref s_freeList)) != IntPtr.Zero)
    {
        overlapped = (Win32ThreadPoolNativeOverlapped *)freePtr;

        // Another thread popped this node first; retry from the new head.
        if (Interlocked.CompareExchange(ref s_freeList, overlapped->_nextFree, freePtr) != freePtr)
        {
            continue;
        }

        overlapped->_nextFree = IntPtr.Zero;
        return(overlapped);
    }

    // None are free; allocate a new one.
    overlapped = (Win32ThreadPoolNativeOverlapped *)Interop.MemAlloc((UIntPtr)sizeof(Win32ThreadPoolNativeOverlapped));
    *overlapped = default(Win32ThreadPoolNativeOverlapped);

    // Allocate a OverlappedData object, and an index at which to store it in _dataArray.
    data = new OverlappedData();
    int dataIndex = Interlocked.Increment(ref s_dataCount) - 1;

    // Make sure we didn't wrap around.
    if (dataIndex < 0)
    {
        Environment.FailFast("Too many outstanding Win32ThreadPoolNativeOverlapped instances");
    }

    while (true)
    {
        OverlappedData[] dataArray = Volatile.Read(ref s_dataArray);
        int currentLength = dataArray == null ? 0 : dataArray.Length;

        // If the current array is too small, create a new, larger one.
        if (currentLength <= dataIndex)
        {
            int newLength = currentLength;
            if (newLength == 0)
            {
                newLength = 128;
            }
            // Grow by 1.5x until dataIndex fits.
            while (newLength <= dataIndex)
            {
                newLength = (newLength * 3) / 2;
            }

            OverlappedData[] newDataArray = dataArray;
            Array.Resize(ref newDataArray, newLength);

            if (Interlocked.CompareExchange(ref s_dataArray, newDataArray, dataArray) != dataArray)
            {
                continue; // Someone else got the free one, try again
            }

            dataArray = newDataArray;
        }

        // If we haven't stored this object in the array yet, do so now. Then we need to make another pass through
        // the loop, in case another thread resized the array before we made this update.
        // NOTE(review): this check re-reads the s_dataArray field rather than the local dataArray —
        // confirm that is intentional and not meant to be dataArray[dataIndex].
        if (s_dataArray[dataIndex] == null)
        {
            // Full fence so this write can't move past subsequent reads.
            Interlocked.Exchange(ref dataArray[dataIndex], data);
            continue;
        }

        // We're already in the array, so we're done.
        Debug.Assert(dataArray[dataIndex] == data);
        overlapped->_dataIndex = dataIndex;
        return(overlapped);
    }
}