public void OnError(Exception error)
 {
     this.error = error;
     Volatile.Write(ref done, true);
     Signal();
 }
 /// <summary>
 ///     Returns the latest value written at the given index by any processor.
 /// </summary>
 /// <param name="index">Index in the array.</param>
 /// <returns>The latest value written at the given index.</returns>
 public long GetValue(int index)
 {
     return(Volatile.Read(ref _array[index]));
 }
Example #3
 public Task Close(AsyncWrappingCommonArgs async)
 {
     Volatile.Write(ref _closing, true);
     return(_database.CloseConnection(async));
 }
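The _closing flag written above only pays off if other code paths read it with matching acquire semantics. A minimal, self-contained sketch of that latch pattern (the ClosableResource type and its members are invented for illustration, not taken from the snippet's codebase):

using System;
using System.Threading;

public sealed class ClosableResource
{
    private bool _closing;

    public void Close()
    {
        // Publish the shutdown flag so every thread observes it without a lock.
        Volatile.Write(ref _closing, true);
    }

    public void DoWork()
    {
        // Pairs with the Volatile.Write in Close(); a stale cached value is not observed.
        if (Volatile.Read(ref _closing))
        {
            throw new ObjectDisposedException(nameof(ClosableResource));
        }
        // ... perform work ...
    }
}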
Example #4
        public async Task no_data_should_be_dispatched_after_tcp_connection_closed()
        {
            for (int i = 0; i < 1000; i++)
            {
                bool closed = false;
                bool dataReceivedAfterClose = false;
                var  listeningSocket        = CreateListeningSocket();

                var mre = new ManualResetEventSlim(false);
                var clientTcpConnection = TcpConnectionSsl.CreateConnectingConnection(
                    Guid.NewGuid(),
                    (IPEndPoint)listeningSocket.LocalEndPoint,
                    "localhost",
                    false,
                    new TcpClientConnector(),
                    TimeSpan.FromSeconds(5),
                    (conn) => mre.Set(),
                    (conn, error) => {
                        Assert.Fail($"Connection failed: {error}");
                    },
                    false);

                var serverSocket        = listeningSocket.Accept();
                var serverTcpConnection = TcpConnectionSsl.CreateServerFromSocket(Guid.NewGuid(),
                                                                                  (IPEndPoint)serverSocket.RemoteEndPoint, serverSocket, GetCertificate(), false);

                mre.Wait(TimeSpan.FromSeconds(3));
                try {
                    clientTcpConnection.ConnectionClosed += (connection, error) => {
                        Volatile.Write(ref closed, true);
                    };

                    clientTcpConnection.ReceiveAsync((connection, data) => {
                        if (Volatile.Read(ref closed))
                        {
                            dataReceivedAfterClose = true;
                        }
                    });

                    using (var b = new Barrier(2)) {
                        Task sendData = Task.Factory.StartNew(() => {
                            b.SignalAndWait();
                            for (int j = 0; j < 1000; j++)
                            {
                                serverTcpConnection.EnqueueSend(GenerateData());
                            }
                        }, CancellationToken.None, TaskCreationOptions.LongRunning, TaskScheduler.Default);

                        Task closeConnection = Task.Factory.StartNew(() => {
                            b.SignalAndWait();
                            serverTcpConnection.Close("Intentional close");
                        }, CancellationToken.None, TaskCreationOptions.LongRunning, TaskScheduler.Default);

                        await Task.WhenAll(sendData, closeConnection);

                        Assert.False(dataReceivedAfterClose);
                    }
                } finally {
                    clientTcpConnection.Close("Shut down");
                    serverTcpConnection.Close("Shut down");
                    listeningSocket.Dispose();
                }
            }
        }
Example #5
 internal int CurrentStatus()
 {
     return(Volatile.Read(ref _statusDotNotCallMeDirectly));
 }
            public void LocalPush(WorkItem obj)
            {
                int tail = m_tailIndex;

                // We're going to increment the tail; if we'll overflow, then we need to reset our counts
                if (tail == int.MaxValue)
                {
                    bool lockTaken = false;
                    try
                    {
                        m_foreignLock.Enter(ref lockTaken);

                        if (m_tailIndex == int.MaxValue)
                        {
                            //
                            // Rather than resetting to zero, we'll just mask off the bits we don't care about.
                            // This way we don't need to rearrange the items already in the queue; they'll be found
                            // correctly exactly where they are.  One subtlety here is that we need to make sure that
                            // if head is currently < tail, it remains that way.  This happens to just fall out from
                            // the bit-masking, because we only do this if tail == int.MaxValue, meaning that all
                            // bits are set, so all of the bits we're keeping will also be set.  Thus it's impossible
                            // for the head to end up > than the tail, since you can't set any more bits than all of 
                            // them.
                            //
                            m_headIndex = m_headIndex & m_mask;
                            m_tailIndex = tail = m_tailIndex & m_mask;
                            Debug.Assert(m_headIndex <= m_tailIndex);
                        }
                    }
                    finally
                    {
                        if (lockTaken)
                            m_foreignLock.Exit(useMemoryBarrier: true);
                    }
                }

                // When there are at least 2 elements' worth of space, we can take the fast path.
                if (tail < m_headIndex + m_mask)
                {
                    Volatile.Write(ref m_array[tail & m_mask], obj);
                    m_tailIndex = tail + 1;
                }
                else
                {
                    // We need to contend with foreign pops, so we lock.
                    bool lockTaken = false;
                    try
                    {
                        m_foreignLock.Enter(ref lockTaken);

                        int head = m_headIndex;
                        int count = m_tailIndex - m_headIndex;

                        // If there is still space (at least one free slot), we just add the element below; otherwise we grow first.
                        if (count >= m_mask)
                        {
                            // We're full; expand the queue by doubling its size.
                            var newArray = new WorkItem[m_array.Length << 1];
                            for (int i = 0; i < m_array.Length; i++)
                                newArray[i] = m_array[(i + head) & m_mask];

                            // Reset the field values, incl. the mask.
                            m_array = newArray;
                            m_headIndex = 0;
                            m_tailIndex = tail = count;
                            m_mask = (m_mask << 1) | 1;
                        }

                        Volatile.Write(ref m_array[tail & m_mask], obj);
                        m_tailIndex = tail + 1;
                    }
                    finally
                    {
                        if (lockTaken)
                            m_foreignLock.Exit(useMemoryBarrier: false);
                    }
                }
            }
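The Volatile.Write into m_array above publishes the element to threads that steal from the head end under m_foreignLock. The stealing side is not part of this snippet; the following is a simplified, hypothetical sketch of what such a foreign pop could look like, reusing the snippet's field names:

            private WorkItem TrySteal()
            {
                bool lockTaken = false;
                try
                {
                    m_foreignLock.Enter(ref lockTaken);

                    int head = m_headIndex;
                    // Reserve the head slot with a full fence so the owner sees the new head
                    // before we read (and clear) the slot.
                    Interlocked.Exchange(ref m_headIndex, head + 1);

                    if (head < m_tailIndex)
                    {
                        int idx = head & m_mask;
                        WorkItem obj = Volatile.Read(ref m_array[idx]); // pairs with the Volatile.Write in LocalPush
                        if (obj == null)
                        {
                            return null; // the owner popped this slot concurrently
                        }
                        m_array[idx] = null;
                        return obj;
                    }

                    // Nothing to steal; undo the reservation.
                    m_headIndex = head;
                    return null;
                }
                finally
                {
                    if (lockTaken)
                        m_foreignLock.Exit(useMemoryBarrier: false);
                }
            }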
        public void TestHardStopWork()
        {
            int processed               = 0;
            int startedTask             = 0;
            ManualResetEventSlim waiter = new ManualResetEventSlim(false);

            using (DelegateQueueAsyncProcessor <int> proc = new DelegateQueueAsyncProcessor <int>(Environment.ProcessorCount, 1000, "name", (elem, token) =>
            {
                try
                {
                    Interlocked.Increment(ref startedTask);
                    waiter.Wait(token);
                }
                finally
                {
                    Interlocked.Increment(ref processed);
                }
            }))
            {
                proc.Start();

                for (int i = 0; i < 5 * Environment.ProcessorCount; i++)
                {
                    proc.Add(i);
                }

                Assert.IsTrue(proc.ThreadCount > 0, "proc.ThreadCount > 0");
                Assert.IsTrue(proc.ThreadCount == Environment.ProcessorCount, "proc.ThreadCount == Environment.ProcessorCount");

                TimingAssert.IsTrue(10000, () => proc.ActiveThreadCount > 0, "FAILED: wait while thread activated");
                TimingAssert.IsTrue(10000, () => proc.ActiveThreadCount == proc.ThreadCount, "FAILED: wait while all threads activated");

                TimingAssert.IsTrue(10000, () => Volatile.Read(ref startedTask) > 0, "FAILED: wait while first thread blocked");
                TimingAssert.IsTrue(10000, () => Volatile.Read(ref startedTask) == proc.ThreadCount, () => "FAILED: wait while all thread blocked. Currently blocked = " + Volatile.Read(ref startedTask).ToString() + ", expected = " + proc.ThreadCount.ToString());
                proc.Stop(true, false, true);

                Assert.IsTrue(proc.State == QueueAsyncProcessorState.Stopped, "proc.State == QueueAsyncProcessorState.Stopped");
                Assert.IsTrue(processed > 0, "processed > 0");
            }
        }
 public bool IsReadOnly()
 {
     EnsureNotDisposed();
     return(Volatile.Read(ref _readOnly));
 }
Example #9
 public void SetVolatileValue(long value)
 {
     Volatile.Write(ref Value, value);
 }
Example #10
        public bool WaitUntilDrained(int timeoutMs = -1)
        {
            if (IsEmpty)
            {
                return(true);
            }

            var  resultCount = Interlocked.Increment(ref NumWaitingForDrain);
            long endWhen     =
                timeoutMs >= 0
                    ? Time.Ticks + (Time.MillisecondInTicks * timeoutMs)
                    : long.MaxValue - 1;

            bool doWait    = false;
            var  waterMark = ItemsQueued;

            try {
                do
                {
                    lock (ItemsLock) {
                        // Read these first and read the queued count last so that if we lose a race
                        //  we will lose it in the 'there's extra work' fashion instead of 'we're done
                        //  but not really' fashion
                        var processed = Volatile.Read(ref ItemsProcessed);
                        var queued    = Volatile.Read(ref ItemsQueued);
                        doWait = (processed < queued) || (_Count > 0);
                    }
                    if (doWait)
                    {
                        var now = Time.Ticks;
                        if (now > endWhen)
                        {
                            break;
                        }
                        var maxWait = (timeoutMs <= 0)
                            ? timeoutMs
                            : (int)(Math.Max(0, endWhen - now) / Time.MillisecondInTicks);
                        NotifyChanged();

                        if (DrainedSignal.Wait(maxWait))
                        {
                            // We successfully got the drain signal, now wait for the 'processing is probably done' signal
                            now     = Time.Ticks;
                            maxWait = (timeoutMs <= 0)
                                ? timeoutMs
                                : (int)(Math.Max(0, endWhen - now) / Time.MillisecondInTicks);
                            if (now > endWhen)
                            {
                                break;
                            }
                            // Note that we may still spin after getting this signal because it will fire when all the workers
                            //  stop running, but that doesn't necessarily mean all the work is done
                            if (FinishedProcessingSignal.Wait(maxWait))
                            {
                                FinishedProcessingSignal.Reset();
                                DrainedSignal.Reset();
                            }
                        }
                    }
                    else
                    {
#if DEBUG
                        if (!IsEmpty)
                        {
                            throw new WorkQueueException(this, "Queue is not empty");
                        }
                        Thread.Yield();
                        if (!IsEmpty)
                        {
                            throw new WorkQueueException(this, "Queue is not empty");
                        }
                        if (ItemsProcessed < waterMark)
                        {
                            throw new WorkQueueException(this, "AssertDrained returned before reaching watermark");
                        }
#endif
                        return(ItemsProcessed >= waterMark);
                    }
                } while (true);
            } finally {
                Interlocked.Decrement(ref NumWaitingForDrain);

                var uhe = Interlocked.Exchange(ref UnhandledException, null);
                if (uhe != null)
                {
                    uhe.Throw();
                }
            }

            return(false);
        }
Example #11
 public long GetValue()
 {
     return(Volatile.Read(ref Value));
 }
Example #12
 public void OnCompleted()
 {
     Volatile.Write(ref _done, true);
     Drain();
 }
 /// <summary>
 ///     Writes a new value at the given index. The value is immediately visible to all processors.
 /// </summary>
 /// <param name="index">Index in the array.</param>
 /// <param name="value">The new value to store at the given index.</param>
 public void SetValue(int index, int value)
 {
     Volatile.Write(ref _array[index], value);
 }
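GetValue/SetValue above give per-slot volatile access without locks. As a self-contained illustration of where that is enough, the StripedCounter below (invented for this example) lets each thread own one stripe and any thread sum the latest published values:

using System.Threading;

public sealed class StripedCounter
{
    private readonly int[] _array;

    public StripedCounter(int stripes)
    {
        _array = new int[stripes];
    }

    public void Increment(int stripe)
    {
        // Safe only because each stripe has a single writer; Volatile.Write publishes the update.
        Volatile.Write(ref _array[stripe], Volatile.Read(ref _array[stripe]) + 1);
    }

    public long Sum()
    {
        long total = 0;
        for (int i = 0; i < _array.Length; i++)
        {
            total += Volatile.Read(ref _array[i]); // sees the latest value written by any processor
        }
        return total;
    }
}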
        /// <summary>Tries to dequeue an element from the queue.</summary>
        public bool TryDequeue([MaybeNullWhen(false)] out T item)
        {
            Slot[] slots = _slots;

            // Loop in case of contention...
            SpinWait spinner = default;

            while (true)
            {
                // Get the head at which to try to dequeue.
                int currentHead = Volatile.Read(ref _headAndTail.Head);
                int slotsIndex  = currentHead & _slotsMask;

                // Read the sequence number for the head position.
                int sequenceNumber = Volatile.Read(ref slots[slotsIndex].SequenceNumber);

                // We can dequeue from this slot if it's been filled by an enqueuer, which
                // would have left the sequence number at pos+1.
                int diff = sequenceNumber - (currentHead + 1);
                if (diff == 0)
                {
                    // We may be racing with other dequeuers.  Try to reserve the slot by incrementing
                    // the head.  Once we've done that, no one else will be able to read from this slot,
                    // and no enqueuer will be able to read from this slot until we've written the new
                    // sequence number. WARNING: The next few lines are not reliable on a runtime that
                    // supports thread aborts. If a thread abort were to sneak in after the CompareExchange
                    // but before the Volatile.Write, enqueuers trying to enqueue into this slot would
                    // spin indefinitely.  If this implementation is ever used on such a platform, this
                    // if block should be wrapped in a finally / prepared region.
                    if (Interlocked.CompareExchange(ref _headAndTail.Head, currentHead + 1, currentHead) == currentHead)
                    {
                        // Successfully reserved the slot.  Note that after the above CompareExchange, other threads
                        // trying to dequeue from this slot will end up spinning until we do the subsequent Write.
                        item = slots[slotsIndex].Item!;
                        if (!Volatile.Read(ref _preservedForObservation))
                        {
                            // Not preserving for observation: clear the slot and publish the new
                            // sequence number so an enqueuer can reuse this slot.  When preserving,
                            // we skip this so enumerations, peeking, ToArray, etc. still see the item,
                            // and the unchanged sequence number forces enqueuers to move to a new segment.
                            slots[slotsIndex].Item = default;
                            Volatile.Write(ref slots[slotsIndex].SequenceNumber, currentHead + slots.Length);
                        }
                        return(true);
                    }

                    // The head was already advanced by another thread. A newer head has already been observed and the next
                    // iteration would make forward progress, so there's no need to spin-wait before trying again.
                }
                else if (diff < 0)
                {
                    // The sequence number was less than what we needed, which means this slot doesn't
                    // yet contain a value we can dequeue, i.e. the segment is empty.  Technically it's
                    // possible that multiple enqueuers could have written concurrently, with those
                    // getting later slots actually finishing first, so there could be elements after
                    // this one that are available, but we need to dequeue in order.  So before declaring
                    // failure and that the segment is empty, we check the tail to see if we're actually
                    // empty or if we're just waiting for items in flight or after this one to become available.
                    bool frozen      = _frozenForEnqueues;
                    int  currentTail = Volatile.Read(ref _headAndTail.Tail);
                    if (currentTail - currentHead <= 0 || (frozen && (currentTail - FreezeOffset - currentHead <= 0)))
                    {
                        item = default;
                        return(false);
                    }

                    // It's possible it could have become frozen after we checked _frozenForEnqueues
                    // and before reading the tail.  That's ok: in that rare race condition, we just
                    // loop around again. This is not necessarily an always-forward-progressing
                    // situation since this thread is waiting for another to write to the slot and
                    // this thread may have to check the same slot multiple times. Spin-wait to avoid
                    // a potential busy-wait, and then try again.
                    spinner.SpinOnce(sleep1Threshold: -1);
                }
                else
                {
                    // The item was already dequeued by another thread. The head has already been updated beyond what was
                    // observed above, and the sequence number observed above as a volatile load is more recent than the update
                    // to the head. So, the next iteration of the loop is guaranteed to see a new head. Since this is an
                    // always-forward-progressing situation, there's no need to spin-wait before trying again.
                }
            }
        }
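The diff == 0 test above works because every slot starts out advertising its own index as the sequence number: an enqueuer that fills position pos publishes pos + 1, and the dequeuer above publishes pos + slots.Length to free the slot for the next lap. A hedged sketch of that initialization (not the exact runtime code):

        // Simplified sketch, assuming Slot exposes Item and SequenceNumber fields as in the snippet.
        private static void InitializeSlots(Slot[] slots)
        {
            for (int i = 0; i < slots.Length; i++)
            {
                // An empty slot at position i advertises sequence number i, so an enqueuer that
                // observes SequenceNumber == tail may store the item and then publish tail + 1,
                // which is exactly the value the dequeuer tests for with diff == 0.
                slots[i].SequenceNumber = i;
            }
        }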
Example #15
        /// <summary>
        /// Opens one connection.
        /// If a connection is being opened it yields the same task, preventing creation in parallel.
        /// </summary>
        /// <param name="satisfyWithAnOpenConnection">
        /// Determines whether the Task should be marked as completed when there is a connection already opened.
        /// </param>
        /// <param name="isReconnection">Determines whether this is a reconnection</param>
        /// <exception cref="SocketException">Throws a SocketException when the connection could not be established with the host</exception>
        /// <exception cref="AuthenticationException" />
        /// <exception cref="UnsupportedProtocolVersionException" />
        private async Task <IConnection> CreateOpenConnection(bool satisfyWithAnOpenConnection, bool isReconnection)
        {
            var concurrentOpenTcs = Volatile.Read(ref _connectionOpenTcs);

            // Try to exit early (cheap) as there could be another thread creating / finishing creating
            if (concurrentOpenTcs != null)
            {
                // There is another thread opening a new connection
                return(await concurrentOpenTcs.Task.ConfigureAwait(false));
            }
            var tcs = new TaskCompletionSource <IConnection>();

            // Try to set the creation task source
            concurrentOpenTcs = Interlocked.CompareExchange(ref _connectionOpenTcs, tcs, null);
            if (concurrentOpenTcs != null)
            {
                // There is another thread opening a new connection
                return(await concurrentOpenTcs.Task.ConfigureAwait(false));
            }

            if (IsClosing)
            {
                return(await FinishOpen(tcs, false, HostConnectionPool.GetNotConnectedException()).ConfigureAwait(false));
            }

            // Before creating, make sure that it's still needed
            // This method is the only one that adds new connections
            // But we don't control the removal, use snapshot
            var connectionsSnapshot = _connections.GetSnapshot();

            if (connectionsSnapshot.Length >= _expectedConnectionLength)
            {
                if (connectionsSnapshot.Length == 0)
                {
                    // Avoid race condition while removing
                    return(await FinishOpen(tcs, false, HostConnectionPool.GetNotConnectedException()).ConfigureAwait(false));
                }
                return(await FinishOpen(tcs, true, null, connectionsSnapshot[0]).ConfigureAwait(false));
            }

            if (satisfyWithAnOpenConnection && !_canCreateForeground)
            {
                // We only care about a single connection; if it's already there, yield it
                connectionsSnapshot = _connections.GetSnapshot();
                if (connectionsSnapshot.Length == 0)
                {
                    // When creating in foreground, it failed
                    return(await FinishOpen(tcs, false, HostConnectionPool.GetNotConnectedException()).ConfigureAwait(false));
                }
                return(await FinishOpen(tcs, false, null, connectionsSnapshot[0]).ConfigureAwait(false));
            }

            HostConnectionPool.Logger.Info("Creating a new connection to {0}", _host.Address);
            IConnection c;

            try
            {
                c = await DoCreateAndOpen(isReconnection).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                HostConnectionPool.Logger.Info("Connection to {0} could not be created: {1}", _host.Address, ex);
                return(await FinishOpen(tcs, true, ex).ConfigureAwait(false));
            }

            if (IsClosing)
            {
                HostConnectionPool.Logger.Info("Connection to {0} opened successfully but pool #{1} was being closed",
                                               _host.Address, GetHashCode());
                c.Dispose();
                return(await FinishOpen(tcs, false, HostConnectionPool.GetNotConnectedException()).ConfigureAwait(false));
            }

            var newLength = _connections.AddNew(c);

            HostConnectionPool.Logger.Info("Connection to {0} opened successfully, pool #{1} length: {2}",
                                           _host.Address, GetHashCode(), newLength);

            if (IsClosing)
            {
                // We didn't use a CAS operation, so it's possible that the pool was being closed while adding the new
                // connection; in that case we should remove it.
                HostConnectionPool.Logger.Info("Connection to {0} opened successfully and added to the pool #{1} but it was being closed",
                                               _host.Address, GetHashCode());
                _connections.Remove(c);
                c.Dispose();
                return(await FinishOpen(tcs, false, HostConnectionPool.GetNotConnectedException()).ConfigureAwait(false));
            }

            return(await FinishOpen(tcs, true, null, c).ConfigureAwait(false));
        }
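CreateOpenConnection leans on a FinishOpen helper that is not shown: it completes the shared TaskCompletionSource (so every waiter gets the same outcome) and, when asked, clears _connectionOpenTcs so a later call can start a fresh attempt. A hypothetical sketch of that single-flight pattern, with the signature inferred from the call sites above and everything else assumed:

        // Hypothetical helper; the real driver's FinishOpen may differ.
        private async Task<IConnection> FinishOpen(
            TaskCompletionSource<IConnection> tcs, bool clearTcs, Exception ex, IConnection c = null)
        {
            if (clearTcs)
            {
                // Let the next caller begin a new open attempt.
                Interlocked.Exchange(ref _connectionOpenTcs, null);
            }
            if (ex != null)
            {
                tcs.TrySetException(ex);   // all awaiting callers observe the same failure
            }
            else
            {
                tcs.TrySetResult(c);       // all awaiting callers observe the same connection
            }
            return await tcs.Task.ConfigureAwait(false);
        }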
            protected override void DrainLoop()
            {
                int missed      = 1;
                var downstream  = this.downstream;
                var delayErrors = this.delayErrors;


                for (; ;)
                {
                    for (; ;)
                    {
                        if (Volatile.Read(ref disposed))
                        {
                            var q = GetQueue();
                            if (q != null)
                            {
                                while (q.TryDequeue(out var _))
                                {
                                    ;
                                }
                            }

                            break;
                        }
                        else
                        {
                            if (!delayErrors)
                            {
                                var ex = Volatile.Read(ref errors);
                                if (ex != null)
                                {
                                    Volatile.Write(ref disposed, true);
                                    downstream.OnError(ex);
                                    base.Dispose();
                                    DisposeAll();
                                    continue;
                                }
                            }

                            bool d     = Volatile.Read(ref active) == 0;
                            var  q     = GetQueue();
                            var  v     = default(T);
                            bool empty = q == null || !q.TryDequeue(out v);

                            if (d && empty)
                            {
                                var ex = Volatile.Read(ref errors);
                                if (ex != null)
                                {
                                    downstream.OnError(ex);
                                }
                                else
                                {
                                    downstream.OnCompleted();
                                }
                                Volatile.Write(ref disposed, true);
                                base.Dispose();
                                DisposeAll();
                                continue;
                            }

                            if (empty)
                            {
                                break;
                            }

                            downstream.OnNext(v);
                        }
                    }
                    missed = Interlocked.Add(ref wip, -missed);
                    if (missed == 0)
                    {
                        break;
                    }
                }
            }
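DrainLoop is the serialized half of a classic drain protocol: only the caller that moves the wip counter from 0 to 1 runs the loop, and the trailing Interlocked.Add hands over any work that was signalled while it ran. A hedged sketch of the entry point that usually accompanies such a loop (the Drain name and shape are assumed, not taken from the snippet):

            // Hypothetical entry point pairing with DrainLoop above.
            internal void Drain()
            {
                // Only the thread that transitions wip from 0 to 1 enters the loop;
                // concurrent callers merely record that more work has arrived.
                if (Interlocked.Increment(ref wip) == 1)
                {
                    DrainLoop();
                }
            }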
        /// <summary>
        /// Removes and disposes the first occurrence of a disposable from the <see cref="CompositeDisposable"/>.
        /// </summary>
        /// <param name="item">Disposable to remove.</param>
        /// <returns>true if found; false otherwise.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="item"/> is <c>null</c>.</exception>
        public bool Remove(IDisposable item)
        {
            if (item == null)
            {
                throw new ArgumentNullException(nameof(item));
            }

            lock (_gate)
            {
                // This composite was already disposed; if the item was in there,
                // it has already been removed/disposed.
                if (_disposed)
                {
                    return(false);
                }

                //
                // List<T> doesn't shrink the size of the underlying array but does collapse the array
                // by copying the tail one position to the left of the removal index. We don't need
                // index-based lookup but only ordering for sequential disposal. So, instead of spending
                // cycles on the Array.Copy imposed by Remove, we use a null sentinel value. We also
                // do manual Swiss cheese detection to shrink the list if there's a lot of holes in it.
                //

                // read fields as infrequently as possible
                var current = _disposables;

                var i = current.IndexOf(item);
                if (i < 0)
                {
                    // not found, just return
                    return(false);
                }

                current[i] = null;

                if (current.Capacity > ShrinkThreshold && _count < current.Capacity / 2)
                {
                    var fresh = new List <IDisposable>(current.Capacity / 2);

                    foreach (var d in current)
                    {
                        if (d != null)
                        {
                            fresh.Add(d);
                        }
                    }

                    _disposables = fresh;
                }

                // make sure the Count property sees an atomic update
                Volatile.Write(ref _count, _count - 1);
            }

            // if we get here, the item was found and removed from the list
            // just dispose it and report success

            item.Dispose();

            return(true);
        }
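A short usage sketch of Remove, assuming the System.Reactive CompositeDisposable shown above; the behavior follows the doc comment (a removed item is disposed immediately and the method reports whether it was found):

using System;
using System.Reactive.Disposables;

class RemoveDemo
{
    static void Main()
    {
        var composite = new CompositeDisposable();
        var d = Disposable.Create(() => Console.WriteLine("disposed"));

        composite.Add(d);

        // Finds the item, disposes it, and returns true.
        Console.WriteLine(composite.Remove(d)); // prints "disposed", then "True"

        // The item is gone now, so nothing is found.
        Console.WriteLine(composite.Remove(d)); // prints "False"
    }
}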
 public void MakeReadOnly()
 {
     EnsureNotDisposed();
     Volatile.Write(ref _readOnly, true);
 }
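IsReadOnly and MakeReadOnly form a one-way latch: once the volatile bool is set it is never cleared, so any thread that observes true can treat the object as frozen. A self-contained sketch of the pattern (the FreezableSettings type is invented for illustration):

using System;
using System.Threading;

public sealed class FreezableSettings
{
    private bool _readOnly;
    private string _endpoint = "http://localhost"; // placeholder value

    public bool IsReadOnly() => Volatile.Read(ref _readOnly);

    public void MakeReadOnly() => Volatile.Write(ref _readOnly, true);

    public void SetEndpoint(string value)
    {
        // Mutation is rejected on every thread that observes the latch.
        if (Volatile.Read(ref _readOnly))
        {
            throw new InvalidOperationException("Settings are read-only.");
        }
        _endpoint = value;
    }
}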
            private async Task UpdateLastParsedDocumentAsync(Solution newSolution, CancellationToken cancellationToken)
            {
                // _lastProcessedDocument is only updated on the same sequential queue, so no lock is needed to read it
                var lastDocument = Volatile.Read(ref _lastProcessedDocument);

                if (lastDocument == null)
                {
                    return;
                }

                var document = newSolution.GetDocument(lastDocument.Id);

                if (document == null)
                {
                    // The document no longer exists. Reset it to null; if somebody calls us, we will answer using the lexer.
                    ResetLastParsedDocument();
                    return;
                }

                // it is already updated. nothing to do here.
                if (lastDocument == document)
                {
                    return;
                }

                var lastParsedText = await lastDocument.GetTextAsync(cancellationToken).ConfigureAwait(false);

                var lastParsedSnapshot = lastParsedText.FindCorrespondingEditorTextSnapshot();

                var newText = await document.GetTextAsync(cancellationToken).ConfigureAwait(false);

                var newSnapshot = newText.FindCorrespondingEditorTextSnapshot();

                if (newSnapshot == null)
                {
                    // It's possible that we're seeing a notification for an update that happened
                    // just before the file was opened, and so the document we're given is still the
                    // old one.
                    return;
                }

#if DEBUG
                // this must exist since we are holding it in the field.
                Contract.ThrowIfNull(lastParsedSnapshot);
#endif
                if (lastParsedSnapshot == newSnapshot)
                {
                    // update document to new snapshot with same content
                    lock (_gate)
                    {
                        _lastProcessedDocument = document;
                    }
                }
                else
                {
                    // This workspace change must have also implicitly changed the text of our file. This can happen
                    // if it's a linked file (and we are observing the non-active linked file changing before our own active file)
                    // or some other workspace change (say a SolutionChanged) caused a text edit to happen and we didn't process
                    // it directly. In that case, requeue a parse. This might be a redundant parse in the linked file case
                    // since we might also get a DocumentChanged event for our ID. It's fine.
                    ProcessIfThisDocument(newSolution, document.Id);
                }
            }
Example #20
 private void SetStatus(int newStatus)
 {
     Volatile.Write(ref _statusDotNotCallMeDirectly, newStatus);
 }
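CurrentStatus and SetStatus (shown earlier and here) only provide visibility; when two threads can race to change the status, the plain write is usually replaced by a compare-exchange so that exactly one transition wins. A hedged sketch of such a companion helper, assuming the same _statusDotNotCallMeDirectly backing field:

 // Hypothetical companion to SetStatus: attempts an atomic expectedStatus -> newStatus transition.
 private bool TrySetStatus(int expectedStatus, int newStatus)
 {
     // Only the single thread whose compare-exchange observed expectedStatus gets true back.
     return Interlocked.CompareExchange(
         ref _statusDotNotCallMeDirectly, newStatus, expectedStatus) == expectedStatus;
 }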
Example #21
 /// <summary>
 ///     Sets or clears the TASK_STATE_WAIT_COMPLETION_NOTIFICATION state bit.
 ///     The debugger sets this bit to aid it in "stepping out" of an async method body.
 ///     If enabled is true, this must only be called on a task that has not yet been completed.
 ///     If enabled is false, this may be called on completed tasks.
 ///     Either way, it should only be used for promise-style tasks.
 /// </summary>
 /// <param name="enabled">true to set the bit; false to unset the bit.</param>
 internal void SetNotificationForWaitCompletion(bool enabled)
 {
     Contract.Assert(IsPromiseTask, "Should only be used for promise-style tasks"); // hasn't been vetted on other kinds as there hasn't been a need
     Volatile.Write(ref _waitNotificationEnabled, enabled ? 1 : 0);
 }
            private WorkItem LocalPopCore()
            {
                while (true)
                {
                    int tail = m_tailIndex;
                    if (m_headIndex >= tail)
                    {
                        return null;
                    }

                    // Decrement the tail using a fence so the subsequent read can't be reordered before it.
                    tail -= 1;
                    Interlocked.Exchange(ref m_tailIndex, tail);

                    // If there is no interaction with a take, we can head down the fast path.
                    if (m_headIndex <= tail)
                    {
                        int idx = tail & m_mask;
                        WorkItem obj = Volatile.Read(ref m_array[idx]);

                        // Check for nulls in the array.
                        if (obj == null) continue;

                        m_array[idx] = null;
                        return obj;
                    }
                    else
                    {
                        // Interaction with takes: 0 or 1 elements left.
                        bool lockTaken = false;
                        try
                        {
                            m_foreignLock.Enter(ref lockTaken);

                            if (m_headIndex <= tail)
                            {
                                // Element still available. Take it.
                                int idx = tail & m_mask;
                                WorkItem obj = Volatile.Read(ref m_array[idx]);

                                // Check for nulls in the array.
                                if (obj == null) continue;

                                m_array[idx] = null;
                                return obj;
                            }
                            else
                            {
                                // If we encountered a race condition and element was stolen, restore the tail.
                                m_tailIndex = tail + 1;
                                return null;
                            }
                        }
                        finally
                        {
                            if (lockTaken)
                                m_foreignLock.Exit(useMemoryBarrier: false);
                        }
                    }
                }
            }
 protected ConcurrentQueue <T> GetQueue()
 {
     return(Volatile.Read(ref queue));
 }
Example #24
        private bool AddInternal(T item, int hashcode, bool acquireLock)
        {
            while (true)
            {
                var tables = _tables;

                GetBucketAndLockNo(hashcode, out int bucketNo, out int lockNo, tables.Buckets.Length, tables.Locks.Length);

                var resizeDesired = false;
                var lockTaken     = false;
                try
                {
                    if (acquireLock)
                    {
                        Monitor.Enter(tables.Locks[lockNo], ref lockTaken);
                    }

                    // If the table just got resized, we may not be holding the right lock, and must retry.
                    // This should be a rare occurrence.
                    if (tables != _tables)
                    {
                        continue;
                    }

                    // Try to find this item in the bucket
                    Node previous = null;
                    for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
                    {
                        Debug.Assert((previous == null && current == tables.Buckets[bucketNo]) || previous.Next == current);
                        if (hashcode == current.Hashcode && _comparer.Equals(current.Item, item))
                        {
                            return(false);
                        }
                        previous = current;
                    }

                    // The item was not found in the bucket. Insert the new item.
                    Volatile.Write(ref tables.Buckets[bucketNo], new Node(item, hashcode, tables.Buckets[bucketNo]));
                    checked
                    {
                        tables.CountPerLock[lockNo]++;
                    }

                    //
                    // If the number of elements guarded by this lock has exceeded the budget, resize the bucket table.
                    // It is also possible that GrowTable will increase the budget but won't resize the bucket table.
                    // That happens if the bucket table is found to be poorly utilized due to a bad hash function.
                    //
                    if (tables.CountPerLock[lockNo] > _budget)
                    {
                        resizeDesired = true;
                    }
                }
                finally
                {
                    if (lockTaken)
                    {
                        Monitor.Exit(tables.Locks[lockNo]);
                    }
                }

                //
                // The fact that we got here means that we just performed an insertion. If necessary, we will grow the table.
                //
                // Concurrency notes:
                // - Notice that we are not holding any locks when calling GrowTable. This is necessary to prevent deadlocks.
                // - As a result, it is possible that GrowTable will be called unnecessarily. But, GrowTable will obtain lock 0
                //   and then verify that the table we passed to it as the argument is still the current table.
                //
                if (resizeDesired)
                {
                    GrowTable(tables);
                }

                return(true);
            }
        }
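AddInternal depends on GetBucketAndLockNo (not included above) to map a hash code to its bucket and to the stripe lock guarding that bucket. A hedged sketch of the usual mapping used by lock-striped hash tables; the exact helper in this codebase may differ:

        // Assumed shape: strip the sign bit, then index buckets and locks by modulo.
        private static void GetBucketAndLockNo(
            int hashcode, out int bucketNo, out int lockNo, int bucketCount, int lockCount)
        {
            bucketNo = (hashcode & 0x7fffffff) % bucketCount; // non-negative bucket index
            lockNo   = bucketNo % lockCount;                  // each bucket is guarded by exactly one stripe lock
        }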
 public override void OnCompleted()
 {
     Volatile.Write(ref done, true);
     Drain();
 }
 public void OnCompleted()
 {
     Volatile.Write(ref done, true);
     Signal();
 }
            protected override void DrainLoop()
            {
                var missed      = 1;
                var downstream  = this.downstream;
                var delayErrors = this.delayErrors;
                var sources     = this.sources;

                for (; ;)
                {
                    if (Volatile.Read(ref disposed))
                    {
                        var q = GetQueue();
                        if (q != null)
                        {
                            while (q.TryDequeue(out var _))
                            {
                                ;
                            }
                        }

                        while (sources.TryDequeue(out var _))
                        {
                            ;
                        }
                    }
                    else
                    {
                        var continueOuter = false;
                        var r             = maxConcurrency;

                        while (Volatile.Read(ref active) < r)
                        {
                            if (Volatile.Read(ref disposed))
                            {
                                continueOuter = true;
                                break;
                            }

                            if (sources.TryDequeue(out var src))
                            {
                                UpstreamNext(src);
                            }
                            else
                            {
                                break;
                            }
                        }

                        if (continueOuter)
                        {
                            continue;
                        }

                        continueOuter = false;

                        var act = Volatile.Read(ref active);

                        for (; ;)
                        {
                            if (Volatile.Read(ref disposed))
                            {
                                continueOuter = true;
                                break;
                            }

                            if (!delayErrors)
                            {
                                var ex = Volatile.Read(ref errors);
                                if (ex != null)
                                {
                                    Volatile.Write(ref disposed, true);
                                    downstream.OnError(ex);
                                    base.Dispose();
                                    DisposeAll();
                                    continueOuter = true;
                                    break;
                                }
                            }

                            bool d     = Volatile.Read(ref done) && Volatile.Read(ref active) == 0;
                            var  q     = GetQueue();
                            var  v     = default(T);
                            bool empty = q == null || !q.TryDequeue(out v);

                            if (d && empty && sources.IsEmpty)
                            {
                                var ex = Volatile.Read(ref errors);
                                if (ex != null)
                                {
                                    downstream.OnError(ex);
                                }
                                else
                                {
                                    downstream.OnCompleted();
                                }
                                Volatile.Write(ref disposed, true);
                                base.Dispose();
                                DisposeAll();
                                break;
                            }

                            if (empty)
                            {
                                break;
                            }

                            downstream.OnNext(v);

                            if (act != Volatile.Read(ref active))
                            {
                                continueOuter = true;
                                break;
                            }
                        }

                        if (continueOuter)
                        {
                            continue;
                        }
                    }
                    missed = Interlocked.Add(ref wip, -missed);
                    if (missed == 0)
                    {
                        break;
                    }
                }
            }
        internal void Run()
        {
            var q = queue;

            for (; ;)
            {
                if (DisposableHelper.IsDisposed(ref upstream))
                {
                    while (q.TryDequeue(out var _))
                    {
                        ;
                    }
                    return;
                }

                var d     = Volatile.Read(ref done);
                var empty = !q.TryDequeue(out var v);

                if (d && empty)
                {
                    var ex = error;
                    try
                    {
                        if (ex != null)
                        {
                            Error(ex);
                        }
                        else
                        {
                            Completed();
                        }
                    }
                    finally
                    {
                        Dispose();
                    }
                    return;
                }

                if (!empty)
                {
                    Interlocked.Decrement(ref wip);
                    var b = false;

                    try
                    {
                        b = Next(v);
                    }
                    catch (Exception ex)
                    {
                        try
                        {
                            Error(ex);
                        }
                        finally
                        {
                            Dispose();
                        }
                    }

                    if (b)
                    {
                        continue;
                    }
                    else
                    {
                        try
                        {
                            Completed();
                        }
                        finally
                        {
                            Dispose();
                        }
                    }
                }

                if (Volatile.Read(ref wip) == 0)
                {
                    lock (this)
                    {
                        while (Volatile.Read(ref wip) == 0)
                        {
                            Monitor.Wait(this);
                        }
                    }
                }
            }
        }
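Run parks in Monitor.Wait whenever the wip counter says there is nothing queued, so the producing side has to both enqueue and wake the loop. A hypothetical sketch of that producer half (the OnNext name is assumed; queue and wip are the fields used by Run above):

        // Hypothetical producer side pairing with Run(): enqueue, count the item, wake the consumer.
        public void OnNext(T item)
        {
            queue.Enqueue(item);
            Interlocked.Increment(ref wip);
            lock (this)
            {
                // Wake Run() if it is parked in Monitor.Wait waiting for wip to become non-zero.
                Monitor.Pulse(this);
            }
        }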
 internal MergeManyMaxObserver(IObserver <T> downstream, bool delayErrors, int capacityHint) : base(downstream, delayErrors, capacityHint)
 {
     Volatile.Write(ref active, 1);
 }
Example #30
 private static void BeforeVolatile()
 {
     Volatile v = new Volatile();
     v.BeforeVolatileUsage();
 }
Example #31
 /// <inheritdoc />
 public void SetValue(double newValue)
 {
     Volatile.Write(ref _value, newValue);
 }
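Volatile.Write on a double is normally paired with a Volatile.Read getter so readers always see the latest published value. A minimal sketch of the matching getter, assuming the same _value field:

 public double GetValue()
 {
     // Acquire read that pairs with the release write in SetValue.
     return Volatile.Read(ref _value);
 }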