//---------------------------------------------------------------------------------------
// Straightforward IEnumerator<T> methods.
//

/// <summary>
/// Advances to the next element that survives the Take/Skip, operating in two phases:
/// a one-time "search" phase that races all partitions to publish their first '_count'
/// element indices into the shared index list, then a per-call "yield" phase that uses
/// the resulting 'count'-th smallest index as the Take/Skip cutoff.
/// </summary>
/// <param name="currentElement">Receives the next result element when true is returned.</param>
/// <param name="currentKey">Receives the order key of the next result element.</param>
/// <returns>true if an element was produced; false when this partition is exhausted.</returns>
internal override bool MoveNext([MaybeNullWhen(false), AllowNull] ref TResult currentElement, ref TKey currentKey)
{
    Debug.Assert(_sharedIndices != null);

    // If the buffer has not been created, we will populate it lazily on demand.
    if (_buffer == null && _count > 0)
    {
        // Create a buffer, but don't publish it yet (in case of exception).
        List<Pair<TResult, TKey>> buffer = new List<Pair<TResult, TKey>>();

        // Enter the search phase. In this phase, all partitions race to populate
        // the shared indices with their first 'count' contiguous elements.
        TResult current = default(TResult)!;
        TKey index = default(TKey)!;
        int i = 0; // counter to help with cancellation
        while (buffer.Count < _count && _source.MoveNext(ref current!, ref index))
        {
            // Poll the cancellation token periodically rather than on every element.
            if ((i++ & CancellationState.POLL_INTERVAL) == 0)
            {
                _cancellationToken.ThrowIfCancellationRequested();
            }

            // Add the current element to our buffer.
            buffer.Add(new Pair<TResult, TKey>(current, index));

            // Now we will try to insert our index into the shared indices list, quitting if
            // our index is greater than all of the indices already inside it.
            lock (_sharedIndices)
            {
                if (!_sharedIndices.Insert(index))
                {
                    // We have read past the maximum index. We can move to the barrier now.
                    break;
                }
            }
        }

        // Before exiting the search phase, we will synchronize with others. This is a barrier.
        // Signal-then-wait ensures no partition proceeds until all have finished searching,
        // at which point _sharedIndices holds the authoritative 'count'-th index.
        _sharedBarrier.Signal();
        _sharedBarrier.Wait(_cancellationToken);

        // Publish the buffer and set the index to just before the 1st element.
        _buffer = buffer;
        _bufferIndex = new Shared<int>(-1);
    }

    // Now either enter (or continue) the yielding phase. As soon as we reach this, we know the
    // index of the 'count'-th input element.
    if (_take)
    {
        Debug.Assert(_buffer != null && _bufferIndex != null);

        // In the case of a Take, we will yield each element from our buffer for which
        // the element is lesser than the 'count'-th index found.
        if (_count == 0 || _bufferIndex.Value >= _buffer.Count - 1)
        {
            return false;
        }

        // Increment the index, and remember the values.
        ++_bufferIndex.Value;
        currentElement = _buffer[_bufferIndex.Value].First;
        currentKey = _buffer[_bufferIndex.Value].Second;

        // Only yield the element if its index is less than or equal to the max index.
        return _sharedIndices.Count == 0
            || _keyComparer.Compare(_buffer[_bufferIndex.Value].Second, _sharedIndices.MaxValue) <= 0;
    }
    else
    {
        TKey minKey = default(TKey)!;

        // If the count to skip was greater than 0, look at the buffer.
        if (_count > 0)
        {
            // If there wasn't enough input to skip, return right away.
            if (_sharedIndices.Count < _count)
            {
                return false;
            }

            minKey = _sharedIndices.MaxValue;
            Debug.Assert(_buffer != null && _bufferIndex != null);

            // In the case of a skip, we must skip over elements whose index is lesser than the
            // 'count'-th index found. Once we've exhausted the buffer, we must go back and continue
            // enumerating the data source until it is empty.
            if (_bufferIndex.Value < _buffer.Count - 1)
            {
                for (_bufferIndex.Value++; _bufferIndex.Value < _buffer.Count; _bufferIndex.Value++)
                {
                    // If the current buffered element's index is greater than the 'count'-th index,
                    // we will yield it as a result.
                    if (_keyComparer.Compare(_buffer[_bufferIndex.Value].Second, minKey) > 0)
                    {
                        currentElement = _buffer[_bufferIndex.Value].First;
                        currentKey = _buffer[_bufferIndex.Value].Second;
                        return true;
                    }
                }
            }
        }

        // Lastly, so long as our input still has elements, they will be yieldable.
        if (_source.MoveNext(ref currentElement!, ref currentKey))
        {
            Debug.Assert(_count <= 0 || _keyComparer.Compare(currentKey, minKey) > 0,
                "expected remaining element indices to be greater than smallest");
            return true;
        }
    }

    return false;
}
/// <summary>
/// Moves the enumerator to the next result, or returns false if there are no more results to yield.
/// </summary>
public override bool MoveNext()
{
    if (m_initialized)
    {
        // If the producer heap is empty, we are done; a prior MoveNext() call
        // has already reported false.
        if (m_producerHeap.Count == 0)
        {
            return false;
        }

        // The producer whose element the merge yielded last must now supply its
        // successor; once the heap is updated, its front is the next producer to yield.
        int activeProducer = m_producerHeap.MaxValue.ProducerIndex;

        Pair<TKey, TOutput> successor = default(Pair<TKey, TOutput>);
        bool haveSuccessor =
            TryGetPrivateElement(activeProducer, ref successor)
            || TryWaitForElement(activeProducer, ref successor);

        if (haveSuccessor)
        {
            // Fold the received element back into the heap and the helper array.
            m_producerHeap.ReplaceMax(new Producer<TKey>(successor.First, activeProducer));
            m_producerNextElement[activeProducer] = successor.Second;
        }
        else
        {
            // If this producer stopped because of an exception, cancellation has
            // already been initiated; propagate that exception now.
            ThrowIfInTearDown();

            // Otherwise the producer is simply finished — retire it from the heap.
            m_producerHeap.RemoveMax();
        }
    }
    else
    {
        //
        // First call: wait until every producer has contributed at least one element.
        // Because order indices are increasing, yielding cannot begin until each
        // producer is represented in the heap.
        //
        m_initialized = true;

        for (int producerIndex = 0; producerIndex < m_mergeHelper.m_partitions.PartitionCount; producerIndex++)
        {
            Pair<TKey, TOutput> firstPair = default(Pair<TKey, TOutput>);

            if (!TryWaitForElement(producerIndex, ref firstPair))
            {
                // No first element: if the producer failed with an exception,
                // cancellation is underway — surface the exception here.
                ThrowIfInTearDown();
                continue;
            }

            // Seed the producer heap and its helper array with this producer's element.
            m_producerHeap.Insert(new Producer<TKey>(firstPair.First, producerIndex));
            m_producerNextElement[producerIndex] = firstPair.Second;
        }
    }

    return m_producerHeap.Count > 0;
}
//---------------------------------------------------------------------------------------
// Straightforward IEnumerator<T> methods.
//

// Advances to the next element surviving the Take/Skip. Works in two phases: a one-time
// "search" phase in which all partitions race to publish their first 'count' element
// indices into the shared index list (synchronized by a barrier), and a per-call "yield"
// phase that uses the resulting 'count'-th smallest index as the Take/Skip cutoff.
// Returns true and fills currentElement/currentKey when an element is produced; false
// when this partition is exhausted.
internal override bool MoveNext(ref TResult currentElement, ref int currentKey)
{
    Contract.Assert(m_sharedIndices != null);

    // If the buffer has not been created, we will populate it lazily on demand.
    if (m_buffer == null && m_count > 0)
    {
        // Create a buffer, but don't publish it yet (in case of exception).
        List<Pair<TResult, int>> buffer = new List<Pair<TResult, int>>();

        // Enter the search phase. In this phase, all partitions race to populate
        // the shared indices with their first 'count' contiguous elements.
        TResult current = default(TResult);
        int index = default(int);
        int i = 0; //counter to help with cancellation
        while (buffer.Count < m_count && m_source.MoveNext(ref current, ref index))
        {
            // Poll cancellation only every POLL_INTERVAL-masked iteration, not per element.
            if ((i++ & CancellationState.POLL_INTERVAL) == 0)
            {
                CancellationState.ThrowIfCanceled(m_cancellationToken);
            }

            // Add the current element to our buffer.
            // @TODO: @PERF: @BUG#414: some day we can optimize this, e.g. if the input is an array,
            // we can always just rescan it later. Could expose this via a "Reset" mechanism.
            buffer.Add(new Pair<TResult, int>(current, index));

            // Now we will try to insert our index into the shared indices list, quitting if
            // our index is greater than all of the indices already inside it.
            lock (m_sharedIndices)
            {
                if (!m_sharedIndices.Insert(index))
                {
                    // We have read past the maximum index. We can move to the barrier now.
                    break;
                }
            }
        }

        // Before exiting the search phase, we will synchronize with others. This is a barrier.
        // Signal-then-wait: no partition proceeds until all have finished searching, after
        // which m_sharedIndices holds the authoritative 'count'-th index.
        m_sharedBarrier.Signal();
        m_sharedBarrier.Wait(m_cancellationToken);

        // Publish the buffer and set the index to just before the 1st element.
        m_buffer = buffer;
        m_bufferIndex = new Shared<int>(-1);
    }

    // Now either enter (or continue) the yielding phase. As soon as we reach this, we know the
    // index of the 'count'-th input element.
    if (m_take)
    {
        // In the case of a Take, we will yield each element from our buffer for which
        // the element is lesser than the 'count'-th index found.
        if (m_count == 0 || m_bufferIndex.Value >= m_buffer.Count - 1)
        {
            return(false);
        }

        // Increment the index, and remember the values.
        ++m_bufferIndex.Value;
        currentElement = m_buffer[m_bufferIndex.Value].First;
        currentKey = m_buffer[m_bufferIndex.Value].Second;

        // Only yield the element if its index is less than or equal to the max index.
        // NOTE(review): -1 appears to be MaxValue's "empty" sentinel here — confirm against
        // the shared index list's implementation.
        int maxIndex = m_sharedIndices.MaxValue;
        return(maxIndex == -1 || m_buffer[m_bufferIndex.Value].Second <= maxIndex);
    }
    else
    {
        int minIndex = -1;

        // If the count to skip was greater than 0, look at the buffer.
        if (m_count > 0)
        {
            // If there wasn't enough input to skip, return right away.
            if (m_sharedIndices.Count < m_count)
            {
                return(false);
            }

            minIndex = m_sharedIndices.MaxValue;

            // In the case of a skip, we must skip over elements whose index is lesser than the
            // 'count'-th index found. Once we've exhausted the buffer, we must go back and continue
            // enumerating the data source until it is empty.
            if (m_bufferIndex.Value < m_buffer.Count - 1)
            {
                for (m_bufferIndex.Value++; m_bufferIndex.Value < m_buffer.Count; m_bufferIndex.Value++)
                {
                    // If the current buffered element's index is greater than the 'count'-th index,
                    // we will yield it as a result.
                    if (m_buffer[m_bufferIndex.Value].Second > minIndex)
                    {
                        currentElement = m_buffer[m_bufferIndex.Value].First;
                        currentKey = m_buffer[m_bufferIndex.Value].Second;
                        return(true);
                    }
                }
            }
        }

        // Lastly, so long as our input still has elements, they will be yieldable.
        if (m_source.MoveNext(ref currentElement, ref currentKey))
        {
            Contract.Assert(currentKey > minIndex, "expected remaining element indices to be greater than smallest");
            return(true);
        }
    }

    return(false);
}