/// <summary>
/// Initializes a RangeWorker over the given set of index ranges, positioned to start
/// searching for work at the range with index <paramref name="nInitialRange"/>.
/// </summary>
/// <param name="ranges">The shared array of index ranges to draw work from.</param>
/// <param name="nInitialRange">Index of the range this worker visits first.</param>
/// <param name="nStep">The loop's step; also the initial chunk size handed out per grab.</param>
internal RangeWorker(IndexRange[] ranges, int nInitialRange, long nStep)
{
    m_indexRanges = ranges;
    m_nCurrentIndexRange = nInitialRange;
    m_nStep = nStep;

    // Work is handed out in chunks that start at one step and double on each
    // successful grab, capped at 16 (0x10) steps per grab.
    m_nIncrementValue = nStep;
    m_nMaxIncrementValue = 0x10L * nStep;
}
/// <summary>
/// Initializes a RangeWorker struct.
/// </summary>
/// <param name="ranges">The shared array of index ranges to draw work from.</param>
/// <param name="nInitialRange">Index of the range this worker visits first.</param>
/// <param name="nStep">The loop's step; also the initial chunk size handed out per grab.</param>
internal RangeWorker(IndexRange[] ranges, int nInitialRange, long nStep)
{
    this.m_indexRanges = ranges;
    this.m_nCurrentIndexRange = nInitialRange;
    this.m_nStep = nStep;

    // Chunks start at one step and grow (doubling per grab elsewhere) up to
    // DEFAULT_LOOP_STRIDE steps per grab.
    this.m_nIncrementValue = nStep;
    this.m_nMaxIncrementValue = Parallel.DEFAULT_LOOP_STRIDE * nStep;
}
/// <summary>
/// Implements the core work search algorithm that will be used for this range worker.
/// </summary>
///
/// Usage pattern is:
///    1) the thread associated with this rangeworker calls FindNewWork
///    2) if we return true, the worker uses the nFromInclusiveLocal and nToExclusiveLocal values
///       to execute the sequential loop
///    3) if we return false it means there is no more work left. It's time to quit.
///
/// On success, [nFromInclusiveLocal, nToExclusiveLocal) is a chunk claimed exclusively by
/// this worker; the chunk size doubles on each successful grab, up to m_nMaxIncrementValue.
internal bool FindNewWork(out long nFromInclusiveLocal, out long nToExclusiveLocal)
{
    // since we iterate over index ranges circularly, we will use the
    // count of visited ranges as our exit condition
    int numIndexRangesToVisit = m_indexRanges.Length;

    do
    {
        // local snap to save array access bounds checks in places where we only read fields
        IndexRange currentRange = m_indexRanges[m_nCurrentIndexRange];

        if (currentRange.m_bRangeFinished == 0)
        {
            // Lazily allocate the shared offset cell for this range; CompareExchange
            // ensures exactly one racing worker's allocation wins.
            if (m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset == null)
            {
                Interlocked.CompareExchange(ref m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset, new Shared <long>(0), null);
            }

            // this access needs to be on the array slot
            long nMyOffset;
            if (IntPtr.Size == 4 && _use32BitCurrentIndex)
            {
                // In 32-bit processes, we prefer to use 32-bit interlocked operations, to avoid the possibility of doing
                // a 64-bit interlocked when the target value crosses a cache line, as that can be super expensive.
                // We use the first 32 bits of the Int64 index in such cases.
                unsafe
                {
                    fixed(long *indexPtr = &m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset.Value)
                    {
                        // Subtracting the increment converts Add's post-add result into
                        // the offset at the start of the chunk we just claimed.
                        nMyOffset = Interlocked.Add(ref *(int *)indexPtr, (int)m_nIncrementValue) - m_nIncrementValue;
                    }
                }
            }
            else
            {
                // Atomically claim a chunk: the pre-add value is our exclusive starting offset.
                nMyOffset = Interlocked.Add(ref m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset.Value, m_nIncrementValue) - m_nIncrementValue;
            }

            if (currentRange.m_nToExclusive - currentRange.m_nFromInclusive > nMyOffset)
            {
                // we found work

                nFromInclusiveLocal = currentRange.m_nFromInclusive + nMyOffset;
                nToExclusiveLocal = nFromInclusiveLocal + m_nIncrementValue;

                // Check for going past end of range, or wrapping
                if ((nToExclusiveLocal > currentRange.m_nToExclusive) || (nToExclusiveLocal < currentRange.m_nFromInclusive))
                {
                    nToExclusiveLocal = currentRange.m_nToExclusive;
                }

                // We will double our unit of increment until it reaches the maximum.
                if (m_nIncrementValue < m_nMaxIncrementValue)
                {
                    m_nIncrementValue *= 2;
                    if (m_nIncrementValue > m_nMaxIncrementValue)
                    {
                        m_nIncrementValue = m_nMaxIncrementValue;
                    }
                }

                return(true);
            }
            else
            {
                // this index range is completed, mark it so that others can skip it quickly
                Interlocked.Exchange(ref m_indexRanges[m_nCurrentIndexRange].m_bRangeFinished, 1);
            }
        }

        // move on to the next index range, in circular order.
        m_nCurrentIndexRange = (m_nCurrentIndexRange + 1) % m_indexRanges.Length;
        numIndexRangesToVisit--;

    } while (numIndexRangesToVisit > 0);

    // we've visited all index ranges possible => there's no work remaining
    nFromInclusiveLocal = 0;
    nToExclusiveLocal = 0;

    return(false);
}
/// <summary>
/// Implements the core work search algorithm that will be used for this range worker.
/// </summary>
///
/// Usage pattern is:
///    1) the thread associated with this rangeworker calls FindNewWork
///    2) if we return true, the worker uses the nFromInclusiveLocal and nToExclusiveLocal values
///       to execute the sequential loop
///    3) if we return false it means there is no more work left. It's time to quit.
///
/// On success, [nFromInclusiveLocal, nToExclusiveLocal) is a chunk claimed exclusively by
/// this worker; the chunk size doubles on each successful grab, up to _nMaxIncrementValue.
internal bool FindNewWork(out long nFromInclusiveLocal, out long nToExclusiveLocal)
{
    // since we iterate over index ranges circularly, we will use the
    // count of visited ranges as our exit condition
    int numIndexRangesToVisit = _indexRanges.Length;

    do
    {
        // local snap to save array access bounds checks in places where we only read fields
        IndexRange currentRange = _indexRanges[_nCurrentIndexRange];

        if (currentRange._bRangeFinished == 0)
        {
            // Lazily allocate the shared offset cell for this range; CompareExchange
            // ensures exactly one racing worker's allocation wins.
            if (_indexRanges[_nCurrentIndexRange]._nSharedCurrentIndexOffset == null)
            {
                Interlocked.CompareExchange(ref _indexRanges[_nCurrentIndexRange]._nSharedCurrentIndexOffset, new Box <long>(0), null);
            }

            // this access needs to be on the array slot
            // Atomically claim a chunk: subtracting the increment converts Add's
            // post-add result into the offset at the start of the chunk we claimed.
            long nMyOffset = Interlocked.Add(ref _indexRanges[_nCurrentIndexRange]._nSharedCurrentIndexOffset.Value, _nIncrementValue) - _nIncrementValue;

            if (currentRange._nToExclusive - currentRange._nFromInclusive > nMyOffset)
            {
                // we found work

                nFromInclusiveLocal = currentRange._nFromInclusive + nMyOffset;
                nToExclusiveLocal = nFromInclusiveLocal + _nIncrementValue;

                // Check for going past end of range, or wrapping
                if ((nToExclusiveLocal > currentRange._nToExclusive) || (nToExclusiveLocal < currentRange._nFromInclusive))
                {
                    nToExclusiveLocal = currentRange._nToExclusive;
                }

                // We will double our unit of increment until it reaches the maximum.
                if (_nIncrementValue < _nMaxIncrementValue)
                {
                    _nIncrementValue *= 2;
                    if (_nIncrementValue > _nMaxIncrementValue)
                    {
                        _nIncrementValue = _nMaxIncrementValue;
                    }
                }

                return(true);
            }
            else
            {
                // this index range is completed, mark it so that others can skip it quickly
                Interlocked.Exchange(ref _indexRanges[_nCurrentIndexRange]._bRangeFinished, 1);
            }
        }

        // move on to the next index range, in circular order.
        _nCurrentIndexRange = (_nCurrentIndexRange + 1) % _indexRanges.Length;
        numIndexRangesToVisit--;

    } while (numIndexRangesToVisit > 0);

    // we've visited all index ranges possible => there's no work remaining
    nFromInclusiveLocal = 0;
    nToExclusiveLocal = 0;

    return(false);
}