/// <summary>
/// Advances the segment at the top of the priority queue to its next raw key,
/// folds the bytes consumed by that read into the overall merge progress, and
/// then restores the heap invariant: re-sift the top if the segment has more
/// data, or pop and close it if it is exhausted.
/// </summary>
/// <param name="reader">the segment currently at the top of the queue</param>
/// <exception cref="System.IO.IOException"/>
private void AdjustPriorityQueue(Merger.Segment <K, V> reader)
{
    // Measure how many bytes this NextRawKey() call consumed by sampling the
    // underlying reader's byte counter before and after.
    long bytesBefore = reader.GetReader().bytesRead;
    bool segmentHasMore = reader.NextRawKey();
    long bytesAfter = reader.GetReader().bytesRead;

    // Fold the consumed bytes into the merge-wide progress indicator.
    totalBytesProcessed += bytesAfter - bytesBefore;
    mergeProgress.Set(totalBytesProcessed * progPerByte);

    if (!segmentHasMore)
    {
        // Segment is drained: remove it from the queue and release it.
        Pop();
        reader.Close();
        return;
    }
    // Segment still has data: re-sift it to its correct heap position.
    AdjustTop();
}
/// <summary>
/// Reports whether another key/value record is available across the ordered
/// list of segments. The next record is pre-read and cached in
/// currentKey/currentValue (with hasMore set) so that repeated HasNext()
/// calls before the matching next() do not trigger extra reads.
/// </summary>
/// <returns>true if a record is available; false once every segment is exhausted</returns>
/// <exception cref="System.IO.IOException"/>
public virtual bool HasNext()
{
    // Once the final segment has hit EOF, all later calls short-circuit.
    if (lastSegmentEOF)
    {
        return(false);
    }
    // We read the next KV from the cache to decide if there is any left.
    // Since hasNext can be called several times before the actual call to
    // next(), we use hasMore to avoid extra reads. hasMore is set to false
    // when the user actually consumes this record in next()
    if (hasMore)
    {
        return(true);
    }
    Merger.Segment <K, V> seg = segmentList[readSegmentIndex];
    // Mark the current position. This would be set to currentKVOffset
    // when the user consumes this record in next().
    nextKVOffset = (int)seg.GetActualPosition();
    if (seg.NextRawKey())
    {
        // Cache the freshly read record so next() can return it without
        // another read.
        currentKey = seg.GetKey();
        seg.GetValue(currentValue);
        hasMore = true;
        return(true);
    }
    else
    {
        // Current segment is drained; only disk-backed segments hold an
        // underlying reader that must be closed here.
        if (!seg.InMemory())
        {
            seg.CloseReader();
        }
    }
    // If this is the last segment, mark the lastSegmentEOF flag and return
    if (readSegmentIndex == segmentList.Count - 1)
    {
        nextKVOffset = -1;
        lastSegmentEOF = true;
        return(false);
    }
    // Advance to the beginning of the next segment in the list.
    nextKVOffset = 0;
    readSegmentIndex++;
    Merger.Segment <K, V> nextSegment = segmentList[readSegmentIndex];
    // We possibly are moving from a memory segment to a disk segment.
    // Reset so that we do not corrupt the in-memory segment buffer.
    // See HADOOP-5494
    if (!nextSegment.InMemory())
    {
        currentValue.Reset(currentDiskValue.GetData(), currentDiskValue.GetLength());
        nextSegment.Init(null);
    }
    if (nextSegment.NextRawKey())
    {
        currentKey = nextSegment.GetKey();
        nextSegment.GetValue(currentValue);
        hasMore = true;
        return(true);
    }
    else
    {
        // A freshly opened segment is expected to contain at least one
        // record; an empty one indicates a corrupt or inconsistent merge.
        throw new IOException("New segment did not have even one K/V");
    }
}