private void INTERNAL_segmentWalkForNextKey(
    IComparable<RecordKey> startkeytest,
    bool direction_is_forward,
    ISortedSegment curseg_raw,
    RangeKey curseg_rangekey,
    IScannableDictionary<RecordKey, RecordData> handledIndexRecords,
    int maxgen,
    IScannableDictionary<RecordKey, RecordData> recordsBeingAssembled,
    bool equal_ok,
    SegmentWalkStats stats) {

    // TODO: convert all ISortedSegments to be IScannable
    IScannable<RecordKey, RecordUpdate> curseg = (IScannable<RecordKey, RecordUpdate>)curseg_raw;

    stats.segmentWalkInvocations++;

    // first, look in this segment for a next-key, **IF** it may contain one
    if (curseg_rangekey.directlyContainsKey(startkeytest)) {
        // we need to keep scanning until we find a live record, because we need all the
        // deletion tombstones between startkey and the next live record.
        IEnumerable<KeyValuePair<RecordKey, RecordUpdate>> seg_scanner;
        if (direction_is_forward) {
            seg_scanner = curseg.scanForward(
                new ScanRange<RecordKey>(startkeytest, new ScanRange<RecordKey>.maxKey(), null));
        } else {
            seg_scanner = curseg.scanBackward(
                new ScanRange<RecordKey>(new ScanRange<RecordKey>.minKey(), startkeytest, null));
        }

        foreach (var kvp in seg_scanner) {
            if (!equal_ok) {
                // ">" test instead of ">="
                if (startkeytest.CompareTo(kvp.Key) == 0) {
                    continue;
                }
            }

            RecordData partial_record;
            stats.rowAccumulate_TryGet++;
            if (!recordsBeingAssembled.TryGetValue(kvp.Key, out partial_record)) {
                partial_record = new RecordData(RecordDataState.NOT_PROVIDED, kvp.Key);
                recordsBeingAssembled[kvp.Key] = partial_record;
            } else {
                stats.rowDuplicatesAppeared++;
            }
            partial_record.applyUpdate(kvp.Value);
            stats.rowUpdatesApplied++;

#if DEBUG_SEGMENT_ACCUMULATION
            for (int depth = 10; depth > maxgen; depth--) { Console.Write(" "); }
            Console.WriteLine("accumulated update: {0}", kvp);
#endif

            if (partial_record.State != RecordDataState.DELETED) {
                // we accumulated at least one live record, so stop adding potential records
                break;
            }
        }
    }

    // find all generation range references that are relevant for this key,
    // and make a note of which ones are "current"
    if (curseg_rangekey.directlyContainsKey(GEN_KEY_PREFIX)) {
        BDSkipList<RecordKey, RecordUpdate> todo_list = new BDSkipList<RecordKey, RecordUpdate>();

        for (int i = maxgen - 1; i >= 0; i--) {
            stats.segmentRangeRowScansPerformed++;
            foreach (KeyValuePair<RecordKey, RecordUpdate> rangerow in
                     RangeKey.findAllEligibleRangeRows(curseg, startkeytest, i, stats)) {
                // see if it is new for our handledIndexRecords dataset
                RecordData partial_rangedata;
                stats.segmentAccumulate_TryGet++;
                if (!handledIndexRecords.TryGetValue(rangerow.Key, out partial_rangedata)) {
                    partial_rangedata = new RecordData(RecordDataState.NOT_PROVIDED, rangerow.Key);
                    handledIndexRecords[rangerow.Key] = partial_rangedata;
                }
                if ((partial_rangedata.State == RecordDataState.INCOMPLETE) ||
                    (partial_rangedata.State == RecordDataState.NOT_PROVIDED)) {
                    // we're supplying new data for this index record
                    partial_rangedata.applyUpdate(rangerow.Value);
                    stats.segmentUpdatesApplied++;

                    // because we're supplying new data, add this to our private TODO list
                    // if it is a FULL update, NOT a tombstone
                    if (rangerow.Value.type == RecordUpdateTypes.FULL) {
#if DEBUG_SEGMENT_RANGE_WALK
                        for (int depth = 10; depth > maxgen; depth--) { Console.Write(" "); }
                        Console.WriteLine("adding SegmentRangeRow: {0}", rangerow);
#endif
                        todo_list.Add(rangerow);
                    }
                }
            }
        }

        // now walk the todo list of range references in this segment, this time actually
        // descending into each referenced segment
        foreach (KeyValuePair<RecordKey, RecordUpdate> rangepointer in todo_list.scanBackward(null)) {
            if (rangepointer.Value.type == RecordUpdateTypes.DELETION_TOMBSTONE) {
                // skip deletion tombstones
                stats.segmentDeletionTombstonesSkipped++;
                continue;
            }
            SegmentReader next_seg = segmentReaderFromRow(rangepointer);
            RangeKey next_seg_rangekey = RangeKey.decodeFromRecordKey(rangepointer.Key);

#if DEBUG_SEGMENT_WALK
            for (int depth = 10; depth > maxgen; depth--) { Console.Write(" "); }
            Console.WriteLine("..WalkForNextKey descending to: {0}", rangepointer);
#endif

            // RECURSE
            INTERNAL_segmentWalkForNextKey(
                startkeytest,
                direction_is_forward,
                next_seg,
                next_seg_rangekey,
                handledIndexRecords,
                maxgen - 1,
                recordsBeingAssembled,
                equal_ok,
                stats);
        }
    }
}
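
// --------------------------------------------------------------------------
// Illustrative sketch only (NOT part of the engine above): a minimal,
// self-contained model of the accumulation pattern the walk relies on,
// assuming segments are visited from newest generation to oldest so that a
// newer tombstone shadows an older value for the same key. Everything here
// (DemoUpdateKind, DemoFindNextLiveKey, string-keyed segments) is a
// hypothetical stand-in, not an API of this codebase.
private enum DemoUpdateKind { Full, Tombstone }

private static string DemoFindNextLiveKey(
        string startKey,
        IEnumerable<SortedDictionary<string, DemoUpdateKind>> segmentsNewestFirst) {
    // key -> the newest update seen for it (first writer wins, because we
    // visit newest segments first)
    var assembled = new SortedDictionary<string, DemoUpdateKind>();

    foreach (var segment in segmentsNewestFirst) {
        foreach (var kvp in segment) {
            // only keys strictly after startKey (the equal_ok == false case)
            if (string.CompareOrdinal(kvp.Key, startKey) <= 0) { continue; }

            if (!assembled.ContainsKey(kvp.Key)) {
                assembled[kvp.Key] = kvp.Value;
            }
            // once the accumulated state for a key is live, this segment cannot
            // contribute a smaller candidate, so stop scanning it (but keep the
            // tombstones picked up along the way)
            if (assembled[kvp.Key] == DemoUpdateKind.Full) { break; }
        }
    }

    // the answer is the smallest accumulated key whose newest update is live
    foreach (var kvp in assembled) {
        if (kvp.Value == DemoUpdateKind.Full) { return kvp.Key; }
    }
    return null;    // no live key after startKey
}
// --------------------------------------------------------------------------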
private void INTERNAL_segmentWalkCursorSetupForNextKey(
    IComparable<RecordKey> startkeytest,
    bool direction_is_forward,
    ISortedSegment curseg_raw,
    RangeKey curseg_rangekey,
    IScannableDictionary<RecordKey, RecordData> handledIndexRecords,
    int maxgen,
    IScannableDictionary<RangeKey, IScannable<RecordKey, RecordUpdate>> segmentsWithRecords,
    bool equal_ok,
    SegmentWalkStats stats) {

    // TODO: convert all ISortedSegments to be IScannable
    IScannable<RecordKey, RecordUpdate> curseg = (IScannable<RecordKey, RecordUpdate>)curseg_raw;

    stats.segmentWalkInvocations++;

    // first, check whether this segment may contain a next-key;
    // if so, add it to the set of segments with records
    if (curseg_rangekey.directlyContainsKey(startkeytest)) {
        segmentsWithRecords.Add(curseg_rangekey, curseg);
    }

    // find all generation range references that are relevant for this key,
    // and make a note of which ones are "current"
    if (curseg_rangekey.directlyContainsKey(GEN_KEY_PREFIX)) {
        BDSkipList<RecordKey, RecordUpdate> todo_list = new BDSkipList<RecordKey, RecordUpdate>();

        if (curseg_rangekey.generation > stats.handlingGeneration) {
            throw new Exception("cursor setup generation priority inversion");
        }
        stats.handlingGeneration = curseg_rangekey.generation;

        for (int i = maxgen - 1; i >= 0; i--) {
            stats.segmentRangeRowScansPerformed++;
            foreach (KeyValuePair<RecordKey, RecordUpdate> rangerow in
                     RangeKey.findAllEligibleRangeRows(curseg, startkeytest, i, stats)) {
                // see if it is new for our handledIndexRecords dataset
                RecordData partial_rangedata;
                stats.segmentAccumulate_TryGet++;
                if (!handledIndexRecords.TryGetValue(rangerow.Key, out partial_rangedata)) {
                    partial_rangedata = new RecordData(RecordDataState.NOT_PROVIDED, rangerow.Key);
                    handledIndexRecords[rangerow.Key] = partial_rangedata;
                }
                if ((partial_rangedata.State == RecordDataState.INCOMPLETE) ||
                    (partial_rangedata.State == RecordDataState.NOT_PROVIDED)) {
                    // we're supplying new data for this index record
                    partial_rangedata.applyUpdate(rangerow.Value);
                    stats.segmentUpdatesApplied++;

                    // because we're supplying new data, add this to our private TODO list
                    // if it is a FULL update, NOT a tombstone
                    if (rangerow.Value.type == RecordUpdateTypes.FULL) {
#if DEBUG_SEGMENT_RANGE_WALK
                        for (int depth = 10; depth > maxgen; depth--) { Console.Write(" "); }
                        Console.WriteLine("adding SegmentRangeRow: {0}", rangerow);
#endif
                        todo_list.Add(rangerow);
                    }
                }
            }
        }

        // now walk the todo list of range references in this segment, this time actually
        // descending into each referenced segment
        foreach (KeyValuePair<RecordKey, RecordUpdate> rangepointer in todo_list.scanBackward(null)) {
            if (rangepointer.Value.type == RecordUpdateTypes.DELETION_TOMBSTONE) {
                // skip deletion tombstones
                stats.segmentDeletionTombstonesSkipped++;
                continue;
            }
            SegmentReader next_seg = segmentReaderFromRow(rangepointer);
            RangeKey next_seg_rangekey = RangeKey.decodeFromRecordKey(rangepointer.Key);

#if DEBUG_SEGMENT_WALK
            for (int depth = 10; depth > maxgen; depth--) { Console.Write(" "); }
            Console.WriteLine("..WalkForNextKey descending to: {0}", rangepointer);
#endif

            // RECURSE
            INTERNAL_segmentWalkCursorSetupForNextKey(
                startkeytest,
                direction_is_forward,
                next_seg,
                next_seg_rangekey,
                handledIndexRecords,
                maxgen - 1,
                segmentsWithRecords,
                equal_ok,
                stats);
        }
    }
}
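
// --------------------------------------------------------------------------
// Illustrative sketch only (NOT part of the engine above): once the cursor
// setup walk has collected every segment whose range covers the scan, a
// cursor can be served by merging those segments' sorted contents with the
// newest generation winning per key. The fully-materialized merge below is a
// hypothetical, simplified stand-in (DemoMergeSegments, string keys, null as
// a deletion tombstone); a real cursor would stream this as a k-way merge
// over per-segment enumerators instead of building a dictionary.
private static IEnumerable<KeyValuePair<string, string>> DemoMergeSegments(
        IList<SortedDictionary<string, string>> segmentsNewestFirst) {
    // key -> value from the newest segment that mentions it (null == tombstone)
    var merged = new SortedDictionary<string, string>();

    foreach (var segment in segmentsNewestFirst) {
        foreach (var kvp in segment) {
            if (!merged.ContainsKey(kvp.Key)) {
                merged[kvp.Key] = kvp.Value;    // newest generation wins
            }
        }
    }

    foreach (var kvp in merged) {
        if (kvp.Value != null) {                // drop keys whose newest update is a tombstone
            yield return kvp;
        }
    }
}
// --------------------------------------------------------------------------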