public List<MappedResultInfo> GetMappedResults(int view, HashSet<string> keysLeftToReduce, bool loadData, int take,
    HashSet<string> keysReturned, CancellationToken cancellationToken, List<MappedResultInfo> outputCollection = null)
{
    if (outputCollection == null)
        outputCollection = new List<MappedResultInfo>();

    Api.JetSetCurrentIndex(session, MappedResults, "by_view_hashed_reduce_key_and_bucket");

    var keysToReduce = new HashSet<string>(keysLeftToReduce);
    foreach (var reduceKey in keysToReduce)
    {
        cancellationToken.ThrowIfCancellationRequested();

        keysLeftToReduce.Remove(reduceKey);

        Api.MakeKey(session, MappedResults, view, MakeKeyGrbit.NewKey);
        var hashReduceKey = HashReduceKey(reduceKey);

        keysReturned.Add(reduceKey);

        Api.MakeKey(session, MappedResults, hashReduceKey, MakeKeyGrbit.None);
        if (Api.TrySeek(session, MappedResults, SeekGrbit.SeekGE) == false)
            continue;

        do
        {
            cancellationToken.ThrowIfCancellationRequested();

            var indexFromDb = Api.RetrieveColumnAsInt32(session, MappedResults, tableColumnsCache.MappedResultsColumns["view"]);
            var hashKeyFromDb = Api.RetrieveColumn(session, MappedResults, tableColumnsCache.MappedResultsColumns["hashed_reduce_key"]);

            // SeekGE may have positioned us on a row belonging to another view or reduce key hash; stop scanning once we leave the range.
            if (indexFromDb != view || hashReduceKey.SequenceEqual(hashKeyFromDb) == false)
                break;

            var timestamp = Api.RetrieveColumnAsInt64(session, MappedResults, tableColumnsCache.MappedResultsColumns["timestamp"]).Value;
            var keyFromDb = Api.RetrieveColumnAsString(session, MappedResults, tableColumnsCache.MappedResultsColumns["reduce_key"]);

            take--; // We have worked with this reduce key, so we consider it an output even if we don't add it.

            RavenJObject data = null;
            if (loadData)
            {
                data = LoadMappedResults(keyFromDb);
                if (data == null)
                    continue; // If we request to load data and it is not there, we ignore it
            }

            var result = new MappedResultInfo
            {
                Bucket = Api.RetrieveColumnAsInt32(session, MappedResults, tableColumnsCache.MappedResultsColumns["bucket"]).Value,
                ReduceKey = keyFromDb,
                Etag = Etag.Parse(Api.RetrieveColumn(session, MappedResults, tableColumnsCache.MappedResultsColumns["etag"])),
                Timestamp = DateTime.FromBinary(timestamp),
                Data = data,
                Size = Api.RetrieveColumnSize(session, MappedResults, tableColumnsCache.MappedResultsColumns["data"]) ?? 0
            };

            outputCollection.Add(result);
        }
        while (Api.TryMoveNext(session, MappedResults));

        if (take < 0)
            return outputCollection;
    }

    return outputCollection;
}
public List<MappedResultInfo> GetMappedResults(int view, HashSet<string> keysLeftToReduce, bool loadData, int take,
    HashSet<string> keysReturned, CancellationToken cancellationToken, List<MappedResultInfo> outputCollection = null)
{
    if (outputCollection == null)
        outputCollection = new List<MappedResultInfo>();

    var mappedResultsByViewAndReduceKey = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.ByViewAndReduceKey);
    var mappedResultsData = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.Data);

    var keysToReduce = new HashSet<string>(keysLeftToReduce);
    foreach (var reduceKey in keysToReduce)
    {
        cancellationToken.ThrowIfCancellationRequested();

        keysLeftToReduce.Remove(reduceKey);

        var reduceKeyHash = HashKey(reduceKey);
        var viewAndReduceKey = (Slice)CreateKey(view, ReduceKeySizeLimited(reduceKey), reduceKeyHash);
        using (var iterator = mappedResultsByViewAndReduceKey.MultiRead(Snapshot, viewAndReduceKey))
        {
            keysReturned.Add(reduceKey);

            if (!iterator.Seek(Slice.BeforeAllKeys))
                continue;

            do
            {
                cancellationToken.ThrowIfCancellationRequested();

                ushort version;
                var value = LoadStruct(tableStorage.MappedResults, iterator.CurrentKey, writeBatch.Value, out version);
                if (value == null)
                    continue;

                var size = tableStorage.MappedResults.GetDataSize(Snapshot, iterator.CurrentKey);
                var readReduceKey = value.ReadString(MappedResultFields.ReduceKey);

                take--; // We have worked with this reduce key, so we consider it an output even if we don't add it.

                RavenJObject data = null;
                if (loadData)
                {
                    data = LoadMappedResult(iterator.CurrentKey, readReduceKey, mappedResultsData);
                    if (data == null)
                        continue; // If we request to load data and it is not there, we ignore it
                }

                var mappedResult = new MappedResultInfo
                {
                    Bucket = value.ReadInt(MappedResultFields.Bucket),
                    ReduceKey = readReduceKey,
                    Etag = Etag.Parse(value.ReadBytes(MappedResultFields.Etag)),
                    Timestamp = DateTime.FromBinary(value.ReadLong(MappedResultFields.Timestamp)),
                    Data = data,
                    Size = size
                };

                outputCollection.Add(mappedResult);
            }
            while (iterator.MoveNext());
        }

        if (take < 0)
            return outputCollection;
    }

    return outputCollection;
}
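// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the RavenDB sources): both storage
// implementations above share the same signature and contract, so a caller
// can drain a batch of reduce keys by calling GetMappedResults repeatedly
// until every key has been visited. The delegate, the DrainMappedResults
// helper and the batch size of 1024 below are assumptions made purely for
// illustration; the surrounding usings and MappedResultInfo type are assumed
// to be the same ones the methods above rely on.
// ---------------------------------------------------------------------------
private delegate List<MappedResultInfo> GetMappedResultsDelegate(
    int view, HashSet<string> keysLeftToReduce, bool loadData, int take,
    HashSet<string> keysReturned, CancellationToken cancellationToken,
    List<MappedResultInfo> outputCollection);

private static List<MappedResultInfo> DrainMappedResults(
    GetMappedResultsDelegate getMappedResults, int view,
    IEnumerable<string> pendingReduceKeys, CancellationToken token)
{
    var keysLeftToReduce = new HashSet<string>(pendingReduceKeys);
    var keysReturned = new HashSet<string>();
    var results = new List<MappedResultInfo>();

    // Each call removes the keys it visits from keysLeftToReduce, records them
    // in keysReturned and appends rows to the shared output list, so the loop
    // terminates once every key has been handled (or the token is cancelled).
    while (keysLeftToReduce.Count > 0)
    {
        token.ThrowIfCancellationRequested();
        getMappedResults(view, keysLeftToReduce, /* loadData */ true,
                         /* take (assumed batch budget) */ 1024,
                         keysReturned, token, results);
    }

    return results;
}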