/// <inheritdoc />
public Task<Result<IReadOnlyList<ContentLocationEntry>>> GetBulkAsync(OperationContext context, IReadOnlyList<ShortHash> contentHashes)
{
    return context.PerformOperationAsync(
        Tracer,
        async () =>
        {
            // One slot per requested hash. Slots are populated concurrently by the
            // primary and (when configured) secondary redis callbacks.
            var entries = new ContentLocationEntry[contentHashes.Count];
            UnixTime accessTime = _clock.UtcNow;

            // Number of hashes for which BOTH redis instances produced a value.
            int mergedCount = 0;

            foreach (var page in contentHashes.AsIndexed().GetPages(Configuration.RedisBatchPageSize))
            {
                var pageResult = await RaidedRedis.ExecuteRedisAsync(
                    context,
                    async (redisDb, token) =>
                    {
                        var batch = redisDb.CreateBatch(RedisOperation.GetBulkGlobal);

                        foreach (var indexedHash in page)
                        {
                            var redisKey = GetRedisKey(indexedHash.Item);
                            batch.AddOperationAndTraceIfFailure(context, redisKey, async b =>
                            {
                                var redisValue = await b.StringGetAsync(redisKey);

                                var entry = redisValue.IsNullOrEmpty
                                    ? ContentLocationEntry.Missing
                                    : ContentLocationEntry.FromRedisValue(redisValue, accessTime, missingSizeHandling: true);

                                // First writer claims the slot; the loser merges its entry
                                // with the one already stored.
                                var previous = Interlocked.CompareExchange(ref entries[indexedHash.Index], entry, null);
                                if (previous != null)
                                {
                                    var merged = ContentLocationEntry.MergeEntries(entry, previous);
                                    Interlocked.Exchange(ref entries[indexedHash.Index], merged);
                                    Interlocked.Increment(ref mergedCount);
                                }

                                return Unit.Void;
                            });
                        }

                        // TODO ST: now this operation may fail with TaskCancelledException. But this should be traced differently!
                        return await redisDb.ExecuteBatchOperationAsync(context, batch, token);
                    },
                    Configuration.RetryWindow);

                if (!pageResult)
                {
                    // Propagate the failure of this page; remaining pages are not attempted.
                    return new Result<IReadOnlyList<ContentLocationEntry>>(pageResult);
                }
            }

            if (RaidedRedis.HasSecondary)
            {
                // Hashes that only one of the two instances answered for.
                Counters[GlobalStoreCounters.GetBulkEntrySingleResult].Add(contentHashes.Count - mergedCount);
            }

            return Result.Success<IReadOnlyList<ContentLocationEntry>>(entries);
        },
        Counters[GlobalStoreCounters.GetBulk],
        traceErrorsOnly: true);
}
/// <inheritdoc />
public Task<Result<IReadOnlyList<ContentLocationEntry>>> GetBulkAsync(OperationContext context, IReadOnlyList<ContentHash> contentHashes)
{
    return context.PerformOperationAsync(
        Tracer,
        async () =>
        {
            // One slot per requested hash; slots may be written concurrently by the
            // primary and secondary redis callbacks.
            var results = new ContentLocationEntry[contentHashes.Count];
            UnixTime now = _clock.UtcNow;

            // Number of hashes answered by both redis instances.
            int dualResultCount = 0;

            foreach (var page in contentHashes.AsIndexed().GetPages(_configuration.RedisBatchPageSize))
            {
                var batchResult = await ExecuteRedisAsync(context, async redisDb =>
                {
                    var redisBatch = redisDb.CreateBatch(RedisOperation.GetBulkGlobal);
                    foreach (var indexedHash in page)
                    {
                        var key = GetRedisKey(indexedHash.Item);
                        redisBatch.AddOperation(key, async batch =>
                        {
                            var redisEntry = await batch.StringGetAsync(key);
                            ContentLocationEntry entry;
                            if (redisEntry.IsNullOrEmpty)
                            {
                                entry = ContentLocationEntry.Missing;
                            }
                            else
                            {
                                entry = ContentLocationEntry.FromRedisValue(redisEntry, now, missingSizeHandling: true);
                            }

                            // First writer claims the slot; the loser merges with what is there.
                            var originalEntry = Interlocked.CompareExchange(ref results[indexedHash.Index], entry, null);
                            if (originalEntry != null)
                            {
                                // Existing entry was there. Merge the entries.
                                entry = MergeEntries(entry, originalEntry);

                                // FIX: publish the merged value with Interlocked.Exchange instead of a
                                // plain array write. The slot is shared between concurrently-running
                                // callbacks, and the sibling ShortHash overload already uses
                                // Interlocked.Exchange here; a plain write has no fence and is
                                // inconsistent with the CompareExchange used above.
                                Interlocked.Exchange(ref results[indexedHash.Index], entry);
                                Interlocked.Increment(ref dualResultCount);
                            }

                            return Unit.Void;
                        }).FireAndForget(context);
                    }

                    return await redisDb.ExecuteBatchOperationAsync(context, redisBatch, context.Token);
                });

                if (!batchResult)
                {
                    // Propagate the failure of this page; remaining pages are not attempted.
                    return new Result<IReadOnlyList<ContentLocationEntry>>(batchResult);
                }
            }

            if (HasSecondary)
            {
                // Hashes that only one of the two instances answered for.
                Counters[GlobalStoreCounters.GetBulkEntrySingleResult].Add(contentHashes.Count - dualResultCount);
            }

            return Result.Success<IReadOnlyList<ContentLocationEntry>>(results);
        },
        Counters[GlobalStoreCounters.GetBulk]);
}