private async Task<StreamWithRange?> GetStreamInternalAsync(OperationContext context, ContentHash contentHash, long offset, int? overrideStreamMinimumReadSizeInBytes)
{
    Uri? azureBlobUri = default;
    try
    {
        azureBlobUri = await GetUriAsync(context, contentHash);
        if (azureBlobUri == null)
        {
            return null;
        }

        return await GetStreamThroughAzureBlobsAsync(
            azureBlobUri,
            offset,
            overrideStreamMinimumReadSizeInBytes,
            _parallelSegmentDownloadConfig.SegmentDownloadTimeout,
            context.Token).ConfigureAwait(false);
    }
    catch (Exception e)
    {
        TraceException(context, contentHash, azureBlobUri, e);
        throw;
    }
}
/// <summary>
/// Checks the current keepUntil of a node. Returns null if the node is not found.
/// </summary>
protected async Task<Result<DateTime?>> CheckNodeKeepUntilAsync(OperationContext context, VstsDedupIdentifier dedupId)
{
    TryReferenceNodeResponse referenceResult;
    try
    {
        // Pinning with a keepUntil of now means that, if the content is available, the call will always succeed.
        referenceResult = await TryGatedArtifactOperationAsync(
            context,
            dedupId.ValueString,
            "TryKeepUntilReferenceNode",
            innerCts => DedupStoreClient.Client.TryKeepUntilReferenceNodeAsync(dedupId.CastToNodeDedupIdentifier(), new KeepUntilBlobReference(DateTime.UtcNow), null, innerCts));
    }
    catch (Exception ex)
    {
        return new Result<DateTime?>(ex);
    }

    DateTime? keepUntil = null;
    referenceResult.Match(
        (notFound) =>
        {
            // Do nothing.
        },
        (needAction) =>
        {
            // For the reason explained above, this case where children need to be pinned should never happen.
            // However, as a best approximation, we take the min of all the children, which always outlive the parent.
            keepUntil = needAction.Receipts.Select(r => r.Value.KeepUntil.KeepUntil).Min();
        },
        (added) =>
        {
            keepUntil = added.Receipts[dedupId].KeepUntil.KeepUntil;
        });

    return new Result<DateTime?>(keepUntil, isNullAllowed: true);
}
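// Illustrative usage sketch (not part of the original listing): combining
// CheckNodeKeepUntilAsync with the session's required expiry (EndDateTime, which
// appears elsewhere in this listing) to decide whether a pin is still satisfied.
// The string- and ResultCode-based PinResult constructors are assumed by analogy
// with the BoolResult and PlaceFileResult usages shown in other snippets here.
private async Task<PinResult> TryPinNodeIfFreshAsync(OperationContext context, VstsDedupIdentifier dedupId)
{
    Result<DateTime?> keepUntil = await CheckNodeKeepUntilAsync(context, dedupId);
    if (!keepUntil.Succeeded)
    {
        return new PinResult(keepUntil.ToString());
    }

    if (keepUntil.Value == null)
    {
        // The node was not found upstream.
        return new PinResult(PinResult.ResultCode.ContentNotFound);
    }

    // The pin only counts if the remote expiry covers what this session needs.
    return keepUntil.Value >= EndDateTime
        ? PinResult.Success
        : new PinResult("Remote keepUntil expires before the session's EndDateTime.");
}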
public async Task<BoolResult> OnChangeCheckpointAsync(OperationContext context, CheckpointState initialState, CheckpointManifest manifest)
{
    _activeCheckpointInfo = _activeCheckpointInfo with { Manifest = manifest };
    if (initialState.FileName == null)
    {
        return BoolResult.Success;
    }

    var result = await context.PerformOperationAsync(
        Tracer,
        async () =>
        {
            var (state, index) = await _storage.ReadModifyWriteAsync<CheckpointState, int>(
                context,
                initialState.FileName.Value,
                state =>
                {
                    var index = state.Consumers.Count;
                    var updated = state.Consumers.TryAdd(_primaryMachineLocation);
                    if (!updated)
                    {
                        index = state.Consumers.IndexOf(_primaryMachineLocation);
                    }

                    return (state, index, updated);
                },
                defaultValue: () => initialState).ThrowIfFailureAsync();

            var locations = GetCandidateLocations(state, index);
            _activeCheckpointInfo = new CheckpointInfoSnapshot(manifest, locations);
            return Result.Success(index);
        });

    // NOTE: the original snippet was truncated after the operation body; converting
    // the Result<int> to a BoolResult is a minimal completion.
    return result.Succeeded ? BoolResult.Success : new BoolResult(result);
}
internal Task<BoolResult> GarbageCollectAsync(OperationContext context, int retentionLimit)
{
    Contract.Requires(retentionLimit >= 0);

    return context.PerformOperationWithTimeoutAsync(
        Tracer,
        async context =>
        {
            var blobs = ListBlobsRecentFirstAsync(context)
                .Skip(retentionLimit);

            await foreach (var blob in blobs)
            {
                try
                {
                    var deleteSucceeded = await _storage.DeleteIfExistsAsync(context, blob);
                    Tracer.Info(context, $"Delete attempt Name=[{blob.Name}] Succeeded=[{deleteSucceeded}]");
                }
                catch (Exception e)
                {
                    Tracer.Error(context, e, $"Delete attempt Name=[{blob.Name}]");
                }
            }

            return BoolResult.Success;
        },
        timeout: _configuration.GarbageCollectionTimeout);
}
/// <summary>
/// Update all chunks if they exist. Returns success only if all chunks are found and extended.
/// </summary>
private async Task<PinResult> TryPinChunksAsync(OperationContext context, IEnumerable<VstsDedupIdentifier> dedupIdentifiers)
{
    // Materialize once to avoid enumerating the sequence multiple times below.
    var ids = dedupIdentifiers.ToList();
    if (ids.Count == 0)
    {
        return PinResult.Success;
    }

    // TODO: Support batched TryKeepUntilReferenceChunkAsync in Artifact. (bug 1428612)
    var tryReferenceBlock = new TransformBlock<VstsDedupIdentifier, PinResult>(
        async dedupId => await TryPinChunkAsync(context, dedupId),
        new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = DefaultMaxParallelism });

    tryReferenceBlock.PostAll(ids);
    var pinResults = await Task.WhenAll(Enumerable.Range(0, ids.Count).Select(i => tryReferenceBlock.ReceiveAsync()));
    tryReferenceBlock.Complete();

    foreach (var result in pinResults)
    {
        if (!result.Succeeded)
        {
            return result; // An error occurred updating one of the chunks. Fail fast.
        }
    }

    return PinResult.Success;
}
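// A minimal, self-contained sketch of the TransformBlock fan-out pattern used above,
// stripped of the cache-specific types (all names here are illustrative). Note that
// TransformBlock preserves input order by default (EnsureOrdered = true), so the
// received results line up with the posted inputs even under parallelism.
using System;
using System.Linq;
using System.Threading.Tasks;
using System.Threading.Tasks.Dataflow;

internal static class DataflowSketch
{
    public static async Task<int[]> MapWithBoundedParallelismAsync(int[] inputs, int maxParallelism)
    {
        var block = new TransformBlock<int, int>(
            async x =>
            {
                await Task.Delay(10); // Stand-in for the per-item async call (e.g., TryPinChunkAsync).
                return x * 2;
            },
            new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = maxParallelism });

        foreach (var input in inputs)
        {
            block.Post(input); // PostAll in the snippet above is presumably a helper for this loop.
        }

        // Receive exactly as many results as were posted.
        var results = await Task.WhenAll(Enumerable.Range(0, inputs.Length).Select(_ => block.ReceiveAsync()));
        block.Complete();
        return results;
    }
}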
public Task<BoolResult> ClearCheckpointsAsync(OperationContext context)
{
    return context.PerformOperationAsync(
        Tracer,
        async () => await GarbageCollectAsync(context, retentionLimit: 0),
        traceOperationStarted: false);
}
/// <summary>
/// Attempt to update expiry of all children. Pin parent node if all children were extended successfully.
/// </summary>
private async Task<PinResult> TryPinChildrenAsync(OperationContext context, VstsDedupIdentifier parentNode, IEnumerable<VstsDedupIdentifier> dedupIdentifiers)
{
    var chunks = new List<VstsDedupIdentifier>();
    var nodes = new List<VstsDedupIdentifier>();

    foreach (var id in dedupIdentifiers)
    {
        if (id.AlgorithmId == Hashing.ChunkDedupIdentifier.ChunkAlgorithmId)
        {
            chunks.Add(id);
        }
        else if (((NodeAlgorithmId)id.AlgorithmId).IsValidNode())
        {
            nodes.Add(id);
        }
        else
        {
            throw new InvalidOperationException($"Unknown dedup algorithm id detected for dedup {id.ValueString} : {id.AlgorithmId}");
        }
    }

    // Attempt to extend the expiry of all children.
    Tracer.Debug(context, $"Pinning children: nodes=[{string.Join(",", nodes.Select(x => x.ValueString))}] chunks=[{string.Join(",", chunks.Select(x => x.ValueString))}]");
    var result = await TryPinNodesAsync(context, nodes) & await TryPinChunksAsync(context, chunks);

    if (result == PinResult.Success)
    {
        // If all children were extended, pin the parent.
        result = await TryPinNodeAsync(context, parentNode);
    }

    return result;
}
/// <summary>
/// Because pinning requires recursing an entire tree, we need to limit the number of simultaneous calls to DedupStore.
/// </summary>
protected async Task<TResult> TryGatedArtifactOperationAsync<TResult>(
    OperationContext context,
    string content,
    string operationName,
    Func<CancellationToken, Task<TResult>> func,
    [CallerMemberName] string caller = null)
{
    var sw = Stopwatch.StartNew();
    await ConnectionGate.WaitAsync(context.Token);

    var elapsed = sw.Elapsed;
    if (elapsed.TotalSeconds >= MinLogWaitTimeInSeconds)
    {
        Tracer.Warning(context, $"Operation '{caller}' for {content} was throttled for {elapsed.TotalSeconds}sec");
    }

    try
    {
        return await ArtifactHttpClientErrorDetectionStrategy.ExecuteWithTimeoutAsync(
            context,
            operationName,
            innerCts => func(innerCts),
            context.Token);
    }
    finally
    {
        ConnectionGate.Release();
    }
}
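// The gating pattern above, reduced to its essentials (illustrative sketch; in the
// original, ConnectionGate is assumed to be a SemaphoreSlim-like gate, and the
// timeout wrapper is omitted here).
using System;
using System.Threading;
using System.Threading.Tasks;

internal static class GatingSketch
{
    private static readonly SemaphoreSlim Gate = new SemaphoreSlim(initialCount: 16);

    public static async Task<T> RunGatedAsync<T>(Func<CancellationToken, Task<T>> func, CancellationToken token)
    {
        // Wait for a slot; cancellation applies to the wait as well as the operation.
        await Gate.WaitAsync(token);
        try
        {
            return await func(token);
        }
        finally
        {
            Gate.Release(); // Always release, even when func throws.
        }
    }
}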
protected override async Task<BoolResult> StartupCoreAsync(OperationContext context)
{
    await _container.CreateIfNotExistsAsync(
        accessType: BlobContainerPublicAccessType.Off,
        options: null,
        operationContext: null,
        cancellationToken: context.Token);

    // Any logs in the staging folder are basically lost: they were in memory only, and we crashed or failed as we
    // were writing them. We just recreate the directory.
    try
    {
        _fileSystem.DeleteDirectory(_configuration.StagingFolderPath, DeleteOptions.Recurse);
    }
    catch (DirectoryNotFoundException)
    {
    }

    _fileSystem.CreateDirectory(_configuration.StagingFolderPath);
    _fileSystem.CreateDirectory(_configuration.UploadFolderPath);

    _writeQueue.Start(WriteBatchAsync);
    _uploadQueue.Start(UploadBatchAsync);

    return RecoverFromCrash(context);
}
public Task<BoolResult> RegisterCheckpointAsync(OperationContext context, CheckpointState checkpointState)
{
    TriggerGarbageCollection(context);

    var msg = checkpointState.ToString();
    return context.PerformOperationWithTimeoutAsync(
        Tracer,
        context =>
        {
            var blobName = GenerateBlobName();
            checkpointState.Consumers.TryAdd(_primaryMachineLocation);

            if (_configuration.WriteLegacyFormat)
            {
                return _storage.WriteAsync(context, blobName, JsonSerializer.Serialize(checkpointState, _jsonSerializerOptions));
            }
            else
            {
                return _storage.WriteAsync(context, blobName, checkpointState);
            }
        },
        traceOperationStarted: false,
        extraStartMessage: msg,
        extraEndMessage: _ => msg,
        timeout: _configuration.RegisterCheckpointTimeout);
}
public AzureBlobStorageLog(AzureBlobStorageLogConfiguration configuration, OperationContext context, IClock clock, IAbsFileSystem fileSystem, ITelemetryFieldsProvider telemetryFieldsProvider, AzureBlobStorageCredentials credentials)
{
    _configuration = configuration;
    _context = context;
    _clock = clock;
    _fileSystem = fileSystem;
    _telemetryFieldsProvider = telemetryFieldsProvider;

    var cloudBlobClient = credentials.CreateCloudBlobClient();
    _container = cloudBlobClient.GetContainerReference(configuration.ContainerName);

    _writeQueue = NagleQueue<string>.CreateUnstarted(
        configuration.WriteMaxDegreeOfParallelism,
        configuration.WriteMaxInterval,
        configuration.WriteMaxBatchSize);

    _uploadQueue = NagleQueue<LogFile>.CreateUnstarted(
        configuration.UploadMaxDegreeOfParallelism,
        configuration.UploadMaxInterval,
        1);

    // TODO: this component doesn't have a quota, which could potentially be useful. If Azure Blob Storage
    // becomes unavailable for an extended period of time, we might cause disk space issues.
}
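// NagleQueue is a BuildXL-internal batching queue: conceptually, it flushes a batch
// when either the batch-size limit is reached or the max interval elapses. A rough,
// illustrative stand-in using System.Threading.Channels (not the real implementation):
using System;
using System.Collections.Generic;
using System.Threading.Channels;
using System.Threading.Tasks;

internal sealed class MiniNagleQueue<T>
{
    private readonly Channel<T> _channel = Channel.CreateUnbounded<T>();
    private readonly int _maxBatchSize;
    private readonly TimeSpan _maxInterval;

    public MiniNagleQueue(int maxBatchSize, TimeSpan maxInterval)
    {
        _maxBatchSize = maxBatchSize;
        _maxInterval = maxInterval;
    }

    public void Enqueue(T item) => _channel.Writer.TryWrite(item);

    public async Task RunAsync(Func<IReadOnlyList<T>, Task> processBatch)
    {
        var batch = new List<T>(_maxBatchSize);
        while (await _channel.Reader.WaitToReadAsync())
        {
            // Drain what is immediately available, up to the batch size.
            while (batch.Count < _maxBatchSize && _channel.Reader.TryRead(out var item))
            {
                batch.Add(item);
            }

            // If the batch isn't full, give stragglers up to _maxInterval to arrive.
            if (batch.Count < _maxBatchSize)
            {
                await Task.Delay(_maxInterval);
                while (batch.Count < _maxBatchSize && _channel.Reader.TryRead(out var item))
                {
                    batch.Add(item);
                }
            }

            if (batch.Count > 0)
            {
                await processBatch(batch);
                batch.Clear();
            }
        }
    }
}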
private async Task<Uri?> GetUriAsync(OperationContext context, ContentHash contentHash)
{
    if (!DownloadUriCache.Instance.TryGetDownloadUri(contentHash, out var uri))
    {
        _blobCounters[Counters.VstsDownloadUriFetchedFromRemote].Increment();
        var blobId = BuildXL.Cache.ContentStore.Hashing.BlobIdentifierHelperExtensions.ToBlobIdentifier(contentHash);

        var mappings = await ArtifactHttpClientErrorDetectionStrategy.ExecuteWithTimeoutAsync(
            context,
            "GetStreamInternal",
            innerCts => BlobStoreHttpClient.GetDownloadUrisAsync(
                new[] { ToVstsBlobIdentifier(blobId) },
                EdgeCache.NotAllowed,
                cancellationToken: innerCts),
            context.Token).ConfigureAwait(false);

        if (mappings == null || !mappings.TryGetValue(ToVstsBlobIdentifier(blobId), out uri))
        {
            return null;
        }

        DownloadUriCache.Instance.AddDownloadUri(contentHash, uri);
    }
    else
    {
        _blobCounters[Counters.VstsDownloadUriFetchedInMemory].Increment();
    }

    return uri.NotNullUri;
}
private async Task<BoolResult?> PlaceFileInternalAsync(
    OperationContext context,
    ContentHash contentHash,
    string path,
    FileMode fileMode)
{
    try
    {
        // Await here (rather than returning the task directly) so that the exception
        // filters below actually observe failures from the asynchronous download.
        return await GetFileWithDedupAsync(context, contentHash, path).ConfigureAwait(false);
    }
    catch (Exception e) when (fileMode == FileMode.CreateNew && !IsErrorFileExists(e))
    {
        try
        {
            // Need to delete here so that a partial download doesn't run afoul of FileReplacementMode.FailIfExists upon retry.
            // Don't do this if the error itself was that the file already existed.
            File.Delete(path);
        }
        catch (Exception ex)
        {
            Tracer.Warning(context, $"Error deleting file at {path}: {ex}");
        }

        throw;
    }
    catch (StorageException storageEx) when (storageEx.InnerException is WebException webEx)
    {
        // A null-safe cast avoids a NullReferenceException when the response is absent.
        if ((webEx.Response as HttpWebResponse)?.StatusCode == HttpStatusCode.NotFound)
        {
            return null;
        }

        throw;
    }
}
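// Why the await in the try block matters (illustrative demo, unrelated to the cache
// types): if a method returns an async task without awaiting it, the exception
// surfaces on the *returned* task, and catch/when filters like those above never run.
using System;
using System.IO;
using System.Threading.Tasks;

internal static class ExceptionFilterDemo
{
    private static async Task ThrowLaterAsync()
    {
        await Task.Yield();
        throw new IOException("late");
    }

    public static async Task DemoAsync()
    {
        try
        {
            // Awaiting rethrows the task's exception inside this frame, so filters apply.
            await ThrowLaterAsync();
        }
        catch (IOException e) when (e.Message == "late")
        {
            // Reached only because of the await above.
        }
    }
}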
private async Task<long?> DownloadUsingHttpDownloaderAsync(OperationContext context, ContentHash contentHash, string path)
{
    var downloader = new ManagedParallelBlobDownloader(
        _parallelSegmentDownloadConfig,
        new AppTraceSourceContextAdapter(context, Tracer.Name, SourceLevels.All),
        VssClientHttpRequestSettings.Default.SessionId,
        _httpClient);

    var uri = await GetUriAsync(context, contentHash);
    if (uri == null)
    {
        return null;
    }

    DownloadResult result = await downloader.DownloadAsync(path, uri.ToString(), knownSize: null, cancellationToken: context.Token);

    if (result.HttpStatusCode == HttpStatusCode.NotFound)
    {
        return null;
    }
    else if (result.HttpStatusCode != HttpStatusCode.OK)
    {
        throw new ResultPropagationException(new ErrorResult($"Error in DownloadAsync({uri}) => [{path}]: HttpStatusCode={result.HttpStatusCode}. ErrorCode={result.ErrorCode}"));
    }

    return result.BytesDownloaded;
}
private async Task<BoolResult> GetFileWithDedupAsync(OperationContext context, ContentHash contentHash, string path)
{
    VstsBlobIdentifier blobId = ToVstsBlobIdentifier(contentHash.ToBlobIdentifier());
    VstsDedupIdentifier dedupId = blobId.ToDedupIdentifier();

    try
    {
        await TryGatedArtifactOperationAsync<object>(
            context,
            contentHash.ToString(),
            "DownloadToFileAsync",
            async innerCts =>
            {
                await DedupStoreClient.DownloadToFileAsync(dedupId, path, null, null, EdgeCache.Allowed, innerCts);
                return null;
            });
    }
    catch (NullReferenceException)
    {
        // Null reference thrown when DedupIdentifier doesn't exist in VSTS.
        return new BoolResult("DedupIdentifier not found.");
    }
    catch (Exception ex)
    {
        return new BoolResult(ex);
    }

    return BoolResult.Success;
}
private async Task<PinResult> PinImplAsync(OperationContext context, ContentHash contentHash)
{
    try
    {
        PinResult pinResult;
        var dedupId = ToVstsBlobIdentifier(contentHash.ToBlobIdentifier()).ToDedupIdentifier();
        if (dedupId.AlgorithmId == Hashing.ChunkDedupIdentifier.ChunkAlgorithmId)
        {
            pinResult = await TryPinChunkAsync(context, dedupId);
        }
        else if (((NodeAlgorithmId)dedupId.AlgorithmId).IsValidNode())
        {
            pinResult = await TryPinNodeAsync(context, dedupId);
        }
        else
        {
            throw new InvalidOperationException($"Unknown dedup algorithm id detected for dedup {dedupId.ValueString} : {dedupId.AlgorithmId}");
        }

        if (pinResult.Succeeded)
        {
            _counters[BackingContentStore.SessionCounters.PinSatisfiedFromRemote].Increment();
            BackingContentStoreExpiryCache.Instance.AddExpiry(contentHash, EndDateTime);
        }

        return pinResult;
    }
    catch (Exception ex)
    {
        return new PinResult(ex);
    }
}
public AzureBlobStorageLog(
    AzureBlobStorageLogConfiguration configuration,
    OperationContext context,
    IClock clock,
    IAbsFileSystem fileSystem,
    ITelemetryFieldsProvider telemetryFieldsProvider,
    CloudBlobContainer container,
    IReadOnlyDictionary<string, string> additionalBlobMetadata)
{
    _configuration = configuration;
    _context = context;
    _clock = clock;
    _fileSystem = fileSystem;
    _telemetryFieldsProvider = telemetryFieldsProvider;
    _container = container;
    _additionalBlobMetadata = additionalBlobMetadata;

    _writeQueue = NagleQueue<string>.CreateUnstarted(
        configuration.WriteMaxDegreeOfParallelism,
        configuration.WriteMaxInterval,
        configuration.WriteMaxBatchSize);

    _uploadQueue = NagleQueue<LogFile>.CreateUnstarted(
        configuration.UploadMaxDegreeOfParallelism,
        configuration.UploadMaxInterval,
        1);

    // TODO: this component doesn't have a quota, which could potentially be useful. If Azure Blob Storage
    // becomes unavailable for an extended period of time, we might cause disk space issues.
}
public async Task<Result<HeartbeatMachineResponse>> HeartbeatAsync(OperationContext context, HeartbeatMachineRequest request)
{
    var primaryResult = _primary.HeartbeatAsync(context, request);

    // The secondary heartbeat is best-effort: its failures are traced, not propagated.
    var secondaryResult = _secondary.HeartbeatAsync(context, request).FireAndForgetErrorsAsync(context);

    await Task.WhenAll(primaryResult, secondaryResult);
    return await primaryResult;
}
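// The primary/secondary fan-out above, generalized (illustrative sketch): the caller
// gets the primary's result, while the secondary runs best-effort and its failures
// are observed (here, just written to stderr) rather than propagated.
using System;
using System.Threading.Tasks;

internal static class FanOutSketch
{
    public static async Task<T> CallBothAsync<T>(Func<Task<T>> primary, Func<Task> secondary)
    {
        var primaryTask = primary();
        var secondaryTask = ObserveErrorsAsync(secondary());

        await Task.WhenAll(primaryTask, secondaryTask);
        return await primaryTask;
    }

    private static async Task ObserveErrorsAsync(Task task)
    {
        try
        {
            await task;
        }
        catch (Exception e)
        {
            // Best-effort: log and swallow, in the spirit of FireAndForgetErrorsAsync.
            Console.Error.WriteLine($"Secondary call failed: {e}");
        }
    }
}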
/// <inheritdoc />
protected override async Task<PlaceFileResult> PlaceFileCoreAsync(
    OperationContext context,
    ContentHash contentHash,
    AbsolutePath path,
    FileAccessMode accessMode,
    FileReplacementMode replacementMode,
    FileRealizationMode realizationMode,
    UrgencyHint urgencyHint,
    Counter retryCount)
{
    if (!contentHash.HashType.IsValidDedup())
    {
        return new PlaceFileResult($"DedupStore client requires a HashType that supports dedup. Given hash type: {contentHash.HashType}.");
    }

    try
    {
        if (replacementMode != FileReplacementMode.ReplaceExisting && File.Exists(path.Path))
        {
            return new PlaceFileResult(PlaceFileResult.ResultCode.NotPlacedAlreadyExists);
        }

        if (ImplicitPin.HasFlag(ImplicitPin.Get))
        {
            var pinResult = await PinAsync(context, contentHash, context.Token, urgencyHint).ConfigureAwait(false);
            if (!pinResult.Succeeded)
            {
                return pinResult.Code == PinResult.ResultCode.ContentNotFound
                    ? new PlaceFileResult(PlaceFileResult.ResultCode.NotPlacedContentNotFound)
                    : new PlaceFileResult(pinResult);
            }
        }

        var fileMode = replacementMode == FileReplacementMode.ReplaceExisting ? FileMode.Create : FileMode.CreateNew;
        var placeResult = await PlaceFileInternalAsync(context, contentHash, path.Path, fileMode).ConfigureAwait(false);

        // A null result signals "not found" (see PlaceFileInternalAsync above).
        if (placeResult == null)
        {
            return new PlaceFileResult(PlaceFileResult.ResultCode.NotPlacedContentNotFound);
        }

        if (!placeResult.Succeeded)
        {
            return new PlaceFileResult(placeResult, PlaceFileResult.ResultCode.NotPlacedContentNotFound);
        }

        var contentSize = GetContentSize(path);
        return new PlaceFileResult(PlaceFileResult.ResultCode.PlacedWithCopy, contentSize);
    }
    catch (IOException e) when (IsErrorFileExists(e))
    {
        return new PlaceFileResult(PlaceFileResult.ResultCode.NotPlacedAlreadyExists);
    }
    catch (Exception e)
    {
        return new PlaceFileResult(e);
    }
}
private Task<Result<LogFile>> WriteLogsToFileAsync(OperationContext context, AbsolutePath logFilePath, string[] logs)
{
    return context.PerformOperationAsync(Tracer, async () =>
    {
        long compressedSizeBytes = 0;
        long uncompressedSizeBytes = 0;

        using (Stream fileStream = await _fileSystem.OpenSafeAsync(
            logFilePath,
            FileAccess.Write,
            FileMode.CreateNew,
            FileShare.None,
            FileOptions.SequentialScan | FileOptions.Asynchronous))
        {
            // We need to make sure we close the compression stream before we take the fileStream's
            // position, because the compression stream won't write everything until it's been closed,
            // which leads to bad recorded values in compressedSizeBytes.
            using (var gzipStream = new GZipStream(fileStream, CompressionLevel.Fastest, leaveOpen: true))
            {
                using var recordingStream = new CountingStream(gzipStream);
                using var streamWriter = new StreamWriter(recordingStream, Encoding.UTF8, bufferSize: 32 * 1024, leaveOpen: true);

                if (OnFileOpen != null)
                {
                    await OnFileOpen(streamWriter);
                }

                foreach (var log in logs)
                {
                    await streamWriter.WriteLineAsync(log);
                }

                if (OnFileClose != null)
                {
                    await OnFileClose(streamWriter);
                }

                // Needed to ensure the recording stream receives everything it needs to receive.
                await streamWriter.FlushAsync();
                uncompressedSizeBytes = recordingStream.BytesWritten;
            }

            compressedSizeBytes = fileStream.Position;
        }

        Tracer.TrackMetric(context, "LogLinesWritten", logs.Length);
        Tracer.TrackMetric(context, "CompressedBytesWritten", compressedSizeBytes);
        Tracer.TrackMetric(context, "UncompressedBytesWritten", uncompressedSizeBytes);

        return new Result<LogFile>(new LogFile()
        {
            Path = logFilePath,
            UncompressedSizeBytes = uncompressedSizeBytes,
            CompressedSizeBytes = compressedSizeBytes,
        });
    });
}
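// CountingStream is referenced above but not shown in this listing; a minimal
// write-only pass-through that records how many bytes flow through it might look
// like this (a sketch, not the original class):
using System;
using System.IO;

internal sealed class CountingStream : Stream
{
    private readonly Stream _inner;

    public CountingStream(Stream inner) => _inner = inner;

    // Total bytes written through this stream so far.
    public long BytesWritten { get; private set; }

    public override void Write(byte[] buffer, int offset, int count)
    {
        _inner.Write(buffer, offset, count);
        BytesWritten += count;
    }

    public override void Flush() => _inner.Flush();

    public override bool CanRead => false;
    public override bool CanSeek => false;
    public override bool CanWrite => _inner.CanWrite;
    public override long Length => throw new NotSupportedException();
    public override long Position
    {
        get => throw new NotSupportedException();
        set => throw new NotSupportedException();
    }

    public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException();
    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
    public override void SetLength(long value) => throw new NotSupportedException();
}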
private async Task<Stream?> GetStreamInternalAsync(OperationContext context, ContentHash contentHash, int? overrideStreamMinimumReadSizeInBytes)
{
    Uri? azureBlobUri = default;
    try
    {
        if (_downloadBlobsThroughBlobStore)
        {
            return await ArtifactHttpClientErrorDetectionStrategy.ExecuteWithTimeoutAsync(
                context,
                "GetStreamInternalThroughBlobStore",
                innerCts => BlobStoreHttpClient.GetBlobAsync(ToVstsBlobIdentifier(contentHash.ToBlobIdentifier()), cancellationToken: innerCts),
                context.Token).ConfigureAwait(false);
        }

        if (!DownloadUriCache.Instance.TryGetDownloadUri(contentHash, out var uri))
        {
            _blobCounters[Counters.VstsDownloadUriFetchedFromRemote].Increment();
            var blobId = contentHash.ToBlobIdentifier();

            var mappings = await ArtifactHttpClientErrorDetectionStrategy.ExecuteWithTimeoutAsync(
                context,
                "GetStreamInternal",
                innerCts => BlobStoreHttpClient.GetDownloadUrisAsync(
                    new[] { ToVstsBlobIdentifier(blobId) },
                    EdgeCache.NotAllowed,
                    cancellationToken: innerCts),
                context.Token).ConfigureAwait(false);

            if (mappings == null || !mappings.TryGetValue(ToVstsBlobIdentifier(blobId), out uri))
            {
                return null;
            }

            DownloadUriCache.Instance.AddDownloadUri(contentHash, uri);
        }
        else
        {
            _blobCounters[Counters.VstsDownloadUriFetchedInMemory].Increment();
        }

        azureBlobUri = uri.NotNullUri;
        return await GetStreamThroughAzureBlobs(
            uri.NotNullUri,
            overrideStreamMinimumReadSizeInBytes,
            _parallelSegmentDownloadConfig.SegmentDownloadTimeout,
            context.Token).ConfigureAwait(false);
    }
    catch (Exception e)
    {
        TraceException(context, contentHash, azureBlobUri, e);
        throw;
    }
}
public async Task<Result<MasterElectionState>> GetRoleAsync(OperationContext context)
{
    var r = await UpdateRoleAsync(context, tryUpdateLease: TryCreateOrExtendLease);
    if (r.Succeeded)
    {
        _lastElection = r.Value;
    }

    return r;
}
internal Task<Result<MachineMapping>> RegisterMachineAsync(OperationContext context, MachineLocation machineLocation)
{
    Contract.Requires(machineLocation.IsValid, $"Specified machine location `{machineLocation}` can't be registered because it is invalid");

    if (_configuration.DistributedContentConsumerOnly)
    {
        return Task.FromResult(Result.Success(new MachineMapping(machineLocation, MachineId.Invalid)));
    }

    return _storage.RegisterMachineAsync(context, machineLocation);
}
internal void TriggerGarbageCollection(OperationContext context)
{
    context.PerformOperationAsync<BoolResult>(
        Tracer,
        () =>
        {
            return _gcGate.DeduplicatedOperationAsync(
                (timeWaiting, currentCount) => GarbageCollectAsync(context, retentionLimit: _configuration.CheckpointLimit),
                (timeWaiting, currentCount) => BoolResult.SuccessTask,
                token: context.Token);
        },
        traceOperationStarted: false).FireAndForget(context);
}
private Task<long?> PlaceFileInternalAsync(
    OperationContext context,
    ContentHash contentHash,
    string path,
    FileMode fileMode)
{
#if PLATFORM_WIN
    if (Configuration.DownloadBlobsUsingHttpClient)
    {
        return DownloadUsingHttpDownloaderAsync(context, contentHash, path);
    }
#endif

    return DownloadUsingAzureBlobsAsync(context, contentHash, path, fileMode);
}
public async Task<Result<Role>> ReleaseRoleIfNecessaryAsync(OperationContext context, bool shuttingDown = false)
{
    if (!_configuration.IsMasterEligible)
    {
        return Result.Success<Role>(Role.Worker);
    }

    var r = await UpdateRoleAsync(context, tryUpdateLease: TryReleaseLeaseIfHeld).AsAsync(s => s.Role);
    if (r.Succeeded)
    {
        // We don't know who the master is any more.
        _lastElection = new MasterElectionState(Master: default, Role: r.Value);
    }

    return r;
}
public AzureBlobStorageLog(
    AzureBlobStorageLogConfiguration configuration,
    OperationContext context,
    IClock clock,
    IAbsFileSystem fileSystem,
    ITelemetryFieldsProvider telemetryFieldsProvider,
    AzureBlobStorageCredentials credentials,
    IReadOnlyDictionary<string, string> additionalBlobMetadata)
    : this(
        configuration,
        context,
        clock,
        fileSystem,
        telemetryFieldsProvider,
        credentials.CreateCloudBlobClient().GetContainerReference(configuration.ContainerName),
        additionalBlobMetadata)
{
}
/// <inheritdoc />
protected override Task<IEnumerable<Task<Indexed<PinResult>>>> PinCoreAsync(
    OperationContext context,
    IReadOnlyList<ContentHash> contentHashes,
    UrgencyHint urgencyHint,
    Counter retryCounter,
    Counter fileCounter)
{
    try
    {
        return Task.FromResult(contentHashes.Select(async (contentHash, i) => (await PinAsync(context, contentHash, context.Token, urgencyHint)).WithIndex(i)));
    }
    catch (Exception ex)
    {
        context.TracingContext.Warning($"Exception when querying pins against the VSTS services {ex}");
        return Task.FromResult(contentHashes.Select((_, index) => Task.FromResult(new PinResult(ex).WithIndex(index))));
    }
}
protected override Task<BoolResult> ShutdownCoreAsync(OperationContext context)
{
    // This stops uploading more logs, waits for all in-memory logs to flush to disk, and then waits for ongoing
    // transfers to finish. Note that, contrary to its name, this method is not actually asynchronous.
    if (!_configuration.DrainUploadsOnShutdown)
    {
        _uploadQueue.Suspend();
    }

    _writeQueue.Dispose();
    _uploadQueue.Dispose();

    return BoolResult.SuccessTask;
}
public Task<Result<CheckpointState>> GetCheckpointStateAsync(OperationContext context)
{
    // NOTE: this function is naturally retried by the heartbeat mechanisms in LLS.
    return context.PerformOperationWithTimeoutAsync(
        Tracer,
        async context =>
        {
            var blobs = ListBlobsRecentFirstAsync(context);

            await foreach (var blob in blobs)
            {
                try
                {
                    var checkpointState = await _storage.ReadAsync<CheckpointState>(context, blob).ThrowIfFailureAsync();
                    checkpointState.FileName = blob;

                    foreach (var consumer in checkpointState.Consumers)
                    {
                        _pushLocations.Add(consumer, _configuration.PushCheckpointCandidateExpiry);
                    }

                    return Result.Success(checkpointState);
                }
                catch (TaskCanceledException) when (context.Token.IsCancellationRequested)
                {
                    // We hit a timeout or a proper cancellation.
                    // Breaking out of the loop instead of tracing an error for each iteration.
                    break;
                }
                catch (Exception e)
                {
                    Tracer.Error(context, e, $"Failed to obtain {nameof(CheckpointState)} from blob `{blob.Name}`. Skipping.");
                    continue;
                }
            }

            // Add slack to the start cursor to account for clock skew.
            return CheckpointState.CreateUnavailable(_clock.UtcNow - _configuration.NewEpochEventStartCursorDelay);
        },
        extraEndMessage: result =>
        {
            if (!result.Succeeded)
            {
                return string.Empty;
            }

            var checkpointState = result.Value;
            return $"CheckpointId=[{checkpointState.CheckpointId}] SequencePoint=[{checkpointState.StartSequencePoint}]";
        },
        timeout: _configuration.CheckpointStateTimeout);
}
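// ListBlobsRecentFirstAsync and GenerateBlobName are not shown in this listing. One
// plausible way to make "recent first" cheap is to name blobs so that lexicographic
// order is reverse-chronological, e.g., by prefixing inverted ticks at write time
// (an illustrative sketch, not the original implementation):
using System;

internal static class BlobNaming
{
    public static string GenerateBlobName(DateTime nowUtc)
    {
        // DateTime.MaxValue.Ticks - now.Ticks decreases as time advances, so newer
        // blobs sort first when listed in ascending lexicographic order.
        var invertedTicks = DateTime.MaxValue.Ticks - nowUtc.Ticks;
        return $"checkpoints/{invertedTicks:D20}.json";
    }
}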