/// <summary>
/// Uploads a single file to the dedup blob store by delegating to the shared
/// <c>BlobStoreUtils.UploadToBlobStore</c> helper, using the client and telemetry
/// instances already stored on this object.
/// </summary>
/// <param name="context">Command context used for variable lookup and trace output.</param>
/// <param name="itemPath">Full path of the file to upload.</param>
/// <param name="cancellationToken">Token that cancels the upload.</param>
/// <returns>The dedup identifier of the uploaded content and its length in bytes.</returns>
private async Task<(DedupIdentifier dedupId, ulong length)> UploadToBlobStore(IAsyncCommandContext context, string itemPath, CancellationToken cancellationToken)
{
    // Verbose tracing in the blob store helpers follows the pipeline's "system.debug" variable.
    bool debugEnabled = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);

    var uploadOutcome = await BlobStoreUtils.UploadToBlobStore(
        debugEnabled,
        itemPath,
        // Telemetry records produced by the helper are tagged with this method's name.
        (level, uri, type) => new BuildArtifactActionRecord(level, uri, type, nameof(UploadToBlobStore), context),
        (str) => context.Output(str),
        _dedupClient,
        _blobTelemetry,
        cancellationToken);

    return uploadOutcome;
}
/// <summary>
/// Chunks a single file, uploads the chunks to the dedup blob store with a bounded
/// retry, and returns the root dedup identifier plus the transitive content length.
/// </summary>
/// <param name="context">Command context used for variable lookup and trace output.</param>
/// <param name="itemPath">Full path of the file to upload.</param>
/// <param name="cancellationToken">Token that cancels chunking and upload.</param>
/// <returns>The dedup identifier of the uploaded content and its transitive content bytes.</returns>
private async Task<(DedupIdentifier dedupId, ulong length)> UploadToBlobStore(IAsyncCommandContext context, string itemPath, CancellationToken cancellationToken)
{
    // Create chunks and identifier
    var chunk = await ChunkerHelper.CreateFromFileAsync(FileSystem.Instance, itemPath, cancellationToken, false);
    var rootNode = new DedupNode(new[] { chunk });
    var dedupId = rootNode.GetDedupIdentifier(HashType.Dedup64K);

    // Setup upload session to keep file for at minimum one day
    var verbose = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
    var tracer = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, (str) => context.Output(str));
    // NOTE(review): assumes one day of retention is long enough for the subsequent
    // associate step to pin the blob — confirm against the association flow.
    var keepUntilRef = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
    var uploadSession = _dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

    // Upload the chunks. Tag the telemetry record with this method's name
    // (was nameof(UploadAsync), which mislabeled the action and was inconsistent
    // with the sibling overload that uses nameof(UploadToBlobStore)).
    var uploadRecord = _blobTelemetry.CreateRecord<BuildArtifactActionRecord>((level, uri, type) => new BuildArtifactActionRecord(level, uri, type, nameof(UploadToBlobStore), context));
    await _blobTelemetry.MeasureActionAsync(
        record: uploadRecord,
        actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
            async () =>
            {
                return await uploadSession.UploadAsync(rootNode, new Dictionary<DedupIdentifier, string>() { [dedupId] = itemPath }, cancellationToken);
            },
            maxRetries: 3,
            tracer: tracer,
            canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
            cancellationToken: cancellationToken,
            continueOnCapturedContext: false)
        );

    return (dedupId, rootNode.TransitiveContentBytes);
}
/// <summary>
/// Uploads <paramref name="files"/> to the file container service using
/// <paramref name="concurrentUploads"/> parallel <c>UploadAsync</c> workers fed from the
/// shared <c>_fileUploadQueue</c>, with a reporting monitor running alongside.
/// </summary>
/// <param name="context">Command context used for variable lookup and trace output.</param>
/// <param name="files">Files to upload; may be empty.</param>
/// <param name="concurrentUploads">Number of parallel upload workers to start.</param>
/// <param name="token">Token used when creating the dedup client.</param>
/// <returns>An <c>UploadResult</c> carrying the files that failed and the total bytes uploaded.</returns>
/// <exception cref="ArgumentOutOfRangeException">If a previous run left items in <c>_fileUploadQueue</c>.</exception>
private async Task<UploadResult> ParallelUploadAsync(IAsyncCommandContext context, IReadOnlyList<string> files, int concurrentUploads, CancellationToken token)
{
    // return files that fail to upload and total artifact size
    var uploadResult = new UploadResult();

    // nothing needs to upload
    if (files.Count == 0)
    {
        return (uploadResult);
    }

    // "agent.UploadBuildArtifactsToBlob" == "true" switches the workers to the blob store path;
    // in that case the dedup client and telemetry are created up front and cached on fields.
    var uploadToBlob = String.Equals(context.GetVariableValueOrDefault("agent.UploadBuildArtifactsToBlob"), "true", StringComparison.InvariantCultureIgnoreCase);
    if (uploadToBlob)
    {
        var verbose = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
        var (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance
            .CreateDedupClientAsync(verbose, (str) => context.Output(str), this._connection, token);
        _dedupClient = dedupClient;
        _blobTelemetry = clientTelemetry;
    }

    // ensure the file upload queue is empty — leftovers indicate a previous run did not drain it.
    if (!_fileUploadQueue.IsEmpty)
    {
        throw new ArgumentOutOfRangeException(nameof(_fileUploadQueue));
    }

    // enqueue file into upload queue.
    foreach (var file in files)
    {
        _fileUploadQueue.Enqueue(file);
    }

    // Start upload monitor task. Shared progress state is reset before the workers start.
    _filesProcessed = 0;
    _uploadFinished = new TaskCompletionSource<int>();
    _fileUploadTraceLog.Clear();
    _fileUploadProgressLog.Clear();
    Task uploadMonitor = ReportingAsync(context, files.Count(), _uploadCancellationTokenSource.Token);

    // Start parallel upload tasks.
    List<Task<UploadResult>> parallelUploadingTasks = new List<Task<UploadResult>>();
    for (int uploader = 0; uploader < concurrentUploads; uploader++)
    {
        parallelUploadingTasks.Add(UploadAsync(context, uploader, uploadToBlob, _uploadCancellationTokenSource.Token));
    }

    // Wait for parallel upload finish.
    await Task.WhenAll(parallelUploadingTasks);
    foreach (var uploadTask in parallelUploadingTasks)
    {
        // record all failed files. (Tasks already completed above, so these awaits just unwrap results.)
        uploadResult.AddUploadResult(await uploadTask);
    }

    // Stop monitor task.
    _uploadFinished.TrySetResult(0);
    await uploadMonitor;

    // report telemetry; unparseable plan/job ids are recorded as Guid.Empty rather than failing.
    if (uploadToBlob)
    {
        if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.PlanId), out var planId))
        {
            planId = Guid.Empty;
        }
        if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.JobId), out var jobId))
        {
            jobId = Guid.Empty;
        }
        await _blobTelemetry.CommitTelemetry(planId, jobId);
    }

    return (uploadResult);
}
/// <summary>
/// Uploads a file or directory tree to the file container: tries the blob store path
/// when enabled (falling back to the file container service on failure), then retries
/// any failed files once after a one-minute countdown.
/// </summary>
/// <param name="context">Command context used for variable lookup and trace output.</param>
/// <param name="source">A file path or a directory to upload recursively.</param>
/// <param name="cancellationToken">Token that cancels the whole copy operation.</param>
/// <returns>Total bytes uploaded across the initial attempt and the retry.</returns>
/// <exception cref="Exception">If files still fail after the retry pass.</exception>
public async Task<long> CopyToContainerAsync(
    IAsyncCommandContext context,
    String source,
    CancellationToken cancellationToken)
{
    ArgUtil.NotNull(context, nameof(context));
    ArgUtil.NotNull(source, nameof(source));

    //set maxConcurrentUploads up to 2 until figure out how to use WinHttpHandler.MaxConnectionsPerServer modify DefaultConnectionLimit
    int maxConcurrentUploads = Math.Min(Environment.ProcessorCount, 2);

    // Single file: upload just it; directory: upload every file under it recursively.
    // _sourceParentDirectory is the root used elsewhere to compute container-relative paths.
    List<String> files;
    if (File.Exists(source))
    {
        files = new List<String>() { source };
        _sourceParentDirectory = Path.GetDirectoryName(source);
    }
    else
    {
        files = Directory.EnumerateFiles(source, "*", SearchOption.AllDirectories).ToList();
        _sourceParentDirectory = source.TrimEnd(Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar);
    }

    context.Output(StringUtil.Loc("TotalUploadFiles", files.Count()));
    using (_uploadCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken))
    {
        // hook up reporting event from file container client.
        _fileContainerHttpClient.UploadFileReportTrace += UploadFileTraceReportReceived;
        _fileContainerHttpClient.UploadFileReportProgress += UploadFileProgressReportReceived;

        try
        {
            // Blob path is used only when the variable opts in AND the knob does not disable it.
            var uploadToBlob = String.Equals(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.UploadBuildArtifactsToBlob), "true", StringComparison.InvariantCultureIgnoreCase)
                && !AgentKnobs.DisableBuildArtifactsToBlob.GetValue(context).AsBoolean();

            // try upload all files for the first time.
            UploadResult uploadResult = null;
            if (uploadToBlob)
            {
                try
                {
                    uploadResult = await BlobUploadAsync(context, files, maxConcurrentUploads, _uploadCancellationTokenSource.Token);
                }
                catch
                {
                    // Fall back to FCS upload if we cannot upload to blob.
                    // Note the deliberate catch-all: any blob failure flips the whole run
                    // (including the later retry pass) onto the file container path.
                    context.Warn(StringUtil.Loc("BlobStoreUploadWarning"));
                    uploadToBlob = false;
                }
            }
            if (!uploadToBlob)
            {
                uploadResult = await ParallelUploadAsync(context, files, maxConcurrentUploads, _uploadCancellationTokenSource.Token);
            }

            if (uploadResult.FailedFiles.Count == 0)
            {
                // all files have been uploaded successfully.
                context.Output(StringUtil.Loc("FileUploadSucceed"));
                return (uploadResult.TotalFileSizeUploaded);
            }
            else
            {
                context.Output(StringUtil.Loc("FileUploadFailedRetryLater", uploadResult.FailedFiles.Count));
            }

            // Delay 1 min then retry failed files, printing a countdown every 5 seconds.
            for (int timer = 60; timer > 0; timer -= 5)
            {
                context.Output(StringUtil.Loc("FileUploadRetryInSecond", timer));
                await Task.Delay(TimeSpan.FromSeconds(5), _uploadCancellationTokenSource.Token);
            }

            // Retry upload all failed files.
            context.Output(StringUtil.Loc("FileUploadRetry", uploadResult.FailedFiles.Count));
            UploadResult retryUploadResult;
            if (uploadToBlob)
            {
                retryUploadResult = await BlobUploadAsync(context, uploadResult.FailedFiles, maxConcurrentUploads, _uploadCancellationTokenSource.Token);
            }
            else
            {
                retryUploadResult = await ParallelUploadAsync(context, uploadResult.FailedFiles, maxConcurrentUploads, _uploadCancellationTokenSource.Token);
            }

            if (retryUploadResult.FailedFiles.Count == 0)
            {
                // all files have been uploaded successfully after retry.
                context.Output(StringUtil.Loc("FileUploadRetrySucceed"));
                return (uploadResult.TotalFileSizeUploaded + retryUploadResult.TotalFileSizeUploaded);
            }
            else
            {
                throw new Exception(StringUtil.Loc("FileUploadFailedAfterRetry"));
            }
        }
        finally
        {
            // Always unsubscribe so a reused client does not leak handlers across calls.
            _fileContainerHttpClient.UploadFileReportTrace -= UploadFileTraceReportReceived;
            _fileContainerHttpClient.UploadFileReportProgress -= UploadFileProgressReportReceived;
        }
    }
}
/// <summary>
/// Uploads <paramref name="files"/> to the blob store in one batch, then associates the
/// resulting dedup blobs with the TFS file container using parallel associate workers.
/// </summary>
/// <param name="context">Command context used for variable lookup and trace output.</param>
/// <param name="files">Files to upload; may be empty.</param>
/// <param name="concurrentUploads">Number of parallel associate workers to start.</param>
/// <param name="token">Cancellation token flowed through upload, association, and reporting.</param>
/// <returns>An <c>UploadResult</c> carrying the files that failed and the total bytes uploaded.</returns>
private async Task<UploadResult> BlobUploadAsync(IAsyncCommandContext context, IReadOnlyList<string> files, int concurrentUploads, CancellationToken token)
{
    // return files that fail to upload and total artifact size
    var uploadResult = new UploadResult();

    // nothing needs to upload
    if (files.Count == 0)
    {
        return (uploadResult);
    }

    // A fresh dedup client/telemetry pair is created per call here (unlike ParallelUploadAsync,
    // which caches them on fields).
    var verbose = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
    var (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance
        .CreateDedupClientAsync(verbose, (str) => context.Output(str), this._connection, token);

    // Upload to blobstore
    var results = await BlobStoreUtils.UploadBatchToBlobstore(verbose, files, (level, uri, type) => new BuildArtifactActionRecord(level, uri, type, nameof(BlobUploadAsync), context), (str) => context.Output(str), dedupClient, clientTelemetry, token, enableReporting: true);

    // Associate with TFS: workers drain this queue of uploaded blob descriptors.
    context.Output(StringUtil.Loc("AssociateFiles"));
    var queue = new ConcurrentQueue<BlobFileInfo>();
    foreach (var file in results.fileDedupIds)
    {
        queue.Enqueue(file);
    }

    // Start associate monitor
    var uploadFinished = new TaskCompletionSource<int>();
    var associateMonitor = AssociateReportingAsync(context, files.Count(), uploadFinished, token);

    // Start parallel associate tasks.
    var parallelAssociateTasks = new List<Task<UploadResult>>();
    for (int uploader = 0; uploader < concurrentUploads; uploader++)
    {
        parallelAssociateTasks.Add(AssociateAsync(context, queue, token));
    }

    // Wait for parallel associate tasks to finish.
    await Task.WhenAll(parallelAssociateTasks);
    foreach (var associateTask in parallelAssociateTasks)
    {
        // record all failed files. (Tasks already completed above, so these awaits just unwrap results.)
        uploadResult.AddUploadResult(await associateTask);
    }

    // Stop monitor task
    uploadFinished.SetResult(0);
    await associateMonitor;

    // report telemetry; unparseable plan/job ids are recorded as Guid.Empty rather than failing.
    if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.PlanId), out var planId))
    {
        planId = Guid.Empty;
    }
    if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.JobId), out var jobId))
    {
        jobId = Guid.Empty;
    }
    await clientTelemetry.CommitTelemetryUpload(planId, jobId);

    return (uploadResult);
}
/// <summary>
/// Uploads <paramref name="files"/> to the blob store in one batch, then associates the
/// resulting dedup blobs with the TFS file container using parallel associate workers.
/// On failure, emits a targeted warning (socket diagnostics or allow-list guidance) and rethrows.
/// </summary>
/// <param name="context">Command context used for variable lookup and trace output.</param>
/// <param name="files">Files to upload; may be empty.</param>
/// <param name="concurrentUploads">Number of parallel associate workers to start.</param>
/// <param name="token">Cancellation token flowed through upload, association, and reporting.</param>
/// <returns>An <c>UploadResult</c> carrying the files that failed and the total bytes uploaded.</returns>
private async Task<UploadResult> BlobUploadAsync(IAsyncCommandContext context, IReadOnlyList<string> files, int concurrentUploads, CancellationToken token)
{
    // return files that fail to upload and total artifact size
    var uploadResult = new UploadResult();

    // nothing needs to upload
    if (files.Count == 0)
    {
        return (uploadResult);
    }

    // Declared outside the try so the catch blocks can report the blob store host.
    DedupStoreClient dedupClient = null;
    BlobStoreClientTelemetryTfs clientTelemetry = null;
    try
    {
        var verbose = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
        // Dedup parallelism comes from agent settings, not from concurrentUploads.
        int maxParallelism = context.GetHostContext().GetService<IConfigurationStore>().GetSettings().MaxDedupParallelism;
        (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance
            .CreateDedupClientAsync(verbose, (str) => context.Output(str), this._connection, maxParallelism, token);

        // Upload to blobstore
        var results = await BlobStoreUtils.UploadBatchToBlobstore(verbose, files, (level, uri, type) => new BuildArtifactActionRecord(level, uri, type, nameof(BlobUploadAsync), context), (str) => context.Output(str), dedupClient, clientTelemetry, token, enableReporting: true);

        // Associate with TFS: workers drain this queue of uploaded blob descriptors.
        context.Output(StringUtil.Loc("AssociateFiles"));
        var queue = new ConcurrentQueue<BlobFileInfo>();
        foreach (var file in results.fileDedupIds)
        {
            queue.Enqueue(file);
        }

        // Start associate monitor
        var uploadFinished = new TaskCompletionSource<int>();
        var associateMonitor = AssociateReportingAsync(context, files.Count(), uploadFinished, token);

        // Start parallel associate tasks.
        var parallelAssociateTasks = new List<Task<UploadResult>>();
        for (int uploader = 0; uploader < concurrentUploads; uploader++)
        {
            parallelAssociateTasks.Add(AssociateAsync(context, queue, token));
        }

        // Wait for parallel associate tasks to finish.
        await Task.WhenAll(parallelAssociateTasks);
        foreach (var associateTask in parallelAssociateTasks)
        {
            // record all failed files. (Tasks already completed above, so these awaits just unwrap results.)
            uploadResult.AddUploadResult(await associateTask);
        }

        // Stop monitor task
        uploadFinished.SetResult(0);
        await associateMonitor;

        // report telemetry; unparseable plan/job ids are recorded as Guid.Empty rather than failing.
        if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.PlanId), out var planId))
        {
            planId = Guid.Empty;
        }
        if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.JobId), out var jobId))
        {
            jobId = Guid.Empty;
        }
        await clientTelemetry.CommitTelemetryUpload(planId, jobId);
    }
    catch (SocketException e)
    {
        ExceptionsUtil.HandleSocketException(e, this._connection.Uri.ToString(), context.Warn);
        throw;
    }
    catch
    {
        // BUGFIX: dedupClient is still null if the failure happened before/inside
        // CreateDedupClientAsync; the unguarded dereference here used to throw a
        // NullReferenceException from the catch block, masking the original exception.
        var blobStoreHost = dedupClient?.Client?.BaseAddress?.Host;
        var allowListLink = BlobStoreWarningInfoProvider.GetAllowListLinkForCurrentPlatform();
        var warningMessage = StringUtil.Loc("BlobStoreUploadWarning", blobStoreHost, allowListLink);
        context.Warn(warningMessage);
        throw;
    }

    return (uploadResult);
}