Example #1
 /// <summary>
 /// Initializes a new instance of the <see cref="DedupContentSession"/> class.
 /// </summary>
 public DedupContentSession(
     Context context,
     IAbsFileSystem fileSystem,
     string name,
     ImplicitPin implicitPin,
     IDedupStoreHttpClient dedupStoreHttpClient,
     TimeSpan timeToKeepContent,
     int maxConnections = DefaultMaxConnections)
     : base(fileSystem, name, implicitPin, dedupStoreHttpClient, timeToKeepContent, maxConnections)
 {
     _artifactFileSystem = VstsFileSystem.Instance;
     _uploadSession      = DedupStoreClient.CreateUploadSession(
         DedupStoreClient,
         new KeepUntilBlobReference(EndDateTime),
         new AppTraceSourceContextAdapter(context, "CreateUploadSession", SourceLevels.All),
         _artifactFileSystem);
 }
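
This constructor binds the session to the VSTS file system and opens an upload session whose blobs are pinned until EndDateTime (presumably computed by the base class from timeToKeepContent). A keep-until reference is just a timestamp wrapper; a minimal sketch of building one from a retention window, using the same type as above:

    // Sketch: build a keep-until reference from a retention window.
    TimeSpan timeToKeepContent = TimeSpan.FromDays(2);
    var keepUntil = new KeepUntilBlobReference(DateTime.UtcNow.Add(timeToKeepContent));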
        public static async Task<(DedupIdentifier dedupId, ulong length)> UploadToBlobStore(
            bool verbose,
            string itemPath,
            Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord> telemetryRecordFactory,
            Action<string> traceOutput,
            DedupStoreClient dedupClient,
            BlobStoreClientTelemetry clientTelemetry,
            CancellationToken cancellationToken)
        {
            // Create chunks and identifier
            var chunk = await ChunkerHelper.CreateFromFileAsync(FileSystem.Instance, itemPath, cancellationToken, false);

            var rootNode = new DedupNode(new[] { chunk });
            // ChunkerHelper uses a 64K default block size
            var dedupId = rootNode.GetDedupIdentifier(HashType.Dedup64K);

            // Set up the upload session to keep the file for a minimum of one day.
            // Blobs must be associated with the server via an ID reference, otherwise
            // they will be garbage collected after one day.
            var tracer        = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, traceOutput);
            var keepUntilRef  = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            // Upload the chunks
            var uploadRecord = clientTelemetry.CreateRecord<BlobStoreTelemetryRecord>(telemetryRecordFactory);
            await clientTelemetry.MeasureActionAsync(
                record: uploadRecord,
                actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                    async () =>
                    {
                        await uploadSession.UploadAsync(rootNode, new Dictionary<DedupIdentifier, string>()
                        {
                            [dedupId] = itemPath
                        }, cancellationToken);
                        return uploadSession.UploadStatistics;
                    },
                    maxRetries: 3,
                    tracer: tracer,
                    canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                    cancellationToken: cancellationToken,
                    continueOnCapturedContext: false)
                );

            return (dedupId, rootNode.TransitiveContentBytes);
        }
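
A caller might invoke this helper roughly as follows. This is a hypothetical sketch, not agent code: it assumes the method lives in BlobStoreUtils (as its batch counterpart below does), that dedupClient and clientTelemetry were created beforehand (for example via DedupManifestArtifactClientFactory.Instance.CreateDedupClientAsync, shown in later examples), and that an IAsyncCommandContext named context is in scope.

    // Hypothetical call site for UploadToBlobStore; the path and delegates are illustrative.
    var (dedupId, length) = await BlobStoreUtils.UploadToBlobStore(
        verbose: false,
        itemPath: @"C:\agent\_work\drop\artifact.zip",
        telemetryRecordFactory: (level, uri, type) =>
            new BuildArtifactActionRecord(level, uri, type, "UploadToBlobStore", context),
        traceOutput: (str) => context.Output(str),
        dedupClient: dedupClient,
        clientTelemetry: clientTelemetry,
        cancellationToken: CancellationToken.None);
    context.Output($"Uploaded {length} bytes with dedup ID {dedupId}");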
        private async Task<UploadResult> ParallelUploadAsync(IAsyncCommandContext context, IReadOnlyList<string> files, int concurrentUploads, CancellationToken token)
        {
            // return files that fail to upload and total artifact size
            var uploadResult = new UploadResult();

            // nothing needs to upload
            if (files.Count == 0)
            {
                return uploadResult;
            }

            var uploadToBlob = String.Equals(context.GetVariableValueOrDefault("agent.UploadBuildArtifactsToBlob"), "true", StringComparison.InvariantCultureIgnoreCase);

            if (uploadToBlob)
            {
                var verbose = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
                var (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance
                                                     .CreateDedupClientAsync(verbose, (str) => context.Output(str), this._connection, token);

                _dedupClient   = dedupClient;
                _blobTelemetry = clientTelemetry;
            }

            // ensure the file upload queue is empty.
            if (!_fileUploadQueue.IsEmpty)
            {
                throw new ArgumentOutOfRangeException(nameof(_fileUploadQueue));
            }

            // enqueue file into upload queue.
            foreach (var file in files)
            {
                _fileUploadQueue.Enqueue(file);
            }

            // Start upload monitor task.
            _filesProcessed = 0;
            _uploadFinished = new TaskCompletionSource<int>();
            _fileUploadTraceLog.Clear();
            _fileUploadProgressLog.Clear();
            Task uploadMonitor = ReportingAsync(context, files.Count(), _uploadCancellationTokenSource.Token);

            // Start parallel upload tasks.
            List<Task<UploadResult>> parallelUploadingTasks = new List<Task<UploadResult>>();

            for (int uploader = 0; uploader < concurrentUploads; uploader++)
            {
                parallelUploadingTasks.Add(UploadAsync(context, uploader, uploadToBlob, _uploadCancellationTokenSource.Token));
            }

            // Wait for parallel upload finish.
            await Task.WhenAll(parallelUploadingTasks);

            foreach (var uploadTask in parallelUploadingTasks)
            {
                // record all failed files.
                uploadResult.AddUploadResult(await uploadTask);
            }

            // Stop the monitor task.
            _uploadFinished.TrySetResult(0);
            await uploadMonitor;

            // report telemetry
            if (uploadToBlob)
            {
                if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.PlanId), out var planId))
                {
                    planId = Guid.Empty;
                }
                if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.JobId), out var jobId))
                {
                    jobId = Guid.Empty;
                }
                await _blobTelemetry.CommitTelemetry(planId, jobId);
            }

            return uploadResult;
        }
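
The per-file worker UploadAsync launched above is not reproduced on this page; each of the concurrentUploads tasks simply drains the shared queue until it is empty. A rough sketch of that shape, assuming _fileUploadQueue is a ConcurrentQueue<string> and that UploadResult exposes a FailedFiles collection (the real worker also branches on uploadToBlob):

    // Hypothetical worker sketch, not the agent's actual UploadAsync.
    private async Task<UploadResult> UploadWorkerSketchAsync(IAsyncCommandContext context, CancellationToken token)
    {
        var result = new UploadResult();
        while (_fileUploadQueue.TryDequeue(out string file))
        {
            token.ThrowIfCancellationRequested();
            try
            {
                await Task.Yield();                           // placeholder for the actual upload call
                Interlocked.Increment(ref _filesProcessed);   // drives the ReportingAsync monitor
            }
            catch (Exception ex)
            {
                context.Output($"Failed to upload {file}: {ex.Message}");
                result.FailedFiles.Add(file);                 // assumed member of UploadResult
            }
        }
        return result;
    }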
        private async Task DownloadFileContainerAsync(IEnumerable<FileContainerItem> items, ArtifactDownloadParameters downloadParameters, BuildArtifact artifact, string rootPath, AgentTaskPluginExecutionContext context, CancellationToken cancellationToken, bool isSingleArtifactDownload = true)
        {
            var containerIdAndRoot = ParseContainerId(artifact.Resource.Data);
            var projectId          = downloadParameters.ProjectId;

            tracer.Info($"Start downloading FCS artifact- {artifact.Name}");

            if (!isSingleArtifactDownload && items.Any())
            {
                Directory.CreateDirectory(rootPath);
            }

            var folderItems = items.Where(i => i.ItemType == ContainerItemType.Folder);

            Parallel.ForEach(folderItems, (folder) =>
            {
                var targetPath = ResolveTargetPath(rootPath, folder, containerIdAndRoot.Item2, downloadParameters.IncludeArtifactNameInPath);
                Directory.CreateDirectory(targetPath);
            });

            var fileItems = items.Where(i => i.ItemType == ContainerItemType.File);

            // Only initialize these clients if we know we need to download from Blobstore
            // If a client cannot connect to Blobstore, we shouldn't stop them from downloading from FCS
            var downloadFromBlob = !AgentKnobs.DisableBuildArtifactsToBlob.GetValue(context).AsBoolean();
            DedupStoreClient            dedupClient     = null;
            BlobStoreClientTelemetryTfs clientTelemetry = null;

            if (downloadFromBlob && fileItems.Any(x => x.BlobMetadata != null))
            {
                try
                {
                    (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance.CreateDedupClientAsync(
                        false, (str) => this.tracer.Info(str), this.connection, DedupManifestArtifactClientFactory.Instance.GetDedupStoreClientMaxParallelism(context), cancellationToken);
                }
                catch (SocketException e)
                {
                    ExceptionsUtil.HandleSocketException(e, connection.Uri.ToString(), context.Warning);
                }
                catch
                {
                    // Note: dedupClient may still be null here if client creation itself failed.
                    var blobStoreHost  = dedupClient?.Client?.BaseAddress?.Host;
                    var allowListLink  = BlobStoreWarningInfoProvider.GetAllowListLinkForCurrentPlatform();
                    var warningMessage = StringUtil.Loc("BlobStoreDownloadWarning", blobStoreHost, allowListLink);

                    // Fall back to streaming through TFS if we cannot reach blobstore
                    downloadFromBlob = false;
                    tracer.Warn(warningMessage);
                }
            }

            var downloadBlock = NonSwallowingActionBlock.Create<FileContainerItem>(
                async item =>
                {
                    var targetPath = ResolveTargetPath(rootPath, item, containerIdAndRoot.Item2, downloadParameters.IncludeArtifactNameInPath);
                    var directory  = Path.GetDirectoryName(targetPath);
                    Directory.CreateDirectory(directory);
                    await AsyncHttpRetryHelper.InvokeVoidAsync(
                        async () =>
                        {
                            tracer.Info($"Downloading: {targetPath}");
                            if (item.BlobMetadata != null && downloadFromBlob)
                            {
                                await this.DownloadFileFromBlobAsync(context, containerIdAndRoot, targetPath, projectId, item, dedupClient, clientTelemetry, cancellationToken);
                            }
                            else
                            {
                                using (var sourceStream = await this.DownloadFileAsync(containerIdAndRoot, projectId, containerClient, item, cancellationToken))
                                using (var targetStream = new FileStream(targetPath, FileMode.Create))
                                {
                                    await sourceStream.CopyToAsync(targetStream);
                                }
                            }
                        },
                        maxRetries: downloadParameters.RetryDownloadCount,
                        cancellationToken: cancellationToken,
                        tracer: tracer,
                        continueOnCapturedContext: false,
                        canRetryDelegate: exception => exception is IOException,
                        context: null);
                },
                new ExecutionDataflowBlockOptions()
                {
                    BoundedCapacity        = 5000,
                    MaxDegreeOfParallelism = downloadParameters.ParallelizationLimit,
                    CancellationToken      = cancellationToken,
                });

            await downloadBlock.SendAllAndCompleteSingleBlockNetworkAsync(fileItems, cancellationToken);

            // Send results to CustomerIntelligence
            if (clientTelemetry != null)
            {
                var planId = new Guid(context.Variables.GetValueOrDefault(WellKnownDistributedTaskVariables.PlanId)?.Value ?? Guid.Empty.ToString());
                var jobId  = new Guid(context.Variables.GetValueOrDefault(WellKnownDistributedTaskVariables.JobId)?.Value ?? Guid.Empty.ToString());
                context.PublishTelemetry(area: PipelineArtifactConstants.AzurePipelinesAgent, feature: PipelineArtifactConstants.BuildArtifactDownload,
                                         properties: clientTelemetry.GetArtifactDownloadTelemetry(planId, jobId));
            }

            // check files (will throw an exception if a file is corrupt)
            if (downloadParameters.CheckDownloadedFiles)
            {
                CheckDownloads(items, rootPath, containerIdAndRoot.Item2, downloadParameters.IncludeArtifactNameInPath);
            }
        }
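
NonSwallowingActionBlock and SendAllAndCompleteSingleBlockNetworkAsync are agent-side wrappers around TPL Dataflow (the non-swallowing wrapper exists so delegate exceptions surface instead of silently faulting the block). The underlying pattern is a bounded ActionBlock, which the stock library can reproduce; a self-contained sketch:

    // Bounded-parallelism download pattern with plain TPL Dataflow.
    using System.Threading.Tasks.Dataflow;

    var block = new ActionBlock<string>(
        async path =>
        {
            // Download a single item here...
            await Task.Yield();
        },
        new ExecutionDataflowBlockOptions
        {
            BoundedCapacity        = 5000,   // back-pressure: SendAsync waits while the block is full
            MaxDegreeOfParallelism = 8,
            CancellationToken      = CancellationToken.None,
        });

    foreach (var path in new[] { "a.txt", "b.txt" })
    {
        await block.SendAsync(path);         // honors BoundedCapacity, unlike Post
    }
    block.Complete();
    await block.Completion;                  // propagates any exception from the delegate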
Example #5
        private async Task DownloadFileContainerAsync(ArtifactDownloadParameters downloadParameters, BuildArtifact artifact, string rootPath, AgentTaskPluginExecutionContext context, CancellationToken cancellationToken, bool isSingleArtifactDownload = true)
        {
            var containerIdAndRoot = ParseContainerId(artifact.Resource.Data);
            var projectId          = downloadParameters.ProjectId;
            var minimatchPatterns  = downloadParameters.MinimatchFilters;

            var items = await containerClient.QueryContainerItemsAsync(containerIdAndRoot.Item1, projectId, isShallow: false, includeBlobMetadata: true, containerIdAndRoot.Item2);

            tracer.Info($"Start downloading FCS artifact- {artifact.Name}");
            IEnumerable <Func <string, bool> > minimatcherFuncs = MinimatchHelper.GetMinimatchFuncs(minimatchPatterns, tracer, downloadParameters.CustomMinimatchOptions);

            if (minimatcherFuncs != null && minimatcherFuncs.Count() != 0)
            {
                items = this.GetFilteredItems(items, minimatcherFuncs);
            }

            if (!isSingleArtifactDownload && items.Any())
            {
                Directory.CreateDirectory(rootPath);
            }

            var folderItems = items.Where(i => i.ItemType == ContainerItemType.Folder);

            Parallel.ForEach(folderItems, (folder) =>
            {
                var targetPath = ResolveTargetPath(rootPath, folder, containerIdAndRoot.Item2, downloadParameters.IncludeArtifactNameInPath);
                Directory.CreateDirectory(targetPath);
            });

            var fileItems = items.Where(i => i.ItemType == ContainerItemType.File);

            // Only initialize these clients if we know we need to download from Blobstore
            // If a client cannot connect to Blobstore, we shouldn't stop them from downloading from FCS
            var downloadFromBlob = !AgentKnobs.DisableBuildArtifactsToBlob.GetValue(context).AsBoolean();
            DedupStoreClient            dedupClient     = null;
            BlobStoreClientTelemetryTfs clientTelemetry = null;

            if (downloadFromBlob && fileItems.Any(x => x.BlobMetadata != null))
            {
                (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance.CreateDedupClientAsync(
                    false, (str) => this.tracer.Info(str), this.connection, cancellationToken);
            }

            var downloadBlock = NonSwallowingActionBlock.Create<FileContainerItem>(
                async item =>
                {
                    var targetPath = ResolveTargetPath(rootPath, item, containerIdAndRoot.Item2, downloadParameters.IncludeArtifactNameInPath);
                    var directory  = Path.GetDirectoryName(targetPath);
                    Directory.CreateDirectory(directory);
                    await AsyncHttpRetryHelper.InvokeVoidAsync(
                        async () =>
                        {
                            tracer.Info($"Downloading: {targetPath}");
                            if (item.BlobMetadata != null && downloadFromBlob)
                            {
                                await this.DownloadFileFromBlobAsync(context, containerIdAndRoot, targetPath, projectId, item, dedupClient, clientTelemetry, cancellationToken);
                            }
                            else
                            {
                                using (var sourceStream = await this.DownloadFileAsync(containerIdAndRoot, projectId, containerClient, item, cancellationToken))
                                using (var targetStream = new FileStream(targetPath, FileMode.Create))
                                {
                                    await sourceStream.CopyToAsync(targetStream);
                                }
                            }
                        },
                        maxRetries: downloadParameters.RetryDownloadCount,
                        cancellationToken: cancellationToken,
                        tracer: tracer,
                        continueOnCapturedContext: false,
                        canRetryDelegate: exception => exception is IOException,
                        context: null);
                },
                new ExecutionDataflowBlockOptions()
                {
                    BoundedCapacity        = 5000,
                    MaxDegreeOfParallelism = downloadParameters.ParallelizationLimit,
                    CancellationToken      = cancellationToken,
                });

            await downloadBlock.SendAllAndCompleteSingleBlockNetworkAsync(fileItems, cancellationToken);

            // Send results to CustomerIntelligence
            if (clientTelemetry != null)
            {
                var planId = new Guid(context.Variables.GetValueOrDefault(WellKnownDistributedTaskVariables.PlanId)?.Value ?? Guid.Empty.ToString());
                var jobId  = new Guid(context.Variables.GetValueOrDefault(WellKnownDistributedTaskVariables.JobId)?.Value ?? Guid.Empty.ToString());
                context.PublishTelemetry(area: PipelineArtifactConstants.AzurePipelinesAgent, feature: PipelineArtifactConstants.BuildArtifactDownload,
                                         properties: clientTelemetry.GetArtifactDownloadTelemetry(planId, jobId));
            }

            // check files (will throw an exception if a file is corrupt)
            if (downloadParameters.CheckDownloadedFiles)
            {
                CheckDownloads(items, rootPath, containerIdAndRoot.Item2, downloadParameters.IncludeArtifactNameInPath);
            }
        }
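
This variant differs from the earlier download method mainly in that it queries the container items itself and filters them with minimatch functions before downloading. GetFilteredItems is not shown on this page; a plausible stand-in keeps the items whose path matches at least one pattern:

    // Hypothetical stand-in for GetFilteredItems.
    private static IEnumerable<FileContainerItem> FilterItemsSketch(
        IEnumerable<FileContainerItem> items,
        IEnumerable<Func<string, bool>> minimatcherFuncs)
    {
        return items.Where(item => minimatcherFuncs.Any(match => match(item.Path)));
    }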
        private async Task<UploadResult> BlobUploadAsync(IAsyncCommandContext context, IReadOnlyList<string> files, int concurrentUploads, CancellationToken token)
        {
            // return files that fail to upload and total artifact size
            var uploadResult = new UploadResult();

            // nothing needs to upload
            if (files.Count == 0)
            {
                return uploadResult;
            }

            DedupStoreClient            dedupClient     = null;
            BlobStoreClientTelemetryTfs clientTelemetry = null;

            try
            {
                var verbose        = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
                int maxParallelism = context.GetHostContext().GetService<IConfigurationStore>().GetSettings().MaxDedupParallelism;
                (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance
                                                 .CreateDedupClientAsync(verbose, (str) => context.Output(str), this._connection, maxParallelism, token);

                // Upload to blobstore
                var results = await BlobStoreUtils.UploadBatchToBlobstore(
                    verbose, files,
                    (level, uri, type) => new BuildArtifactActionRecord(level, uri, type, nameof(BlobUploadAsync), context),
                    (str) => context.Output(str), dedupClient, clientTelemetry, token, enableReporting: true);

                // Associate with TFS
                context.Output(StringUtil.Loc("AssociateFiles"));
                var queue = new ConcurrentQueue<BlobFileInfo>();
                foreach (var file in results.fileDedupIds)
                {
                    queue.Enqueue(file);
                }

                // Start associate monitor
                var uploadFinished   = new TaskCompletionSource<int>();
                var associateMonitor = AssociateReportingAsync(context, files.Count(), uploadFinished, token);

                // Start parallel associate tasks.
                var parallelAssociateTasks = new List<Task<UploadResult>>();
                for (int uploader = 0; uploader < concurrentUploads; uploader++)
                {
                    parallelAssociateTasks.Add(AssociateAsync(context, queue, token));
                }

                // Wait for parallel associate tasks to finish.
                await Task.WhenAll(parallelAssociateTasks);

                foreach (var associateTask in parallelAssociateTasks)
                {
                    // record all failed files.
                    uploadResult.AddUploadResult(await associateTask);
                }

                // Stop monitor task
                uploadFinished.SetResult(0);
                await associateMonitor;

                // report telemetry
                if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.PlanId), out var planId))
                {
                    planId = Guid.Empty;
                }
                if (!Guid.TryParse(context.GetVariableValueOrDefault(WellKnownDistributedTaskVariables.JobId), out var jobId))
                {
                    jobId = Guid.Empty;
                }
                await clientTelemetry.CommitTelemetryUpload(planId, jobId);
            }
            catch (SocketException e)
            {
                ExceptionsUtil.HandleSocketException(e, this._connection.Uri.ToString(), context.Warn);

                throw;
            }
            catch
            {
                // Note: dedupClient may still be null here if client creation itself failed.
                var blobStoreHost  = dedupClient?.Client?.BaseAddress?.Host;
                var allowListLink  = BlobStoreWarningInfoProvider.GetAllowListLinkForCurrentPlatform();
                var warningMessage = StringUtil.Loc("BlobStoreUploadWarning", blobStoreHost, allowListLink);

                context.Warn(warningMessage);

                throw;
            }

            return uploadResult;
        }
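
Note the division of labor: the upload itself happens once as a batch, and only the TFS association step is fanned out across concurrentUploads workers, mirroring ParallelUploadAsync above. AssociateAsync is not shown here; conceptually each worker drains the shared queue and registers every file's dedup ID with the server so the blobs outlive the one-day keep-until window. A rough sketch under those assumptions:

    // Hypothetical associate-worker shape, not the agent's actual AssociateAsync.
    private async Task<UploadResult> AssociateWorkerSketchAsync(
        IAsyncCommandContext context,
        ConcurrentQueue<BlobFileInfo> queue,
        CancellationToken token)
    {
        var result = new UploadResult();
        while (queue.TryDequeue(out BlobFileInfo file))
        {
            token.ThrowIfCancellationRequested();
            // Associate this file's dedup ID with the server-side artifact record here
            // (an HTTP call in the real agent); record failures so the caller can retry.
            await Task.Yield();
        }
        return result;
    }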
        public static async Task<(List<BlobFileInfo> fileDedupIds, ulong length)> UploadBatchToBlobstore(
            bool verbose,
            IReadOnlyList<string> itemPaths,
            Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord> telemetryRecordFactory,
            Action<string> traceOutput,
            DedupStoreClient dedupClient,
            BlobStoreClientTelemetry clientTelemetry,
            CancellationToken cancellationToken,
            bool enableReporting = false)
        {
            // Create chunks and identifier
            traceOutput(StringUtil.Loc("BuildingFileTree"));
            var fileNodes = await GenerateHashes(itemPaths, cancellationToken);

            var rootNode = CreateNodeToUpload(fileNodes.Where(x => x.Success).Select(y => y.Node));

            // If there are multiple paths to one DedupId (duplicate files),
            // take the last one
            var fileDedupIds = new Dictionary<DedupIdentifier, string>();

            foreach (var file in fileNodes.Where(x => x.Success))
            {
                // ChunkerHelper uses a 64K default block size
                var dedupId = file.Node.GetDedupIdentifier(HashType.Dedup64K);
                fileDedupIds[dedupId] = file.Path;
            }

            // Set up the upload session to keep files for a minimum of one day.
            // Blobs must be associated with the server via an ID reference, otherwise
            // they will be garbage collected after one day.
            var tracer        = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, traceOutput);
            var keepUntilRef  = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            using (var reportingCancelSrc = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken))
            {
                // Log stats
                Task reportingTask = null;
                if (enableReporting)
                {
                    reportingTask = StartReportingTask(traceOutput, (long)rootNode.TransitiveContentBytes, uploadSession, reportingCancelSrc);
                }

                // Upload the chunks
                var uploadRecord = clientTelemetry.CreateRecord<BlobStoreTelemetryRecord>(telemetryRecordFactory);
                await clientTelemetry.MeasureActionAsync(
                    record: uploadRecord,
                    actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                        async () =>
                        {
                            await uploadSession.UploadAsync(rootNode, fileDedupIds, cancellationToken);
                            return uploadSession.UploadStatistics;
                        },
                        maxRetries: 3,
                        tracer: tracer,
                        canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                        cancellationToken: cancellationToken,
                        continueOnCapturedContext: false)
                    );

                if (enableReporting)
                {
                    reportingCancelSrc.Cancel();
                    await reportingTask;
                }
            }

            return (fileNodes, rootNode.TransitiveContentBytes);
        }
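
GenerateHashes and CreateNodeToUpload are helpers from the same class that this page does not reproduce. A simplified, sequential sketch of what GenerateHashes plausibly does, built from the single-file path shown in UploadToBlobStore above (BlobFileInfo's settable members are assumed for illustration):

    // Hypothetical sketch of GenerateHashes: chunk each file into a DedupNode.
    private static async Task<List<BlobFileInfo>> GenerateHashesSketch(
        IReadOnlyList<string> itemPaths, CancellationToken token)
    {
        var nodes = new List<BlobFileInfo>();
        foreach (var path in itemPaths)
        {
            var chunk = await ChunkerHelper.CreateFromFileAsync(FileSystem.Instance, path, token, false);
            nodes.Add(new BlobFileInfo
            {
                Path    = path,                                           // assumed member
                Node    = chunk == null ? null : new DedupNode(new[] { chunk }),
                Success = chunk != null,                                  // assumed member
            });
        }
        return nodes;
    }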