        public async Task<(DedupManifestArtifactClient client, BlobStoreClientTelemetry telemetry)> CreateDedupManifestClientAsync(AgentTaskPluginExecutionContext context, VssConnection connection, CancellationToken cancellationToken)
        {
            const int maxRetries = 5;
            var tracer = context.CreateArtifactsTracer();
            var dedupStoreHttpClient = await AsyncHttpRetryHelper.InvokeAsync(
                () =>
                {
                    ArtifactHttpClientFactory factory = new ArtifactHttpClientFactory(
                        connection.Credentials,
                        TimeSpan.FromSeconds(50),
                        tracer,
                        cancellationToken);

                    // this is actually a hidden network call to the location service:
                    return Task.FromResult(factory.CreateVssHttpClient<IDedupStoreHttpClient, DedupStoreHttpClient>(connection.GetClient<DedupStoreHttpClient>().BaseAddress));
                },
                maxRetries: maxRetries,
                tracer: tracer,
                canRetryDelegate: e => true,
                context: nameof(CreateDedupManifestClientAsync),
                cancellationToken: cancellationToken,
                continueOnCapturedContext: false);

            var telemetry = new BlobStoreClientTelemetry(tracer, dedupStoreHttpClient.BaseAddress);
            var client = new DedupStoreClientWithDataport(dedupStoreHttpClient, PipelineArtifactProvider.GetDedupStoreClientMaxParallelism(context));

            return (new DedupManifestArtifactClient(telemetry, client, tracer), telemetry);
        }
        public static DedupManifestArtifactClient CreateDedupManifestClient(AgentTaskPluginExecutionContext context, VssConnection connection, out BlobStoreClientTelemetry telemetry)
        {
            var dedupStoreHttpClient = connection.GetClient<DedupStoreHttpClient>();
            var tracer = new CallbackAppTraceSource(str => context.Output(str), SourceLevels.Information);

            dedupStoreHttpClient.SetTracer(tracer);
            var client = new DedupStoreClientWithDataport(dedupStoreHttpClient, PipelineArtifactProvider.GetDedupStoreClientMaxParallelism(context));

            return new DedupManifestArtifactClient(telemetry = new BlobStoreClientTelemetry(tracer, dedupStoreHttpClient.BaseAddress), client, tracer);
        }
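
For orientation, here is a minimal call-site sketch for the async factory method above. It is illustrative only: the surrounding task class, the `factory` instance, and the authenticated `connection` are assumed rather than taken from the example.

        // Hypothetical caller: obtain the client/telemetry pair and use it for artifact operations.
        var (dedupManifestClient, clientTelemetry) =
            await factory.CreateDedupManifestClientAsync(context, connection, cancellationToken);
        // dedupManifestClient performs the artifact content transfer;
        // clientTelemetry accumulates the client-side timing records for those calls.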
Code Example #3
        public DedupManifestArtifactClient CreateDedupManifestClient(AgentTaskPluginExecutionContext context, VssConnection connection, CancellationToken cancellationToken, out BlobStoreClientTelemetry telemetry)
        {
            // Test stub: records telemetry through a TestTelemetrySender instead of building a real client.
            telemetrySender = new TestTelemetrySender();
            telemetry = new BlobStoreClientTelemetry(
                NoopAppTraceSource.Instance,
                baseAddress,
                telemetrySender);

            return null;
        }
Code Example #4
        private PipelineCacheClient CreateClient(
            BlobStoreClientTelemetry blobStoreClientTelemetry,
            AgentTaskPluginExecutionContext context,
            VssConnection connection)
        {
            var tracer = new CallbackAppTraceSource(str => context.Output(str), System.Diagnostics.SourceLevels.Information);
            IClock clock = UtcClock.Instance;
            var pipelineCacheHttpClient = connection.GetClient<PipelineCacheHttpClient>();
            var pipelineCacheClient = new PipelineCacheClient(blobStoreClientTelemetry, pipelineCacheHttpClient, clock, tracer);

            return pipelineCacheClient;
        }
Code Example #5
        private PipelineCacheClient CreateClient(
            BlobStoreClientTelemetry blobStoreClientTelemetry,
            AgentTaskPluginExecutionContext context,
            VssConnection connection)
        {
            var tracer = context.CreateArtifactsTracer();
            IClock clock = UtcClock.Instance;
            var pipelineCacheHttpClient = connection.GetClient<PipelineCacheHttpClient>();
            var pipelineCacheClient = new PipelineCacheClient(blobStoreClientTelemetry, pipelineCacheHttpClient, clock, tracer);

            return pipelineCacheClient;
        }
Code Example #6
        private Task<PipelineCacheClient> CreateClientWithRetryAsync(
            BlobStoreClientTelemetry blobStoreClientTelemetry,
            AgentTaskPluginExecutionContext context,
            VssConnection connection,
            CancellationToken cancellationToken)
        {
            // this uses location service so needs http retries.
            return AsyncHttpRetryHelper.InvokeAsync(
                async () => await this.CreateClientAsync(blobStoreClientTelemetry, context, connection),
                maxRetries: 3,
                tracer: tracer,
                canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                cancellationToken: cancellationToken,
                continueOnCapturedContext: false);
        }
Code Example #7
        public DedupManifestArtifactClient CreateDedupManifestClient(AgentTaskPluginExecutionContext context, VssConnection connection, CancellationToken cancellationToken, out BlobStoreClientTelemetry telemetry)
        {
            var tracer = context.CreateArtifactsTracer();

            ArtifactHttpClientFactory factory = new ArtifactHttpClientFactory(
                connection.Credentials,
                TimeSpan.FromSeconds(50),
                tracer,
                cancellationToken);

            var dedupStoreHttpClient = factory.CreateVssHttpClient<IDedupStoreHttpClient, DedupStoreHttpClient>(connection.GetClient<DedupStoreHttpClient>().BaseAddress);

            var client = new DedupStoreClientWithDataport(dedupStoreHttpClient, PipelineArtifactProvider.GetDedupStoreClientMaxParallelism(context));

            return new DedupManifestArtifactClient(telemetry = new BlobStoreClientTelemetry(tracer, dedupStoreHttpClient.BaseAddress), client, tracer);
        }
Code Example #8
        public static async Task<(DedupIdentifier dedupId, ulong length)> UploadToBlobStore(
            bool verbose,
            string itemPath,
            Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord> telemetryRecordFactory,
            Action<string> traceOutput,
            DedupStoreClient dedupClient,
            BlobStoreClientTelemetry clientTelemetry,
            CancellationToken cancellationToken)
        {
            // Create chunks and identifier
            var chunk = await ChunkerHelper.CreateFromFileAsync(FileSystem.Instance, itemPath, cancellationToken, false);
            var rootNode = new DedupNode(new[] { chunk });
            // ChunkerHelper uses a 64K default block size
            var dedupId = rootNode.GetDedupIdentifier(HashType.Dedup64K);

            // Set up the upload session to keep the file for at minimum one day.
            // Blobs need to be associated with the server via an ID reference,
            // otherwise they are garbage collected after one day.
            var tracer = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, traceOutput);
            var keepUntilRef = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            // Upload the chunks
            var uploadRecord = clientTelemetry.CreateRecord<BlobStoreTelemetryRecord>(telemetryRecordFactory);
            await clientTelemetry.MeasureActionAsync(
                record: uploadRecord,
                actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                    async () =>
                    {
                        await uploadSession.UploadAsync(rootNode, new Dictionary<DedupIdentifier, string>()
                        {
                            [dedupId] = itemPath
                        }, cancellationToken);
                        return uploadSession.UploadStatistics;
                    },
                    maxRetries: 3,
                    tracer: tracer,
                    canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                    cancellationToken: cancellationToken,
                    continueOnCapturedContext: false));

            return (dedupId, rootNode.TransitiveContentBytes);
        }
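
As a rough usage sketch for the helper above (illustrative only: `fullFilePath` and `recordFactory` are hypothetical, and the `dedupClient`/`clientTelemetry` pair is assumed to come from the factory methods shown earlier):

            var (dedupId, length) = await UploadToBlobStore(
                verbose: false,
                itemPath: fullFilePath,                 // hypothetical local file path
                telemetryRecordFactory: recordFactory,  // hypothetical Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord>
                traceOutput: str => context.Output(str),
                dedupClient: dedupClient,
                clientTelemetry: clientTelemetry,
                cancellationToken: token);
            // dedupId identifies the uploaded content; length is its transitive content size in bytes.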
Code Example #9
        private async Task<UploadResult> ParallelUploadAsync(IAsyncCommandContext context, IReadOnlyList<string> files, int concurrentUploads, CancellationToken token)
        {
            // return files that fail to upload and total artifact size
            var uploadResult = new UploadResult();

            // nothing needs to upload
            if (files.Count == 0)
            {
                return uploadResult;
            }

            var uploadToBlob = String.Equals(context.GetVariableValueOrDefault("agent.UploadBuildArtifactsToBlob"), "true", StringComparison.InvariantCultureIgnoreCase);

            if (uploadToBlob)
            {
                var verbose = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
                var (dedupClient, clientTelemetry) = await DedupManifestArtifactClientFactory.Instance
                    .CreateDedupClientAsync(verbose, (str) => context.Output(str), this._connection, token);

                _dedupClient = dedupClient;
                _blobTelemetry = clientTelemetry;
            }

            // ensure the file upload queue is empty.
            if (!_fileUploadQueue.IsEmpty)
            {
                throw new ArgumentOutOfRangeException(nameof(_fileUploadQueue));
            }

            // enqueue files into the upload queue.
            foreach (var file in files)
            {
                _fileUploadQueue.Enqueue(file);
            }

            // Start the upload monitor task.
            _filesProcessed = 0;
            _uploadFinished = new TaskCompletionSource<int>();
            _fileUploadTraceLog.Clear();
            _fileUploadProgressLog.Clear();
            Task uploadMonitor = ReportingAsync(context, files.Count(), _uploadCancellationTokenSource.Token);

            // Start parallel upload tasks.
            List<Task<UploadResult>> parallelUploadingTasks = new List<Task<UploadResult>>();
            for (int uploader = 0; uploader < concurrentUploads; uploader++)
            {
                parallelUploadingTasks.Add(UploadAsync(context, uploader, uploadToBlob, _uploadCancellationTokenSource.Token));
            }

            // Wait for the parallel uploads to finish.
            await Task.WhenAll(parallelUploadingTasks);
            foreach (var uploadTask in parallelUploadingTasks)
            {
                // record all failed files.
                uploadResult.AddUploadResult(await uploadTask);
            }

            // Stop the monitor task.
            _uploadFinished.TrySetResult(0);
            await uploadMonitor;

            return uploadResult;
        }
Code Example #10
        public static async Task<(List<BlobFileInfo> fileDedupIds, ulong length)> UploadBatchToBlobstore(
            bool verbose,
            IReadOnlyList<string> itemPaths,
            Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord> telemetryRecordFactory,
            Action<string> traceOutput,
            DedupStoreClient dedupClient,
            BlobStoreClientTelemetry clientTelemetry,
            CancellationToken cancellationToken,
            bool enableReporting = false)
        {
            // Create chunks and identifiers
            traceOutput(StringUtil.Loc("BuildingFileTree"));
            var fileNodes = await GenerateHashes(itemPaths, cancellationToken);
            var rootNode = CreateNodeToUpload(fileNodes.Where(x => x.Success).Select(y => y.Node));

            // If there are multiple paths to one DedupId (duplicate files), take the last one
            var fileDedupIds = new Dictionary<DedupIdentifier, string>();
            foreach (var file in fileNodes.Where(x => x.Success))
            {
                // ChunkerHelper uses a 64K default block size
                var dedupId = file.Node.GetDedupIdentifier(HashType.Dedup64K);
                fileDedupIds[dedupId] = file.Path;
            }

            // Set up the upload session to keep the files for at minimum one day.
            // Blobs need to be associated with the server via an ID reference,
            // otherwise they are garbage collected after one day.
            var tracer = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, traceOutput);
            var keepUntilRef = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            using (var reportingCancelSrc = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken))
            {
                // Log stats
                Task reportingTask = null;
                if (enableReporting)
                {
                    reportingTask = StartReportingTask(traceOutput, (long)rootNode.TransitiveContentBytes, uploadSession, reportingCancelSrc);
                }

                // Upload the chunks
                var uploadRecord = clientTelemetry.CreateRecord<BlobStoreTelemetryRecord>(telemetryRecordFactory);
                await clientTelemetry.MeasureActionAsync(
                    record: uploadRecord,
                    actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                        async () =>
                        {
                            await uploadSession.UploadAsync(rootNode, fileDedupIds, cancellationToken);
                            return uploadSession.UploadStatistics;
                        },
                        maxRetries: 3,
                        tracer: tracer,
                        canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                        cancellationToken: cancellationToken,
                        continueOnCapturedContext: false));

                if (enableReporting)
                {
                    reportingCancelSrc.Cancel();
                    await reportingTask;
                }
            }

            return (fileNodes, rootNode.TransitiveContentBytes);
        }