Example #1
 /// <summary>
 /// Initializes a new instance of the <see cref="DedupContentSession"/> class.
 /// </summary>
 public DedupContentSession(
     Context context,
     IAbsFileSystem fileSystem,
     string name,
     ImplicitPin implicitPin,
     IDedupStoreHttpClient dedupStoreHttpClient,
     TimeSpan timeToKeepContent,
     int maxConnections = DefaultMaxConnections)
     : base(fileSystem, name, implicitPin, dedupStoreHttpClient, timeToKeepContent, maxConnections)
 {
     _artifactFileSystem = VstsFileSystem.Instance;
     _uploadSession      = DedupStoreClient.CreateUploadSession(
         DedupStoreClient,
         new KeepUntilBlobReference(EndDateTime),
         new AppTraceSourceContextAdapter(context, "CreateUploadSession", SourceLevels.All),
         _artifactFileSystem);
 }
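A minimal construction sketch for the session above. The tracing Context, the IAbsFileSystem implementation, and an authenticated IDedupStoreHttpClient are assumed to already exist; every name below is a placeholder, not part of the original example.

    // Sketch only: 'context', 'fileSystem' and 'dedupStoreHttpClient' are assumed to be
    // created elsewhere (e.g. by the hosting cache/artifact client factory).
    var session = new DedupContentSession(
        context,
        fileSystem,
        name: "ExampleDedupSession",
        implicitPin: ImplicitPin.None,
        dedupStoreHttpClient: dedupStoreHttpClient,
        timeToKeepContent: TimeSpan.FromDays(1));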
Example #2
        public static async Task<(DedupIdentifier dedupId, ulong length)> UploadToBlobStore(
            bool verbose,
            string itemPath,
            Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord> telemetryRecordFactory,
            Action<string> traceOutput,
            DedupStoreClient dedupClient,
            BlobStoreClientTelemetry clientTelemetry,
            CancellationToken cancellationToken)
        {
            // Create chunks and identifier
            var chunk = await ChunkerHelper.CreateFromFileAsync(FileSystem.Instance, itemPath, cancellationToken, false);

            var rootNode = new DedupNode(new[] { chunk });
            // ChunkerHelper uses a 64K default block size
            var dedupId = rootNode.GetDedupIdentifier(HashType.Dedup64K);

            // Set up the upload session to keep the file for at minimum one day.
            // Blobs will need to be associated with the server via an ID reference,
            // otherwise they will be garbage collected after one day.
            var tracer        = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, traceOutput);
            var keepUntilRef  = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            // Upload the chunks
            var uploadRecord = clientTelemetry.CreateRecord<BlobStoreTelemetryRecord>(telemetryRecordFactory);
            await clientTelemetry.MeasureActionAsync(
                record: uploadRecord,
                actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                    async () =>
                    {
                        await uploadSession.UploadAsync(rootNode, new Dictionary<DedupIdentifier, string>()
                        {
                            [dedupId] = itemPath
                        }, cancellationToken);
                        return uploadSession.UploadStatistics;
                    },
                    maxRetries: 3,
                    tracer: tracer,
                    canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                    cancellationToken: cancellationToken,
                    continueOnCapturedContext: false));

            return (dedupId, rootNode.TransitiveContentBytes);
        }
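A hypothetical call site for the static helper above. 'dedupClient', 'clientTelemetry' and 'recordFactory' are assumed to come from the same client factory the snippet already references (DedupManifestArtifactClientFactory); the file path is purely illustrative.

    // Sketch only: all inputs are assumed to exist at the call site.
    (DedupIdentifier dedupId, ulong length) = await UploadToBlobStore(
        verbose: true,
        itemPath: @"C:\agent\_work\1\a\drop.zip",
        telemetryRecordFactory: recordFactory,
        traceOutput: Console.WriteLine,
        dedupClient: dedupClient,
        clientTelemetry: clientTelemetry,
        cancellationToken: CancellationToken.None);
    Console.WriteLine($"Uploaded {length} bytes as dedup id {dedupId}");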
Example #3
        private async Task<(DedupIdentifier dedupId, ulong length)> UploadToBlobStore(IAsyncCommandContext context, string itemPath, CancellationToken cancellationToken)
        {
            // Create chunks and identifier
            var chunk = await ChunkerHelper.CreateFromFileAsync(FileSystem.Instance, itemPath, cancellationToken, false);

            var rootNode = new DedupNode(new[] { chunk });
            var dedupId  = rootNode.GetDedupIdentifier(HashType.Dedup64K);

            // Set up the upload session to keep the file for at minimum one day
            var verbose       = String.Equals(context.GetVariableValueOrDefault("system.debug"), "true", StringComparison.InvariantCultureIgnoreCase);
            var tracer        = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, (str) => context.Output(str));
            var keepUntilRef  = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = _dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            // Upload the chunks
            var uploadRecord = _blobTelemetry.CreateRecord<BuildArtifactActionRecord>(
                (level, uri, type) => new BuildArtifactActionRecord(level, uri, type, nameof(UploadAsync), context));
            await _blobTelemetry.MeasureActionAsync(
                record: uploadRecord,
                actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                    async () =>
                    {
                        return await uploadSession.UploadAsync(rootNode, new Dictionary<DedupIdentifier, string>()
                        {
                            [dedupId] = itemPath
                        }, cancellationToken);
                    },
                    maxRetries: 3,
                    tracer: tracer,
                    canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                    cancellationToken: cancellationToken,
                    continueOnCapturedContext: false));

            return (dedupId, rootNode.TransitiveContentBytes);
        }
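A hypothetical call from elsewhere in the same class, since the helper above is private; 'fullPath' and 'token' stand in for whatever file path and cancellation token the caller already has.

    // Sketch only: 'context' is the IAsyncCommandContext the agent passes in.
    (DedupIdentifier dedupId, ulong length) = await UploadToBlobStore(context, fullPath, token);
    context.Output($"Uploaded {length} bytes as dedup id {dedupId}");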
        public static async Task<(List<BlobFileInfo> fileDedupIds, ulong length)> UploadBatchToBlobstore(
            bool verbose,
            IReadOnlyList<string> itemPaths,
            Func<TelemetryInformationLevel, Uri, string, BlobStoreTelemetryRecord> telemetryRecordFactory,
            Action<string> traceOutput,
            DedupStoreClient dedupClient,
            BlobStoreClientTelemetry clientTelemetry,
            CancellationToken cancellationToken,
            bool enableReporting = false)
        {
            // Create chunks and identifier
            traceOutput(StringUtil.Loc("BuildingFileTree"));
            var fileNodes = await GenerateHashes(itemPaths, cancellationToken);

            var rootNode = CreateNodeToUpload(fileNodes.Where(x => x.Success).Select(y => y.Node));

            // If there are multiple paths to one DedupId (duplicate files)
            // take the last one
            var fileDedupIds = new Dictionary<DedupIdentifier, string>();

            foreach (var file in fileNodes.Where(x => x.Success))
            {
                // ChunkerHelper uses a 64K default block size
                var dedupId = file.Node.GetDedupIdentifier(HashType.Dedup64K);
                fileDedupIds[dedupId] = file.Path;
            }

            // Set up the upload session to keep files for at minimum one day.
            // Blobs will need to be associated with the server via an ID reference,
            // otherwise they will be garbage collected after one day.
            var tracer        = DedupManifestArtifactClientFactory.CreateArtifactsTracer(verbose, traceOutput);
            var keepUntilRef  = new KeepUntilBlobReference(DateTime.UtcNow.AddDays(1));
            var uploadSession = dedupClient.CreateUploadSession(keepUntilRef, tracer, FileSystem.Instance);

            using (var reportingCancelSrc = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken))
            {
                // Log stats
                Task reportingTask = null;
                if (enableReporting)
                {
                    reportingTask = StartReportingTask(traceOutput, (long)rootNode.TransitiveContentBytes, uploadSession, reportingCancelSrc);
                }

                // Upload the chunks
                var uploadRecord = clientTelemetry.CreateRecord<BlobStoreTelemetryRecord>(telemetryRecordFactory);
                await clientTelemetry.MeasureActionAsync(
                    record: uploadRecord,
                    actionAsync: async () => await AsyncHttpRetryHelper.InvokeAsync(
                        async () =>
                        {
                            await uploadSession.UploadAsync(rootNode, fileDedupIds, cancellationToken);
                            return uploadSession.UploadStatistics;
                        },
                        maxRetries: 3,
                        tracer: tracer,
                        canRetryDelegate: e => true, // this isn't great, but failing on upload stinks, so just try a couple of times
                        cancellationToken: cancellationToken,
                        continueOnCapturedContext: false));

                if (enableReporting)
                {
                    reportingCancelSrc.Cancel();
                    await reportingTask;
                }
            }

            return (fileNodes, rootNode.TransitiveContentBytes);
        }
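A hypothetical call site for the batch overload, with progress reporting enabled; as above, 'dedupClient', 'clientTelemetry' and 'recordFactory' are assumed to be created elsewhere and the paths are illustrative.

    // Sketch only: uploads two files and reports how much content was pushed.
    (List<BlobFileInfo> files, ulong totalBytes) = await UploadBatchToBlobstore(
        verbose: true,
        itemPaths: new[] { @"C:\artifacts\a.dll", @"C:\artifacts\b.dll" },
        telemetryRecordFactory: recordFactory,
        traceOutput: Console.WriteLine,
        dedupClient: dedupClient,
        clientTelemetry: clientTelemetry,
        cancellationToken: CancellationToken.None,
        enableReporting: true);
    Console.WriteLine($"Uploaded {totalBytes} bytes across {files.Count} file entries");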