/// <summary>
/// This operation will create a new block blob of arbitrary size by
/// uploading it as individually staged blocks if it's larger than the
/// <paramref name="singleUploadThreshold"/>.
/// </summary>
/// <param name="content">
/// A <see cref="Stream"/> containing the content to upload.
/// </param>
/// <param name="blobHttpHeaders">
/// Optional standard HTTP header properties that can be set for the
/// block blob.
/// </param>
/// <param name="metadata">
/// Optional custom metadata to set for this block blob.
/// </param>
/// <param name="conditions">
/// Optional <see cref="BlobRequestConditions"/> to add conditions on
/// the creation of this new block blob.
/// </param>
/// <param name="progressHandler">
/// Optional <see cref="IProgress{Long}"/> to provide
/// progress updates about data transfers.
/// </param>
/// <param name="accessTier">
/// Optional <see cref="AccessTier"/>
/// Indicates the tier to be set on the blob.
/// </param>
/// <param name="singleUploadThreshold">
/// The maximum size stream that we'll upload as a single block. The
/// default value is 256MB.
/// </param>
/// <param name="transferOptions">
/// Optional <see cref="StorageTransferOptions"/> to configure
/// parallel transfer behavior.
/// </param>
/// <param name="async">
/// Whether to invoke the operation asynchronously.
/// </param>
/// <param name="cancellationToken">
/// Optional <see cref="CancellationToken"/> to propagate
/// notifications that the operation should be cancelled.
/// </param>
/// <returns>
/// A <see cref="Response{BlobContentInfo}"/> describing the
/// state of the updated block blob.
/// </returns>
/// <remarks>
/// A <see cref="RequestFailedException"/> will be thrown if
/// a failure occurs.
/// </remarks>
internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
    Stream content,
    BlobHttpHeaders blobHttpHeaders,
    Metadata metadata,
    BlobRequestConditions conditions,
    IProgress<long> progressHandler,
    AccessTier? accessTier = default,
    long? singleUploadThreshold = default,
    StorageTransferOptions transferOptions = default,
    bool async = true,
    CancellationToken cancellationToken = default)
{
    var client = new BlockBlobClient(Uri, Pipeline, Version, ClientDiagnostics, CustomerProvidedKey, EncryptionScope);

    // Default to the largest stream we can upload in one shot; the caller
    // may pass a smaller threshold, but never a larger one.
    singleUploadThreshold ??= client.BlockBlobMaxUploadBlobBytes;
    Debug.Assert(singleUploadThreshold <= client.BlockBlobMaxUploadBlobBytes);

    PartitionedUploader uploader = new PartitionedUploader(
        client,
        transferOptions,
        singleUploadThreshold,
        operationName: $"{nameof(BlobClient)}.{nameof(Upload)}");

    if (async)
    {
        return await uploader.UploadAsync(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken).ConfigureAwait(false);
    }
    else
    {
        return uploader.Upload(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken);
    }
}
/// <summary>
/// This operation will create a new block blob of arbitrary size by
/// uploading it as individually staged blocks if it's larger than the
/// <paramref name="transferOptions"/> MaximumTransferLength.
/// </summary>
/// <param name="content">
/// A <see cref="Stream"/> containing the content to upload.
/// </param>
/// <param name="blobHttpHeaders">
/// Optional standard HTTP header properties that can be set for the
/// block blob.
/// </param>
/// <param name="metadata">
/// Optional custom metadata to set for this block blob.
/// </param>
/// <param name="conditions">
/// Optional <see cref="BlobRequestConditions"/> to add conditions on
/// the creation of this new block blob.
/// </param>
/// <param name="progressHandler">
/// Optional <see cref="IProgress{Long}"/> to provide
/// progress updates about data transfers.
/// </param>
/// <param name="accessTier">
/// Optional <see cref="AccessTier"/>
/// Indicates the tier to be set on the blob.
/// </param>
/// <param name="transferOptions">
/// Optional <see cref="StorageTransferOptions"/> to configure
/// parallel transfer behavior.
/// </param>
/// <param name="async">
/// Whether to invoke the operation asynchronously.
/// </param>
/// <param name="cancellationToken">
/// Optional <see cref="CancellationToken"/> to propagate
/// notifications that the operation should be cancelled.
/// </param>
/// <returns>
/// A <see cref="Response{BlobContentInfo}"/> describing the
/// state of the updated block blob.
/// </returns>
/// <remarks>
/// A <see cref="RequestFailedException"/> will be thrown if
/// a failure occurs.
/// </remarks>
internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
    Stream content,
    BlobHttpHeaders blobHttpHeaders,
    Metadata metadata,
    BlobRequestConditions conditions,
    IProgress<long> progressHandler,
    AccessTier? accessTier = default,
    StorageTransferOptions transferOptions = default,
    bool async = true,
    CancellationToken cancellationToken = default)
{
    if (UsingClientSideEncryption)
    {
        // content is now unseekable, so PartitionedUploader will be forced to do a buffered multipart upload
        (content, metadata) = await new BlobClientSideEncryptor(new ClientSideEncryptor(ClientSideEncryption))
            .ClientSideEncryptInternal(content, metadata, async, cancellationToken).ConfigureAwait(false);
    }

    var client = new BlockBlobClient(Uri, Pipeline, Version, ClientDiagnostics, CustomerProvidedKey, EncryptionScope);

    PartitionedUploader uploader = new PartitionedUploader(
        client,
        transferOptions,
        operationName: $"{nameof(BlobClient)}.{nameof(Upload)}");

    if (async)
    {
        return await uploader.UploadAsync(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken).ConfigureAwait(false);
    }
    else
    {
        return uploader.Upload(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken);
    }
}
/// <summary>
/// This operation will create a new block blob of arbitrary size by
/// uploading it as individually staged blocks if it's larger than the
/// <paramref name="singleBlockThreshold"/>.
/// </summary>
/// <param name="file">
/// A <see cref="FileInfo"/> of the file to upload.
/// </param>
/// <param name="blobHttpHeaders">
/// Optional standard HTTP header properties that can be set for the
/// block blob.
/// </param>
/// <param name="metadata">
/// Optional custom metadata to set for this block blob.
/// </param>
/// <param name="blobAccessConditions">
/// Optional <see cref="BlobAccessConditions"/> to add conditions on
/// the creation of this new block blob.
/// </param>
/// <param name="progressHandler">
/// Optional <see cref="IProgress{StorageProgress}"/> to provide
/// progress updates about data transfers.
/// </param>
/// <param name="singleBlockThreshold">
/// The maximum size stream that we'll upload as a single block. The
/// default value is 256MB.
/// </param>
/// <param name="parallelTransferOptions">
/// Optional <see cref="ParallelTransferOptions"/> to configure
/// parallel transfer behavior.
/// </param>
/// <param name="async">
/// Whether to invoke the operation asynchronously.
/// </param>
/// <param name="cancellationToken">
/// Optional <see cref="CancellationToken"/> to propagate
/// notifications that the operation should be cancelled.
/// </param>
/// <returns>
/// A <see cref="Response{BlobContentInfo}"/> describing the
/// state of the updated block blob.
/// </returns>
/// <remarks>
/// A <see cref="StorageRequestFailedException"/> will be thrown if
/// a failure occurs.
/// </remarks>
internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
    FileInfo file,
    BlobHttpHeaders? blobHttpHeaders,
    Metadata metadata,
    BlobAccessConditions? blobAccessConditions,
    IProgress<StorageProgress> progressHandler,
    long singleBlockThreshold = BlockBlobClient.BlockBlobMaxUploadBlobBytes,
    ParallelTransferOptions parallelTransferOptions = default,
    bool async = true,
    CancellationToken cancellationToken = default)
{
    Debug.Assert(singleBlockThreshold <= BlockBlobClient.BlockBlobMaxUploadBlobBytes);

    var client = new BlockBlobClient(this.Uri, this.Pipeline);
    // Maps each block's ordinal (its start position in the original
    // stream) to the base64 block ID used to stage it, so the commit
    // can replay the blocks in stream order.
    var blockMap = new ConcurrentDictionary<long, string>();
    var blockName = 0;

    var uploadTask = PartitionedUploader.UploadAsync(
        UploadStreamAsync,
        StageBlockAsync,
        CommitBlockListAsync,
        threshold => file.Length < threshold,
        memoryPool => new StreamPartitioner(file, memoryPool),
        singleBlockThreshold,
        parallelTransferOptions,
        async,
        cancellationToken);

    return async ?
        await uploadTask.ConfigureAwait(false) :
        uploadTask.EnsureCompleted();

    string GetNewBase64BlockId(long blockOrdinal)
    {
        // Create and record a new block ID, storing the order information
        // (nominally the block's start position in the original stream)
        var newBlockName = Interlocked.Increment(ref blockName);
        var blockId = Constants.BlockNameFormat;
        blockId = String.Format(CultureInfo.InvariantCulture, blockId, newBlockName);
        blockId = Convert.ToBase64String(Encoding.UTF8.GetBytes(blockId));
        var success = blockMap.TryAdd(blockOrdinal, blockId);

        Debug.Assert(success);

        return blockId;
    }

    // Upload the entire stream
    async Task<Response<BlobContentInfo>> UploadStreamAsync()
    {
        using (var stream = file.OpenRead())
        {
            return await client.UploadInternal(
                stream,
                blobHttpHeaders,
                metadata,
                blobAccessConditions,
                progressHandler,
                async,
                cancellationToken)
                .ConfigureAwait(false);
        }
    }

    // Upload a single partition of the stream
    Task<Response<BlockInfo>> StageBlockAsync(
        Stream partition,
        long blockOrdinal,
        bool async,
        CancellationToken cancellation)
    {
        var base64BlockId = GetNewBase64BlockId(blockOrdinal);

        // Rewind the partition so the full buffered content is staged.
        partition.Position = 0;

        // Upload the block
        // NOTE(review): the `cancellation` parameter is unused; the enclosing
        // method's `cancellationToken` is passed instead — confirm these are
        // always the same token, or forward `cancellation` here.
        return client.StageBlockInternal(
            base64BlockId,
            partition,
            null,
            blobAccessConditions?.LeaseAccessConditions,
            progressHandler,
            async,
            cancellationToken);
    }

    // Commit a series of partitions
    Task<Response<BlobContentInfo>> CommitBlockListAsync(
        bool async,
        CancellationToken cancellation)
    {
        // Commit in stream order, regardless of the order blocks finished staging.
        var base64BlockIds = blockMap.OrderBy(kvp => kvp.Key).Select(kvp => kvp.Value).ToArray();

        // NOTE(review): `cancellation` is unused here as well; the enclosing
        // `cancellationToken` is forwarded instead — confirm intent.
        return client.CommitBlockListInternal(
            base64BlockIds,
            blobHttpHeaders,
            metadata,
            blobAccessConditions,
            async,
            cancellationToken);
    }
}