public async Task<Response<BlobContentInfo>> UploadAsync(
            Stream content,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier = default,
            CancellationToken cancellationToken = default)
        {
            // If we can compute the size and it's small enough
            if (PartitionedUploadExtensions.TryGetLength(content, out long length) && length < _singleUploadThreshold)
            {
                // Upload it in a single request
                return await _client.UploadInternal(
                    content,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    progressHandler,
                    _operationName,
                    async: true,
                    cancellationToken)
                    .ConfigureAwait(false);
            }

            // If the caller provided an explicit block size, we'll use it.
            // Otherwise we'll adjust dynamically based on the size of the
            // content.
            int blockSize =
                _blockSize != null ? _blockSize.Value :
                length < Constants.LargeUploadThreshold ?
                Constants.DefaultBufferSize :
                Constants.LargeBufferSize;

            // Otherwise stage individual blocks in parallel
            return await UploadInParallelAsync(
                content,
                blockSize,
                blobHttpHeaders,
                metadata,
                conditions,
                progressHandler,
                accessTier,
                cancellationToken)
                .ConfigureAwait(false);
        }
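The single-request path above depends on being able to determine the stream's length up front. The helper below is a minimal sketch of what a length probe like PartitionedUploadExtensions.TryGetLength could look like, inferred from how the result is used; the SDK's actual implementation may differ.

// Hypothetical sketch of a length probe; not the SDK's actual implementation.
static bool TryGetLength(Stream content, out long length)
{
    // Only seekable streams can report their remaining length up front
    if (content != null && content.CanSeek)
    {
        length = content.Length - content.Position;
        return true;
    }

    // Non-seekable streams fall through to the chunked / parallel upload path
    length = 0;
    return false;
}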
Example 2
        /// <summary>
        /// The <see cref="StagedUploadAsync"/> operation will create a new
        /// block blob of arbitrary size by uploading it as individually staged
        /// blocks if it's larger than the
        /// <paramref name="singleBlockThreshold"/>.
        /// </summary>
        /// <param name="content">
        /// A <see cref="Stream"/> containing the content to upload.
        /// </param>
        /// <param name="blobHttpHeaders">
        /// Optional standard HTTP header properties that can be set for the
        /// block blob.
        /// </param>
        /// <param name="metadata">
        /// Optional custom metadata to set for this block blob.
        /// </param>
        /// <param name="blobAccessConditions">
        /// Optional <see cref="BlobAccessConditions"/> to add conditions on
        /// the creation of this new block blob.
        /// </param>
        /// <param name="progressHandler">
        /// Optional <see cref="IProgress{StorageProgress}"/> to provide
        /// progress updates about data transfers.
        /// </param>
        /// <param name="singleBlockThreshold">
        /// The maximum size stream that we'll upload as a single block.  The
        /// default value is 256MB.
        /// </param>
        /// <param name="blockSize">
        /// The size of individually staged blocks.  The default value is 4MB.
        /// </param>
        /// <param name="async">
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{BlobContentInfo}"/> describing the
        /// state of the updated block blob.
        /// </returns>
        /// <remarks>
        /// A <see cref="StorageRequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
            Stream content,
            BlobHttpHeaders? blobHttpHeaders,
            Metadata metadata,
            BlobAccessConditions? blobAccessConditions,
            IProgress<StorageProgress> progressHandler,
            long singleBlockThreshold = BlockBlobClient.BlockBlobMaxUploadBlobBytes,
            int blockSize = Constants.DefaultBufferSize,
            bool async = true,
            CancellationToken cancellationToken = default)
        {
            Debug.Assert(singleBlockThreshold <= BlockBlobClient.BlockBlobMaxUploadBlobBytes);

            var client     = new BlockBlobClient(this.Uri, this.Pipeline);
            var blockList  = new List<string>();
            var uploadTask = ChunkedUploader.UploadAsync(
                UploadStreamAsync,
                StageBlockAsync,
                CommitBlockListAsync,
                content,
                singleBlockThreshold,
                blockSize,
                async,
                cancellationToken);

            return async ?
                await uploadTask.ConfigureAwait(false) :
                uploadTask.EnsureCompleted();

            // Upload the entire stream
            Task<Response<BlobContentInfo>> UploadStreamAsync(
                Stream content,
                bool async,
                CancellationToken cancellation) =>
                client.UploadInternal(
                    content,
                    blobHttpHeaders,
                    metadata,
                    blobAccessConditions,
                    progressHandler,
                    async,
                    cancellationToken);

            // Upload a single chunk of the stream
            Task<Response<BlockInfo>> StageBlockAsync(
                Stream chunk,
                int blockNumber,
                bool async,
                CancellationToken cancellation)
            {
                // Create a new block ID
                var blockId = Constants.BlockNameFormat;

                blockId = String.Format(CultureInfo.InvariantCulture, blockId, blockNumber);
                blockId = Convert.ToBase64String(Encoding.UTF8.GetBytes(blockId));
                blockList.Add(blockId);

                // Upload the block
                return client.StageBlockInternal(
                    blockId,
                    chunk,
                    null,
                    blobAccessConditions?.LeaseAccessConditions,
                    progressHandler,
                    async,
                    cancellationToken);
            }

            // Commit a series of chunks
            Task<Response<BlobContentInfo>> CommitBlockListAsync(
                bool async,
                CancellationToken cancellation) =>
            client.CommitBlockListInternal(
                blockList,
                blobHttpHeaders,
                metadata,
                blobAccessConditions,
                async,
                cancellationToken);
        }
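The block IDs above come from Constants.BlockNameFormat, which is formatted with the block number and then Base64 encoded before being added to the block list. The exact format string isn't shown in this excerpt, so the sketch below uses a placeholder fixed-width name; what matters is that each ID is valid Base64 and that, within a single blob, all block IDs have the same length, which the service requires.

// Placeholder illustration of the block ID scheme; the real
// Constants.BlockNameFormat may use a different format string.
static string ToBase64BlockId(int blockNumber)
{
    // A fixed-width name keeps every ID the same length, as the service requires
    string name = string.Format(CultureInfo.InvariantCulture, "Block_{0:D8}", blockNumber);
    return Convert.ToBase64String(Encoding.UTF8.GetBytes(name));
}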
Example 3
        /// <summary>
        /// This operation will create a new
        /// block blob of arbitrary size by uploading it as individually staged
        /// blocks if it's larger than the
        /// <paramref name="singleBlockThreshold"/>.
        /// </summary>
        /// <param name="file">
        /// A <see cref="FileInfo"/> of the file to upload.
        /// </param>
        /// <param name="blobHttpHeaders">
        /// Optional standard HTTP header properties that can be set for the
        /// block blob.
        /// </param>
        /// <param name="metadata">
        /// Optional custom metadata to set for this block blob.
        /// </param>
        /// <param name="blobAccessConditions">
        /// Optional <see cref="BlobAccessConditions"/> to add conditions on
        /// the creation of this new block blob.
        /// </param>
        /// <param name="progressHandler">
        /// Optional <see cref="IProgress{StorageProgress}"/> to provide
        /// progress updates about data transfers.
        /// </param>
        /// <param name="singleBlockThreshold">
        /// The maximum size stream that we'll upload as a single block.  The
        /// default value is 256MB.
        /// </param>
        /// <param name="parallelTransferOptions">
        /// Optional <see cref="ParallelTransferOptions"/> to configure
        /// parallel transfer behavior.
        /// </param>
        /// <param name="async">
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{BlobContentInfo}"/> describing the
        /// state of the updated block blob.
        /// </returns>
        /// <remarks>
        /// A <see cref="StorageRequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
            FileInfo file,
            BlobHttpHeaders? blobHttpHeaders,
            Metadata metadata,
            BlobAccessConditions? blobAccessConditions,
            IProgress<StorageProgress> progressHandler,
            long singleBlockThreshold = BlockBlobClient.BlockBlobMaxUploadBlobBytes,
            ParallelTransferOptions parallelTransferOptions = default,
            bool async = true,
            CancellationToken cancellationToken = default)
        {
            Debug.Assert(singleBlockThreshold <= BlockBlobClient.BlockBlobMaxUploadBlobBytes);

            var client     = new BlockBlobClient(this.Uri, this.Pipeline);
            var blockMap   = new ConcurrentDictionary<long, string>();
            var blockName  = 0;
            var uploadTask = PartitionedUploader.UploadAsync(
                UploadStreamAsync,
                StageBlockAsync,
                CommitBlockListAsync,
                threshold => file.Length < threshold,
                memoryPool => new StreamPartitioner(file, memoryPool),
                singleBlockThreshold,
                parallelTransferOptions,
                async,
                cancellationToken);

            return async ?
                await uploadTask.ConfigureAwait(false) :
                uploadTask.EnsureCompleted();

            string GetNewBase64BlockId(long blockOrdinal)
            {
                // Create and record a new block ID, storing the order information
                // (nominally the block's start position in the original stream)

                var newBlockName = Interlocked.Increment(ref blockName);
                var blockId      = Constants.BlockNameFormat;

                blockId = String.Format(CultureInfo.InvariantCulture, blockId, newBlockName);
                blockId = Convert.ToBase64String(Encoding.UTF8.GetBytes(blockId));
                var success = blockMap.TryAdd(blockOrdinal, blockId);

                Debug.Assert(success);

                return blockId;
            }

            // Upload the entire stream
            async Task<Response<BlobContentInfo>> UploadStreamAsync()
            {
                using (var stream = file.OpenRead())
                {
                    return await client.UploadInternal(
                        stream,
                        blobHttpHeaders,
                        metadata,
                        blobAccessConditions,
                        progressHandler,
                        async,
                        cancellationToken)
                        .ConfigureAwait(false);
                }
            }

            // Upload a single partition of the stream
            Task<Response<BlockInfo>> StageBlockAsync(
                Stream partition,
                long blockOrdinal,
                bool async,
                CancellationToken cancellation)
            {
                var base64BlockId = GetNewBase64BlockId(blockOrdinal);

                // Rewind the partition so the block is staged from its start
                partition.Position = 0;

                // Upload the block
                return client.StageBlockInternal(
                    base64BlockId,
                    partition,
                    null,
                    blobAccessConditions?.LeaseAccessConditions,
                    progressHandler,
                    async,
                    cancellationToken);
            }

            // Commit a series of partitions
            Task<Response<BlobContentInfo>> CommitBlockListAsync(
                bool async,
                CancellationToken cancellation)
            {
                // Commit the blocks in the order they appeared in the original stream
                var base64BlockIds = blockMap.OrderBy(kvp => kvp.Key).Select(kvp => kvp.Value).ToArray();

                return client.CommitBlockListInternal(
                    base64BlockIds,
                    blobHttpHeaders,
                    metadata,
                    blobAccessConditions,
                    async,
                    cancellationToken);
            }
        }
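For comparison with the internal helpers above, the same stage-then-commit pattern can be expressed with the public Azure.Storage.Blobs API. The snippet below is a minimal sketch meant to run inside an async method; blockBlobClient and path are assumed names, and the 4 MB buffer and "D8" ID width are arbitrary choices for illustration, not the SDK's internal upload path.

// Minimal sketch using the public BlockBlobClient API (assumed names:
// blockBlobClient, path); not the SDK's internal implementation above.
var blockIds = new List<string>();
using (FileStream stream = File.OpenRead(path))
{
    var buffer = new byte[4 * 1024 * 1024];
    int read;
    int ordinal = 0;
    while ((read = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
    {
        // Same-length, Base64-encoded IDs, recorded in the order they are staged
        string blockId = Convert.ToBase64String(
            Encoding.UTF8.GetBytes(ordinal++.ToString("D8", CultureInfo.InvariantCulture)));
        blockIds.Add(blockId);

        await blockBlobClient.StageBlockAsync(blockId, new MemoryStream(buffer, 0, read));
    }

    // Committing the block list makes the staged blocks visible as the blob's content
    await blockBlobClient.CommitBlockListAsync(blockIds);
}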