public Response<PathInfo> Upload(
            Stream content,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress<long> progressHandler,
            CancellationToken cancellationToken)
        {
            _client.Create(
                httpHeaders: httpHeaders,
                conditions: conditions,
                cancellationToken: cancellationToken);

            // After the file is created, the lease ID is the only request condition
            // that remains valid for the Append and Flush calls that follow.
            conditions = new DataLakeRequestConditions
            {
                LeaseId = conditions?.LeaseId
            };

            // If we can compute the size and it's small enough
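            // (TryGetLength presumably only reports a length for seekable streams,
            // so unseekable content falls through to the chunked path below.)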
            if (PartitionedUploadExtensions.TryGetLength(content, out long contentLength) &&
                contentLength < _singleUploadThreshold)
            {
                // Append the whole stream in a single request
                _client.Append(
                    content,
                    offset: 0,
                    leaseId: conditions?.LeaseId,
                    progressHandler: progressHandler,
                    cancellationToken: cancellationToken);

                // Calculate flush position
                long flushPosition = contentLength;

                return _client.Flush(
                    position: flushPosition,
                    httpHeaders: httpHeaders,
                    conditions: conditions,
                    cancellationToken: cancellationToken);
            }

            // If the caller provided an explicit block size, we'll use it.
            // Otherwise we'll adjust dynamically based on the size of the
            // content.
            int blockSize =
                _blockSize != null ? _blockSize.Value :
                contentLength < Constants.LargeUploadThreshold ?
                Constants.DefaultBufferSize :
                Constants.LargeBufferSize;

            // Otherwise append individual blocks one at a time.  It's not as
            // fast as a parallel upload, but you get the benefit of the retry
            // policy working on a single block instead of the entire stream.
            return UploadInSequence(
                content,
                blockSize,
                httpHeaders,
                conditions,
                progressHandler,
                cancellationToken);
        }
        public async Task<Response<PathInfo>> UploadAsync(
            Stream content,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress<long> progressHandler,
            CancellationToken cancellationToken)
        {
            await _client.CreateAsync(
                httpHeaders: httpHeaders,
                conditions: conditions,
                cancellationToken: cancellationToken).ConfigureAwait(false);

            // After the file is created, the lease ID is the only request condition
            // that remains valid for the Append and Flush calls that follow.
            conditions = new DataLakeRequestConditions
            {
                LeaseId = conditions?.LeaseId
            };

            // If we can compute the size and it's small enough
            if (PartitionedUploadExtensions.TryGetLength(content, out long contentLength) &&
                contentLength < _singleUploadThreshold)
            {
                // Append data
                await _client.AppendAsync(
                    content,
                    offset: 0,
                    leaseId: conditions?.LeaseId,
                    progressHandler: progressHandler,
                    cancellationToken: cancellationToken).ConfigureAwait(false);

                // Flush data
                return await _client.FlushAsync(
                    position: contentLength,
                    httpHeaders: httpHeaders,
                    conditions: conditions,
                    cancellationToken: cancellationToken)
                    .ConfigureAwait(false);
            }

            // If the caller provided an explicit block size, we'll use it.
            // Otherwise we'll adjust dynamically based on the size of the
            // content.
            int blockSize =
                _blockSize != null ? _blockSize.Value :
                contentLength < Constants.LargeUploadThreshold ?
                Constants.DefaultBufferSize :
                Constants.LargeBufferSize;

            // Otherwise append individual blocks in parallel
            return await UploadInParallelAsync(
                content,
                blockSize,
                httpHeaders,
                conditions,
                progressHandler,
                cancellationToken).ConfigureAwait(false);
        }
        public async Task<Response<BlobContentInfo>> UploadAsync(
            Stream content,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier = default,
            CancellationToken cancellationToken = default)
        {
            // If we can compute the size and it's small enough
            if (PartitionedUploadExtensions.TryGetLength(content, out long length) && length < _singleUploadThreshold)
            {
                // Upload it in a single request
                return await _client.UploadInternal(
                    content,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    progressHandler,
                    _operationName,
                    async: true,
                    cancellationToken)
                    .ConfigureAwait(false);
            }

            // If the caller provided an explicit block size, we'll use it.
            // Otherwise we'll adjust dynamically based on the size of the
            // content.
            int blockSize =
                _blockSize != null ? _blockSize.Value :
                length < Constants.LargeUploadThreshold ?
                Constants.DefaultBufferSize :
                Constants.LargeBufferSize;

            // Otherwise stage individual blocks in parallel
            return await UploadInParallelAsync(
                content,
                blockSize,
                blobHttpHeaders,
                metadata,
                conditions,
                progressHandler,
                accessTier,
                cancellationToken)
                .ConfigureAwait(false);
        }
        public Response<BlobContentInfo> Upload(
            Stream content,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier = default,
            CancellationToken cancellationToken = default)
        {
            // If we can compute the size and it's small enough
            if (PartitionedUploadExtensions.TryGetLength(content, out long length) && length < _singleUploadThreshold)
            {
                // Upload it in a single request
                return _client.UploadInternal(
                    content,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    progressHandler,
                    _operationName,
                    async: false,
                    cancellationToken).EnsureCompleted();
            }

            // If the caller provided an explicit block size, we'll use it.
            // Otherwise we'll adjust dynamically based on the size of the
            // content.
            int blockSize =
                _blockSize != null ? _blockSize.Value :
                length < Constants.LargeUploadThreshold ?
                Constants.DefaultBufferSize :
                Constants.LargeBufferSize;

            // Otherwise stage individual blocks one at a time.  It's not as
            // fast as a parallel upload, but you get the benefit of the retry
            // policy working on a single block instead of the entire stream.
            return UploadInSequence(
                content,
                blockSize,
                blobHttpHeaders,
                metadata,
                conditions,
                progressHandler,
                accessTier,
                cancellationToken);
        }
        private async Task<Response<PathInfo>> UploadInParallelAsync(
            Stream content,
            int blockSize,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress<long> progressHandler,
            CancellationToken cancellationToken)
        {
            // Wrap the append and flush calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each append operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // A list of tasks that are currently executing which will
                // always be smaller than _maxWorkerCount
                List<Task> runningTasks = new List<Task>();

                // We need to keep track of how much data we have appended to
                // calculate offsets for the next appends, and the final
                // position to flush
                long appendedBytes = 0;

                // Partition the stream into individual blocks
                await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                                   content, blockSize, async: true, _arrayPool, cancellationToken).ConfigureAwait(false))
                {
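                    // Note: unlike the sequential path we don't dispose the block here;
                    // AppendBlockAsync is assumed to return the block's pooled buffer
                    // to the ArrayPool once its append completes.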
                    // Start appending the next block (but don't await the Task!)
                    Task task = AppendBlockAsync(
                        block,
                        appendedBytes,
                        conditions?.LeaseId,
                        progressHandler,
                        cancellationToken);

                    // Add the block to our running task list
                    runningTasks.Add(task);

                    appendedBytes += block.Length;

                    // If we run out of workers
                    if (runningTasks.Count >= _maxWorkerCount)
                    {
                        // Wait for at least one of them to finish
                        await Task.WhenAny(runningTasks).ConfigureAwait(false);

                        // Clear any completed blocks from the task list
                        for (int i = 0; i < runningTasks.Count; i++)
                        {
                            Task runningTask = runningTasks[i];
                            if (!runningTask.IsCompleted)
                            {
                                continue;
                            }

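                            // Awaiting the completed task observes it, so any exception
                            // from that append is rethrown here rather than swallowed.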
                            await runningTask.ConfigureAwait(false);

                            runningTasks.RemoveAt(i);
                            i--;
                        }
                    }
                }

                // Wait for all the remaining appends to finish and then flush
                // the file to complete the upload
                await Task.WhenAll(runningTasks).ConfigureAwait(false);

                return await _client.FlushAsync(
                    position: appendedBytes,
                    httpHeaders: httpHeaders,
                    conditions: conditions,
                    cancellationToken: cancellationToken)
                    .ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
        private Response<PathInfo> UploadInSequence(
            Stream content,
            int blockSize,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress<long> progressHandler,
            CancellationToken cancellationToken)
        {
            // Wrap the append and flush calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each append operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // Partition the stream into individual blocks and stage them
                // We need to keep track of how much data we have appended to
                // calculate offsets for the next appends, and the final
                // position to flush
                long appendedBytes = 0;
                foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                             content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
                {
                    // Dispose the block after the loop iterates and return its memory to our ArrayPool
                    using (block)
                    {
                        // Append the next block
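                        // The pooled buffer is wrapped in a read-only MemoryStream so
                        // only the valid bytes of the block are sent.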
                        _client.Append(
                            new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                            offset: appendedBytes,
                            leaseId: conditions?.LeaseId,
                            progressHandler: progressHandler,
                            cancellationToken: cancellationToken);

                        appendedBytes += block.Length;
                    }
                }

                // Flush the file after everything has been appended to
                // complete the upload
                return _client.Flush(
                    position: appendedBytes,
                    httpHeaders: httpHeaders,
                    conditions: conditions,
                    cancellationToken: cancellationToken);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
        private async Task<Response<BlobContentInfo>> UploadInParallelAsync(
            Stream content,
            int blockSize,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each stage block operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // The list of block IDs we're going to commit
                List<string> blockIds = new List<string>();

                // A list of tasks that are currently executing which will
                // always be smaller than _maxWorkerCount
                List<Task> runningTasks = new List<Task>();

                // Partition the stream into individual blocks
                await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                                   content,
                                   blockSize,
                                   async: true,
                                   _arrayPool,
                                   cancellationToken).ConfigureAwait(false))
                {
                    // Start staging the next block (but don't await the Task!)
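                    // The block ID is derived from the block's absolute offset in the
                    // stream; committing the IDs in list order below reassembles the
                    // original content.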
                    string blockId = GenerateBlockId(block.AbsolutePosition);
                    Task task = StageBlockAsync(
                        block,
                        blockId,
                        conditions,
                        progressHandler,
                        cancellationToken);

                    // Add the block to our task and commit lists
                    runningTasks.Add(task);
                    blockIds.Add(blockId);

                    // If we run out of workers
                    if (runningTasks.Count >= _maxWorkerCount)
                    {
                        // Wait for at least one of them to finish
                        await Task.WhenAny(runningTasks).ConfigureAwait(false);

                        // Clear any completed blocks from the task list
                        for (int i = 0; i < runningTasks.Count; i++)
                        {
                            Task runningTask = runningTasks[i];
                            if (!runningTask.IsCompleted)
                            {
                                continue;
                            }

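                            // Awaiting the completed task observes it, so any exception
                            // from staging that block is rethrown here.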
                            await runningTask.ConfigureAwait(false);

                            runningTasks.RemoveAt(i);
                            i--;
                        }
                    }
                }

                // Wait for all the remaining blocks to finish staging and then
                // commit the block list to complete the upload
                await Task.WhenAll(runningTasks).ConfigureAwait(false);

                return await _client.CommitBlockListAsync(
                    blockIds,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    cancellationToken)
                    .ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
        private Response<BlobContentInfo> UploadInSequence(
            Stream content,
            int blockSize,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each stage block operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // The list of block IDs we're going to commit
                List<string> blockIds = new List<string>();

                // Partition the stream into individual blocks and stage them
                foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                             content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
                {
                    // Dispose the block after the loop iterates and return its memory to our ArrayPool
                    using (block)
                    {
                        // Stage the next block
                        string blockId = GenerateBlockId(block.AbsolutePosition);
                        _client.StageBlock(
                            blockId,
                            new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                            conditions: conditions,
                            progressHandler: progressHandler,
                            cancellationToken: cancellationToken);

                        blockIds.Add(blockId);
                    }
                }

                // Commit the block list after everything has been staged to
                // complete the upload
                return _client.CommitBlockList(
                    blockIds,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    cancellationToken);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }