private async Task<Response<PathInfo>> UploadInParallelAsync(
    Stream content,
    int blockSize,
    PathHttpHeaders httpHeaders,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    // Wrap the append and flush calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ??
        $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");
    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each append file operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // A list of tasks that are currently executing which will
        // always be smaller than _maxWorkerCount
        List<Task> runningTasks = new List<Task>();

        // We need to keep track of how much data we have appended to
        // calculate offsets for the next appends, and the final
        // position to flush
        long appendedBytes = 0;

        // Partition the stream into individual blocks
        await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
            content, blockSize, async: true, _arrayPool, cancellationToken).ConfigureAwait(false))
        {
            // Start appending the next block (but don't await the Task!)
            Task task = AppendBlockAsync(
                block,
                appendedBytes,
                conditions?.LeaseId,
                progressHandler,
                cancellationToken);

            // Add the task to our running list and advance the append offset
            runningTasks.Add(task);
            appendedBytes += block.Length;

            // If we run out of workers
            if (runningTasks.Count >= _maxWorkerCount)
            {
                // Wait for at least one of them to finish
                await Task.WhenAny(runningTasks).ConfigureAwait(false);

                // Clear any completed blocks from the task list
                for (int i = 0; i < runningTasks.Count; i++)
                {
                    Task runningTask = runningTasks[i];
                    if (!runningTask.IsCompleted)
                    {
                        continue;
                    }
                    await runningTask.ConfigureAwait(false);
                    runningTasks.RemoveAt(i);
                    i--;
                }
            }
        }

        // Wait for all the remaining blocks to finish appending and then
        // flush to complete the upload
        await Task.WhenAll(runningTasks).ConfigureAwait(false);
        return await _client.FlushAsync(
            position: appendedBytes,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
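The core throttling technique above is worth seeing in isolation: start tasks eagerly, but once the in-flight count reaches the worker limit, wait for at least one to finish with Task.WhenAny and prune (and observe) every completed task before starting more. The following minimal, standalone sketch demonstrates that pattern; the names UploadBlockAsync and maxWorkers are illustrative, not part of the SDK.

using System;
using System.Collections.Generic;
using System.Threading.Tasks;

class BoundedParallelismDemo
{
    // Illustrative work item: stands in for appending/staging one block.
    private static async Task UploadBlockAsync(int blockNumber)
    {
        await Task.Delay(100);                      // stand-in for a network call
        Console.WriteLine($"block {blockNumber} done");
    }

    static async Task Main()
    {
        const int maxWorkers = 4;                   // analogous to _maxWorkerCount
        var running = new List<Task>();

        for (int block = 0; block < 20; block++)
        {
            running.Add(UploadBlockAsync(block));   // start, but do not await yet

            if (running.Count >= maxWorkers)
            {
                // Block until at least one in-flight task finishes...
                await Task.WhenAny(running);

                // ...then remove every completed task, awaiting each one so
                // any exception surfaces here rather than being lost.
                for (int i = 0; i < running.Count; i++)
                {
                    if (!running[i].IsCompleted)
                    {
                        continue;
                    }
                    await running[i];
                    running.RemoveAt(i);
                    i--;
                }
            }
        }

        await Task.WhenAll(running);                // drain the stragglers
    }
}

Awaiting each completed task during pruning is deliberate: it keeps the list bounded while still propagating failures, which is why the SDK method rethrows through the surrounding try/catch and marks the diagnostic scope as failed.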
private Response<PathInfo> UploadInSequence(
    Stream content,
    int blockSize,
    PathHttpHeaders httpHeaders,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    // Wrap the append and flush calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ??
        $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");
    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each append file operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // Partition the stream into individual blocks and append them.
        // We need to keep track of how much data we have appended to
        // calculate offsets for the next appends, and the final
        // position to flush
        long appendedBytes = 0;
        foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
            content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
        {
            // Dispose the block after the loop iterates and return its memory to our ArrayPool
            using (block)
            {
                // Append the next block
                _client.Append(
                    new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                    offset: appendedBytes,
                    leaseId: conditions?.LeaseId,
                    progressHandler: progressHandler,
                    cancellationToken: cancellationToken);

                appendedBytes += block.Length;
            }
        }

        // Flush after everything has been appended to complete the upload
        return _client.Flush(
            position: appendedBytes,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
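The AggregatingProgressIncrementer referenced in both methods is internal to the SDK; its job, per the comments, is to keep the caller's IProgress&lt;long&gt; from appearing to reset when each append or stage operation reports its own progress. A rough, simplified stand-in that folds per-block reports into one running total might look like the sketch below. The class name and the assumption that each report can be treated as an increment are mine, not the SDK's actual implementation.

using System;
using System.Threading;

// Hypothetical stand-in for the SDK-internal AggregatingProgressIncrementer:
// each per-block report is treated as a delta and added to a running total,
// so the wrapped handler only ever sees a monotonically increasing value.
class RunningTotalProgress : IProgress<long>
{
    private readonly IProgress<long> _inner;
    private long _total;

    public RunningTotalProgress(IProgress<long> inner) => _inner = inner;

    public void Report(long value)
    {
        long total = Interlocked.Add(ref _total, value);
        _inner?.Report(total);
    }
}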
private Response<BlobContentInfo> UploadInSequence(
    Stream content,
    int blockSize,
    BlobHttpHeaders blobHttpHeaders,
    IDictionary<string, string> metadata,
    BlobRequestConditions conditions,
    IProgress<long> progressHandler,
    AccessTier? accessTier,
    CancellationToken cancellationToken)
{
    // Wrap the staging and commit calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ??
        $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");
    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each stage block operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // The list tracking block IDs we're going to commit
        List<string> blockIds = new List<string>();

        // Partition the stream into individual blocks and stage them
        foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
            content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
        {
            // Dispose the block after the loop iterates and return its memory to our ArrayPool
            using (block)
            {
                // Stage the next block
                string blockId = GenerateBlockId(block.AbsolutePosition);
                _client.StageBlock(
                    blockId,
                    new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                    conditions: conditions,
                    progressHandler: progressHandler,
                    cancellationToken: cancellationToken);

                blockIds.Add(blockId);
            }
        }

        // Commit the block list after everything has been staged to
        // complete the upload
        return _client.CommitBlockList(
            blockIds,
            blobHttpHeaders,
            metadata,
            conditions,
            accessTier,
            cancellationToken);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
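GenerateBlockId itself is not shown in this listing. The Block Blob service requires every block ID within a blob to be a valid Base64 string of the same encoded length, so one plausible helper (purely illustrative, not the SDK's actual code) encodes the block's absolute starting offset as a fixed-width string:

using System;
using System.Text;

static class BlockIdHelper
{
    // Illustrative only: pad the block's absolute position to a fixed
    // width, then Base64-encode it, satisfying the service requirement
    // that all block IDs in a blob be Base64 and of equal length.
    public static string GenerateBlockId(long absolutePosition) =>
        Convert.ToBase64String(Encoding.UTF8.GetBytes(absolutePosition.ToString("D20")));
}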
private async Task<Response<BlobContentInfo>> UploadInParallelAsync(
    Stream content,
    int blockSize,
    BlobHttpHeaders blobHttpHeaders,
    IDictionary<string, string> metadata,
    BlobRequestConditions conditions,
    IProgress<long> progressHandler,
    AccessTier? accessTier,
    CancellationToken cancellationToken)
{
    // Wrap the staging and commit calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ??
        $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");
    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each stage block operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // The list tracking block IDs we're going to commit
        List<string> blockIds = new List<string>();

        // A list of tasks that are currently executing which will
        // always be smaller than _maxWorkerCount
        List<Task> runningTasks = new List<Task>();

        // Partition the stream into individual blocks
        await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
            content, blockSize, async: true, _arrayPool, cancellationToken).ConfigureAwait(false))
        {
            // Start staging the next block (but don't await the Task!)
            string blockId = GenerateBlockId(block.AbsolutePosition);
            Task task = StageBlockAsync(
                block,
                blockId,
                conditions,
                progressHandler,
                cancellationToken);

            // Add the block to our task and commit lists
            runningTasks.Add(task);
            blockIds.Add(blockId);

            // If we run out of workers
            if (runningTasks.Count >= _maxWorkerCount)
            {
                // Wait for at least one of them to finish
                await Task.WhenAny(runningTasks).ConfigureAwait(false);

                // Clear any completed blocks from the task list
                for (int i = 0; i < runningTasks.Count; i++)
                {
                    Task runningTask = runningTasks[i];
                    if (!runningTask.IsCompleted)
                    {
                        continue;
                    }
                    await runningTask.ConfigureAwait(false);
                    runningTasks.RemoveAt(i);
                    i--;
                }
            }
        }

        // Wait for all the remaining blocks to finish staging and then
        // commit the block list to complete the upload
        await Task.WhenAll(runningTasks).ConfigureAwait(false);
        return await _client.CommitBlockListAsync(
            blockIds,
            blobHttpHeaders,
            metadata,
            conditions,
            accessTier,
            cancellationToken)
            .ConfigureAwait(false);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
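From a caller's perspective, this parallel path is normally reached through the public BlobClient.UploadAsync overloads rather than invoked directly. A sketch of such a call is shown below; the container name, blob name, file path, and option values are placeholders, and the exact overload and how StorageTransferOptions maps onto the internal blockSize and _maxWorkerCount may vary between SDK versions.

using System;
using System.IO;
using System.Threading.Tasks;
using Azure.Storage;
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Models;

class UploadExample
{
    static async Task Main()
    {
        // Placeholder connection string and names; substitute your own.
        var blob = new BlobClient(
            Environment.GetEnvironmentVariable("AZURE_STORAGE_CONNECTION_STRING"),
            "my-container",
            "big-file.bin");

        using FileStream content = File.OpenRead("big-file.bin");

        var options = new BlobUploadOptions
        {
            TransferOptions = new StorageTransferOptions
            {
                // Roughly corresponds to the block size and worker count
                // used by the internal partitioned uploader.
                MaximumTransferSize = 8 * 1024 * 1024,
                MaximumConcurrency = 4
            },
            ProgressHandler = new Progress<long>(
                bytes => Console.WriteLine($"{bytes} bytes uploaded"))
        };

        await blob.UploadAsync(content, options);
    }
}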
private async Task UploadFromMultiStreamAsync(
    IEnumerable<Stream> streamList,
    AccessCondition accessCondition,
    BlobRequestOptions options,
    OperationContext operationContext,
    AggregatingProgressIncrementer progressIncrementer,
    CancellationToken cancellationToken)