public virtual async Task<Response> DeleteSubDirectoryAsync(
    string path,
    string continuation = default,
    DataLakeRequestConditions conditions = default,
    CancellationToken cancellationToken = default) =>
    await GetSubDirectoryClient(path).DeleteAsync(
        recursive: true,
        conditions,
        cancellationToken)
        .ConfigureAwait(false);
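A minimal usage sketch for the method above, assuming an existing DataLakeDirectoryClient named directory and a lease ID obtained elsewhere (both hypothetical):

// Delete the "temp" subdirectory, but only while we still hold the lease.
await directory.DeleteSubDirectoryAsync(
    "temp",
    conditions: new DataLakeRequestConditions { LeaseId = leaseId });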
public async Task<Response<PathInfo>> UploadAsync(
    Stream content,
    PathHttpHeaders httpHeaders,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    await _client.CreateAsync(
        httpHeaders: httpHeaders,
        conditions: conditions,
        cancellationToken: cancellationToken).ConfigureAwait(false);

    // After the file is created, the lease ID is the only valid request parameter.
    conditions = new DataLakeRequestConditions
    {
        LeaseId = conditions?.LeaseId
    };

    // If we can compute the size and it's small enough
    if (PartitionedUploadExtensions.TryGetLength(content, out long contentLength) &&
        contentLength < _singleUploadThreshold)
    {
        // Append data
        await _client.AppendAsync(
            content,
            offset: 0,
            leaseId: conditions?.LeaseId,
            progressHandler: progressHandler,
            cancellationToken: cancellationToken).ConfigureAwait(false);

        // Flush data
        return await _client.FlushAsync(
            position: contentLength,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }

    // If the caller provided an explicit block size, we'll use it.
    // Otherwise we'll adjust dynamically based on the size of the
    // content.
    int blockSize =
        _blockSize != null ? _blockSize.Value :
        contentLength < Constants.LargeUploadThreshold ?
            Constants.DefaultBufferSize :
            Constants.LargeBufferSize;

    // Otherwise stage individual blocks in parallel
    return await UploadInParallelAsync(
        content,
        blockSize,
        httpHeaders,
        conditions,
        progressHandler,
        cancellationToken).ConfigureAwait(false);
}
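The helper above backs the public upload surface. A hedged sketch of reaching it through DataLakeFileClient.UploadAsync, assuming the v12 overload that takes DataLakeFileUploadOptions; the URI, credential, and file names are placeholders:

var file = new DataLakeFileClient(fileUri, credential);
using FileStream content = File.OpenRead("data.csv");

Response<PathInfo> result = await file.UploadAsync(
    content,
    new DataLakeFileUploadOptions
    {
        HttpHeaders = new PathHttpHeaders { ContentType = "text/csv" },
        // Progress is reported as a running byte count
        ProgressHandler = new Progress<long>(bytes => Console.WriteLine($"{bytes} bytes sent"))
    });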
public virtual Response<PathInfo> Create(
    PathHttpHeaders? httpHeaders = default,
    Metadata metadata = default,
    string permissions = default,
    string umask = default,
    DataLakeRequestConditions conditions = default,
    CancellationToken cancellationToken = default) =>
    Create(
        PathResourceType.Directory,
        httpHeaders,
        metadata,
        permissions,
        umask,
        conditions,
        cancellationToken);
public DataLakeFileWriteStream(
    DataLakeFileClient fileClient,
    long bufferSize,
    long position,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler) : base(
        position,
        bufferSize,
        progressHandler)
{
    ValidateBufferSize(bufferSize);
    _fileClient = fileClient;
    _conditions = conditions ?? new DataLakeRequestConditions();
    _writeIndex = position;
}
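DataLakeFileWriteStream backs the Stream handed out by DataLakeFileClient.OpenWrite/OpenWriteAsync. A minimal usage sketch, assuming an existing DataLakeFileClient named file:

using (Stream stream = await file.OpenWriteAsync(overwrite: true))
{
    byte[] line = Encoding.UTF8.GetBytes("hello world\n");
    await stream.WriteAsync(line, 0, line.Length);
}   // disposing the stream flushes any buffered bytes to the service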
public new virtual Response<DirectoryClient> Rename(
    string destinationPath,
    DataLakeRequestConditions sourceConditions = default,
    DataLakeRequestConditions destinationConditions = default,
    CancellationToken cancellationToken = default)
{
    Response<PathClient> response = base.Rename(
        destinationPath,
        sourceConditions,
        destinationConditions,
        cancellationToken);

    return Response.FromValue(
        new DirectoryClient(response.Value.DfsUri, response.Value.Pipeline),
        response.GetRawResponse());
}
public virtual async Task<Response<PathInfo>> CreateAsync(
    PathHttpHeaders? httpHeaders = default,
    Metadata metadata = default,
    string permissions = default,
    string umask = default,
    DataLakeRequestConditions conditions = default,
    CancellationToken cancellationToken = default) =>
    await CreateAsync(
        PathResourceType.Directory,
        httpHeaders,
        metadata,
        permissions,
        umask,
        conditions,
        cancellationToken)
        .ConfigureAwait(false);
public new virtual async Task<Response<DirectoryClient>> RenameAsync(
    string destinationPath,
    DataLakeRequestConditions sourceConditions = default,
    DataLakeRequestConditions destinationConditions = default,
    CancellationToken cancellationToken = default)
{
    Response<PathClient> response = await base.RenameAsync(
        destinationPath,
        sourceConditions,
        destinationConditions,
        cancellationToken)
        .ConfigureAwait(false);

    return Response.FromValue(
        new DirectoryClient(response.Value.DfsUri, response.Value.Pipeline),
        response.GetRawResponse());
}
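A hedged usage sketch for the rename pair above, reusing this snippet's DirectoryClient naming; passing ETag.All ("*") as a destination If-None-Match should make the rename fail if the destination already exists:

Response<DirectoryClient> renamed = await directory.RenameAsync(
    "archive/2020",
    destinationConditions: new DataLakeRequestConditions { IfNoneMatch = ETag.All });
DirectoryClient archived = renamed.Value;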
public DataLakeRequestConditions BuildDataLakeRequestConditions(
    AccessConditionParameters parameters,
    bool lease = true)
{
    DataLakeRequestConditions conditions = new DataLakeRequestConditions
    {
        IfModifiedSince = parameters.IfModifiedSince,
        IfUnmodifiedSince = parameters.IfUnmodifiedSince,
        IfMatch = parameters.Match != null ? new ETag(parameters.Match) : default(ETag?),
        IfNoneMatch = parameters.NoneMatch != null ? new ETag(parameters.NoneMatch) : default(ETag?)
    };

    if (lease)
    {
        conditions.LeaseId = parameters.LeaseId;
    }
    return conditions;
}
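A usage sketch for the test helper above; AccessConditionParameters is the parameter bag referenced in the method body, and the etag and leaseId values are hypothetical:

var parameters = new AccessConditionParameters
{
    Match = etag.ToString(),   // only proceed if the path is unchanged
    LeaseId = leaseId
};
DataLakeRequestConditions conditions = BuildDataLakeRequestConditions(parameters);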
public DataLakeFileWriteStream(
    DataLakeFileClient fileClient,
    long bufferSize,
    long position,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    UploadTransactionalHashingOptions hashingOptions,
    bool? closeEvent) : base(
        position,
        bufferSize,
        progressHandler,
        hashingOptions)
{
    ValidateBufferSize(bufferSize);
    _fileClient = fileClient;
    _conditions = conditions ?? new DataLakeRequestConditions();
    _writeIndex = position;
    _closeEvent = closeEvent;
}
public virtual Response<FileClient> CreateFile(
    string fileName,
    PathHttpHeaders? httpHeaders = default,
    Metadata metadata = default,
    string permissions = default,
    string umask = default,
    DataLakeRequestConditions conditions = default,
    CancellationToken cancellationToken = default)
{
    FileClient fileClient = GetFileClient(fileName);

    Response<PathInfo> response = fileClient.Create(
        httpHeaders,
        metadata,
        permissions,
        umask,
        conditions,
        cancellationToken);

    return Response.FromValue(
        fileClient,
        response.GetRawResponse());
}
public virtual Response<DirectoryClient> CreateSubDirectory(
    string path,
    PathHttpHeaders? httpHeaders = default,
    Metadata metadata = default,
    string permissions = default,
    string umask = default,
    DataLakeRequestConditions conditions = default,
    CancellationToken cancellationToken = default)
{
    DirectoryClient directoryClient = GetSubDirectoryClient(path);

    Response<PathInfo> response = directoryClient.Create(
        PathResourceType.Directory,
        httpHeaders,
        metadata,
        permissions,
        umask,
        conditions,
        cancellationToken);

    return Response.FromValue(
        directoryClient,
        response.GetRawResponse());
}
public virtual async Task<Response<DataLakeFileClient>> CreateFileAsync(
    string fileName,
    PathHttpHeaders httpHeaders = default,
    Metadata metadata = default,
    string permissions = default,
    string umask = default,
    DataLakeRequestConditions conditions = default,
    CancellationToken cancellationToken = default)
{
    DataLakeFileClient fileClient = GetFileClient(fileName);

    Response<PathInfo> response = await fileClient.CreateAsync(
        httpHeaders,
        metadata,
        permissions,
        umask,
        conditions,
        cancellationToken)
        .ConfigureAwait(false);

    return Response.FromValue(
        fileClient,
        response.GetRawResponse());
}
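A combined usage sketch for the create helpers above, assuming an existing directory client named directory that exposes them; the names, permissions string, and metadata are placeholders:

Response<DirectoryClient> sub = directory.CreateSubDirectory(
    "sensors",
    permissions: "rwxr-x---");   // POSIX-style access string (octal "0750" also works)

Response<DataLakeFileClient> file = await directory.CreateFileAsync(
    "sensors/readings.json",
    metadata: new Dictionary<string, string> { ["source"] = "device-7" });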
private async Task<Response<PathInfo>> UploadInParallelAsync(
    Stream content,
    int blockSize,
    PathHttpHeaders httpHeaders,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    // Wrap the append and flush calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each append file operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // A list of tasks that are currently executing which will
        // always be smaller than _maxWorkerCount
        List<Task> runningTasks = new List<Task>();

        // We need to keep track of how much data we have appended to
        // calculate offsets for the next appends, and the final
        // position to flush
        long appendedBytes = 0;

        // Partition the stream into individual blocks
        await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
            content, blockSize, async: true, _arrayPool, cancellationToken).ConfigureAwait(false))
        {
            // Start appending the next block (but don't await the Task!)
            Task task = AppendBlockAsync(
                block,
                appendedBytes,
                conditions?.LeaseId,
                progressHandler,
                cancellationToken);

            // Add the task to our running list
            runningTasks.Add(task);

            appendedBytes += block.Length;

            // If we run out of workers
            if (runningTasks.Count >= _maxWorkerCount)
            {
                // Wait for at least one of them to finish
                await Task.WhenAny(runningTasks).ConfigureAwait(false);

                // Clear any completed tasks from the running list,
                // awaiting each one so faults are observed here
                for (int i = 0; i < runningTasks.Count; i++)
                {
                    Task runningTask = runningTasks[i];
                    if (!runningTask.IsCompleted)
                    {
                        continue;
                    }

                    await runningTask.ConfigureAwait(false);

                    runningTasks.RemoveAt(i);
                    i--;
                }
            }
        }

        // Wait for all the remaining blocks to finish appending and
        // then flush the file to complete the upload
        await Task.WhenAll(runningTasks).ConfigureAwait(false);

        return await _client.FlushAsync(
            position: appendedBytes,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
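The worker-throttling loop above (Task.WhenAny, then pruning completed tasks) is a general pattern. A standalone sketch of the same idea with a hypothetical RunThrottledAsync helper, not part of the SDK:

static async Task RunThrottledAsync<T>(
    IEnumerable<T> items,
    Func<T, Task> worker,
    int maxWorkers)
{
    var running = new List<Task>();
    foreach (T item in items)
    {
        running.Add(worker(item));

        if (running.Count >= maxWorkers)
        {
            // Wait for at least one task to finish...
            await Task.WhenAny(running);

            // ...then await and remove every completed task so any
            // fault surfaces here instead of being silently dropped.
            for (int i = running.Count - 1; i >= 0; i--)
            {
                if (running[i].IsCompleted)
                {
                    await running[i];
                    running.RemoveAt(i);
                }
            }
        }
    }

    // Drain whatever is still in flight.
    await Task.WhenAll(running);
}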
private Response<PathInfo> UploadInSequence(
    Stream content,
    int blockSize,
    PathHttpHeaders httpHeaders,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    // Wrap the append and flush calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each append file operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // Partition the stream into individual blocks and stage them.
        // We need to keep track of how much data we have appended to
        // calculate offsets for the next appends, and the final
        // position to flush
        long appendedBytes = 0;
        foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
            content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
        {
            // Dispose the block after the loop iterates and return its memory to our ArrayPool
            using (block)
            {
                // Append the next block
                _client.Append(
                    new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                    offset: appendedBytes,
                    leaseId: conditions?.LeaseId,
                    progressHandler: progressHandler,
                    cancellationToken: cancellationToken);

                appendedBytes += block.Length;
            }
        }

        // Flush the appended data after everything has been staged to
        // complete the upload
        return _client.Flush(
            position: appendedBytes,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
public Response<PathInfo> Upload(
    Stream content,
    PathHttpHeaders httpHeaders,
    Metadata metadata,
    string permissions,
    string umask,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    _client.Create(
        httpHeaders: httpHeaders,
        metadata: metadata,
        permissions: permissions,
        umask: umask,
        conditions: conditions,
        cancellationToken: cancellationToken);

    // After the file is created, the lease ID is the only valid request parameter.
    conditions = new DataLakeRequestConditions
    {
        LeaseId = conditions?.LeaseId
    };

    // If we can compute the size and it's small enough
    if (PartitionedUploadExtensions.TryGetLength(content, out long contentLength) &&
        contentLength < _singleUploadThreshold)
    {
        // Append the data in a single request
        _client.Append(
            content,
            offset: 0,
            leaseId: conditions?.LeaseId,
            progressHandler: progressHandler,
            cancellationToken: cancellationToken);

        // Calculate flush position
        long flushPosition = contentLength;

        return _client.Flush(
            position: flushPosition,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken);
    }

    // If the caller provided an explicit block size, we'll use it.
    // Otherwise we'll adjust dynamically based on the size of the
    // content.
    int blockSize =
        _blockSize != null ? _blockSize.Value :
        contentLength < Constants.LargeUploadThreshold ?
            Constants.DefaultBufferSize :
            Constants.LargeBufferSize;

    // Otherwise stage individual blocks one at a time.  It's not as
    // fast as a parallel upload, but you get the benefit of the retry
    // policy working on a single block instead of the entire stream.
    return UploadInSequence(
        content,
        blockSize,
        httpHeaders,
        conditions,
        progressHandler,
        cancellationToken);
}
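Tying it together: a hedged sketch that drives the sequential path through the public v12 API, assuming the Upload overload taking DataLakeFileUploadOptions and that StorageTransferOptions.MaximumConcurrency = 1 selects single-block-at-a-time staging:

DataLakeFileClient file = fileSystem.GetFileClient("report.csv");
using FileStream content = File.OpenRead("report.csv");

Response<PathInfo> info = file.Upload(
    content,
    new DataLakeFileUploadOptions
    {
        TransferOptions = new StorageTransferOptions { MaximumConcurrency = 1 }
    });
Console.WriteLine($"Uploaded; ETag = {info.Value.ETag}");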