Example No. 1
        public PartitionedDownloader(
            BlobBaseClient client,
            StorageTransferOptions transferOptions = default,
            // TODO #27253
            //DownloadTransactionalHashingOptions hashingOptions = default,
            IProgress<long> progress = default)
        {
            _client = client;

            // Set _maxWorkerCount
            if (transferOptions.MaximumConcurrency.HasValue &&
                transferOptions.MaximumConcurrency > 0)
            {
                _maxWorkerCount = transferOptions.MaximumConcurrency.Value;
            }
            else
            {
                _maxWorkerCount = Constants.Blob.Block.DefaultConcurrentTransfersCount;
            }

            // Set _rangeSize
            if (transferOptions.MaximumTransferSize.HasValue &&
                transferOptions.MaximumTransferSize.Value > 0)
            {
                _rangeSize = Math.Min(transferOptions.MaximumTransferSize.Value, Constants.Blob.Block.MaxDownloadBytes);
            }
            else
            {
                _rangeSize = Constants.DefaultBufferSize;
            }

            // Set _initialRangeSize
            if (transferOptions.InitialTransferSize.HasValue &&
                transferOptions.InitialTransferSize.Value > 0)
            {
                _initialRangeSize = transferOptions.InitialTransferSize.Value;
            }
            else
            {
                _initialRangeSize = Constants.Blob.Block.DefaultInitalDownloadRangeSize;
            }

            // TODO #27253
            // the caller to this stream cannot defer validation, as they cannot access a returned hash
            //if (!(hashingOptions?.Validate ?? true))
            //{
            //    throw Errors.CannotDeferTransactionalHashVerification();
            //}

            //_hashingOptions = hashingOptions;
            _progress = progress;

            /* Unlike partitioned upload, download cannot tell ahead of time if it will split and/or parallelize
             * after first call. Instead of applying progress handling to initial download stream after-the-fact,
             * wrap a given progress handler in an aggregator upfront and accept the overhead. */
            if (_progress != null && _progress is not AggregatingProgressIncrementer)
            {
                _progress = new AggregatingProgressIncrementer(_progress);
            }
        }
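A minimal caller-side sketch of how the options consumed above are typically supplied (the client and stream names are assumed; the numbers are illustrative, not defaults):

        // MaximumConcurrency -> _maxWorkerCount, MaximumTransferSize -> _rangeSize (capped at
        // Constants.Blob.Block.MaxDownloadBytes), InitialTransferSize -> _initialRangeSize.
        static async Task DownloadWithOptionsAsync(BlobBaseClient blobBaseClient, Stream destination)
        {
            var transferOptions = new StorageTransferOptions
            {
                MaximumConcurrency = 4,
                InitialTransferSize = 4 * 1024 * 1024,
                MaximumTransferSize = 8 * 1024 * 1024
            };

            // Same DownloadToAsync overload used elsewhere on this page: (destination, conditions, transferOptions).
            await blobBaseClient.DownloadToAsync(destination, null, transferOptions);
        }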
Example No. 2
        /// <summary>
        /// This operation will create a new
        /// block blob of arbitrary size by uploading it as individually staged
        /// blocks if it's larger than the
        /// <paramref name="singleUploadThreshold"/>.
        /// </summary>
        /// <param name="content">
        /// A <see cref="Stream"/> containing the content to upload.
        /// </param>
        /// <param name="blobHttpHeaders">
        /// Optional standard HTTP header properties that can be set for the
        /// block blob.
        /// </param>
        /// <param name="metadata">
        /// Optional custom metadata to set for this block blob.
        /// </param>
        /// <param name="conditions">
        /// Optional <see cref="BlobRequestConditions"/> to add conditions on
        /// the creation of this new block blob.
        /// </param>
        /// <param name="progressHandler">
        /// Optional <see cref="IProgress{Long}"/> to provide
        /// progress updates about data transfers.
        /// </param>
        /// <param name="accessTier">
        /// Optional <see cref="AccessTier"/>
        /// Indicates the tier to be set on the blob.
        /// </param>
        /// <param name="singleUploadThreshold">
        /// The maximum size stream that we'll upload as a single block.  The
        /// default value is 256MB.
        /// </param>
        /// <param name="transferOptions">
        /// Optional <see cref="StorageTransferOptions"/> to configure
        /// parallel transfer behavior.
        /// </param>
        /// <param name="async">
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{BlobContentInfo}"/> describing the
        /// state of the updated block blob.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
            Stream content,
            BlobHttpHeaders blobHttpHeaders,
            Metadata metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier = default,
            long? singleUploadThreshold = default,
            StorageTransferOptions transferOptions = default,
            bool async = true,
            CancellationToken cancellationToken = default)
        {
            var client = new BlockBlobClient(Uri, Pipeline, Version, ClientDiagnostics, CustomerProvidedKey, EncryptionScope);

            singleUploadThreshold ??= client.BlockBlobMaxUploadBlobBytes;
            Debug.Assert(singleUploadThreshold <= client.BlockBlobMaxUploadBlobBytes);

            PartitionedUploader uploader = new PartitionedUploader(
                client,
                transferOptions,
                singleUploadThreshold,
                operationName: $"{nameof(BlobClient)}.{nameof(Upload)}");

            if (async)
            {
                return await uploader.UploadAsync(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                return uploader.Upload(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken);
            }
        }
        public async Task<Stream> GetStockStream()
        {
            if (!await this.blockBlobClient.ExistsAsync())
            {
                throw new FileNotFoundException($"Stock file not found: {this.storageSettings.FilePath}");
            }

            MemoryStream resultStream = new MemoryStream();

            try
            {
                StorageTransferOptions options = new StorageTransferOptions();

                options.InitialTransferLength = this.blobDownloadTransferOptions.InitialTransferLength;
                options.MaximumTransferLength = this.blobDownloadTransferOptions.MaximumTransferLength;
                options.MaximumConcurrency    = this.blobDownloadTransferOptions.MaximumConcurrency;

                this.logger.LogInformation("Start download from azure blob (this might take several minutes)");
                var azureResponse = await this.blockBlobClient.DownloadToAsync(resultStream, null, options);

                azureResponse.Dispose();

                this.logger.LogInformation("Finish download from azure blob");
            }
            catch (Exception exception)
            {
                this.logger.LogError(exception, "Error getting stock file : {0}", this.storageSettings.FilePath);
            }

            // Rewind so callers can read the downloaded content from the start.
            resultStream.Position = 0;

            return resultStream;
        }
Example No. 4
        public async Task UploadAsync_Stream_StorageTransferOptions(int? maximumThreadCount)
        {
            await using DisposingContainer test = await GetTestContainerAsync();

            BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewBlobName()));
            var        data = GetRandomBuffer(Constants.KB);

            using (var stream = new MemoryStream(data))
            {
                var options = new StorageTransferOptions {
                    MaximumConcurrency = maximumThreadCount
                };

                await Verify(stream => blob.UploadAsync(stream, transferOptions: options));

                async Task Verify(Func<Stream, Task<Response<BlobContentInfo>>> upload)
                {
                    using (var stream = new MemoryStream(data))
                    {
                        await upload(stream);
                    }

                    Response <BlobDownloadInfo> download = await blob.DownloadAsync();

                    using var actual = new MemoryStream();
                    await download.Value.Content.CopyToAsync(actual);

                    TestHelper.AssertSequenceEqual(data, actual.ToArray());
                }
            }

            Response <BlobProperties> properties = await blob.GetPropertiesAsync();

            Assert.AreEqual(BlobType.Block, properties.Value.BlobType);
        }
        public static Response <BlobContentInfo> Upload
        (
            IAzureBlobClientBuilder clientBuilder,
            string blobName,
            string objectName,
            Stream streamToUpload,
            StorageTransferOptions transferOptions = default(StorageTransferOptions),
            EventHandler <AzureTransferProgressEventArgs> transferProgress = null,
            CancellationToken cancellationToken = default(CancellationToken)
        )
        {
            if (clientBuilder == null)
            {
                throw new ArgumentNullException(nameof(clientBuilder));
            }
            if (streamToUpload is null)
            {
                throw new ArgumentNullException(nameof(streamToUpload));
            }

            var client = clientBuilder.GetBlobContainerClient(blobName).GetBlobClient(objectName);

            long streamLength = 0;

            if (streamToUpload.CanSeek)
            {
                streamLength = streamToUpload.Length;
            }

            return client.Upload(streamToUpload,
                                 transferOptions: transferOptions,
                                 progressHandler: new AzureTransferProgressAdapter(streamLength, transferProgress),
                                 cancellationToken: cancellationToken);
        }
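A hedged call-site sketch for the helper above (the hosting class name "BlobStorageHelper" and the clientBuilder instance are placeholders, not part of the original source):

            // Hypothetical usage; only the parameters shown in the signature above are relied on.
            using (FileStream stream = File.OpenRead("data.bin"))
            {
                Response<BlobContentInfo> response = BlobStorageHelper.Upload(
                    clientBuilder,
                    blobName: "my-container",   // despite the parameter name, this selects the container
                    objectName: "data.bin",
                    streamToUpload: stream,
                    transferOptions: new StorageTransferOptions { MaximumConcurrency = 4 });
            }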
Example No. 6
 /// <summary>
 /// The <see cref="Upload(string, BlobHttpHeaders, Metadata, BlobRequestConditions, IProgress{long}, AccessTier?, StorageTransferOptions, CancellationToken)"/>
 /// operation creates a new block blob or updates the content of an
 /// existing block blob.  Updating an existing block blob overwrites
 /// any existing metadata on the blob.
 ///
 /// For partial block blob updates and other advanced features, please
 /// see <see cref="BlockBlobClient"/>.  To create or modify page or
 /// append blobs, please see <see cref="PageBlobClient"/> or
 /// <see cref="AppendBlobClient"/>.
 ///
 /// For more information, see <see href="https://docs.microsoft.com/rest/api/storageservices/put-blob" />.
 /// </summary>
 /// <param name="path">
 /// A file path containing the content to upload.
 /// </param>
 /// <param name="httpHeaders">
 /// Optional standard HTTP header properties that can be set for the
 /// block blob.
 /// </param>
 /// <param name="metadata">
 /// Optional custom metadata to set for this block blob.
 /// </param>
 /// <param name="conditions">
 /// Optional <see cref="BlobRequestConditions"/> to add conditions on
 /// the creation of this new block blob.
 /// </param>
 /// <param name="progressHandler">
 /// Optional <see cref="IProgress{Long}"/> to provide
 /// progress updates about data transfers.
 /// </param>
 /// <param name="accessTier">
 /// Optional <see cref="AccessTier"/>
 /// Indicates the tier to be set on the blob.
 /// </param>
 /// <param name="transferOptions">
 /// Optional <see cref="StorageTransferOptions"/> to configure
 /// parallel transfer behavior.
 /// </param>
 /// <param name="cancellationToken">
 /// Optional <see cref="CancellationToken"/> to propagate
 /// notifications that the operation should be cancelled.
 /// </param>
 /// <returns>
 /// A <see cref="Response{BlobContentInfo}"/> describing the
 /// state of the updated block blob.
 /// </returns>
 /// <remarks>
 /// A <see cref="RequestFailedException"/> will be thrown if
 /// a failure occurs.
 /// </remarks>
  public virtual Response<BlobContentInfo> Upload(
      string path,
      BlobHttpHeaders httpHeaders = default,
      Metadata metadata = default,
      BlobRequestConditions conditions = default,
      IProgress<long> progressHandler = default,
      AccessTier? accessTier = default,
      StorageTransferOptions transferOptions = default,
      CancellationToken cancellationToken = default)
 {
     using (FileStream stream = new FileStream(path, FileMode.Open, FileAccess.Read))
     {
          return StagedUploadAsync(
                     stream,
                     httpHeaders,
                     metadata,
                     conditions,
                     progressHandler,
                     accessTier,
                     transferOptions: transferOptions,
                     async: false,
                     cancellationToken: cancellationToken)
                 .EnsureCompleted();
     }
 }
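A short usage sketch for this overload (the blobClient instance and the file path are assumptions; values are illustrative):

      // The method opens the file itself, so only a path plus the desired transfer options are needed.
      var options = new StorageTransferOptions
      {
          MaximumConcurrency = 4,
          MaximumTransferSize = 8 * 1024 * 1024
      };
      Response<BlobContentInfo> info = blobClient.Upload(@"C:\data\report.csv", transferOptions: options);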
Example No. 7
 /// <summary>
 /// This operation will create a new
  /// block blob of arbitrary size by uploading it as individually staged
 /// blocks if it's larger than the
 /// <paramref name="singleUploadThreshold"/>.
 /// </summary>
 /// <param name="path">
 /// A file path of the file to upload.
 /// </param>
 /// <param name="blobHttpHeaders">
 /// Optional standard HTTP header properties that can be set for the
 /// block blob.
 /// </param>
 /// <param name="metadata">
 /// Optional custom metadata to set for this block blob.
 /// </param>
 /// <param name="conditions">
 /// Optional <see cref="BlobRequestConditions"/> to add conditions on
 /// the creation of this new block blob.
 /// </param>
 /// <param name="progressHandler">
 /// Optional <see cref="IProgress{Long}"/> to provide
 /// progress updates about data transfers.
 /// </param>
 /// <param name="accessTier">
 /// Optional <see cref="AccessTier"/>
 /// Indicates the tier to be set on the blob.
 /// </param>
 /// <param name="singleUploadThreshold">
 /// The maximum size stream that we'll upload as a single block.  The
 /// default value is 256MB.
 /// </param>
 /// <param name="transferOptions">
 /// Optional <see cref="StorageTransferOptions"/> to configure
 /// parallel transfer behavior.
 /// </param>
 /// <param name="async">
 /// </param>
 /// <param name="cancellationToken">
 /// Optional <see cref="CancellationToken"/> to propagate
 /// notifications that the operation should be cancelled.
 /// </param>
 /// <returns>
 /// A <see cref="Response{BlobContentInfo}"/> describing the
 /// state of the updated block blob.
 /// </returns>
 /// <remarks>
 /// A <see cref="RequestFailedException"/> will be thrown if
 /// a failure occurs.
 /// </remarks>
  internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
      string path,
      BlobHttpHeaders blobHttpHeaders,
      Metadata metadata,
      BlobRequestConditions conditions,
      IProgress<long> progressHandler,
      AccessTier? accessTier = default,
      long? singleUploadThreshold = default,
      StorageTransferOptions transferOptions = default,
      bool async = true,
      CancellationToken cancellationToken = default)
 {
     using (FileStream stream = new FileStream(path, FileMode.Open))
     {
      return await StagedUploadAsync(
                 stream,
                 blobHttpHeaders,
                 metadata,
                 conditions,
                 progressHandler,
                 accessTier,
                 singleUploadThreshold: singleUploadThreshold,
                 transferOptions: transferOptions,
                 async: async,
                 cancellationToken: cancellationToken)
             .ConfigureAwait(false);
     }
 }
Example No. 8
  public virtual async Task<Response<BlobContentInfo>> UploadAsync(
      string path,
      BlobHttpHeaders httpHeaders = default,
      Metadata metadata = default,
      BlobRequestConditions conditions = default,
      IProgress<long> progressHandler = default,
      AccessTier? accessTier = default,
      StorageTransferOptions transferOptions = default,
      CancellationToken cancellationToken = default)
 {
     using (FileStream stream = new FileStream(path, FileMode.Open))
     {
      return await StagedUploadAsync(
                 stream,
                 httpHeaders,
                 metadata,
                 conditions,
                 progressHandler,
                 accessTier,
                 transferOptions: transferOptions,
                 async: true,
                 cancellationToken: cancellationToken)
             .ConfigureAwait(false);
     }
 }
Example No. 9
        /// <summary>
        /// This operation will create a new
        /// block blob of arbitrary size by uploading it as individually staged
        /// blocks if it's larger than the
        /// <paramref name="transferOptions"/> MaximumTransferLength.
        /// </summary>
        /// <param name="content">
        /// A <see cref="Stream"/> containing the content to upload.
        /// </param>
        /// <param name="blobHttpHeaders">
        /// Optional standard HTTP header properties that can be set for the
        /// block blob.
        /// </param>
        /// <param name="metadata">
        /// Optional custom metadata to set for this block blob.
        /// </param>
        /// <param name="conditions">
        /// Optional <see cref="BlobRequestConditions"/> to add conditions on
        /// the creation of this new block blob.
        /// </param>
        /// <param name="progressHandler">
        /// Optional <see cref="IProgress{Long}"/> to provide
        /// progress updates about data transfers.
        /// </param>
        /// <param name="accessTier">
        /// Optional <see cref="AccessTier"/>
        /// Indicates the tier to be set on the blob.
        /// </param>
        /// <param name="transferOptions">
        /// Optional <see cref="StorageTransferOptions"/> to configure
        /// parallel transfer behavior.
        /// </param>
        /// <param name="async">
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{BlobContentInfo}"/> describing the
        /// state of the updated block blob.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
            Stream content,
            BlobHttpHeaders blobHttpHeaders,
            Metadata metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier = default,
            StorageTransferOptions transferOptions = default,
            bool async = true,
            CancellationToken cancellationToken = default)
        {
            if (UsingClientSideEncryption)
            {
                // content is now unseekable, so PartitionedUploader will be forced to do a buffered multipart upload
                (content, metadata) = await new BlobClientSideEncryptor(new ClientSideEncryptor(ClientSideEncryption))
                                      .ClientSideEncryptInternal(content, metadata, async, cancellationToken).ConfigureAwait(false);
            }

            var client = new BlockBlobClient(Uri, Pipeline, Version, ClientDiagnostics, CustomerProvidedKey, EncryptionScope);

            PartitionedUploader uploader = new PartitionedUploader(
                client,
                transferOptions,
                operationName: $"{nameof(BlobClient)}.{nameof(Upload)}");

            if (async)
            {
                return await uploader.UploadAsync(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                return uploader.Upload(content, blobHttpHeaders, metadata, conditions, progressHandler, accessTier, cancellationToken);
            }
        }
Example No. 10
        public PartitionedDownloader(
            BlobBaseClient client,
            StorageTransferOptions transferOptions = default)
        {
            _client = client;

            // Set _maxWorkerCount
            if (transferOptions.MaximumConcurrency.HasValue)
            {
                if (transferOptions.MaximumConcurrency < 1)
                {
                    _maxWorkerCount = Constants.Blob.Block.DefaultConcurrentTransfersCount;
                }
                else
                {
                    _maxWorkerCount = transferOptions.MaximumConcurrency.Value;
                }
            }
            else
            {
                _maxWorkerCount = Constants.Blob.Block.DefaultConcurrentTransfersCount;
            }

            // Set _rangeSize
            if (transferOptions.MaximumTransferLength.HasValue)
            {
                if (transferOptions.MaximumTransferLength.Value < 1)
                {
                    _rangeSize = Constants.DefaultBufferSize;
                }
                else
                {
                    _rangeSize = Math.Min(transferOptions.MaximumTransferLength.Value, Constants.Blob.Block.MaxDownloadBytes);
                }
            }
            else
            {
                _rangeSize = Constants.DefaultBufferSize;
            }

            // Set _initialRangeSize
            if (transferOptions.InitialTransferLength.HasValue)
            {
                if (transferOptions.InitialTransferLength.Value < 1)
                {
                    _initialRangeSize = Constants.DefaultBufferSize;
                }
                else
                {
                    _initialRangeSize = Math.Min(transferOptions.InitialTransferLength.Value, Constants.DefaultBufferSize);
                }
            }
            else
            {
                // Set _initialRangeSize to _rangeSize if it wasn't specified by the customer.  This is by design.
                _initialRangeSize = _rangeSize;
            }
        }
Example No. 11
 protected override async Task ParallelDownloadAsync(
     TBlobClient client,
     Stream destination,
     DownloadTransactionalHashingOptions hashingOptions,
     StorageTransferOptions transferOptions)
 => await client.DownloadToAsync(destination, new BlobDownloadToOptions
 {
     TransactionalHashingOptions = hashingOptions,
     TransferOptions             = transferOptions,
 });
Example No. 12
        public void InitialTransferLengthBackCompatOverflowTest(long? realSize)
        {
            var options = new StorageTransferOptions
            {
                InitialTransferSize = realSize
            };

            Assert.AreEqual(options.InitialTransferSize, realSize);
            Assert.Throws <OverflowException>(() => _ = options.InitialTransferLength);
        }
Example No. 13
        public void InitialTransferLengthBackCompatTest(long? realSize)
        {
            var options = new StorageTransferOptions
            {
                InitialTransferSize = realSize
            };

            Assert.AreEqual(options.InitialTransferSize, realSize);
            Assert.AreEqual(options.InitialTransferLength, realSize);
        }
Example No. 14
        public void MaxTransferLengthBackCompatTest(long? realSize)
        {
            var options = new StorageTransferOptions
            {
                MaximumTransferSize = realSize
            };

            Assert.AreEqual(options.MaximumTransferSize, realSize);
            Assert.AreEqual(options.MaximumTransferLength, realSize);
        }
Example No. 15
 protected override async Task ParallelUploadAsync(
     BlockBlobClient client,
     Stream source,
     UploadTransactionalHashingOptions hashingOptions,
     StorageTransferOptions transferOptions)
 => await client.UploadAsync(source, new BlobUploadOptions
 {
     TransactionalHashingOptions = hashingOptions,
     TransferOptions             = transferOptions
 });
Example No. 16
 protected override Task ParallelUploadAsync(
     AppendBlobClient client,
     Stream source,
     UploadTransactionalHashingOptions hashingOptions,
     StorageTransferOptions transferOptions)
 {
     /* Need to rerecord? Azure.Core framework won't record inconclusive tests.
      * Change this to pass for recording and revert when done. */
     Assert.Inconclusive("AppendBlobClient contains no definition for parallel upload.");
      return Task.CompletedTask;
 }
Example No. 17
        //------------------------------------------
        // Download multiple blobs simultaneously
        //------------------------------------------
        // <Snippet_DownloadFilesAsync>
        private static async Task DownloadFilesAsync()
        {
            BlobServiceClient blobServiceClient = GetBlobServiceClient();

            // Path to the local directory to download the blobs into
            string downloadPath = Directory.GetCurrentDirectory() + "\\download\\";

            Directory.CreateDirectory(downloadPath);
            Console.WriteLine($"Created directory {downloadPath}");

            // Specify the StorageTransferOptions
            var options = new StorageTransferOptions
            {
                // Set the maximum number of workers that
                // may be used in a parallel transfer.
                MaximumConcurrency = 8,

                // Set the maximum length of a transfer to 50MB.
                MaximumTransferSize = 50 * 1024 * 1024
            };

            List <BlobContainerClient> containers = new List <BlobContainerClient>();

            foreach (BlobContainerItem container in blobServiceClient.GetBlobContainers())
            {
                containers.Add(blobServiceClient.GetBlobContainerClient(container.Name));
            }

            // Start a timer to measure how long it takes to download all the files.
            Stopwatch timer = Stopwatch.StartNew();

            // Download the blobs
            try
            {
                int count = 0;

                // Create a queue of tasks that will each download one blob.
                var tasks = new Queue <Task <Response> >();

                foreach (BlobContainerClient container in containers)
                {
                    // Iterate through the files
                    foreach (BlobItem blobItem in container.GetBlobs())
                    {
                        string fileName = downloadPath + blobItem.Name;
                        Console.WriteLine($"Downloading {blobItem.Name} to {downloadPath}");

                        BlobClient blob = container.GetBlobClient(blobItem.Name);

                        // Add the download task to the queue
                        tasks.Enqueue(blob.DownloadToAsync(fileName, default, options));
Example No. 18
        public async Task UploadAsync_File_StorageTransferOptions(int? maximumThreadCount)
        {
            await using DisposingContainer test = await GetTestContainerAsync();

            BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewBlobName()));
            var        data = GetRandomBuffer(Constants.KB);

            using (var stream = new MemoryStream(data))
            {
                var path = Path.GetTempFileName();

                try
                {
                    File.WriteAllBytes(path, data);

                    var options = new StorageTransferOptions {
                        MaximumConcurrency = maximumThreadCount
                    };

                    await Verify(blob.UploadAsync(path, transferOptions: options));

                    async Task Verify(Task <Response <BlobContentInfo> > upload)
                    {
                        using (var stream = new MemoryStream(data))
                        {
                            await upload;
                        }

                        Response <BlobDownloadInfo> download = await blob.DownloadAsync();

                        using var actual = new MemoryStream();
                        await download.Value.Content.CopyToAsync(actual);

                        TestHelper.AssertSequenceEqual(data, actual.ToArray());
                    }
                }
                finally
                {
                    if (File.Exists(path))
                    {
                        File.Delete(path);
                    }
                }
            }

            Response <BlobProperties> properties = await blob.GetPropertiesAsync();

            Assert.AreEqual(BlobType.Block, properties.Value.BlobType);
        }
Example No. 19
        /// <summary>
        /// This operation will create a new
        /// block blob of arbitrary size by uploading it as individually staged
        /// blocks if it's larger than the
        /// <paramref name="transferOptions"/> MaximumTransferLength.
        /// </summary>
        /// <param name="path">
        /// A file path of the file to upload.
        /// </param>
        /// <param name="blobHttpHeaders">
        /// Optional standard HTTP header properties that can be set for the
        /// block blob.
        /// </param>
        /// <param name="metadata">
        /// Optional custom metadata to set for this block blob.
        /// </param>
        /// <param name="conditions">
        /// Optional <see cref="BlobRequestConditions"/> to add conditions on
        /// the creation of this new block blob.
        /// </param>
        /// <param name="progressHandler">
        /// Optional <see cref="IProgress{Long}"/> to provide
        /// progress updates about data transfers.
        /// </param>
        /// <param name="accessTier">
        /// Optional <see cref="AccessTier"/>
        /// Indicates the tier to be set on the blob.
        /// </param>
        /// <param name="transferOptions">
        /// Optional <see cref="StorageTransferOptions"/> to configure
        /// parallel transfer behavior.
        /// </param>
        /// <param name="async">
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{BlobContentInfo}"/> describing the
        /// state of the updated block blob.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        internal async Task<Response<BlobContentInfo>> StagedUploadAsync(
            string path,
            BlobHttpHeaders blobHttpHeaders,
            Metadata metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier = default,
            StorageTransferOptions transferOptions = default,
            bool async = true,
            CancellationToken cancellationToken = default)
        {
            // TODO Upload from file will get its own implementation in the future that opens more
            //      than one stream at once. This is incompatible with .NET's CryptoStream. We will
            //      need to uncomment the below code and revert to upload from stream if client-side
            //      encryption is enabled.
            //if (ClientSideEncryption != default)
            //{
            //    using (FileStream stream = new FileStream(path, FileMode.Open, FileAccess.Read))
            //    {
            //        return await StagedUploadAsync(
            //            stream,
            //            blobHttpHeaders,
            //            metadata,
            //            conditions,
            //            progressHandler,
            //            accessTier,
            //            transferOptions: transferOptions,
            //            async: async,
            //            cancellationToken: cancellationToken)
            //            .ConfigureAwait(false);
            //    }
            //}

            using (FileStream stream = new FileStream(path, FileMode.Open, FileAccess.Read))
            {
                return await StagedUploadAsync(
                           stream,
                           blobHttpHeaders,
                           metadata,
                           conditions,
                           progressHandler,
                           accessTier,
                           transferOptions: transferOptions,
                           async: async,
                           cancellationToken: cancellationToken)
                       .ConfigureAwait(false);
            }
        }
Example No. 20
        public PartitionedDownloader(
            BlobBaseClient client,
            StorageTransferOptions transferOptions             = default,
            DownloadTransactionalHashingOptions hashingOptions = default)
        {
            _client = client;

            // Set _maxWorkerCount
            if (transferOptions.MaximumConcurrency.HasValue &&
                transferOptions.MaximumConcurrency > 0)
            {
                _maxWorkerCount = transferOptions.MaximumConcurrency.Value;
            }
            else
            {
                _maxWorkerCount = Constants.Blob.Block.DefaultConcurrentTransfersCount;
            }

            // Set _rangeSize
            if (transferOptions.MaximumTransferSize.HasValue &&
                transferOptions.MaximumTransferSize.Value > 0)
            {
                _rangeSize = Math.Min(transferOptions.MaximumTransferSize.Value, Constants.Blob.Block.MaxDownloadBytes);
            }
            else
            {
                _rangeSize = Constants.DefaultBufferSize;
            }

            // Set _initialRangeSize
            if (transferOptions.InitialTransferSize.HasValue &&
                transferOptions.InitialTransferSize.Value > 0)
            {
                _initialRangeSize = transferOptions.InitialTransferSize.Value;
            }
            else
            {
                _initialRangeSize = Constants.Blob.Block.DefaultInitalDownloadRangeSize;
            }

            // the caller to this stream cannot defer validation, as they cannot access a returned hash
            if (!(hashingOptions?.Validate ?? true))
            {
                throw Errors.CannotDeferTransactionalHashVerification();
            }

            _hashingOptions = hashingOptions;
        }
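Examples No. 11 and No. 21 show how a caller reaches this constructor: the hashing options and transfer options travel together on BlobDownloadToOptions. A condensed sketch using only the names that appear in those samples (this is the pre-release hashing surface referenced by TODO #27253, so treat it as illustrative; the client and destination stream are assumed):

            var downloadOptions = new BlobDownloadToOptions
            {
                TransactionalHashingOptions = new DownloadTransactionalHashingOptions
                {
                    Algorithm = TransactionalHashAlgorithm.StorageCrc64
                },
                TransferOptions = new StorageTransferOptions
                {
                    InitialTransferSize = 4 * 1024 * 1024,
                    MaximumTransferSize = 4 * 1024 * 1024
                }
            };
            await blobBaseClient.DownloadToAsync(destination, downloadOptions);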
Example No. 21
        public virtual async Task ParallelDownloadSuccessfulHashVerification(
            [Values(TransactionalHashAlgorithm.MD5, TransactionalHashAlgorithm.StorageCrc64)] TransactionalHashAlgorithm algorithm,
            [Values(512, 2 * Constants.KB)] int chunkSize)
        {
            await using IDisposingContainer <TContainerClient> disposingContainer = await GetDisposingContainerAsync();

            // Arrange
            const int dataLength = 2 * Constants.KB;
            var       data       = GetRandomBuffer(dataLength);

            var resourceName = GetNewResourceName();
            var client       = await GetResourceClientAsync(
                disposingContainer.Container,
                resourceLength : dataLength,
                createResource : true,
                resourceName : resourceName);

            await SetupDataAsync(client, new MemoryStream(data));

            // make pipeline assertion for checking hash was present on download
            var hashPipelineAssertion = new AssertMessageContentsPolicy(checkResponse: GetResponseHashAssertion(algorithm));
            var clientOptions         = ClientBuilder.GetOptions();

            clientOptions.AddPolicy(hashPipelineAssertion, HttpPipelinePosition.PerCall);

            client = await GetResourceClientAsync(
                disposingContainer.Container,
                createResource : false,
                resourceName : resourceName,
                options : clientOptions);

            var hashingOptions = new DownloadTransactionalHashingOptions {
                Algorithm = algorithm
            };
            StorageTransferOptions transferOptions = new StorageTransferOptions
            {
                InitialTransferSize = chunkSize,
                MaximumTransferSize = chunkSize
            };

            // Act
            hashPipelineAssertion.CheckResponse = true;
            await ParallelDownloadAsync(client, Stream.Null, hashingOptions, transferOptions);

            // Assert
            // Assertion was in the pipeline and the SDK not throwing means the hash was validated
        }
        private async Task UploadStreamAndVerify(
            long size,
            StorageTransferOptions transferOptions)
        {
            using Stream stream = await CreateLimitedMemoryStream(size);

            await using DisposingContainer test = await GetTestContainerAsync();

            var        name       = GetNewBlobName();
            BlobClient blob       = InstrumentClient(test.Container.GetBlobClient(name));
            var        credential = new StorageSharedKeyCredential(TestConfigDefault.AccountName, TestConfigDefault.AccountKey);

            blob = InstrumentClient(new BlobClient(blob.Uri, credential, GetOptions(true)));

            await blob.StagedUploadAsync(
                content : stream,
                blobHttpHeaders : default,
Example No. 23
 public PartitionedDownloader(
     BlobBaseClient client,
     StorageTransferOptions transferOptions = default,
     long?initialTransferLength             = null)
 {
     _client         = client;
     _maxWorkerCount =
         transferOptions.MaximumConcurrency ??
         Constants.Blob.Block.DefaultConcurrentTransfersCount;
     _initialRangeSize =
         initialTransferLength ??
         ((long?)transferOptions.MaximumTransferLength) ??
         Constants.DefaultBufferSize;
     _rangeSize = Math.Min(
         Constants.Blob.Block.MaxDownloadBytes,
         transferOptions.MaximumTransferLength ?? Constants.DefaultBufferSize);
 }
Example No. 24
        public async Task Upload_ImmutableStorageWithVersioning(bool multipart)
        {
            // Arrange
            await using DisposingImmutableStorageWithVersioningContainer vlwContainer = await GetTestVersionLevelWormContainer(TestConfigOAuth);

            BlockBlobClient blockBlob = InstrumentClient(vlwContainer.Container.GetBlockBlobClient(GetNewBlobName()));

            byte[] data = GetRandomBuffer(Constants.KB);
            using Stream stream = new MemoryStream(data);

            BlobImmutabilityPolicy immutabilityPolicy = new BlobImmutabilityPolicy
            {
                ExpiresOn  = Recording.UtcNow.AddMinutes(5),
                PolicyMode = BlobImmutabilityPolicyMode.Unlocked
            };

            // The service rounds Immutability Policy Expiry to the nearest second.
            DateTimeOffset expectedImmutabilityPolicyExpiry = RoundToNearestSecond(immutabilityPolicy.ExpiresOn.Value);

            BlobUploadOptions options = new BlobUploadOptions
            {
                ImmutabilityPolicy = immutabilityPolicy,
                LegalHold          = true
            };

            if (multipart)
            {
                StorageTransferOptions transferOptions = new StorageTransferOptions
                {
                    InitialTransferSize = Constants.KB / 2,
                    MaximumTransferSize = Constants.KB / 2
                };
                options.TransferOptions = transferOptions;
            }

            // Act
            await blockBlob.UploadAsync(stream, options);

            // Assert
            Response <BlobProperties> propertiesResponse = await blockBlob.GetPropertiesAsync();

            Assert.AreEqual(expectedImmutabilityPolicyExpiry, propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn);
            Assert.AreEqual(immutabilityPolicy.PolicyMode, propertiesResponse.Value.ImmutabilityPolicy.PolicyMode);
            Assert.IsTrue(propertiesResponse.Value.HasLegalHold);
        }
Example No. 25
  public virtual Task<Response<BlobContentInfo>> UploadAsync(
      Stream content,
      BlobHttpHeaders httpHeaders = default,
      Metadata metadata = default,
      BlobRequestConditions conditions = default,
      IProgress<long> progressHandler = default,
      AccessTier? accessTier = default,
      StorageTransferOptions transferOptions = default,
      CancellationToken cancellationToken = default) =>
 StagedUploadAsync(
     content,
     httpHeaders,
     metadata,
     conditions,
     progressHandler,
     accessTier,
     transferOptions: transferOptions,
     async: true,
     cancellationToken: cancellationToken);
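A call-site sketch for this overload (the blobClient instance and the file path are assumed; it mirrors the test in Example No. 4):

      using (FileStream stream = File.OpenRead("payload.bin"))
      {
          var options = new StorageTransferOptions { MaximumConcurrency = 8 };
          Response<BlobContentInfo> info = await blobClient.UploadAsync(stream, transferOptions: options);
      }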
Example No. 26
        /// <summary>
        /// Download blob to local file
        /// </summary>
        /// <param name="blob">Source blob object</param>
        /// <param name="filePath">Destination file path</param>
        internal virtual async Task DownloadBlob(long taskId, IStorageBlobManagement localChannel, BlobBaseClient blob, string filePath)
        {
            Track2Models.BlobProperties blobProperties = blob.GetProperties(cancellationToken: CmdletCancellationToken);

            if (this.Force.IsPresent ||
                !System.IO.File.Exists(filePath) ||
                ShouldContinue(string.Format(Resources.OverwriteConfirmation, filePath), null))
            {
                StorageTransferOptions transferOptions = new StorageTransferOptions()
                {
                    MaximumConcurrency  = this.GetCmdletConcurrency(),
                    MaximumTransferSize = size4MB,
                    InitialTransferSize = size4MB
                };
                await blob.DownloadToAsync(filePath, BlobRequestConditions, transferOptions, CmdletCancellationToken).ConfigureAwait(false);

                OutputStream.WriteObject(taskId, new AzureStorageBlob(blob, localChannel.StorageContext, blobProperties, options: ClientOptions));
            }
        }
        public DataLakePartitionedUploader(
            DataLakeFileClient client,
            StorageTransferOptions transferOptions,
            ArrayPool<byte> arrayPool = null,
            string operationName = null)
        {
            _client    = client;
            _arrayPool = arrayPool ?? ArrayPool<byte>.Shared;

            // Set _maxWorkerCount
            if (transferOptions.MaximumConcurrency.HasValue &&
                transferOptions.MaximumConcurrency > 0)
            {
                _maxWorkerCount = transferOptions.MaximumConcurrency.Value;
            }
            else
            {
                _maxWorkerCount = Constants.DataLake.DefaultConcurrentTransfersCount;
            }

            // Set _singleUploadThreshold
            if (transferOptions.InitialTransferLength.HasValue &&
                transferOptions.InitialTransferLength.Value > 0)
            {
                _singleUploadThreshold = Math.Min(transferOptions.InitialTransferLength.Value, Constants.DataLake.MaxAppendBytes);
            }
            else
            {
                _singleUploadThreshold = Constants.DataLake.MaxAppendBytes;
            }

            // Set _blockSize
            if (transferOptions.MaximumTransferLength.HasValue &&
                transferOptions.MaximumTransferLength > 0)
            {
                _blockSize = Math.Min(
                    Constants.DataLake.MaxAppendBytes,
                    transferOptions.MaximumTransferLength.Value);
            }

            _operationName = operationName;
        }
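A hedged upload sketch for the Data Lake side (assumes the DataLakeFileUploadOptions.TransferOptions property available in recent Azure.Storage.Files.DataLake releases; the client and source stream are placeholders, and the numbers are illustrative):

            var uploadOptions = new DataLakeFileUploadOptions
            {
                TransferOptions = new StorageTransferOptions
                {
                    MaximumConcurrency = 4,
                    InitialTransferLength = 8 * 1024 * 1024,   // single-shot threshold, per the constructor above
                    MaximumTransferLength = 4 * 1024 * 1024    // per-append block size, capped at MaxAppendBytes
                }
            };
            await dataLakeFileClient.UploadAsync(sourceStream, uploadOptions);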
Example No. 28
        private async Task UploadFileAndVerify(
            long size,
            StorageTransferOptions transferOptions)
        {
            var path = Path.GetTempFileName();

            try
            {
                using Stream stream = await CreateLimitedMemoryStream(size);

                // create a new file and copy contents of stream into it, and then close the FileStream
                // so the StagedUploadAsync call is not prevented from reading using its FileStream.
                using (FileStream fileStream = File.Create(path))
                {
                    await stream.CopyToAsync(fileStream);
                }

                await using DisposingContainer test = await GetTestContainerAsync();

                var        name       = GetNewBlobName();
                BlobClient blob       = InstrumentClient(test.Container.GetBlobClient(name));
                var        credential = new StorageSharedKeyCredential(TestConfigDefault.AccountName, TestConfigDefault.AccountKey);
                blob = InstrumentClient(new BlobClient(blob.Uri, credential, GetOptions(true)));

                await blob.StagedUploadInternal(
                    path: path,
                    new BlobUploadOptions
                    {
                        TransferOptions = transferOptions
                    },
                    async: true);

                await DownloadAndAssertAsync(stream, blob);
            }
            finally
            {
                if (File.Exists(path))
                {
                    File.Delete(path);
                }
            }
        }
Example No. 29
        private async Task UploadStreamAndVerify(
            long size,
            long singleBlockThreshold,
            StorageTransferOptions transferOptions)
        {
            var data = GetRandomBuffer(size);

            await using DisposingContainer test = await GetTestContainerAsync();

            var        name       = GetNewBlobName();
            BlobClient blob       = InstrumentClient(test.Container.GetBlobClient(name));
            var        credential = new StorageSharedKeyCredential(TestConfigDefault.AccountName, TestConfigDefault.AccountKey);

            blob = InstrumentClient(new BlobClient(blob.Uri, credential, GetOptions(true)));

            using (var stream = new MemoryStream(data))
            {
                await blob.StagedUploadAsync(
                    content : stream,
                    blobHttpHeaders : default,
Example No. 30
 public PartitionedUploader(
     BlockBlobClient client,
     StorageTransferOptions transferOptions,
     long?singleUploadThreshold = null,
      ArrayPool<byte> arrayPool = null)
  {
      _client         = client;
      _arrayPool      = arrayPool ?? ArrayPool<byte>.Shared;
     _maxWorkerCount =
         transferOptions.MaximumConcurrency ??
         Constants.Blob.Block.DefaultConcurrentTransfersCount;
     _singleUploadThreshold = singleUploadThreshold ?? Constants.Blob.Block.MaxUploadBytes;
     _blockSize             = null;
     if (transferOptions.MaximumTransferLength != null)
     {
         _blockSize = Math.Min(
             Constants.Blob.Block.MaxStageBytes,
             transferOptions.MaximumTransferLength.Value);
     }
 }
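As Examples No. 15 and No. 24 illustrate, callers reach this uploader through BlockBlobClient.UploadAsync with a BlobUploadOptions that carries the transfer options; a condensed sketch (the client and content stream are assumed, values illustrative):

      var blobUploadOptions = new BlobUploadOptions
      {
          TransferOptions = new StorageTransferOptions
          {
              MaximumConcurrency = 4,                   // -> _maxWorkerCount
              MaximumTransferLength = 4 * 1024 * 1024   // -> _blockSize, capped at MaxStageBytes
          }
      };
      await blockBlobClient.UploadAsync(content, blobUploadOptions);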