/// <summary>
/// Verifies that a 1:1 upload fails hash validation when the request body is
/// corrupted in transit (simulated by a tampering pipeline policy).
/// </summary>
public virtual async Task UploadPartitionMismatchedHashThrows(TransactionalHashAlgorithm algorithm)
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const int dataLength = Constants.KB;
    byte[] data = GetRandomBuffer(dataLength);
    var hashingOptions = new UploadTransactionalHashingOptions { Algorithm = algorithm };

    // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer
    var tamperPolicy = new TamperStreamContentsPolicy();
    var clientOptions = ClientBuilder.GetOptions();
    clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall);

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        resourceLength: dataLength,
        createResource: true,
        options: clientOptions);

    using (var stream = new MemoryStream(data))
    {
        // Act: enable tampering only for the upload itself.
        tamperPolicy.TransformRequestBody = true;
        AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, hashingOptions);

        // Assert: service must reject the mismatched transactional hash.
        AssertWriteHashMismatch(operation, algorithm);
    }
}
/// <summary>
/// Verifies the SDK computes and attaches a transactional hash on a 1:1 upload.
/// The pipeline assertion inspects the outgoing request; a successful service
/// response implies the hash was also correct.
/// </summary>
public virtual async Task UploadPartitionSuccessfulHashComputation(TransactionalHashAlgorithm algorithm)
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const int dataLength = Constants.KB;
    byte[] data = GetRandomBuffer(dataLength);
    var hashingOptions = new UploadTransactionalHashingOptions { Algorithm = algorithm };

    // make pipeline assertion for checking hash was present on upload
    var hashPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestHashAssertion(algorithm));
    var clientOptions = ClientBuilder.GetOptions();
    clientOptions.AddPolicy(hashPipelineAssertion, HttpPipelinePosition.PerCall);

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        resourceLength: dataLength,
        createResource: true,
        options: clientOptions);

    // Act
    using (var stream = new MemoryStream(data))
    {
        hashPipelineAssertion.CheckRequest = true;
        await UploadPartitionAsync(client, stream, hashingOptions);
    }

    // Assert
    // Assertion was in the pipeline and the service returning success means the hash was correct
}
/// <summary>
/// Parallel upload for block blobs: forwards hashing and transfer settings to
/// <see cref="BlockBlobClient.UploadAsync(Stream, BlobUploadOptions, System.Threading.CancellationToken)"/>.
/// </summary>
protected override async Task ParallelUploadAsync(
    BlockBlobClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions,
    StorageTransferOptions transferOptions)
{
    var uploadOptions = new BlobUploadOptions
    {
        TransactionalHashingOptions = hashingOptions,
        TransferOptions = transferOptions
    };
    await client.UploadAsync(source, uploadOptions);
}
/// <summary>
/// 1:1 upload for append blobs via AppendBlock; returns only the raw response.
/// </summary>
protected override async Task<Response> UploadPartitionAsync(
    AppendBlobClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions)
{
    var response = await client.AppendBlockAsync(
        source,
        new AppendBlobAppendBlockOptions { TransactionalHashingOptions = hashingOptions });
    return response.GetRawResponse();
}
/// <summary>
/// BlobClient has no single-shot partition upload, so this test path is inconclusive.
/// </summary>
protected override Task<Response> UploadPartitionAsync(
    BlobClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions)
{
    /* Need to rerecord? Azure.Core framework won't record inconclusive tests.
     * Change this to pass for recording and revert when done. */
    Assert.Inconclusive("BlobClient contains no definition for a 1:1 upload.");

    // Unreachable (Inconclusive throws) but required to satisfy the compiler.
    return Task.FromResult<Response>(null);
}
/// <summary>
/// BlobClient has no OpenWriteAsync, so this test path is inconclusive.
/// </summary>
protected override Task<Stream> OpenWriteAsync(
    BlobClient client,
    UploadTransactionalHashingOptions hashingOptions,
    int internalBufferSize)
{
    /* Need to rerecord? Azure.Core framework won't record inconclusive tests.
     * Change this to pass for recording and revert when done. */
    Assert.Inconclusive("BlobClient contains no definition for OpenWriteAsync.");

    // Unreachable (Inconclusive throws) but required to satisfy the compiler.
    return Task.FromResult<Stream>(null);
}
/// <summary>
/// 1:1 upload for page blobs via UploadPages at offset 0; returns only the raw response.
/// </summary>
protected override async Task<Response> UploadPartitionAsync(
    PageBlobClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions)
{
    var response = await client.UploadPagesAsync(
        source,
        0,
        new PageBlobUploadPagesOptions { TransactionalHashingOptions = hashingOptions });
    return response.GetRawResponse();
}
/// <summary>
/// Opens an append-blob write stream (overwrite: true) with the given hashing
/// options and internal buffer size.
/// </summary>
protected override async Task<Stream> OpenWriteAsync(
    AppendBlobClient client,
    UploadTransactionalHashingOptions hashingOptions,
    int internalBufferSize)
{
    var openWriteOptions = new AppendBlobOpenWriteOptions
    {
        TransactionalHashingOptions = hashingOptions,
        BufferSize = internalBufferSize
    };
    return await client.OpenWriteAsync(true, openWriteOptions);
}
/// <summary>
/// AppendBlobClient has no parallel upload, so this test path is inconclusive.
/// </summary>
protected override Task ParallelUploadAsync(
    AppendBlobClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions,
    StorageTransferOptions transferOptions)
{
    /* Need to rerecord? Azure.Core framework won't record inconclusive tests.
     * Change this to pass for recording and revert when done. */
    Assert.Inconclusive("AppendBlobClient contains no definition for parallel upload.");

    // Unreachable (Inconclusive throws) but required to satisfy the compiler.
    return Task.CompletedTask;
}
/// <summary>
/// Pins the default values of the hashing option types.
/// </summary>
public void TestDefaults()
{
    // Upload options default to CRC64 with no caller-supplied hash.
    var uploadOptions = new UploadTransactionalHashingOptions();
    Assert.AreEqual(TransactionalHashAlgorithm.StorageCrc64, uploadOptions.Algorithm);
    Assert.IsNull(uploadOptions.PrecalculatedHash);

    // Download options default to CRC64 with SDK-side validation enabled.
    var downloadOptions = new DownloadTransactionalHashingOptions();
    Assert.AreEqual(TransactionalHashAlgorithm.StorageCrc64, downloadOptions.Algorithm);
    Assert.IsTrue(downloadOptions.Validate);
}
/// <summary>
/// 1:1 upload for block blobs: stages a single block under a random
/// (recording-stable) base64 block id; returns only the raw response.
/// </summary>
protected override async Task<Response> UploadPartitionAsync(
    BlockBlobClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions)
{
    string blockId = Convert.ToBase64String(Recording.Random.NewGuid().ToByteArray());
    var response = await client.StageBlockAsync(
        blockId,
        source,
        new BlockBlobStageBlockOptions { TransactionalHashingOptions = hashingOptions });
    return response.GetRawResponse();
}
/// <summary>
/// Buffered write stream over an append blob.
/// </summary>
/// <param name="appendBlobClient">Client used to append flushed buffers.</param>
/// <param name="bufferSize">Internal staging buffer size; validated below.</param>
/// <param name="position">Starting write position.</param>
/// <param name="conditions">Request conditions; null means unconditional.</param>
/// <param name="progressHandler">Optional upload progress sink.</param>
/// <param name="hashingOptions">Transactional hashing options for each flush.</param>
public AppendBlobWriteStream(
    AppendBlobClient appendBlobClient,
    long bufferSize,
    long position,
    AppendBlobRequestConditions conditions,
    IProgress<long> progressHandler,
    UploadTransactionalHashingOptions hashingOptions)
    : base(position, bufferSize, progressHandler, hashingOptions)
{
    ValidateBufferSize(bufferSize);
    _appendBlobClient = appendBlobClient;
    // Fall back to unconditional appends when the caller supplied no conditions.
    _conditions = conditions ?? new AppendBlobRequestConditions();
}
/// <summary>
/// Buffered write stream over a share file.
/// </summary>
/// <param name="fileClient">Client used to upload flushed ranges.</param>
/// <param name="bufferSize">Internal staging buffer size; validated below.</param>
/// <param name="position">Starting write position; also seeds the range index.</param>
/// <param name="conditions">Request conditions; stored as-is (may be null).</param>
/// <param name="progressHandler">Optional upload progress sink.</param>
/// <param name="hashingOptions">Transactional hashing options for each flush.</param>
public ShareFileWriteStream(
    ShareFileClient fileClient,
    long bufferSize,
    long position,
    ShareFileRequestConditions conditions,
    IProgress<long> progressHandler,
    UploadTransactionalHashingOptions hashingOptions)
    : base(position, bufferSize, progressHandler, hashingOptions)
{
    ValidateBufferSize(bufferSize);
    _fileClient = fileClient;
    // NOTE(review): unlike the blob write streams, no null-coalescing default
    // is applied here — presumably downstream calls accept null conditions.
    _conditions = conditions;
    _writeIndex = position;
}
/// <summary>
/// Buffered write stream over a page blob.
/// </summary>
/// <param name="pageBlobClient">Client used to upload flushed pages.</param>
/// <param name="bufferSize">Internal staging buffer size; validated below.</param>
/// <param name="position">Starting write position; validated (page alignment) and used as the range index.</param>
/// <param name="conditions">Request conditions; null means unconditional.</param>
/// <param name="progressHandler">Optional upload progress sink.</param>
/// <param name="hashingOptions">Transactional hashing options for each flush.</param>
public PageBlobWriteStream(
    PageBlobClient pageBlobClient,
    long bufferSize,
    long position,
    PageBlobRequestConditions conditions,
    IProgress<long> progressHandler,
    UploadTransactionalHashingOptions hashingOptions)
    : base(position, bufferSize, progressHandler, hashingOptions)
{
    ValidateBufferSize(bufferSize);
    ValidatePosition(position);
    _pageBlobClient = pageBlobClient;
    // Fall back to unconditional writes when the caller supplied no conditions.
    _conditions = conditions ?? new PageBlobRequestConditions();
    _writeIndex = position;
}
/// <summary>
/// Verifies that a caller-supplied (deliberately wrong) precalculated hash is
/// sent as-is on a 1:1 upload — proven both by a pipeline assertion and by the
/// service rejecting the mismatched hash.
/// </summary>
public virtual async Task UploadPartitionUsePrecalculatedHash(TransactionalHashAlgorithm algorithm)
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const int dataLength = Constants.KB;
    byte[] data = GetRandomBuffer(dataLength);

    // service throws different error for crc only when hash size is incorrect; we don't want to test that
    int hashSizeBytes = algorithm switch
    {
        TransactionalHashAlgorithm.MD5 => 16,
        TransactionalHashAlgorithm.StorageCrc64 => 8,
        _ => throw new ArgumentException("Cannot determine hash size for provided algorithm type")
    };

    // hash needs to be wrong so we detect difference from auto-SDK correct calculation
    byte[] precalculatedHash = GetRandomBuffer(hashSizeBytes);
    var hashingOptions = new UploadTransactionalHashingOptions
    {
        Algorithm = algorithm,
        PrecalculatedHash = precalculatedHash
    };

    // make pipeline assertion for checking precalculated hash was present on upload
    var hashPipelineAssertion = new AssertMessageContentsPolicy(
        checkRequest: GetRequestHashAssertion(algorithm, expectedHash: precalculatedHash));
    var clientOptions = ClientBuilder.GetOptions();
    clientOptions.AddPolicy(hashPipelineAssertion, HttpPipelinePosition.PerCall);

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        resourceLength: dataLength,
        createResource: true,
        options: clientOptions);

    hashPipelineAssertion.CheckRequest = true;
    using (var stream = new MemoryStream(data))
    {
        // Act
        AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, hashingOptions);

        // Assert: the service detects the bogus precalculated hash.
        AssertWriteHashMismatch(operation, algorithm);
    }
}
/// <summary>
/// Buffered write stream over a Data Lake file.
/// </summary>
/// <param name="fileClient">Client used to append flushed buffers.</param>
/// <param name="bufferSize">Internal staging buffer size; validated below.</param>
/// <param name="position">Starting write position; also seeds the append offset.</param>
/// <param name="conditions">Request conditions; null means unconditional.</param>
/// <param name="progressHandler">Optional upload progress sink.</param>
/// <param name="hashingOptions">Transactional hashing options for each flush.</param>
/// <param name="closeEvent">Optional flag forwarded to the final flush/close call.</param>
public DataLakeFileWriteStream(
    DataLakeFileClient fileClient,
    long bufferSize,
    long position,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    UploadTransactionalHashingOptions hashingOptions,
    bool? closeEvent)
    : base(position, bufferSize, progressHandler, hashingOptions)
{
    ValidateBufferSize(bufferSize);
    _fileClient = fileClient;
    // Fall back to unconditional writes when the caller supplied no conditions.
    _conditions = conditions ?? new DataLakeRequestConditions();
    _writeIndex = position;
    _closeEvent = closeEvent;
}
/// <summary>
/// Verifies an open-write stream fails hash validation when request bodies are
/// corrupted in transit (simulated by a tampering pipeline policy).
/// </summary>
public virtual async Task OpenWriteMismatchedHashThrows(TransactionalHashAlgorithm algorithm)
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const int streamBufferSize = Constants.KB; // this one needs to be 512 multiple for page blobs
    const int dataSize = Constants.KB - 11; // odd number to get some variance
    const int streamWrites = 10;
    byte[] data = GetRandomBuffer(dataSize);
    var hashingOptions = new UploadTransactionalHashingOptions { Algorithm = algorithm };

    // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer
    var clientOptions = ClientBuilder.GetOptions();
    var tamperPolicy = new TamperStreamContentsPolicy();
    clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall);

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        // should use dataSize instead of streamBufferSize but this gives 512 multiple and ends up irrelevant for this test
        resourceLength: streamBufferSize * streamWrites,
        createResource: true,
        options: clientOptions);

    // Act
    var writeStream = await OpenWriteAsync(client, hashingOptions, streamBufferSize);

    // Assert: once tampering is on, flushed writes must trip hash validation.
    AssertWriteHashMismatch(
        async () =>
        {
            tamperPolicy.TransformRequestBody = true;
            for (int i = 0; i < streamWrites; i++)
            {
                await writeStream.WriteAsync(data, 0, data.Length);
            }
        },
        algorithm);
}
/// <summary>
/// Uploads and downloads with default hashing options, asserting via a pipeline
/// policy that CRC64 is applied in both directions.
/// </summary>
// NOTE(review): method name typo ("WIth") left as-is — test names key recorded
// sessions in this framework, so renaming needs a coordinated re-record.
public async Task RoundtripWIthDefaults()
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const TransactionalHashAlgorithm expectedAlgorithm = TransactionalHashAlgorithm.StorageCrc64;
    const int dataLength = Constants.KB;
    byte[] data = GetRandomBuffer(dataLength);
    var uploadHashingOptions = new UploadTransactionalHashingOptions();
    var downloadHashingOptions = new DownloadTransactionalHashingOptions();
    var clientOptions = ClientBuilder.GetOptions();

    // Small transfer sizes force partitioned transfers in both directions.
    StorageTransferOptions transferOptions = new StorageTransferOptions
    {
        InitialTransferSize = 512,
        MaximumTransferSize = 512
    };

    // make pipeline assertion for checking hash was present on upload AND download
    var hashPipelineAssertion = new AssertMessageContentsPolicy(
        checkRequest: GetRequestHashAssertion(expectedAlgorithm, isHashExpected: ParallelUploadIsHashExpected),
        checkResponse: GetResponseHashAssertion(expectedAlgorithm));
    clientOptions.AddPolicy(hashPipelineAssertion, HttpPipelinePosition.PerCall);

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        resourceLength: dataLength,
        createResource: true,
        options: clientOptions);

    // Act
    using (var stream = new MemoryStream(data))
    {
        hashPipelineAssertion.CheckRequest = true;
        await ParallelUploadAsync(client, stream, uploadHashingOptions, transferOptions);
        hashPipelineAssertion.CheckRequest = false;
    }

    hashPipelineAssertion.CheckResponse = true;
    await ParallelDownloadAsync(client, Stream.Null, downloadHashingOptions, transferOptions);

    // Assert
    // Assertion was in the pipeline and the service returning success means the hash was correct
}
/// <summary>
/// Verifies the SDK computes and attaches a transactional hash on every flush
/// of an open-write stream; the pipeline assertion fires on each request.
/// </summary>
public virtual async Task OpenWriteSuccessfulHashComputation(TransactionalHashAlgorithm algorithm)
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const int streamBufferSize = Constants.KB; // this one needs to be 512 multiple for page blobs
    const int dataSize = Constants.KB - 11; // odd number to get some variance
    const int streamWrites = 10;
    byte[] data = GetRandomBuffer(dataSize);
    var hashingOptions = new UploadTransactionalHashingOptions { Algorithm = algorithm };

    // make pipeline assertion for checking hash was present on upload
    var hashPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestHashAssertion(algorithm));
    var clientOptions = ClientBuilder.GetOptions();
    clientOptions.AddPolicy(hashPipelineAssertion, HttpPipelinePosition.PerCall);

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        // should use dataSize instead of streamBufferSize but this gives 512 multiple and ends up irrelevant for this test
        resourceLength: streamBufferSize * streamWrites,
        createResource: true,
        options: clientOptions);

    // Act
    var writeStream = await OpenWriteAsync(client, hashingOptions, streamBufferSize);

    // Assert
    hashPipelineAssertion.CheckRequest = true;
    for (int i = 0; i < streamWrites; i++)
    {
        // triggers pipeline assertion
        await writeStream.WriteAsync(data, 0, data.Length);
    }
}
/// <summary>
/// Verifies parallel (potentially partitioned) uploads reject a caller-supplied
/// precalculated hash up front with an ArgumentException.
/// </summary>
public virtual async Task PrecalculatedHashNotAccepted(TransactionalHashAlgorithm algorithm)
{
    await using IDisposingContainer<TContainerClient> disposingContainer = await GetDisposingContainerAsync();

    // Arrange: any precalculated hash is invalid here; content doesn't matter.
    const int dataLength = Constants.KB;
    byte[] data = GetRandomBuffer(dataLength);
    var hashingOptions = new UploadTransactionalHashingOptions
    {
        Algorithm = algorithm,
        PrecalculatedHash = GetRandomBuffer(16)
    };

    var client = await GetResourceClientAsync(disposingContainer.Container, dataLength);

    // Act
    var exception = ThrowsOrInconclusiveAsync<ArgumentException>(
        async () => await ParallelUploadAsync(client, new MemoryStream(data), hashingOptions, transferOptions: default));

    // Assert
    Assert.AreEqual("Precalculated hash not supported when potentially partitioning an upload.", exception.Message);
}
/// <summary>
/// Buffered write stream over a block blob: each flush stages a block and the
/// accumulated ids are committed on close.
/// </summary>
/// <param name="blockBlobClient">Client used to stage and commit blocks.</param>
/// <param name="bufferSize">Internal staging buffer size; validated below.</param>
/// <param name="position">Starting write position.</param>
/// <param name="conditions">Request conditions; null means unconditional.</param>
/// <param name="progressHandler">Optional upload progress sink.</param>
/// <param name="blobHttpHeaders">Headers applied at commit time.</param>
/// <param name="metadata">Metadata applied at commit time.</param>
/// <param name="tags">Tags applied at commit time.</param>
/// <param name="hashingOptions">Transactional hashing options for each staged block.</param>
public BlockBlobWriteStream(
    BlockBlobClient blockBlobClient,
    long bufferSize,
    long position,
    BlobRequestConditions conditions,
    IProgress<long> progressHandler,
    BlobHttpHeaders blobHttpHeaders,
    IDictionary<string, string> metadata,
    IDictionary<string, string> tags,
    UploadTransactionalHashingOptions hashingOptions)
    : base(position, bufferSize, progressHandler, hashingOptions)
{
    ValidateBufferSize(bufferSize);
    _blockBlobClient = blockBlobClient;
    // Fall back to unconditional writes when the caller supplied no conditions.
    _conditions = conditions ?? new BlobRequestConditions();
    // Staged block ids accumulate here until commit.
    _blockIds = new List<string>();
    _blobHttpHeaders = blobHttpHeaders;
    _metadata = metadata;
    _tags = tags;
}
/// <summary>
/// Verifies that combining client-side encryption with transactional hashing is
/// rejected with an ArgumentException before any upload occurs.
/// </summary>
public async Task HashingAndClientSideEncryptionIncompatible(TransactionalHashAlgorithm algorithm)
{
    await using var disposingContainer = await GetDisposingContainerAsync();

    // Arrange
    const int dataSize = Constants.KB;
    byte[] data = GetRandomBuffer(dataSize);
    var hashingOptions = new UploadTransactionalHashingOptions { Algorithm = algorithm };
    // Any syntactically valid encryption config suffices; it is never exercised.
    var encryptionOptions = new ClientSideEncryptionOptions(ClientSideEncryptionVersion.V1_0)
    {
        KeyEncryptionKey = new Mock<Core.Cryptography.IKeyEncryptionKey>().Object,
        KeyWrapAlgorithm = "foo"
    };
    var clientOptions = ClientBuilder.GetOptions();
    clientOptions._clientSideEncryptionOptions = encryptionOptions;

    var client = await GetResourceClientAsync(
        disposingContainer.Container,
        resourceLength: dataSize,
        createResource: true,
        options: clientOptions);

    // Act
    using var stream = new MemoryStream(data);
    var exception = Assert.ThrowsAsync<ArgumentException>(
        async () => await ParallelUploadAsync(client, stream, hashingOptions, transferOptions: default));

    // Assert
    Assert.AreEqual("Client-side encryption and transactional hashing are not supported at the same time.", exception.Message);
}
/// <summary>
/// Base constructor for buffered storage write streams: records position and
/// buffer size, wraps any progress handler, validates hashing options, and
/// acquires the staging buffer.
/// </summary>
/// <param name="position">Starting write position.</param>
/// <param name="bufferSize">Size of the internal staging buffer.</param>
/// <param name="progressHandler">Optional progress sink; wrapped in an aggregating incrementer.</param>
/// <param name="hashingOptions">Transactional hashing options; precalculated hashes are rejected.</param>
/// <param name="buffer">Optional caller-owned buffer; when null a pooled buffer is created and owned here.</param>
protected StorageWriteStream(
    long position,
    long bufferSize,
    IProgress<long> progressHandler,
    UploadTransactionalHashingOptions hashingOptions,
    PooledMemoryStream buffer = null)
{
    _position = position;
    _bufferSize = bufferSize;
    if (progressHandler != null)
    {
        _progressHandler = new AggregatingProgressIncrementer(progressHandler);
    }

    _hashingOptions = hashingOptions;
    // write streams don't support pre-calculated hashes
    if (_hashingOptions?.PrecalculatedHash != default)
    {
        throw Errors.PrecalculatedHashNotSupportedOnSplit();
    }

    // Reuse the caller's buffer when provided (caller keeps ownership);
    // otherwise rent a pooled buffer that this stream must dispose.
    _shouldDisposeBuffer = buffer == null;
    _buffer = buffer ?? new PooledMemoryStream(
        arrayPool: ArrayPool<byte>.Shared,
        absolutePosition: 0,
        maxArraySize: (int)Math.Min(Constants.MB, bufferSize));
}
/// <summary>
/// Parallel upload for share files. Skips (Inconclusive) algorithms the Files
/// service does not support.
/// </summary>
protected override async Task ParallelUploadAsync(
    ShareFileClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions,
    StorageTransferOptions transferOptions)
{
    AssertSupportsHashAlgorithm(hashingOptions?.Algorithm ?? default);

    // files ignores transfer options
    var uploadOptions = new ShareFileUploadOptions
    {
        TransactionalHashingOptions = hashingOptions,
    };
    await client.UploadAsync(source, uploadOptions);
}
/// <summary>
/// 1:1 upload for share files via UploadRange over the whole source; returns
/// only the raw response. Skips algorithms the Files service does not support.
/// </summary>
protected override async Task<Response> UploadPartitionAsync(
    ShareFileClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions)
{
    AssertSupportsHashAlgorithm(hashingOptions?.Algorithm ?? default);

    var response = await client.UploadRangeAsync(
        new HttpRange(0, source.Length),
        source,
        new ShareFileUploadRangeOptions { TransactionalHashingOptions = hashingOptions });
    return response.GetRawResponse();
}
/// <summary>
/// Opens a share-file write stream (no overwrite, position 0) with the given
/// hashing options and buffer size. Skips unsupported algorithms.
/// </summary>
protected override async Task<Stream> OpenWriteAsync(
    ShareFileClient client,
    UploadTransactionalHashingOptions hashingOptions,
    int internalBufferSize)
{
    AssertSupportsHashAlgorithm(hashingOptions?.Algorithm ?? default);

    var openWriteOptions = new ShareFileOpenWriteOptions
    {
        TransactionalHashingOptions = hashingOptions,
        BufferSize = internalBufferSize
    };
    return await client.OpenWriteAsync(false, 0, openWriteOptions);
}
/// <summary>
/// Parallel upload for Data Lake files, forwarding hashing and transfer settings.
/// </summary>
protected override async Task ParallelUploadAsync(
    DataLakeFileClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions,
    StorageTransferOptions transferOptions)
{
    var uploadOptions = new DataLakeFileUploadOptions
    {
        TransactionalHashingOptions = hashingOptions,
        TransferOptions = transferOptions
    };
    await client.UploadAsync(source, uploadOptions);
}
/// <summary>
/// 1:1 upload for Data Lake files via Append at offset 0. AppendAsync already
/// yields a raw Response, so no unwrapping is needed.
/// </summary>
protected override async Task<Response> UploadPartitionAsync(
    DataLakeFileClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions)
{
    var appendOptions = new DataLakeFileAppendOptions { TransactionalHashingOptions = hashingOptions };
    return await client.AppendAsync(source, 0, appendOptions);
}
/// <summary>
/// Calls the parallel upload method for the given resource client.
/// </summary>
/// <param name="client">Client to call upload on.</param>
/// <param name="source">Data to upload.</param>
/// <param name="hashingOptions">Transactional hashing options to use on upload.</param>
/// <param name="transferOptions">Storage transfer options to use on upload.</param>
protected abstract Task ParallelUploadAsync(
    TResourceClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions,
    StorageTransferOptions transferOptions);
/// <summary>
/// Calls the 1:1 upload method for the given resource client.
/// </summary>
/// <param name="client">Client to call upload on.</param>
/// <param name="source">Data to upload.</param>
/// <param name="hashingOptions">Transactional hashing options to use on upload.</param>
/// <returns>The raw service response from the upload call.</returns>
protected abstract Task <Response> UploadPartitionAsync(
    TResourceClient client,
    Stream source,
    UploadTransactionalHashingOptions hashingOptions);