/// <summary>
/// Helper that begins an asynchronous upload of a stream to a block blob. For small,
/// non-parallel uploads (below the single-blob threshold with one operation thread) it
/// issues a single Put Blob, optionally pre-computing the content MD5 and/or encrypting
/// the source into a memory buffer first; otherwise it streams through OpenWrite/Commit.
/// </summary>
/// <param name="source">The stream providing the blob content. Must not be <c>null</c>.</param>
/// <param name="length">The number of bytes to upload from the stream, or <c>null</c> to upload to the end of the stream.</param>
/// <param name="accessCondition">An <see cref="AccessCondition"/> object that represents the condition that must be met in order for the request to proceed. If <c>null</c>, no condition is used.</param>
/// <param name="options">A <see cref="BlobRequestOptions"/> object that specifies additional options for the request. May be <c>null</c>; defaults are applied.</param>
/// <param name="operationContext">An <see cref="OperationContext"/> object that represents the context for the current operation.</param>
/// <param name="callback">The callback delegate that will receive notification when the asynchronous operation completes.</param>
/// <param name="state">A user-defined object that will be passed to the callback delegate.</param>
/// <returns>An <see cref="ICancellableAsyncResult"/> that references the asynchronous operation.</returns>
internal ICancellableAsyncResult BeginUploadFromStreamHelper(Stream source, long? length, AccessCondition accessCondition, BlobRequestOptions options, OperationContext operationContext, AsyncCallback callback, object state)
{
    CommonUtility.AssertNotNull("source", source);

    if (length.HasValue)
    {
        CommonUtility.AssertInBounds("length", length.Value, 1);

        // If the stream is seekable, verify up front that it actually holds 'length' more bytes.
        if (source.CanSeek && length > source.Length - source.Position)
        {
            throw new ArgumentOutOfRangeException("length", SR.StreamLengthShortError);
        }
    }

    this.attributes.AssertNoSnapshot();
    BlobRequestOptions modifiedOptions = BlobRequestOptions.ApplyDefaults(options, BlobType.BlockBlob, this.ServiceClient);
    ExecutionState<NullType> tempExecutionState = CommonUtility.CreateTemporaryExecutionState(modifiedOptions);
    StorageAsyncResult<NullType> storageAsyncResult = new StorageAsyncResult<NullType>(callback, state);

    bool lessThanSingleBlobThreshold = CloudBlockBlob.IsLessThanSingleBlobThreshold(source, length, modifiedOptions, false);
    modifiedOptions.AssertPolicyIfRequired();

    if (modifiedOptions.ParallelOperationThreadCount.Value == 1 && lessThanSingleBlobThreshold)
    {
        // Because we may or may not want to calculate the MD5, and we may or may not want to encrypt, rather than have four branching code
        // paths, here we have an action that we will run, which continually gets added to, depending on which operations we need to do.
        // The confusing part is that we have to build it from the bottom up.
        string md5 = null;
        Stream sourceStream = source;
        Action actionToRun = null;

        // Innermost step: the actual Put Blob call.
        Action uploadAction = () =>
        {
            if (md5 == null && modifiedOptions.UseTransactionalMD5.Value)
            {
                throw new ArgumentException(SR.PutBlobNeedsStoreBlobContentMD5, "options");
            }

            this.UploadFromStreamHandler(
                sourceStream,
                length,
                md5,
                accessCondition,
                operationContext,
                modifiedOptions,
                storageAsyncResult);
        };
        actionToRun = uploadAction;

        if (modifiedOptions.StoreBlobContentMD5.Value)
        {
            // Wrap the current action in a pass that hashes the stream into Stream.Null,
            // then rewinds the source so the next step re-reads the same bytes.
            Action<Action> calculateMD5 = (continuation) =>
            {
                long startPosition = sourceStream.Position;
                StreamDescriptor streamCopyState = new StreamDescriptor();
                sourceStream.WriteToAsync(
                    Stream.Null,
                    length,
                    null /* maxLength */,
                    true,
                    tempExecutionState,
                    streamCopyState,
                    completedState =>
                    {
                        ContinueAsyncOperation(storageAsyncResult, completedState, () =>
                        {
                            if (completedState.ExceptionRef != null)
                            {
                                storageAsyncResult.OnComplete(completedState.ExceptionRef);
                            }
                            else
                            {
                                sourceStream.Position = startPosition;
                                md5 = streamCopyState.Md5;
                                continuation();
                            }
                        });
                    });

                storageAsyncResult.CancelDelegate = tempExecutionState.Cancel;
                if (storageAsyncResult.CancelRequested)
                {
                    storageAsyncResult.Cancel();
                }
            };

            Action oldActionToRun = actionToRun;
            actionToRun = () => calculateMD5(oldActionToRun);
        }

        if (modifiedOptions.EncryptionPolicy != null)
        {
            // Wrap the current action in a pass that encrypts the source into an in-memory
            // buffer; subsequent steps (MD5 / upload) then operate on the ciphertext.
            Action<Action> encryptStream = continuation =>
            {
                SyncMemoryStream syncMemoryStream = new SyncMemoryStream();

                // BUGFIX: operate on modifiedOptions (guaranteed non-null, and the object the
                // EncryptionPolicy guard above inspected) instead of the caller-supplied
                // 'options', which may be null when the policy comes from client defaults.
                modifiedOptions.AssertPolicyIfRequired();
                sourceStream = syncMemoryStream;

                if (modifiedOptions.EncryptionPolicy.EncryptionMode != BlobEncryptionMode.FullBlob)
                {
                    throw new InvalidOperationException(SR.InvalidEncryptionMode, null);
                }

                ICryptoTransform transform = modifiedOptions.EncryptionPolicy.CreateAndSetEncryptionContext(this.Metadata, false /* noPadding */);
                CryptoStream cryptoStream = new CryptoStream(syncMemoryStream, transform, CryptoStreamMode.Write);
                StreamDescriptor streamCopyState = new StreamDescriptor();

                source.WriteToAsync(
                    cryptoStream,
                    length,
                    null,
                    false,
                    tempExecutionState,
                    streamCopyState,
                    completedState =>
                    {
                        ContinueAsyncOperation(storageAsyncResult, completedState, () =>
                        {
                            if (completedState.ExceptionRef != null)
                            {
                                storageAsyncResult.OnComplete(completedState.ExceptionRef);
                            }
                            else
                            {
                                // Flush the CryptoStream in order to make sure that the last block of data is flushed. This call is a sync call
                                // but it is ok to have it because we're just writing to a memory stream.
                                cryptoStream.FlushFinalBlock();

                                // After the tempStream has been written to, we need to seek back to the beginning, so that it can be read from.
                                sourceStream.Seek(0, SeekOrigin.Begin);
                                length = syncMemoryStream.Length;
                                continuation();
                            }
                        });
                    });

                storageAsyncResult.CancelDelegate = tempExecutionState.Cancel;
                if (storageAsyncResult.CancelRequested)
                {
                    storageAsyncResult.Cancel();
                }
            };

            Action oldActionToRun = actionToRun;
            actionToRun = () => encryptStream(oldActionToRun);
        }

        actionToRun();
    }
    else
    {
        // Large or parallel upload: open a blob write stream, copy the source into it,
        // then commit the blocks.
        ICancellableAsyncResult result = this.BeginOpenWrite(
            accessCondition,
            modifiedOptions,
            operationContext,
            ar =>
            {
                ContinueAsyncOperation(storageAsyncResult, ar, () =>
                {
                    CloudBlobStream blobStream = this.EndOpenWrite(ar);
                    storageAsyncResult.OperationState = blobStream;

                    source.WriteToAsync(
                        blobStream,
                        length,
                        null /* maxLength */,
                        false,
                        tempExecutionState,
                        null /* streamCopyState */,
                        completedState =>
                        {
                            ContinueAsyncOperation(storageAsyncResult, completedState, () =>
                            {
                                if (completedState.ExceptionRef != null)
                                {
                                    storageAsyncResult.OnComplete(completedState.ExceptionRef);
                                }
                                else
                                {
                                    ICancellableAsyncResult commitResult = blobStream.BeginCommit(
                                        CloudBlob.BlobOutputStreamCommitCallback,
                                        storageAsyncResult);

                                    storageAsyncResult.CancelDelegate = commitResult.Cancel;
                                    if (storageAsyncResult.CancelRequested)
                                    {
                                        storageAsyncResult.Cancel();
                                    }
                                }
                            });
                        });

                    storageAsyncResult.CancelDelegate = tempExecutionState.Cancel;
                    if (storageAsyncResult.CancelRequested)
                    {
                        storageAsyncResult.Cancel();
                    }
                });
            },
            null /* state */);

        // We do not need to do this inside a lock, as storageAsyncResult is
        // not returned to the user yet.
        storageAsyncResult.CancelDelegate = result.Cancel;
    }

    return storageAsyncResult;
}
/// <summary>
/// Implements getting the stream without specifying a range.
/// </summary>
/// <param name="blobAttributes">The attributes.</param>
/// <param name="destStream">The destination stream.</param>
/// <param name="offset">The offset.</param>
/// <param name="length">The length.</param>
/// <param name="accessCondition">An <see cref="AccessCondition"/> object that represents the condition that must be met in order for the request to proceed. If <c>null</c>, no condition is used.</param>
/// <param name="options">A <see cref="BlobRequestOptions"/> object that specifies additional options for the request.</param>
/// <returns>
/// A <see cref="RESTCommand{T}"/> that gets the stream.
/// </returns>
private RESTCommand<NullType> GetBlobImpl(BlobAttributes blobAttributes, Stream destStream, long? offset, long? length, AccessCondition accessCondition, BlobRequestOptions options)
{
    string lockedETag = null;
    AccessCondition lockedAccessCondition = null;

    // A range (partial-content) GET is any request that specifies a starting offset.
    bool isRangeGet = offset.HasValue;

    // Adjust the range if the encryption policy is set and it is a range download. For now, we can do it this way.
    // Once we have multiple versions/algorithms, we will not be able to do this adjustment deterministically without fetching
    // properties from the service. Since it is an extra call, we will add it once we support multiple versions.
    int discardFirst = 0;       // bytes at the front of the decrypted data to discard before handing to the user
    long? endOffset = null;     // inclusive end offset of the (possibly expanded) download range
    bool bufferIV = false;      // true when an extra leading AES block must be fetched to serve as the IV
    long? userSpecifiedLength = length;  // remember the caller's requested length before we widen the range
    options.AssertPolicyIfRequired();

    if (isRangeGet && options.EncryptionPolicy != null)
    {
#if WINDOWS_PHONE
        // Windows phone does not allow setting padding mode to none. Uses PKCS7 by default. So we cannot download closed ranges that do not
        // cover the last block of data. However, since we cannot know the length unless we do a fetch attributes, we will just throw
        // for now if length is specified. Open ranges like x - are still allowed.
        if (length.HasValue)
        {
            throw new InvalidOperationException(SR.RangeDownloadNotPermittedOnPhone);
        }
#endif

        // Let's say the user requests a download with offset = 39 and length = 54.
        // First calculate the endOffset if length has a value.
        // endOffset starts at 92 (39 + 54 - 1), but then gets increased to 95 (one less than the next higher multiple of 16).
        if (length.HasValue)
        {
            endOffset = offset.Value + length.Value - 1;

            // AES-CBC works in 16 byte blocks. So if a user specifies a range whose start and end offsets are not multiples of 16,
            // update them so we can download entire AES blocks to decrypt.
            // Adjust the end offset to be one less than a multiple of 16.
            if ((endOffset.Value + 1) % 16 != 0)
            {
                endOffset += (int)(16 - ((endOffset.Value + 1) % 16));
            }
        }

        // Adjust the start offset to be a multiple of 16:
        // offset gets reduced down to the highest multiple of 16 lower than the current value (32 in the example).
        discardFirst = (int)(offset.Value % 16);
        offset -= discardFirst;

        // We need another 16 bytes for IV if offset is not 0. If the offset is 0, it is the first AES block
        // and the IV is obtained from blob metadata.
        // offset is reduced by another 16 (to a final value of 16 in the example).
        if (offset > 15)
        {
            offset -= 16;
            bufferIV = true;
        }

        // Adjust the length according to the new start and end offsets.
        // length = 80 in the example (a multiple of 16).
        if (endOffset.HasValue)
        {
            length = endOffset.Value - offset.Value + 1;
        }
    }

    // State captured by the command delegates below; mutated across the request lifecycle
    // (first response populates properties; retries resume from where the copy stopped).
    bool arePropertiesPopulated = false;
    bool decryptStreamCreated = false;
    ICryptoTransform transform = null;
    string storedMD5 = null;
    long startingOffset = offset.HasValue ? offset.Value : 0;
    long? startingLength = length;
    long? validateLength = null;

    RESTCommand<NullType> getCmd = new RESTCommand<NullType>(this.ServiceClient.Credentials, blobAttributes.StorageUri);

    options.ApplyToStorageCommand(getCmd);
    getCmd.CommandLocationMode = CommandLocationMode.PrimaryOrSecondary;
    getCmd.RetrieveResponseStream = true;
    getCmd.DestinationStream = destStream;
    getCmd.CalculateMd5ForResponseStream = !options.DisableContentMD5Validation.Value;
    getCmd.BuildRequestDelegate = (uri, builder, serverTimeout, useVersionHeader, ctx) => BlobHttpWebRequestFactory.Get(uri, serverTimeout, blobAttributes.SnapshotTime, offset, length, options.UseTransactionalMD5.Value, accessCondition, useVersionHeader, ctx);
    getCmd.SignRequest = this.ServiceClient.AuthenticationHandler.SignRequest;

    // On retry: pin the blob version with an If-Match on the ETag captured from the first
    // response, and advance the requested range past the bytes already copied.
    getCmd.RecoveryAction = (cmd, ex, ctx) =>
    {
        if ((lockedAccessCondition == null) && !string.IsNullOrEmpty(lockedETag))
        {
            lockedAccessCondition = AccessCondition.GenerateIfMatchCondition(lockedETag);
            if (accessCondition != null)
            {
                // Preserve the caller's lease on the regenerated condition.
                lockedAccessCondition.LeaseId = accessCondition.LeaseId;
            }
        }

        if (cmd.StreamCopyState != null)
        {
            // Resume from wherever the previous attempt's copy stopped.
            offset = startingOffset + cmd.StreamCopyState.Length;
            if (startingLength.HasValue)
            {
                length = startingLength.Value - cmd.StreamCopyState.Length;
            }
        }

        getCmd.BuildRequestDelegate = (uri, builder, serverTimeout, useVersionHeader, context) => BlobHttpWebRequestFactory.Get(uri, serverTimeout, blobAttributes.SnapshotTime, offset, length, options.UseTransactionalMD5.Value && !arePropertiesPopulated, lockedAccessCondition ?? accessCondition, useVersionHeader, context);
    };

    getCmd.PreProcessResponse = (cmd, resp, ex, ctx) =>
    {
        // Range GETs expect 206 Partial Content; full downloads expect 200 OK.
        HttpResponseParsers.ProcessExpectedStatusCodeNoException(offset.HasValue ? HttpStatusCode.PartialContent : HttpStatusCode.OK, resp, NullType.Value, cmd, ex);

        // One-time setup on the first (successful) response.
        if (!arePropertiesPopulated)
        {
            CloudBlob.UpdateAfterFetchAttributes(blobAttributes, resp, isRangeGet);
            storedMD5 = resp.Headers[HttpResponseHeader.ContentMd5];

            if (options.EncryptionPolicy != null)
            {
                // Wrap the user's stream so the downloaded ciphertext is decrypted on the fly.
                cmd.DestinationStream = BlobEncryptionPolicy.WrapUserStreamWithDecryptStream(this, cmd.DestinationStream, options, blobAttributes, isRangeGet, out transform, endOffset, userSpecifiedLength, discardFirst, bufferIV);
                decryptStreamCreated = true;
            }

            if (!options.DisableContentMD5Validation.Value &&
                options.UseTransactionalMD5.Value &&
                string.IsNullOrEmpty(storedMD5))
            {
                throw new StorageException(
                    cmd.CurrentResult,
                    SR.MD5NotPresentError,
                    null)
                {
                    IsRetryable = false
                };
            }

            // If the download fails and Get Blob needs to resume the download, going to the
            // same storage location is important to prevent a possible ETag mismatch.
            getCmd.CommandLocationMode = cmd.CurrentResult.TargetLocation == StorageLocation.Primary ? CommandLocationMode.PrimaryOnly : CommandLocationMode.SecondaryOnly;
            lockedETag = blobAttributes.Properties.ETag;
            if (resp.ContentLength >= 0)
            {
                validateLength = resp.ContentLength;
            }

            arePropertiesPopulated = true;
        }

        return NullType.Value;
    };

    getCmd.PostProcessResponse = (cmd, resp, ctx) =>
    {
        HttpResponseParsers.ValidateResponseStreamMd5AndLength(validateLength, storedMD5, cmd);
        return NullType.Value;
    };

    getCmd.DisposeAction = (cmd) =>
    {
        // Crypto stream should be closed in order for it to flush the final block of data to the underlying stream and to clear internal buffers.
        // It is ok to do this here because we have ensured that we don't end up closing the user provided stream in the process of closing the
        // cryptostream by wrapping the user provided stream within the NonCloseableStream.
        if (decryptStreamCreated)
        {
            try
            {
                // This only throws a NotSupportedException if the current stream is not writable (which should never be true in our case).
                // But the try/catch is for safe exit if something unexpected happens and we get an exception
                // when Close is invoked.
                cmd.DestinationStream.Close();

                // Dispose the ICryptoTransform object created for decryption if required. For the range download case, BlobDecryptStream will
                // dispose the transform function. For full blob downloads, we will dispose it here.
                if (transform != null)
                {
                    transform.Dispose();
                }
            }
            catch (Exception ex)
            {
                throw new StorageException(
                    cmd.CurrentResult,
                    SR.CryptoError,
                    ex)
                {
                    IsRetryable = false
                };
            }
        }
    };

    return getCmd;
}