/// <summary>
/// Upload a local file to an Azure blob (block, page, or append) using the Track2 storage client library.
/// </summary>
/// <param name="taskId">Task id used to key the PowerShell progress record in the output stream.</param>
/// <param name="localChannel">Channel providing the storage context used to build the Track2 clients.</param>
/// <param name="filePath">Path of the local file to upload.</param>
/// <param name="blob">Destination blob object; converted to a Track2 client internally.</param>
internal virtual async Task UploadBlobwithSdk(long taskId, IStorageBlobManagement localChannel, string filePath, StorageBlob.CloudBlob blob)
{
    BlobClientOptions options = null;

    // Proceed only when -Force is set, the blob does not exist yet, or the user confirms the overwrite.
    if (this.Force.IsPresent || !blob.Exists() || ShouldContinue(string.Format(Resources.OverwriteConfirmation, blob.Uri), null))
    {
        // Prepare blob properties, metadata and access tier from the cmdlet parameters.
        BlobHttpHeaders blobHttpHeaders = CreateBlobHttpHeaders(BlobProperties);
        IDictionary<string, string> metadata = new Dictionary<string, string>();
        SetBlobMeta_Track2(metadata, this.Metadata);
        AccessTier? accesstier = GetAccessTier_Track2(this.standardBlobTier, this.pageBlobTier);

        // Prepare the progress handler that feeds the PowerShell progress record.
        long fileSize = new FileInfo(ResolvedFileName).Length;
        string activity = String.Format(Resources.SendAzureBlobActivity, this.File, blob.Name, blob.Container.Name);
        string status = Resources.PrepareUploadingBlob;
        ProgressRecord pr = new ProgressRecord(OutputStream.GetProgressId(taskId), activity, status);
        IProgress<long> progressHandler = new Progress<long>((finishedBytes) =>
        {
            if (pr != null)
            {
                // Size of the source file might be 0; when it is, directly treat the progress as 100 percent.
                pr.PercentComplete = 0 == fileSize ? 100 : (int)(finishedBytes * 100 / fileSize);
                pr.StatusDescription = string.Format(CultureInfo.CurrentCulture, Resources.FileTransmitStatus, pr.PercentComplete);
                // Fixed: removed leftover debug Console.WriteLine(finishedBytes); progress must go
                // through the cmdlet output stream only.
                this.OutputStream.WriteProgress(pr);
            }
        });

        using (FileStream stream = System.IO.File.OpenRead(ResolvedFileName))
        {
            // Block blob: a single UploadAsync call; the SDK handles chunking and parallelism.
            if (string.Equals(blobType, BlockBlobType, StringComparison.InvariantCultureIgnoreCase))
            {
                BlobClient blobClient = GetTrack2BlobClient(blob, localChannel.StorageContext, options);
                StorageTransferOptions transferOption = new StorageTransferOptions()
                {
                    MaximumConcurrency = this.GetCmdletConcurrency()
                };
                BlobUploadOptions uploadOptions = new BlobUploadOptions();
                uploadOptions.Metadata = metadata;
                uploadOptions.HttpHeaders = blobHttpHeaders;
                uploadOptions.Conditions = this.BlobRequestConditions;
                uploadOptions.AccessTier = accesstier;
                uploadOptions.ProgressHandler = progressHandler;
                uploadOptions.TransferOptions = transferOption;
                await blobClient.UploadAsync(stream, uploadOptions, CmdletCancellationToken).ConfigureAwait(false);
            }
            // Page or append blob: create the blob first, then upload the content chunk by chunk.
            else if (string.Equals(blobType, PageBlobType, StringComparison.InvariantCultureIgnoreCase)
                || string.Equals(blobType, AppendBlobType, StringComparison.InvariantCultureIgnoreCase))
            {
                PageBlobClient pageblobClient = null;
                AppendBlobClient appendblobClient = null;

                // Create the blob.
                if (string.Equals(blobType, PageBlobType, StringComparison.InvariantCultureIgnoreCase)) // page
                {
                    // Page blobs require a size that is a multiple of 512 bytes.
                    if (fileSize % 512 != 0)
                    {
                        throw new ArgumentException(String.Format("File size {0} Bytes is invalid for PageBlob, must be a multiple of 512 bytes.", fileSize.ToString()));
                    }
                    pageblobClient = GetTrack2PageBlobClient(blob, localChannel.StorageContext, options);
                    PageBlobCreateOptions createOptions = new PageBlobCreateOptions();
                    createOptions.Metadata = metadata;
                    createOptions.HttpHeaders = blobHttpHeaders;
                    createOptions.Conditions = this.PageBlobRequestConditions;
                    Response<BlobContentInfo> blobInfo = await pageblobClient.CreateAsync(fileSize, createOptions, CmdletCancellationToken).ConfigureAwait(false);
                }
                else // append
                {
                    appendblobClient = GetTrack2AppendBlobClient(blob, localChannel.StorageContext, options);
                    AppendBlobCreateOptions createOptions = new AppendBlobCreateOptions();
                    createOptions.Metadata = metadata;
                    createOptions.HttpHeaders = blobHttpHeaders;
                    createOptions.Conditions = this.AppendBlobRequestConditions;
                    Response<BlobContentInfo> blobInfo = await appendblobClient.CreateAsync(createOptions, CmdletCancellationToken).ConfigureAwait(false);
                }

                // Upload the blob content in chunks of at most 4MB, lazily allocating one reusable buffer.
                byte[] uploadcache4MB = null;
                byte[] uploadcache = null;
                progressHandler.Report(0);
                long offset = 0;
                while (offset < fileSize)
                {
                    // Get chunk size and prepare the cache buffer.
                    int chunksize = size4MB;
                    if (chunksize <= (fileSize - offset)) // chunk size will be 4MB
                    {
                        if (uploadcache4MB == null)
                        {
                            uploadcache4MB = new byte[size4MB];
                        }
                        uploadcache = uploadcache4MB;
                    }
                    else // last chunk can be < 4MB
                    {
                        chunksize = (int)(fileSize - offset);
                        if (uploadcache4MB == null)
                        {
                            // Whole file fits in a single short chunk; allocate exactly what is needed.
                            uploadcache = new byte[chunksize];
                        }
                        else
                        {
                            uploadcache = uploadcache4MB;
                        }
                    }

                    // Read the next chunk; flow the cmdlet cancellation token so Ctrl+C stops the read too.
                    int readoutcount = await stream.ReadAsync(uploadcache, 0, chunksize, CmdletCancellationToken).ConfigureAwait(false);
                    if (readoutcount == 0)
                    {
                        // Defensive: the file shrank after its length was captured (EOF before the
                        // expected size). Bail out instead of spinning forever on zero-byte reads.
                        break;
                    }
                    MemoryStream chunkContent = new MemoryStream(uploadcache, 0, readoutcount);

                    // Upload the chunk.
                    if (string.Equals(blobType, PageBlobType, StringComparison.InvariantCultureIgnoreCase)) // page
                    {
                        Response<PageInfo> pageInfo = await pageblobClient.UploadPagesAsync(chunkContent, offset, null, null, null, CmdletCancellationToken).ConfigureAwait(false);
                    }
                    else // append
                    {
                        Response<BlobAppendInfo> appendInfo = await appendblobClient.AppendBlockAsync(chunkContent, null, null, null, CmdletCancellationToken).ConfigureAwait(false);
                    }

                    // Update progress.
                    offset += readoutcount;
                    progressHandler.Report(offset);
                }

                // For page blobs, apply the requested access tier after the upload completes.
                if (string.Equals(blobType, PageBlobType, StringComparison.InvariantCultureIgnoreCase) && accesstier != null)
                {
                    await pageblobClient.SetAccessTierAsync(accesstier.Value, cancellationToken: CmdletCancellationToken).ConfigureAwait(false);
                }
            }
            else
            {
                throw new InvalidOperationException(string.Format(
                    CultureInfo.CurrentCulture,
                    Resources.InvalidBlobType,
                    blobType,
                    BlobName));
            }
        }
        WriteCloudBlobObject(taskId, localChannel, blob);
    }
}
/// <summary>
/// Verifies that AppendBlockFromUriAsync fails with a RequestFailedException for every
/// unsatisfiable destination or source access condition (ETag, lease, append position,
/// max size, and modified-since variants).
/// </summary>
public async Task AppendBlockFromUriAsync_AccessConditionsFail()
{
    var garbageLeaseId = GetGarbageLeaseId();
    AccessConditionParameters[] testCases = new[]
    {
        new AccessConditionParameters { IfModifiedSince = NewDate },
        new AccessConditionParameters { IfUnmodifiedSince = OldDate },
        new AccessConditionParameters { Match = GarbageETag },
        new AccessConditionParameters { NoneMatch = ReceivedETag },
        new AccessConditionParameters { LeaseId = garbageLeaseId },
        new AccessConditionParameters { AppendPosE = 1 },
        new AccessConditionParameters { MaxSizeLTE = 1 },
        new AccessConditionParameters { SourceIfModifiedSince = NewDate },
        new AccessConditionParameters { SourceIfUnmodifiedSince = OldDate },
        new AccessConditionParameters { SourceIfMatch = GarbageETag },
        new AccessConditionParameters { SourceIfNoneMatch = ReceivedETag }
    };
    foreach (AccessConditionParameters parameters in testCases)
    {
        await using DisposingContainer test = await GetTestContainerAsync();

        // Arrange: public container so the copy source is readable without credentials.
        await test.Container.SetAccessPolicyAsync(PublicAccessType.BlobContainer);
        var data = GetRandomBuffer(7);
        using (var stream = new MemoryStream(data))
        {
            AppendBlobClient sourceBlob = InstrumentClient(test.Container.GetAppendBlobClient(GetNewBlobName()));
            await sourceBlob.CreateAsync();
            await sourceBlob.AppendBlockAsync(stream);

            AppendBlobClient destBlob = InstrumentClient(test.Container.GetAppendBlobClient(GetNewBlobName()));
            await destBlob.CreateAsync();
            parameters.NoneMatch = await SetupBlobMatchCondition(destBlob, parameters.NoneMatch);
            parameters.SourceIfNoneMatch = await SetupBlobMatchCondition(sourceBlob, parameters.SourceIfNoneMatch);
            AppendBlobRequestConditions accessConditions = BuildDestinationAccessConditions(
                parameters: parameters,
                lease: true,
                appendPosAndMaxSize: true);
            AppendBlobRequestConditions sourceAccessConditions = BuildSourceAccessConditions(parameters);

            // Act: the operation must throw. Each parameter set fails with a different service
            // error code, so only assert that a request failure actually surfaced.
            // (Fixed: previous callback was the vacuous Assert.IsTrue(true).)
            await TestHelper.AssertExpectedExceptionAsync<RequestFailedException>(
                destBlob.AppendBlockFromUriAsync(
                    sourceUri: sourceBlob.Uri,
                    conditions: accessConditions,
                    sourceConditions: sourceAccessConditions),
                actualException => Assert.IsNotNull(actualException.ErrorCode)
            );
        }
    }
}