/// <summary>
/// Occurs when the progress of a blob transfer operation changes.
/// </summary>
/// <param name="e">A <see cref="BlobTransferProgressChangedEventArgs"/> that contains the progress information.</param>
protected virtual void OnTaskProgressChanged(BlobTransferProgressChangedEventArgs e)
{
    // Use the null-conditional invoke so the delegate is read only once.
    // The original read the event field twice (null check, then invoke), which
    // can throw NullReferenceException if the last handler unsubscribes between
    // the two reads on another thread.
    TransferProgressChanged?.Invoke(this, e);
}
/// <summary>
/// Relays a low-level blob transfer progress notification to subscribers of
/// <see cref="DownloadProgressChanged"/>, translating the event-args type.
/// </summary>
/// <param name="sender">The originating transfer client (unused).</param>
/// <param name="e">Progress data for the blob currently being downloaded.</param>
private void OnDownloadBlobTransferProgressChanged(object sender, BlobTransferProgressChangedEventArgs e)
{
    // Snapshot the delegate once so a concurrent unsubscribe cannot race
    // between the null check and the invocation.
    EventHandler<DownloadProgressChangedEventArgs> handler = this.DownloadProgressChanged;
    if (handler == null)
    {
        return;
    }

    var args = new DownloadProgressChangedEventArgs(e.BytesTransferred, e.TotalBytesToTransfer);
    handler(this, args);
}
/// <summary>
/// Raises <see cref="UploadProgressChanged"/> when a transfer progress
/// notification refers to the given local file; notifications for other
/// files are ignored.
/// </summary>
/// <param name="file">Local file path this handler is interested in.</param>
/// <param name="blobTransferProgressChangedEventArgs">Progress data from the transfer client.</param>
private void OnUploadProgressChanged(string file, BlobTransferProgressChangedEventArgs blobTransferProgressChangedEventArgs)
{
    // Progress events are raised for every file in flight; only forward the
    // ones that belong to this upload.
    if (blobTransferProgressChangedEventArgs.LocalFile != file)
    {
        return;
    }

    // Snapshot the delegate so a concurrent unsubscribe cannot race with the invoke.
    var handler = UploadProgressChanged;
    if (handler == null)
    {
        return;
    }

    handler(
        sender: this,
        e: new UploadProgressChangedEventArgs(
            blobTransferProgressChangedEventArgs.BytesTransferred,
            blobTransferProgressChangedEventArgs.TotalBytesToTransfer));
}
/// <summary>
/// Computes percentage and speed for the current transfer and raises the
/// progress-changed event via <see cref="OnTaskProgressChanged"/>.
/// </summary>
/// <param name="transferContext">Context describing the blob and local file being transferred.</param>
/// <param name="bytesProcessed">Total bytes transferred so far.</param>
/// <param name="lastBlockSize">Size of the block that just completed.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="transferContext"/> is null.</exception>
protected void InvokeProgressCallback(BlobTransferContext transferContext, long bytesProcessed, long lastBlockSize)
{
    if (transferContext == null)
    {
        throw new ArgumentNullException(nameof(transferContext));
    }

    // Guard against a zero-length transfer: the original divided by
    // transferContext.Length unconditionally, producing NaN/Infinity cast to
    // int for empty blobs. Report 100% for a zero-length transfer instead.
    int progress = transferContext.Length > 0
        ? (int)((double)bytesProcessed / transferContext.Length * 100)
        : 100;

    double speed = _uploadDownloadSpeedCalculator.UpdateCountersAndCalculateSpeed(bytesProcessed);

    BlobTransferProgressChangedEventArgs eArgs = new BlobTransferProgressChangedEventArgs(
        bytesProcessed,
        lastBlockSize,
        transferContext.Length,
        progress,
        speed,
        transferContext.Blob.Uri,
        transferContext.LocalFilePath,
        null);

    OnTaskProgressChanged(eArgs);
}
/// <summary>
/// Uploads a local file to a block blob, optionally encrypting each block, using
/// multiple worker threads that drain a shared queue of (block id, block length)
/// work items. Supports refreshing an expired shared access signature on HTTP 403.
/// </summary>
/// <param name="cancellationToken">Token used to cancel the upload.</param>
/// <param name="uri">Destination blob URI.</param>
/// <param name="localFile">Path of the local file to upload.</param>
/// <param name="contentType">Content type to set on the blob.</param>
/// <param name="subFolder">Sub-folder component used when resolving the blob reference.</param>
/// <param name="fileEncryption">Optional per-file encryptor; when null, blocks are uploaded as read.</param>
/// <param name="client">Blob client used to resolve the blob.</param>
/// <param name="retryPolicy">Policy consulted before re-queuing a failed block.</param>
/// <param name="getSharedAccessSignature">Optional factory for a fresh SAS, used to recover from 403 responses.</param>
private void UploadFileToBlob(
    CancellationToken cancellationToken,
    Uri uri,
    string localFile,
    string contentType,
    string subFolder,
    FileEncryption fileEncryption,
    CloudBlobClient client,
    IRetryPolicy retryPolicy,
    Func<string> getSharedAccessSignature)
{
    //attempt to open the file first so that we throw an exception before getting into the async work
    using (new FileStream(localFile, FileMode.Open, FileAccess.Read))
    {
    }

    Exception lastException = null;
    CloudBlockBlob blob = null;

    // stats from azurescope show 10 to be an optimal number of transfer threads
    int numThreads = ParallelTransferThreadCount;
    var file = new FileInfo(localFile);
    long fileSize = file.Length;
    int maxBlockSize = GetBlockSize(fileSize);

    // Prepare a queue of blocks to be uploaded. Each queue item is a key-value pair where
    // the 'key' is block id and 'value' is the block length.
    // NOTE(review): helper name "PreapreUploadQueue" is misspelled in the project API.
    List<string> blockList;
    var queue = PreapreUploadQueue(maxBlockSize, fileSize, ref numThreads, out blockList);

    // NOTE(review): exceptionCount is incremented from multiple worker threads
    // without Interlocked; its value is only advisory here.
    int exceptionCount = 0;
    blob = GetCloudBlockBlob(uri, client, subFolder, localFile, contentType, getSharedAccessSignature);
    // Remove any stale blob so previously committed blocks cannot leak into this upload.
    blob.DeleteIfExists(options: new BlobRequestOptions() { RetryPolicy = retryPolicy });

    if (cancellationToken.IsCancellationRequested)
    {
        TaskCompletedCallback(true, null, BlobTransferType.Upload, localFile, uri);
        cancellationToken.ThrowIfCancellationRequested();
    }

    var options = new BlobRequestOptions
    {
        RetryPolicy = retryPolicy,
        ServerTimeout = TimeSpan.FromSeconds(90)
    };

    // Launch threads to upload blocks.
    var tasks = new List<Task>();
    long bytesSent = 0;

    // Worker body shared by all upload threads: each worker opens its own
    // FileStream (streams are not thread-safe) and pulls blocks off the queue
    // until it is empty.
    Action action = () =>
    {
        // Per-thread failure history fed to the retry policy.
        List<Exception> exceptions = new List<Exception>();
        int sasRetry = 0;

        // Test hook: force the shared access signature to expire so the 403
        // retry path is exercised.
        if (_forceSharedAccessSignatureRetry != TimeSpan.Zero)
        {
            Thread.Sleep(_forceSharedAccessSignatureRetry);
        }

        if (queue.Count > 0)
        {
            FileStream fs = null;
            try
            {
                fs = new FileStream(file.FullName, FileMode.Open, FileAccess.Read);
                KeyValuePair<int, int> blockIdAndLength;
                while (queue.TryDequeue(out blockIdAndLength))
                {
                    cancellationToken.ThrowIfCancellationRequested();
                    try
                    {
                        var buffer = new byte[blockIdAndLength.Value];
                        var binaryReader = new BinaryReader(fs);

                        // move the file system reader to the proper position
                        fs.Seek(blockIdAndLength.Key * (long)maxBlockSize, SeekOrigin.Begin);
                        int readSize = binaryReader.Read(buffer, 0, blockIdAndLength.Value);

                        if (fileEncryption != null)
                        {
                            // The encryptor is shared between threads; serialize access.
                            lock (fileEncryption)
                            {
                                using (FileEncryptionTransform encryptor = fileEncryption.GetTransform(file.Name, blockIdAndLength.Key * (long)maxBlockSize))
                                {
                                    // In-place encryption: input and output share the buffer.
                                    encryptor.TransformBlock(buffer, 0, readSize, buffer, 0);
                                }
                            }
                        }

                        using (var ms = new MemoryStream(buffer, 0, blockIdAndLength.Value))
                        {
                            // Block ids must be Base64 and equal length; zero-pad the index.
                            string blockIdString = Convert.ToBase64String(Encoding.ASCII.GetBytes(string.Format(CultureInfo.InvariantCulture, "BlockId{0}", blockIdAndLength.Key.ToString("0000000", CultureInfo.InvariantCulture))));
                            // MD5 lets the service verify block integrity on receipt.
                            string blockHash = GetMd5HashFromStream(buffer);
                            if (blob != null)
                            {
                                blob.PutBlock(blockIdString, ms, blockHash, options: options);
                            }
                        }

                        Interlocked.Add(ref bytesSent, blockIdAndLength.Value);
                        var progress = (int)((double)bytesSent / file.Length * 100);
                        var eArgs = new BlobTransferProgressChangedEventArgs(bytesSent, blockIdAndLength.Value, file.Length, progress, _uploadSpeedCalculator.UpdateCountersAndCalculateSpeed(bytesSent), uri, localFile, null);
                        OnTaskProgressChanged(eArgs);
                    }
                    catch (StorageException ex)
                    {
                        // 403 with a SAS factory available means the signature likely
                        // expired: refresh the blob reference and retry the block,
                        // up to MaxSasSignatureRetry times per thread.
                        if (ex.RequestInformation.HttpStatusCode == (int)HttpStatusCode.Forbidden && getSharedAccessSignature != null)
                        {
                            sasRetry++;
                            if (sasRetry > MaxSasSignatureRetry)
                            {
                                throw;
                            }
                            blob = GetCloudBlockBlob(uri, client, subFolder, localFile, contentType, getSharedAccessSignature);
                        }
                        else
                        {
                            TimeSpan tm;
                            exceptionCount++;
                            exceptions.Add(ex);
                            if (!retryPolicy.ShouldRetry(exceptions.Count, ex.RequestInformation.HttpStatusCode, ex, out tm, new OperationContext()))
                            {
                                lastException = new AggregateException(String.Format(CultureInfo.InvariantCulture, "Received {0} exceptions while uploading. Canceling upload.", exceptions.Count), exceptions);
                                throw lastException;
                            }
                            Thread.Sleep(tm);
                        }
                        // Put the failed block back so this (or another) thread retries it.
                        queue.Enqueue(blockIdAndLength);
                    }
                    catch (IOException ex)
                    {
                        TimeSpan tm;
                        exceptionCount++;
                        exceptions.Add(ex);
                        if (!retryPolicy.ShouldRetry(exceptions.Count, 0, ex, out tm, new OperationContext()))
                        {
                            lastException = new AggregateException(String.Format(CultureInfo.InvariantCulture, "Received {0} exceptions while reading file {1} @ location {2} to be uploaded. Canceling upload.", exceptions.Count, file.Name, blockIdAndLength.Key * (long)maxBlockSize), exceptions);
                            throw lastException;
                        }

                        // dispose existing file stream
                        if (fs != null)
                        {
                            fs.Close();
                        }

                        Thread.Sleep(tm);

                        // try to reopen the file stream again
                        fs = new FileStream(file.FullName, FileMode.Open, FileAccess.Read);
                        queue.Enqueue(blockIdAndLength);
                    }
                }
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
            }
        }
    };

    for (int idxThread = 0; idxThread < numThreads; idxThread++)
    {
        tasks.Add(Task.Factory.StartNew(
            action,
            cancellationToken,
            TaskCreationOptions.AttachedToParent,
            TaskScheduler.Current));
    }

    if (cancellationToken.IsCancellationRequested)
    {
        TaskCompletedCallback(true, lastException, BlobTransferType.Upload, localFile, uri);
        cancellationToken.ThrowIfCancellationRequested();
    }

    // Once every block is staged, commit the block list to materialize the blob.
    // A 403 here is handled with one SAS refresh and a single retry.
    Task.Factory.ContinueWhenAll(tasks.ToArray(), (Task[] result) =>
    {
        // If any worker faulted, skip the commit; lastException carries the failure.
        if (result.Any(t => t.IsFaulted))
        {
            return;
        }
        try
        {
            blob.PutBlockList(blockList, options: options);
        }
        catch (StorageException ex)
        {
            if (ex.RequestInformation.HttpStatusCode == (int)HttpStatusCode.Forbidden && getSharedAccessSignature != null)
            {
                blob = GetCloudBlockBlob(uri, client, subFolder, localFile, contentType, getSharedAccessSignature);
                blob.PutBlockList(blockList, options: options);
            }
            else
            {
                throw;
            }
        }
    }, TaskContinuationOptions.None).Wait(cancellationToken);

    TaskCompletedCallback(cancellationToken.IsCancellationRequested, lastException, BlobTransferType.Upload, localFile, uri);
}
/// <summary>
/// Downloads a block blob to a local file using multiple worker threads that
/// drain a shared queue of (offset, length) ranges, optionally decrypting the
/// stream. Supports refreshing an expired shared access signature on HTTP 403.
/// </summary>
/// <param name="uri">Source blob URI.</param>
/// <param name="localFile">Destination path on the local file system.</param>
/// <param name="fileEncryption">Optional decryptor applied while writing; null for plain downloads.</param>
/// <param name="initializationVector">IV forwarded to <c>ReadResponseStream</c> for decryption.</param>
/// <param name="client">Blob client used to resolve the blob.</param>
/// <param name="cancellationToken">Token used to cancel the download.</param>
/// <param name="retryPolicy">Policy consulted before re-queuing a failed range.</param>
/// <param name="getSharedAccessSignature">Optional factory for a fresh SAS, used to recover from 403 responses.</param>
private void DownloadFileFromBlob(
    Uri uri,
    string localFile,
    FileEncryption fileEncryption,
    ulong initializationVector,
    CloudBlobClient client,
    CancellationToken cancellationToken,
    IRetryPolicy retryPolicy,
    Func<string> getSharedAccessSignature)
{
    // NOTE(review): exceptionCount is incremented from multiple worker threads
    // without Interlocked; it is only used in the failure message.
    int exceptionCount = 0;
    int numThreads = ParallelTransferThreadCount;
    Exception lastException = null;
    long bytesDownloaded = 0;

    CloudBlockBlob blob = InitializeCloudBlockBlob(uri, client, retryPolicy, getSharedAccessSignature);
    long blobLength = blob.Properties.Length;
    int bufferLength = GetBlockSize(blobLength);

    // Partition the blob into (offset, length) ranges shared by all workers.
    var queue = PrepareDownloadQueue(blobLength, bufferLength, ref numThreads);

    if (cancellationToken.IsCancellationRequested)
    {
        TaskCompletedCallback(true, null, BlobTransferType.Download, localFile, uri);
        cancellationToken.ThrowIfCancellationRequested();
    }

    // FileShare.Read allows other readers (e.g. progress inspection) while we write.
    using (var fs = new FileStream(localFile, FileMode.OpenOrCreate, FileAccess.Write, FileShare.Read))
    {
        var tasks = new List<Task>();

        // Worker body shared by all download threads.
        Action action = () =>
        {
            KeyValuePair<long, int> blockOffsetAndLength;
            int exceptionPerThread = 0;
            int sasRetry = 0;

            // A buffer to fill per read request.
            var buffer = new byte[bufferLength];

            if (_forceSharedAccessSignatureRetry != TimeSpan.Zero)
            {
                // The following sleep is for unit test purpose and we will force the shared access signature to expire and hit retry code path
                Thread.Sleep(_forceSharedAccessSignatureRetry);
            }

            while (queue.TryDequeue(out blockOffsetAndLength))
            {
                if (cancellationToken.IsCancellationRequested)
                {
                    break;
                }
                try
                {
                    var blobGetRequest = BlobGetRequest(blockOffsetAndLength, blob);
                    using (var response = blobGetRequest.GetResponse() as HttpWebResponse)
                    {
                        if (response != null)
                        {
                            // Writes the range into fs (decrypting if configured) and
                            // advances bytesDownloaded.
                            ReadResponseStream(fileEncryption, initializationVector, fs, buffer, response, blockOffsetAndLength, ref bytesDownloaded);
                            var progress = (int)((double)bytesDownloaded / blob.Properties.Length * 100);
                            // raise the progress changed event
                            var eArgs = new BlobTransferProgressChangedEventArgs(bytesDownloaded, blockOffsetAndLength.Value, blob.Properties.Length, progress, _downloadSpeedCalculator.UpdateCountersAndCalculateSpeed(bytesDownloaded), uri, localFile, null);
                            OnTaskProgressChanged(eArgs);
                        }
                    }
                }
                catch (Exception ex)
                {
                    // Only WebException and ObjectDisposedException are considered
                    // retryable; anything else propagates and faults the task.
                    var webEx = ex as WebException;
                    bool ok = (webEx != null) || ex is ObjectDisposedException;
                    bool isSasException = false;
                    if (!ok)
                    {
                        throw;
                    }

                    // A 403 with a SAS factory available means the signature likely
                    // expired; allow up to MaxSasSignatureRetry refreshes per thread.
                    if (webEx != null && getSharedAccessSignature != null)
                    {
                        if (webEx.Response is HttpWebResponse)
                        {
                            var httpex = (HttpWebResponse)webEx.Response;
                            if (httpex.StatusCode == HttpStatusCode.Forbidden)
                            {
                                sasRetry++;
                                if (sasRetry > MaxSasSignatureRetry)
                                {
                                    throw;
                                }
                                isSasException = true;
                            }
                        }
                    }

                    if (isSasException)
                    {
                        // Refresh the blob reference with a new signature and retry.
                        blob = InitializeCloudBlockBlob(uri, client, retryPolicy, getSharedAccessSignature);
                    }
                    else
                    {
                        TimeSpan tm;
                        exceptionCount++;
                        exceptionPerThread++;
                        if (!retryPolicy.ShouldRetry(exceptionPerThread, 0, ex, out tm, new OperationContext()))
                        {
                            // Retries exhausted for this thread: record the failure and
                            // stop this worker (break, not throw, so siblings drain the queue).
                            lastException = new AggregateException(String.Format(CultureInfo.InvariantCulture, "Received {0} exceptions while downloading. Canceling download.", exceptionCount), ex);
                            break;
                        }
                        Thread.Sleep(tm);
                    }

                    // Add block back to queue
                    queue.Enqueue(blockOffsetAndLength);
                }
            }
        };

        // Launch threads to download chunks.
        for (int idxThread = 0; idxThread < numThreads; idxThread++)
        {
            tasks.Add(Task.Factory.StartNew(action));
        }

        if (cancellationToken.IsCancellationRequested)
        {
            TaskCompletedCallback(true, lastException, BlobTransferType.Download, localFile, uri);
            cancellationToken.ThrowIfCancellationRequested();
        }

        Task.WaitAll(tasks.ToArray(), cancellationToken);
        TaskCompletedCallback(cancellationToken.IsCancellationRequested, lastException, BlobTransferType.Download, localFile, uri);
    }
}
/// <summary>
/// Uploads a local file to a block blob, optionally encrypting each block, using
/// multiple worker threads that drain a shared queue of (block id, block length)
/// work items. This overload has no shared-access-signature refresh path; a 403
/// is treated like any other storage error and goes through the retry policy.
/// </summary>
/// <param name="cancellationToken">Token used to cancel the upload.</param>
/// <param name="uri">Destination blob URI.</param>
/// <param name="localFile">Path of the local file to upload.</param>
/// <param name="contentType">Content type to set on the blob.</param>
/// <param name="subFolder">Sub-folder component used when resolving the blob reference.</param>
/// <param name="fileEncryption">Optional per-file encryptor; when null, blocks are uploaded as read.</param>
/// <param name="client">Blob client used to resolve the blob.</param>
/// <param name="retryPolicy">Policy consulted before re-queuing a failed block.</param>
private void UploadFileToBlob(
    CancellationToken cancellationToken,
    Uri uri,
    string localFile,
    string contentType,
    string subFolder,
    FileEncryption fileEncryption,
    CloudBlobClient client,
    IRetryPolicy retryPolicy)
{
    //attempt to open the file first so that we throw an exception before getting into the async work
    using (new FileStream(localFile, FileMode.Open, FileAccess.Read))
    {
    }

    Exception lastException = null;
    CloudBlockBlob blob = null;

    // stats from azurescope show 10 to be an optimal number of transfer threads
    int numThreads = ParallelTransferThreadCount;
    var file = new FileInfo(localFile);
    long fileSize = file.Length;
    int maxBlockSize = GetBlockSize(fileSize);

    // Prepare a queue of blocks to be uploaded. Each queue item is a key-value pair where
    // the 'key' is block id and 'value' is the block length.
    // NOTE(review): helper name "PreapreUploadQueue" is misspelled in the project API.
    List<string> blockList;
    var queue = PreapreUploadQueue(maxBlockSize, fileSize, ref numThreads, out blockList);

    // NOTE(review): incremented from multiple worker threads without Interlocked.
    int exceptionCount = 0;
    blob = GetCloudBlockBlob(uri, client, subFolder, localFile, contentType);
    // Remove any stale blob so previously committed blocks cannot leak into this upload.
    blob.DeleteIfExists(options: new BlobRequestOptions() { RetryPolicy = retryPolicy });

    if (cancellationToken.IsCancellationRequested)
    {
        TaskCompletedCallback(true, null, BlobTransferType.Upload, localFile, uri);
        cancellationToken.ThrowIfCancellationRequested();
    }

    var options = new BlobRequestOptions
    {
        RetryPolicy = retryPolicy,
        ServerTimeout = TimeSpan.FromSeconds(90)
    };

    // Launch threads to upload blocks.
    var tasks = new List<Task>();
    long bytesSent = 0;

    // Worker body shared by all upload threads: each worker opens its own
    // FileStream (streams are not thread-safe) and pulls blocks off the queue
    // until it is empty.
    Action action = () =>
    {
        // Per-thread failure history fed to the retry policy.
        List<Exception> exceptions = new List<Exception>();

        // Test hook used by the SAS-aware overload; kept for parity.
        if (_forceSharedAccessSignatureRetry != TimeSpan.Zero)
        {
            Thread.Sleep(_forceSharedAccessSignatureRetry);
        }

        if (queue.Count > 0)
        {
            FileStream fs = null;
            try
            {
                fs = new FileStream(file.FullName, FileMode.Open, FileAccess.Read);
                KeyValuePair<int, int> blockIdAndLength;
                while (queue.TryDequeue(out blockIdAndLength))
                {
                    cancellationToken.ThrowIfCancellationRequested();
                    try
                    {
                        var buffer = new byte[blockIdAndLength.Value];
                        var binaryReader = new BinaryReader(fs);

                        // move the file system reader to the proper position
                        fs.Seek(blockIdAndLength.Key * (long)maxBlockSize, SeekOrigin.Begin);
                        int readSize = binaryReader.Read(buffer, 0, blockIdAndLength.Value);

                        if (fileEncryption != null)
                        {
                            // The encryptor is shared between threads; serialize access.
                            lock (fileEncryption)
                            {
                                using (FileEncryptionTransform encryptor = fileEncryption.GetTransform(file.Name, blockIdAndLength.Key * (long)maxBlockSize))
                                {
                                    // In-place encryption: input and output share the buffer.
                                    encryptor.TransformBlock(buffer, 0, readSize, buffer, 0);
                                }
                            }
                        }

                        using (var ms = new MemoryStream(buffer, 0, blockIdAndLength.Value))
                        {
                            // Block ids must be Base64 and equal length; zero-pad the index.
                            string blockIdString = Convert.ToBase64String(Encoding.ASCII.GetBytes(string.Format(CultureInfo.InvariantCulture, "BlockId{0}", blockIdAndLength.Key.ToString("0000000", CultureInfo.InvariantCulture))));
                            // MD5 lets the service verify block integrity on receipt.
                            string blockHash = GetMd5HashFromStream(buffer);
                            if (blob != null)
                                blob.PutBlock(blockIdString, ms, blockHash, options: options);
                        }

                        Interlocked.Add(ref bytesSent, blockIdAndLength.Value);
                        var progress = (int)((double)bytesSent / file.Length * 100);
                        var eArgs = new BlobTransferProgressChangedEventArgs(bytesSent, blockIdAndLength.Value, file.Length, progress, _uploadSpeedCalculator.UpdateCountersAndCalculateSpeed(bytesSent), uri, localFile, null);
                        OnTaskProgressChanged(eArgs);
                    }
                    catch (StorageException ex)
                    {
                        TimeSpan tm;
                        exceptionCount++;
                        exceptions.Add(ex);
                        if (!retryPolicy.ShouldRetry(exceptions.Count, ex.RequestInformation.HttpStatusCode, ex, out tm, new OperationContext()))
                        {
                            lastException = new AggregateException(String.Format(CultureInfo.InvariantCulture, "Received {0} exceptions while uploading. Canceling upload.", exceptions.Count), exceptions);
                            throw lastException;
                        }
                        Thread.Sleep(tm);
                        // Put the failed block back so this (or another) thread retries it.
                        queue.Enqueue(blockIdAndLength);
                    }
                    catch (IOException ex)
                    {
                        TimeSpan tm;
                        exceptionCount++;
                        exceptions.Add(ex);
                        if (!retryPolicy.ShouldRetry(exceptions.Count, 0, ex, out tm, new OperationContext()))
                        {
                            lastException = new AggregateException(String.Format(CultureInfo.InvariantCulture, "Received {0} exceptions while reading file {1} @ location {2} to be uploaded. Canceling upload.", exceptions.Count, file.Name, blockIdAndLength.Key * (long)maxBlockSize), exceptions);
                            throw lastException;
                        }

                        // dispose existing file stream
                        if (fs != null)
                        {
                            fs.Close();
                        }

                        Thread.Sleep(tm);

                        // try to reopen the file stream again
                        fs = new FileStream(file.FullName, FileMode.Open, FileAccess.Read);
                        queue.Enqueue(blockIdAndLength);
                    }
                }
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
            }
        }
    };

    for (int idxThread = 0; idxThread < numThreads; idxThread++)
    {
        tasks.Add(Task.Factory.StartNew(
            action,
            cancellationToken,
            TaskCreationOptions.AttachedToParent,
            TaskScheduler.Current));
    }

    if (cancellationToken.IsCancellationRequested)
    {
        TaskCompletedCallback(true, lastException, BlobTransferType.Upload, localFile, uri);
        cancellationToken.ThrowIfCancellationRequested();
    }

    // Once every block is staged, commit the block list to materialize the blob.
    Task.Factory.ContinueWhenAll(tasks.ToArray(), (Task[] result) =>
    {
        // If any worker faulted, skip the commit; lastException carries the failure.
        if (result.Any(t => t.IsFaulted))
        {
            return;
        }
        blob.PutBlockList(blockList, options: options);
    }, TaskContinuationOptions.None).Wait(cancellationToken);

    TaskCompletedCallback(cancellationToken.IsCancellationRequested, lastException, BlobTransferType.Upload, localFile, uri);
}
/// <summary>
/// Downloads a block blob to a local file using multiple worker threads that
/// drain a shared queue of (offset, length) ranges, optionally decrypting the
/// stream. This overload has no shared-access-signature factory; on HTTP 403
/// it re-initializes the blob reference with the same client and retries.
/// </summary>
/// <param name="uri">Source blob URI.</param>
/// <param name="localFile">Destination path on the local file system.</param>
/// <param name="fileEncryption">Optional decryptor applied while writing; null for plain downloads.</param>
/// <param name="initializationVector">IV forwarded to <c>ReadResponseStream</c> for decryption.</param>
/// <param name="client">Blob client used to resolve the blob.</param>
/// <param name="cancellationToken">Token used to cancel the download.</param>
/// <param name="retryPolicy">Policy consulted before re-queuing a failed range.</param>
private void DownloadFileFromBlob(
    Uri uri,
    string localFile,
    FileEncryption fileEncryption,
    ulong initializationVector,
    CloudBlobClient client,
    CancellationToken cancellationToken,
    IRetryPolicy retryPolicy)
{
    int numThreads = ParallelTransferThreadCount;
    // Shared failure history; mutated by several worker threads, so every
    // access is serialized by locking the list itself (the original added to
    // this List<T> concurrently without synchronization).
    List<Exception> exceptions = new List<Exception>();
    AggregateException aggregateException = null;
    long bytesDownloaded = 0;

    CloudBlockBlob blob = InitializeCloudBlockBlob(uri, client, retryPolicy);
    long blobLength = blob.Properties.Length;
    int bufferLength = GetBlockSize(blobLength);

    // Partition the blob into (offset, length) ranges shared by all workers.
    var queue = PrepareDownloadQueue(blobLength, bufferLength, ref numThreads);

    if (cancellationToken.IsCancellationRequested)
    {
        TaskCompletedCallback(true, null, BlobTransferType.Download, localFile, uri);
        cancellationToken.ThrowIfCancellationRequested();
    }

    // FileShare.Read allows other readers (e.g. progress inspection) while we write.
    using (var fs = new FileStream(localFile, FileMode.OpenOrCreate, FileAccess.Write, FileShare.Read))
    {
        var tasks = new List<Task>();

        // Worker body shared by all download threads.
        Action action = () =>
        {
            KeyValuePair<long, int> blockOffsetAndLength;
            int exceptionPerThread = 0;

            // A buffer to fill per read request.
            var buffer = new byte[bufferLength];

            if (_forceSharedAccessSignatureRetry != TimeSpan.Zero)
            {
                // The following sleep is for unit test purpose and we will force the shared access signature to expire and hit retry code path
                Thread.Sleep(_forceSharedAccessSignatureRetry);
            }

            while (queue.TryDequeue(out blockOffsetAndLength))
            {
                if (cancellationToken.IsCancellationRequested)
                {
                    break;
                }
                try
                {
                    var blobGetRequest = BlobGetRequest(blockOffsetAndLength, blob);
                    using (var response = blobGetRequest.GetResponse() as HttpWebResponse)
                    {
                        if (response != null)
                        {
                            // Writes the range into fs (decrypting if configured) and
                            // advances bytesDownloaded.
                            ReadResponseStream(fileEncryption, initializationVector, fs, buffer, response, blockOffsetAndLength, ref bytesDownloaded);
                            var progress = (int)((double)bytesDownloaded / blob.Properties.Length * 100);
                            // raise the progress changed event
                            var eArgs = new BlobTransferProgressChangedEventArgs(bytesDownloaded, blockOffsetAndLength.Value, blob.Properties.Length, progress, _downloadSpeedCalculator.UpdateCountersAndCalculateSpeed(bytesDownloaded), uri, localFile, null);
                            OnTaskProgressChanged(eArgs);
                        }
                    }
                }
                catch (Exception ex)
                {
                    // Only WebException and ObjectDisposedException are considered
                    // retryable; anything else propagates and faults the task.
                    var webEx = ex as WebException;
                    bool ok = (webEx != null) || ex is ObjectDisposedException;
                    if (!ok)
                    {
                        throw;
                    }

                    if (webEx != null)
                    {
                        if (webEx.Response is HttpWebResponse)
                        {
                            var httpex = (HttpWebResponse)webEx.Response;
                            if (httpex.StatusCode == HttpStatusCode.Forbidden)
                            {
                                // BUG FIX: the original passed null for the client here,
                                // discarding the caller-supplied CloudBlobClient on
                                // re-initialization (the SAS-aware overload passes client).
                                blob = InitializeCloudBlockBlob(uri, client, retryPolicy);
                            }
                        }
                    }

                    TimeSpan tm;
                    exceptionPerThread++;
                    lock (exceptions)
                    {
                        exceptions.Add(ex);
                    }
                    if (!retryPolicy.ShouldRetry(exceptionPerThread, 0, ex, out tm, new OperationContext()))
                    {
                        // Snapshot the shared list under the lock so the aggregate's
                        // inner-exception copy is taken without a concurrent writer.
                        lock (exceptions)
                        {
                            aggregateException = new AggregateException(String.Format(CultureInfo.InvariantCulture, "Received {0} exceptions while downloading. Canceling download.", exceptions.Count), exceptions);
                        }
                        throw aggregateException;
                    }
                    Thread.Sleep(tm);

                    // Add block back to queue
                    queue.Enqueue(blockOffsetAndLength);
                }
            }
        };

        // Launch threads to download chunks.
        for (int idxThread = 0; idxThread < numThreads; idxThread++)
        {
            tasks.Add(Task.Factory.StartNew(action));
        }

        if (cancellationToken.IsCancellationRequested)
        {
            TaskCompletedCallback(true, aggregateException, BlobTransferType.Download, localFile, uri);
            cancellationToken.ThrowIfCancellationRequested();
        }

        Task.WaitAll(tasks.ToArray(), cancellationToken);
        TaskCompletedCallback(cancellationToken.IsCancellationRequested, aggregateException, BlobTransferType.Download, localFile, uri);
    }
}
/// <summary>
/// Occurs when the progress of a blob transfer operation changes.
/// </summary>
/// <param name="e">A <see cref="BlobTransferProgressChangedEventArgs"/> that contains the progress information.</param>
protected virtual void OnTaskProgressChanged(BlobTransferProgressChangedEventArgs e)
{
    // Null-conditional invoke reads the delegate once; the original's
    // separate null check and call could race with an unsubscribe on
    // another thread and throw NullReferenceException.
    TransferProgressChanged?.Invoke(this, e);
}
/// <summary>
/// Initializes a new instance of the <see cref="WamsUploadProgressInfo"/> class,
/// pairing the asset's moniker with the raw transfer-progress payload.
/// </summary>
/// <param name="assetMoniker">Identifier of the asset the progress refers to.</param>
/// <param name="data">Progress data reported by the blob transfer client.</param>
public WamsUploadProgressInfo(Guid assetMoniker, BlobTransferProgressChangedEventArgs data)
{
    // Plain property assignments; the two are independent of each other.
    this.Data = data;
    this.AssetMoniker = assetMoniker;
}
/// <summary>
/// Console progress reporter for blob uploads: prints the current percentage
/// and local file path on every progress notification.
/// </summary>
/// <param name="sender">The transfer client raising the event (unused).</param>
/// <param name="e">Progress data for the file being uploaded.</param>
private static void blobTransferClient_TransferProgressChanged(object sender, BlobTransferProgressChangedEventArgs e)
{
    // Fixed user-facing typo: "competed" -> "completed".
    Console.WriteLine("{0}% upload completed for {1}.", e.ProgressPercentage, e.LocalFile);
}
/// <summary>
/// Forwards the transfer's percentage to the registered progress callback,
/// if any.
/// </summary>
/// <param name="sender">The transfer client raising the event (unused).</param>
/// <param name="e">Progress data; only <c>ProgressPercentage</c> is forwarded.</param>
private void OnBlobTransferProgressChanged(object sender, BlobTransferProgressChangedEventArgs e)
{
    // Null-conditional invoke reads the delegate field once; the original's
    // separate null check and Invoke could race with a concurrent reassignment
    // of onProgress and throw NullReferenceException.
    this.onProgress?.Invoke(e.ProgressPercentage);
}
/// <summary>
/// Verifies that when the BlobTransferClient raises TransferProgressChanged,
/// the uploader's progress callback is invoked with the event's percentage.
/// Uses Microsoft Fakes stubs/shims to intercept Media Services and transfer
/// client construction, and captures the handler the uploader subscribes so
/// the event can be raised manually after Start().
/// </summary>
public void WhenBlobTransferProgressChangedEventIsRaisedThenProgressCallbackIsCalled()
{
    const int Percentage = 10;
    var progressCalled = false;
    var providedPercentage = -1;
    // Will hold the handler the uploader attaches to TransferProgressChanged.
    EventHandler<BlobTransferProgressChangedEventArgs> blobTransferProgressHandler = null;

    var stubAsset = new StubIAsset { NameGet = () => "test" };
    var stubAssetFile = new StubIAssetFile();
    var stubAccessPolicy = new StubIAccessPolicy();
    var stubLocator = new StubILocator();

    using (ShimsContext.Create())
    {
        // Stub out the Media Services object graph so no real service calls occur.
        var stubAssets = new StubAssetBaseCollection { CreateStringAssetCreationOptions = (name, options) => stubAsset };
        var stubAssetsFiles = new StubAssetFileBaseCollection { CreateString = path => stubAssetFile };
        stubAsset.AssetFilesGet = () => stubAssetsFiles;
        var accessPolicies = new ShimAccessPolicyBaseCollection { CreateStringTimeSpanAccessPermissions = (name, timesSpan, accessPermissions) => stubAccessPolicy };
        var locators = new ShimLocatorBaseCollection { CreateSasLocatorIAssetIAccessPolicy = (asset, acccessPolicy) => stubLocator };

        ShimPath.GetFileNameString = fileName => string.Empty;
        // Neutralize the BlobTransferClient constructor and make the upload a no-op task.
        ShimBlobTransferClient.Constructor = client => { };
        stubAssetFile.UploadAsyncStringBlobTransferClientILocatorCancellationToken = (filePath, blobTransferClient, locator, cancellationToken) => Task.Delay(0);

        var context = new ShimCloudMediaContext
        {
            AssetsGet = () => stubAssets,
            AccessPoliciesGet = () => accessPolicies,
            LocatorsGet = () => locators,
        };
        Func<CloudMediaContext> createContext = () => context;

        // Capture the event handler the uploader registers, instead of wiring a real event.
        ShimBlobTransferClient.AllInstances.TransferProgressChangedAddEventHandlerOfBlobTransferProgressChangedEventArgs = (client, handler) => { blobTransferProgressHandler = handler; };

        var uploader = new AzureMediaServicesUploader("myVideo", @"C:\videos\myvideo.mp4", createContext);

        // Record what the uploader reports through its progress callback.
        Action<int> onProgress = progressPercentage =>
        {
            progressCalled = true;
            providedPercentage = progressPercentage;
        };
        uploader.On(progress: onProgress);

        uploader.Start();
    }

    // Raise the captured handler manually with a known percentage.
    var args = new BlobTransferProgressChangedEventArgs(0, 0, 0, Percentage, 0, new Uri("http://myvideo"), @"C:\videos\myvideo.mp4", null);
    blobTransferProgressHandler.Invoke(null, args);

    Assert.IsTrue(progressCalled);
    Assert.AreEqual(Percentage, providedPercentage);
}
/// <summary>
/// Bridges a blob transfer progress notification to the
/// <see cref="DownloadProgressChanged"/> event, converting the payload type.
/// </summary>
/// <param name="sender">The originating transfer client (unused).</param>
/// <param name="e">Progress data for the blob currently being downloaded.</param>
private void OnDownloadBlobTransferProgressChanged(object sender, BlobTransferProgressChangedEventArgs e)
{
    // Capture the delegate once up front; a concurrent unsubscribe after this
    // point cannot affect the invocation.
    EventHandler<DownloadProgressChangedEventArgs> handler = this.DownloadProgressChanged;
    if (handler == null)
    {
        return;
    }

    handler(this, new DownloadProgressChangedEventArgs(e.BytesTransferred, e.TotalBytesToTransfer));
}
/// <summary>
/// Console progress reporter for blob uploads; suppresses very early
/// notifications to avoid noisy output while upload tasks are still being added.
/// </summary>
/// <param name="sender">The transfer client raising the event (unused).</param>
/// <param name="e">Progress data for the file being uploaded.</param>
static void blobTransferClient_TransferProgressChanged(object sender, BlobTransferProgressChangedEventArgs e)
{
    if (e.ProgressPercentage > 4) // Avoid startup jitter, as the upload tasks are added.
    {
        // Fixed user-facing typo: "competed" -> "completed".
        Console.WriteLine("{0}% upload completed for {1}.", e.ProgressPercentage, e.LocalFile);
    }
}