/// <summary>
/// Uploads the local file <paramref name="filename"/> to <paramref name="blobRef"/> in
/// parallel blocks, resuming from a previous interrupted upload if a progress log exists.
/// Deletes the progress log once the upload completes successfully.
/// </summary>
/// <param name="blobRef">Target block blob.</param>
/// <param name="filename">Path of the local file to upload.</param>
/// <param name="options">
/// Request options; when null, defaults are built from the blob's service client.
/// </param>
public static void ParallelUpload(this CloudBlockBlob blobRef, string filename, BlobRequestOptions options)
{
    if (options == null)
    {
        options = new BlobRequestOptions
        {
            ServerTimeout = blobRef.ServiceClient.ServerTimeout
        };
    }

    // Replay the progress log (if any) so a previous partial upload is resumed
    // rather than restarted.
    UploadInfo uploadInfo = UploadInfo.LoadByUploadFilename(filename);

    using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
    {
        blobRef.ParallelUpload(fs, uploadInfo, options);
    }

    // Upload completed - the resume log is no longer needed.
    if (File.Exists(uploadInfo.LogFilename))
    {
        File.Delete(uploadInfo.LogFilename);
    }

    Console.WriteLine("\nUpload completed.");
}
/// <summary>
/// Creates an <c>UploadInfo</c> for <paramref name="uploadFilename"/>, replaying any
/// existing progress log to recover the contiguous prefix of already-uploaded blocks,
/// and rewrites the log so it contains exactly that prefix.
/// </summary>
/// <param name="uploadFilename">Path of the file being uploaded.</param>
/// <returns>An <c>UploadInfo</c> whose <c>BlockInfoList</c> holds the resumable blocks.</returns>
public static UploadInfo LoadByUploadFilename(string uploadFilename)
{
    UploadInfo uploadInfo = new UploadInfo(uploadFilename);
    if (File.Exists(uploadInfo.LogFilename))
    {
        // Read previously uploaded blocks, keyed and ordered by their position.
        SortedDictionary<int, BlockInfo> sortedInfo = new SortedDictionary<int, BlockInfo>();
        string line;
        using (StreamReader sr = new StreamReader(uploadInfo.LogFilename))
        {
            while ((line = sr.ReadLine()) != null)
            {
                string[] data = line.Split('|');
                int orderPosition;

                // Skip malformed entries (e.g. a line truncated by a crash mid-write)
                // instead of throwing; any resulting gap is handled by the
                // contiguity scan below.
                if (data.Length < 2 || !Int32.TryParse(data[0], out orderPosition))
                {
                    continue;
                }

                // Indexer assignment tolerates a duplicated position in the log,
                // where Add would throw ArgumentException and abort the resume.
                sortedInfo[orderPosition] = new BlockInfo
                {
                    OrderPosition = orderPosition,
                    BlockId = data[1]
                };
            }
        }

        // Keep only the contiguous run of blocks starting at position 0 and
        // overwrite the log so it reflects exactly that list. (The outer
        // File.Exists guard makes a second existence check redundant.)
        File.Delete(uploadInfo.LogFilename);
        for (int i = 0; i < sortedInfo.Count; i++)
        {
            if (!sortedInfo.ContainsKey(i))
            {
                break;
            }
            uploadInfo.BlockInfoList.Add(sortedInfo[i]);
            LogUploadProgress(uploadInfo.LogFilename, sortedInfo[i]);
        }
    }
    return uploadInfo;
}
/// <summary>
/// Core resumable parallel upload: dispenses fixed-size blocks from
/// <paramref name="sourceStream"/> sequentially, uploads them concurrently via the
/// APM Begin/EndPutBlock pattern, maintains a running MD5 of the whole blob, logs
/// each completed block for resume, and finally commits the block list.
/// </summary>
/// <param name="blobRef">Target block blob.</param>
/// <param name="sourceStream">Seekable source; position advances as blocks are dispensed.</param>
/// <param name="uploadInfo">Resume state: previously uploaded blocks and the block-id sequence.</param>
/// <param name="options">Request options; ServerTimeout (if set) bounds each wait.</param>
private static void ParallelUpload(this CloudBlockBlob blobRef, Stream sourceStream, UploadInfo uploadInfo, BlobRequestOptions options)
{
    List<IAsyncResult> asyncResults = new List<IAsyncResult>();
    List<BlockInfo> blockInfoList = uploadInfo.BlockInfoList;

    // NOTE(review): the single-blob upload threshold is used as the per-block size
    // here — confirm that is the intended block-size setting.
    int blockSize = (int)blobRef.ServiceClient.SingleBlobUploadThresholdInBytes;
    bool moreToUpload = (sourceStream.Length - sourceStream.Position > 0);

    // Number of blocks already uploaded in a previous run (possibly zero).
    // (sic: "Possition" kept as-is — it is a local name only.)
    int currentBlockPossition = blockInfoList.Count;
    long totalBytes = sourceStream.Length;
    long uploadedBytes = 0;

    using (MD5 fullBlobMD5 = MD5.Create())
    {
        // Resuming: re-read the already-uploaded prefix purely to rebuild the
        // running MD5 state; those bytes are NOT re-uploaded.
        if (currentBlockPossition > 0)
        {
            for (int i = 0; i < currentBlockPossition; i++)
            {
                int totalCopied = 0, numRead = 0;
                int blockBufferSize = (int)Math.Min(blockSize, sourceStream.Length - sourceStream.Position);
                byte[] buffer = new byte[blockBufferSize];
                // Loop until the buffer is full or the stream is exhausted —
                // Read may return fewer bytes than requested.
                do
                {
                    numRead = sourceStream.Read(buffer, totalCopied, blockBufferSize - totalCopied);
                    totalCopied += numRead;
                }while (numRead != 0 && totalCopied < blockBufferSize);
                fullBlobMD5.TransformBlock(buffer, 0, totalCopied, null, 0);
            }
            uploadedBytes = sourceStream.Position;
        }

        do
        {
            int currentPendingTasks = asyncResults.Count;
            // Top up the in-flight set to ParallelOperationThreadCount operations.
            for (int i = currentPendingTasks; i < blobRef.ServiceClient.ParallelOperationThreadCount && moreToUpload; i++)
            {
                // Step 1: Create block streams in a serial order as stream can only be read sequentially
                string blockId = null;

                // Dispense Block Stream
                int totalCopied = 0, numRead = 0;
                MemoryStream blockAsStream = null;
                uploadInfo.BlockIdSequenceNumber++;
                // Last block may be shorter than blockSize.
                int blockBufferSize = (int)Math.Min(blockSize, sourceStream.Length - sourceStream.Position);
                byte[] buffer = new byte[blockBufferSize];
                // The MemoryStream wraps the buffer directly; the loop below fills it in place.
                blockAsStream = new MemoryStream(buffer);
                do
                {
                    numRead = sourceStream.Read(buffer, totalCopied, blockBufferSize - totalCopied);
                    totalCopied += numRead;
                }while (numRead != 0 && totalCopied < blockBufferSize);

                // Update Running MD5 Hashes
                fullBlobMD5.TransformBlock(buffer, 0, totalCopied, null, 0);

                blockId = GenerateBase64BlockID(uploadInfo.BlockIdSequenceNumber);

                // Step 2: Fire off consumer tasks that may finish on other threads.
                // UploadState carries the stream (for disposal) and the BlockInfo
                // (for resume logging) through to completion handling.
                BlockInfo blockInfo = new BlockInfo { OrderPosition = currentBlockPossition++, BlockId = blockId };
                blockInfoList.Add(blockInfo);
                ICancellableAsyncResult asyncresult = blobRef.BeginPutBlock(blockId, blockAsStream, null, null, options, null, null, new UploadState { BlockAsStream = blockAsStream, BlockInfo = blockInfo });
                asyncResults.Add(asyncresult);

                if (sourceStream.Length == sourceStream.Position)
                {
                    // No more upload tasks
                    moreToUpload = false;
                }
            }

            // Step 3: Wait for 1 or more put blocks to finish and finish operations.
            // NOTE(review): WaitHandle.WaitAny supports at most 64 handles, so
            // ParallelOperationThreadCount must stay below that — confirm.
            if (asyncResults.Count > 0)
            {
                int waitTimeout = options.ServerTimeout.HasValue ? (int)Math.Ceiling(options.ServerTimeout.Value.TotalMilliseconds) : Timeout.Infinite;
                int waitResult = WaitHandle.WaitAny(asyncResults.Select(result => result.AsyncWaitHandle).ToArray(), waitTimeout);
                if (waitResult == WaitHandle.WaitTimeout)
                {
                    // Only reachable when ServerTimeout.HasValue, so .Value is safe here.
                    throw new TimeoutException(String.Format("ParallelUpload Failed with timeout = {0}", options.ServerTimeout.Value));
                }

                // Optimize away any other completed operations: harvest every
                // finished result, not just the one WaitAny signalled.
                for (int index = 0; index < asyncResults.Count; index++)
                {
                    IAsyncResult result = asyncResults[index];
                    if (result.IsCompleted)
                    {
                        // Dispose of memory stream
                        var uploadState = result.AsyncState as UploadState;
                        uploadedBytes += uploadState.BlockAsStream.Length;
                        (uploadState.BlockAsStream as IDisposable).Dispose();
                        asyncResults.RemoveAt(index);
                        blobRef.EndPutBlock(result);
                        // Step back so the element shifted into this slot is examined.
                        index--;

                        // log uploaded block so an interrupted run can resume
                        UploadInfo.LogUploadProgress(uploadInfo.LogFilename, uploadState.BlockInfo);

                        // output progress (backspaces overwrite the previous " NN%")
                        Console.Write("\b\b\b\b");
                        Console.Write(" {0}%", (uploadedBytes * 100) / (totalBytes));
                    }
                }
            }
        }while (moreToUpload || asyncResults.Count != 0);

        // Step 4: Calculate MD5 and do a PutBlockList to commit the blob
        fullBlobMD5.TransformFinalBlock(new byte[0], 0, 0);
        byte[] blobHashBytes = fullBlobMD5.Hash;
        string blobHash = Convert.ToBase64String(blobHashBytes);
        blobRef.Properties.ContentMD5 = blobHash;
        // Commit in OrderPosition order so the blob's bytes match the source file.
        List<string> blockList = blockInfoList.OrderBy(b => b.OrderPosition).Select(b => b.BlockId).ToList();
        blobRef.PutBlockList(blockList, options: options);
    }
}