/// <summary>
/// Uploads one block of the memory-mapped file to the blob using the
/// AsyncEnumerator APM pattern: begin the put, yield until the completion
/// callback fires, then end the operation. The view stream is disposed when
/// the iterator finishes.
/// </summary>
/// <param name="ae">Async enumerator driving the APM continuation.</param>
/// <param name="mmf">Memory-mapped file holding the blob contents.</param>
/// <param name="blob">Target block blob.</param>
/// <param name="blockNumber">Zero-based index of the block to upload; also used (base64-encoded) as the block id.</param>
/// <param name="length">Number of bytes in this block.</param>
private IEnumerator<Int32> UploadBlock(AsyncEnumerator ae, MemoryMappedFile mmf, CloudBlockBlob blob, Int32 blockNumber, Int64 length)
{
    // Cast before multiplying: if m_blockSize is a 32-bit value, the original
    // blockNumber * m_blockSize overflowed in Int32 arithmetic once the block
    // offset exceeded 2 GB, producing a wrong (or negative) view offset.
    // CreateViewStream takes an Int64 offset, so widen first.
    using (var stream = mmf.CreateViewStream((Int64)blockNumber * m_blockSize, length, MemoryMappedFileAccess.Read))
    {
        blob.BeginPutBlock(blockNumber.ToBase64(), stream, null, ae.End(), null);
        yield return (1); // one outstanding async operation to wait for
        blob.EndPutBlock(ae.DequeueAsyncResult());
    }
}
/// <summary>
/// Verifies that a MultiBufferMemoryStream handed to the APM PutBlock path is
/// left open after the operation completes (a disposed stream reports CanSeek == false).
/// </summary>
public void EnsureMultiBufferMemoryStreamIsNotClosedAPM()
{
    byte[] data = GetRandomBuffer(1 * 1024 * 1024);
    CloudBlobClient client = GenerateCloudBlobClient();
    CloudBlobContainer testContainer = client.GetContainerReference(Guid.NewGuid().ToString("N"));
    try
    {
        testContainer.Create();
        CloudBlockBlob blockBlob = testContainer.GetBlockBlobReference("blob1");

        using (MultiBufferMemoryStream source = new MultiBufferMemoryStream(null))
        {
            // Fill the stream and rewind so PutBlock reads it from the start.
            source.Write(data, 0, data.Length);
            source.Seek(0, SeekOrigin.Begin);

            using (AutoResetEvent completed = new AutoResetEvent(false))
            {
                string blockId = Convert.ToBase64String(Guid.NewGuid().ToByteArray());
                ICancellableAsyncResult asyncResult = blockBlob.BeginPutBlock(
                    blockId,
                    source,
                    null,
                    ar => completed.Set(),
                    null);
                completed.WaitOne();
                blockBlob.EndPutBlock(asyncResult);
            }

            // The stream must still be usable after the APM call completes.
            Assert.IsTrue(source.CanSeek);
        }
    }
    finally
    {
        testContainer.DeleteIfExists();
    }
}
/// <summary>
/// Uploads <paramref name="sourceStream"/> to the blob as a sequence of blocks via the
/// APM Begin/EndPutBlock pattern, keeping up to ServiceClient.ParallelOperationThreadCount
/// puts in flight at once, then commits the block list with the blob's full MD5 set.
/// Resumable: blocks already recorded in uploadInfo.BlockInfoList are skipped — the
/// stream is read past them only to rebuild the running MD5 hash.
/// </summary>
/// <param name="blobRef">Target block blob (extension-method receiver).</param>
/// <param name="sourceStream">Seekable stream positioned at the resume point; read strictly sequentially.</param>
/// <param name="uploadInfo">Resume state: blocks uploaded so far, block-id sequence number, progress-log file name.</param>
/// <param name="options">Request options; ServerTimeout, when set, bounds each wait on pending puts.</param>
private static void ParallelUpload(this CloudBlockBlob blobRef, Stream sourceStream, UploadInfo uploadInfo, BlobRequestOptions options)
{
    List<IAsyncResult> asyncResults = new List<IAsyncResult>();
    List<BlockInfo> blockInfoList = uploadInfo.BlockInfoList;

    // set stream position based on read uploadInfo
    // NOTE(review): the block size is taken from SingleBlobUploadThresholdInBytes —
    // presumably so a resumed upload reproduces the same block boundaries; confirm.
    int blockSize = (int)blobRef.ServiceClient.SingleBlobUploadThresholdInBytes;
    bool moreToUpload = (sourceStream.Length - sourceStream.Position > 0);
    int currentBlockPossition = blockInfoList.Count;
    long totalBytes = sourceStream.Length;
    long uploadedBytes = 0;

    using (MD5 fullBlobMD5 = MD5.Create())
    {
        // re-create file hash if starting again: read (and discard) the blocks that were
        // already uploaded so the running MD5 still covers the whole blob.
        if (currentBlockPossition > 0)
        {
            for (int i = 0; i < currentBlockPossition; i++)
            {
                int totalCopied = 0, numRead = 0;
                int blockBufferSize = (int)Math.Min(blockSize, sourceStream.Length - sourceStream.Position);
                byte[] buffer = new byte[blockBufferSize];
                // Loop because Stream.Read may return fewer bytes than requested.
                do
                {
                    numRead = sourceStream.Read(buffer, totalCopied, blockBufferSize - totalCopied);
                    totalCopied += numRead;
                } while (numRead != 0 && totalCopied < blockBufferSize);
                fullBlobMD5.TransformBlock(buffer, 0, totalCopied, null, 0);
            }
            uploadedBytes = sourceStream.Position;
        }

        do
        {
            int currentPendingTasks = asyncResults.Count;
            // Step 1: Create block streams in a serial order as stream can only be read sequentially
            for (int i = currentPendingTasks; i < blobRef.ServiceClient.ParallelOperationThreadCount && moreToUpload; i++)
            {
                string blockId = null;

                // Dispense Block Stream: copy the next block into a private buffer so the
                // async put can read it while the source stream advances past it.
                int totalCopied = 0, numRead = 0;
                MemoryStream blockAsStream = null;
                uploadInfo.BlockIdSequenceNumber++;
                int blockBufferSize = (int)Math.Min(blockSize, sourceStream.Length - sourceStream.Position);
                byte[] buffer = new byte[blockBufferSize];
                blockAsStream = new MemoryStream(buffer);
                do
                {
                    numRead = sourceStream.Read(buffer, totalCopied, blockBufferSize - totalCopied);
                    totalCopied += numRead;
                } while (numRead != 0 && totalCopied < blockBufferSize);

                // Update Running MD5 Hashes
                fullBlobMD5.TransformBlock(buffer, 0, totalCopied, null, 0);

                blockId = GenerateBase64BlockID(uploadInfo.BlockIdSequenceNumber);

                // Step 2: Fire off consumer tasks that may finish on other threads
                BlockInfo blockInfo = new BlockInfo
                {
                    OrderPosition = currentBlockPossition++,
                    BlockId = blockId
                };
                blockInfoList.Add(blockInfo);
                // UploadState carries the stream and block metadata through to the harvest
                // phase via IAsyncResult.AsyncState.
                ICancellableAsyncResult asyncresult = blobRef.BeginPutBlock(blockId, blockAsStream, null, null, options, null, null, new UploadState { BlockAsStream = blockAsStream, BlockInfo = blockInfo });
                asyncResults.Add(asyncresult);

                if (sourceStream.Length == sourceStream.Position)
                {
                    // No more upload tasks
                    moreToUpload = false;
                }
            }

            // Step 3: Wait for 1 or more put blocks to finish and finish operations
            if (asyncResults.Count > 0)
            {
                int waitTimeout = options.ServerTimeout.HasValue ? (int)Math.Ceiling(options.ServerTimeout.Value.TotalMilliseconds) : Timeout.Infinite;
                // NOTE(review): WaitHandle.WaitAny accepts at most 64 handles, so a
                // ParallelOperationThreadCount above 64 would throw here — confirm the limit.
                int waitResult = WaitHandle.WaitAny(asyncResults.Select(result => result.AsyncWaitHandle).ToArray(), waitTimeout);
                if (waitResult == WaitHandle.WaitTimeout)
                {
                    // ServerTimeout.Value is safe here: WaitTimeout can only occur when
                    // waitTimeout was finite, i.e. ServerTimeout.HasValue was true.
                    // NOTE(review): on this path the in-flight operations and their
                    // MemoryStreams are abandoned without cancellation or disposal.
                    throw new TimeoutException(String.Format("ParallelUpload Failed with timeout = {0}", options.ServerTimeout.Value));
                }

                // Optimize away any other completed operations: harvest every finished put
                // (not just the one WaitAny signaled), freeing its buffer and logging progress.
                for (int index = 0; index < asyncResults.Count; index++)
                {
                    IAsyncResult result = asyncResults[index];
                    if (result.IsCompleted)
                    {
                        // Dispose of memory stream
                        var uploadState = result.AsyncState as UploadState;
                        uploadedBytes += uploadState.BlockAsStream.Length;
                        (uploadState.BlockAsStream as IDisposable).Dispose();
                        asyncResults.RemoveAt(index);
                        blobRef.EndPutBlock(result);
                        index--; // list shrank: revisit the slot that shifted into this index

                        // log uploaded block
                        UploadInfo.LogUploadProgress(uploadInfo.LogFilename, uploadState.BlockInfo);

                        // output progress
                        Console.Write("\b\b\b\b");
                        Console.Write(" {0}%", (uploadedBytes * 100) / (totalBytes));
                    }
                }
            }
        } while (moreToUpload || asyncResults.Count != 0);

        // Step 4: Calculate MD5 and do a PutBlockList to commit the blob
        fullBlobMD5.TransformFinalBlock(new byte[0], 0, 0);
        byte[] blobHashBytes = fullBlobMD5.Hash;
        string blobHash = Convert.ToBase64String(blobHashBytes);
        blobRef.Properties.ContentMD5 = blobHash;
        // Commit in OrderPosition order so resumed/out-of-order completions assemble correctly.
        List<string> blockList = blockInfoList.OrderBy(b => b.OrderPosition).Select(b => b.BlockId).ToList();
        blobRef.PutBlockList(blockList, options: options);
    }
}