// Uploads each part of the archive to AWS Glacier and returns the per-part tree hashes.
static List<string> UploadParts(string uploadID, AmazonGlacierClient client, string archiveToUpload)
{
    List<string> partChecksumList = new List<string>();
    long currentPosition = 0;
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(archiveToUpload).Length;

    using (FileStream fileToUpload = new FileStream(archiveToUpload, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            // Wrap the next part of the file in a bounded stream and compute its SHA-256 tree hash.
            Stream uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = vaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);

            client.UploadMultipartPart(uploadMPUrequest);
            currentPosition += uploadPartStream.Length;
        }
    }

    return partChecksumList;
}
public async Task UploadChunkAsync(UploadJob job, UploadItem item)
{
    // Create a bounded stream over the next chunk of the input stream.
    var chunkStream = GlacierUtils.CreatePartStream(item.DataStream, job.ChunkSize);
    var chunkChecksum = TreeHashGenerator.CalculateTreeHash(chunkStream);

    // Prepare the part-upload request.
    var request = new UploadMultipartPartRequest
    {
        VaultName = job.VaultName,
        Body = chunkStream,
        Checksum = chunkChecksum,
        UploadId = job.UploadId
    };

    // Set the byte range covered by this part.
    request.SetRange(job.CurrentPosition, job.CurrentPosition + chunkStream.Length - 1);

    // Upload this part.
    var response = await _client.UploadMultipartPartAsync(request);
    response.EnsureSuccess();

    // Commit progress.
    job.ChunkChecksums.Add(chunkChecksum);
    job.CurrentPosition += chunkStream.Length;
}
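The method above uploads a single chunk but does not show how the job is driven or completed. A minimal sketch of one possible driver follows, assuming the UploadJob/UploadItem shapes used above and the same initiate/complete calls that appear in the later examples; the UploadWholeItemAsync name and the 4 MB chunk size are illustrative, not part of the original code.

// Hypothetical driver for UploadChunkAsync: initiate the multipart upload, push
// chunks until the source stream is exhausted, then complete with the combined
// tree hash. Field names on UploadJob/UploadItem follow the snippet above.
public async Task<string> UploadWholeItemAsync(UploadItem item, string vaultName)
{
    var initResponse = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
    {
        VaultName = vaultName,
        ArchiveDescription = "chunked upload",
        PartSize = 4 * 1024 * 1024 // 4 MB parts (a power-of-two number of megabytes)
    });

    var job = new UploadJob
    {
        VaultName = vaultName,
        UploadId = initResponse.UploadId,
        ChunkSize = 4 * 1024 * 1024,
        CurrentPosition = 0,
        ChunkChecksums = new List<string>()
    };

    // Upload chunk by chunk until the whole stream has been read.
    while (item.DataStream.Position < item.DataStream.Length)
        await UploadChunkAsync(job, item);

    // Complete the upload; the archive checksum is the tree hash of the per-chunk hashes.
    var completeResponse = await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
    {
        VaultName = vaultName,
        UploadId = job.UploadId,
        ArchiveSize = item.DataStream.Length.ToString(),
        Checksum = TreeHashGenerator.CalculateTreeHash(job.ChunkChecksums)
    });

    return completeResponse.ArchiveId;
}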
static List<string> UploadParts(string uploadID, AmazonGlacier client)
{
    var partChecksumList = new List<string>();
    long currentPosition = 0;
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(BackupFilePath).Length;

    WriteFileUploadProgress(currentPosition, fileLength);

    using (var fileToUpload = new FileStream(BackupFilePath, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            var uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            var checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            // Upload part.
            var uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = VaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);

            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition += uploadPartStream.Length;
            WriteFileUploadProgress(currentPosition, fileLength);
        }
    }

    return partChecksumList;
}
private bool retryUpload(object f_object)
{
    try
    {
        ThreadData objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        AmazonGlacier client = objData.client;
        long currentPosition = objData.currentPosition;

        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        // For the last part the shared buffer may be larger than the remaining data;
        // the MemoryStream below is bounded to the actual part length, and SetRange
        // tells Glacier which byte range this part covers.
        int memoryBufferIndex = 0;                                     // Index into the buffer at which the stream begins.
        int memoryBufferCount = (int)objData.uploadPartStream.Length;  // Length of the stream in bytes.
        Stream uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBufferCount);

        // To ensure that part data is not corrupted in transmission, compute a SHA-256 tree
        // hash of the part and include it in the request. Upon receiving the part data,
        // Amazon Glacier also computes a SHA-256 tree hash; if the values don't match, the
        // operation fails. See "Computing Checksums" in the Glacier documentation.
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);

        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Retry Success " + Convert.ToString(mpr.ContentLength) + " bytes for Part :" + Convert.ToString(objData.currentPosition));
        return true;
    }
    catch (Exception ex)
    {
        Form1.log.Error(ex.ToString());
        return false;
    }
}
private void ThreadUpload(object f_object)
{
    ThreadData objData = null;
    AmazonGlacier client = null;
    Stream uploadPartStream = null;

    try
    {
        objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        client = objData.client;
        long currentPosition = objData.currentPosition;

        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        // For the last part the shared buffer may be larger than the remaining data;
        // the MemoryStream below is bounded to the actual part length, and SetRange
        // tells Glacier which byte range this part covers.
        int memoryBufferIndex = 0;                                     // Index into the buffer at which the stream begins.
        int memoryBufferCount = (int)objData.uploadPartStream.Length;  // Length of the stream in bytes.
        uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBufferCount);

        // To ensure that part data is not corrupted in transmission, compute a SHA-256 tree
        // hash of the part and include it in the request. Upon receiving the part data,
        // Amazon Glacier also computes a SHA-256 tree hash; if the values don't match, the
        // operation fails. See "Computing Checksums" in the Glacier documentation.
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);

        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Sent " + Convert.ToString(mpr.ContentLength) + " bytes for Part :" + Convert.ToString(objData.currentPosition));
    }
    catch (Exception e)
    {
        Form1.log.Error(e.ToString());
        Form1.log.Error(e.StackTrace);
        Form1.log.Info("Retrying Part " + Convert.ToString(objData.currentPosition));

        // Retry up to 10 times, waiting longer before each subsequent attempt.
        int attempt = 0;
        bool successfulPartUpload = false;
        while (attempt < 10 && !successfulPartUpload)
        {
            successfulPartUpload = retryUpload(f_object);
            attempt++;
            if (!successfulPartUpload)
            {
                Thread.Sleep(4000 * attempt);
            }
        }
    }
    finally
    {
        // Signal completion when the last worker finishes.
        if (Interlocked.Decrement(ref ActiveWorkerCount) <= 0)
        {
            AllWorkerCompletedEvent.Set();
        }

        uploadPartStream = null;
        f_object = null;
        objData = null;
        client = null;
    }
}
// Commented out because this would leave data in Glacier that would cost money.
//[TestMethod]
//[TestCategory("Glacier")]
public void TestMultiPartUpload()
{
    var testingVaultName = "dotnet-sdk-test" + DateTime.Now.Ticks.ToString();
    Client.CreateVault(new CreateVaultRequest()
    {
        VaultName = testingVaultName
    });

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        ArchiveDescription = "dotnet mp upload",
        PartSize = 1048576
    };
    InitiateMultipartUploadResponse initResponse = Client.InitiateMultipartUpload(initRequest);
    string uploadId = initResponse.UploadId;

    // Build a 1.5 MB in-memory archive so the upload spans more than one part.
    MemoryStream totalStream = new MemoryStream();
    for (int i = 0; i < 1048576 + 1048576 / 2; i++)
    {
        totalStream.WriteByte((byte)(i % byte.MaxValue));
    }
    totalStream.Position = 0;

    List<string> md5s = new List<string>();
    long currentPosition = 0;
    long partSize = 1048576;
    while (totalStream.Position < totalStream.Length)
    {
        Stream partStream = GlacierUtils.CreatePartStream(totalStream, partSize);
        string checkSum = TreeHashGenerator.CalculateTreeHash(partStream);
        md5s.Add(checkSum);

        UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
        {
            VaultName = testingVaultName,
            UploadId = uploadId,
            Body = partStream,
            Checksum = checkSum
        };
        partRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);
        Client.UploadMultipartPart(partRequest);
        currentPosition += partStream.Length;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        UploadId = uploadId,
        ArchiveSize = totalStream.Length.ToString(),
        Checksum = TreeHashGenerator.CalculateTreeHash(md5s)
    };
    CompleteMultipartUploadResponse compResponse = Client.CompleteMultipartUpload(compRequest);
    Assert.IsNotNull(compResponse.Location);
    Assert.IsNotNull(compResponse.Checksum);

    // Clean up: delete the archive so the test vault does not accumulate billable data.
    string archiveId = compResponse.ArchiveId;
    DeleteArchiveRequest delArchiveRequest = new DeleteArchiveRequest()
    {
        VaultName = testingVaultName,
        ArchiveId = archiveId
    };
    DeleteArchiveResponse delArchiveResponse = Client.DeleteArchive(delArchiveRequest);
}
private async Task<string> UploadArchiveAsync(Stream stream, string archiveDescription)
{
    await TestConnectionAsync();

    var streamSize = new Size(stream.Length, SizeUnit.Bytes);
    if (streamSize > TotalArchiveSizeLimit)
    {
        throw new InvalidOperationException($@"Can't upload more than 40TB to AWS Glacier, current upload size: {streamSize}");
    }

    var streamLength = streamSize.GetValue(SizeUnit.Bytes);

    try
    {
        _progress?.UploadProgress.SetTotal(streamLength);

        if (streamSize > MaxUploadArchiveSize)
        {
            // Archive is too large for a single request; use a chunked (multipart) upload.
            var partSize = GetPartSize(streamLength);
            _progress?.UploadProgress.ChangeType(UploadType.Chunked);

            var initiateResponse = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
            {
                ArchiveDescription = archiveDescription,
                VaultName = _vaultName,
                AccountId = "-",
                PartSize = partSize
            }, _cancellationToken);

            var partChecksums = new List<string>();
            var currentPosition = 0L;

            while (stream.Position < streamLength)
            {
                var partStream = GlacierUtils.CreatePartStream(stream, partSize);
                var partChecksum = TreeHashGenerator.CalculateTreeHash(partStream);
                partChecksums.Add(partChecksum);

                var uploadRequest = new UploadMultipartPartRequest
                {
                    UploadId = initiateResponse.UploadId,
                    VaultName = _vaultName,
                    AccountId = "-",
                    Body = partStream,
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    },
                    Checksum = partChecksum
                };
                uploadRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);

                await _client.UploadMultipartPartAsync(uploadRequest, _cancellationToken);
                currentPosition += partStream.Length;
            }

            var completeResponse = await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
            {
                AccountId = "-",
                VaultName = _vaultName,
                ArchiveSize = streamLength.ToString(),
                UploadId = initiateResponse.UploadId,
                Checksum = TreeHashGenerator.CalculateTreeHash(partChecksums)
            }, _cancellationToken);

            return completeResponse.ArchiveId;
        }

        // Small enough for a single-request upload.
        var response = await _client.UploadArchiveAsync(new UploadArchiveRequest
        {
            AccountId = "-",
            ArchiveDescription = archiveDescription,
            Body = stream,
            VaultName = _vaultName,
            StreamTransferProgress = (_, args) =>
            {
                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                _progress?.OnUploadProgress?.Invoke();
            },
            Checksum = TreeHashGenerator.CalculateTreeHash(stream)
        }, _cancellationToken);

        return response.ArchiveId;
    }
    finally
    {
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}
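GetPartSize is called above but its body is not shown. A minimal sketch of a possible implementation, assuming the standard Glacier multipart constraints (part sizes are a power of two between 1 MB and 4 GB, and an upload may contain at most 10,000 parts); the helper below is illustrative, not the original code.

// Hypothetical GetPartSize helper: start at the 1 MB minimum and double the part
// size until the archive fits within Glacier's 10,000-part limit, capping at 4 GB.
private static long GetPartSize(long streamLength)
{
    const long minPartSize = 1024L * 1024;             // 1 MB, smallest allowed part
    const long maxPartSize = 4L * 1024 * 1024 * 1024;  // 4 GB, largest allowed part
    const int maxParts = 10_000;                       // Glacier multipart upload limit

    var partSize = minPartSize;
    while (partSize < maxPartSize && streamLength > partSize * maxParts)
        partSize *= 2;

    return partSize;
}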