/// <summary>
/// Uploads the file at <paramref name="archiveToUpload"/> to AWS Glacier in parts of
/// <c>partSize</c> bytes and returns the SHA-256 tree hash of each part, in upload
/// order (the caller needs these to complete the multipart upload).
/// </summary>
/// <param name="uploadID">Upload id returned by InitiateMultipartUpload.</param>
/// <param name="client">Configured Glacier client used to upload each part.</param>
/// <param name="archiveToUpload">Path of the local file to upload.</param>
/// <returns>Per-part tree-hash checksums, one per uploaded part, in part order.</returns>
static List<string> UploadParts(string uploadID, AmazonGlacierClient client, string archiveToUpload)
{
    var partChecksumList = new List<string>();
    long currentPosition = 0;
    long fileLength = new FileInfo(archiveToUpload).Length;

    using (var fileToUpload = new FileStream(archiveToUpload, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            // CreatePartStream wraps the next (up to) partSize bytes of the file,
            // advancing fileToUpload.Position as a side effect.
            Stream uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            var uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = vaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            // Glacier needs the inclusive byte range this part occupies in the archive.
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);

            client.UploadMultipartPart(uploadMPUrequest);
            currentPosition += uploadPartStream.Length;
        }
    }

    return partChecksumList;
}
/// <summary>
/// Uploads the next chunk of <paramref name="item"/>'s data stream as one part of the
/// multipart upload described by <paramref name="job"/>. Progress (checksum list and
/// current position) is committed to the job only after the upload succeeds.
/// </summary>
public async Task UploadChunkAsync(UploadJob job, UploadItem item)
{
    // Wrap the next ChunkSize bytes of the input stream as this part's body.
    var partBody = GlacierUtils.CreatePartStream(item.DataStream, job.ChunkSize);
    var treeHash = TreeHashGenerator.CalculateTreeHash(partBody);

    var uploadRequest = new UploadMultipartPartRequest
    {
        VaultName = job.VaultName,
        Body = partBody,
        Checksum = treeHash,
        UploadId = job.UploadId
    };

    // Inclusive byte range of this part within the overall archive.
    var rangeStart = job.CurrentPosition;
    var rangeEnd = rangeStart + partBody.Length - 1;
    uploadRequest.SetRange(rangeStart, rangeEnd);

    var uploadResponse = await _client.UploadMultipartPartAsync(uploadRequest);
    uploadResponse.EnsureSuccess();

    // Only record progress once the part is confirmed uploaded.
    job.ChunkChecksums.Add(treeHash);
    job.CurrentPosition += partBody.Length;
}
/// <summary>
/// Uploads the backup file at <c>BackupFilePath</c> to AWS Glacier in parts of
/// <c>partSize</c> bytes, reporting progress via WriteFileUploadProgress after each
/// part, and returns the SHA-256 tree hash of each part in upload order.
/// </summary>
/// <param name="uploadID">Upload id returned by InitiateMultipartUpload.</param>
/// <param name="client">Configured Glacier client used to upload each part.</param>
/// <returns>Per-part tree-hash checksums, in part order.</returns>
static List<string> UploadParts(string uploadID, AmazonGlacier client)
{
    var partChecksumList = new List<string>();
    long currentPosition = 0;
    long fileLength = new FileInfo(BackupFilePath).Length;
    WriteFileUploadProgress(currentPosition, fileLength);

    using (var fileToUpload = new FileStream(BackupFilePath, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            // CreatePartStream advances the file position by up to partSize bytes.
            var uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            var checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            // Upload part.
            var uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = VaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            // Inclusive byte range of this part within the archive.
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);
            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition += uploadPartStream.Length;
            WriteFileUploadProgress(currentPosition, fileLength);
        }
    }

    return partChecksumList;
}
/// <summary>
/// Splits the archive file into partSize-byte parts, buffering every part in memory
/// up front, then uploads the parts concurrently via ThreadPool workers (ThreadUpload).
/// Returns the per-part checksums the workers push into SHA256ConcurrentQueue.
/// </summary>
/// <param name="uploadID">Upload id returned by InitiateMultipartUpload.</param>
/// <param name="client">Glacier client handed to each worker.</param>
/// <returns>Checksums drained from SHA256ConcurrentQueue after all workers finish.</returns>
List <string> UploadParts(string uploadID, AmazonGlacier client)
{
    List <string> partChecksumList = new List <string>();
    long currentPosition = 0;
    // NOTE(review): this buffer is never read or written below (each part allocates
    // its own in objData.buffer) — looks like a dead allocation; confirm and remove.
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(archiveToUpload).Length;
    ThreadPool.SetMaxThreads(25, 25);
    List <ThreadData> arThreadObj = new List <ThreadData>();
    FileStream fileToUpload = new FileStream(archiveToUpload, FileMode.Open, FileAccess.Read);
    // Beware - we create memory buffers for the entire file at once.
    // BBCREVISIT - use a queue for the threads and pick off that as the ThreadPool frees up resources
    while (currentPosition < fileLength)
    {
        Stream uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
        // Package everything one worker thread needs into a single ThreadData object.
        ThreadData objData = new ThreadData();
        objData.uploadID = uploadID;
        objData.client = client;
        objData.currentPosition = currentPosition;
        objData.uploadPartStream = uploadPartStream;
        objData.buffer = new byte[Convert.ToInt32(partSize)];
        int read = 0;
        try
        {
            // Copy this part's bytes into the worker's private buffer.
            read = fileToUpload.Read(objData.buffer, (int)0, (int)partSize);
        }
        catch (Exception e)
        {
            // Best-effort: a failed read is logged and 'read' stays 0.
            Form1.log.Error(e.ToString());
        }
        // NOTE(review): Stream.Read returns 0 at end of stream, never -1, so this
        // branch appears unreachable as written — confirm and consider read == 0.
        if (read == -1)
        {
            Form1.log.Info("Nothing to read : fileLength % partSize ==0");
            break;
        }
        arThreadObj.Add(objData);
        Form1.log.Info("Created Part : " + Convert.ToString(currentPosition) + " of Length " + uploadPartStream.Length);
        // Sanity check: the bytes read should match the part stream's length.
        if (read != uploadPartStream.Length)
        {
            Console.WriteLine("We have a problem Houston");
        }
        currentPosition = currentPosition + uploadPartStream.Length; // We are not using the stream right now.
    }
    // Queue one thread-pool work item per prepared part; ActiveWorkerCount tracks
    // outstanding workers (presumably decremented by ThreadUpload — verify).
    for (int ic = 0; ic < arThreadObj.Count; ic++)
    {
        Interlocked.Increment(ref ActiveWorkerCount);
        ThreadData objData = arThreadObj[ic];
        ThreadPool.QueueUserWorkItem(ThreadUpload, objData);
    }
    // Block until the last worker signals completion.
    AllWorkerCompletedEvent.WaitOne();
    // NOTE(review): ConcurrentQueue ordering depends on worker completion order —
    // verify ThreadUpload preserves part order, since the final tree-hash-of-hashes
    // computed from this list is order-sensitive.
    partChecksumList = SHA256ConcurrentQueue.ToList();
    fileToUpload.Close();
    return(partChecksumList);
}
// Deliberately disabled: running this test would leave data in Glacier, which costs money.
//[TestMethod]
//[TestCategory("Glacier")]
public void TestMultiPartUpload()
{
    // Use a unique vault name per run to avoid collisions.
    var testingVaultName = "dotnet-sdk-test" + DateTime.Now.Ticks.ToString();
    Client.CreateVault(new CreateVaultRequest() { VaultName = testingVaultName });

    // Start a multipart upload with 1 MiB parts.
    var initRequest = new InitiateMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        ArchiveDescription = "dotnet mp upload",
        PartSize = 1048576
    };
    var initResponse = Client.InitiateMultipartUpload(initRequest);
    var uploadId = initResponse.UploadId;

    // Build 1.5 parts worth of deterministic data in memory.
    var totalStream = new MemoryStream();
    for (var i = 0; i < 1048576 + 1048576 / 2; i++)
    {
        totalStream.WriteByte((byte)(i % byte.MaxValue));
    }
    totalStream.Position = 0;

    // Upload each part, recording its tree hash for the completion request.
    var partHashes = new List<string>();
    long currentPosition = 0;
    long partSize = 1048576;
    while (totalStream.Position < totalStream.Length)
    {
        var partStream = GlacierUtils.CreatePartStream(totalStream, partSize);
        var partHash = TreeHashGenerator.CalculateTreeHash(partStream);
        partHashes.Add(partHash);

        var partRequest = new UploadMultipartPartRequest()
        {
            VaultName = testingVaultName,
            UploadId = uploadId,
            Body = partStream,
            Checksum = partHash
        };
        partRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);
        Client.UploadMultipartPart(partRequest);

        currentPosition += partStream.Length;
    }

    // Complete the upload with the combined tree hash of all part hashes.
    var compRequest = new CompleteMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        UploadId = uploadId,
        ArchiveSize = totalStream.Length.ToString(),
        Checksum = TreeHashGenerator.CalculateTreeHash(partHashes)
    };
    var compResponse = Client.CompleteMultipartUpload(compRequest);
    Assert.IsNotNull(compResponse.Location);
    Assert.IsNotNull(compResponse.Checksum);

    // Clean up: delete the archive so nothing billable is left behind.
    var archiveId = compResponse.ArchiveId;
    var delArchiveRequest = new DeleteArchiveRequest()
    {
        VaultName = testingVaultName,
        ArchiveId = archiveId
    };
    var delArchiveResponse = Client.DeleteArchive(delArchiveRequest);
}
/// <summary>
/// Uploads <paramref name="stream"/> to the configured AWS Glacier vault and returns
/// the resulting archive id. Uploads larger than MaxUploadArchiveSize are sent as a
/// multipart (chunked) upload; smaller ones as a single UploadArchive call. Progress
/// is reported through the optional _progress object; rejects uploads over
/// TotalArchiveSizeLimit (40TB per the error message).
/// </summary>
/// <param name="stream">Source data; its Position is advanced as parts are read.</param>
/// <param name="archiveDescription">Description stored with the Glacier archive.</param>
/// <returns>The archive id assigned by Glacier.</returns>
/// <exception cref="InvalidOperationException">If the stream exceeds TotalArchiveSizeLimit.</exception>
private async Task <string> UploadArchiveAsync(Stream stream, string archiveDescription)
{
    // Fail fast if the vault/credentials are unreachable.
    await TestConnectionAsync();

    var streamSize = new Size(stream.Length, SizeUnit.Bytes);
    if (streamSize > TotalArchiveSizeLimit)
    {
        throw new InvalidOperationException($@"Can't upload more than 40TB to AWS Glacier, current upload size: {streamSize}");
    }

    var streamLength = streamSize.GetValue(SizeUnit.Bytes);
    try
    {
        _progress?.UploadProgress.SetTotal(streamLength);

        if (streamSize > MaxUploadArchiveSize)
        {
            // Too large for a single call: use Glacier's multipart upload.
            var partSize = GetPartSize(streamLength);
            _progress?.UploadProgress.ChangeType(UploadType.Chunked);

            // AccountId "-" means "the account owning the credentials" in the Glacier API.
            var initiateResponse = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
            {
                ArchiveDescription = archiveDescription,
                VaultName = _vaultName,
                AccountId = "-",
                PartSize = partSize
            }, _cancellationToken);

            var partChecksums = new List <string>();
            var currentPosition = 0L;
            while (stream.Position < streamLength)
            {
                // CreatePartStream wraps the next (up to) partSize bytes of the source.
                var partStream = GlacierUtils.CreatePartStream(stream, partSize);
                var partChecksum = TreeHashGenerator.CalculateTreeHash(partStream);
                partChecksums.Add(partChecksum);

                var uploadRequest = new UploadMultipartPartRequest
                {
                    UploadId = initiateResponse.UploadId,
                    VaultName = _vaultName,
                    AccountId = "-",
                    Body = partStream,
                    // Stream-level progress callback fired by the SDK as bytes transfer.
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    },
                    Checksum = partChecksum
                };
                // Inclusive byte range of this part within the overall archive.
                uploadRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);

                await _client.UploadMultipartPartAsync(uploadRequest, _cancellationToken);
                currentPosition += partStream.Length;
            }

            // Complete with the tree hash computed over the per-part hashes (order-sensitive).
            var completeResponse = await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
            {
                AccountId = "-",
                VaultName = _vaultName,
                ArchiveSize = streamLength.ToString(),
                UploadId = initiateResponse.UploadId,
                Checksum = TreeHashGenerator.CalculateTreeHash(partChecksums)
            }, _cancellationToken);
            return(completeResponse.ArchiveId);
        }

        // Small enough for a single-shot upload.
        var response = await _client.UploadArchiveAsync(new UploadArchiveRequest
        {
            AccountId = "-",
            ArchiveDescription = archiveDescription,
            Body = stream,
            VaultName = _vaultName,
            StreamTransferProgress = (_, args) =>
            {
                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                _progress?.OnUploadProgress?.Invoke();
            },
            Checksum = TreeHashGenerator.CalculateTreeHash(stream)
        }, _cancellationToken);
        return(response.ArchiveId);
    }
    finally
    {
        // Mark progress Done regardless of success, failure, or cancellation.
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}