internal UploadMultipartPartResponse UploadMultipartPart(UploadMultipartPartRequest request)
{
    var marshaller = new UploadMultipartPartRequestMarshaller();
    var unmarshaller = UploadMultipartPartResponseUnmarshaller.Instance;

    return Invoke<UploadMultipartPartRequest, UploadMultipartPartResponse>(request, marshaller, unmarshaller);
}
// Uploads each part to AWS
static List<string> UploadParts(string uploadID, AmazonGlacierClient client, string archiveToUpload)
{
    List<string> partChecksumList = new List<string>();
    long currentPosition = 0;
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(archiveToUpload).Length;

    using (FileStream fileToUpload = new FileStream(archiveToUpload, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            Stream uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = vaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);

            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition = currentPosition + uploadPartStream.Length;
        }
    }

    return partChecksumList;
}
public async Task UploadChunkAsync(UploadJob job, UploadItem item)
{
    // create a reference to a part of the input stream
    var chunkStream = GlacierUtils.CreatePartStream(item.DataStream, job.ChunkSize);
    var chunkChecksum = TreeHashGenerator.CalculateTreeHash(chunkStream);

    // prepare the request
    var request = new UploadMultipartPartRequest
    {
        VaultName = job.VaultName,
        Body = chunkStream,
        Checksum = chunkChecksum,
        UploadId = job.UploadId
    };

    // set the range of the current part
    request.SetRange(job.CurrentPosition, job.CurrentPosition + chunkStream.Length - 1);

    // upload this part
    var response = await _client.UploadMultipartPartAsync(request);
    response.EnsureSuccess();

    // commit progress
    job.ChunkChecksums.Add(chunkChecksum);
    job.CurrentPosition += chunkStream.Length;
}
static List<string> UploadParts(string uploadID, AmazonGlacier client)
{
    var partChecksumList = new List<string>();
    long currentPosition = 0;
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(BackupFilePath).Length;

    WriteFileUploadProgress(currentPosition, fileLength);

    using (var fileToUpload = new FileStream(BackupFilePath, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            var uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            var checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            // Upload part.
            var uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = VaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);

            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition = currentPosition + uploadPartStream.Length;
            WriteFileUploadProgress(currentPosition, fileLength);
        }
    }

    return partChecksumList;
}
/// <summary>
/// Initiates the asynchronous execution of the UploadMultipartPart operation.
/// <seealso cref="Amazon.Glacier.IAmazonGlacier"/>
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the UploadMultipartPart operation.</param>
/// <param name="cancellationToken">
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
/// <returns>The task object representing the asynchronous operation.</returns>
public Task<UploadMultipartPartResponse> UploadMultipartPartAsync(UploadMultipartPartRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
    var marshaller = new UploadMultipartPartRequestMarshaller();
    var unmarshaller = UploadMultipartPartResponseUnmarshaller.Instance;

    return InvokeAsync<UploadMultipartPartRequest, UploadMultipartPartResponse>(request, marshaller, unmarshaller, cancellationToken);
}
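// For context, a minimal caller sketch for the async entry point above (not part of the SDK
// source): it assumes an initialized AmazonGlacierClient stored in a field named _glacier and
// an upload ID already obtained from InitiateMultipartUploadAsync; the method name, field name,
// and parameters below are hypothetical placeholders.
private async Task<string> UploadOnePartAsync(string vaultName, string uploadId, Stream partStream, long offset, CancellationToken cancellationToken)
{
    // Compute the SHA256 tree hash of the part before sending it, as in the samples above.
    var checksum = TreeHashGenerator.CalculateTreeHash(partStream);

    var partRequest = new UploadMultipartPartRequest
    {
        VaultName = vaultName,
        UploadId = uploadId,
        Body = partStream,
        Checksum = checksum
    };
    // Same extension method used throughout these samples: sets Range to "bytes {start}-{end}/*".
    partRequest.SetRange(offset, offset + partStream.Length - 1);

    var response = await _glacier.UploadMultipartPartAsync(partRequest, cancellationToken);
    return response.Checksum;   // Glacier returns the checksum it computed for the received part.
}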
private bool retryUpload(object f_object)
{
    try
    {
        ThreadData objData = null;
        AmazonGlacier client = null;
        Stream uploadPartStream = null;

        objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        client = objData.client;
        long currentPosition = objData.currentPosition;
        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        // For the last part we need to make sure the buffer is the right size;
        // uploadMPUrequest.SetRange probably takes care of this.
        int memoryBufferIndex = 0;                                       // The index into the buffer at which the stream begins.
        int memoryBuffercount = (int)(objData.uploadPartStream.Length);  // The length of the stream in bytes.
        uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBuffercount);

        // To ensure that part data is not corrupted in transmission, compute a SHA256 tree hash
        // of the part and include it in the request. Upon receiving the part data, Amazon Glacier
        // also computes a SHA256 tree hash. If these hash values don't match, the operation fails.
        // For information about computing a SHA256 tree hash, see "Computing Checksums".
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);

        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Retry Success " + Convert.ToString(mpr.ContentLength) + " bytes" + " for Part :" + Convert.ToString(objData.currentPosition));
        return true;
    }
    catch (Exception ex)
    {
        Form1.log.Error(ex.ToString());
        return false;
    }
}
public bool UploadPart(GlacierFilePart part, System.EventHandler<Amazon.Runtime.StreamTransferProgressArgs> progressCallback)
{
    UploadMultipartPartRequest uploadRequest = new UploadMultipartPartRequest()
    {
        Body = new MemoryStream(part.Data),
        Checksum = part.Checksum,
        Range = part.Range,
        StreamTransferProgress = progressCallback,
        UploadId = part.UploadId,
        VaultName = _vault
    };

    UploadMultipartPartResponse response = _amazonGlacierClient.UploadMultipartPart(uploadRequest);

    // The part uploaded successfully if the checksum computed by Glacier matches the one we sent.
    return part.Checksum == response.UploadMultipartPartResult.Checksum;
}
internal override void Execute()
{
    FileInfo fileInfo = new FileInfo(filePath);
    FileStream fileStream = File.OpenRead(filePath);
    string uploadId = null;
    try
    {
        this.currentUploadProgressArgs = new StreamTransferProgressArgs(0, 0, fileInfo.Length);
        long partSize = CalculatePartSize(fileInfo.Length);

        InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
        {
            AccountId = this.options.AccountId,
            ArchiveDescription = archiveDescription,
            VaultName = vaultName,
            PartSize = partSize
        };
        initiateRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;

        InitiateMultipartUploadResult initiateResult = this.manager.GlacierClient.InitiateMultipartUpload(initiateRequest).InitiateMultipartUploadResult;
        uploadId = initiateResult.UploadId;

        List<string> partTreeHashs = new List<string>();
        long currentPosition = 0;
        while (currentPosition < fileInfo.Length)
        {
            long length = partSize;
            if (currentPosition + partSize > fileInfo.Length)
            {
                length = fileInfo.Length - currentPosition;
            }

            PartStreamWrapper partStream = new PartStreamWrapper(fileStream, length);

            string checksum = TreeHashGenerator.CalculateTreeHash(partStream);
            partTreeHashs.Add(checksum);

            UploadMultipartPartRequest uploadRequest = new UploadMultipartPartRequest
            {
                AccountId = this.options.AccountId,
                Checksum = checksum,
                Body = partStream,
                Range = "bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*",
                UploadId = uploadId,
                VaultName = vaultName
            };
            uploadRequest.StreamTransferProgress += this.ProgressCallback;
            uploadRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;

            this.manager.GlacierClient.UploadMultipartPart(uploadRequest);
            currentPosition += partSize;
        }

        string totalFileChecksum = TreeHashGenerator.CalculateTreeHash(partTreeHashs);
        string archiveSize = fileInfo.Length.ToString();
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest
        {
            AccountId = this.options.AccountId,
            ArchiveSize = archiveSize,
            VaultName = vaultName,
            Checksum = totalFileChecksum,
            UploadId = uploadId
        };
        compRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;

        CompleteMultipartUploadResult completeMultipartUploadResult = this.manager.GlacierClient.CompleteMultipartUpload(compRequest).CompleteMultipartUploadResult;
        string archiveId = completeMultipartUploadResult.ArchiveId;

        this.UploadResult = new UploadResult(archiveId, totalFileChecksum);
    }
    catch (Exception)
    {
        // If we hit an unrecoverable error, abort the upload.
        if (!string.IsNullOrEmpty(uploadId))
        {
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
            {
                AccountId = this.options.AccountId,
                VaultName = this.vaultName,
                UploadId = uploadId
            };
            abortRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;
            this.manager.GlacierClient.AbortMultipartUpload(abortRequest);
        }
        throw;
    }
    finally
    {
        try { fileStream.Close(); }
        catch (Exception) { }
    }
}
/// <summary>
/// Creates the range formatted string and sets the Range property.
/// </summary>
/// <param name="request">The request whose Range property is set.</param>
/// <param name="start">The start of the range.</param>
/// <param name="end">The end of the range.</param>
public static void SetRange(this UploadMultipartPartRequest request, long start, long end)
{
    request.Range = string.Format(CultureInfo.InvariantCulture, "bytes {0}-{1}/*", start, end);
}
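// A quick illustration of what the extension above produces (values are hypothetical): for a
// 1 MiB part starting at offset 0, SetRange(0, 1048575) sets Range to "bytes 0-1048575/*",
// which matches the range strings built by hand in the other snippets.
var exampleRequest = new UploadMultipartPartRequest();
exampleRequest.SetRange(0, 1048575);
Console.WriteLine(exampleRequest.Range);   // prints: bytes 0-1048575/*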
private void ThreadUpload(object f_object)
{
    ThreadData objData = null;
    AmazonGlacier client = null;
    Stream uploadPartStream = null;
    try
    {
        objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        client = objData.client;
        long currentPosition = objData.currentPosition;
        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        // For the last part we need to make sure the buffer is the right size;
        // uploadMPUrequest.SetRange probably takes care of this.
        int memoryBufferIndex = 0;                                       // The index into the buffer at which the stream begins.
        int memoryBuffercount = (int)(objData.uploadPartStream.Length);  // The length of the stream in bytes.
        uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBuffercount);

        // To ensure that part data is not corrupted in transmission, compute a SHA256 tree hash
        // of the part and include it in the request. Upon receiving the part data, Amazon Glacier
        // also computes a SHA256 tree hash. If these hash values don't match, the operation fails.
        // For information about computing a SHA256 tree hash, see "Computing Checksums".
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);

        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Sent " + Convert.ToString(mpr.ContentLength) + " bytes" + " for Part :" + Convert.ToString(objData.currentPosition));
    }
    catch (Exception e)
    {
        Form1.log.Error(e.ToString());
        Form1.log.Error(e.StackTrace);
        Form1.log.Info("Retrying Part " + Convert.ToString(objData.currentPosition));

        // Retry up to 10 times, waiting longer between each attempt.
        {
            int fv = 0;
            bool successfulPartUpload = false;
            while (fv < 10 && successfulPartUpload == false)
            {
                successfulPartUpload = retryUpload(f_object);
                fv++;   // Count this attempt so the loop terminates and the backoff grows.
                Thread.Sleep(4000 * fv);
            }
        }
    }
    finally
    {
        if (Interlocked.Decrement(ref ActiveWorkerCount) <= 0)
        {
            AllWorkerCompletedEvent.Set();
        }
        uploadPartStream = null;
        f_object = null;
        objData = null;
        client = null;
    }
}
internal override async Task ExecuteAsync()
{
    FileInfo fileInfo = new FileInfo(filePath);
    FileStream fileStream = File.OpenRead(filePath);
    string uploadId = null;
    try
    {
        this.currentUploadProgressArgs = new StreamTransferProgressArgs(0, 0, fileInfo.Length);
        long partSize = CalculatePartSize(fileInfo.Length);

        InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest()
        {
            AccountId = this.options.AccountId,
            ArchiveDescription = archiveDescription,
            VaultName = vaultName,
            PartSize = partSize
        };
        ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)initiateRequest).AddBeforeRequestHandler(new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);

        InitiateMultipartUploadResponse initiateResponse = await this.manager.GlacierClient.InitiateMultipartUploadAsync(initiateRequest).ConfigureAwait(false);
        uploadId = initiateResponse.UploadId;

        List<string> partTreeHashs = new List<string>();
        long currentPosition = 0;
        while (currentPosition < fileInfo.Length)
        {
            long length = partSize;
            if (currentPosition + partSize > fileInfo.Length)
            {
                length = fileInfo.Length - currentPosition;
            }

            Stream partStream = new PartialWrapperStream(fileStream, length);

            string checksum = TreeHashGenerator.CalculateTreeHash(partStream);
            partTreeHashs.Add(checksum);

            UploadMultipartPartRequest uploadRequest = new UploadMultipartPartRequest()
            {
                AccountId = this.options.AccountId,
                Checksum = checksum,
                Range = "bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*",
                UploadId = uploadId,
                VaultName = vaultName,
                Body = partStream
            };
            uploadRequest.StreamTransferProgress += this.ProgressCallback;
            ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).AddBeforeRequestHandler(new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);

            await this.manager.GlacierClient.UploadMultipartPartAsync(uploadRequest).ConfigureAwait(false);
            currentPosition += partSize;
        }

        string totalFileChecksum = TreeHashGenerator.CalculateTreeHash(partTreeHashs);
        string archiveSize = fileInfo.Length.ToString(CultureInfo.InvariantCulture);
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        {
            AccountId = this.options.AccountId,
            ArchiveSize = archiveSize,
            VaultName = vaultName,
            Checksum = totalFileChecksum,
            UploadId = uploadId
        };
        ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)compRequest).AddBeforeRequestHandler(new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);

        CompleteMultipartUploadResponse completeMultipartUploadResponse = await this.manager.GlacierClient.CompleteMultipartUploadAsync(compRequest).ConfigureAwait(false);
        string archiveId = completeMultipartUploadResponse.ArchiveId;

        this.UploadResult = new UploadResult(archiveId, totalFileChecksum);
    }
    catch (Exception)
    {
        // If we hit an unrecoverable error, abort the upload.
        if (!string.IsNullOrEmpty(uploadId))
        {
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
            {
                AccountId = this.options.AccountId,
                VaultName = this.vaultName,
                UploadId = uploadId
            };
            ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)abortRequest).AddBeforeRequestHandler(new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);
            this.manager.GlacierClient.AbortMultipartUploadAsync(abortRequest).Wait();
        }
        throw;
    }
    finally
    {
        try { fileStream.Dispose(); }
        catch (Exception) { }
    }
}
// Commented out because this would leave data in Glacier that would cost money.
//[TestMethod]
//[TestCategory("Glacier")]
public void TestMultiPartUpload()
{
    var testingVaultName = "dotnet-sdk-test" + DateTime.Now.Ticks.ToString();
    Client.CreateVault(new CreateVaultRequest()
    {
        VaultName = testingVaultName
    });

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        ArchiveDescription = "dotnet mp upload",
        PartSize = 1048576
    };
    InitiateMultipartUploadResponse initResponse = Client.InitiateMultipartUpload(initRequest);
    string uploadId = initResponse.UploadId;

    MemoryStream totalStream = new MemoryStream();
    for (int i = 0; i < 1048576 + 1048576 / 2; i++)
    {
        totalStream.WriteByte((byte)(i % byte.MaxValue));
    }
    totalStream.Position = 0;

    List<string> md5s = new List<string>();
    long currentPosition = 0;
    long partSize = 1048576;
    while (totalStream.Position < totalStream.Length)
    {
        Stream partStream = GlacierUtils.CreatePartStream(totalStream, partSize);
        string checkSum = TreeHashGenerator.CalculateTreeHash(partStream);
        md5s.Add(checkSum);

        UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
        {
            VaultName = testingVaultName,
            UploadId = uploadId,
            Body = partStream,
            Checksum = checkSum
        };
        partRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);
        Client.UploadMultipartPart(partRequest);

        currentPosition += partStream.Length;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        UploadId = uploadId,
        ArchiveSize = totalStream.Length.ToString(),
        Checksum = TreeHashGenerator.CalculateTreeHash(md5s)
    };
    CompleteMultipartUploadResponse compResponse = Client.CompleteMultipartUpload(compRequest);
    Assert.IsNotNull(compResponse.Location);
    Assert.IsNotNull(compResponse.Checksum);

    string archiveId = compResponse.ArchiveId;
    DeleteArchiveRequest delArchiveRequest = new DeleteArchiveRequest()
    {
        VaultName = testingVaultName,
        ArchiveId = archiveId
    };
    DeleteArchiveResponse delArchiveResponse = Client.DeleteArchive(delArchiveRequest);
}
private async Task<string> UploadArchiveAsync(Stream stream, string archiveDescription)
{
    await TestConnectionAsync();

    var streamSize = new Size(stream.Length, SizeUnit.Bytes);
    if (streamSize > TotalArchiveSizeLimit)
    {
        throw new InvalidOperationException($@"Can't upload more than 40TB to AWS Glacier, current upload size: {streamSize}");
    }

    var streamLength = streamSize.GetValue(SizeUnit.Bytes);
    try
    {
        _progress?.UploadProgress.SetTotal(streamLength);

        if (streamSize > MaxUploadArchiveSize)
        {
            // Large archive: upload in parts via the multipart API.
            var partSize = GetPartSize(streamLength);
            _progress?.UploadProgress.ChangeType(UploadType.Chunked);

            var initiateResponse = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
            {
                ArchiveDescription = archiveDescription,
                VaultName = _vaultName,
                AccountId = "-",
                PartSize = partSize
            }, _cancellationToken);

            var partChecksums = new List<string>();
            var currentPosition = 0L;

            while (stream.Position < streamLength)
            {
                var partStream = GlacierUtils.CreatePartStream(stream, partSize);
                var partChecksum = TreeHashGenerator.CalculateTreeHash(partStream);
                partChecksums.Add(partChecksum);

                var uploadRequest = new UploadMultipartPartRequest
                {
                    UploadId = initiateResponse.UploadId,
                    VaultName = _vaultName,
                    AccountId = "-",
                    Body = partStream,
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    },
                    Checksum = partChecksum
                };
                uploadRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);

                await _client.UploadMultipartPartAsync(uploadRequest, _cancellationToken);

                currentPosition += partStream.Length;
            }

            var completeResponse = await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
            {
                AccountId = "-",
                VaultName = _vaultName,
                ArchiveSize = streamLength.ToString(),
                UploadId = initiateResponse.UploadId,
                Checksum = TreeHashGenerator.CalculateTreeHash(partChecksums)
            }, _cancellationToken);

            return completeResponse.ArchiveId;
        }

        // Small archive: upload in a single request.
        var response = await _client.UploadArchiveAsync(new UploadArchiveRequest
        {
            AccountId = "-",
            ArchiveDescription = archiveDescription,
            Body = stream,
            VaultName = _vaultName,
            StreamTransferProgress = (_, args) =>
            {
                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                _progress?.OnUploadProgress?.Invoke();
            },
            Checksum = TreeHashGenerator.CalculateTreeHash(stream)
        }, _cancellationToken);

        return response.ArchiveId;
    }
    finally
    {
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}
private static void HandleUpload(string[] args)
{
    var region = RegionEndpoint.EnumerableAllRegions.SingleOrDefault(reg => reg.SystemName == args[2]);
    if (args.Length < 6 || region == null)
    {
        Console.WriteLine($"args should be aws_key aws_secret region vault_name description filename");
        return;
    }

    var aws_key = args[0];
    var aws_secret = args[1];
    var vault_name = args[3];
    var description = args[4];
    var filename = args[5];

    var creds = new BasicAWSCredentials(aws_key, aws_secret);
    var config = new AmazonGlacierConfig
    {
        RegionEndpoint = region,
        Timeout = TimeSpan.FromDays(10)
    };
    var client = new AmazonGlacierClient(creds, config);
    var initReq = new InitiateMultipartUploadRequest(vault_name, description, PartSize);
    var ts = new CancellationTokenSource();
    var completed = 0;
    var started = 0;

    try
    {
        var res = client.InitiateMultipartUploadAsync(initReq, ts.Token).Result;
        var promises = new List<Task<UploadMultipartPartResponse>>();
        Task<UploadMultipartPartResponse> lastPart = null;
        var sem = new SemaphoreSlim(ConcurrencyLimit);
        long totalSize = 0;
        int totalParts = 0;

        using (var fs = new FileStream(filename, FileMode.Open))
        {
            totalSize = fs.Length;
            Console.WriteLine($"Preparing to upload {ByteSize.FromBytes(totalSize)}");
            totalParts = (int)(fs.Length / PartSize) + 1;
            bool noErrors = true;

            while (noErrors)
            {
                sem.Wait();
                var arr = new byte[PartSize];
                var start = fs.Position;
                var read = fs.Read(arr, 0, (int)PartSize);
                var check = TreeHasher.ComputeArrayHashString(arr, read);
                var partReq = new UploadMultipartPartRequest(vault_name, res.UploadId, check, $"bytes {start}-{start + read - 1}/*", new MemoryStream(arr, 0, read));
                var promise = client.UploadMultipartPartAsync(partReq, ts.Token);

                Interlocked.Increment(ref started);
                Console.WriteLine($"Started {started} out of {totalParts}");

                promise.ContinueWith(tsk =>
                {
                    if (tsk.IsFaulted)
                    {
                        Console.WriteLine($"Exception encountered: {tsk.Exception.ToString()}");
                        noErrors = false;
                        throw tsk.Exception;
                    }
                    Interlocked.Increment(ref completed);
                    Console.WriteLine($"{completed} out of {totalParts} completed.");
                    sem.Release();
                });
                promises.Add(promise);

                if (read < PartSize || fs.Position >= fs.Length - 1)
                {
                    lastPart = promise;
                    break;
                }
            }
        }

        Task.WaitAll(promises.ToArray());

        using (var fs = new FileStream(filename, FileMode.Open))
        {
            var check = TreeHasher.ComputeHashString(fs);
            var finisher = new CompleteMultipartUploadRequest(vault_name, res.UploadId, totalSize.ToString(), check);
            Console.WriteLine("Finishing up");
            Console.WriteLine($"Computed checksum {check}");

            var result = client.CompleteMultipartUploadAsync(finisher, ts.Token).Result;

            Console.WriteLine($"Completed: {result.Checksum}");
            Console.WriteLine($"Calculated: {check}");
            var match = string.Equals(result.Checksum, check, StringComparison.InvariantCultureIgnoreCase) ? "" : "not ";
            Console.WriteLine($"Checksums do {match}match.");
            Console.WriteLine($"Archive ID: {result.ArchiveId} Location: {result.Location}");
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine($"Exception thrown: {ex.GetType().Name} - {ex.Message}");
        Console.WriteLine($"Full exception: {ex.ToString()}");
    }
}