internal override void Execute()
{
    FileStream input = File.OpenRead(filePath);
    string checksum = TreeHashGenerator.CalculateTreeHash(input);
    try
    {
        UploadArchiveRequest uploadRequest = new UploadArchiveRequest()
            .WithAccountId(this.options.AccountId)
            .WithArchiveDescription(archiveDescription)
            .WithVaultName(vaultName)
            .WithChecksum(checksum)
            .WithBody(input);
        uploadRequest.StreamTransferProgress += this.ProgressCallback;
        uploadRequest.BeforeRequestEvent += new UserAgentPostFix("SingleUpload").UserAgentRequestEventHandlerSync;

        UploadArchiveResult uploadArchiveResult =
            this.manager.GlacierClient.UploadArchive(uploadRequest).UploadArchiveResult;
        string archiveId = uploadArchiveResult.ArchiveId;
        this.UploadResult = new UploadResult(archiveId, checksum);
    }
    finally
    {
        try { input.Close(); }
        catch (Exception) { }
    }
}
public override async Task<StorageFile> UploadFileAsync(Stream fileStream, string fn, StorageFile destFolder,
    CancellationToken token, Action<TransferProgress> progressCallback)
{
    var path = ((GlacierFile)destFolder).FolderPath;
    if (!string.IsNullOrEmpty(path) && !path.EndsWith("/"))
    {
        path += "/";
    }

    var filePathName = path + fn;
    var fileLen = fileStream.CanSeek ? fileStream.Length : 0;
    var checksum = await Task.Run(() => TreeHashGenerator.CalculateTreeHash(fileStream));

    // Wrap the source stream so position changes can drive the progress callback.
    var observed = new ObserverStream(fileStream);
    var percent = 0;
    observed.PositionChanged += (sender, e) =>
    {
        var currentPercent = fileLen == 0 ? 0 : (int)(100 * ((Stream)sender).Position / (float)fileLen);
        if (currentPercent == percent)
        {
            return;
        }
        percent = currentPercent;
        progressCallback(new TransferProgress(percent));
    };

    var req = new UploadArchiveRequest(Vault, filePathName.Unidecode(), checksum, observed);
    var result = await client.UploadArchiveAsync(req, token);
    return new GlacierFile(this, result.ArchiveId, false, fn);
}
internal override async Task ExecuteAsync()
{
    FileStream input = File.OpenRead(filePath);
    string checksum = TreeHashGenerator.CalculateTreeHash(input);
    try
    {
        UploadArchiveRequest uploadRequest = new UploadArchiveRequest()
        {
            AccountId = this.options.AccountId,
            ArchiveDescription = archiveDescription,
            VaultName = vaultName,
            Checksum = checksum,
            Body = input
        };
        uploadRequest.StreamTransferProgress += this.ProgressCallback;
        ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).AddBeforeRequestHandler(
            new ArchiveTransferManager.UserAgentPostFix("SingleUpload").UserAgentRequestEventHandlerSync);

        UploadArchiveResponse uploadArchiveResponse =
            await this.manager.GlacierClient.UploadArchiveAsync(uploadRequest).ConfigureAwait(false);
        string archiveId = uploadArchiveResponse.ArchiveId;
        this.UploadResult = new UploadResult(archiveId, checksum);
    }
    finally
    {
        try { input.Dispose(); }
        catch (Exception) { }
    }
}
public async Task TestSimpleUpload()
{
    await Client.CreateVaultAsync(new CreateVaultRequest() { VaultName = testingVaultName }).ConfigureAwait(false);

    const string accountID = "-";
    var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes("data to archive"));
    var uploadArchiveRequest = new UploadArchiveRequest
    {
        VaultName = testingVaultName,
        Body = memoryStream,
        Checksum = TreeHashGenerator.CalculateTreeHash(memoryStream),
        AccountId = accountID,
        ArchiveDescription = "my first archive"
    };
    var response = await Client.UploadArchiveAsync(uploadArchiveRequest).ConfigureAwait(false);
    string archiveID = response.ArchiveId;

    await Client.DeleteArchiveAsync(new DeleteArchiveRequest
    {
        AccountId = accountID,
        VaultName = testingVaultName,
        ArchiveId = archiveID
    });
}
internal override void Execute()
{
    FileStream input = File.OpenRead(filePath);
    string checksum = TreeHashGenerator.CalculateTreeHash(input);
    try
    {
        UploadArchiveRequest uploadRequest = new UploadArchiveRequest()
        {
            AccountId = this.options.AccountId,
            ArchiveDescription = archiveDescription,
            VaultName = vaultName,
            Checksum = checksum,
            Body = input
        };
        uploadRequest.StreamTransferProgress += this.ProgressCallback;
        uploadRequest.BeforeRequestEvent += new UserAgentPostFix("SingleUpload").UserAgentRequestEventHandlerSync;

        UploadArchiveResponse uploadArchiveResponse = this.manager.GlacierClient.UploadArchive(uploadRequest);
        string archiveId = uploadArchiveResponse.ArchiveId;
        this.UploadResult = new UploadResult(archiveId, checksum);
    }
    finally
    {
        try { input.Close(); }
        catch (Exception) { }
    }
}
private UploadArchiveResponse StoreArchive(Stream uncompressedContent, string archiveDescription)
{
    using (var glacierClient = new AmazonGlacierClient(
        new BasicAWSCredentials(GlacierAccessKey, new Secrets().GetGlacierSecretKey()), GlacierRegion))
    {
        // Zip to a memory stream. The GZipStream must be closed (disposed) before the
        // buffer is read back, otherwise the compressed data is left incomplete.
        var ms = new MemoryStream();
        using (var zipper = new GZipStream(ms, CompressionLevel.Optimal, leaveOpen: true))
        {
            uncompressedContent.CopyTo(zipper);
        }
        ms.Seek(0, SeekOrigin.Begin);

        // Calculate the SHA-256 tree hash of the compressed content.
        var shaTree = TreeHashGenerator.CalculateTreeHash(ms);
        ms.Seek(0, SeekOrigin.Begin);

        var result = glacierClient.UploadArchive(new UploadArchiveRequest()
        {
            AccountId = GlacierOwnerAccountID,
            VaultName = GlacierVaultName,
            ArchiveDescription = archiveDescription,
            Body = ms,
            Checksum = shaTree,
        });
        return result;
    }
}
public async Task UploadChunkAsync(UploadJob job, UploadItem item)
{
    // Create a reference to a part of the input stream.
    var chunkStream = GlacierUtils.CreatePartStream(item.DataStream, job.ChunkSize);
    var chunkChecksum = TreeHashGenerator.CalculateTreeHash(chunkStream);

    // Prepare the request and set the range of the current part.
    var request = new UploadMultipartPartRequest
    {
        VaultName = job.VaultName,
        Body = chunkStream,
        Checksum = chunkChecksum,
        UploadId = job.UploadId
    };
    request.SetRange(job.CurrentPosition, job.CurrentPosition + chunkStream.Length - 1);

    // Upload this part.
    var response = await _client.UploadMultipartPartAsync(request);
    response.EnsureSuccess();

    // Commit progress.
    job.ChunkChecksums.Add(chunkChecksum);
    job.CurrentPosition += chunkStream.Length;
}
static List<string> UploadParts(string uploadID, AmazonGlacier client)
{
    var partChecksumList = new List<string>();
    long currentPosition = 0;
    long fileLength = new FileInfo(BackupFilePath).Length;
    WriteFileUploadProgress(currentPosition, fileLength);

    using (var fileToUpload = new FileStream(BackupFilePath, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            var uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            var checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            // Upload part.
            var uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = VaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);
            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition += uploadPartStream.Length;
            WriteFileUploadProgress(currentPosition, fileLength);
        }
    }
    return partChecksumList;
}
public void UploadVaultArchive()
{
    var stream = File.OpenRead(AppDomain.CurrentDomain.BaseDirectory + "\\test.txt");
    UploadArchiveRequest request = new UploadArchiveRequest
    {
        VaultName = vaultName,
        AccountId = "-",
        ArchiveDescription = "test desc",
        Checksum = TreeHashGenerator.CalculateTreeHash(stream),
        Body = stream
    };
    request.StreamTransferProgress += OnUploadProgress;

    var response = client.UploadArchive(request);
    if (response.HttpStatusCode.IsSuccess())
    {
        Console.WriteLine("Archive uploaded successfully");
        Console.WriteLine($"RequestId: {response.ResponseMetadata.RequestId}");
        foreach (var item in response.ResponseMetadata.Metadata)
        {
            Console.WriteLine($"{item.Key}:{item.Value}");
        }
    }
}
private static string UploadArchive(AmazonGlacierClient glacier)
{
    MemoryStream ms = new MemoryStream(Encoding.UTF8.GetBytes("data to archive"));
    string treeHash = TreeHashGenerator.CalculateTreeHash(ms);

    UploadArchiveRequest req = new UploadArchiveRequest();
    req.VaultName = vaultName;
    req.Body = ms;
    req.Checksum = treeHash;

    Task<UploadArchiveResponse> res = glacier.UploadArchiveAsync(req);
    Task.WaitAll(res);
    if (res.IsCompletedSuccessfully)
    {
        Console.WriteLine("ArchiveId: {0}", res.Result.ArchiveId);
        return res.Result.ArchiveId;
    }
    return string.Empty;
}
// Commented out because this would leave data in Glacier that would cost money.
//[TestMethod]
//[TestCategory("Glacier")]
public void TestSimpleUpload()
{
    var testingVaultName = "dotnet-sdk-test" + DateTime.Now.Ticks.ToString();
    Client.CreateVault(new CreateVaultRequest() { VaultName = testingVaultName });

    const string accountID = "-";
    string archiveID = null;
    try
    {
        var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes("data to archive"));
        var uploadArchiveRequest = new UploadArchiveRequest
        {
            VaultName = testingVaultName,
            Body = memoryStream,
            Checksum = TreeHashGenerator.CalculateTreeHash(memoryStream),
            AccountId = accountID,
            ArchiveDescription = "my first archive"
        };
        var response = Client.UploadArchive(uploadArchiveRequest);
        archiveID = response.ArchiveId;
    }
    finally
    {
        Client.DeleteArchive(new DeleteArchiveRequest
        {
            AccountId = accountID,
            VaultName = testingVaultName,
            ArchiveId = archiveID
        });
    }
}
// Uploads each part to AWS.
static List<string> UploadParts(string uploadID, AmazonGlacierClient client, string archiveToUpload)
{
    List<string> partChecksumList = new List<string>();
    long currentPosition = 0;
    long fileLength = new FileInfo(archiveToUpload).Length;

    using (FileStream fileToUpload = new FileStream(archiveToUpload, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            Stream uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = vaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);
            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition += uploadPartStream.Length;
        }
    }
    return partChecksumList;
}
string CalculateHash(string file)
{
    using (var fs = new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.Read))
    {
        return TreeHashGenerator.CalculateTreeHash(fs);
    }
}
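The helper above returns the SHA-256 tree hash that Glacier expects in the Checksum field. A minimal usage sketch follows; the vault name and file path are placeholders, and `client` is assumed to be an AmazonGlacierClient available in the same class:

// Hypothetical usage of the CalculateHash helper above; "my-vault" and the
// file path are placeholders, and `client` is an assumed AmazonGlacierClient.
string archivePath = @"C:\backups\archive.zip";
string checksum = CalculateHash(archivePath);
using (var body = File.OpenRead(archivePath))
{
    var response = client.UploadArchive(new UploadArchiveRequest
    {
        VaultName = "my-vault",
        AccountId = "-",   // "-" targets the account that owns the credentials
        ArchiveDescription = "nightly backup",
        Checksum = checksum,
        Body = body
    });
    Console.WriteLine(response.ArchiveId);
}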
private bool retryUpload(object f_object)
{
    try
    {
        ThreadData objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        AmazonGlacier client = objData.client;
        long currentPosition = objData.currentPosition;
        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        // For the last part the shared buffer may be larger than the remaining data,
        // so the stream is sized to the actual part length; SetRange below matches it.
        int memoryBufferIndex = 0;                                    // index into the buffer at which the stream begins
        int memoryBufferCount = (int)objData.uploadPartStream.Length; // length of the stream in bytes
        Stream uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBufferCount);

        // To ensure that part data is not corrupted in transmission, compute a SHA-256
        // tree hash of the part and include it in the request. Upon receiving the part
        // data, Amazon Glacier also computes a SHA-256 tree hash; if the values don't
        // match, the operation fails.
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);

        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Retry success: " + Convert.ToString(mpr.ContentLength) + " bytes for Part :" + Convert.ToString(objData.currentPosition));
        return true;
    }
    catch (Exception ex)
    {
        Form1.log.Error(ex.ToString());
        return false;
    }
}
// After each file is uploaded it will return an ArchiveID.
static string CompleteMPU(string uploadID, AmazonGlacierClient client, List<string> partChecksumList, string archiveToUpload)
{
    long fileLength = new FileInfo(archiveToUpload).Length;
    CompleteMultipartUploadRequest completeMPUrequest = new CompleteMultipartUploadRequest()
    {
        UploadId = uploadID,
        ArchiveSize = fileLength.ToString(),
        // The archive-level checksum is the tree hash computed over the per-part tree hashes.
        Checksum = TreeHashGenerator.CalculateTreeHash(partChecksumList),
        VaultName = vaultName
    };
    CompleteMultipartUploadResponse completeMPUresponse = client.CompleteMultipartUpload(completeMPUrequest);
    return completeMPUresponse.ArchiveId;
}
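The UploadParts/CompleteMPU pair above consumes an upload ID obtained beforehand. A minimal sketch of that missing first step, assuming the same vaultName and partSize fields used by the methods above:

// Hypothetical companion to UploadParts/CompleteMPU above; assumes the same
// vaultName and partSize fields. InitiateMultipartUpload returns the uploadID
// that the other two methods consume.
static string InitiateMPU(AmazonGlacierClient client, string archiveDescription)
{
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        VaultName = vaultName,
        ArchiveDescription = archiveDescription,
        PartSize = partSize   // must be a power-of-two number of MiB (1 MiB to 4 GiB)
    };
    InitiateMultipartUploadResponse initResponse = client.InitiateMultipartUpload(initRequest);
    return initResponse.UploadId;
}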
public ArchiveModel SaveFile(string filename, object metadata, string vaultName, bool compress)
{
    var json = JsonConvert.SerializeObject(metadata);
    using (var fileStream = GetFileStream(filename, compress, json))
    using (var client = GetGlacierClient())
    {
        _log.InfoFormat("Uploading {0}, {1} bytes", filename, fileStream.Length);
        var hash = TreeHashGenerator.CalculateTreeHash(fileStream);
        fileStream.Position = 0;

        UploadArchiveResponse result;
        using (var percentUpdater = new ConsolePercentUpdater())
        {
            percentUpdater.Start();
            result = client.UploadArchive(new UploadArchiveRequest
            {
                AccountId = _accountId,
                ArchiveDescription = json,
                VaultName = GetTrimmedVaultName(vaultName),
                Body = fileStream,
                Checksum = hash,
                StreamTransferProgress = new EventHandler<StreamTransferProgressArgs>(
                    (a, b) => { percentUpdater.PercentDone = b.PercentDone; })
            });
        }
        _log.InfoFormat("File uploaded: {0}, archive ID: {1}", result.HttpStatusCode, result.ArchiveId);

        return new ArchiveModel
        {
            ArchiveId = result.ArchiveId,
            Status = result.HttpStatusCode,
            Location = result.Location,
            Metadata = JsonConvert.SerializeObject(result.ResponseMetadata),
            PostedTimestamp = DateTime.UtcNow
        };
    }
}
public async Task<string> FinishUploadAsync(UploadJob job, UploadItem item)
{
    // The final checksum is the tree hash of the per-chunk tree hashes.
    var checksum = TreeHashGenerator.CalculateTreeHash(job.ChunkChecksums);

    // Prepare the request.
    var request = new CompleteMultipartUploadRequest
    {
        UploadId = job.UploadId,
        ArchiveSize = item.ContentLength.ToString(),
        Checksum = checksum,
        VaultName = job.VaultName
    };

    // Finish up the multipart upload.
    var response = await _client.CompleteMultipartUploadAsync(request);
    var archiveId = response.ArchiveId;
    return archiveId;
}
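UploadChunkAsync and FinishUploadAsync above both operate on an UploadJob that already carries an UploadId. A sketch of how such a job might be started, assuming UploadJob is a plain data holder exposing the fields those methods use:

// Hypothetical StartUploadAsync pairing with UploadChunkAsync/FinishUploadAsync above.
// UploadJob is assumed to be a simple data holder with the fields used by those
// methods (VaultName, ChunkSize, UploadId, CurrentPosition, ChunkChecksums).
public async Task<UploadJob> StartUploadAsync(string vaultName, long chunkSize, string description)
{
    var response = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
    {
        VaultName = vaultName,
        ArchiveDescription = description,
        PartSize = chunkSize   // Glacier requires a power-of-two number of MiB
    });

    return new UploadJob
    {
        VaultName = vaultName,
        ChunkSize = chunkSize,
        UploadId = response.UploadId,
        CurrentPosition = 0,
        ChunkChecksums = new List<string>()
    };
}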
private async Task<ArchiveModel> SaveFileWithMetadata(string filename, object metadata, string vaultName)
{
    var json = JsonConvert.SerializeObject(metadata);
    using (var fileStream = await _archiveProvider.GetFileStream(filename, json))
    {
        var hash = TreeHashGenerator.CalculateTreeHash(fileStream);
        fileStream.Position = 0;
        _updater.UpdatePercentage(filename, 0);

        var result = await DoGlacierUpload(json, vaultName, fileStream, hash, filename);
        return new ArchiveModel
        {
            ArchiveId = result.ArchiveId,
            Status = result.HttpStatusCode,
            Location = result.Location,
            Metadata = JsonConvert.SerializeObject(result.ResponseMetadata),
            PostedTimestamp = DateTime.UtcNow
        };
    }
}
public void UploadVaultObject()
{
    try
    {
        var stream = File.OpenRead(AppDomain.CurrentDomain.BaseDirectory + "\\example");
        UploadArchiveRequest request = new UploadArchiveRequest
        {
            VaultName = vaultName,
            AccountId = "",
            ArchiveDescription = "Test Description Upload",
            Body = stream,
            Checksum = TreeHashGenerator.CalculateTreeHash(stream)
        };
        request.StreamTransferProgress += OnUploadProgress;

        var response = client.UploadArchive(request);
        if (response.HttpStatusCode == System.Net.HttpStatusCode.Created)
        {
            Console.WriteLine("Archive uploaded successfully");
            Console.WriteLine($"RequestId: {response.ResponseMetadata.RequestId}");
            Console.WriteLine($"ArchiveID: {response.ArchiveId}");
            foreach (var item in response.ResponseMetadata.Metadata)
            {
                Console.WriteLine($"{item.Key} / {item.Value}");
            }
        }
    }
    catch (AmazonGlacierException e)
    {
        Console.WriteLine(e.Message);
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
    }
    Console.ReadLine();
}
static string UploadArchive(IAmazonGlacier glacier)
{
    var ms = new MemoryStream(Encoding.UTF8.GetBytes("some data to archive"));
    var treeHash = TreeHashGenerator.CalculateTreeHash(ms);
    var req = new UploadArchiveRequest
    {
        VaultName = VaultName,
        Body = ms,
        Checksum = treeHash
    };

    Task<UploadArchiveResponse> res = glacier.UploadArchiveAsync(req);
    Task.WaitAll(res);
    if (res.IsCompletedSuccessfully)
    {
        Console.WriteLine($"Uploaded archive, ArchiveId: {res.Result.ArchiveId}");
        return res.Result.ArchiveId;
    }
    return string.Empty;
}
// Commented out because this would leave data in Glacier that would cost money.
//[TestMethod]
//[TestCategory("Glacier")]
public void TestMultiPartUpload()
{
    var testingVaultName = "dotnet-sdk-test" + DateTime.Now.Ticks.ToString();
    Client.CreateVault(new CreateVaultRequest() { VaultName = testingVaultName });

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        ArchiveDescription = "dotnet mp upload",
        PartSize = 1048576
    };
    InitiateMultipartUploadResponse initResponse = Client.InitiateMultipartUpload(initRequest);
    string uploadId = initResponse.UploadId;

    // 1.5 MiB of test data: one full 1 MiB part plus a final half-size part.
    MemoryStream totalStream = new MemoryStream();
    for (int i = 0; i < 1048576 + 1048576 / 2; i++)
    {
        totalStream.WriteByte((byte)(i % byte.MaxValue));
    }
    totalStream.Position = 0;

    // Per-part SHA-256 tree hashes.
    List<string> partChecksums = new List<string>();
    long currentPosition = 0;
    long partSize = 1048576;
    while (totalStream.Position < totalStream.Length)
    {
        Stream partStream = GlacierUtils.CreatePartStream(totalStream, partSize);
        string checkSum = TreeHashGenerator.CalculateTreeHash(partStream);
        partChecksums.Add(checkSum);

        UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
        {
            VaultName = testingVaultName,
            UploadId = uploadId,
            Body = partStream,
            Checksum = checkSum
        };
        partRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);
        Client.UploadMultipartPart(partRequest);
        currentPosition += partStream.Length;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
    {
        VaultName = testingVaultName,
        UploadId = uploadId,
        ArchiveSize = totalStream.Length.ToString(),
        Checksum = TreeHashGenerator.CalculateTreeHash(partChecksums)
    };
    CompleteMultipartUploadResponse compResponse = Client.CompleteMultipartUpload(compRequest);
    Assert.IsNotNull(compResponse.Location);
    Assert.IsNotNull(compResponse.Checksum);

    string archiveId = compResponse.ArchiveId;
    DeleteArchiveRequest delArchiveRequest = new DeleteArchiveRequest()
    {
        VaultName = testingVaultName,
        ArchiveId = archiveId
    };
    Client.DeleteArchive(delArchiveRequest);
}
private void ThreadUpload(object f_object)
{
    ThreadData objData = null;
    AmazonGlacier client = null;
    Stream uploadPartStream = null;
    try
    {
        objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        client = objData.client;
        long currentPosition = objData.currentPosition;
        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        // For the last part the shared buffer may be larger than the remaining data,
        // so the stream is sized to the actual part length; SetRange below matches it.
        int memoryBufferIndex = 0;                                    // index into the buffer at which the stream begins
        int memoryBufferCount = (int)objData.uploadPartStream.Length; // length of the stream in bytes
        uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBufferCount);

        // To ensure that part data is not corrupted in transmission, compute a SHA-256
        // tree hash of the part and include it in the request. Upon receiving the part
        // data, Amazon Glacier also computes a SHA-256 tree hash; if the values don't
        // match, the operation fails.
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);

        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Sent " + Convert.ToString(mpr.ContentLength) + " bytes for Part :" + Convert.ToString(objData.currentPosition));
    }
    catch (Exception e)
    {
        Form1.log.Error(e.ToString());
        Form1.log.Error(e.StackTrace);
        Form1.log.Info("Retrying Part " + Convert.ToString(objData.currentPosition));

        // Retry up to 10 times, waiting longer after each failed attempt.
        int attempt = 0;
        bool successfulPartUpload = false;
        while (attempt < 10 && !successfulPartUpload)
        {
            successfulPartUpload = retryUpload(f_object);
            attempt++;
            if (!successfulPartUpload)
            {
                Thread.Sleep(4000 * attempt);
            }
        }
    }
    finally
    {
        if (Interlocked.Decrement(ref ActiveWorkerCount) <= 0)
        {
            AllWorkerCompletedEvent.Set();
        }
        uploadPartStream = null;
        f_object = null;
        objData = null;
        client = null;
    }
}
internal override void Execute()
{
    FileInfo fileInfo = new FileInfo(filePath);
    FileStream fileStream = File.OpenRead(filePath);
    string uploadId = null;
    try
    {
        this.currentUploadProgressArgs = new StreamTransferProgressArgs(0, 0, fileInfo.Length);
        long partSize = CalculatePartSize(fileInfo.Length);
        InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
        {
            AccountId = this.options.AccountId,
            ArchiveDescription = archiveDescription,
            VaultName = vaultName,
            PartSize = partSize
        };
        initiateRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;
        InitiateMultipartUploadResult initiateResult =
            this.manager.GlacierClient.InitiateMultipartUpload(initiateRequest).InitiateMultipartUploadResult;
        uploadId = initiateResult.UploadId;

        List<string> partTreeHashes = new List<string>();
        long currentPosition = 0;
        while (currentPosition < fileInfo.Length)
        {
            long length = partSize;
            if (currentPosition + partSize > fileInfo.Length)
            {
                length = fileInfo.Length - currentPosition;
            }

            PartStreamWrapper partStream = new PartStreamWrapper(fileStream, length);
            string checksum = TreeHashGenerator.CalculateTreeHash(partStream);
            partTreeHashes.Add(checksum);

            UploadMultipartPartRequest uploadRequest = new UploadMultipartPartRequest
            {
                AccountId = this.options.AccountId,
                Checksum = checksum,
                Body = partStream,
                Range = "bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*",
                UploadId = uploadId,
                VaultName = vaultName
            };
            uploadRequest.StreamTransferProgress += this.ProgressCallback;
            uploadRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;
            this.manager.GlacierClient.UploadMultipartPart(uploadRequest);
            currentPosition += partSize;
        }

        string totalFileChecksum = TreeHashGenerator.CalculateTreeHash(partTreeHashes);
        string archiveSize = fileInfo.Length.ToString();
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest
        {
            AccountId = this.options.AccountId,
            ArchiveSize = archiveSize,
            VaultName = vaultName,
            Checksum = totalFileChecksum,
            UploadId = uploadId
        };
        compRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;
        CompleteMultipartUploadResult completeMultipartUploadResult =
            this.manager.GlacierClient.CompleteMultipartUpload(compRequest).CompleteMultipartUploadResult;
        string archiveId = completeMultipartUploadResult.ArchiveId;
        this.UploadResult = new UploadResult(archiveId, totalFileChecksum);
    }
    catch (Exception)
    {
        // If we hit an unrecoverable error, abort the upload.
        if (!string.IsNullOrEmpty(uploadId))
        {
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
            {
                AccountId = this.options.AccountId,
                VaultName = this.vaultName,
                UploadId = uploadId
            };
            abortRequest.BeforeRequestEvent += new UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync;
            this.manager.GlacierClient.AbortMultipartUpload(abortRequest);
        }
        throw;
    }
    finally
    {
        try { fileStream.Close(); }
        catch (Exception) { }
    }
}
internal void Execute()
{
    long contentLength = -1;
    string glacierProvidedCheckSum = null;
    string rangeValue = null;
    Stream input = null;
    Stream output = null;
    try
    {
        // Make sure the directory exists to write to.
        FileInfo fi = new FileInfo(filePath);
        Directory.CreateDirectory(fi.DirectoryName);

        FileMode fileMode = FileMode.Create;
        int retryAttempts = 0;
        byte[] buffer = new byte[1024 * 1024 * 5];
        long transferredBytes = 0;

        MemoryStream partStream = new MemoryStream(new byte[PART_STREAM_HASH_SIZE]);
        LinkedList<string> hashes = new LinkedList<string>();

        while (true)
        {
            try
            {
                output = File.Open(filePath, fileMode, FileAccess.Write, FileShare.None);
                try
                {
                    GetJobOutputRequest getJobOutputRequest = new GetJobOutputRequest()
                    {
                        AccountId = this.options.AccountId,
                        VaultName = this.vaultName,
                        JobId = jobId,
                        Range = rangeValue
                    };
                    ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)getJobOutputRequest).AddBeforeRequestHandler(
                        new ArchiveTransferManager.UserAgentPostFix("DownloadArchive").UserAgentRequestEventHandlerSync);
                    GetJobOutputResponse jobOutputResponse = this.manager.GlacierClient.GetJobOutput(getJobOutputRequest);
                    if (contentLength < 0)
                    {
                        contentLength = jobOutputResponse.ContentLength;
                        glacierProvidedCheckSum = jobOutputResponse.Checksum;
                    }

                    input = new BufferedStream(jobOutputResponse.Body);

                    long totalBytesFromGetJobOutput = jobOutputResponse.ContentLength;
                    long bytesReadFromGetJobOutput = 0;
                    int bytesRead = 0;
                    do
                    {
                        bytesRead = input.Read(buffer, 0, buffer.Length);
                        if (bytesRead > 0)
                        {
                            bytesReadFromGetJobOutput += bytesRead;
                            output.Write(buffer, 0, bytesRead);
                            transferredBytes += bytesRead;

                            // Feed the bytes into a fixed-size part buffer so the tree hash
                            // can be computed incrementally, one part at a time.
                            int offset = 0;
                            if (partStream.Position + bytesRead > PART_STREAM_HASH_SIZE)
                            {
                                var length = PART_STREAM_HASH_SIZE - (int)partStream.Position;
                                partStream.Write(buffer, 0, length);
                                offset = length;
                            }
                            else
                            {
                                partStream.Write(buffer, 0, bytesRead);
                                offset = bytesRead;
                            }

                            if (partStream.Position == PART_STREAM_HASH_SIZE)
                            {
                                partStream.Position = 0;
                                hashes.AddLast(TreeHashGenerator.CalculateTreeHash(partStream));
                            }

                            if (offset != bytesRead)
                            {
                                partStream.Write(buffer, offset, bytesRead - offset);
                            }

                            // Make callback on progress.
                            AWSSDKUtils.InvokeInBackground(
                                this.options.StreamTransferProgress,
                                new Runtime.StreamTransferProgressArgs(bytesRead, transferredBytes, contentLength),
                                this.manager);
                        }
                        if (retryAttempts > 0)
                        {
                            retryAttempts = 0; // Reset retry attempts since we were able to successfully write more data to disk.
                        }
                    } while (bytesReadFromGetJobOutput < totalBytesFromGetJobOutput);

                    // Compute the hash of the last remaining bytes.
                    if (partStream.Position != 0)
                    {
                        partStream.SetLength(partStream.Position);
                        partStream.Position = 0;
                        hashes.AddLast(TreeHashGenerator.CalculateTreeHash(partStream));
                    }
                    break;
                }
                finally
                {
                    output.Close();
                    output = null;
                    try
                    {
                        if (input != null)
                        {
                            input.Close();
                        }
                    }
                    catch (Exception) { }
                }
            }
            catch (Exception e)
            {
                var age = e as AmazonGlacierException;
                if (age != null && age.StatusCode == HttpStatusCode.NotFound)
                {
                    throw;
                }

                // Resume the download from where the file on disk left off.
                fileMode = FileMode.Append;
                rangeValue = string.Format(CultureInfo.InvariantCulture, "bytes={0}-", new FileInfo(filePath).Length);
                retryAttempts++;
                if (retryAttempts <= DownloadFileCommand.MAX_OPERATION_RETRY)
                {
                    Console.WriteLine("Error and going to retry: {0}", e.Message);
                    Console.WriteLine(e.StackTrace);
                    Thread.Sleep(60 * 1000);
                }
                else
                {
                    throw;
                }
            }
        }

        // If the job output is a vault inventory then Glacier does not return a tree hash.
        if (!string.IsNullOrEmpty(glacierProvidedCheckSum))
        {
            var computedCheckSum = TreeHashGenerator.CalculateTreeHash(hashes);
            if (!string.Equals(glacierProvidedCheckSum, computedCheckSum, StringComparison.OrdinalIgnoreCase))
            {
                throw new AmazonGlacierException("Checksum of the downloaded file does not match the checksum reported by Amazon Glacier.");
            }
        }
    }
    catch (IOException e)
    {
        throw new IOException("Unable to save archive to disk", e);
    }
    finally
    {
        try
        {
            if (input != null)
            {
                input.Close();
            }
        }
        catch (Exception) { }
        try
        {
            if (output != null)
            {
                output.Close();
            }
        }
        catch (Exception) { }
    }
}
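The final checksum comparison above can also be done against a file already on disk. A minimal standalone sketch, assuming the archive was fully downloaded and Glacier's reported checksum was recorded at download time:

// Hypothetical standalone verifier; glacierChecksum is assumed to have been
// saved from GetJobOutputResponse.Checksum when the archive was downloaded.
static bool VerifyDownloadedArchive(string filePath, string glacierChecksum)
{
    using (var fs = File.OpenRead(filePath))
    {
        // Recompute the SHA-256 tree hash over the whole file and compare it,
        // case-insensitively, to the checksum Glacier reported.
        string computed = TreeHashGenerator.CalculateTreeHash(fs);
        return string.Equals(glacierChecksum, computed, StringComparison.OrdinalIgnoreCase);
    }
}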
public async Task<ImageUploadedModel> UploadImage(
    string bucketName,
    string bucketUrl,
    string objectKey,
    S3StorageClass storageClass,
    S3CannedACL permissions,
    string glacierVaultName,
    ImageInfo image)
{
    ImageUploadedModel model = new ImageUploadedModel();
    try
    {
        PutObjectRequest putRequest = new PutObjectRequest
        {
            BucketName = bucketName,
            Key = objectKey,
            StorageClass = storageClass,
            CannedACL = permissions,
            ContentType = image.MimeType,
            AutoCloseStream = false
        };
        putRequest.Metadata.Add("width", image.Width.ToString());
        putRequest.Metadata.Add("height", image.Height.ToString());
        putRequest.InputStream = image.Image;

        byte[] md5Hash = image.Image.Md5Hash();
        putRequest.MD5Digest = md5Hash.ToBase64String();

        PutObjectResponse response = await S3Client.PutObjectAsync(putRequest);

        string eTag = response.ETag.Trim('"').ToLowerInvariant();
        string expectedETag = md5Hash.ToS3ETagString();
        if (eTag != expectedETag)
        {
            throw new Exception("The ETag received from S3 doesn't match the ETag computed before uploading. This usually indicates that the image has been corrupted in transit.");
        }

        // Upload to Glacier if needed.
        if (!string.IsNullOrWhiteSpace(glacierVaultName))
        {
            ArchiveDescription description = new ArchiveDescription
            {
                ObjectKey = objectKey,
                ContentType = image.MimeType,
                Width = image.Width,
                Height = image.Height
            };

            // Reset the stream position in the image.
            image.Image.Position = 0;
            UploadArchiveRequest glacierRequest = new UploadArchiveRequest
            {
                ArchiveDescription = JsonConvert.SerializeObject(description, Formatting.None),
                Body = image.Image,
                VaultName = glacierVaultName,
                Checksum = TreeHashGenerator.CalculateTreeHash(image.Image)
            };
            UploadArchiveResponse glacierResponse = await GlacierClient.UploadArchiveAsync(glacierRequest);
            model.ArchiveId = glacierResponse.ArchiveId;
        }

        model.ObjectKey = objectKey;
        model.ETag = eTag;
        model.ObjectLocation = bucketUrl + objectKey;
        model.VersionId = response.VersionId;
    }
    catch (Exception ex)
    {
        model.Exception = ex;
    }
    return model;
}
internal override async Task ExecuteAsync()
{
    FileInfo fileInfo = new FileInfo(filePath);
    FileStream fileStream = File.OpenRead(filePath);
    string uploadId = null;
    try
    {
        this.currentUploadProgressArgs = new StreamTransferProgressArgs(0, 0, fileInfo.Length);
        long partSize = CalculatePartSize(fileInfo.Length);
        InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest()
        {
            AccountId = this.options.AccountId,
            ArchiveDescription = archiveDescription,
            VaultName = vaultName,
            PartSize = partSize
        };
        ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)initiateRequest).AddBeforeRequestHandler(
            new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);
        InitiateMultipartUploadResponse initiateResponse =
            await this.manager.GlacierClient.InitiateMultipartUploadAsync(initiateRequest).ConfigureAwait(false);
        uploadId = initiateResponse.UploadId;

        List<string> partTreeHashes = new List<string>();
        long currentPosition = 0;
        while (currentPosition < fileInfo.Length)
        {
            long length = partSize;
            if (currentPosition + partSize > fileInfo.Length)
            {
                length = fileInfo.Length - currentPosition;
            }

            Stream partStream = new PartialWrapperStream(fileStream, length);
            string checksum = TreeHashGenerator.CalculateTreeHash(partStream);
            partTreeHashes.Add(checksum);

            UploadMultipartPartRequest uploadRequest = new UploadMultipartPartRequest()
            {
                AccountId = this.options.AccountId,
                Checksum = checksum,
                Range = "bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*",
                UploadId = uploadId,
                VaultName = vaultName,
                Body = partStream
            };
            uploadRequest.StreamTransferProgress += this.ProgressCallback;
            ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).AddBeforeRequestHandler(
                new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);
            await this.manager.GlacierClient.UploadMultipartPartAsync(uploadRequest).ConfigureAwait(false);
            currentPosition += partSize;
        }

        string totalFileChecksum = TreeHashGenerator.CalculateTreeHash(partTreeHashes);
        string archiveSize = fileInfo.Length.ToString(CultureInfo.InvariantCulture);
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        {
            AccountId = this.options.AccountId,
            ArchiveSize = archiveSize,
            VaultName = vaultName,
            Checksum = totalFileChecksum,
            UploadId = uploadId
        };
        ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)compRequest).AddBeforeRequestHandler(
            new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);
        CompleteMultipartUploadResponse completeMultipartUploadResponse =
            await this.manager.GlacierClient.CompleteMultipartUploadAsync(compRequest).ConfigureAwait(false);
        string archiveId = completeMultipartUploadResponse.ArchiveId;
        this.UploadResult = new UploadResult(archiveId, totalFileChecksum);
    }
    catch (Exception)
    {
        // If we hit an unrecoverable error, abort the upload.
        if (!string.IsNullOrEmpty(uploadId))
        {
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
            {
                AccountId = this.options.AccountId,
                VaultName = this.vaultName,
                UploadId = uploadId
            };
            ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)abortRequest).AddBeforeRequestHandler(
                new ArchiveTransferManager.UserAgentPostFix("MultiUpload").UserAgentRequestEventHandlerSync);
            this.manager.GlacierClient.AbortMultipartUploadAsync(abortRequest).Wait();
        }
        throw;
    }
    finally
    {
        try { fileStream.Dispose(); }
        catch (Exception) { }
    }
}
private async Task<string> UploadArchiveAsync(Stream stream, string archiveDescription)
{
    await TestConnectionAsync();

    var streamSize = new Size(stream.Length, SizeUnit.Bytes);
    if (streamSize > TotalArchiveSizeLimit)
    {
        throw new InvalidOperationException($@"Can't upload more than 40TB to AWS Glacier, current upload size: {streamSize}");
    }

    var streamLength = streamSize.GetValue(SizeUnit.Bytes);
    try
    {
        _progress?.UploadProgress.SetTotal(streamLength);

        if (streamSize > MaxUploadArchiveSize)
        {
            // Archive is too large for a single UploadArchive call; use a chunked (multipart) upload.
            var partSize = GetPartSize(streamLength);
            _progress?.UploadProgress.ChangeType(UploadType.Chunked);

            var initiateResponse = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
            {
                ArchiveDescription = archiveDescription,
                VaultName = _vaultName,
                AccountId = "-",
                PartSize = partSize
            }, _cancellationToken);

            var partChecksums = new List<string>();
            var currentPosition = 0L;
            while (stream.Position < streamLength)
            {
                var partStream = GlacierUtils.CreatePartStream(stream, partSize);
                var partChecksum = TreeHashGenerator.CalculateTreeHash(partStream);
                partChecksums.Add(partChecksum);

                var uploadRequest = new UploadMultipartPartRequest
                {
                    UploadId = initiateResponse.UploadId,
                    VaultName = _vaultName,
                    AccountId = "-",
                    Body = partStream,
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    },
                    Checksum = partChecksum
                };
                uploadRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);

                await _client.UploadMultipartPartAsync(uploadRequest, _cancellationToken);
                currentPosition += partStream.Length;
            }

            var completeResponse = await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
            {
                AccountId = "-",
                VaultName = _vaultName,
                ArchiveSize = streamLength.ToString(),
                UploadId = initiateResponse.UploadId,
                Checksum = TreeHashGenerator.CalculateTreeHash(partChecksums)
            }, _cancellationToken);

            return completeResponse.ArchiveId;
        }

        var response = await _client.UploadArchiveAsync(new UploadArchiveRequest
        {
            AccountId = "-",
            ArchiveDescription = archiveDescription,
            Body = stream,
            VaultName = _vaultName,
            StreamTransferProgress = (_, args) =>
            {
                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                _progress?.OnUploadProgress?.Invoke();
            },
            Checksum = TreeHashGenerator.CalculateTreeHash(stream)
        }, _cancellationToken);

        return response.ArchiveId;
    }
    finally
    {
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}
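GetPartSize is referenced above but not shown. A possible implementation, assuming the standard Glacier constraints: part sizes must be 1 MiB times a power of two, up to 4 GiB, and a multipart upload may have at most 10,000 parts.

// Hypothetical implementation of the GetPartSize helper referenced above.
// Glacier requires part sizes of 1 MiB times a power of two, up to 4 GiB,
// and allows at most 10,000 parts per multipart upload.
private static long GetPartSize(long totalLength)
{
    const long MiB = 1024 * 1024;
    const long MaxPartSize = 4096 * MiB;   // 4 GiB
    const int MaxParts = 10000;

    long partSize = MiB;
    // Double the part size until 10,000 parts are enough to cover the archive.
    while (partSize < MaxPartSize && partSize * MaxParts < totalLength)
    {
        partSize *= 2;
    }
    return partSize;
}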