AWSArchiveResult CompleteMPU(string uploadID, AmazonGlacier client, List<string> partChecksumList, FileInfo fio)
{
    try
    {
        long fileLength = fio.Length;
        string localChecksum;
        // Compute the SHA-256 tree hash over the whole archive so it can be
        // checked against the checksum Glacier assembles from the parts.
        using (FileStream inputFile = File.Open(archiveToUpload, FileMode.Open, FileAccess.Read))
        {
            byte[] treeHash = Form1.ComputeSHA256TreeHash(inputFile);
            localChecksum = BitConverter.ToString(treeHash).Replace("-", "").ToLower();
        }

        CompleteMultipartUploadRequest completeMPUrequest = new CompleteMultipartUploadRequest()
        {
            UploadId = uploadID,
            ArchiveSize = fileLength.ToString(),
            Checksum = localChecksum,
            VaultName = vaultName
        };
        CompleteMultipartUploadResponse completeMPUresponse = client.CompleteMultipartUpload(completeMPUrequest);

        AWSArchiveResult ar = new AWSArchiveResult();
        ar.ArchiveID = completeMPUresponse.CompleteMultipartUploadResult.ArchiveId;
        ar.Checksum = localChecksum;
        return ar;
    }
    catch (Exception e)
    {
        Form1.log.Error(e.ToString());
        return new AWSArchiveResult();
    }
}
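Form1.ComputeSHA256TreeHash is not reproduced in these snippets. For reference, a minimal sketch of the tree-hash algorithm Glacier documents (hash the stream in 1 MiB chunks, then repeatedly hash concatenated pairs until a single root remains); the class and method names here are placeholders, not the original helper:

using System;
using System.Collections.Generic;
using System.IO;
using System.Security.Cryptography;

static class TreeHashSketch
{
    // Minimal sketch of Glacier's SHA-256 tree hash. Assumes a FileStream-like
    // stream whose Read fills the buffer except at end of file.
    public static byte[] ComputeSHA256TreeHash(Stream stream)
    {
        const int OneMB = 1024 * 1024;
        var hashes = new List<byte[]>();
        var chunk = new byte[OneMB];
        using (var sha256 = SHA256.Create())
        {
            int read;
            while ((read = stream.Read(chunk, 0, OneMB)) > 0)
            {
                hashes.Add(sha256.ComputeHash(chunk, 0, read));
            }
            if (hashes.Count == 0)
            {
                hashes.Add(sha256.ComputeHash(new byte[0])); // empty payload
            }
            // Combine adjacent pairs level by level; an odd hash carries up unchanged.
            while (hashes.Count > 1)
            {
                var next = new List<byte[]>();
                for (int i = 0; i + 1 < hashes.Count; i += 2)
                {
                    var pair = new byte[64];
                    Buffer.BlockCopy(hashes[i], 0, pair, 0, 32);
                    Buffer.BlockCopy(hashes[i + 1], 0, pair, 32, 32);
                    next.Add(sha256.ComputeHash(pair));
                }
                if (hashes.Count % 2 == 1)
                {
                    next.Add(hashes[hashes.Count - 1]);
                }
                hashes = next;
            }
        }
        return hashes[0];
    }
}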
static List<string> UploadParts(string uploadID, AmazonGlacier client)
{
    var partChecksumList = new List<string>();
    long currentPosition = 0;
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(BackupFilePath).Length;

    WriteFileUploadProgress(currentPosition, fileLength);
    using (var fileToUpload = new FileStream(BackupFilePath, FileMode.Open, FileAccess.Read))
    {
        while (fileToUpload.Position < fileLength)
        {
            var uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
            var checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
            partChecksumList.Add(checksum);

            // Upload part.
            var uploadMPUrequest = new UploadMultipartPartRequest()
            {
                VaultName = VaultName,
                Body = uploadPartStream,
                Checksum = checksum,
                UploadId = uploadID
            };
            uploadMPUrequest.SetRange(currentPosition, currentPosition + uploadPartStream.Length - 1);
            client.UploadMultipartPart(uploadMPUrequest);

            currentPosition = currentPosition + uploadPartStream.Length;
            WriteFileUploadProgress(currentPosition, fileLength);
        }
    }
    return partChecksumList;
}
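GlacierUtils.CreatePartStream comes from the AWS sample code and is not shown here. The loop above relies on it advancing fileToUpload.Position, and the part body must be seekable so it can be hashed and then re-read during the upload, so a plausible stand-in (an assumption, not the original source) is a MemoryStream copy:

// Hypothetical stand-in for GlacierUtils.CreatePartStream: copies up to
// partSize bytes from the current file position into a seekable MemoryStream,
// advancing the base stream as a side effect.
static Stream CreatePartStream(Stream baseStream, long partSize)
{
    long remaining = baseStream.Length - baseStream.Position;
    int count = (int)Math.Min(partSize, remaining);
    var buffer = new byte[count];
    int offset = 0;
    while (offset < count)
    {
        int read = baseStream.Read(buffer, offset, count - offset);
        if (read == 0) break; // end of stream
        offset += read;
    }
    return new MemoryStream(buffer, 0, offset);
}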
public static bool checkVault(string vaultName)
{
    bool result = false;
    try
    {
        client = new AmazonGlacierClient(Amazon.RegionEndpoint.USEast1);
        DescribeVaultRequest describeVaultRequest = new DescribeVaultRequest()
        {
            VaultName = vaultName
        };
        DescribeVaultResponse describeVaultResponse = client.DescribeVault(describeVaultRequest);
        DescribeVaultResult describeVaultResult = describeVaultResponse.DescribeVaultResult;
        Console.WriteLine("\nVault description...");
        Console.WriteLine(
            "\nVaultName: " + describeVaultResult.VaultName +
            "\nVaultARN: " + describeVaultResult.VaultARN +
            "\nVaultCreationDate: " + describeVaultResult.CreationDate +
            "\nNumberOfArchives: " + describeVaultResult.NumberOfArchives +
            "\nSizeInBytes: " + describeVaultResult.SizeInBytes +
            "\nLastInventoryDate: " + describeVaultResult.LastInventoryDate);
        result = true;
    }
    catch (AmazonGlacierException e)
    {
        Console.WriteLine(e.Message);
        result = false;
    }
    catch (AmazonServiceException e)
    {
        Console.WriteLine(e.Message);
        result = false;
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
        result = false;
    }
    return result;
}
string InitiateMultipartUpload(AmazonGlacier client)
{
    InitiateMultipartUploadRequest initiateMPUrequest = new InitiateMultipartUploadRequest()
    {
        VaultName = vaultName,
        PartSize = partSize,
        ArchiveDescription = archiveDescription
    };
    InitiateMultipartUploadResponse initiateMPUresponse = client.InitiateMultipartUpload(initiateMPUrequest);
    return initiateMPUresponse.InitiateMultipartUploadResult.UploadId;
}
private bool retryUpload(object f_object)
{
    try
    {
        ThreadData objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        AmazonGlacier client = objData.client;
        long currentPosition = objData.currentPosition;
        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        //For the last part we need to make sure the buffer is the right size;
        //the uploadMPUrequest.SetRange call probably takes care of this.
        int memoryBufferIndex = 0;                                       //The index into buffer at which the stream begins.
        int memoryBuffercount = (int)(objData.uploadPartStream.Length);  //The length of the stream in bytes.
        Stream uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBuffercount);

        //To ensure that part data is not corrupted in transmission, we compute a SHA256 tree
        //hash of the part and include it in the request. Upon receiving the part data, Amazon Glacier
        //also computes a SHA256 tree hash. If these hash values don't match, the operation fails.
        //For information about computing a SHA256 tree hash, see "Computing Checksums".
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);
        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Retry Success " + Convert.ToString(mpr.ContentLength) + " bytes" + " for Part :" + Convert.ToString(objData.currentPosition));
        return true;
    }
    catch (Exception ex)
    {
        Form1.log.Error(ex.ToString());
        return false;
    }
}
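The ThreadData type is not included in these snippets. Judging from the fields accessed here and in ThreadUpload and UploadParts below, it is presumably a plain carrier object along these lines (a reconstruction, not the original):

// Hypothetical reconstruction of the per-part work item handed to the
// ThreadPool workers; field names are taken from the usages in these snippets.
class ThreadData
{
    public string uploadID;          // multipart upload ID from InitiateMultipartUpload
    public AmazonGlacier client;     // shared Glacier client
    public long currentPosition;     // byte offset of this part within the archive
    public Stream uploadPartStream;  // part stream (used for its Length)
    public byte[] buffer;            // the part's bytes, read up front
}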
static string CompleteMPU(string uploadID, AmazonGlacier client, List<string> partChecksumList)
{
    long fileLength = new FileInfo(BackupFilePath).Length;
    var completeMPUrequest = new CompleteMultipartUploadRequest()
    {
        UploadId = uploadID,
        ArchiveSize = fileLength.ToString(),
        Checksum = TreeHashGenerator.CalculateTreeHash(partChecksumList),
        VaultName = VaultName
    };
    var completeMPUresponse = client.CompleteMultipartUpload(completeMPUrequest);
    return completeMPUresponse.CompleteMultipartUploadResult.ArchiveId;
}
private static void DownloadOutput(string jobId, AmazonGlacier client, AWSArchiveRquest request)
{
    GetJobOutputRequest getJobOutputRequest = new GetJobOutputRequest()
    {
        JobId = jobId,
        VaultName = vaultName
    };
    GetJobOutputResponse getJobOutputResponse = client.GetJobOutput(getJobOutputRequest);
    GetJobOutputResult result = getJobOutputResponse.GetJobOutputResult;
    using (Stream webStream = result.Body)
    using (Stream fileToSave = File.OpenWrite(request.Description))
    {
        CopyStream(webStream, fileToSave);
    }
}
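CopyStream is not shown. A minimal sketch, assuming it is a plain buffered copy (on .NET 4 and later, Stream.CopyTo does the same job):

// Hypothetical CopyStream helper: buffered copy from input to output.
private static void CopyStream(Stream input, Stream output)
{
    var buffer = new byte[64 * 1024];
    int read;
    while ((read = input.Read(buffer, 0, buffer.Length)) > 0)
    {
        output.Write(buffer, 0, read);
    }
}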
static void RetrieveArchive(AmazonGlacier client, AWSArchiveRquest request)
{
    // Initiate job.
    InitiateJobRequest initJobRequest = new InitiateJobRequest()
    {
        VaultName = vaultName,
        JobParameters = new JobParameters()
        {
            Type = "archive-retrieval",
            ArchiveId = request.ArchiveID,
            Description = "This job is to download the archive uploaded as part of getting started",
            SNSTopic = topicArn,
        }
    };
    InitiateJobResponse initJobResponse = client.InitiateJob(initJobRequest);
    string jobId = initJobResponse.InitiateJobResult.JobId;

    // Check the queue for a message and, if the job completed successfully, download the archive.
    ProcessQueue(jobId, client, request);
}
private static void ProcessQueue(string jobId, AmazonGlacier client, AWSArchiveRquest request)
{
    var receiveMessageRequest = new ReceiveMessageRequest()
    {
        QueueUrl = queueUrl,
        MaxNumberOfMessages = 1
    };
    bool jobDone = false;
    while (!jobDone)
    {
        var receiveMessageResponse = sqsClient.ReceiveMessage(receiveMessageRequest);
        if (receiveMessageResponse.ReceiveMessageResult.Message.Count == 0)
        {
            Thread.Sleep(1000 * 60); // Poll once a minute until the job notification arrives.
            continue;
        }
        Amazon.SQS.Model.Message message = receiveMessageResponse.ReceiveMessageResult.Message[0];
        Dictionary<string, string> outerLayer = JsonConvert.DeserializeObject<Dictionary<string, string>>(message.Body);
        Dictionary<string, string> fields = JsonConvert.DeserializeObject<Dictionary<string, string>>(outerLayer["Message"]);
        string statusCode = fields["StatusCode"];

        if (string.Equals(statusCode, GlacierUtils.JOB_STATUS_SUCCEEDED, StringComparison.InvariantCultureIgnoreCase))
        {
            Console.WriteLine("Downloading job output");
            DownloadOutput(jobId, client, request); // This is where we save the job output to the specified file location.
        }
        else if (string.Equals(statusCode, GlacierUtils.JOB_STATUS_FAILED, StringComparison.InvariantCultureIgnoreCase))
        {
            Console.WriteLine("Job failed... cannot download the archive.");
        }
        jobDone = true;
        sqsClient.DeleteMessage(new DeleteMessageRequest()
        {
            QueueUrl = queueUrl,
            ReceiptHandle = message.ReceiptHandle
        });
    }
}
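The two-step deserialization above reflects how the notification arrives: the SQS message body is an SNS envelope, and its Message field is itself a JSON string carrying the Glacier job notification. Roughly (an illustrative shape, not a verbatim payload; fields other than StatusCode are elided):

// Illustrative only: the nested JSON being unwrapped above.
string sampleBody =
    "{ \"Type\": \"Notification\"," +
    "  \"Message\": \"{\\\"StatusCode\\\":\\\"Succeeded\\\",\\\"JobId\\\":\\\"...\\\"}\" }";
var outer = JsonConvert.DeserializeObject<Dictionary<string, string>>(sampleBody);
var inner = JsonConvert.DeserializeObject<Dictionary<string, string>>(outer["Message"]);
Console.WriteLine(inner["StatusCode"]); // prints: Succeeded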
private void ThreadUpload(object f_object)
{
    ThreadData objData = null;
    AmazonGlacier client = null;
    Stream uploadPartStream = null;
    try
    {
        objData = (ThreadData)f_object;
        string uploadID = objData.uploadID;
        client = objData.client;
        long currentPosition = objData.currentPosition;
        Form1.log.Info("Trying to upload Part :" + Convert.ToString(objData.currentPosition));

        //For the last part we need to make sure the buffer is the right size;
        //the uploadMPUrequest.SetRange call probably takes care of this.
        int memoryBufferIndex = 0;                                       //The index into buffer at which the stream begins.
        int memoryBuffercount = (int)(objData.uploadPartStream.Length);  //The length of the stream in bytes.
        uploadPartStream = new MemoryStream(objData.buffer, memoryBufferIndex, memoryBuffercount);

        //To ensure that part data is not corrupted in transmission, we compute a SHA256 tree
        //hash of the part and include it in the request. Upon receiving the part data, Amazon Glacier
        //also computes a SHA256 tree hash. If these hash values don't match, the operation fails.
        //For information about computing a SHA256 tree hash, see "Computing Checksums".
        string checksum = TreeHashGenerator.CalculateTreeHash(uploadPartStream);
        SHA256ConcurrentQueue.Enqueue(checksum);

        UploadMultipartPartRequest uploadMPUrequest = new UploadMultipartPartRequest()
        {
            VaultName = vaultName,
            Body = uploadPartStream,
            Checksum = checksum,
            UploadId = uploadID
        };
        uploadMPUrequest.SetRange(currentPosition, currentPosition + objData.uploadPartStream.Length - 1);
        UploadMultipartPartResponse mpr = client.UploadMultipartPart(uploadMPUrequest);
        Form1.log.Info("Sent " + Convert.ToString(mpr.ContentLength) + " bytes" + " for Part :" + Convert.ToString(objData.currentPosition));
    }
    catch (Exception e)
    {
        Form1.log.Error(e.ToString());
        Form1.log.Error(e.StackTrace);
        Form1.log.Info("Retrying Part " + Convert.ToString(objData.currentPosition));
        //Retry up to 10 times, waiting longer before each attempt.
        int fv = 0;
        bool successfulPartUpload = false;
        while (fv < 10 && !successfulPartUpload)
        {
            successfulPartUpload = retryUpload(f_object);
            fv++; // advance the counter so the loop is bounded and the backoff grows
            if (!successfulPartUpload)
            {
                Thread.Sleep(4000 * fv);
            }
        }
    }
    finally
    {
        if (Interlocked.Decrement(ref ActiveWorkerCount) <= 0)
        {
            AllWorkerCompletedEvent.Set();
        }
        uploadPartStream = null;
        f_object = null;
        objData = null;
        client = null;
    }
}
List<string> UploadParts(string uploadID, AmazonGlacier client)
{
    List<string> partChecksumList = new List<string>();
    long currentPosition = 0;
    var buffer = new byte[Convert.ToInt32(partSize)];
    long fileLength = new FileInfo(archiveToUpload).Length;
    ThreadPool.SetMaxThreads(25, 25);
    List<ThreadData> arThreadObj = new List<ThreadData>();
    FileStream fileToUpload = new FileStream(archiveToUpload, FileMode.Open, FileAccess.Read);

    //Beware - we create memory buffers for the entire file at once.
    //BBCREVISIT - use a queue for the threads and pick off that as the ThreadPool frees up resources
    while (currentPosition < fileLength)
    {
        Stream uploadPartStream = GlacierUtils.CreatePartStream(fileToUpload, partSize);
        ThreadData objData = new ThreadData();
        objData.uploadID = uploadID;
        objData.client = client;
        objData.currentPosition = currentPosition;
        objData.uploadPartStream = uploadPartStream;
        objData.buffer = new byte[Convert.ToInt32(partSize)];

        int read = 0;
        try
        {
            read = fileToUpload.Read(objData.buffer, 0, (int)partSize);
        }
        catch (Exception e)
        {
            Form1.log.Error(e.ToString());
        }
        if (read == 0) //Stream.Read returns 0 at end of file, never -1.
        {
            Form1.log.Info("Nothing to read : fileLength % partSize == 0");
            break;
        }

        arThreadObj.Add(objData);
        Form1.log.Info("Created Part : " + Convert.ToString(currentPosition) + " of Length " + uploadPartStream.Length);
        if (read != uploadPartStream.Length)
        {
            Console.WriteLine("We have a problem Houston");
        }
        currentPosition = currentPosition + uploadPartStream.Length; //We are not using the stream right now.
    }

    for (int ic = 0; ic < arThreadObj.Count; ic++)
    {
        Interlocked.Increment(ref ActiveWorkerCount);
        ThreadData objData = arThreadObj[ic];
        ThreadPool.QueueUserWorkItem(ThreadUpload, objData);
    }
    AllWorkerCompletedEvent.WaitOne();
    partChecksumList = SHA256ConcurrentQueue.ToList();
    fileToUpload.Close();
    return partChecksumList;
}
/// <summary>
/// Constructs an ArchiveTransferManager object for the specified Amazon Glacier region endpoint using the credentials
/// loaded from the application's default configuration and, if that is unsuccessful, from the Instance Profile service on an EC2 instance.
///
/// Example App.config with credentials set:
/// <code>
/// <?xml version="1.0" encoding="utf-8" ?>
/// <configuration>
///     <appSettings>
///         <add key="AWSAccessKey" value="********************"/>
///         <add key="AWSSecretKey" value="****************************************"/>
///     </appSettings>
/// </configuration>
/// </code>
/// </summary>
/// <param name="region">Amazon Glacier region endpoint</param>
public ArchiveTransferManager(RegionEndpoint region)
{
    this.glacierClient = new AmazonGlacierClient(region);
    this.shouldDispose = true;
}
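A typical call pattern for the high-level API; the vault name and file path below are illustrative placeholders:

// Illustrative usage: upload a file through ArchiveTransferManager and print
// the resulting archive ID.
var manager = new ArchiveTransferManager(RegionEndpoint.USEast1);
UploadResult uploadResult = manager.Upload("examplevault", "nightly backup", @"C:\backups\archive.zip");
Console.WriteLine("Archive ID: " + uploadResult.ArchiveId);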
/// <summary>
/// Constructs an ArchiveTransferManager object using an existing Amazon Glacier client.
/// </summary>
/// <param name="glacier">An AmazonGlacier client that is used to make service calls.</param>
public ArchiveTransferManager(AmazonGlacier glacier)
{
    this.glacierClient = glacier;
    this.shouldDispose = false;
}
static string InitiateMultipartUpload(AmazonGlacier client)
{
    var initiateMPUrequest = new InitiateMultipartUploadRequest()
    {
        VaultName = VaultName,
        PartSize = partSize,
        ArchiveDescription = ArchiveDescription
    };
    var initiateMPUresponse = client.InitiateMultipartUpload(initiateMPUrequest);
    return initiateMPUresponse.InitiateMultipartUploadResult.UploadId;
}
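Putting the three static low-level steps together; a minimal sketch, assuming the static fields (VaultName, BackupFilePath, partSize, ArchiveDescription) are configured elsewhere:

// Hypothetical driver for the static initiate/upload/complete snippets above.
static void Main(string[] args)
{
    AmazonGlacier client = new AmazonGlacierClient(Amazon.RegionEndpoint.USEast1);
    string uploadId = InitiateMultipartUpload(client);               // 1. start the multipart upload
    List<string> partChecksums = UploadParts(uploadId, client);      // 2. upload parts, collecting tree hashes
    string archiveId = CompleteMPU(uploadId, client, partChecksums); // 3. commit and get the archive ID
    Console.WriteLine("Archive uploaded. Archive ID: " + archiveId);
}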