public override void Invoke(AWSCredentials creds, RegionEndpoint region, int maxItems)
{
    AmazonGlacierConfig config = new AmazonGlacierConfig();
    config.RegionEndpoint = region;
    ConfigureClient(config);
    AmazonGlacierClient client = new AmazonGlacierClient(creds, config);

    ListMultipartUploadsResponse resp = new ListMultipartUploadsResponse();
    do
    {
        // Carry the marker from the previous page forward; the original
        // rebuilt the request without it and re-fetched the first page forever.
        ListMultipartUploadsRequest req = new ListMultipartUploadsRequest
        {
            Limit = maxItems,
            UploadIdMarker = resp.Marker
        };

        resp = client.ListMultipartUploads(req);
        CheckError(resp.HttpStatusCode, "200");

        foreach (var obj in resp.UploadsList)
        {
            AddObject(obj);
        }
    } while (!string.IsNullOrEmpty(resp.Marker));
}
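A natural companion to the lister is cleanup of the stale uploads it reports. Below is a minimal sketch, not part of the original code: it assumes the caller supplies the vault name (the Invoke override above does not carry one) and reuses the same synchronous client. AbortMultipartUpload is the standard Glacier call for discarding an incomplete upload.

// Sketch only: discard incomplete uploads surfaced by the lister above.
// vaultName is an assumed parameter; AbortStaleUploads is a hypothetical helper.
private static void AbortStaleUploads(IAmazonGlacier client, string vaultName, IEnumerable<UploadListElement> uploads)
{
    foreach (var upload in uploads)
    {
        client.AbortMultipartUpload(new AbortMultipartUploadRequest(vaultName, upload.MultipartUploadId));
        Console.WriteLine($"Aborted upload {upload.MultipartUploadId}");
    }
}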
private static void HandleInventory(string[] args)
{
    // Check the argument count before indexing into args; the original
    // read args[2] first and could throw IndexOutOfRangeException.
    if (args.Length < 5)
    {
        Console.WriteLine("args should be aws_key aws_secret region vault_name output_filename");
        return;
    }

    var region = RegionEndpoint.EnumerableAllRegions.SingleOrDefault(reg => reg.SystemName == args[2]);
    if (region == null)
    {
        Console.WriteLine("args should be aws_key aws_secret region vault_name output_filename");
        return;
    }

    var aws_key = args[0];
    var aws_secret = args[1];
    var vault_name = args[3];
    var filename = args[4];

    var creds = new BasicAWSCredentials(aws_key, aws_secret);
    var config = new AmazonGlacierConfig { RegionEndpoint = region, Timeout = TimeSpan.FromDays(10) };
    var client = new AmazonGlacierClient(creds, config);

    // "inventory-retrieval" asks Glacier to produce a JSON inventory of the vault.
    var initReq = new InitiateJobRequest(vault_name, new JobParameters("JSON", "inventory-retrieval", null, null));

    // Block until the job id is known; the original fire-and-forget
    // ContinueWith could let the process exit before the file was written.
    var job = client.InitiateJobAsync(initReq).Result;
    Console.WriteLine($"Job ID: {job.JobId}");
    File.WriteAllText(filename, job.JobId);
    Console.WriteLine("Retrieval job initiated");
}
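The handlers read positional arguments starting at aws_key, which suggests a thin entry point strips a leading verb before dispatching. A minimal sketch under that assumption; the verb names and the PartSize/ConcurrencyLimit constants (used by HandleUpload below) are hypothetical, though Glacier does require part sizes to be a power of two between 1 MiB and 4 GiB.

// Sketch of an assumed entry point; verbs and constant values are hypothetical.
private const long PartSize = 16 * 1024 * 1024;   // power of two, 1 MiB..4 GiB per Glacier's rules
private const int ConcurrencyLimit = 4;           // assumed cap on in-flight part uploads

public static void Main(string[] args)
{
    var rest = args.Skip(1).ToArray();   // handlers expect aws_key first
    switch (args.Length > 0 ? args[0] : "")
    {
        case "inventory": HandleInventory(rest); break;
        case "status":    HandleStatus(rest);    break;
        case "upload":    HandleUpload(rest);    break;
        default: Console.WriteLine("verbs: inventory | status | upload"); break;
    }
}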
protected IAmazonGlacier CreateClient(AWSCredentials credentials, RegionEndpoint region)
{
    var config = new AmazonGlacierConfig { RegionEndpoint = region };
    Amazon.PowerShell.Utils.Common.PopulateConfig(this, config);
    this.CustomizeClientConfig(config);

    var client = new AmazonGlacierClient(credentials, config);
    client.BeforeRequestEvent += RequestEventHandler;
    client.AfterResponseEvent += ResponseEventHandler;
    return client;
}
private static void HandleStatus(string[] args)
{
    // Check the argument count before indexing into args; the original
    // read args[2] and args[5] before validating the length.
    if (args.Length < 7)
    {
        Console.WriteLine("args should be aws_key aws_secret region vault_name job_id interval_secs output_filename");
        return;
    }

    int interval;
    var region = RegionEndpoint.EnumerableAllRegions.SingleOrDefault(reg => reg.SystemName == args[2]);
    if (!int.TryParse(args[5], out interval) || region == null)
    {
        Console.WriteLine("args should be aws_key aws_secret region vault_name job_id interval_secs output_filename");
        return;
    }

    var aws_key = args[0];
    var aws_secret = args[1];
    var vault_name = args[3];
    var job_id = args[4];
    var filename = args[6];

    var creds = new BasicAWSCredentials(aws_key, aws_secret);
    var config = new AmazonGlacierConfig { RegionEndpoint = region, Timeout = TimeSpan.FromDays(10) };
    var client = new AmazonGlacierClient(creds, config);

    var descReq = new DescribeJobRequest(vault_name, job_id);
    do
    {
        Console.WriteLine("Checking status...");
        var jobStatus = client.DescribeJobAsync(descReq).Result;
        if (jobStatus.Completed)
        {
            Console.WriteLine("Job completed.");
            break;
        }
        Console.WriteLine("Job incomplete.");
        Console.WriteLine($"Job status: {jobStatus.StatusCode}");
        // interval is given in seconds, but Thread.Sleep(int) takes
        // milliseconds; the original slept for interval ms.
        Thread.Sleep(TimeSpan.FromSeconds(interval));
    } while (true);

    // Fetch the first GiB of job output; an inventory larger than this
    // would need additional ranged requests.
    var retrReq = new GetJobOutputRequest(vault_name, job_id, "bytes=0-1073741824");
    var output = client.GetJobOutputAsync(retrReq).Result;
    var json = new StreamReader(output.Body).ReadToEnd();
    File.WriteAllText(filename, json);
    Console.WriteLine($"Output written to {filename}");
}
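Once HandleStatus has written the inventory JSON to disk, listing its archives is a small parsing step. A minimal sketch using Newtonsoft.Json; the ArchiveList, ArchiveId, Size, and ArchiveDescription field names follow Glacier's documented inventory format, and the file path is a placeholder.

// Sketch only: print each archive recorded in a saved inventory file.
using System;
using System.IO;
using Newtonsoft.Json.Linq;

var inventory = JObject.Parse(File.ReadAllText("inventory.json"));   // placeholder path
foreach (var archive in inventory["ArchiveList"])
{
    Console.WriteLine($"{archive["ArchiveId"]} {archive["Size"]} bytes - {archive["ArchiveDescription"]}");
}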
private OperationResult EstablishClient(AddonManifest manifest, DeveloperOptions devOptions, out AmazonGlacierClient client)
{
    OperationResult result;
    bool requireCreds;
    //var accessKey = manifest.ProvisioningUsername;
    //var secretAccessKey = manifest.ProvisioningPassword;
    var accessKey = devOptions.AccessKey;
    var secretAccessKey = devOptions.SecretAccessKey;

    var prop = manifest.Properties.First(
        p => p.Key.Equals("requireDevCredentials", StringComparison.InvariantCultureIgnoreCase));

    if (bool.TryParse(prop.Value, out requireCreds) && requireCreds)
    {
        if (!ValidateDevCreds(devOptions))
        {
            client = null;
            result = new OperationResult
            {
                IsSuccess = false,
                EndUserMessage = "The add-on requires that developer credentials are specified but none were provided."
            };
            return result;
        }
        accessKey = devOptions.AccessKey;
        secretAccessKey = devOptions.SecretAccessKey;
    }

    var config = new AmazonGlacierConfig { RegionEndpoint = RegionEndpoint.USEast1 };
    // Use the locals selected above; the original passed devOptions.AccessKey
    // and devOptions.SecretAccessKey directly, leaving the selection dead code.
    client = new AmazonGlacierClient(accessKey, secretAccessKey, config);
    result = new OperationResult { IsSuccess = true };
    return result;
}
private static void HandleUpload(string[] args)
{
    // Check the argument count before indexing into args.
    if (args.Length < 6)
    {
        Console.WriteLine("args should be aws_key aws_secret region vault_name description filename");
        return;
    }

    var region = RegionEndpoint.EnumerableAllRegions.SingleOrDefault(reg => reg.SystemName == args[2]);
    if (region == null)
    {
        Console.WriteLine("args should be aws_key aws_secret region vault_name description filename");
        return;
    }

    var aws_key = args[0];
    var aws_secret = args[1];
    var vault_name = args[3];
    var description = args[4];
    var filename = args[5];

    var creds = new BasicAWSCredentials(aws_key, aws_secret);
    var config = new AmazonGlacierConfig { RegionEndpoint = region, Timeout = TimeSpan.FromDays(10) };
    var client = new AmazonGlacierClient(creds, config);

    var initReq = new InitiateMultipartUploadRequest(vault_name, description, PartSize);
    var ts = new CancellationTokenSource();
    var completed = 0;
    var started = 0;

    try
    {
        var res = client.InitiateMultipartUploadAsync(initReq, ts.Token).Result;
        var promises = new List<Task<UploadMultipartPartResponse>>();
        var sem = new SemaphoreSlim(ConcurrencyLimit);   // caps concurrent in-flight parts
        long totalSize = 0;
        int totalParts = 0;

        using (var fs = new FileStream(filename, FileMode.Open))
        {
            totalSize = fs.Length;
            Console.WriteLine($"Preparing to upload {ByteSize.FromBytes(totalSize)}");
            // Round up so a trailing partial part is counted exactly once;
            // the original overcounted when the size was a multiple of PartSize.
            totalParts = (int)((fs.Length + PartSize - 1) / PartSize);

            bool noErrors = true;
            while (noErrors)
            {
                sem.Wait();
                var arr = new byte[PartSize];
                var start = fs.Position;
                var read = fs.Read(arr, 0, (int)PartSize);
                var check = TreeHasher.ComputeArrayHashString(arr, read);

                var partReq = new UploadMultipartPartRequest(vault_name, res.UploadId, check,
                    $"bytes {start}-{start + read - 1}/*", new MemoryStream(arr, 0, read));
                var promise = client.UploadMultipartPartAsync(partReq, ts.Token);
                Interlocked.Increment(ref started);
                Console.WriteLine($"Started {started} out of {totalParts}");

                promise.ContinueWith(tsk =>
                {
                    // Always release the semaphore; the original skipped the
                    // release on the fault path, which could deadlock the loop.
                    sem.Release();
                    if (tsk.IsFaulted)
                    {
                        Console.WriteLine($"Exception encountered: {tsk.Exception}");
                        noErrors = false;
                        return;
                    }
                    Interlocked.Increment(ref completed);
                    Console.WriteLine($"{completed} out of {totalParts} completed.");
                });
                promises.Add(promise);

                // Stop after the final part. The original compared against
                // fs.Length - 1, which could drop the last byte.
                if (read < PartSize || fs.Position >= fs.Length)
                {
                    break;
                }
            }
        }

        // Surfaces any part-upload failure as an AggregateException.
        Task.WaitAll(promises.ToArray());

        using (var fs = new FileStream(filename, FileMode.Open))
        {
            var check = TreeHasher.ComputeHashString(fs);
            var finisher = new CompleteMultipartUploadRequest(vault_name, res.UploadId, totalSize.ToString(), check);
            Console.WriteLine("Finishing up");
            Console.WriteLine($"Computed checksum {check}");

            var result = client.CompleteMultipartUploadAsync(finisher, ts.Token).Result;
            Console.WriteLine($"Completed: {result.Checksum}");
            Console.WriteLine($"Calculated: {check}");
            var match = string.Equals(result.Checksum, check, StringComparison.InvariantCultureIgnoreCase) ? "" : "not ";
            Console.WriteLine($"Checksums do {match}match.");
            Console.WriteLine($"Archive ID: {result.ArchiveId} Location: {result.Location}");
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine($"Exception thrown: {ex.GetType().Name} - {ex.Message}");
        Console.WriteLine($"Full exception: {ex}");
    }
}
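The upload handler leans on a TreeHasher helper that is not shown. Glacier's documented checksum is a SHA-256 tree hash: hash each 1 MiB chunk of the payload, then combine adjacent digest pairs level by level until one remains. A minimal sketch of what ComputeHashString might look like under that assumption; it is not the original helper.

// Sketch only: SHA-256 tree hash per Glacier's documented algorithm.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Security.Cryptography;

public static class TreeHasherSketch
{
    private const int ChunkSize = 1024 * 1024;   // Glacier tree hashes use 1 MiB leaves

    public static string ComputeHashString(Stream stream)
    {
        var hashes = new List<byte[]>();
        var buffer = new byte[ChunkSize];
        using (var sha = SHA256.Create())
        {
            int read;
            while ((read = ReadChunk(stream, buffer)) > 0)
            {
                hashes.Add(sha.ComputeHash(buffer, 0, read));
            }
            if (hashes.Count == 0)
            {
                hashes.Add(sha.ComputeHash(new byte[0]));   // empty payload edge case
            }
            // Combine adjacent pairs; an odd trailing hash carries up unchanged.
            while (hashes.Count > 1)
            {
                var next = new List<byte[]>();
                for (int i = 0; i < hashes.Count; i += 2)
                {
                    next.Add(i + 1 < hashes.Count
                        ? sha.ComputeHash(hashes[i].Concat(hashes[i + 1]).ToArray())
                        : hashes[i]);
                }
                hashes = next;
            }
        }
        return BitConverter.ToString(hashes[0]).Replace("-", "").ToLowerInvariant();
    }

    // Fill the buffer fully so chunks stay 1 MiB-aligned even if the
    // underlying stream returns short reads.
    private static int ReadChunk(Stream s, byte[] buf)
    {
        int total = 0, n;
        while (total < buf.Length && (n = s.Read(buf, total, buf.Length - total)) > 0)
        {
            total += n;
        }
        return total;
    }
}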