public ListObjects(ListObjectsRequest request) : ListObjectsResponse
request | ListObjectsRequest | Container for the necessary parameters to execute the ListObjects service method. |
return | ListObjectsResponse |
/// <summary>
/// Rebuilds the _Releases list from the "assets.minecraft.net" bucket,
/// keeping only minecraft.jar entries, classified as Release / Snapshot /
/// Pre-release and sorted newest-first.
/// </summary>
public void RefreshAssetList()
{
    // NOTE(review): both keys are null — presumably anonymous access to this
    // public bucket; confirm the SDK version in use accepts null credentials.
    string SecretKey = null;
    string PublicKey = null;
    AmazonS3Client Client = new AmazonS3Client(PublicKey, SecretKey);
    ListObjectsRequest Request = new ListObjectsRequest
    {
        BucketName = "assets.minecraft.net",
    };
    ListObjectsResponse Result;
    List<Release> releases = new List<Release>();
    do
    {
        Result = Client.ListObjects(Request);
        foreach (S3Object o in Result.S3Objects)
        {
            string IsSnapshot = "Release";
            if (!o.Key.Contains("minecraft.jar"))
                continue;
            // Snapshot keys look like "13w05a/..."; pre-releases contain "pre".
            if (Regex.IsMatch(o.Key, "[0-9][0-9]w[0-9][0-9]"))
                IsSnapshot = "Snapshot";
            else if (o.Key.Contains("pre"))
                IsSnapshot = "Pre-release";
            releases.Add(new Release
            {
                Version = o.Key.Split('/')[0],
                Size = (o.Size / 1024).ToString() + "KB",
                Uploaded = DateTime.Parse(o.LastModified),
                Type = IsSnapshot,
                Key = o.Key
            });
        }
        // FIX: advance the marker when the listing is truncated — the old loop
        // re-issued the identical request, looping forever on the first page.
        if (Result.IsTruncated)
            Request.Marker = Result.NextMarker;
    } while (Result.IsTruncated);
    // Newest upload first.
    releases.Sort(new Comparison<Release>((x, y) => DateTime.Compare(y.Uploaded, x.Uploaded)));
    _Releases.Clear();
    foreach (Release r in releases)
    {
        _Releases.Add(r);
    }
    Client.Dispose();
    Result.Dispose();
}
/// <summary>
/// Sample: lists buckets, downloads the contents of the first bucket,
/// uploads an example file, and opens a pre-signed URL for it.
/// </summary>
static void Main()
{
    // SECURITY(review): real-looking AWS credentials are hard-coded below.
    // They must be rotated and moved to configuration/environment; kept in
    // place here only so the sample's behavior is unchanged.
    BasicAWSCredentials basicCredentials = new BasicAWSCredentials("AKIAIIYG27E27PLQ6EWQ", "hr9+5JrS95zA5U9C6OmNji+ZOTR+w3vIXbWr3/td");
    // FIX: dispose the client and the GetObject response deterministically
    // (both are IDisposable and were previously leaked).
    using (AmazonS3Client s3Client = new AmazonS3Client(basicCredentials))
    {
        // Display all S3 buckets
        ListBucketsResponse buckets = s3Client.ListBuckets();
        foreach (var bucket in buckets.Buckets)
        {
            Console.WriteLine(bucket.BucketName);
        }

        // Display and download the files in the first S3 bucket
        string bucketName = buckets.Buckets[0].BucketName;
        Console.WriteLine("Objects in bucket '{0}':", bucketName);
        ListObjectsResponse objects = s3Client.ListObjects(new ListObjectsRequest() { BucketName = bucketName });
        foreach (var s3Object in objects.S3Objects)
        {
            Console.WriteLine("\t{0} ({1})", s3Object.Key, s3Object.Size);
            if (s3Object.Size > 0)
            {
                // We have a file (not a directory) --> download it
                using (GetObjectResponse objData = s3Client.GetObject(
                    new GetObjectRequest() { BucketName = bucketName, Key = s3Object.Key }))
                {
                    string s3FileName = new FileInfo(s3Object.Key).Name;
                    SaveStreamToFile(objData.ResponseStream, s3FileName);
                }
            }
        }

        // Create a new directory and upload a file in it
        string path = "uploads/new_folder_" + DateTime.Now.Ticks;
        string newFileName = "example.txt";
        string fullFileName = path + "/" + newFileName;
        string fileContents = "This is an example file created through the Amazon S3 API.";
        s3Client.PutObject(new PutObjectRequest() { BucketName = bucketName, Key = fullFileName, ContentBody = fileContents });
        Console.WriteLine("Created a file in Amazon S3: {0}", fullFileName);

        // Share the uploaded file and get a download URL
        string uploadedFileUrl = s3Client.GetPreSignedURL(new GetPreSignedUrlRequest() { BucketName = bucketName, Key = fullFileName, Expires = DateTime.Now.AddYears(5) });
        Console.WriteLine("File download URL: {0}", uploadedFileUrl);
        System.Diagnostics.Process.Start(uploadedFileUrl);
    }
}
/// <summary>
/// Exercises serialization support for the SDK's exception types: one real
/// service exception provoked against a non-existent bucket, plus several
/// constructed exception instances, each passed through TestException.
/// NOTE(review): an identical method body also appears elsewhere in this
/// chunk — presumably a copy in another test class; consider a shared helper.
/// </summary>
public void TestSerializingExceptions()
{
    using (var client = new Amazon.S3.AmazonS3Client())
    {
        try
        {
            // A GUID-suffixed name guarantees the bucket does not exist, so the
            // call raises a genuine AmazonS3Exception to serialize.
            var fakeBucketName = "super.duper.fake.bucket.name.123." + Guid.NewGuid().ToString();
            client.ListObjects(fakeBucketName);
        }
        catch (AmazonS3Exception e)
        {
            TestException(e);
        }
        var s3pue = CreateS3PostUploadException();
        TestException(s3pue);
        var doe = CreateDeleteObjectsException();
        TestException(doe);
        var aace = new AdfsAuthenticationControllerException("Message");
        TestException(aace);
// The following exception types are marked obsolete; the warning is
// suppressed because their serialization behavior is still under test.
#pragma warning disable 618
        var ccre = new CredentialCallbackRequiredException("Message");
        TestException(ccre);
        var afe = new AuthenticationFailedException("Message");
        TestException(afe);
#pragma warning restore 618
    }
}
/// <summary>
/// Exercises serialization support for the SDK's exception types: one real
/// service exception provoked against a non-existent bucket, plus several
/// constructed exception instances, each passed through TestException.
/// NOTE(review): this is a byte-for-byte duplicate of another method in this
/// chunk — presumably maintained in two test classes; consider sharing it.
/// </summary>
public void TestSerializingExceptions()
{
    using(var client = new Amazon.S3.AmazonS3Client())
    {
        try
        {
            // A GUID-suffixed name guarantees the bucket does not exist, so the
            // call raises a genuine AmazonS3Exception to serialize.
            var fakeBucketName = "super.duper.fake.bucket.name.123." + Guid.NewGuid().ToString();
            client.ListObjects(fakeBucketName);
        }
        catch(AmazonS3Exception e)
        {
            TestException(e);
        }
        var s3pue = CreateS3PostUploadException();
        TestException(s3pue);
        var doe = CreateDeleteObjectsException();
        TestException(doe);
        var aace = new AdfsAuthenticationControllerException("Message");
        TestException(aace);
// The following exception types are marked obsolete; the warning is
// suppressed because their serialization behavior is still under test.
#pragma warning disable 618
        var ccre = new CredentialCallbackRequiredException("Message");
        TestException(ccre);
        var afe = new AuthenticationFailedException("Message");
        TestException(afe);
#pragma warning restore 618
    }
}
/// <summary>
/// Lists every object key in the bucket, following pagination markers.
/// Best-effort: on any failure the keys collected so far are returned.
/// </summary>
/// <param name="bucketName">Bucket to list.</param>
/// <param name="accessKeyID">AWS access key id.</param>
/// <param name="secretAccessKey">AWS secret access key.</param>
/// <returns>All object keys retrieved before success or failure.</returns>
public static IEnumerable<string> ListFiles(string bucketName, string accessKeyID, string secretAccessKey)
{
    List<string> fileNames = new List<string>();
    try
    {
        // FIX: the client is IDisposable and was previously never disposed.
        using (var s3Client = new AmazonS3Client(accessKeyID, secretAccessKey, Amazon.RegionEndpoint.USEast1))
        {
            ListObjectsRequest request = new ListObjectsRequest { BucketName = bucketName };
            while (request != null)
            {
                ListObjectsResponse response = s3Client.ListObjects(request);
                foreach (S3Object entry in response.S3Objects)
                {
                    fileNames.Add(entry.Key);
                }
                if (response.IsTruncated)
                {
                    request.Marker = response.NextMarker;
                }
                else
                {
                    request = null;
                }
            }
        }
    }
    catch (Exception e)
    {
        // FIX: the exception was silently swallowed; keep the best-effort
        // contract but at least record what went wrong.
        System.Diagnostics.Trace.WriteLine(e.ToString());
    }
    return fileNames;
}
/// <summary>
/// "Soft deletes" every object under each folder prefix: each object is first
/// copied to ".recycled/&lt;key&gt;" and then deleted, page by page.
/// </summary>
/// <param name="context">Supplies AWS credentials and the bucket name.</param>
/// <param name="folders">Folder prefixes to recycle.</param>
/// <exception cref="ArgumentNullException">Either argument is null.</exception>
public static void SoftDeleteFolders(ControllerConfiguration context, IEnumerable<string> folders)
{
    if (context == null)
        throw new ArgumentNullException("context", "Context cannot be null.");
    if (folders == null)
        throw new ArgumentNullException("folders", "Folders cannot be null.");
    using (var client = new AmazonS3Client(context.AwsAccessKeyId, context.AwsSecretAccessKey))
    {
        foreach (var folder in folders)
        {
            int maxResults = 100;
            int lastCount = maxResults;
            // Loop while each listing returns a full page; once a page comes
            // back short, everything under the prefix has been processed.
            while (maxResults == lastCount)
            {
                using (var listResponse = client.ListObjects(new ListObjectsRequest()
                {
                    BucketName = context.BucketName,
                    Prefix = folder,
                    // FIX: MaxKeys was never set, so the service returned up to
                    // 1000 keys and the "full page" check (count == 100) almost
                    // always stopped after one pass, leaving objects behind for
                    // folders with more than 1000 entries.
                    MaxKeys = maxResults,
                }))
                {
                    lastCount = listResponse.S3Objects.Count;
                    // Copy each object into the recycle prefix first...
                    Parallel.ForEach(listResponse.S3Objects, folderObject =>
                    {
                        using (var copyResponse = client.CopyObject(new CopyObjectRequest()
                        {
                            SourceBucket = context.BucketName,
                            DestinationBucket = context.BucketName,
                            SourceKey = folderObject.Key,
                            DestinationKey = ".recycled/" + folderObject.Key,
                        }))
                        {
                        }
                    });
                    // ...then delete the originals; the re-list on the next
                    // iteration naturally picks up the remaining keys.
                    Parallel.ForEach(listResponse.S3Objects, folderObject =>
                    {
                        using (var deleteReponse = client.DeleteObject(new DeleteObjectRequest()
                        {
                            BucketName = context.BucketName,
                            Key = folderObject.Key,
                        }))
                        {
                        }
                    });
                }
            }
        }
    }
}
/// <summary>
/// Deletes every object in the named bucket.
/// </summary>
/// <param name="bucketName">Bucket to empty.</param>
public static void DeleteAllBucketItems(string bucketName)
{
    using (var client = new AmazonS3Client(Settings.AccessKey, Settings.Secret))
    {
        var request = new ListObjectsRequest { BucketName = bucketName };
        // FIX: a single ListObjects call returns at most 1000 keys, so only the
        // first page was being deleted. Re-list until no longer truncated — the
        // just-deleted keys drop out, so no marker is needed.
        ListObjectsResponse response;
        do
        {
            response = client.ListObjects(request);
            foreach (var entry in response.S3Objects)
            {
                client.DeleteObject(bucketName, entry.Key);
            }
        } while (response.IsTruncated);
    }
}
/// <summary>
/// Deletes every object under the given folder prefix (10 keys per listing
/// page), skipping the folder placeholder key itself, and logs each deletion.
/// </summary>
/// <param name="bucketName">Bucket containing the folder.</param>
/// <param name="serverFolder">Folder prefix whose items are deleted.</param>
public void S3DeleteItemsFolder(string bucketName, string serverFolder)
{
    //ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/RetrievingObjectUsingNetSDK.html
    int deletedSoFar = 0;
    using (var s3 = new AmazonS3Client(this.AcesssKey, this.SecretKey, this.Region))
    {
        var listRequest = new ListObjectsRequest
        {
            BucketName = bucketName,
            MaxKeys = 10,
            Prefix = serverFolder
        };
        while (listRequest != null)
        {
            var page = s3.ListObjects(listRequest);
            foreach (S3Object item in page.S3Objects)
            {
                // Never delete the folder placeholder key itself.
                bool isFolderKey = item.Key == serverFolder
                    || item.Key == string.Format("{0}/", serverFolder)
                    || item.Key == string.Format("/{0}", serverFolder);
                if (isFolderKey)
                    continue;
                deletedSoFar++;
                this.S3DeleteItem(bucketName, item.Key);
                Console.WriteLine(string.Format("{0} -- {1} items deleted", item.Key, deletedSoFar.ToString("#,##0")));
            }
            // Continue from the marker while the listing is truncated.
            if (page.IsTruncated)
            {
                listRequest.Marker = page.NextMarker;
            }
            else
            {
                listRequest = null;
            }
        }
    }
}
/// <summary>
/// Deletes a bucket; if it is not empty, deletes all contained objects first
/// (paging through the listing) and then deletes the bucket.
/// </summary>
/// <param name="s3Client">Client to issue requests with.</param>
/// <param name="bucketName">Bucket to delete.</param>
public virtual void DeleteBucket(AmazonS3Client s3Client, string bucketName)
{
    // First, try to delete the bucket.
    var deleteBucketRequest = new DeleteBucketRequest { BucketName = bucketName };
    try
    {
        s3Client.DeleteBucket(deleteBucketRequest);
        // If we get here, no error was generated so we'll assume the bucket was deleted and return.
        return;
    }
    catch (AmazonS3Exception ex)
    {
        if (!ex.ErrorCode.Equals("BucketNotEmpty"))
        {
            // We got an unanticipated error. Just rethrow.
            throw;
        }
    }
    // The bucket isn't empty, so delete its contents first.
    // FIX: ListObjects returns at most 1000 keys per call and DeleteObjects
    // accepts at most 1000 keys per request — page through the listing and
    // delete one batch per page instead of a single unpaged pass.
    var listRequest = new ListObjectsRequest { BucketName = bucketName };
    ListObjectsResponse listResponse;
    do
    {
        listResponse = s3Client.ListObjects(listRequest);
        if (listResponse.S3Objects.Count > 0)
        {
            var deleteObjectsRequest = new DeleteObjectsRequest { BucketName = bucketName };
            foreach (S3Object obj in listResponse.S3Objects)
            {
                // Add keys for the objects to the delete request
                deleteObjectsRequest.AddKey(obj.Key, null);
            }
            s3Client.DeleteObjects(deleteObjectsRequest);
        }
        listRequest.Marker = listResponse.NextMarker;
    } while (listResponse.IsTruncated);
    // The bucket is empty now, so delete the bucket.
    s3Client.DeleteBucket(deleteBucketRequest);
}
/// <summary>
/// Returns true if at least one object whose key starts with
/// <paramref name="path"/> exists in <paramref name="bucket"/>.
/// </summary>
public bool FileExists(string path, string bucket)
{
    using (var s3 = new AmazonS3Client(_connectionInfo.AccessKey, _connectionInfo.SecretKey, new AmazonS3Config { ServiceURL = "http://s3.amazonaws.com" }))
    {
        // FIX: the old code tested ".MaxKeys > 0", which is just the echo of the
        // MaxKeys request parameter (always 1 here), so the method returned true
        // even when nothing matched. Check the returned object list instead.
        var response = s3.ListObjects(new ListObjectsRequest
        {
            BucketName = bucket,
            Prefix = path,
            MaxKeys = 1
        });
        return response.S3Objects.Count > 0;
    }
}
/// <summary>
/// Downloads every object of a stored version into the target directory,
/// paging through the S3 listing and fetching the files of each page in
/// parallel. Keys listed in ignorePaths (".info") are skipped.
/// </summary>
/// <param name="key">Version key to pull.</param>
/// <param name="directory">Existing target directory.</param>
/// <param name="mergeExistingFiles">When false, the directory must be empty.</param>
/// <exception cref="ArgumentException">Directory missing or not empty.</exception>
/// <exception cref="VersionNotFoundException">No such version.</exception>
/// <exception cref="DeploymentException">Any failure during the download.</exception>
public void PullVersion(Guid key, DirectoryInfo directory, bool mergeExistingFiles = false)
{
    if (!directory.Exists)
        throw new ArgumentException("Directory must exist.", "directory");
    if (!VersionExists(key))
        throw new VersionNotFoundException(string.Format("Could not find the version with key: {0}", key));
    if (!mergeExistingFiles)
    {
        if (directory.EnumerateFileSystemInfos().Any())
            throw new ArgumentException("Target directory is not empty.");
    }
    try
    {
        var ignorePaths = new string[1] { ".info" };
        using (var client = new AmazonS3Client(Context.AwsAccessKeyId, Context.AwsSecretAccessKey))
        {
            bool more = true;
            string lastResult = null;
            string prefix = string.Format("{0}/{1}/", STR_VERSIONS_CONTAINER_PATH, key.ToString("N"));
            while (more)
            {
                // FIX: the continuation key must be passed as Marker — the old
                // code assigned it to Delimiter, which changes the listing's
                // grouping behavior instead of resuming it.
                using (var listResponse = client.ListObjects(new ListObjectsRequest()
                {
                    BucketName = Context.BucketName,
                    Prefix = prefix,
                    Marker = lastResult,
                }))
                {
                    listResponse.S3Objects
                        .Where(obj => !ignorePaths.Any(ignore => obj.Key == String.Format("{0}{1}", prefix, ignore)))
                        .AsParallel().ForAll(s3obj =>
                        {
                            using (var getResponse = client.GetObject(new GetObjectRequest()
                            {
                                BucketName = Context.BucketName,
                                Key = s3obj.Key,
                            }))
                            {
                                getResponse.WriteResponseStreamToFile(Utils.Files.GetLocalAbsolutePath(s3obj.Key, prefix, directory.FullName));
                            }
                        });
                    more = listResponse.IsTruncated;
                    // FIX: guard against an empty page before taking Last(),
                    // which previously threw on an empty listing.
                    if (more && listResponse.S3Objects.Count > 0)
                        lastResult = listResponse.S3Objects.Last().Key;
                }
            }
        }
    }
    catch (Exception ex)
    {
        // FIX: the message said "pushing" in this pull method.
        throw new DeploymentException(string.Format("Failed pulling the version with key \"{0}\"", key), ex);
    }
}
/// <summary>
/// Lazily yields every object key in the bucket. The client stays alive until
/// enumeration completes (iterator semantics keep the using scope open).
/// </summary>
/// <param name="bucketName">Bucket to list.</param>
public static IEnumerable<string> ListFromBucket(string bucketName)
{
    using (var client = new AmazonS3Client(Settings.AccessKey, Settings.Secret))
    {
        var request = new ListObjectsRequest { BucketName = bucketName };
        // FIX: only the first page (max 1000 keys) was returned; follow the
        // marker until the listing is complete.
        ListObjectsResponse response;
        do
        {
            response = client.ListObjects(request);
            foreach (var entry in response.S3Objects)
            {
                yield return entry.Key;
            }
            request.Marker = response.NextMarker;
        } while (response.IsTruncated);
    }
}
/// <summary>
/// Returns the size in bytes of the object with exactly the given key,
/// or 0 when no such object exists.
/// </summary>
/// <param name="bucketName">Bucket to search.</param>
/// <param name="key">Exact object key.</param>
public static long GetBucketItemSizeInBytes(string bucketName, string key)
{
    using (var client = new AmazonS3Client(Settings.AccessKey, Settings.Secret))
    {
        var request = new ListObjectsRequest { BucketName = bucketName, Prefix = key };
        // FIX: Prefix is a prefix match, so FirstOrDefault() could return a
        // different object whose key merely starts with 'key' (e.g. "key.bak").
        // Require an exact key match.
        var match = client.ListObjects(request).S3Objects.FirstOrDefault(o => o.Key == key);
        if (match == null)
            return 0;
        return match.Size;
    }
}
/// <summary>
/// Prints the key and size of every object in the bucket.
/// </summary>
/// <param name="s3Client">Client to issue requests with.</param>
/// <param name="bucketName">Bucket to list.</param>
public virtual void ListObjects(AmazonS3Client s3Client, string bucketName)
{
    // Create the request
    var listObjectsRequest = new ListObjectsRequest { BucketName = bucketName };
    // FIX: a single ListObjects call returns at most 1000 keys; page through
    // the whole bucket via the marker instead of showing only the first page.
    ListObjectsResponse listObjectsResponse;
    do
    {
        listObjectsResponse = s3Client.ListObjects(listObjectsRequest);
        // Display the results
        foreach (S3Object objectSummary in listObjectsResponse.S3Objects)
        {
            Console.WriteLine("{0} (size: {1})", objectSummary.Key, objectSummary.Size);
        }
        listObjectsRequest.Marker = listObjectsResponse.NextMarker;
    } while (listObjectsResponse.IsTruncated);
}
/// <summary>
/// Returns the keys of the objects under the given folder prefix, skipping
/// the folder placeholder key itself, optionally capped at maxItems.
/// </summary>
/// <param name="bucketName">Bucket to list.</param>
/// <param name="serverFolder">Folder prefix.</param>
/// <param name="maxItems">Optional cap on the number of keys returned.</param>
public List<string> S3ListItems(string bucketName, string serverFolder, int? maxItems = null)
{
    //ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/RetrievingObjectUsingNetSDK.html
    List<string> listRest = new List<string>();
    int count = 0;
    var region = Amazon.RegionEndpoint.GetBySystemName(this.Region);
    using (var client = new AmazonS3Client(this.AcesssKey, this.SecretKey, region))
    {
        ListObjectsRequest request = new ListObjectsRequest { BucketName = bucketName, MaxKeys = 10, Prefix = serverFolder };
        do
        {
            ListObjectsResponse response = client.ListObjects(request);
            // Process response
            foreach (S3Object entry in response.S3Objects)
            {
                // Skip the folder placeholder key itself.
                if (entry.Key == serverFolder || entry.Key == string.Format("{0}/", serverFolder) || entry.Key == string.Format("/{0}", serverFolder))
                    continue;
                count++;
                System.Diagnostics.Debug.WriteLine("AwsS3 -- key = {0} size = {1} / {2} items read", entry.Key, entry.Size.ToString("#,##0"), count.ToString("#,##0"));
                listRest.Add(entry.Key);
                // FIX: the old code only checked the cap after a full page had
                // been processed, so it could return more than maxItems keys.
                if (maxItems.HasValue && count >= maxItems.Value)
                    return listRest;
            }
            // If response is truncated, set the marker to get the next set of keys.
            if (response.IsTruncated)
            {
                request.Marker = response.NextMarker;
            }
            else
            {
                request = null;
            }
        } while (request != null);
    }
    return listRest;
}
/// <summary>
/// Returns one page of log entries for an instance by listing keys under
/// "logs/&lt;instanceKey&gt;/" in S3. Keys whose timestamp or status cannot
/// be parsed are silently dropped from the page.
/// </summary>
/// <param name="instanceKey">Instance whose logs are listed.</param>
/// <param name="marker">S3 listing marker to resume after (null = first page).</param>
/// <param name="pageSize">Keys requested per page; must be in 1..100.</param>
/// <returns>The populated page, including the NextMarker for paging on.</returns>
/// <exception cref="ArgumentOutOfRangeException">pageSize outside 1..100.</exception>
/// <exception cref="DeploymentException">Wraps any AmazonS3Exception.</exception>
public LogEntryPage GetLogEntryPage(Guid instanceKey, string marker = null, int pageSize = 50)
{
    if (pageSize < 1)
        throw new ArgumentOutOfRangeException("pageSize", "Page size cannot be less than 1.");
    if (pageSize > 100)
        throw new ArgumentOutOfRangeException("pageSize", "Page size cannot be greater than 100.");
    try
    {
        using (var client = new AmazonS3Client(Context.AwsAccessKeyId, Context.AwsSecretAccessKey))
        {
            // List at most pageSize keys under the instance's log prefix,
            // resuming after the supplied marker.
            using (var res = client.ListObjects(new ListObjectsRequest()
            {
                BucketName = Context.BucketName,
                Prefix = string.Format("{0}/{1}/", STR_LOGS_CONTAINER_PATH, instanceKey.ToString("N")),
                MaxKeys = pageSize,
                Marker = marker,
            }))
            {
                return new LogEntryPage()
                {
                    InstanceKey = instanceKey,
                    StartMarker = marker,
                    PageSize = pageSize,
                    NextMarker = res.NextMarker,
                    // Each key encodes a timestamp and a status; parse both and
                    // keep only the entries where both parses succeeded.
                    LogEntries = res.S3Objects.Select(o =>
                    {
                        DateTime timestamp;
                        LogStatus status;
                        bool timestampSucceeded = TryGetTimestampFromKey(o.Key, out timestamp);
                        bool statusSucceeded = TryGetStateFromKey(o.Key, out status);
                        return new { Timestmp = timestamp, Status = status, ParseSucceeded = timestampSucceeded && statusSucceeded };
                    })
                    .Where(o => o.ParseSucceeded)
                    .Select(o => new LogEntryListItem() { Timestamp = o.Timestmp, Status = o.Status })
                    .ToList()
                };
            }
        }
    }
    catch (AmazonS3Exception awsEx)
    {
        throw new DeploymentException("Failed listing logs.", awsEx);
    }
}
/// <summary>
/// Sets up the add/edit pet page: shows any flash message, resolves the pet
/// id from the query string, and (on first load) populates the dropdowns,
/// loads the pet's attributes from SimpleDB and its photos from S3.
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    // Show (and clear) any one-shot flash message stored in session.
    if (this.Session[Settings.Default.FlashSessionKey] != null)
    {
        this.FlashLiteralWrapper.Visible = true;
        this.FlashLiteral.Text = this.Session[Settings.Default.FlashSessionKey].ToString();
        this.Session[Settings.Default.FlashSessionKey] = null;
    }
    else
    {
        this.FlashLiteralWrapper.Visible = false;
    }
    this._petIdString = this.Request.QueryString["petid"];
    if (String.IsNullOrEmpty(this._petIdString))
    {
        this.StatsLiteral.Text = "Add a New Pet";
        this.SaveStatsButton.Text = "Save New Pet";
    }
    else
    {
        this.PhotoPanel.Visible = true;
    }
    this._userBucketName = String.Format(Settings.Default.BucketNameFormat, this.Context.User.Identity.Name, this._petIdString);
    this._itemName = this._petIdString ?? Guid.NewGuid().ToString();
    this._domainName = String.Format(Settings.Default.SimpleDbDomainNameFormat, this.Context.User.Identity.Name);
    if (!this.Page.IsPostBack)
    {
        // Populate the birth-year dropdown with the last 100 years.
        List<int> years = new List<int>(100);
        for (int i = 0; i < 100; i++)
        {
            years.Add(DateTime.Now.AddYears(i * -1).Year);
        }
        this.YearDropDownList.DataSource = years.OrderByDescending(y => y);
        this.YearDropDownList.DataBind();
        this.SelectMonth();
        this.SelectDay();
        Pet pet = default(Pet);
        List<string> files = new List<string>();
        if (!String.IsNullOrEmpty(this._petIdString))
        {
            //
            // Try to get the requested pet from the user's private domain
            //
            DomainHelper.CheckForDomain(this._domainName, _simpleDBClient);
            GetAttributesRequest getAttributeRequest = new GetAttributesRequest()
                .WithDomainName(this._domainName)
                .WithItemName(this._itemName);
            GetAttributesResponse getAttributeResponse = _simpleDBClient.GetAttributes(getAttributeRequest);
            List<Attribute> attrs = null;
            bool showPublic = false;
            if (getAttributeResponse.IsSetGetAttributesResult())
            {
                attrs = getAttributeResponse.GetAttributesResult.Attribute;
                showPublic = false;
                //
                // If we can't find it try the public domain
                //
                if (attrs.Count == 0)
                {
                    showPublic = true;
                }
            }
            if (showPublic)
            {
                // FIX: the redirect URL was built as "...?petid<id>" — the '='
                // separator was missing, so the target page never received the id.
                Response.Redirect(String.Concat("PetProfile.aspx?petid=", _petIdString));
                return;
            }
            pet = new Pet
            {
                Name = attrs.First(a => a.Name == "Name").Value,
                Birthdate = attrs.First(a => a.Name == "Birthdate").Value,
                Sex = attrs.First(a => a.Name == "Sex").Value,
                Type = attrs.First(a => a.Name == "Type").Value,
                Breed = attrs.First(a => a.Name == "Breed").Value,
                Likes = attrs.First(a => a.Name == "Likes").Value,
                Dislikes = attrs.First(a => a.Name == "Dislikes").Value
            };
            this.Public.Checked = bool.Parse(attrs.First(a => a.Name == "Public").Value);
            using (AmazonS3Client s3Client = new AmazonS3Client(Settings.Default.AWSAccessKey.Trim(), Settings.Default.AWSSecretAccessKey.Trim()))
            {
                BucketHelper.CheckForBucket(this._petIdString, s3Client);
                // Build the list of photo URLs from the pet's S3 bucket.
                ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
                    .WithBucketName(this._userBucketName);
                using (ListObjectsResponse listObjectsResponse = s3Client.ListObjects(listObjectsRequest))
                {
                    files = listObjectsResponse.S3Objects.Select(o => String.Format(Settings.Default.S3BucketUrlFormat, this._userBucketName, o.Key)).ToList();
                    string firstPhoto = files.FirstOrDefault();
                    this.PhotoThumbUrl.Value = firstPhoto ?? String.Empty;
                }
            }
        }
        if (pet != default(Pet))
        {
            this.PetNameHeader.Text = pet.Name;
            this.NameTextBox.Text = pet.Name;
            this.AnimalDropDownList.SelectedValue = pet.Type;
            this.BreedTextBox.Text = pet.Breed;
            this.SexDropDownList.SelectedValue = pet.Sex;
            if (pet.Birthdate != null)
            {
                DateTime birthdate = DateTime.Parse(pet.Birthdate);
                this.YearDropDownList.SelectedValue = birthdate.Year.ToString();
                this.MonthDropDownList.SelectedValue = birthdate.Month.ToString();
                this.DayDropDownList.SelectedValue = birthdate.Day.ToString();
            }
            this.LikesTextBox.Text = pet.Likes;
            this.DislikesTextBox.Text = pet.Dislikes;
            this.PhotoRepeater.DataSource = files;
            this.PhotoRepeater.DataBind();
        }
    }
}
/// <summary>
/// Uses the AWS SDK for .NET to talk to Tier 3 Object Storage: lists all
/// buckets, then lists the objects of a bucket named interactively.
/// </summary>
private static void UseAwsSdk()
{
    Console.WriteLine(":: Calling Tier 3 Object Storage from AWS SDK for .NET ::");
    Console.WriteLine();

    //create configuration that points to different URL
    AmazonS3Config config = new AmazonS3Config() { ServiceURL = "ca.tier3.io" };
    // FIX: AmazonS3Client is IDisposable and was never disposed.
    using (AmazonS3Client client = new AmazonS3Client(adminAccessKey, adminAccessSecret, config))
    {
        /*
         * List buckets
         */
        Console.WriteLine("ACTION: List all the buckets");
        ListBucketsResponse resp = client.ListBuckets();
        foreach (S3Bucket bucket in resp.Buckets)
        {
            Console.WriteLine("-" + bucket.BucketName);
        }
        Console.WriteLine();

        /*
         * List objects in a single bucket
         */
        Console.WriteLine("ACTION: Enter the name of a bucket to open: ");
        string inputbucket = Console.ReadLine();
        ListObjectsRequest objReq = new ListObjectsRequest() { BucketName = inputbucket };
        ListObjectsResponse objResp = client.ListObjects(objReq);
        foreach (S3Object obj in objResp.S3Objects)
        {
            Console.WriteLine("-" + obj.Key);
        }

        /*
         * Upload object to bucket (left disabled in the original sample)
         */
        //Console.Write("Type [Enter] to upload an object to the opened bucket");
        //Console.ReadLine();
        //PutObjectRequest putReq = new PutObjectRequest() { BucketName = inputbucket, FilePath = @"C:\image.png", ContentType = "image/png" };
        //PutObjectResponse putResp = client.PutObject(putReq);
        //Console.WriteLine("Object uploaded.");

        Console.ReadLine();
    }
}
/// <summary>
/// Pages through all keys under the given prefix of the logs bucket, writes
/// each page's "key \t size \t lastModified" lines to a file, and keeps the
/// label updated with the running object count.
/// </summary>
/// <param name="folder">Key prefix to list (also passed to WriteToFile).</param>
/// <param name="label">UI label that shows the running count.</param>
private void DownloadFolder(string folder, Label label)
{
    var region = Amazon.RegionEndpoint.USEast1;
    // SECURITY(review): hard-coded AWS credentials — rotate these keys and load
    // them from configuration; kept in place here so behavior is unchanged.
    var credentials = new BasicAWSCredentials("AKIAI6FLMSLBSLE4YBKQ", "l3sIAyIAvSqsg+uWbwmnD8MgHoaDeDHs2tOyVbYT");
    S3Client = new AmazonS3Client(credentials, region);
    var count = 0;
    label.Text = count.ToString();
    var request = new ListObjectsRequest { BucketName = "logs-concierge-prod", Prefix = folder, MaxKeys = 1000 };
    // FIX: the old loop fetched the first page twice (the marker was only set
    // after a redundant re-fetch inside the loop), double-counting page one and
    // never writing its lines. Fetch each page exactly once.
    var response = S3Client.ListObjects(request);
    while (response.S3Objects.Count > 0)
    {
        count += response.S3Objects.Count;
        label.Text = count.ToString();
        var lines = new List<string>();
        var startPoint = "";
        foreach (var item in response.S3Objects)
        {
            lines.Add(String.Format("{0}\t{1}\t{2}", item.Key, item.Size, item.LastModified));
            startPoint = item.Key;
        }
        WriteToFile(lines, folder);
        // Resume the listing after the last key seen.
        request.Marker = startPoint;
        response = S3Client.ListObjects(request);
    }
    label.Text = count.ToString();
}
/// <summary>
/// Gets the keys of all files in an S3 bucket.
/// </summary>
/// <param name="bucket">Name of the bucket to list.</param>
/// <param name="Files">Receives the object keys, or null on failure.</param>
/// <returns>true when the listing succeeded; false on any error.</returns>
public bool GetFiles(string bucket, out IEnumerable<string> Files)
{
    Files = null;
    try
    {
        //amazon client
        using (var client = new AmazonS3Client())
        {
            List<string> list = new List<string>();
            var request = new ListObjectsRequest().WithBucketName(bucket);
            // FIX: a single ListObjects call returns at most 1000 keys; page
            // through the whole bucket using the marker.
            bool more = true;
            while (more)
            {
                using (var response = client.ListObjects(request))
                {
                    response.S3Objects.ForEach(o => list.Add(o.Key));
                    more = response.IsTruncated;
                    if (more)
                        request.Marker = response.NextMarker;
                }
            }
            Files = list;
            return true;
        }
    }
    catch (Exception ex)
    {
        Trace.WriteLine(ex.ToString());
    }
    return false;
}
//***
/// <summary>
/// Lists every object key in the bucket.
/// </summary>
/// <param name="client">Client to issue requests with.</param>
/// <param name="bucketName">Bucket to list.</param>
/// <returns>
/// (true, keys) on success; (false, [error text]) on failure.
/// </returns>
public System.Tuple<bool, List<string>> ListObjectsInBucket(AmazonS3Client client, string bucketName)
{
    try
    {
        ListObjectsRequest request = new ListObjectsRequest();
        request.WithBucketName(bucketName);
        List<string> retList = new List<string>();
        // FIX: page through the listing — a single call caps at 1000 keys.
        ListObjectsResponse response;
        do
        {
            response = client.ListObjects(request);
            foreach (S3Object s3Object in response.S3Objects)
                retList.Add(s3Object.Key);
            request.Marker = response.NextMarker;
        } while (response.IsTruncated);
        // FIX: the success path previously returned false, making it
        // indistinguishable from the failure path.
        return new System.Tuple<bool, List<string>>(true, retList);
    }
    catch (Exception e)
    {
        return new System.Tuple<bool, List<string>>(false, new List<string>() { e.ToString() });
    }
}
/// <summary>
/// Counts every object in the bucket by walking all pages of the listing.
/// </summary>
/// <param name="bucketName">Bucket to count.</param>
public static int GetBucketItemCount(string bucketName)
{
    using (var s3 = new AmazonS3Client(Settings.AccessKey, Settings.Secret))
    {
        var listRequest = new ListObjectsRequest { BucketName = bucketName };
        var total = 0;
        ListObjectsResponse page;
        do
        {
            page = s3.ListObjects(listRequest);
            total += page.S3Objects.Count;
            // Resume from the marker while the listing is truncated.
            listRequest.Marker = page.NextMarker;
        } while (page.IsTruncated);
        return total;
    }
}
/// <summary>
/// Empties and deletes each named bucket. Buckets that no longer exist
/// ("NoSuchBucket") are ignored; any other S3 error is rethrown.
/// </summary>
/// <param name="s3Client">Client to issue requests with.</param>
/// <param name="bucketNames">Buckets to remove.</param>
public virtual void RemoveLabBuckets(AmazonS3Client s3Client, List<string> bucketNames)
{
    foreach (var bucketName in bucketNames)
    {
        try
        {
            // FIX: ListObjects returns at most 1000 keys per call; page through
            // the listing so larger buckets are fully emptied (otherwise the
            // DeleteBucket call below fails with BucketNotEmpty).
            var listRequest = new ListObjectsRequest { BucketName = bucketName };
            ListObjectsResponse listObjectsResponse;
            do
            {
                listObjectsResponse = s3Client.ListObjects(listRequest);
                foreach (var s3Object in listObjectsResponse.S3Objects)
                {
                    var deleteObjectRequest = new DeleteObjectRequest { BucketName = bucketName, Key = s3Object.Key };
                    s3Client.DeleteObject(deleteObjectRequest);
                }
                listRequest.Marker = listObjectsResponse.NextMarker;
            } while (listObjectsResponse.IsTruncated);
            s3Client.DeleteBucket(new DeleteBucketRequest { BucketName = bucketName });
        }
        catch (AmazonS3Exception s3E)
        {
            if (!s3E.ErrorCode.Equals("NoSuchBucket"))
            {
                // This error wasn't expected, so rethrow.
                throw;
            }
        }
    }
}
/// <summary>
/// Totals the Size of every object in the bucket across all listing pages.
/// </summary>
/// <param name="bucketName">Bucket to measure.</param>
public static long GetBucketSizeInBytes(string bucketName)
{
    using (var s3 = new AmazonS3Client(Settings.AccessKey, Settings.Secret))
    {
        var listRequest = new ListObjectsRequest { BucketName = bucketName };
        long totalBytes = 0;
        ListObjectsResponse page;
        do
        {
            page = s3.ListObjects(listRequest);
            foreach (var obj in page.S3Objects)
            {
                totalBytes += obj.Size;
            }
            // Resume from the marker while the listing is truncated.
            listRequest.Marker = page.NextMarker;
        } while (page.IsTruncated);
        return totalBytes;
    }
}
/// <summary>
/// Exercises ListBuckets, GetBucketLocation and (when a region-compatible
/// bucket is found) ListObjects against the given config, to verify endpoint
/// computation in AmazonS3PostMarshallHandler for requests with and without a
/// bucket name.
/// </summary>
private void executeSomeBucketOperations(AmazonS3Config s3Config)
{
    using (var s3Client = new AmazonS3Client(s3Config))
    {
        // Call ListBuckets first to verify that AmazonS3PostMarshallHandler.ProcessRequestHandlers
        // correctly computes the endpoint when no bucket name is present.
        var listBucketsResponse = s3Client.ListBuckets();
        Assert.IsNotNull(listBucketsResponse);
        Assert.IsFalse(string.IsNullOrEmpty(listBucketsResponse.ResponseMetadata.RequestId));

        // Bonus call on ListObjects if we can find a bucket compatible with the test region (to avoid 301
        // errors due to addressing bucket on wrong endpoint). This verifies that
        // AmazonS3PostMarshallHandler.ProcessRequestHandlers correctly computes the endpoint when
        // a bucket name is present.
        string bucketName = null;
        foreach (var bucket in listBucketsResponse.Buckets)
        {
            try
            {
                var bucketLocationResponse = s3Client.GetBucketLocation(bucket.BucketName);
                // An empty Location is only accepted when testing us-east-1.
                if (string.IsNullOrEmpty(bucketLocationResponse.Location) && s3Config.RegionEndpoint == RegionEndpoint.USEast1)
                    bucketName = bucket.BucketName;
                else if (string.Equals(s3Config.RegionEndpoint.SystemName, bucketLocationResponse.Location, StringComparison.OrdinalIgnoreCase))
                    bucketName = bucket.BucketName;
                if (!string.IsNullOrEmpty(bucketName))
                    break;
            }
            catch (AmazonS3Exception e)
            {
                // A bucket can disappear between ListBuckets and
                // GetBucketLocation; ignore NotFound and keep scanning.
                if (e.StatusCode != System.Net.HttpStatusCode.NotFound)
                    throw;
            }
        }
        if (!string.IsNullOrEmpty(bucketName))
        {
            var listObjectsResponse = s3Client.ListObjects(new ListObjectsRequest { BucketName = bucketName });
            Assert.IsNotNull(listObjectsResponse);
            Assert.IsNotNull(listObjectsResponse.ResponseMetadata);
        }
    }
}
/// <summary>
/// Counts every object under the configured folder prefix by walking all
/// pages of the listing via the marker.
/// </summary>
/// <param name="connectionInfo">Supplies credentials, bucket and folder.</param>
public static long GetTotalFileCount(S3ConnectionString connectionInfo)
{
    using (var s3 = new AmazonS3Client(connectionInfo.AccessKey, connectionInfo.SecretKey, new AmazonS3Config { ServiceURL = "http://s3.amazonaws.com" }))
    {
        var pageRequest = new ListObjectsRequest
        {
            BucketName = connectionInfo.BucketName,
            Prefix = connectionInfo.FolderName
        };
        var total = 0;
        string continueFrom = null;
        ListObjectsResponse page;
        do
        {
            pageRequest.Marker = continueFrom;
            page = s3.ListObjects(pageRequest);
            total += page.S3Objects.Count;
            continueFrom = page.NextMarker;
        } while (page.IsTruncated);
        return total;
    }
}
/// <summary>
/// Documentation samples for bucket-level operations: ListBuckets,
/// bucket policies, GetBucketLocation, PutBucket, DeleteBucket (empty and
/// non-empty), and lifecycle configuration. Each sample lives in its own
/// scope/#region so it can be extracted independently.
/// </summary>
public void BucketSamples()
{
    {
        #region ListBuckets Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // Issue call
        ListBucketsResponse response = client.ListBuckets();
        // View response data
        Console.WriteLine("Buckets owner - {0}", response.Owner.DisplayName);
        foreach (S3Bucket bucket in response.Buckets)
        {
            Console.WriteLine("Bucket {0}, Created on {1}", bucket.BucketName, bucket.CreationDate);
        }
        #endregion
    }
    {
        #region BucketPolicy Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // Put sample bucket policy (overwrite an existing policy)
        string newPolicy = @"{ ""Statement"":[{ ""Sid"":""BasicPerms"", ""Effect"":""Allow"", ""Principal"": ""*"", ""Action"":[""s3:PutObject"",""s3:GetObject""], ""Resource"":[""arn:aws:s3:::samplebucketname/*""] }]}";
        PutBucketPolicyRequest putRequest = new PutBucketPolicyRequest { BucketName = "SampleBucket", Policy = newPolicy };
        client.PutBucketPolicy(putRequest);
        // Retrieve current policy
        GetBucketPolicyRequest getRequest = new GetBucketPolicyRequest { BucketName = "SampleBucket" };
        string policy = client.GetBucketPolicy(getRequest).Policy;
        Console.WriteLine(policy);
        Debug.Assert(policy.Contains("BasicPerms"));
        // Delete current policy
        DeleteBucketPolicyRequest deleteRequest = new DeleteBucketPolicyRequest { BucketName = "SampleBucket" };
        client.DeleteBucketPolicy(deleteRequest);
        // Retrieve current policy and verify that it is null
        policy = client.GetBucketPolicy(getRequest).Policy;
        Debug.Assert(policy == null);
        #endregion
    }
    {
        #region GetBucketLocation Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // Construct request
        GetBucketLocationRequest request = new GetBucketLocationRequest { BucketName = "SampleBucket" };
        // Issue call
        GetBucketLocationResponse response = client.GetBucketLocation(request);
        // View response data
        Console.WriteLine("Bucket location - {0}", response.Location);
        #endregion
    }
    {
        #region PutBucket Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // Construct request
        PutBucketRequest request = new PutBucketRequest
        {
            BucketName = "SampleBucket",
            BucketRegion = S3Region.EU, // set region to EU
            CannedACL = S3CannedACL.PublicRead // make bucket publicly readable
        };
        // Issue call
        PutBucketResponse response = client.PutBucket(request);
        #endregion
    }
    {
        #region DeleteBucket Sample 1
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // Construct request
        DeleteBucketRequest request = new DeleteBucketRequest { BucketName = "SampleBucket" };
        // Issue call
        DeleteBucketResponse response = client.DeleteBucket(request);
        #endregion
    }
    {
        #region DeleteBucket Sample 2
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // List and delete all objects
        ListObjectsRequest listRequest = new ListObjectsRequest { BucketName = "SampleBucket" };
        ListObjectsResponse listResponse;
        do
        {
            // Get a list of objects
            listResponse = client.ListObjects(listRequest);
            foreach (S3Object obj in listResponse.S3Objects)
            {
                // Delete each object
                client.DeleteObject(new DeleteObjectRequest { BucketName = "SampleBucket", Key = obj.Key });
            }
            // Set the marker property
            listRequest.Marker = listResponse.NextMarker;
        } while (listResponse.IsTruncated);
        // Construct DeleteBucket request
        DeleteBucketRequest request = new DeleteBucketRequest { BucketName = "SampleBucket" };
        // Issue call
        DeleteBucketResponse response = client.DeleteBucket(request);
        #endregion
    }
    {
        #region LifecycleConfiguration Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();
        // Put sample lifecycle configuration (overwrite an existing configuration)
        LifecycleConfiguration newConfiguration = new LifecycleConfiguration
        {
            Rules = new List<LifecycleRule>
            {
                // Rule to delete keys with prefix "Test-" after 5 days
                new LifecycleRule { Prefix = "Test-", Expiration = new LifecycleRuleExpiration { Days = 5 } },
                // Rule to delete keys in subdirectory "Logs" after 2 days
                new LifecycleRule { Prefix = "Logs/", Expiration = new LifecycleRuleExpiration { Days = 2 }, Id = "log-file-removal" }
            }
        };
        PutLifecycleConfigurationRequest putRequest = new PutLifecycleConfigurationRequest { BucketName = "SampleBucket", Configuration = newConfiguration };
        client.PutLifecycleConfiguration(putRequest);
        // Retrieve current configuration
        GetLifecycleConfigurationRequest getRequest = new GetLifecycleConfigurationRequest { BucketName = "SampleBucket" };
        LifecycleConfiguration configuration = client.GetLifecycleConfiguration(getRequest).Configuration;
        Console.WriteLine("Configuration contains {0} rules", configuration.Rules.Count);
        foreach (LifecycleRule rule in configuration.Rules)
        {
            Console.WriteLine("Rule");
            Console.WriteLine(" Prefix = " + rule.Prefix);
            Console.WriteLine(" Expiration (days) = " + rule.Expiration.Days);
            Console.WriteLine(" Id = " + rule.Id);
            Console.WriteLine(" Status = " + rule.Status);
        }
        // Put a new configuration and overwrite the existing configuration
        configuration.Rules.RemoveAt(0); // remove first rule
        client.PutLifecycleConfiguration(putRequest);
        // Delete current configuration
        DeleteLifecycleConfigurationRequest deleteRequest = new DeleteLifecycleConfigurationRequest { BucketName = "SampleBucket" };
        client.DeleteLifecycleConfiguration(deleteRequest);
        // Retrieve current configuration and verify that it is null
        configuration = client.GetLifecycleConfiguration(getRequest).Configuration;
        Debug.Assert(configuration == null);
        #endregion
    }
}
/// <summary>
/// Issues <paramref name="totalRequests"/> ListObjects calls that are expected
/// to fail; after the final failure it asserts how many HTTP requests the
/// retry capacity manager actually allowed through (each allowed retry shows
/// up as two requests — the original plus one retry under MaxErrorRetry = 1).
/// </summary>
private static void FailureRetryRequests(int totalRequests, int retryRequests, int extraRequests, AmazonS3Client client)
{
    for (int i = 0; i < totalRequests; i++)
    {
        try
        {
            var response = client.ListObjects("CapacityManagerTests");
        }
        catch (Exception)
        {
            // Only verify the request count once the last call has failed.
            if (i == totalRequests - 1)
            {
                Assert.AreEqual(retryRequests * 2, requestCount - extraRequests);
            }
            continue;
        }
    }
}
/// <summary>
/// Background Info:- Each retry request requires 5 capacity. On successful retry response 5 would be put back into the
/// capacity. On a successful response which is not a retry request 1 is added to the capacity. The capacity has a max cap
/// that is not exceeded.
/// Dependency:- This test depends on the file ListObjectsResponse.xml which contains a sample success ListObject response.
/// This Integration test works in three phases.
/// Phase 1. Keeping in mind that we can make a 100 requests with the current set configurations, 500 requests are made
/// to a mock servlet which returns back a 500 error which leads to 500 retry requests. As the capacity can only handle
/// a 100, this leads to the capacity getting depleted. An assert proves the number of retry requests that was made.
/// Phase 2. With the capacity completely entry, phase 2 makes 500 requests for which the mock servlet returns a success
/// response. This puts back enough capacity to make a 100 retry requests.
/// Phase 3. Phase 1 is repeated again with an assert to prove that Phase 2 added the said capacity.
/// </summary>
public void S3CapacityManagerIntegrationTest()
{
    int TotalRequests = 500;
    int RetryRequests = 100;
    int ExtraRequests = TotalRequests - RetryRequests;
    requestCount = 0;
    var retryFlag = true;
    using (MultipleResponseServlet servlet = new MultipleResponseServlet())
    {
        // retryFlag toggles the mock servlet between failure mode (500 status,
        // forcing retries) and success mode (canned ListObjects XML).
        servlet.OnRequest += path =>
        {
            requestCount++;
            if (retryFlag)
            {
                return new MultipleResponseServlet.Response { Contents = "foo", Headers = new Dictionary<string, string>(), StatusCode = 500 };
            }
            else
            {
                var xmlDoc = UtilityMethods.GetResourceText("ListObjectsResponse.xml");
                XmlDocument myxml = new XmlDocument();
                myxml.LoadXml(xmlDoc);
                string contents = myxml.InnerXml;
                return new MultipleResponseServlet.Response { Contents = contents, Headers = new Dictionary<string, string>(), StatusCode = 200 };
            }
        };
        string server = "http://localhost:" + servlet.Port;
        using (var client = new AmazonS3Client(new AmazonS3Config { ServiceURL = server, MaxErrorRetry = 1 }))
        {
            // Phase 1: deplete the retry capacity with failing requests.
            retryFlag = true;
            FailureRetryRequests(TotalRequests, RetryRequests, ExtraRequests, client);
            // Phase 2: successful responses refill the capacity.
            retryFlag = false;
            requestCount = 0;
            for (int i = 0; i < TotalRequests; i++)
            {
                var response = client.ListObjects("CapacityManagerTests");
            }
            // Phase 3: repeat phase 1 to prove the capacity was restored.
            retryFlag = true;
            requestCount = 0;
            FailureRetryRequests(TotalRequests, RetryRequests, ExtraRequests, client);
        }
    }
}
/// <summary>
/// Demonstrates the object-level Amazon S3 operations: listing objects, getting an object and
/// its metadata, putting objects (from a string, a file path, and a stream), deleting single and
/// multiple objects, copying an object, listing versions, and performing a multipart upload.
/// Each sample region creates its own client and issues the request(s) inline.
/// Fixes over the previous revision: the accidentally duplicated "CopyObject Sample" region has
/// been removed, and the bucket-versioning status is "Enabled" (the S3 API accepts only
/// "Enabled" or "Suspended"; "Enable" is not a valid VersioningConfiguration status).
/// </summary>
public void ObjectSamples()
{
    {
        #region ListObjects Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // List all objects
        ListObjectsRequest listRequest = new ListObjectsRequest
        {
            BucketName = "SampleBucket",
        };

        ListObjectsResponse listResponse;
        do
        {
            // Get a page of objects
            listResponse = client.ListObjects(listRequest);
            foreach (S3Object obj in listResponse.S3Objects)
            {
                Console.WriteLine("Object - " + obj.Key);
                Console.WriteLine(" Size - " + obj.Size);
                Console.WriteLine(" LastModified - " + obj.LastModified);
                Console.WriteLine(" Storage class - " + obj.StorageClass);
            }

            // Set the marker property to fetch the next page
            // NOTE(review): NextMarker is only populated by S3 when a Delimiter is set;
            // for delimiter-less listings the marker should be the last key of the page —
            // confirm against the SDK version in use.
            listRequest.Marker = listResponse.NextMarker;
        } while (listResponse.IsTruncated);

        #endregion
    }

    {
        #region GetObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a GetObject request
        GetObjectRequest request = new GetObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request and remember to dispose of the response
        using (GetObjectResponse response = client.GetObject(request))
        {
            using (StreamReader reader = new StreamReader(response.ResponseStream))
            {
                string contents = reader.ReadToEnd();
                Console.WriteLine("Object - " + response.Key);
                Console.WriteLine(" Version Id - " + response.VersionId);
                Console.WriteLine(" Contents - " + contents);
            }
        }

        #endregion
    }

    {
        #region GetObjectMetadata Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a GetObjectMetadata request
        GetObjectMetadataRequest request = new GetObjectMetadataRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request and view the response
        GetObjectMetadataResponse response = client.GetObjectMetadata(request);
        Console.WriteLine("Content Length - " + response.ContentLength);
        Console.WriteLine("Content Type - " + response.Headers.ContentType);
        // Expiration is only present when a lifecycle rule applies to the object
        if (response.Expiration != null)
        {
            Console.WriteLine("Expiration Date - " + response.Expiration.ExpiryDate);
            Console.WriteLine("Expiration Rule Id - " + response.Expiration.RuleId);
        }

        #endregion
    }

    {
        #region PutObject Sample 1

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request with inline string content
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            ContentBody = "This is sample content..."
        };

        // Put object
        PutObjectResponse response = client.PutObject(request);

        #endregion
    }

    {
        #region PutObject Sample 2

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request sourcing the content from a file on disk
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            FilePath = "contents.txt"
        };

        // Put object
        PutObjectResponse response = client.PutObject(request);

        #endregion
    }

    {
        #region PutObject Sample 3

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request sourcing the content from an open stream
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
        };

        using (FileStream stream = new FileStream("contents.txt", FileMode.Open))
        {
            request.InputStream = stream;

            // Put object
            PutObjectResponse response = client.PutObject(request);
        }

        #endregion
    }

    {
        #region DeleteObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a DeleteObject request
        DeleteObjectRequest request = new DeleteObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request
        client.DeleteObject(request);

        #endregion
    }

    {
        #region DeleteObjects Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a DeleteObjects request
        DeleteObjectsRequest request = new DeleteObjectsRequest
        {
            BucketName = "SampleBucket",
            Objects = new List<KeyVersion>
            {
                new KeyVersion() { Key = "Item1" },
                // Versioned item
                new KeyVersion() { Key = "Item2", VersionId = "Rej8CiBxcZKVK81cLr39j27Y5FVXghDK", },
                // Item in subdirectory
                new KeyVersion() { Key = "Logs/error.txt" }
            }
        };

        try
        {
            // Issue request
            DeleteObjectsResponse response = client.DeleteObjects(request);
        }
        catch (DeleteObjectsException doe)
        {
            // Catch error and list which items were deleted and which failed
            DeleteObjectsResponse errorResponse = doe.Response;
            foreach (DeletedObject deletedObject in errorResponse.DeletedObjects)
            {
                Console.WriteLine("Deleted item " + deletedObject.Key);
            }
            foreach (DeleteError deleteError in errorResponse.DeleteErrors)
            {
                Console.WriteLine("Error deleting item " + deleteError.Key);
                Console.WriteLine(" Code - " + deleteError.Code);
                Console.WriteLine(" Message - " + deleteError.Message);
            }
        }

        #endregion
    }

    {
        #region CopyObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a CopyObject request
        CopyObjectRequest request = new CopyObjectRequest
        {
            SourceBucket = "SampleBucket",
            SourceKey = "Item1",
            DestinationBucket = "AnotherBucket",
            DestinationKey = "Copy1",
            CannedACL = S3CannedACL.PublicRead
        };

        // Issue request
        client.CopyObject(request);

        #endregion
    }

    {
        #region ListVersions Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Turn versioning on for a bucket.
        // The S3 API accepts only "Enabled" or "Suspended" for the versioning status.
        client.PutBucketVersioning(new PutBucketVersioningRequest
        {
            BucketName = "SampleBucket",
            VersioningConfig = new S3BucketVersioningConfig { Status = "Enabled" }
        });

        // Populate bucket with multiple items, each with multiple versions
        PopulateBucket(client, "SampleBucket");

        // Get versions
        ListVersionsRequest request = new ListVersionsRequest
        {
            BucketName = "SampleBucket"
        };

        // Make paged ListVersions calls
        ListVersionsResponse response;
        do
        {
            response = client.ListVersions(request);

            // View information about versions
            foreach (var version in response.Versions)
            {
                Console.WriteLine("Key = {0}, Version = {1}, IsLatest = {2}, LastModified = {3}, Size = {4}",
                    version.Key,
                    version.VersionId,
                    version.IsLatest,
                    version.LastModified,
                    version.Size);
            }

            // Advance both markers to fetch the next page
            request.KeyMarker = response.NextKeyMarker;
            request.VersionIdMarker = response.NextVersionIdMarker;
        } while (response.IsTruncated);

        #endregion
    }

    {
        #region Multipart Upload Sample

        int MB = (int)Math.Pow(2, 20);

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Define input stream
        Stream inputStream = Create13MBDataStream();

        // Initiate multipart upload
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };
        InitiateMultipartUploadResponse initResponse = client.InitiateMultipartUpload(initRequest);

        // Upload part 1
        UploadPartRequest uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 1,
            PartSize = 5 * MB,
            InputStream = inputStream
        };
        UploadPartResponse up1Response = client.UploadPart(uploadRequest);

        // Upload part 2
        uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 2,
            PartSize = 5 * MB,
            InputStream = inputStream
        };
        UploadPartResponse up2Response = client.UploadPart(uploadRequest);

        // Upload part 3 — no PartSize: the final part consumes the remainder of the stream
        uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 3,
            InputStream = inputStream
        };
        UploadPartResponse up3Response = client.UploadPart(uploadRequest);

        // List parts for current upload
        ListPartsRequest listPartRequest = new ListPartsRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId
        };
        ListPartsResponse listPartResponse = client.ListParts(listPartRequest);
        Debug.Assert(listPartResponse.Parts.Count == 3);

        // Complete the multipart upload
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartETags = new List<PartETag>
            {
                new PartETag { ETag = up1Response.ETag, PartNumber = 1 },
                new PartETag { ETag = up2Response.ETag, PartNumber = 2 },
                new PartETag { ETag = up3Response.ETag, PartNumber = 3 }
            }
        };
        CompleteMultipartUploadResponse compResponse = client.CompleteMultipartUpload(compRequest);

        #endregion
    }
}