/// <summary>
/// Ensures an S3 bucket with the given name exists, creating it when absent.
/// </summary>
/// <param name="client">S3 client used for the lookup and the creation call.</param>
/// <param name="cmap">Name of the bucket to check for / create.</param>
public static void CreateBucket(AmazonS3 client, string cmap)
{
    Console.Out.WriteLine("Checking S3 bucket with name " + cmap);

    // Scan the account's bucket list for an exact name match.
    ListBucketsResponse listing = client.ListBuckets();
    bool exists = false;
    foreach (S3Bucket candidate in listing.Buckets)
    {
        if (candidate.BucketName == cmap)
        {
            Console.Out.WriteLine(" Bucket found will not create it.");
            exists = true;
            break;
        }
    }

    // Only issue PutBucket when the scan came up empty.
    if (!exists)
    {
        Console.Out.WriteLine(" Bucket not found will create it.");
        client.PutBucket(new PutBucketRequest().WithBucketName(cmap));
        Console.Out.WriteLine("Created S3 bucket with name " + cmap);
    }
}
/// <summary>
/// Ensures the hard-coded "Drone" bucket exists on S3, creating it when missing.
/// </summary>
/// <returns>
/// True when the bucket already existed before this call; false when it had
/// to be created just now.
/// </returns>
public bool CreateBucket()
{
    config.ServiceURL = "s3.amazonaws.com";
    AmazonS3 client = Amazon.AWSClientFactory.CreateAmazonS3Client(am.PublicKey, am.PrivateKey);

    // Look for an existing bucket named "Drone".
    bool alreadyThere = false;
    ListBucketsResponse listing = client.ListBuckets();
    foreach (S3Bucket candidate in listing.Buckets)
    {
        if (candidate.BucketName == "Drone")
        {
            alreadyThere = true;
            break;
        }
    }

    if (!alreadyThere)
    {
        client.PutBucket(new PutBucketRequest().WithBucketName("Drone"));
    }

    return alreadyThere;
}
/// <summary>
/// Checks whether the given S3 bucket exists, optionally creating it when it
/// does not.
/// </summary>
/// <param name="accessKeyID">AWS access key id used to build the client.</param>
/// <param name="secretAccessKey">AWS secret access key used to build the client.</param>
/// <param name="bucketName">Name of the bucket to look for.</param>
/// <param name="create">When true, a missing bucket is created instead of failing.</param>
/// <returns>True when the bucket exists (or was just created).</returns>
/// <exception cref="DuradosException">
/// Thrown when the AWS call fails, or when the bucket is missing and
/// <paramref name="create"/> is false.
/// </exception>
public bool IsBucketNameExists(string accessKeyID, string secretAccessKey, string bucketName, bool create)
{
    try
    {
        AmazonS3 client = GetS3Client(accessKeyID, secretAccessKey);

        // Scan the account's bucket list for an exact name match.
        ListBucketsResponse response = client.ListBuckets();
        bool found = false;
        foreach (S3Bucket bucket in response.Buckets)
        {
            if (bucket.BucketName == bucketName)
            {
                found = true;
                break;
            }
        }

        if (!found)
        {
            if (create)
            {
                client.PutBucket(new PutBucketRequest().WithBucketName(bucketName));
            }
            else
            {
                // FIX: grammar in the user-visible message ("does not exists" -> "does not exist").
                throw new Exception("The bucket " + bucketName + " does not exist.");
            }
        }

        return true;
    }
    catch (Exception ex)
    {
        // Any failure (bad credentials, network error, missing bucket) is
        // surfaced as the application-level exception type.
        throw new DuradosException("AWS Connection failed, Error: " + ex.Message);
    }
}
/// <summary>
/// Prints the name of every bucket owned by the account, with a friendlier
/// message when the AWS credentials are rejected.
/// </summary>
static void ListingBuckets()
{
    try
    {
        using (ListBucketsResponse listing = client.ListBuckets())
        {
            foreach (S3Bucket owned in listing.Buckets)
            {
                Console.WriteLine("You own Bucket with name: {0}", owned.BucketName);
            }
        }
    }
    catch (AmazonS3Exception amazonS3Exception)
    {
        // Credential problems get a targeted hint; everything else is reported verbatim.
        bool badCredentials =
            amazonS3Exception.ErrorCode != null &&
            (amazonS3Exception.ErrorCode.Equals("InvalidAccessKeyId") ||
             amazonS3Exception.ErrorCode.Equals("InvalidSecurity"));

        if (badCredentials)
        {
            Console.WriteLine("Please check the provided AWS Credentials.");
            Console.WriteLine("If you haven't signed up for Amazon S3, please visit http://aws.amazon.com/s3");
        }
        else
        {
            Console.WriteLine("An Error, number {0}, occurred when listing buckets with the message '{1}", amazonS3Exception.ErrorCode, amazonS3Exception.Message);
        }
    }
}
/// <summary>
/// Finds the bucket whose name matches this instance's <c>BucketName</c>;
/// returns null when the account owns no such bucket.
/// </summary>
private S3Bucket GetBucket(AmazonS3 client)
{
    ListBucketsResponse listing = client.ListBuckets(new ListBucketsRequest());
    return listing.Buckets.FirstOrDefault(b => b.BucketName == this.BucketName);
}
/// <summary>
/// Renders the account's bucket names as HTML list items into the
/// s3Placeholder control.
/// </summary>
private void WriteS3Info()
{
    var markup = new StringBuilder();
    ListBucketsResponse listing = s3.ListBuckets();

    // Guard against a null or empty bucket collection before enumerating.
    if (listing.Buckets != null && listing.Buckets.Count > 0)
    {
        foreach (S3Bucket bucket in listing.Buckets)
        {
            markup.AppendFormat("<li>{0}</li>", bucket.BucketName);
        }
    }

    this.s3Placeholder.Text = markup.ToString();
}
/// <summary>
/// Test-class setup: creates the shared S3 client and grabs the account's
/// first bucket; marks the run inconclusive when either prerequisite fails.
/// </summary>
public static void ClassInitialize(TestContext context)
{
    try
    {
        client = ClientTests.CreateClient();
        var listing = client.ListBuckets();
        bucket = listing.Buckets.First();
    }
    catch (Exception e)
    {
        // Missing credentials / no buckets means the suite cannot run, not that it failed.
        Assert.Inconclusive("prerequisite: unable to create client or bucket. Error: {0}", e.Message);
    }
}
/// <summary>
/// Uploads a byte-array file to the configured S3 bucket (AppSettings key
/// "AWSBUCKET"), creating the bucket first when it does not exist yet.
/// </summary>
/// <param name="sFolder">Optional folder prefix; "" uploads at the bucket root.</param>
/// <param name="sObjectKey">Object key (file name) within the bucket/folder.</param>
/// <param name="fileContent">Raw file bytes to upload.</param>
/// <param name="bMakePublic">When true the object is given a public-read ACL.</param>
/// <returns>True on success; false when any step fails (errors are swallowed by design).</returns>
public bool SaveFile(string sFolder, string sObjectKey, byte[] fileContent, bool bMakePublic)
{
    try
    {
        AmazonS3 client = AWSClientFactory.CreateAmazonS3Client(S3ACCESSKEY, S3SECRETKEY);
        Amazon.S3.Transfer.TransferUtility uploader = new Amazon.S3.Transfer.TransferUtility(S3ACCESSKEY, S3SECRETKEY);
        string BUCKET_NAME = ConfigurationManager.AppSettings["AWSBUCKET"];

        // Create the target bucket on first use.
        ListBucketsResponse response = client.ListBuckets();
        bool found = false;
        foreach (S3Bucket bucket in response.Buckets)
        {
            if (bucket.BucketName == BUCKET_NAME)
            {
                found = true;
                break;
            }
        }
        if (!found)
        {
            client.PutBucket(new PutBucketRequest().WithBucketName(BUCKET_NAME));
        }

        // Folder prefixes are emulated with "/" in the object key.
        if (sFolder != "")
        {
            sObjectKey = sFolder + "/" + sObjectKey;
        }

        // FIX: the MemoryStream was never disposed; a using block now
        // releases it deterministically once the (synchronous) upload returns.
        using (System.IO.Stream stream = new System.IO.MemoryStream(fileContent))
        {
            Amazon.S3.Transfer.TransferUtilityUploadRequest request = new Amazon.S3.Transfer.TransferUtilityUploadRequest();
            request.WithBucketName(BUCKET_NAME);
            request.WithKey(sObjectKey);
            request.WithInputStream(stream);
            request.WithTimeout(-1); // no client-side timeout, for large uploads
            if (bMakePublic)
            {
                request.CannedACL = S3CannedACL.PublicRead;
            }
            uploader.Upload(request);
        }

        return true;
    }
    catch (Exception)
    {
        // Deliberate best-effort contract: any failure maps to false.
        return false;
    }
}
/// <summary>
/// Ensures the authenticated user's per-item bucket exists, creating it on
/// demand. Does nothing for anonymous requests.
/// </summary>
/// <param name="itemKey">Item identifier folded into the bucket name.</param>
/// <param name="s3Client">S3 client used for the lookup and creation.</param>
public static void CheckForBucket(string itemKey, AmazonS3 s3Client)
{
    if (!HttpContext.Current.User.Identity.IsAuthenticated)
    {
        return;
    }

    // Bucket name is derived from the user name and item key via the configured format string.
    string userBucketName = String.Format(Settings.Default.BucketNameFormat, HttpContext.Current.User.Identity.Name, itemKey);

    using (ListBucketsResponse listing = s3Client.ListBuckets())
    {
        bool exists = listing.Buckets.Any(b => b.BucketName == userBucketName);
        if (!exists)
        {
            PutBucketRequest createRequest = new PutBucketRequest().WithBucketName(userBucketName);
            PutBucketResponse createResponse = s3Client.PutBucket(createRequest);
            createResponse.Dispose();
        }
    }
}
/// <summary>
/// Initializes the shared S3 client and resolves the bucket named by the
/// "s3_bucketName" entry in ez3d.config.
/// </summary>
/// <returns>True when the client was created and the configured bucket was found.</returns>
public static bool Initialize_S3_stuff()
{
    Console.WriteLine("starting Initialize_S3_stuff()");
    s3_client = null;
    bucketName = null;
    try
    {
        if (!Utils.CFG.ContainsKey("s3_bucketName"))
        {
            Console.WriteLine("param s3_bucketName is not found in ez3d.config");
            return false;
        }

        s3_client = AWSClientFactory.CreateAmazonS3Client();

        // Find the configured bucket among the account's buckets.
        ListBucketsResponse response = s3_client.ListBuckets();
        foreach (S3Bucket bucket in response.Buckets)
        {
            if (bucket.BucketName == (String)Utils.CFG["s3_bucketName"])
            {
                bucketName = bucket.BucketName;
                Console.WriteLine("bucketName =" + bucketName);
                break; // bucket names are unique per account; no need to keep scanning
            }
        }

        if (bucketName == null)
        {
            Console.WriteLine("(bucketName == null)");
            return false;
        }

        // FIX: corrected typos in the success log message ("fininshed succefully").
        Console.WriteLine("Initialize_S3_stuff finished successfully");
        return true;
    }
    catch (AmazonS3Exception e)
    {
        Console.WriteLine("AmazonS3Exception caught !!!");
        Console.WriteLine(e.Message);
        return false;
    }
}
/// <summary>
/// Enumerates all the buckets owned by the account.
/// Note: this issues a single ListBuckets call — the response already
/// contains the complete bucket list, so no page iteration happens here.
/// </summary>
/// <returns>Array of bucket names</returns>
public string[] EnumerateBuckets()
{
    ListBucketsResponse listing = _amazonS3.ListBuckets();
    var names = new List<string>();
    foreach (S3Bucket bucket in listing.Buckets)
    {
        names.Add(bucket.BucketName);
    }
    return names.ToArray();
}
/// <summary>
/// Ensures an S3 bucket with the given name exists, creating it when absent.
/// </summary>
/// <param name="client">S3 client used for the lookup and the creation call.</param>
/// <param name="bucketname">Name of the bucket to check for / create.</param>
private static void CreateBucket(AmazonS3 client, string bucketname)
{
    Console.Out.WriteLine("Checking S3 bucket with name " + bucketname);

    ListBucketsResponse response = client.ListBuckets();
    bool exists = response.Buckets.Any(b => b.BucketName == bucketname);

    if (exists)
    {
        Console.Out.WriteLine(" Bucket found will not create it.");
    }
    else
    {
        Console.Out.WriteLine(" Bucket not found will create it.");
        client.PutBucket(new PutBucketRequest().WithBucketName(bucketname));
        Console.Out.WriteLine("Created S3 bucket with name " + bucketname);
    }
}
/// <summary>
/// Returns the names of every bucket visible to the configured client.
/// </summary>
public List <string> GetAllBuckets()
{
    var names = new List<string>();
    ListBucketsResponse listing = _client.ListBuckets();
    foreach (S3Bucket bucket in listing.Buckets)
    {
        names.Add(bucket.BucketName);
    }
    return names;
}
/// <summary>
/// Builds a text report of the caller's AWS footprint: EC2 instance count,
/// SimpleDB domain count, and S3 bucket count, each with per-service error
/// handling so one failing service does not hide the others.
/// </summary>
/// <returns>The accumulated report as a single string.</returns>
public static string GetServiceOutput()
{
    StringBuilder sb = new StringBuilder(1024);
    using (StringWriter sr = new StringWriter(sb))
    {
        sr.WriteLine("===========================================");
        sr.WriteLine("Welcome to the AWS .NET SDK!");
        sr.WriteLine("===========================================");

        // Print the number of Amazon EC2 instances.
        AmazonEC2 ec2 = AWSClientFactory.CreateAmazonEC2Client();
        DescribeInstancesRequest ec2Request = new DescribeInstancesRequest();
        try
        {
            DescribeInstancesResponse ec2Response = ec2.DescribeInstances(ec2Request);
            // NOTE(review): this counts reservations, not individual instances;
            // a reservation can hold several instances — confirm intended metric.
            int numInstances = 0;
            numInstances = ec2Response.DescribeInstancesResult.Reservation.Count;
            sr.WriteLine("You have " + numInstances + " Amazon EC2 instance(s) running in the US-East (Northern Virginia) region.");
        }
        catch (AmazonEC2Exception ex)
        {
            // AuthFailure gets a sign-up hint; everything else is dumped verbatim.
            if (ex.ErrorCode != null && ex.ErrorCode.Equals("AuthFailure"))
            {
                sr.WriteLine("The account you are using is not signed up for Amazon EC2.");
                sr.WriteLine("You can sign up for Amazon EC2 at http://aws.amazon.com/ec2");
            }
            else
            {
                sr.WriteLine("Caught Exception: " + ex.Message);
                sr.WriteLine("Response Status Code: " + ex.StatusCode);
                sr.WriteLine("Error Code: " + ex.ErrorCode);
                sr.WriteLine("Error Type: " + ex.ErrorType);
                sr.WriteLine("Request ID: " + ex.RequestId);
                sr.WriteLine("XML: " + ex.XML);
            }
        }
        sr.WriteLine();

        // Print the number of Amazon SimpleDB domains.
        AmazonSimpleDB sdb = AWSClientFactory.CreateAmazonSimpleDBClient();
        ListDomainsRequest sdbRequest = new ListDomainsRequest();
        try
        {
            ListDomainsResponse sdbResponse = sdb.ListDomains(sdbRequest);
            if (sdbResponse.IsSetListDomainsResult())
            {
                int numDomains = 0;
                numDomains = sdbResponse.ListDomainsResult.DomainName.Count;
                sr.WriteLine("You have " + numDomains + " Amazon SimpleDB domain(s) in the US-East (Northern Virginia) region.");
            }
        }
        catch (AmazonSimpleDBException ex)
        {
            if (ex.ErrorCode != null && ex.ErrorCode.Equals("AuthFailure"))
            {
                sr.WriteLine("The account you are using is not signed up for Amazon SimpleDB.");
                sr.WriteLine("You can sign up for Amazon SimpleDB at http://aws.amazon.com/simpledb");
            }
            else
            {
                sr.WriteLine("Caught Exception: " + ex.Message);
                sr.WriteLine("Response Status Code: " + ex.StatusCode);
                sr.WriteLine("Error Code: " + ex.ErrorCode);
                sr.WriteLine("Error Type: " + ex.ErrorType);
                sr.WriteLine("Request ID: " + ex.RequestId);
                sr.WriteLine("XML: " + ex.XML);
            }
        }
        sr.WriteLine();

        // Print the number of Amazon S3 Buckets.
        AmazonS3 s3Client = AWSClientFactory.CreateAmazonS3Client();
        try
        {
            ListBucketsResponse response = s3Client.ListBuckets();
            int numBuckets = 0;
            if (response.Buckets != null && response.Buckets.Count > 0)
            {
                numBuckets = response.Buckets.Count;
            }
            sr.WriteLine("You have " + numBuckets + " Amazon S3 bucket(s) in the US Standard region.");
        }
        catch (AmazonS3Exception ex)
        {
            if (ex.ErrorCode != null && (ex.ErrorCode.Equals("InvalidAccessKeyId") || ex.ErrorCode.Equals("InvalidSecurity")))
            {
                sr.WriteLine("Please check the provided AWS Credentials.");
                sr.WriteLine("If you haven't signed up for Amazon S3, please visit http://aws.amazon.com/s3");
            }
            else
            {
                // NOTE(review): unlike the EC2/SimpleDB branches this one omits
                // "Error Type" — presumably intentional, but worth confirming.
                sr.WriteLine("Caught Exception: " + ex.Message);
                sr.WriteLine("Response Status Code: " + ex.StatusCode);
                sr.WriteLine("Error Code: " + ex.ErrorCode);
                sr.WriteLine("Request ID: " + ex.RequestId);
                sr.WriteLine("XML: " + ex.XML);
            }
        }
        sr.WriteLine("Press any key to continue...");
    }
    return(sb.ToString());
}
/// <summary>
/// Returns true when the account owns a bucket named <c>BucketName</c>.
/// </summary>
private bool DoesBucketExist(AmazonS3 client)
{
    // Dispose the listing response as soon as the membership test completes.
    using (ListBucketsResponse listing = client.ListBuckets())
    {
        foreach (S3Bucket bucket in listing.Buckets)
        {
            if (bucket.BucketName == BucketName)
            {
                return true;
            }
        }
        return false;
    }
}
/// <summary>
/// Checks whether a bucket named <c>BucketName</c> exists in the account.
/// </summary>
private bool DoesBucketExist(AmazonS3 client)
{
    using (ListBucketsResponse listing = client.ListBuckets())
    {
        bool exists = listing.Buckets.Any(candidate => candidate.BucketName == BucketName);
        return exists;
    }
}
/// <summary>
/// Looks up this instance's bucket (by <c>BucketName</c>) in the account's
/// bucket list; returns null when no such bucket exists.
/// </summary>
private S3Bucket GetBucket(AmazonS3 client)
{
    ListBucketsRequest request = new ListBucketsRequest();
    ListBucketsResponse response = client.ListBuckets(request);

    foreach (S3Bucket candidate in response.Buckets)
    {
        if (candidate.BucketName == this.BucketName)
        {
            return candidate;
        }
    }
    return null; // no bucket with the configured name
}
/// <summary>
/// Entry point for the cross-cloud backup: mirrors every S3 bucket into a
/// Rackspace Cloud Files backup container, then mirrors every Cloud Files
/// container into a backup S3 bucket, streaming object bodies directly
/// between the services and skipping objects whose ETag/MD5 already matches.
/// Configuration comes from CrossCloudBackup.xml (or args[0]).
/// </summary>
static void Main(string[] args)
{
    startTime = DateTime.Now;

    // Route unhandled exceptions through our handler/logger.
    AppDomain currentDomain = AppDomain.CurrentDomain;
    currentDomain.UnhandledException += new UnhandledExceptionEventHandler(MyHandler);

    // Catch ctrl+c so we can put out our summary.
    // NOTE(review): eventArgs.Cancel = false lets the process terminate right
    // after this handler returns, so the main loop may never observe
    // keepRunning == false — confirm the intended shutdown behavior.
    Console.CancelKeyPress += (sender, eventArgs) => {
        eventArgs.Cancel = false;
        WriteLog("!!CANCELLED!!");
        keepRunning = false;
        PrintSummary();
    };

    ServicePointManager.Expect100Continue = true;
    // NOTE(review): SSL3 is insecure (POODLE) and widely disabled server-side;
    // consider TLS 1.2 where the target runtime supports it.
    ServicePointManager.SecurityProtocol = SecurityProtocolType.Ssl3;

    // Config file path: first command-line argument, else the default name.
    string strConfigFile = "CrossCloudBackup.xml";
    if (args.Length > 0)
    {
        strConfigFile = args[0];
    }

    // When no config exists, write a template for the user to fill in and exit.
    if (!File.Exists(strConfigFile))
    {
        new XDocument(
            new XDeclaration("1.0", "utf-8", "yes"),
            new XComment("CrossCloudBackup Local Config File"),
            new XElement("root",
                new XElement("AWSKey", "someValue"),
                new XElement("AWSSecret", "someValue"),
                new XElement("AWSRegion", "eu-west-1"),
                new XElement("RSUsername", "someValue"),
                new XElement("RSAPIKey", "someValue"),
                new XElement("RSUseServiceNet", "false"),
                new XElement("RSLocation", "UK"),
                new XElement("ExcludeBuckets", ""),
                new XElement("ExcludeContainers", ""),
                //new XElement("MirrorAll", "true"), // TODO: Add Selective Sync
                new XElement("RSBackupContainer", "s3-backup"),
                new XElement("S3BackupBucket", "rs-backup")
                //new XElement("TransferThreads", "3") // TODO: Add Threading
            )
        )
        .Save(strConfigFile);
        Console.WriteLine(strConfigFile + " not found, blank one created.");
        Console.WriteLine("Press enter to exit...");
        Console.ReadLine();
        Environment.Exit(1);
    }

    // We know the config file exists, so open and read values.
    XDocument config = XDocument.Load(strConfigFile);

    // Get AWS config.
    string AWSKey = config.Element("root").Element("AWSKey").Value;
    string AWSSecret = config.Element("root").Element("AWSSecret").Value;
    // Map the configured region string to an SDK endpoint (default eu-west-1).
    RegionEndpoint region = RegionEndpoint.EUWest1;
    switch (config.Element("root").Element("AWSRegion").Value)
    {
        case "eu-west-1":
            region = RegionEndpoint.EUWest1;
            break;
        case "sa-east-1":
            region = RegionEndpoint.SAEast1;
            break;
        case "us-east-1":
            region = RegionEndpoint.USEast1;
            break;
        case "ap-northeast-1":
            region = RegionEndpoint.APNortheast1;
            break;
        case "us-west-2":
            region = RegionEndpoint.USWest2;
            break;
        case "us-west-1":
            region = RegionEndpoint.USWest1;
            break;
        case "ap-southeast-1":
            region = RegionEndpoint.APSoutheast1;
            break;
        case "ap-southeast-2":
            region = RegionEndpoint.APSoutheast2;
            break;
        default:
            region = RegionEndpoint.EUWest1;
            break;
    }

    // Create a connection to S3.
    WriteLog("Connecting to S3");
    S3Client = AWSClientFactory.CreateAmazonS3Client(AWSKey, AWSSecret, region);

    // Get RS config: map the configured location to a Rackspace auth endpoint.
    Rackspace.CloudFiles.Utils.AuthUrl rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.US;
    switch (config.Element("root").Element("RSLocation").Value)
    {
        case "UK":
            rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.UK;
            break;
        case "US":
            rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.US;
            break;
        case "Mosso":
            rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.Mosso;
            break;
    }

    // Create connection to Rackspace Cloud Files.
    WriteLog("Connecting to Rackspace Cloud Files");
    RSConnection = new Connection(new UserCredentials(config.Element("root").Element("RSUsername").Value, config.Element("root").Element("RSAPIKey").Value, rsRegion), Convert.ToBoolean(config.Element("root").Element("RSUseServiceNet").Value));

    // Get exclusions (comma-separated bucket/container names).
    string[] excludeBuckets = config.Element("root").Element("ExcludeBuckets").Value.Split(',');
    string[] excludeContainers = config.Element("root").Element("ExcludeContainers").Value.Split(',');

    // First process all the S3 buckets and stream right into Rackspace container.
    WriteLog("Listing S3 Buckets");
    ListBucketsResponse response = S3Client.ListBuckets();
    WriteLog("Found " + response.Buckets.Count() + " buckets");
    foreach (S3Bucket bucket in response.Buckets)
    {
        if (bucket.BucketName == config.Element("root").Element("S3BackupBucket").Value)
        {
            WriteLog("Skipping " + bucket.BucketName + " as backup folder");
        }
        else if (excludeBuckets.Contains(bucket.BucketName))
        {
            WriteLog("Skipping " + bucket.BucketName + " as in exclusions");
        }
        else
        {
            // We need to know if the bucket is in the right region, otherwise it will error.
            GetBucketLocationResponse locResponse = S3Client.GetBucketLocation(new GetBucketLocationRequest().WithBucketName(bucket.BucketName));
            if (locResponse.Location == config.Element("root").Element("AWSRegion").Value)
            {
                WriteLog("Processing " + bucket.BucketName);
                // Get list of files (paged, 1000 per response).
                ListObjectsRequest request = new ListObjectsRequest();
                request.BucketName = bucket.BucketName;
                do
                {
                    ListObjectsResponse filesResponse = S3Client.ListObjects(request);
                    WriteLog("Found " + filesResponse.S3Objects.Count() + " files");
                    if (filesResponse.IsTruncated)
                    {
                        WriteLog("there are additional pages of files");
                    }
                    foreach (S3Object file in filesResponse.S3Objects)
                    {
                        bool bolTransfer = false;
                        // See if it exists on Rackspace.
                        string uri = RSConnection.StorageUrl + "/" + config.Element("root").Element("RSBackupContainer").Value + "/" + bucket.BucketName + "/" + file.Key;
                        try
                        {
                            var req = (HttpWebRequest)WebRequest.Create(uri);
                            req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                            req.Method = "HEAD";
                            // Compare Etags to see if we need to sync.
                            using (var resp = req.GetResponse() as HttpWebResponse)
                            {
                                if ("\"" + resp.Headers["eTag"] + "\"" != file.ETag)
                                {
                                    bolTransfer = true;
                                }
                            }
                        }
                        catch (System.Net.WebException e)
                        {
                            if (e.Status == WebExceptionStatus.ProtocolError && ((HttpWebResponse)e.Response).StatusCode == HttpStatusCode.NotFound)
                            {
                                // Item not found, so upload.
                                bolTransfer = true;
                            }
                            //WriteLog("End Request to " + uri);
                        }
                        if (file.StorageClass == "GLACIER")
                        {
                            bolTransfer = false; // We can't get things out of Glacier, but they are still listed here.
                        }
                        if (bolTransfer)
                        {
                            WriteLog("Syncing " + file.Key);
                            using (GetObjectResponse getResponse = S3Client.GetObject(new GetObjectRequest().WithBucketName(bucket.BucketName).WithKey(file.Key)))
                            {
                                using (Stream s = getResponse.ResponseStream)
                                {
                                    // We can stream right from s3 to CF, no need to store in memory or filesystem.
                                    var req = (HttpWebRequest)WebRequest.Create(uri);
                                    req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                                    req.Method = "PUT";
                                    req.SendChunked = true;
                                    req.AllowWriteStreamBuffering = false;
                                    req.Timeout = -1;
                                    using (Stream stream = req.GetRequestStream())
                                    {
                                        // 8 KiB copy loop from the S3 body to the CF request body.
                                        byte[] data = new byte[8192];
                                        int bytesRead = 0;
                                        while ((bytesRead = s.Read(data, 0, data.Length)) > 0)
                                        {
                                            stream.Write(data, 0, bytesRead);
                                        }
                                        stream.Flush();
                                        stream.Close();
                                    }
                                    req.GetResponse().Close();
                                }
                            }
                            intTransferred++;
                            bytesTransferred += file.Size;
                        }
                        else
                        {
                            WriteLog("Skipping " + file.Key);
                            intSkipped++;
                        }
                        // Check our exit condition.
                        if (!keepRunning)
                        {
                            break;
                        }
                    }
                    // Loop if there is more than 1000 files.
                    if (filesResponse.IsTruncated)
                    {
                        request.Marker = filesResponse.NextMarker;
                    }
                    else
                    {
                        request = null;
                    }
                    if (!keepRunning)
                    {
                        break;
                    }
                } while (request != null);
            }
        }
        if (!keepRunning)
        {
            break;
        }
    }

    // Now get all the Rackspace containers and stream them to Amazon.
    WriteLog("Listing CF Containers");
    List <string> lstContainers = RSConnection.GetContainers();
    WriteLog("Found " + lstContainers.Count() + " containers");
    foreach (string container in lstContainers)
    {
        if (container == config.Element("root").Element("RSBackupContainer").Value)
        {
            WriteLog("Skipping " + container + " as backup folder");
        }
        else if (excludeContainers.Contains(container))
        {
            WriteLog("Skipping " + container + " as in exclusions");
        }
        else
        {
            WriteLog("Processing " + container);
            XmlDocument containerInfo = RSConnection.GetContainerInformationXml(container);
            do
            {
                int filesCount = containerInfo.GetElementsByTagName("object").Count;
                WriteLog("Found " + filesCount + " files");
                foreach (XmlNode file in containerInfo.GetElementsByTagName("object"))
                {
                    bool bolTransfer = false;
                    string strBucketName = config.Element("root").Element("S3BackupBucket").Value;
                    string strKey = container + file.SelectSingleNode("name").InnerText;
                    // See if the file exists on s3.
                    try
                    {
                        GetObjectMetadataResponse metaResp = S3Client.GetObjectMetadata(new GetObjectMetadataRequest().WithBucketName(strBucketName).WithKey(strKey));
                        // Compare the etags.
                        if (metaResp.ETag != "\"" + file.SelectSingleNode("hash").InnerText + "\"")
                        {
                            bolTransfer = true;
                        }
                    }
                    catch (Amazon.S3.AmazonS3Exception e)
                    {
                        // Object missing (or any S3 error) -> transfer it.
                        bolTransfer = true;
                    }
                    if (bolTransfer)
                    {
                        WriteLog("Syncing " + file.SelectSingleNode("name").InnerText);
                        // The C# binding lacks streaming support, so stream manually over HTTP.
                        string uri = RSConnection.StorageUrl + "/" + container + "/" + file.SelectSingleNode("name").InnerText;
                        var req = (HttpWebRequest)WebRequest.Create(uri);
                        req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                        req.Method = "GET";
                        using (var resp = req.GetResponse() as HttpWebResponse)
                        {
                            using (Stream s = resp.GetResponseStream())
                            {
                                // Build a signed S3 PUT by hand (AWS signature v2 style:
                                // HMAC-SHA1 over the canonical string, x-amz-date header).
                                string today = String.Format("{0:ddd,' 'dd' 'MMM' 'yyyy' 'HH':'mm':'ss' 'zz00}", DateTime.Now);
                                string stringToSign = "PUT\n" + "\n" + file.SelectSingleNode("content_type").InnerText + "\n" + "\n" + "x-amz-date:" + today + "\n" + "/" + strBucketName + "/" + strKey;
                                Encoding ae = new UTF8Encoding();
                                HMACSHA1 signature = new HMACSHA1(ae.GetBytes(AWSSecret));
                                string encodedCanonical = Convert.ToBase64String(signature.ComputeHash(ae.GetBytes(stringToSign)));
                                string authHeader = "AWS " + AWSKey + ":" + encodedCanonical;
                                string uriS3 = "https://" + strBucketName + ".s3.amazonaws.com/" + strKey;
                                var reqS3 = (HttpWebRequest)WebRequest.Create(uriS3);
                                reqS3.Headers.Add("Authorization", authHeader);
                                reqS3.Headers.Add("x-amz-date", today);
                                reqS3.ContentType = file.SelectSingleNode("content_type").InnerText;
                                // NOTE(review): Convert.ToInt32 overflows for objects
                                // larger than 2 GiB — ContentLength is a long; confirm
                                // expected object sizes.
                                reqS3.ContentLength = Convert.ToInt32(file.SelectSingleNode("bytes").InnerText);
                                reqS3.Method = "PUT";
                                reqS3.AllowWriteStreamBuffering = false;
                                if (reqS3.ContentLength == -1L)
                                {
                                    reqS3.SendChunked = true;
                                }
                                using (Stream streamS3 = reqS3.GetRequestStream())
                                {
                                    // 32 KiB copy loop from the CF body to the S3 request body.
                                    byte[] data = new byte[32768];
                                    int bytesRead = 0;
                                    while ((bytesRead = s.Read(data, 0, data.Length)) > 0)
                                    {
                                        streamS3.Write(data, 0, bytesRead);
                                    }
                                    streamS3.Flush();
                                    streamS3.Close();
                                }
                                reqS3.GetResponse().Close();
                            }
                        }
                        intTransferred++;
                        bytesTransferred += Convert.ToInt64(file.SelectSingleNode("bytes").InnerText);
                    }
                    else
                    {
                        WriteLog("Skipping " + file.SelectSingleNode("name").InnerText);
                        intSkipped++;
                    }
                    // Check our exit condition.
                    if (!keepRunning)
                    {
                        break;
                    }
                }
                if (filesCount < 10000)
                {
                    containerInfo = null;
                }
                else
                {
                    // Fetch the next list, but the Rackspace binding doesn't support markers with XML responses,
                    // so request the next page manually using the last listed name as the marker.
                    try
                    {
                        string uri = RSConnection.StorageUrl + "/" + container + "?format=xml&marker=" + Uri.EscapeUriString(containerInfo.FirstChild.NextSibling.LastChild.SelectSingleNode("name").InnerText);
                        var req = (HttpWebRequest)WebRequest.Create(uri);
                        req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                        req.Method = "GET";
                        using (var resp = req.GetResponse() as HttpWebResponse)
                        {
                            using (var reader = new System.IO.StreamReader(resp.GetResponseStream(), ASCIIEncoding.ASCII))
                            {
                                string responseText = reader.ReadToEnd();
                                containerInfo.LoadXml(responseText);
                            }
                        }
                    }
                    catch (System.Net.WebException e)
                    {
                        // 404 on the marker request means no more pages.
                        if (e.Status == WebExceptionStatus.ProtocolError && ((HttpWebResponse)e.Response).StatusCode == HttpStatusCode.NotFound)
                        {
                            containerInfo = null;
                        }
                    }
                }
            } while (containerInfo != null);
        }
    }
    if (keepRunning)
    {
        WriteLog("Completed");
        PrintSummary();
    }
}