/// <summary>
/// Probes the object via a metadata (HEAD) request and reports whether the
/// object — and, via <paramref name="bucketExists"/>, the bucket — exists.
/// </summary>
/// <param name="bucketExists">Set to false only when S3 reports "NoSuchBucket".</param>
/// <returns>True when the object exists; false for "NoSuchKey"/"NoSuchBucket".</returns>
internal bool ExistsWithBucketCheck(out bool bucketExists)
{
    bucketExists = true;
    try
    {
        var metadataRequest = new GetObjectMetadataRequest
        {
            BucketName = bucket,
            Key = S3Helper.EncodeKey(key)
        };
        metadataRequest.BeforeRequestEvent += S3Helper.FileIORequestEventHandler;

        // A missing object surfaces as a "NoSuchKey" error from the service,
        // so reaching the return means the object is present.
        s3Client.GetObjectMetadata(metadataRequest);
        return true;
    }
    catch (AmazonS3Exception ex)
    {
        if (string.Equals(ex.ErrorCode, "NoSuchBucket"))
        {
            bucketExists = false;
            return false;
        }
        if (string.Equals(ex.ErrorCode, "NoSuchKey"))
        {
            return false;
        }
        // Anything else (throttling, auth, transport) is a real error.
        throw;
    }
}
/// <summary>
/// Deletes every object under the given domain/path whose "private-expire"
/// metadata value (a UTC file-time) lies in the past.
/// </summary>
/// <param name="domain">Domain used to enumerate candidate objects.</param>
/// <param name="path">Path used to enumerate candidate objects.</param>
/// <param name="oldThreshold">Unused by this implementation; expiry comes from object metadata.</param>
public override void DeleteExpired(string domain, string path, TimeSpan oldThreshold)
{
    using (AmazonS3 client = GetClient())
    {
        foreach (S3Object entry in GetS3Objects(domain, path))
        {
            GetObjectMetadataRequest metadataRequest =
                new GetObjectMetadataRequest().WithBucketName(_bucket).WithKey(entry.Key);
            using (GetObjectMetadataResponse metadata = client.GetObjectMetadata(metadataRequest))
            {
                // Objects without the expiry marker are never deleted.
                string expireValue = metadata.Metadata["private-expire"];
                if (string.IsNullOrEmpty(expireValue))
                {
                    continue;
                }

                long fileTime;
                if (!long.TryParse(expireValue, out fileTime))
                {
                    continue;
                }

                // Still within its lifetime — leave it alone.
                if (DateTime.UtcNow <= DateTime.FromFileTimeUtc(fileTime))
                {
                    continue;
                }

                // Expired: delete it (response is disposable, hence the using).
                DeleteObjectRequest deleteRequest =
                    new DeleteObjectRequest().WithBucketName(_bucket).WithKey(entry.Key);
                using (client.DeleteObject(deleteRequest))
                {
                }
            }
        }
    }
}
/// <summary>
/// Sets the storage class for the S3 Object's Version to the value specified,
/// by copying the object onto itself with the new storage class and then
/// restoring its original ACL (a COPY resets the destination ACL).
/// </summary>
/// <param name="bucketName">The name of the bucket in which the key is stored</param>
/// <param name="key">The key of the S3 Object whose storage class needs changing</param>
/// <param name="version">The version of the S3 Object whose storage class needs changing</param>
/// <param name="sClass">The new Storage Class for the object</param>
/// <param name="s3Client">The Amazon S3 Client to use for S3 specific operations.</param>
/// <seealso cref="T:Amazon.S3.Model.S3StorageClass"/>
public static void SetObjectStorageClass(string bucketName, string key, string version, S3StorageClass sClass, AmazonS3 s3Client)
{
    if (sClass > S3StorageClass.ReducedRedundancy || sClass < S3StorageClass.Standard)
    {
        throw new ArgumentException("Invalid value specified for storage class.");
    }
    if (s3Client == null)
    {
        throw new ArgumentNullException("s3Client", "Please specify an S3 Client to make service requests.");
    }

    // Capture the object's current ACL so it can be re-applied after the copy.
    GetACLRequest aclRequest = new GetACLRequest();
    aclRequest.BucketName = bucketName;
    aclRequest.Key = key;
    if (version != null)
    {
        aclRequest.VersionId = version;
    }
    GetACLResponse aclResponse = s3Client.GetACL(aclRequest);

    GetObjectMetadataResponse metadataResponse = s3Client.GetObjectMetadata(
        new GetObjectMetadataRequest()
            .WithBucketName(bucketName)
            .WithKey(key));

    // Copy the object onto itself with the new storage class, carrying over
    // its server-side encryption setting. The request's Metadata directive
    // defaults to COPY, so user metadata is preserved.
    CopyObjectRequest copyRequest = new CopyObjectRequest();
    copyRequest.SourceBucket = copyRequest.DestinationBucket = bucketName;
    copyRequest.SourceKey = copyRequest.DestinationKey = key;
    copyRequest.ServerSideEncryptionMethod = metadataResponse.ServerSideEncryptionMethod;
    if (version != null)
    {
        copyRequest.SourceVersionId = version;
    }
    copyRequest.StorageClass = sClass;
    CopyObjectResponse copyResponse = s3Client.CopyObject(copyRequest);

    // Re-apply the original ACL, targeting the version produced by the copy
    // when a specific version was requested.
    SetACLRequest restoreAclRequest = new SetACLRequest();
    restoreAclRequest.BucketName = bucketName;
    restoreAclRequest.Key = key;
    if (version != null)
    {
        restoreAclRequest.VersionId = copyResponse.VersionId;
    }
    restoreAclRequest.ACL = aclResponse.AccessControlList;
    s3Client.SetACL(restoreAclRequest);
}
/// <summary>
/// Sets up the request needed to make an exact copy of the object leaving the parent method
/// the ability to change just the attribute being requested to change.
/// </summary>
/// <param name="bucketName">The name of the bucket containing the object.</param>
/// <param name="key">The key of the object to be modified.</param>
/// <param name="version">Optional version to copy from; null targets the current version.</param>
/// <param name="s3Client">The Amazon S3 client used for service requests.</param>
/// <param name="copyRequest">Receives a request that copies the object onto itself.</param>
/// <param name="setACLRequest">Receives a request that restores the object's original ACL.</param>
/// <exception cref="ArgumentNullException">No object exists for the given bucket and key.</exception>
static void SetupForObjectModification(string bucketName, string key, string version, AmazonS3 s3Client,
                                       out CopyObjectRequest copyRequest, out SetACLRequest setACLRequest)
{
    // Get the existing ACL of the object. A COPY operation resets the ACL on
    // the destination object, so the caller re-applies it via setACLRequest.
    GetACLRequest getACLRequest = new GetACLRequest();
    getACLRequest.BucketName = bucketName;
    getACLRequest.Key = key;
    if (version != null)
    {
        getACLRequest.VersionId = version;
    }
    GetACLResponse getACLResponse = s3Client.GetACL(getACLRequest);

    setACLRequest = new SetACLRequest();
    setACLRequest.BucketName = bucketName;
    setACLRequest.Key = key;
    setACLRequest.ACL = getACLResponse.AccessControlList;

    // List with the key as prefix to read back the object's storage class.
    ListObjectsResponse listObjectResponse = s3Client.ListObjects(new ListObjectsRequest()
        .WithBucketName(bucketName)
        .WithPrefix(key)
        .WithMaxKeys(1));
    if (listObjectResponse.S3Objects.Count != 1)
    {
        // BUGFIX: the message was previously passed to ArgumentNullException's
        // single-string constructor, where it is treated as the *parameter
        // name*. Use the (paramName, message) overload so the text actually
        // surfaces as the exception message.
        throw new ArgumentNullException("key", "No object exists with this bucket name and key.");
    }

    GetObjectMetadataRequest getMetaRequest = new GetObjectMetadataRequest()
    {
        BucketName = bucketName,
        Key = key
    };
    GetObjectMetadataResponse getMetaResponse = s3Client.GetObjectMetadata(getMetaRequest);

    // Build a copy-in-place request preserving the storage class, website
    // redirect location and server-side encryption settings.
    copyRequest = new CopyObjectRequest();
    copyRequest.SourceBucket = copyRequest.DestinationBucket = bucketName;
    copyRequest.SourceKey = copyRequest.DestinationKey = key;
    copyRequest.StorageClass = listObjectResponse.S3Objects[0].StorageClass == "STANDARD"
        ? S3StorageClass.Standard
        : S3StorageClass.ReducedRedundancy;
    if (version != null)
    {
        copyRequest.SourceVersionId = version;
    }
    copyRequest.WebsiteRedirectLocation = getMetaResponse.WebsiteRedirectLocation;
    copyRequest.ServerSideEncryptionMethod = getMetaResponse.ServerSideEncryptionMethod;
}
/// <summary>
/// Checks whether a file exists
/// </summary>
/// <param name="path">Web path to file's folder</param>
/// <param name="fileName">File name</param>
/// <returns>True if file exists, false if not</returns>
public bool DoesFileExist(string path, string fileName)
{
    try
    {
        // A successful metadata (HEAD) request proves the object exists; the
        // response itself is not needed, so it is not captured (the previous
        // version assigned it to an unused local).
        _client.GetObjectMetadata(new GetObjectMetadataRequest()
            .WithBucketName(_bucketName)
            .WithKey(GetKey(path, fileName)));
        return true;
    }
    catch (AmazonS3Exception ex)
    {
        // S3 reports a missing object as 404, or 403 when the caller lacks
        // permission to learn whether the object exists.
        if (ex.StatusCode == System.Net.HttpStatusCode.NotFound ||
            ex.StatusCode == System.Net.HttpStatusCode.Forbidden)
        {
            return false;
        }
        // Status wasn't not found, so rethrow the original exception.
        throw;
    }
}
/// <summary>
/// Checks whether an object with the given key exists in the bucket.
/// </summary>
/// <param name="key">The S3 object key to probe.</param>
/// <returns>True if the object exists; false when S3 reports 404 Not Found.</returns>
bool FileExists(string key)
{
    try
    {
        var request = new GetObjectMetadataRequest()
            .WithBucketName(bucket)
            .WithKey(key);
        // A successful metadata (HEAD) call proves existence; the response is
        // not needed, so it is not captured (the previous version assigned it
        // to an unused local).
        s3client.GetObjectMetadata(request);
        return true;
    }
    catch (AmazonS3Exception ex)
    {
        if (ex.StatusCode == HttpStatusCode.NotFound)
        {
            return false;
        }
        // Any other status (e.g. auth or throttling errors) is unexpected
        // here — propagate it to the caller.
        throw;
    }
}
/// <summary>
/// Fetches the object's metadata if it exists on S3; returns null on 404.
/// </summary>
/// <param name="bucketName">Bucket to look in.</param>
/// <param name="key">Object key; the lookup is performed lower-cased.</param>
/// <returns>The metadata response when found; null when S3 reports Not Found.</returns>
protected GetObjectMetadataResponse FileExists(string bucketName, string key)
{
    // NOTE(review): the lookup lower-cases the key while the log text keeps
    // the caller's casing — presumably uploads store lower-cased keys;
    // confirm against the upload path.
    string location = String.Format("{0}/{1}", bucketName, key);
    Logger.Log(String.Format("Checking if file Exists on S3: {0}", location));
    try
    {
        GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest()
            .WithBucketName(bucketName)
            .WithKey(key.ToLowerInvariant());
        return S3Client.GetObjectMetadata(metadataRequest);
    }
    catch (AmazonS3Exception ex)
    {
        Logger.Log(String.Format("File Does Not Exist:{0}", location));
        if (ex.StatusCode == System.Net.HttpStatusCode.NotFound)
        {
            return null;
        }
        // Status wasn't not found, so rethrow the original exception.
        throw;
    }
}
/// <summary>
/// Sets up the request needed to make an exact copy of the object leaving the parent method
/// the ability to change just the attribute being requested to change.
/// </summary>
/// <param name="bucketName">The name of the bucket containing the object.</param>
/// <param name="key">The key of the object to be modified.</param>
/// <param name="version">Optional version to copy from; null targets the current version.</param>
/// <param name="s3Client">The Amazon S3 client used for service requests.</param>
/// <param name="copyRequest">Receives a request that copies the object onto itself.</param>
/// <param name="setACLRequest">Receives a request that restores the object's original ACL.</param>
/// <exception cref="ArgumentNullException">No object exists for the given bucket and key.</exception>
static void SetupForObjectModification(string bucketName, string key, string version, AmazonS3 s3Client,
                                       out CopyObjectRequest copyRequest, out SetACLRequest setACLRequest)
{
    // Get the existing ACL of the object. A COPY operation resets the ACL on
    // the destination object, so the caller re-applies it via setACLRequest.
    GetACLRequest getACLRequest = new GetACLRequest();
    getACLRequest.BucketName = bucketName;
    getACLRequest.Key = key;
    if (version != null)
    {
        getACLRequest.VersionId = version;
    }
    GetACLResponse getACLResponse = s3Client.GetACL(getACLRequest);

    setACLRequest = new SetACLRequest();
    setACLRequest.BucketName = bucketName;
    setACLRequest.Key = key;
    setACLRequest.ACL = getACLResponse.AccessControlList;

    // List with the key as prefix to read back the object's storage class.
    ListObjectsResponse listObjectResponse = s3Client.ListObjects(new ListObjectsRequest()
        .WithBucketName(bucketName)
        .WithPrefix(key)
        .WithMaxKeys(1));
    if (listObjectResponse.S3Objects.Count != 1)
    {
        // BUGFIX: the message was previously passed to ArgumentNullException's
        // single-string constructor, where it is treated as the *parameter
        // name*. Use the (paramName, message) overload so the text actually
        // surfaces as the exception message.
        throw new ArgumentNullException("key", "No object exists with this bucket name and key.");
    }

    GetObjectMetadataRequest getMetaRequest = new GetObjectMetadataRequest()
    {
        BucketName = bucketName,
        Key = key
    };
    GetObjectMetadataResponse getMetaResponse = s3Client.GetObjectMetadata(getMetaRequest);

    // Build a copy-in-place request preserving the storage class, website
    // redirect location and server-side encryption settings.
    copyRequest = new CopyObjectRequest();
    copyRequest.SourceBucket = copyRequest.DestinationBucket = bucketName;
    copyRequest.SourceKey = copyRequest.DestinationKey = key;
    copyRequest.StorageClass = listObjectResponse.S3Objects[0].StorageClass == "STANDARD"
        ? S3StorageClass.Standard
        : S3StorageClass.ReducedRedundancy;
    if (version != null)
    {
        copyRequest.SourceVersionId = version;
    }
    copyRequest.WebsiteRedirectLocation = getMetaResponse.WebsiteRedirectLocation;
    copyRequest.ServerSideEncryptionMethod = getMetaResponse.ServerSideEncryptionMethod;
}
/// <summary>
/// Sets the storage class for the S3 Object's Version to the value specified.
/// The change is made by copying the object onto itself with the new storage
/// class and then restoring the original ACL, which a COPY operation resets.
/// </summary>
/// <param name="bucketName">The name of the bucket in which the key is stored</param>
/// <param name="key">The key of the S3 Object whose storage class needs changing</param>
/// <param name="version">The version of the S3 Object whose storage class needs changing</param>
/// <param name="sClass">The new Storage Class for the object</param>
/// <param name="s3Client">The Amazon S3 Client to use for S3 specific operations.</param>
/// <seealso cref="T:Amazon.S3.Model.S3StorageClass"/>
public static void SetObjectStorageClass(string bucketName, string key, string version, S3StorageClass sClass, AmazonS3 s3Client)
{
    if (sClass > S3StorageClass.ReducedRedundancy || sClass < S3StorageClass.Standard)
    {
        throw new ArgumentException("Invalid value specified for storage class.");
    }
    if (s3Client == null)
    {
        throw new ArgumentNullException("s3Client", "Please specify an S3 Client to make service requests.");
    }

    // Remember the object's current ACL so it can be restored after the copy.
    GetACLRequest currentAclRequest = new GetACLRequest();
    currentAclRequest.BucketName = bucketName;
    currentAclRequest.Key = key;
    if (version != null)
    {
        currentAclRequest.VersionId = version;
    }
    GetACLResponse currentAclResponse = s3Client.GetACL(currentAclRequest);

    GetObjectMetadataResponse objectMetadata = s3Client.GetObjectMetadata(
        new GetObjectMetadataRequest()
            .WithBucketName(bucketName)
            .WithKey(key));

    // Copy the object onto itself with the new storage class, carrying over
    // the existing server-side encryption method. The Metadata directive
    // defaults to COPY, so user metadata is preserved.
    CopyObjectRequest inPlaceCopy = new CopyObjectRequest();
    inPlaceCopy.SourceBucket = inPlaceCopy.DestinationBucket = bucketName;
    inPlaceCopy.SourceKey = inPlaceCopy.DestinationKey = key;
    inPlaceCopy.ServerSideEncryptionMethod = objectMetadata.ServerSideEncryptionMethod;
    if (version != null)
    {
        inPlaceCopy.SourceVersionId = version;
    }
    inPlaceCopy.StorageClass = sClass;
    CopyObjectResponse inPlaceCopyResponse = s3Client.CopyObject(inPlaceCopy);

    // Put the original ACL back; when a version was requested, target the
    // version created by the copy.
    SetACLRequest restoreAcl = new SetACLRequest();
    restoreAcl.BucketName = bucketName;
    restoreAcl.Key = key;
    if (version != null)
    {
        restoreAcl.VersionId = inPlaceCopyResponse.VersionId;
    }
    restoreAcl.ACL = currentAclResponse.AccessControlList;
    s3Client.SetACL(restoreAcl);
}
// Entry point for the CrossCloudBackup tool: performs a one-shot, two-way
// mirror between Amazon S3 and Rackspace Cloud Files, driven by an XML config
// file (CrossCloudBackup.xml by default, or the path given as args[0]).
// Every non-excluded S3 bucket is streamed into the configured Cloud Files
// backup container, then every non-excluded container is streamed into the
// configured S3 backup bucket; a summary is printed at the end.
static void Main(string[] args)
{
    startTime = DateTime.Now;

    // Catch unhandled exceptions so a crash still goes through MyHandler.
    AppDomain currentDomain = AppDomain.CurrentDomain;
    currentDomain.UnhandledException += new UnhandledExceptionEventHandler(MyHandler);

    // Catch ctrl+c so we can put out our summary before exiting.
    Console.CancelKeyPress += (sender, eventArgs) =>
    {
        eventArgs.Cancel = false;
        WriteLog("!!CANCELLED!!");
        keepRunning = false;
        PrintSummary();
    };

    ServicePointManager.Expect100Continue = true;
    // NOTE(review): SSLv3 is obsolete (POODLE) and rejected by modern
    // endpoints — confirm whether this should be a TLS protocol instead.
    ServicePointManager.SecurityProtocol = SecurityProtocolType.Ssl3;

    // Resolve the config file path; write a template and exit when absent.
    string strConfigFile = "CrossCloudBackup.xml";
    if (args.Length > 0)
    {
        strConfigFile = args[0];
    }
    if (!File.Exists(strConfigFile))
    {
        new XDocument(
            new XDeclaration("1.0", "utf-8", "yes"),
            new XComment("CrossCloudBackup Local Config File"),
            new XElement("root",
                new XElement("AWSKey", "someValue"),
                new XElement("AWSSecret", "someValue"),
                new XElement("AWSRegion", "eu-west-1"),
                new XElement("RSUsername", "someValue"),
                new XElement("RSAPIKey", "someValue"),
                new XElement("RSUseServiceNet", "false"),
                new XElement("RSLocation", "UK"),
                new XElement("ExcludeBuckets", ""),
                new XElement("ExcludeContainers", ""),
                //new XElement("MirrorAll", "true"), //TODO: Add Selective Sync
                new XElement("RSBackupContainer", "s3-backup"),
                new XElement("S3BackupBucket", "rs-backup")
                //new XElement("TransferThreads", "3") //TODO: Add Threading
            )
        )
        .Save(strConfigFile);
        Console.WriteLine(strConfigFile + " not found, blank one created.");
        Console.WriteLine("Press enter to exit...");
        Console.ReadLine();
        Environment.Exit(1);
    }

    // We know the config file exists, so open and read values.
    XDocument config = XDocument.Load(strConfigFile);

    // Get AWS config.
    string AWSKey = config.Element("root").Element("AWSKey").Value;
    string AWSSecret = config.Element("root").Element("AWSSecret").Value;
    RegionEndpoint region = RegionEndpoint.EUWest1;
    switch (config.Element("root").Element("AWSRegion").Value)
    {
        case "eu-west-1": region = RegionEndpoint.EUWest1; break;
        case "sa-east-1": region = RegionEndpoint.SAEast1; break;
        case "us-east-1": region = RegionEndpoint.USEast1; break;
        case "ap-northeast-1": region = RegionEndpoint.APNortheast1; break;
        case "us-west-2": region = RegionEndpoint.USWest2; break;
        case "us-west-1": region = RegionEndpoint.USWest1; break;
        case "ap-southeast-1": region = RegionEndpoint.APSoutheast1; break;
        case "ap-southeast-2": region = RegionEndpoint.APSoutheast2; break;
        default: region = RegionEndpoint.EUWest1; break;
    }

    // Create a connection to S3.
    WriteLog("Connecting to S3");
    S3Client = AWSClientFactory.CreateAmazonS3Client(AWSKey, AWSSecret, region);

    // Get RS config.
    Rackspace.CloudFiles.Utils.AuthUrl rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.US;
    switch (config.Element("root").Element("RSLocation").Value)
    {
        case "UK": rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.UK; break;
        case "US": rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.US; break;
        case "Mosso": rsRegion = Rackspace.CloudFiles.Utils.AuthUrl.Mosso; break;
    }

    // Create connection to Rackspace.
    WriteLog("Connecting to Rackspace Cloud Files");
    RSConnection = new Connection(new UserCredentials(config.Element("root").Element("RSUsername").Value, config.Element("root").Element("RSAPIKey").Value, rsRegion), Convert.ToBoolean(config.Element("root").Element("RSUseServiceNet").Value));

    // Get exclusions (comma-separated lists in the config file).
    string[] excludeBuckets = config.Element("root").Element("ExcludeBuckets").Value.Split(',');
    string[] excludeContainers = config.Element("root").Element("ExcludeContainers").Value.Split(',');

    // First process all the S3 buckets and stream right into Rackspace container.
    WriteLog("Listing S3 Buckets");
    ListBucketsResponse response = S3Client.ListBuckets();
    WriteLog("Found " + response.Buckets.Count() + " buckets");
    foreach (S3Bucket bucket in response.Buckets)
    {
        if (bucket.BucketName == config.Element("root").Element("S3BackupBucket").Value)
        {
            WriteLog("Skipping " + bucket.BucketName + " as backup folder");
        }
        else if (excludeBuckets.Contains(bucket.BucketName))
        {
            WriteLog("Skipping " + bucket.BucketName + " as in exclusions");
        }
        else
        {
            // We need to know if the bucket is in the right region, otherwise it will error.
            GetBucketLocationResponse locResponse = S3Client.GetBucketLocation(new GetBucketLocationRequest().WithBucketName(bucket.BucketName));
            if (locResponse.Location == config.Element("root").Element("AWSRegion").Value)
            {
                WriteLog("Processing " + bucket.BucketName);
                // Get list of files (paged via the do/while below).
                ListObjectsRequest request = new ListObjectsRequest();
                request.BucketName = bucket.BucketName;
                do
                {
                    ListObjectsResponse filesResponse = S3Client.ListObjects(request);
                    WriteLog("Found " + filesResponse.S3Objects.Count() + " files");
                    if (filesResponse.IsTruncated)
                    {
                        WriteLog("there are additional pages of files");
                    }
                    foreach (S3Object file in filesResponse.S3Objects)
                    {
                        bool bolTransfer = false;
                        // See if it exists on Rackspace (HEAD request).
                        string uri = RSConnection.StorageUrl + "/" + config.Element("root").Element("RSBackupContainer").Value + "/" + bucket.BucketName + "/" + file.Key;
                        try
                        {
                            var req = (HttpWebRequest)WebRequest.Create(uri);
                            req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                            req.Method = "HEAD";
                            // Compare Etags to see if we need to sync.
                            using (var resp = req.GetResponse() as HttpWebResponse)
                            {
                                if ("\"" + resp.Headers["eTag"] + "\"" != file.ETag)
                                {
                                    bolTransfer = true;
                                }
                            }
                        }
                        catch (System.Net.WebException e)
                        {
                            if (e.Status == WebExceptionStatus.ProtocolError && ((HttpWebResponse)e.Response).StatusCode == HttpStatusCode.NotFound)
                            {
                                // Item not found, so upload.
                                bolTransfer = true;
                            }
                            //WriteLog("End Request to " + uri);
                        }
                        if (file.StorageClass == "GLACIER")
                        {
                            bolTransfer = false; // We can't get things out of Glacier, but they are still listed here.
                        }
                        if (bolTransfer)
                        {
                            WriteLog("Syncing " + file.Key);
                            using (GetObjectResponse getResponse = S3Client.GetObject(new GetObjectRequest().WithBucketName(bucket.BucketName).WithKey(file.Key)))
                            {
                                using (Stream s = getResponse.ResponseStream)
                                {
                                    // We can stream right from s3 to CF, no need to store in memory or filesystem.
                                    var req = (HttpWebRequest)WebRequest.Create(uri);
                                    req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                                    req.Method = "PUT";
                                    req.SendChunked = true;
                                    req.AllowWriteStreamBuffering = false;
                                    req.Timeout = -1;
                                    using (Stream stream = req.GetRequestStream())
                                    {
                                        byte[] data = new byte[8192];
                                        int bytesRead = 0;
                                        while ((bytesRead = s.Read(data, 0, data.Length)) > 0)
                                        {
                                            stream.Write(data, 0, bytesRead);
                                        }
                                        stream.Flush();
                                        stream.Close();
                                    }
                                    req.GetResponse().Close();
                                }
                            }
                            intTransferred++;
                            bytesTransferred += file.Size;
                        }
                        else
                        {
                            WriteLog("Skipping " + file.Key);
                            intSkipped++;
                        }
                        // Check our exit condition.
                        if (!keepRunning)
                        {
                            break;
                        }
                    }
                    // Loop if there is more than 1000 files.
                    if (filesResponse.IsTruncated)
                    {
                        request.Marker = filesResponse.NextMarker;
                    }
                    else
                    {
                        request = null;
                    }
                    if (!keepRunning)
                    {
                        break;
                    }
                } while (request != null);
            }
        }
        if (!keepRunning)
        {
            break;
        }
    }

    // Now get all the Rackspace containers and stream them to Amazon.
    WriteLog("Listing CF Containers");
    List <string> lstContainers = RSConnection.GetContainers();
    WriteLog("Found " + lstContainers.Count() + " containers");
    foreach (string container in lstContainers)
    {
        if (container == config.Element("root").Element("RSBackupContainer").Value)
        {
            WriteLog("Skipping " + container + " as backup folder");
        }
        else if (excludeContainers.Contains(container))
        {
            WriteLog("Skipping " + container + " as in exclusions");
        }
        else
        {
            WriteLog("Processing " + container);
            XmlDocument containerInfo = RSConnection.GetContainerInformationXml(container);
            do
            {
                int filesCount = containerInfo.GetElementsByTagName("object").Count;
                WriteLog("Found " + filesCount + " files");
                foreach (XmlNode file in containerInfo.GetElementsByTagName("object"))
                {
                    bool bolTransfer = false;
                    string strBucketName = config.Element("root").Element("S3BackupBucket").Value;
                    string strKey = container + file.SelectSingleNode("name").InnerText;
                    // See if the file exists on s3.
                    try
                    {
                        GetObjectMetadataResponse metaResp = S3Client.GetObjectMetadata(new GetObjectMetadataRequest().WithBucketName(strBucketName).WithKey(strKey));
                        // Compare the etags.
                        if (metaResp.ETag != "\"" + file.SelectSingleNode("hash").InnerText + "\"")
                        {
                            bolTransfer = true;
                        }
                    }
                    catch (Amazon.S3.AmazonS3Exception e)
                    {
                        // NOTE(review): any S3 error (not just 404) triggers a
                        // re-transfer here; the exception variable is unused.
                        bolTransfer = true;
                    }
                    if (bolTransfer)
                    {
                        WriteLog("Syncing " + file.SelectSingleNode("name").InnerText);
                        // The binding lacks streaming support, so stream manually
                        // via a hand-signed PUT (AWS signature v2 style).
                        string uri = RSConnection.StorageUrl + "/" + container + "/" + file.SelectSingleNode("name").InnerText;
                        var req = (HttpWebRequest)WebRequest.Create(uri);
                        req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                        req.Method = "GET";
                        using (var resp = req.GetResponse() as HttpWebResponse)
                        {
                            using (Stream s = resp.GetResponseStream())
                            {
                                string today = String.Format("{0:ddd,' 'dd' 'MMM' 'yyyy' 'HH':'mm':'ss' 'zz00}", DateTime.Now);
                                string stringToSign = "PUT\n" + "\n" + file.SelectSingleNode("content_type").InnerText + "\n" + "\n" + "x-amz-date:" + today + "\n" + "/" + strBucketName + "/" + strKey;
                                Encoding ae = new UTF8Encoding();
                                HMACSHA1 signature = new HMACSHA1(ae.GetBytes(AWSSecret));
                                string encodedCanonical = Convert.ToBase64String(signature.ComputeHash(ae.GetBytes(stringToSign)));
                                string authHeader = "AWS " + AWSKey + ":" + encodedCanonical;
                                string uriS3 = "https://" + strBucketName + ".s3.amazonaws.com/" + strKey;
                                var reqS3 = (HttpWebRequest)WebRequest.Create(uriS3);
                                reqS3.Headers.Add("Authorization", authHeader);
                                reqS3.Headers.Add("x-amz-date", today);
                                reqS3.ContentType = file.SelectSingleNode("content_type").InnerText;
                                // NOTE(review): Convert.ToInt32 will throw
                                // OverflowException for objects of 2 GB or more —
                                // consider Convert.ToInt64.
                                reqS3.ContentLength = Convert.ToInt32(file.SelectSingleNode("bytes").InnerText);
                                reqS3.Method = "PUT";
                                reqS3.AllowWriteStreamBuffering = false;
                                if (reqS3.ContentLength == -1L)
                                {
                                    reqS3.SendChunked = true;
                                }
                                using (Stream streamS3 = reqS3.GetRequestStream())
                                {
                                    byte[] data = new byte[32768];
                                    int bytesRead = 0;
                                    while ((bytesRead = s.Read(data, 0, data.Length)) > 0)
                                    {
                                        streamS3.Write(data, 0, bytesRead);
                                    }
                                    streamS3.Flush();
                                    streamS3.Close();
                                }
                                reqS3.GetResponse().Close();
                            }
                        }
                        intTransferred++;
                        bytesTransferred += Convert.ToInt64(file.SelectSingleNode("bytes").InnerText);
                    }
                    else
                    {
                        WriteLog("Skipping " + file.SelectSingleNode("name").InnerText);
                        intSkipped++;
                    }
                    // Check our exit condition.
                    if (!keepRunning)
                    {
                        break;
                    }
                }
                if (filesCount < 10000)
                {
                    containerInfo = null;
                }
                else
                {
                    // Fetch the next list, but the Rackspace binding doesn't support markers with XML responses....
                    try
                    {
                        string uri = RSConnection.StorageUrl + "/" + container + "?format=xml&marker=" + Uri.EscapeUriString(containerInfo.FirstChild.NextSibling.LastChild.SelectSingleNode("name").InnerText);
                        var req = (HttpWebRequest)WebRequest.Create(uri);
                        req.Headers.Add("X-Auth-Token", RSConnection.AuthToken);
                        req.Method = "GET";
                        using (var resp = req.GetResponse() as HttpWebResponse)
                        {
                            using (var reader = new System.IO.StreamReader(resp.GetResponseStream(), ASCIIEncoding.ASCII))
                            {
                                string responseText = reader.ReadToEnd();
                                containerInfo.LoadXml(responseText);
                            }
                        }
                    }
                    catch (System.Net.WebException e)
                    {
                        if (e.Status == WebExceptionStatus.ProtocolError && ((HttpWebResponse)e.Response).StatusCode == HttpStatusCode.NotFound)
                        {
                            containerInfo = null;
                        }
                    }
                }
            } while (containerInfo != null);
        }
    }
    if (keepRunning)
    {
        WriteLog("Completed");
        PrintSummary();
    }
}