public static void CopyFilesFromBucketToBucket(string sourceBucket, string sourceFileName, string destinationBucket, string destinationFileName)
{
    // Server-side copy: S3 duplicates the object directly, nothing is downloaded locally.
    using (var client = new AmazonS3Client())
    {
        var copyRequest = new CopyObjectRequest
        {
            SourceBucket = sourceBucket,
            SourceKey = sourceFileName,
            DestinationBucket = destinationBucket,
            DestinationKey = destinationFileName
        };
        client.CopyObject(copyRequest);
    }
}
public void RenameFile(string bucketName, string source, string target)
{
    // S3 has no native rename: copy the object to the new key, then delete the old key.
    var request = new CopyObjectRequest();
    request.SourceBucket = bucketName;
    request.SourceKey = source;
    request.DestinationBucket = bucketName;
    request.DestinationKey = target;
    m_client.CopyObject(request);
    DeleteObject(bucketName, source);
}
public void RenameFile(string bucketName, string source, string target)
{
    // Emulate a rename: server-side copy to the target key, then remove the source key.
    var copyRequest = new CopyObjectRequest
    {
        SourceBucket = bucketName,
        SourceKey = source,
        DestinationBucket = bucketName,
        DestinationKey = target
    };
    // Dispose the response, exactly as the original did with its empty using block.
    using (m_client.CopyObject(copyRequest))
    {
    }
    DeleteObject(bucketName, source);
}
/// <summary>
/// Server-side copies a single object, optionally re-rooting its key under
/// <paramref name="destinationPrefix"/>.
/// </summary>
/// <param name="sourceBucket">Bucket containing the object to copy.</param>
/// <param name="sourceKey">Key of the object to copy.</param>
/// <param name="destinationBucket">Bucket the object is copied into.</param>
/// <param name="destinationPrefix">Prefix prepended to the destination key; only applied when <paramref name="copyFrom"/> is supplied.</param>
/// <param name="copyFrom">Portion of the source key stripped before prefixing. When null/blank the source key is reused verbatim.</param>
/// <param name="logger">Optional sink invoked with (bucket, message) after the copy.</param>
public void CopyObject(string sourceBucket, string sourceKey, string destinationBucket, string destinationPrefix, string copyFrom = null, Action<string, string> logger = null)
{
    string destinationKey = sourceKey;
    if (!String.IsNullOrWhiteSpace(copyFrom))
    {
        // NOTE(review): Replace removes EVERY occurrence of copyFrom, not just a
        // leading prefix — confirm keys never contain copyFrom more than once.
        destinationKey = destinationPrefix + sourceKey.Replace(copyFrom, "");
    }
    client.CopyObject(sourceBucket, sourceKey, destinationBucket, destinationKey);
    // Null-conditional invocation replaces the explicit "logger != null" check.
    logger?.Invoke(sourceBucket, $"Copied [s3://{sourceBucket}/{sourceKey}] To [s3://{destinationBucket}/{destinationKey}].");
}
private static void RenameS3Object(string oldKey, string newKey)
{
    // S3 cannot rename in place; perform a copy-then-delete within the same bucket.
    var copyRequest = new CopyObjectRequest
    {
        SourceBucket = BucketName,
        SourceKey = oldKey,
        DestinationBucket = BucketName,
        DestinationKey = newKey
    };
    _amazonS3Client.CopyObject(copyRequest);
    DeleteS3Object(oldKey);
}
/// <summary>
/// Makes <c>cantidad</c> server-side copies of the configured object inside its
/// own bucket, naming each copy "user33_v{i}.{extension}", and returns a summary
/// message. Fixes: the loop previously ran with "i &lt;= cantidad" and so made
/// cantidad + 1 copies while the message claimed cantidad; the extension was
/// taken from Split('.')[1], which is wrong for keys containing more than one dot.
/// </summary>
public string copyFiles()
{
    AmazonS3Client client = getConfig();
    S3Object file = getFile();
    Console.WriteLine($"Bucket Name: {file.BucketName}\n File Key: {file.Key}\n File Size: {file.Size}\n File Tags: {file.ETag}\n File Storage Class: {file.StorageClass}");

    // Take everything after the LAST dot as the extension (empty when the key has none).
    int extensionIndex = file.Key.LastIndexOf('.');
    string extension = extensionIndex >= 0 ? file.Key.Substring(extensionIndex + 1) : "";

    // "i < cantidad" makes exactly cantidad copies, matching the returned message.
    for (int i = 0; i < cantidad; i++)
    {
        CopyObjectRequest item = new CopyObjectRequest()
        {
            SourceBucket = file.BucketName,
            DestinationBucket = file.BucketName,
            SourceKey = file.Key,
            // Rename filename
            DestinationKey = $"user33_v{i}.{extension}"
        };
        client.CopyObject(item);
    }
    message = $"{cantidad} of files have been copied to {file.BucketName} S3 Bucket.";
    return (message);
}
/// <summary>
/// Copies object from one object key to another on amazon s3
/// </summary>
/// <param name="sourceObjectKey">Key of the existing object.</param>
/// <param name="destinationObjectKey">Key the copy is written under (same bucket).</param>
public void CopyObject(string sourceObjectKey, string destinationObjectKey)
{
    try
    {
        using (var client = new AmazonS3Client(awsAccessKeyId, awsSecretAccessKey, Amazon.RegionEndpoint.EUWest2))
        {
            var copyRequest = new CopyObjectRequest();
            copyRequest.SourceBucket = bucketName;
            copyRequest.SourceKey = sourceObjectKey;
            copyRequest.DestinationBucket = bucketName;
            copyRequest.DestinationKey = destinationObjectKey;
            client.CopyObject(copyRequest);
        }
    }
    catch (Exception ex)
    {
        // Swallow and record, matching the application-wide error-logging policy.
        Program.ErrorLogging(ex);
    }
}
/// <summary>
/// Demonstrates the core S3 object operations: paged listing, get, metadata,
/// put (inline body / file path / stream), delete (single and batch with error
/// reporting), server-side copy, versioned listing, and a three-part multipart
/// upload. Fixes applied: the bucket-versioning status is "Enabled" — the S3
/// API accepts only "Enabled"/"Suspended", so the original "Enable" was invalid
/// — and an exact copy-pasted duplicate of the CopyObject sample was removed.
/// </summary>
public void ObjectSamples()
{
    {
        #region ListObjects Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // List all objects
        ListObjectsRequest listRequest = new ListObjectsRequest
        {
            BucketName = "SampleBucket",
        };
        ListObjectsResponse listResponse;
        do
        {
            // Get a list of objects
            listResponse = client.ListObjects(listRequest);
            foreach (S3Object obj in listResponse.S3Objects)
            {
                Console.WriteLine("Object - " + obj.Key);
                Console.WriteLine(" Size - " + obj.Size);
                Console.WriteLine(" LastModified - " + obj.LastModified);
                Console.WriteLine(" Storage class - " + obj.StorageClass);
            }
            // Set the marker property
            listRequest.Marker = listResponse.NextMarker;
        } while (listResponse.IsTruncated);
        #endregion
    }
    {
        #region GetObject Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a GetObject request
        GetObjectRequest request = new GetObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request and remember to dispose of the response
        using (GetObjectResponse response = client.GetObject(request))
        {
            using (StreamReader reader = new StreamReader(response.ResponseStream))
            {
                string contents = reader.ReadToEnd();
                Console.WriteLine("Object - " + response.Key);
                Console.WriteLine(" Version Id - " + response.VersionId);
                Console.WriteLine(" Contents - " + contents);
            }
        }
        #endregion
    }
    {
        #region GetObjectMetadata Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a GetObjectMetadata request
        GetObjectMetadataRequest request = new GetObjectMetadataRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request and view the response
        GetObjectMetadataResponse response = client.GetObjectMetadata(request);
        Console.WriteLine("Content Length - " + response.ContentLength);
        Console.WriteLine("Content Type - " + response.Headers.ContentType);
        if (response.Expiration != null)
        {
            Console.WriteLine("Expiration Date - " + response.Expiration.ExpiryDate);
            Console.WriteLine("Expiration Rule Id - " + response.Expiration.RuleId);
        }
        #endregion
    }
    {
        #region PutObject Sample 1
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request with the content supplied inline
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            ContentBody = "This is sample content..."
        };

        // Put object
        PutObjectResponse response = client.PutObject(request);
        #endregion
    }
    {
        #region PutObject Sample 2
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request sourcing the content from a file on disk
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            FilePath = "contents.txt"
        };

        // Put object
        PutObjectResponse response = client.PutObject(request);
        #endregion
    }
    {
        #region PutObject Sample 3
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request fed from an open stream
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
        };
        using (FileStream stream = new FileStream("contents.txt", FileMode.Open))
        {
            request.InputStream = stream;
            // Put object
            PutObjectResponse response = client.PutObject(request);
        }
        #endregion
    }
    {
        #region DeleteObject Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a DeleteObject request
        DeleteObjectRequest request = new DeleteObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request
        client.DeleteObject(request);
        #endregion
    }
    {
        #region DeleteObjects Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a DeleteObjects request covering plain, versioned, and nested keys
        DeleteObjectsRequest request = new DeleteObjectsRequest
        {
            BucketName = "SampleBucket",
            Objects = new List<KeyVersion>
            {
                new KeyVersion() { Key = "Item1" },
                // Versioned item
                new KeyVersion() { Key = "Item2", VersionId = "Rej8CiBxcZKVK81cLr39j27Y5FVXghDK", },
                // Item in subdirectory
                new KeyVersion() { Key = "Logs/error.txt" }
            }
        };

        try
        {
            // Issue request
            DeleteObjectsResponse response = client.DeleteObjects(request);
        }
        catch (DeleteObjectsException doe)
        {
            // Catch error and list error details
            DeleteObjectsResponse errorResponse = doe.Response;
            foreach (DeletedObject deletedObject in errorResponse.DeletedObjects)
            {
                Console.WriteLine("Deleted item " + deletedObject.Key);
            }
            foreach (DeleteError deleteError in errorResponse.DeleteErrors)
            {
                Console.WriteLine("Error deleting item " + deleteError.Key);
                Console.WriteLine(" Code - " + deleteError.Code);
                Console.WriteLine(" Message - " + deleteError.Message);
            }
        }
        #endregion
    }
    {
        #region CopyObject Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a CopyObject request (the original contained this region twice,
        // issuing the identical copy a second time; the duplicate was removed)
        CopyObjectRequest request = new CopyObjectRequest
        {
            SourceBucket = "SampleBucket",
            SourceKey = "Item1",
            DestinationBucket = "AnotherBucket",
            DestinationKey = "Copy1",
            CannedACL = S3CannedACL.PublicRead
        };

        // Issue request
        client.CopyObject(request);
        #endregion
    }
    {
        #region ListVersions Sample
        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Turn versioning on for a bucket.
        // "Enabled" is the value the S3 API accepts ("Enabled"/"Suspended");
        // the original "Enable" was not a valid status.
        client.PutBucketVersioning(new PutBucketVersioningRequest
        {
            BucketName = "SampleBucket",
            VersioningConfig = new S3BucketVersioningConfig { Status = "Enabled" }
        });

        // Populate bucket with multiple items, each with multiple versions
        PopulateBucket(client, "SampleBucket");

        // Get versions
        ListVersionsRequest request = new ListVersionsRequest
        {
            BucketName = "SampleBucket"
        };

        // Make paged ListVersions calls
        ListVersionsResponse response;
        do
        {
            response = client.ListVersions(request);
            // View information about versions
            foreach (var version in response.Versions)
            {
                Console.WriteLine("Key = {0}, Version = {1}, IsLatest = {2}, LastModified = {3}, Size = {4}",
                    version.Key, version.VersionId, version.IsLatest, version.LastModified, version.Size);
            }
            request.KeyMarker = response.NextKeyMarker;
            request.VersionIdMarker = response.NextVersionIdMarker;
        } while (response.IsTruncated);
        #endregion
    }
    {
        #region Multipart Upload Sample
        int MB = (int)Math.Pow(2, 20);

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Define input stream
        Stream inputStream = Create13MBDataStream();

        // Initiate multipart upload
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };
        InitiateMultipartUploadResponse initResponse = client.InitiateMultipartUpload(initRequest);

        // Upload part 1
        UploadPartRequest uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 1,
            PartSize = 5 * MB,
            InputStream = inputStream
        };
        UploadPartResponse up1Response = client.UploadPart(uploadRequest);

        // Upload part 2
        uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 2,
            PartSize = 5 * MB,
            InputStream = inputStream
        };
        UploadPartResponse up2Response = client.UploadPart(uploadRequest);

        // Upload part 3 (final part: no PartSize, consumes the rest of the stream)
        uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 3,
            InputStream = inputStream
        };
        UploadPartResponse up3Response = client.UploadPart(uploadRequest);

        // List parts for current upload
        ListPartsRequest listPartRequest = new ListPartsRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId
        };
        ListPartsResponse listPartResponse = client.ListParts(listPartRequest);
        Debug.Assert(listPartResponse.Parts.Count == 3);

        // Complete the multipart upload
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartETags = new List<PartETag>
            {
                new PartETag { ETag = up1Response.ETag, PartNumber = 1 },
                new PartETag { ETag = up2Response.ETag, PartNumber = 2 },
                new PartETag { ETag = up3Response.ETag, PartNumber = 3 }
            }
        };
        CompleteMultipartUploadResponse compResponse = client.CompleteMultipartUpload(compRequest);
        #endregion
    }
}
/// <summary>
/// Copies every object from SourceBucket to TargetBucket, paging through the
/// source listing while prefetching the next page, and copying each page's
/// objects in parallel. When UseIfNoneMatch is set, an object whose target
/// ETag already matches is counted as skipped instead of copied. Progress and
/// errors are reported through Parent.LogOutput.
/// Fix: the non-404 metadata failure was rethrown with "throw e;", which
/// resets the stack trace — replaced with "throw;".
/// </summary>
private void copyObjects()
{
    int workerThreads = 0;
    int ioThreads = 0;
    string marker = StartMarker;
    ThreadPool.GetMinThreads(out workerThreads, out ioThreads);
    Parent.LogOutput(string.Format(" - Min threads: worker: {0} IO: {1}", workerThreads, ioThreads));
    if (MinThreads != -1)
    {
        Parent.LogOutput(string.Format(" -> Setting Min worker threads to {0}", MinThreads));
        bool success = ThreadPool.SetMinThreads(MinThreads, ioThreads);
        if (!success)
        {
            Parent.LogOutput(" FAILED!");
        }
    }
    ParallelOptions opts = new ParallelOptions();
    // Don't bother forking more threads than connections.
    opts.MaxDegreeOfParallelism = s3.Config.ConnectionLimit;
    ListObjectsResponse resp = null;
    bool moreResults = true;
    do
    {
        if (resp == null)
        {
            resp = fetchObjectListingSync(SourceBucket, marker);
        }
        marker = resp.NextMarker;
        // If there's more, start fetching the next page of results.
        Task<ListObjectsResponse> t = null;
        if (resp.IsTruncated)
        {
            t = fetchObjectListing(SourceBucket, marker);
        }
        else
        {
            moreResults = false;
        }
        Parallel.ForEach(resp.S3Objects, opts, obj =>
        {
            try
            {
                if (UseIfNoneMatch)
                {
                    // Actually, can't use If-None-Match on the *target* object yet. Need to
                    // HEAD the target.
                    try
                    {
                        GetObjectMetadataResponse meta = s3.GetObjectMetadata(TargetBucket, obj.Key);
                        if (meta.ETag.Equals(obj.ETag))
                        {
                            // Target is same.
                            Interlocked.Increment(ref skippedCount);
                            return;
                        }
                    }
                    catch (AmazonS3Exception e)
                    {
                        if (e.StatusCode == System.Net.HttpStatusCode.NotFound)
                        {
                            // Good! Target doesn't exist yet; fall through and copy.
                        }
                        else
                        {
                            // Rethrow preserving the original stack trace ("throw e;" reset it).
                            throw;
                        }
                    }
                }
                CopyObjectRequest req = new CopyObjectRequest()
                {
                    SourceBucket = SourceBucket,
                    SourceKey = obj.Key,
                    DestinationBucket = TargetBucket,
                    DestinationKey = obj.Key
                };
                s3.CopyObject(req);
                Interlocked.Increment(ref successCount);
            }
            catch (AmazonS3Exception e)
            {
                if (e.StatusCode == System.Net.HttpStatusCode.PreconditionFailed)
                {
                    // ETag matched
                    Interlocked.Increment(ref skippedCount);
                }
                else
                {
                    // Some other error.
                    Interlocked.Increment(ref failureCount);
                    Parent.LogOutput(string.Format("Error copying {0}: {1}", obj.Key, e.ToString()));
                }
            }
            catch (Exception e)
            {
                Interlocked.Increment(ref failureCount);
                Parent.LogOutput(string.Format("Error copying {0}: {1}", obj.Key, e.ToString()));
            }
        });
        if (UseIfNoneMatch)
        {
            Parent.LogOutput(string.Format(" -- {0} objects copied ({2} skipped). NextMarker = {1}", successCount, marker, skippedCount));
        }
        else
        {
            Parent.LogOutput(string.Format(" -- {0} objects copied. NextMarker = {1}", successCount, marker));
        }
        if (t != null)
        {
            // Block until the prefetched next page is ready before looping.
            t.Wait();
            resp = t.Result;
        }
    } while (moreResults);
}
/// <summary>
/// End-to-end S3 versioning walkthrough: creates a timestamped bucket, enables
/// versioning, writes an object, soft-deletes it (delete marker), verifies the
/// 404, restores the original version via a server-side copy, verifies the
/// restore, then permanently deletes every version and the bucket.
/// Fix: the fail/ReadLine/Exit(1) sequence was inlined at every checkpoint;
/// it is now a single Fail() helper (behavior unchanged).
/// </summary>
public static void Main(string[] args)
{
    // Accept any server certificate (lab/self-signed endpoints).
    System.Net.ServicePointManager.ServerCertificateValidationCallback = ((sender, certificate, chain, sslPolicyErrors) => true);

    // create the AWS S3 client
    AmazonS3Client s3 = AWSS3Factory.getS3Client();
    String bucketName = String.Join("-", AWSS3Factory.S3_BUCKET, DateTime.Now.ToString("yyyyMMddHHmmss"));

    //********************//
    // 1. Create a bucket //
    //********************//
    Console.Write(string.Format(" [*] Creating bucket '{0}'... ", bucketName));
    PutBucketResponse pbRes = s3.PutBucket(bucketName);
    if (pbRes.HttpStatusCode != System.Net.HttpStatusCode.OK)
    {
        Fail();
    }
    Console.WriteLine("done");

    //*******************************************//
    // 2. Enable object versioning on the bucket //
    //*******************************************//
    Console.Write(string.Format(" [*] Enabling bucket versioning for bucket '{0}'... ", bucketName));
    PutBucketVersioningRequest pvr = new PutBucketVersioningRequest()
    {
        BucketName = bucketName,
        VersioningConfig = new S3BucketVersioningConfig()
        {
            Status = VersionStatus.Enabled
        }
    };
    PutBucketVersioningResponse pvrResponse = s3.PutBucketVersioning(pvr);
    if (pvrResponse.HttpStatusCode != System.Net.HttpStatusCode.OK)
    {
        Fail();
    }
    Console.WriteLine("done");

    //************************************//
    // 3. Create a new object (version 1) //
    //************************************//
    String objectKey = "object-" + DateTime.Now.ToString("yyyyMMddHHmmssffff");
    Console.Write(string.Format(" [*] Creating a new object with key '{0}'... ", objectKey));
    PutObjectRequest poRequest = new PutObjectRequest()
    {
        BucketName = bucketName,
        ContentBody = "Lorem ipsum dolor sit amet, consectetur adipiscing elit...",
        Key = objectKey
    };
    PutObjectResponse poResponse = s3.PutObject(poRequest);
    if (poResponse.HttpStatusCode != System.Net.HttpStatusCode.OK)
    {
        Fail();
    }
    Console.WriteLine("done");
    Console.WriteLine(string.Format(" [x] Object content: '{0}'", poRequest.ContentBody));

    //****************************************//
    // 4. Delete the object (deletion marker) //
    //****************************************//
    Console.Write(string.Format(" [*] Deleting object with key '{0}' (adding a deletion marker)... ", objectKey));
    DeleteObjectRequest doRequest = new DeleteObjectRequest()
    {
        BucketName = bucketName,
        Key = objectKey
    };
    DeleteObjectResponse doResponse = s3.DeleteObject(doRequest);
    // A versioned delete must answer 204 AND report the new delete marker.
    if (doResponse.HttpStatusCode != System.Net.HttpStatusCode.NoContent || doResponse.DeleteMarker != "true")
    {
        Fail();
    }
    Console.WriteLine("done");

    //*************************************************//
    // 5. Try to get the object (expect 404 Not Found) //
    //*************************************************//
    Console.Write(string.Format(" [*] Trying to read object with key '{0}' (expecting 404 Not Found)... ", objectKey));
    GetObjectRequest goRequest = new GetObjectRequest()
    {
        BucketName = bucketName,
        Key = objectKey,
    };
    try
    {
        // should throw an exception as the object is marked as deleted
        s3.GetObject(goRequest);
        Fail();
    }
    catch (AmazonS3Exception e)
    {
        if (e.StatusCode != System.Net.HttpStatusCode.NotFound)
        {
            Fail();
        }
    }
    Console.WriteLine("done (404 Not Found)");

    //*************************************************************************//
    // 6. List the object versions and get the version ID of the first version //
    //*************************************************************************//
    Console.WriteLine(string.Format(" [*] Listing object versions for bucket '{0}' and getting version ID to restore... ", bucketName));
    ListVersionsResponse lvResponse = s3.ListVersions(bucketName);
    if (lvResponse.HttpStatusCode != System.Net.HttpStatusCode.OK)
    {
        Fail();
    }
    String restoreVersion = String.Empty;
    foreach (S3ObjectVersion version in lvResponse.Versions)
    {
        if (version.Key != objectKey)
        {
            // filtering out other objects
            continue;
        }
        Console.WriteLine(string.Format(" [x] -> Object key: {0}", version.Key));
        Console.WriteLine(string.Format(" [x] VersionId: {0}", version.VersionId));
        Console.WriteLine(string.Format(" [x] IsDeleteMarker: {0}", version.IsDeleteMarker));
        Console.WriteLine(string.Format(" [x] LastModified: {0}", version.LastModified));
        if (!version.IsDeleteMarker)
        {
            restoreVersion = version.VersionId;
        }
    }
    if (restoreVersion.Length == 0)
    {
        Console.WriteLine(" [*] Could not find a version to restore, exiting...");
        Console.ReadLine();
        System.Environment.Exit(1);
    }

    //******************************************************************//
    // 7. Restore the first version using a server-side copy operation. //
    //******************************************************************//
    Console.Write(string.Format(" [*] Restoring object version ID '{0}' (server-side copy)... ", restoreVersion));
    CopyObjectRequest coRequest = new CopyObjectRequest()
    {
        SourceBucket = bucketName,
        SourceKey = objectKey,
        SourceVersionId = restoreVersion,
        DestinationBucket = bucketName,
        DestinationKey = objectKey
    };
    CopyObjectResponse coResponse = s3.CopyObject(coRequest);
    if (coResponse.HttpStatusCode != System.Net.HttpStatusCode.OK)
    {
        Fail();
    }
    Console.WriteLine("done");

    //************************************************************//
    // 8. Verify that the object can now be successfully obtained //
    //************************************************************//
    Console.Write(string.Format(" [*] Trying to read object '{0}'... ", objectKey));
    GetObjectResponse goResponse = s3.GetObject(goRequest);
    if (goResponse.HttpStatusCode != System.Net.HttpStatusCode.OK || goResponse.ContentLength != poRequest.ContentBody.Length)
    {
        Fail();
    }
    Console.WriteLine("done");
    String responseBody = "";
    using (Stream responseStream = goResponse.ResponseStream)
    using (StreamReader reader = new StreamReader(responseStream))
    {
        responseBody = reader.ReadToEnd();
    }
    Console.WriteLine(string.Format(" [x] Object '{0}' successfully restored. New VersionId: '{1}'. Content: '{2}'", goResponse.Key, goResponse.VersionId, responseBody));

    //*******************************************//
    // 9. Permanently delete the object versions //
    //*******************************************//
    Console.Write(" [*] Permanently deleting all object versions... ");
    ListVersionsResponse lv2Response = s3.ListVersions(bucketName);
    if (lv2Response.HttpStatusCode != System.Net.HttpStatusCode.OK)
    {
        Fail();
    }
    foreach (S3ObjectVersion version in lv2Response.Versions)
    {
        DeleteObjectRequest do2Request = new DeleteObjectRequest()
        {
            BucketName = bucketName,
            Key = version.Key,
            VersionId = version.VersionId
        };
        DeleteObjectResponse do2Response = s3.DeleteObject(do2Request);
        if (do2Response.HttpStatusCode != System.Net.HttpStatusCode.NoContent)
        {
            Fail();
        }
    }
    Console.WriteLine("done");

    //***********************//
    // 10. Delete the bucket //
    //***********************//
    Console.Write(String.Format(" [*] Deleting bucket '{0}' (sleeping 5 seconds)... ", bucketName));
    System.Threading.Thread.Sleep(5000);
    DeleteBucketResponse dbRes = s3.DeleteBucket(bucketName);
    if (dbRes.HttpStatusCode != System.Net.HttpStatusCode.NoContent)
    {
        Fail();
    }
    Console.WriteLine("done");
    Console.WriteLine(" [*] Example is completed. Press any key to exit...");
    Console.ReadLine();
}

/// <summary>
/// Reports failure, waits for a key press, and aborts the process with exit
/// code 1 — the checkpoint-abort sequence the original inlined at every step.
/// </summary>
private static void Fail()
{
    Console.WriteLine("fail");
    Console.ReadLine();
    System.Environment.Exit(1);
}
/// <summary>
/// Server-side copies sourceBucketName/sourceKey to destBucketName/destKey.
/// Returns false when the source is missing, the destination already exists,
/// the copy does not answer HTTP 200, or any exception occurs (logged).
/// Fix: the "dest existed already" log line previously reported the SOURCE
/// bucket/key, pointing the message at the wrong object.
/// </summary>
public static bool Execute(
    string AWSAccessKey,
    string AWSSecretKey,
    RegionEndpoint regionEndpoint,
    string sourceBucketName,
    string sourceKey,
    string destBucketName,
    string destKey
    //out long? fileLengthBytes
    )
{
    //fileLengthBytes = null;
    try
    {
        // Validate the source exists.
        string sourceUrl;
        if (!Exists.Execute(
                fileKey: sourceKey,
                bucketName: sourceBucketName,
                url: out sourceUrl,
                re: regionEndpoint,
                AWSAccessKey: AWSAccessKey,
                AWSSecretKey: AWSSecretKey))
        {
            LogIt.W("source did not exist:" + sourceBucketName + "|" + sourceKey);
            return (false);
        }
        // Validate the destination doesn't already exist; fail if it does because
        // we aren't verifying the contents are equivalent.
        string destUrl;
        if (Exists.Execute(
                fileKey: destKey,
                bucketName: destBucketName,
                url: out destUrl,
                re: regionEndpoint,
                AWSAccessKey: AWSAccessKey,
                AWSSecretKey: AWSSecretKey))
        {
            // Log the DESTINATION coordinates (the original logged the source here).
            LogIt.W("dest existed already:" + destBucketName + "|" + destKey);
            return (false);
        }
        // Server-side copy.
        using (var s3c = new AmazonS3Client(
            awsAccessKeyId: AWSAccessKey,
            awsSecretAccessKey: AWSSecretKey,
            region: regionEndpoint))
        {
            var cor = s3c.CopyObject(new CopyObjectRequest
            {
                SourceBucket = sourceBucketName,
                SourceKey = sourceKey,
                DestinationBucket = destBucketName,
                DestinationKey = destKey
            });
            //fileLengthBytes = cor.
            return (cor.HttpStatusCode == System.Net.HttpStatusCode.OK);
        }
    }
    catch (Exception ex)
    {
        LogIt.E(ex);
        return (false);
    }
}
static void Main(string[] args) { Dictionary <string, string> LocalFiles = new Dictionary <string, string>(); Dictionary <string, string> S3Files = new Dictionary <string, string>(); Dictionary <string, string> FilesOnSource = new Dictionary <string, string>(); Dictionary <string, string> FilesOnTarget = new Dictionary <string, string>(); Dictionary <string, string> FilesToCopy = new Dictionary <string, string>(); Dictionary <string, string> z_Files = new Dictionary <string, string>(); AmazonS3Client client = new AmazonS3Client(Amazon.RegionEndpoint.USEast1); TransferUtility transfer = new TransferUtility(client); int iBackSlashIndex; string sSourceBucketName = ""; string sSourceBucketPrefix = ""; string sTargetBucketName = ""; string sTargetBucketPrefix = ""; string dirName = ""; bool bLocalFilesPresent = false; CopyObjectRequest copyRequest = new CopyObjectRequest(); CopyObjectResponse copyResponse; DeleteObjectRequest deleteRequest = new DeleteObjectRequest(); bool synchToTarget = false; bool SynchToSource = false; bool loadNew = false; bool loadAll = false; string sLatestFile = ""; int waitForToken = 0; string appFolder = AppDomain.CurrentDomain.BaseDirectory; string sServiceStatus = ""; string sParams = ""; AppendOnFile LogWriter = new AppendOnFile(); StringBuilder sTextToAppend = new StringBuilder(); // StreamWriter LogWriter = new StreamWriter(appFolder + "\\PMU.log", true); // Open appending // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - >>> DEBUG <<< Begin execution >>>"); LogWriter.file = appFolder + "\\PMU.log"; sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - >>> DEBUG <<< Begin execution >>>"); LogWriter.sTextToAppend = sTextToAppend; LogWriter.Append(); sTextToAppend.Clear(); // Recovering last file saved StreamReader MyReader = new StreamReader(appFolder + "LastFile.pmu"); string lastFileSavedFromFile = MyReader.ReadLine(); MyReader.Close(); // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd 
HH:mm:ss") + " - Last file read: " + lastFileSavedFromFile); LogWriter.file = appFolder + "\\PMU.log"; sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Last file read: " + lastFileSavedFromFile); LogWriter.sTextToAppend = sTextToAppend; LogWriter.Append(); sTextToAppend.Clear(); // Console.WriteLine("LastFileSaved read from App.Config: " + lastFileSavedFromFile); if (args.Length == 0) { Console.WriteLine("Please select source and target folders"); return; } if (args.Length == 1) { Console.WriteLine("Please select target folders"); return; } if (args.Length > 1) { try { // ---------------------------------------------- Source location (parameter 1) ---------------------------------------------- if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") { iBackSlashIndex = args[0].IndexOf('/', 5); sSourceBucketName = args[0].Substring(5, iBackSlashIndex - 5); sSourceBucketPrefix = args[0].Substring(iBackSlashIndex + 1, args[0].Length - iBackSlashIndex - 1); FilesOnSource = GetS3Files(client, sSourceBucketName, sSourceBucketPrefix); } else { dirName = args[0]; FilesOnSource = GetLocalFiles(dirName); bLocalFilesPresent = true; } sParams = args[0]; // --------------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------- Target location (parameter 2) ---------------------------------------------- if (args[1].Substring(0, 5) == "s3://" || args[1].Substring(0, 5) == "S3://") { iBackSlashIndex = args[1].IndexOf('/', 5); sTargetBucketName = args[1].Substring(5, iBackSlashIndex - 5); sTargetBucketPrefix = args[1].Substring(iBackSlashIndex + 1, args[1].Length - iBackSlashIndex - 1); // ---------- Checking if target bucket exists. 
If it doesn't, create it ---------- GetObjectRequest checkBucketRequest = new GetObjectRequest(); checkBucketRequest.BucketName = sTargetBucketName + "/" + sTargetBucketPrefix; try { GetObjectResponse response = client.GetObject(checkBucketRequest); } catch // Target bucket does not exist. The code within the following catch creates it. { PutBucketRequest createBucketRequest = new PutBucketRequest(); createBucketRequest.BucketName = sTargetBucketName + "/" + sTargetBucketPrefix + "/"; client.PutBucket(createBucketRequest); } // -------------------------------------------------------------------------------- FilesOnTarget = GetS3Files(client, sTargetBucketName, sTargetBucketPrefix); } else { dirName = args[1]; // ---------- Checking if target folder exists. If it doesn't, create it ---------- if (!Directory.Exists(dirName)) { Directory.CreateDirectory(dirName); } // -------------------------------------------------------------------------------- FilesOnTarget = GetLocalFiles(dirName); bLocalFilesPresent = true; } sParams += " " + args[1]; // --------------------------------------------------------------------------------------------------------------------------- // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Parameters 1 and 2 read."); LogWriter.file = appFolder + "\\PMU.log"; sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Parameters 1 and 2 read."); LogWriter.sTextToAppend = sTextToAppend; LogWriter.Append(); sTextToAppend.Clear(); // --------------------------------------------------- Parameters 3 and 4 ---------------------------------------------------- // Parameters 3 and 4 are optional and the order in which they are passed is irrelevant. They are flags that determine: // // 1. if a copy action should be synched between a producer execution and a consumer one. In that case, after all // the files have been copied from source to target, a synch file (SynchToken.pmu) is also moved. 
// The consumer
// execution only starts when the synch file is found. The consumer execution deletes the synch file when the
// copying is done.
// For this synching to happen the producer execution must be called with the parameter "SynchToTarget" and the
// consumer execution must be called with the parameter "SynchToSource"
//
// 2. the code only copies the files that are in the source and aren't in the target. The parameter "All" or "New"
// determines whether all the missing files are copied - in case the parameter "All" is passed - or only those
// who are later than the last file copied in a previous execution - in case the parameter "New" is passed.
// With that purpose, in each execution the name of the later file copied is saved in the app.config.
//
// NOTE(review): this span arrived whitespace-mangled (original newlines collapsed); the formatting below was
// reconstructed and comments were added, but every code token is unchanged from the original.

            // ---- Parse the optional 3rd/4th command-line flags into the mode booleans. ----
            // NOTE(review): when args.Length is neither 3 nor 4, the else-branch below still reads
            // args[2]/args[3] and would throw IndexOutOfRangeException (swallowed by the outer catch at the
            // bottom of this method) — confirm callers always pass at least 4 arguments in that case.
            if (args.Length == 3)
            {
                switch (args[2])
                {
                    case "SynchToTarget":
                        synchToTarget = true;
                        break;
                    case "SynchToSource":
                        SynchToSource = true;
                        break;
                    case "New":
                        loadNew = true;
                        break;
                    case "All":
                        loadAll = true;
                        break;
                }
                sParams += " " + args[2];
            }
            else
            {
                if (args.Length == 4)
                {
                    switch (args[2])
                    {
                        case "SynchToTarget":
                            synchToTarget = true;
                            break;
                        case "SynchToSource":
                            SynchToSource = true;
                            break;
                        case "New":
                            loadNew = true;
                            break;
                        case "All":
                            loadAll = true;
                            break;
                    }
                    // The 4th flag is honored only when it does not contradict the 3rd
                    // (SynchToTarget vs SynchToSource, New vs All are mutually exclusive pairs).
                    switch (args[3])
                    {
                        case "SynchToTarget":
                            if (!SynchToSource)
                            {
                                synchToTarget = true;
                            }
                            break;
                        case "SynchToSource":
                            if (!synchToTarget)
                            {
                                SynchToSource = true;
                            }
                            break;
                        case "New":
                            if (!loadAll)
                            {
                                loadNew = true;
                            }
                            break;
                        case "All":
                            if (!loadNew)
                            {
                                loadAll = true;
                            }
                            break;
                    }
                }
                sParams += " " + args[2] + " " + args[3];
            }
            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Parameters 3 and 4 treated.");
            LogWriter.file = appFolder + "\\PMU.log";
            sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Parameters 3 and 4 treated.");
            LogWriter.sTextToAppend = sTextToAppend;
            LogWriter.Append();
            sTextToAppend.Clear();
            if (!(loadAll || loadNew)) // Load all is default
            {
                loadAll = true;
            }
            // ---------------------------------------------------------------------------------------------------------------------------
            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Begin generation.");
            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Parameters: " + sParams);
            LogWriter.file = appFolder + "\\PMU.log";
            sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Begin generation." + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Parameters: " + sParams);
            LogWriter.sTextToAppend = sTextToAppend;
            LogWriter.Append();
            sTextToAppend.Clear();

            // ---- Consumer handshake: wait (up to 6 attempts x 5 minutes) for the producer's synch token. ----
            if (SynchToSource)
            {
                while (!FilesOnSource.ContainsKey("SynchToken.pmu") && waitForToken < 6)
                {
                    // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Waiting for token. Attempt " + Convert.ToString(waitForToken + 1));
                    LogWriter.file = appFolder + "\\PMU.log";
                    sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Waiting for token. Attempt " + Convert.ToString(waitForToken + 1));
                    LogWriter.sTextToAppend = sTextToAppend;
                    LogWriter.Append();
                    sTextToAppend.Clear();
                    System.Threading.Thread.Sleep(300000); // Wait for 5 minutes
                    waitForToken++;
                    FilesOnSource = GetS3Files(client, sSourceBucketName, sSourceBucketPrefix);
                }
                if (waitForToken == 6)
                {
                    // Token never showed up: log the timeout and abort this run.
                    // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - TIMEOUT!!! EXECUTION ABORTED.");
                    // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " -------------------------------------");
                    // LogWriter.Close();
                    LogWriter.file = appFolder + "\\PMU.log";
                    sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - TIMEOUT!!! EXECUTION ABORTED." + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " -------------------------------------");
                    LogWriter.sTextToAppend = sTextToAppend;
                    LogWriter.Append();
                    sTextToAppend.Clear();
                    return;
                }
            }

            // ---- Decide which files to copy. ----
            // sLatestFile tracks the lexicographically-greatest ".d" file name copied, seeded from the value
            // persisted by the previous run (lastFileSavedFromFile); it is written back to LastFile.pmu below.
            sLatestFile = lastFileSavedFromFile;
            foreach (string currFile in FilesOnSource.Keys)
            {
                if (currFile != "ppa_archive.d") // It is NOT necessary to move this file, since it's a temporary file - it'll be renamed and recreated when a rollover happens
                {
                    if (!FilesOnTarget.ContainsKey(currFile))
                    {
                        // Missing on target. ".dat" files uploaded from a local folder are first duplicated
                        // locally under a "z_" prefix and the copy is what gets uploaded (the prefix also makes
                        // the name sort after lastFileSavedFromFile, so "New" runs pick it up).
                        if (currFile.EndsWith(".dat") && !currFile.StartsWith("z_"))
                        {
                            if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                            {
                                FilesToCopy.Add(currFile, currFile);
                            }
                            else
                            {
                                if (File.Exists(dirName + "\\z_" + currFile))
                                {
                                    File.Delete(dirName + "\\z_" + currFile);
                                }
                                File.Copy(dirName + "\\" + currFile, dirName + "\\z_" + currFile); // Use "prefix" z_ so that this file name is greater than lastFileSavedFromFile
                                FilesToCopy.Add("z_" + currFile, "z_" + currFile);
                            }
                        }
                        else
                        {
                            FilesToCopy.Add(currFile, currFile);
                            // Only ".d" archive files advance the "latest file" watermark.
                            if (String.Compare(currFile, sLatestFile, true) > 0 && currFile.Substring(currFile.Length - 2, 2) == ".d")
                            {
                                sLatestFile = currFile;
                            }
                        }
                    }
                    else
                    {
                        // Already on target: ".dat" files and the synch token are still (re)copied;
                        // everything else is skipped.
                        if (currFile.EndsWith(".dat") && !currFile.StartsWith("z_")) // Copying .dat files
                        {
                            if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                            {
                                FilesToCopy.Add(currFile, currFile);
                            }
                            else
                            {
                                if (File.Exists(dirName + "\\z_" + currFile))
                                {
                                    File.Delete(dirName + "\\z_" + currFile);
                                }
                                File.Copy(dirName + "\\" + currFile, dirName + "\\z_" + currFile); // Use "prefix" z_ so that this file name is greater than lastFileSavedFromFile
                                FilesToCopy.Add("z_" + currFile, "z_" + currFile);
                            }
                        }
                        else if (currFile == "SynchToken.pmu") // Copying synch token
                        {
                            FilesToCopy.Add(currFile, currFile);
                        }
                    }
                }
                // NOTE(review): earlier variant of the selection logic, kept for reference.
                //if (!FilesOnTarget.ContainsKey(currFile))
                //{
                //    //Copying the file ppa_archive.d may cause problems to the openPDC process, because the AWS API locks the file while it's being copied
                //    //and the openPDC process may try to rollover in the meantime. So, a copy of the file is made, and the copy is treated.
                //    //The ppa_archive.d, along with the .dat files, is necessary for reading historic files.
                //    if (currFile == "ppa_archive.d")
                //    {
                //        if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // Local folder is the source
                //            FilesToCopy.Add(currFile, currFile);
                //        else
                //        {
                //            if (File.Exists(dirName + "\\z_" + currFile))
                //                File.Delete(dirName + "\\z_" + currFile);
                //            File.Copy(dirName + "\\" + currFile, dirName + "\\z_" + currFile); // Use "prefix" z_ so that this file name is greater than lastFileSavedFromFile
                //            FilesToCopy.Add("z_" + currFile, "z_" + currFile);
                //        }
                //    }
                //    else
                //    {
                //        FilesToCopy.Add(currFile, currFile);
                //        if (!currFile.StartsWith("z_"))
                //            if (String.Compare(currFile, sLatestFile, true) > 0 && currFile.Substring(currFile.Length - 2, 2) == ".d")
                //                sLatestFile = currFile;
                //    }
                //}
                //else
                //{
                //    //The .dat files, along with the ppa_archive.d file, are necessary for reading historic files. Since in most executions these files are
                //    //already in the target folder - and therefore, due to the if above, are not added to FilesToCopy - their copy is forced in the following
                //    //piece of code. Like the ppa_archive.d, they have to be copied in order to be treated.
                //    if ((currFile.EndsWith(".dat") && !currFile.StartsWith("z_") || currFile == "ppa_archive.d"))
                //    {
                //        if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                //            FilesToCopy.Add(currFile, currFile);
                //        else
                //        {
                //            if (File.Exists(dirName + "\\z_" + currFile))
                //                File.Delete(dirName + "\\z_" + currFile);
                //            File.Copy(dirName + "\\" + currFile, dirName + "\\z_" + currFile); // Use "prefix" z_ so that this file name is greater than lastFileSavedFromFile
                //            FilesToCopy.Add("z_" + currFile, "z_" + currFile);
                //        }
                //    }
                //}
            }

            // ---- Copy the selected files. ----
            // bLocalFilesPresent selects the transfer utility (local<->S3) vs. a pure S3->S3 CopyObject.
            // NOTE(review): bucket names are built as "bucket/prefix" and passed as the SourceBucket /
            // DestinationBucket — presumably the SDK version in use accepts a path-style bucket argument;
            // verify against the AmazonS3 client wrapper actually referenced by this project.
            foreach (string currFile in FilesToCopy.Keys)
            {
                if (bLocalFilesPresent)
                {
                    if (loadAll)
                    {
                        try
                        {
                            if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                            {
                                transfer.Download(dirName + "\\" + currFile, sSourceBucketName + "/" + sSourceBucketPrefix, currFile);
                            }
                            else // Local folder is the source
                            {
                                transfer.Upload(dirName + "/" + currFile, sTargetBucketName + "/" + sTargetBucketPrefix, currFile);
                            }
                        }
                        catch (Exception e)
                        {
                            // A failed copy is logged together with the openPDC service state and the run continues.
                            // NOTE(review): "NOT COPPIED" is a typo in a runtime log string; left untouched here.
                            sServiceStatus = GetServiceStatus("openPDC");
                            LogWriter.file = appFolder + "\\PMU.log";
                            sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - File " + currFile + " NOT COPPIED!!! " + e.Message + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Service was " + sServiceStatus);
                            LogWriter.sTextToAppend = sTextToAppend;
                            LogWriter.Append();
                            sTextToAppend.Clear();
                            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - File " + currFile + " NOT COPPIED!!! " + e.Message);
                            // sServiceStatus = GetServiceStatus("openPDC");
                            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Service was " + sServiceStatus);
                        }
                    }
                    else
                    {
                        try
                        {
                            // "New" mode: only files sorting after the previous run's watermark are transferred.
                            //if (loadNew && (String.Compare(currFile, lastFileSavedFromFile) > 0 || currFile == "ppa_archive.d"))
                            if (loadNew && String.Compare(currFile, lastFileSavedFromFile) > 0)
                            {
                                if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                                {
                                    transfer.Download(dirName + "\\" + currFile, sSourceBucketName + "/" + sSourceBucketPrefix, currFile);
                                }
                                else // Local folder is the source
                                {
                                    transfer.Upload(dirName + "/" + currFile, sTargetBucketName + "/" + sTargetBucketPrefix, currFile);
                                }
                            }
                        }
                        catch (Exception e)
                        {
                            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - File " + currFile + " NOT COPPIED!!! " + e.Message);
                            // sServiceStatus = GetServiceStatus("openPDC");
                            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Service was " + sServiceStatus);
                            sServiceStatus = GetServiceStatus("openPDC");
                            LogWriter.file = appFolder + "\\PMU.log";
                            sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - File " + currFile + " NOT COPPIED!!! " + e.Message + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Service was " + sServiceStatus);
                            LogWriter.sTextToAppend = sTextToAppend;
                            LogWriter.Append();
                            sTextToAppend.Clear();
                        }
                    }
                }
                else
                {
                    // No local side involved: S3-to-S3 server-side copy.
                    try
                    {
                        if (loadAll)
                        {
                            copyRequest.SourceBucket = sSourceBucketName + "/" + sSourceBucketPrefix;
                            copyRequest.SourceKey = currFile;
                            copyRequest.DestinationBucket = sTargetBucketName + "/" + sTargetBucketPrefix;
                            copyRequest.DestinationKey = currFile;
                            copyResponse = client.CopyObject(copyRequest);
                        }
                        else
                        {
                            //if (loadNew && (String.Compare(currFile, lastFileSavedFromFile) > 0 || currFile == "ppa_archive.d"))
                            if (loadNew && String.Compare(currFile, lastFileSavedFromFile) > 0)
                            {
                                copyRequest.SourceBucket = sSourceBucketName + "/" + sSourceBucketPrefix;
                                copyRequest.SourceKey = currFile;
                                copyRequest.DestinationBucket = sTargetBucketName + "/" + sTargetBucketPrefix;
                                copyRequest.DestinationKey = currFile;
                                copyResponse = client.CopyObject(copyRequest);
                            }
                        }
                    }
                    catch (Exception e)
                    {
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - File " + currFile + " NOT COPPIED!!! " + e.Message);
                        // sServiceStatus = GetServiceStatus("openPDC");
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Service was " + sServiceStatus);
                        sServiceStatus = GetServiceStatus("openPDC");
                        LogWriter.file = appFolder + "\\PMU.log";
                        sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - File " + currFile + " NOT COPPIED!!! " + e.Message + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Service was " + sServiceStatus);
                        LogWriter.sTextToAppend = sTextToAppend;
                        LogWriter.Append();
                        sTextToAppend.Clear();
                    }
                }
                Console.WriteLine(currFile + " treated.");
            }

            // ---- Producer side of the handshake: publish the synch token to the target. ----
            if (synchToTarget)
            {
                if (bLocalFilesPresent)
                {
                    if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                    {
                        transfer.Download("SynchToken.pmu", sSourceBucketName + "/" + sSourceBucketPrefix, "SynchToken.pmu");
                    }
                    else // Local folder is the source
                    {
                        transfer.Upload(dirName + "\\SynchToken.pmu", sTargetBucketName + "/" + sTargetBucketPrefix, "SynchToken.pmu");
                    }
                }
                else
                {
                    copyRequest.SourceBucket = sSourceBucketName + "/" + sSourceBucketPrefix;
                    copyRequest.SourceKey = "SynchToken.pmu";
                    copyRequest.DestinationBucket = sTargetBucketName + "/" + sTargetBucketPrefix;
                    copyRequest.DestinationKey = "SynchToken.pmu";
                    copyResponse = client.CopyObject(copyRequest);
                }
            }
            // ---- Consumer side: remove the token from the source once the copy is done. ----
            if (SynchToSource)
            {
                if (args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://") // S3 is the source
                {
                    deleteRequest.BucketName = sSourceBucketName + "/" + sSourceBucketPrefix;
                    deleteRequest.Key = "SynchToken.pmu";
                    client.DeleteObject(deleteRequest);
                }
                else // Local folder is the source
                {
                    System.IO.File.Delete(dirName + "\\SynchToken.pmu");
                }
            }

            // Deleting the z_ files on local storage and renaming them on S3
            // (strip the "z_" prefix on S3 via copy+delete, then remove the local temporary copy).
            if (!(args[0].Substring(0, 5) == "s3://" || args[0].Substring(0, 5) == "S3://")) // Local storage is the source
            {
                z_Files = GetLocalFiles(dirName, "z_*");
                foreach (string z_File in z_Files.Keys)
                {
                    try
                    {
                        copyRequest.SourceBucket = sTargetBucketName + "/" + sTargetBucketPrefix;
                        copyRequest.SourceKey = z_File;
                        copyRequest.DestinationBucket = sTargetBucketName + "/" + sTargetBucketPrefix;
                        copyRequest.DestinationKey = z_File.Substring(2); // drop the "z_" prefix
                        copyResponse = client.CopyObject(copyRequest);
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - " + z_File + " renamed on S3");
                        LogWriter.file = appFolder + "\\PMU.log";
                        sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - " + z_File + " renamed on S3");
                        LogWriter.sTextToAppend = sTextToAppend;
                        LogWriter.Append();
                        sTextToAppend.Clear();
                        deleteRequest.BucketName = sTargetBucketName + "/" + sTargetBucketPrefix;
                        deleteRequest.Key = z_File;
                        client.DeleteObject(deleteRequest);
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - " + z_File + " deleted from S3");
                        LogWriter.file = appFolder + "\\PMU.log";
                        sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - " + z_File + " deleted from S3");
                        LogWriter.sTextToAppend = sTextToAppend;
                        LogWriter.Append();
                        sTextToAppend.Clear();
                    }
                    catch (Exception e)
                    {
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error treating file " + z_File + " on S3");
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error Message: " + e.Message);
                        LogWriter.file = appFolder + "\\PMU.log";
                        sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error treating file " + z_File + " on S3" + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error Message: " + e.Message);
                        LogWriter.sTextToAppend = sTextToAppend;
                        LogWriter.Append();
                        sTextToAppend.Clear();
                    }
                    try
                    {
                        System.IO.File.Delete(dirName + "\\" + z_File);
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - " + z_File + " deleted from local folder");
                        LogWriter.file = appFolder + "\\PMU.log";
                        sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - " + z_File + " deleted from local folder");
                        LogWriter.sTextToAppend = sTextToAppend;
                        LogWriter.Append();
                        sTextToAppend.Clear();
                    }
                    catch (Exception e)
                    {
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error treating local file " + z_File);
                        // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error Message: " + e.Message);
                        LogWriter.file = appFolder + "\\PMU.log";
                        sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error treating local file " + z_File + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Error Message: " + e.Message);
                        LogWriter.sTextToAppend = sTextToAppend;
                        LogWriter.Append();
                        sTextToAppend.Clear();
                    }
                }
            }

            // Storing last file saved
            // NOTE(review): unlike the log path (appFolder + "\\PMU.log"), no "\\" separator is inserted here,
            // so this resolves to "<appFolder>LastFile.pmu" — likely a sibling of appFolder rather than a file
            // inside it unless appFolder already ends with a separator. Confirm intent (Path.Combine would be safer).
            // NOTE(review): MyWriter is not wrapped in using/try-finally; if Write throws, the stream leaks
            // (the outer catch below does not close it).
            System.IO.File.Delete(appFolder + "LastFile.pmu");
            StreamWriter MyWriter = new StreamWriter(appFolder + "LastFile.pmu");
            MyWriter.Write(sLatestFile);
            MyWriter.Close();
            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - End of generation.");
            // LogWriter.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " -------------------------------------");
            // LogWriter.Close();
            LogWriter.file = appFolder + "\\PMU.log";
            sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - End of generation." + Environment.NewLine + DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " -------------------------------------");
            LogWriter.sTextToAppend = sTextToAppend;
            LogWriter.Append();
            sTextToAppend.Clear();
        } // end try
        catch (Exception e)
        {
            // Last-resort handler for the whole run: log to console and PMU.log, then abort.
            Console.WriteLine(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Erro! " + e.Message);
            LogWriter.file = appFolder + "\\PMU.log";
            sTextToAppend.Append(DateTime.Now.ToString("yyyyMMdd HH:mm:ss") + " - Erro! " + e.Message);
            LogWriter.sTextToAppend = sTextToAppend;
            LogWriter.Append();
            sTextToAppend.Clear();
            return;
        }
    }
}