/// <summary>
/// Best-effort abort of an in-progress multipart upload so S3 does not keep
/// billing for orphaned parts. Failures are intentionally swallowed: this is
/// cleanup code invoked from error paths and no caller can meaningfully react
/// to an abort failure.
/// </summary>
/// <param name="uploadId">UploadId returned by InitiateMultipartUpload.</param>
/// <param name="s3Key">Object key of the upload being aborted.</param>
private void AbortChunkUpload(string uploadId, string s3Key)
{
    try
    {
        AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest
        {
            UploadId = uploadId,
            BucketName = _bucketName,
            Key = s3Key
        };
        // The response carries nothing we act on; discard it explicitly
        // instead of binding an unused local.
        _ = _awsS3Client.AbortMultipartUpload(abortRequest);
    }
    catch (Exception)
    {
        // Deliberate swallow: abort is best-effort cleanup.
        // NOTE(review): consider logging here — no logger is visible in this scope, confirm.
    }
}
/// <summary>
/// Uploads <c>filePath</c> to S3 as a multipart upload split into roughly five
/// parts, timing the whole operation. On any failure the upload is aborted so
/// no orphaned parts remain in the bucket.
/// </summary>
public static async Task PutObjectAsMultiPartAsync()
{
    Stopwatch sw = Stopwatch.StartNew();

    // SECURITY: never hard-code AWS credentials in source. Move these to the
    // default credential chain (environment variables, shared profile, or IAM
    // role) and rotate this key pair immediately — it must be treated as leaked.
    s3Client = new AmazonS3Client(
        "AKIA6PYYJMASLJEFTI6E",
        "IJPo9Ys58iAb35dKw4kcW/SkOU2J+iI9IOA5Wpl6",
        Amazon.RegionEndpoint.APSoutheast1);

    // One response per uploaded part; their ETags are required by
    // CompleteMultipartUpload.
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();

    // 1. Initiate the multipart upload.
    var initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = keyName
    };
    var initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest);

    // 2. Upload parts. Integer division can leave a remainder, so the loop
    // runs until the whole file is covered and the final part is capped at
    // the bytes actually remaining. NOTE: every part except the last must be
    // at least 5 MB, so this fixed five-way split fails for small files —
    // TODO confirm the expected file sizes.
    var partNumber = 5;
    var contentLength = new FileInfo(filePath).Length;
    var partSize = contentLength / partNumber;
    long filePosition = 0;
    try
    {
        for (var i = 1; filePosition < contentLength; ++i)
        {
            Console.WriteLine(filePosition);
            var uploadRequest = new UploadPartRequest
            {
                BucketName = bucketName,
                Key = keyName,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                // BUGFIX: cap the final part at the remaining bytes instead
                // of declaring a full partSize that runs past end-of-file.
                PartSize = Math.Min(partSize, contentLength - filePosition),
                FilePosition = filePosition,
                FilePath = filePath
            };
            uploadResponses.Add(await s3Client.UploadPartAsync(uploadRequest));
            filePosition += partSize;
        }

        // 3. Complete: S3 stitches the parts together using their ETags.
        var completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId,
        };
        completeRequest.AddPartETags(uploadResponses);
        var completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);

        sw.Stop();
        Console.WriteLine("Upload completed : {0}", sw.Elapsed.TotalMilliseconds);
    }
    catch (Exception exception)
    {
        Console.WriteLine("Exception occurred: {0}", exception.ToString());
        // Abort so S3 discards the already-uploaded parts; otherwise they
        // keep accruing storage charges.
        var abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
    }
}
/// <summary>
/// Mirrors the files under <paramref name="src"/> to the S3 location named by
/// the <paramref name="dst"/> URI (host = bucket, path = key prefix). Local
/// MD5s are cached in a side file so unchanged files are skipped; files at or
/// above the chunk size are uploaded as multipart uploads. The Archive
/// attribute is cleared on every file after its upload attempt.
/// </summary>
/// <param name="src">Local source directory.</param>
/// <param name="dst">Destination URI; host is the bucket, path the key prefix.</param>
private static void CopyTo(string src, string dst)
{
    var uri = new Uri(dst);
    var dstBucket = uri.Host;
    var dstPath = uri.PathAndQuery.Substring(1); // drop the leading '/'
    Log.InfoFormat("Bucket: {0}, Path: {1}", dstBucket, dstPath);

    var md5File = Path.Combine(src, md5fileName);
    var md5Dictionary = LoadMd5File(md5File);

    // Candidate files: non-empty, visible, not the md5 cache itself, Archive
    // flag set (changed since last mirror) and not Temporary.
    var files = FetchFiles(src)
        .Where(x => x.Length > 0)
        .Where(x => !x.Attributes.HasFlag(FileAttributes.Hidden))
        .Where(x => !x.Name.StartsWith("."))
        .Where(x => x.FullName != md5File)
        .Where(x => (x.Attributes & FileAttributes.Archive) == FileAttributes.Archive)
        .Where(x => (x.Attributes & FileAttributes.Temporary) != FileAttributes.Temporary)
        .Select(x => Map(src, x));

    // Refresh the MD5 cache: recompute only when size or mtime changed, and
    // append newly computed entries to the cache file as we go.
    using (var md5FileWriter = File.AppendText(md5File))
    {
        files = files.Select(x =>
        {
            Md5Item item;
            if (!md5Dictionary.TryGetValue(x.Path, out item)
                || x.Length != item.Length
                || x.ModifiedUtc != item.ModifiedUtc)
            {
                x.Hash = Md5Hash.Calculate(x.Path).ToHex();
                item = new Md5Item(x.Hash, x.Path, x.Length, x.ModifiedUtc);
                md5Dictionary[x.Path] = item;
                lock (md5FileWriter)
                {
                    md5FileWriter.WriteLine(item);
                }
            }
            x.Hash = item.Md5;
            return (x);
        }).ToList();
    }
    WriteMd5Dictionary(md5Dictionary, md5File);

    var objects = FetchObjects(dstBucket, dstPath)
        .Select(x => Map(dstPath, x))
        .ToDictionary(x => x.Relative, x => x);

    int chunkSize = 1024 * 1024 * 8; //8MB
    Log.DebugFormat("Chunk size: {0} ({1} bytes)", Pretty(chunkSize), chunkSize);

    /*
     * Only mirror files that:
     * - do not exist at the destination yet
     * - or have a different length
     * - or have a different md5 hash
     * - or have a different s3md5 (multipart composite) hash
     */
    var items = files
        .Where(x =>
        {
            var destinationNotExists = !objects.ContainsKey(x.Relative);
            if (destinationNotExists)
            {
                return (true);
            }
            var obj = objects[x.Relative];
            var differentSize = x.Length != obj.Length;
            if (differentSize)
            {
                return (true);
            }
            var etag = obj.S3Object.ETag;
            // Multipart ETags have the form "<hash>-<partcount>"; recompute
            // the composite hash with the part size the ETag implies.
            var isMultiPart = etag.Contains("-");
            if (isMultiPart)
            {
                var parts = ExtractParts(etag);
                var guestimateChunkSize = GuessChunkSize(obj.Length, parts);
                var s3md5mismatch = !obj.Hash.Contains(S3Md5.Calculate(x.Path, guestimateChunkSize));
                return (s3md5mismatch);
            }
            else
            {
                var md5mismatch = x.Hash != obj.Hash;
                return (md5mismatch);
            }
        })
        .ToList();

    Log.InfoFormat("Items to be mirrored: {0}", items.Count);

    using (IAmazonS3 client = CreateS3Client())
        foreach (var item in items)
        {
            var key = dstPath + item.Relative;
            Log.DebugFormat("Uploading {0} => {1}", item.Relative, key);
            if (item.Length < chunkSize)
            {
                client.UploadObjectFromFilePath(dstBucket, key, item.Path, null);
                // BUGFIX: dispose the GetObject response — it carries the
                // object body stream, which the original leaked.
                bool isMatch;
                using (var getResponse = client.GetObject(dstBucket, key))
                {
                    isMatch = getResponse.ETag.Contains(item.Hash);
                }
                if (!isMatch)
                {
                    Log.ErrorFormat("Upload failed: {0}", item.Relative);
                }
            }
            else
            {
                Log.Debug("Multi-part");
                var response = client.InitiateMultipartUpload(dstBucket, key);
                try
                {
                    long index = 0;
                    var md5s = new List<PartETag>();
                    for (int part = 1; index < item.Length; part++)
                    {
                        var md5 = Md5Hash.Calculate(item.Path, index, chunkSize);
                        var partSize = Math.Min(chunkSize, item.Length - index);
                        Log.DebugFormat("\tPart {0} ({1:N0}): {2}", part, partSize, md5.ToHex());
                        client.UploadPart(new UploadPartRequest
                        {
                            Key = key,
                            BucketName = dstBucket,
                            FilePath = item.Path,
                            FilePosition = index,
                            PartNumber = part,
                            // BUGFIX: declare the actual part size (was
                            // chunkSize — wrong for the final, shorter part).
                            PartSize = partSize,
                            UploadId = response.UploadId,
                            MD5Digest = System.Convert.ToBase64String(md5),
                        });
                        md5s.Add(new PartETag(part, md5.ToHex()));
                        index += partSize;
                    }
                    client.CompleteMultipartUpload(new CompleteMultipartUploadRequest
                    {
                        Key = key,
                        BucketName = dstBucket,
                        PartETags = md5s,
                        UploadId = response.UploadId,
                    });
                }
                catch (Exception ex)
                {
                    Log.Error(item.Relative, ex);
                    // Abort so S3 discards the already-uploaded parts.
                    client.AbortMultipartUpload(dstBucket, key, response.UploadId);
                }
            }
            // Clear Archive so the file is skipped on the next run.
            File.SetAttributes(item.Path, File.GetAttributes(item.Path) & ~FileAttributes.Archive);
        }
}
/// <summary>
/// Server-side copies the SSE-C encrypted object <c>sourceKeyName</c> to
/// <c>targetKeyName</c> in the same bucket using 5 MB multipart copy-part
/// requests. The customer-provided key encrypts the target and is also needed
/// to read the source. On any failure the multipart upload is aborted.
/// </summary>
/// <param name="s3Client">Configured S3 client.</param>
/// <param name="base64Key">Base64-encoded SSE-C key used for both source and target.</param>
private static async Task CopyObjectAsync(IAmazonS3 s3Client, string base64Key)
{
    List<CopyPartResponse> uploadResponses = new List<CopyPartResponse>();

    // 1. Initiate the multipart upload on the target key.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = existingBucketName,
        Key = targetKeyName,
        ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
        ServerSideEncryptionCustomerProvidedKey = base64Key,
    };
    InitiateMultipartUploadResponse initResponse =
        await s3Client.InitiateMultipartUploadAsync(initiateRequest);

    // 2. Copy parts. FirstByte/LastByte are INCLUSIVE, so each part spans
    // [firstByte, firstByte + partSize - 1].
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB
    long firstByte = 0;
    // BUGFIX: inclusive upper bound. The original used partSize, which made
    // every part one byte too long and overlap the next part's first byte.
    long lastByte = partSize - 1;
    try
    {
        // Find the source object size. The source is stored encrypted with a
        // customer-provided key, so the metadata request must carry it too.
        GetObjectMetadataRequest getObjectMetadataRequest = new GetObjectMetadataRequest()
        {
            BucketName = existingBucketName,
            Key = sourceKeyName,
            ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
            ServerSideEncryptionCustomerProvidedKey = base64Key
        };
        GetObjectMetadataResponse getObjectMetadataResponse =
            await s3Client.GetObjectMetadataAsync(getObjectMetadataRequest);

        long filePosition = 0;
        for (int i = 1; filePosition < getObjectMetadataResponse.ContentLength; i++)
        {
            CopyPartRequest copyPartRequest = new CopyPartRequest
            {
                UploadId = initResponse.UploadId,
                // Source.
                SourceBucket = existingBucketName,
                SourceKey = sourceKeyName,
                // Source object is stored using SSE-C; provide its key.
                CopySourceServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                CopySourceServerSideEncryptionCustomerProvidedKey = base64Key,
                FirstByte = firstByte,
                // Clamp the final (possibly shorter) part to the object's last byte.
                LastByte = lastByte >= getObjectMetadataResponse.ContentLength
                    ? getObjectMetadataResponse.ContentLength - 1
                    : lastByte,
                // Target.
                DestinationBucket = existingBucketName,
                DestinationKey = targetKeyName,
                PartNumber = i,
                // Encryption information for the target object.
                ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                ServerSideEncryptionCustomerProvidedKey = base64Key
            };
            uploadResponses.Add(await s3Client.CopyPartAsync(copyPartRequest));
            filePosition += partSize;
            firstByte += partSize;
            lastByte += partSize;
        }

        // 3. Complete: register all part ETags and finish the upload.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = targetKeyName,
            UploadId = initResponse.UploadId,
        };
        completeRequest.AddPartETags(uploadResponses);
        CompleteMultipartUploadResponse completeUploadResponse =
            await s3Client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (Exception exception)
    {
        Console.WriteLine("Exception occurred: {0}", exception.Message);
        // Abort so S3 discards already-copied parts. BUGFIX: await the async
        // API instead of the original fire-and-forget synchronous call.
        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = targetKeyName,
            UploadId = initResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
    }
}