/// <summary>
/// Performs a number of async copy object operations in parallel; all public methods should
/// call this.
/// </summary>
/// <param name="client">The S3 client used to execute the requests.</param>
/// <param name="request">The bulk copy request to process.</param>
/// <param name="deleteSource">If true, each source object is deleted after a successful copy, turning the copy into a move.</param>
/// <returns>The responses for the successful copies together with the details of any failures.</returns>
private static async Task<BulkCopyResponse> CoreBulkCopyAsync(this IAmazonS3 client, BulkCopyRequest request, bool deleteSource)
{
    ParameterTests.NonNull(request, "request");
    ParameterTests.OutOfRange(request.PartSize >= Constants.MINIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size must be at least {Constants.MINIMUM_MULTIPART_PART_SIZE} bytes.");
    ParameterTests.OutOfRange(request.PartSize <= Constants.MAXIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size cannot exceed {Constants.MAXIMUM_MULTIPART_PART_SIZE} bytes.");

    // Make sure there are no requests that have the same source and destination
    IEnumerable<CopyObjectRequest> errors = request.Requests
        .Where(x => x.SourceKey == x.DestinationKey &&
            x.SourceBucket != null &&
            x.SourceBucket.Equals(x.DestinationBucket, StringComparison.OrdinalIgnoreCase));

    if (errors.Any())
    {
        throw new SourceDestinationSameException("The bulk copy/move operation contained requests that had the same source and destination and could cause the accidental loss of data.", errors);
    }

    List<CopyObjectRequestResponse> responses = new List<CopyObjectRequestResponse>();
    List<FailedCopyRequest> failures = new List<FailedCopyRequest>();

    // Don't copy objects that have the same source and destination. Object keys are
    // case sensitive, but bucket names are not; they are all supposed to be lower case.
    IEnumerable<CopyObjectRequest> filtered = request.Requests
        .Where(x => !(x.SourceKey == x.DestinationKey &&
            x.SourceBucket != null &&
            x.SourceBucket.Equals(x.DestinationBucket, StringComparison.OrdinalIgnoreCase)));

    int counter = 0;

    foreach (List<CopyObjectRequest> chunk in filtered.Chunk(request.MaxConcurrency))
    {
        Debug.WriteLine($"Processing request chunk {++counter}.");

        List<Task<CopyObjectRequestResponse>> insideLoop = new List<Task<CopyObjectRequestResponse>>();

        foreach (CopyObjectRequest req in chunk)
        {
            try
            {
                if (request.PreferMultipart)
                {
                    insideLoop.Add(client.CopyOrMoveObjectAsync(req, request.PartSize, deleteSource, preferMultipartLogic));
                }
                else
                {
                    insideLoop.Add(client.CopyOrMoveObjectAsync(req, request.PartSize, deleteSource, standardMultipartLogic));
                }
            }
            catch (Exception e)
            {
                failures.Add(new FailedCopyRequest(req, e, FailureMode.COPY));
            }
        }

        try
        {
            IEnumerable<CopyObjectRequestResponse> responseChunk = await Task.WhenAll(insideLoop);
            responses.AddRange(responseChunk);
        }
        catch (Exception e)
        {
            failures.Add(new FailedCopyRequest(null, e, FailureMode.COPY));
        }
    }

    // Pair each original request with its response
    var dict = responses.ToDictionary(x => x.Request, x => x.Response);
    return new BulkCopyResponse(dict, failures);
}
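// The summary above says every public method should funnel through CoreBulkCopyAsync.
// A minimal sketch of what two such entry points might look like; the names
// BulkCopyAsync and BulkMoveAsync are assumptions for illustration and may not match
// this class's actual public surface.
public static Task<BulkCopyResponse> BulkCopyAsync(this IAmazonS3 client, BulkCopyRequest request)
{
    // Hypothetical wrapper: a plain copy leaves the source objects in place
    return client.CoreBulkCopyAsync(request, deleteSource: false);
}

public static Task<BulkCopyResponse> BulkMoveAsync(this IAmazonS3 client, BulkCopyRequest request)
{
    // Hypothetical wrapper: a move deletes each source object after a successful copy
    return client.CoreBulkCopyAsync(request, deleteSource: true);
}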
/// <summary>
/// Provides the actual implementation to move or copy an S3 object
/// </summary>
/// <param name="client">The S3 client used to execute the requests.</param>
/// <param name="request">The copy request to execute.</param>
/// <param name="partSize">The part size, in bytes, to use if the copy is performed as a multipart operation.</param>
/// <param name="deleteSource">If true, the source object is deleted after a successful copy.</param>
/// <param name="useMultipart">Given the object size and the part size, decides whether to perform a multipart copy.</param>
/// <returns>The original request paired with its response.</returns>
private static async Task<CopyObjectRequestResponse> CopyOrMoveObjectAsync(this IAmazonS3 client, CopyObjectRequest request, long partSize, bool deleteSource, Func<long, long, bool> useMultipart)
{
    // Handle operation cancelled exceptions by retrying with exponential backoff
    ExponentialBackoffAndRetryClient backoffClient = new ExponentialBackoffAndRetryClient(4, 100, 1000)
    {
        ExceptionHandlingLogic = (ex) => ex is OperationCanceledException
    };

    ParameterTests.NonNull(request, "request");
    ParameterTests.OutOfRange(partSize >= Constants.MINIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size must be at least {Constants.MINIMUM_MULTIPART_PART_SIZE} bytes.");
    ParameterTests.OutOfRange(partSize <= Constants.MAXIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size cannot exceed {Constants.MAXIMUM_MULTIPART_PART_SIZE} bytes.");

    if (request.SourceKey == request.DestinationKey &&
        request.SourceBucket != null &&
        request.SourceBucket.Equals(request.DestinationBucket, StringComparison.OrdinalIgnoreCase))
    {
        throw new SourceDestinationSameException("The source and destination of the copy operation cannot be the same.", new CopyObjectRequest[] { request });
    }

    // Get the size of the object
    GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest
    {
        BucketName = request.SourceBucket,
        Key = request.SourceKey
    };

    GetObjectMetadataResponse metadataResponse = await backoffClient.RunAsync(() => client.GetObjectMetadataAsync(metadataRequest));
    long objectSize = metadataResponse.ContentLength; // Length in bytes

    CopyObjectResponse response = null;

    if (useMultipart(objectSize, partSize))
    {
        // If it takes more than a 5 GiB part size to keep the object at 10,000 or
        // fewer parts, then this operation isn't supported for an object this size.
        // Ceiling division so the final partial part is counted as well.
        if ((objectSize + partSize - 1) / partSize > Constants.MAXIMUM_PARTS)
        {
            throw new NotSupportedException($"The object size, {objectSize}, cannot be broken into {Constants.MAXIMUM_PARTS} or fewer parts using a part size of {partSize} bytes.");
        }

        List<Task<CopyPartResponse>> copyResponses = new List<Task<CopyPartResponse>>();

        // The ObjectLockRetainUntilDate property has a nullable backing private field
        // that, when set to anything non-null, causes the
        // x-amz-object-lock-retain-until-date header to be sent, which in turn results
        // in an exception being thrown that the bucket is missing an
        // ObjectLockConfiguration, so that property is excluded from the conversion
        InitiateMultipartUploadRequest initiateRequest = request.ConvertTo<InitiateMultipartUploadRequest>("ObjectLockRetainUntilDate");
        initiateRequest.BucketName = request.DestinationBucket;
        initiateRequest.Key = request.DestinationKey;

        InitiateMultipartUploadResponse initiateResponse = await backoffClient.RunAsync(() => client.InitiateMultipartUploadAsync(initiateRequest));

        try
        {
            long bytePosition = 0;
            int counter = 1;

            // Launch all of the copy parts
            while (bytePosition < objectSize)
            {
                CopyPartRequest copyRequest = request.ConvertTo<CopyPartRequest>("ObjectLockRetainUntilDate");
                copyRequest.UploadId = initiateResponse.UploadId;
                copyRequest.FirstByte = bytePosition;

                // If we're on the last part, the last byte is the object size minus 1;
                // otherwise it is the part size minus one added to the current byte position
                copyRequest.LastByte = ((bytePosition + partSize - 1) >= objectSize) ? objectSize - 1 : bytePosition + partSize - 1;

                copyRequest.PartNumber = counter++;

                copyResponses.Add(backoffClient.RunAsync(() => client.CopyPartAsync(copyRequest)));

                bytePosition += partSize;
            }

            IEnumerable<CopyPartResponse> responses = (await Task.WhenAll(copyResponses)).OrderBy(x => x.PartNumber);

            // Set up to complete the copy
            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
            {
                BucketName = request.DestinationBucket,
                Key = request.DestinationKey,
                UploadId = initiateResponse.UploadId
            };

            completeRequest.AddPartETags(responses);

            // Complete the copy
            CompleteMultipartUploadResponse completeUploadResponse = await backoffClient.RunAsync(() => client.CompleteMultipartUploadAsync(completeRequest));

            response = completeUploadResponse.CopyProperties<CopyObjectResponse>();
            response.SourceVersionId = metadataResponse.VersionId;
        }
        catch (AmazonS3Exception)
        {
            // Clean up the orphaned multipart upload before surfacing the failure
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
            {
                BucketName = request.DestinationBucket,
                Key = request.DestinationKey,
                UploadId = initiateResponse.UploadId
            };

            await backoffClient.RunAsync(() => client.AbortMultipartUploadAsync(abortRequest));

            throw; // Rethrow without resetting the stack trace
        }
    }
    else
    {
        response = await backoffClient.RunAsync(() => client.CopyObjectAsync(request));
    }

    if (response.HttpStatusCode != HttpStatusCode.OK)
    {
        throw new AmazonS3Exception($"Could not copy object from s3://{request.SourceBucket}/{request.SourceKey} to s3://{request.DestinationBucket}/{request.DestinationKey}. Received response: {(int)response.HttpStatusCode}");
    }

    // We already checked to make sure the source and destination weren't the same,
    // so it's safe to delete the source object
    if (deleteSource)
    {
        DeleteObjectRequest deleteRequest = new DeleteObjectRequest()
        {
            BucketName = request.SourceBucket,
            Key = request.SourceKey
        };

        DeleteObjectResponse deleteResponse = await backoffClient.RunAsync(() => client.DeleteObjectAsync(deleteRequest));

        if (deleteResponse.HttpStatusCode != HttpStatusCode.NoContent)
        {
            throw new AmazonS3Exception($"Could not delete s3://{request.SourceBucket}/{request.SourceKey}. Received response: {(int)deleteResponse.HttpStatusCode}");
        }
    }

    return new CopyObjectRequestResponse(request, response);
}
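// preferMultipartLogic and standardMultipartLogic, passed to CopyOrMoveObjectAsync above,
// are defined elsewhere in this class. A minimal sketch of what those predicates might
// look like, assuming the "prefer" variant switches to multipart as soon as the object
// spans more than one part while the standard variant waits for the single-request
// CopyObject size limit; the exact thresholds here are assumptions, not the library's
// confirmed values.
private static readonly Func<long, long, bool> preferMultipartLogic =
    (objectSize, partSize) => objectSize > partSize; // Assumed: multipart whenever more than one part is needed

private static readonly Func<long, long, bool> standardMultipartLogic =
    (objectSize, partSize) => objectSize > 5L * 1024 * 1024 * 1024; // Assumed: 5 GiB, the S3 single-request CopyObject maximum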