/// <summary>
/// Uploads <paramref name="stream"/> as a multipart upload, splitting it into parts of
/// <paramref name="sliceSize"/> bytes, and returns the completion response.
/// </summary>
/// <param name="client">S3-compatible client used for all calls.</param>
/// <param name="bucketName">Destination bucket.</param>
/// <param name="fileName">Destination object key.</param>
/// <param name="stream">Seekable source stream; its Length determines the part count.</param>
/// <param name="useChunkEncoding">Whether each part is sent with chunked transfer encoding.</param>
/// <param name="sliceSize">Size of each part in bytes (last part may be smaller).</param>
/// <returns>The response from CompleteMultipartUpload.</returns>
private async Task<CompleteMultipartUploadResponse> MultipartUploadInternal(IAmazonS3 client, string bucketName, string fileName, Stream stream, bool useChunkEncoding, int sliceSize)
{
    var initiateMultipartUploadResponse = await client.InitiateMultipartUploadAsync(bucketName, fileName);
    //UploadId
    var uploadId = initiateMultipartUploadResponse.UploadId;

    //Calculate slice part count
    var partSize = sliceSize;
    var fileSize = stream.Length;
    var partCount = fileSize / partSize;
    if (fileSize % partSize != 0)
    {
        partCount++;
    }

    // partETags collects the ETag of every uploaded part; the service validates each part
    // against this list on completion and then assembles the parts into the final object.
    var partETags = new List<PartETag>();
    for (var i = 0; i < partCount; i++)
    {
        var skipBytes = (long)partSize * i;
        // Size of this part; the last part is whatever remains.
        var size = (int)((partSize < fileSize - skipBytes) ? partSize : (fileSize - skipBytes));

        // BUG FIX: Stream.Read may return fewer bytes than requested (partial read),
        // which would have uploaded a partially-filled buffer. Loop until the part
        // buffer is completely filled.
        byte[] buffer = new byte[size];
        var offset = 0;
        while (offset < size)
        {
            var read = stream.Read(buffer, offset, size - offset);
            if (read <= 0)
            {
                throw new EndOfStreamException($"Unexpected end of stream while reading part {i + 1} of {partCount}.");
            }
            offset += read;
        }

        // Upload this part.
        var uploadPartResponse = await client.UploadPartAsync(new UploadPartRequest()
        {
            BucketName = bucketName,
            UploadId = uploadId,
            Key = fileName,
            InputStream = new MemoryStream(buffer),
            PartSize = size,
            PartNumber = i + 1,
            UseChunkEncoding = useChunkEncoding
        });
        partETags.Add(new PartETag(uploadPartResponse.PartNumber, uploadPartResponse.ETag));
        Logger.LogDebug("Upload part file ,key:{0},UploadId:{1},Complete {2}/{3}", fileName, uploadId, partETags.Count, partCount);
    }

    // Complete the multipart upload.
    var completeMultipartUploadResponse = await client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest()
    {
        BucketName = bucketName,
        Key = fileName,
        UploadId = uploadId,
        PartETags = partETags
    });
    return completeMultipartUploadResponse;
}
/// <inheritdoc/>
public async Task CompleteMultiPartUploadAsync(CompleteMultipartRequest request)
{
    // Translate our internal request model into the S3 completion request.
    var s3Request = new CompleteMultipartUploadRequest
    {
        BucketName = _environment.GetEnvironmentVariable("BUCKET_NAME"),
        Key = ConstructFileKey(request.FileName, request.FolderName),
        UploadId = request.UploadId,
        PartETags = request.PartETags,
    };

    await _s3.CompleteMultipartUploadAsync(s3Request);
}
/// <summary>
/// Completes a multipart upload: lists the already-uploaded parts, attaches their
/// ETags to a CompleteMultipartUploadRequest, and sends it to the service.
/// </summary>
/// <param name="existingBucketName">Bucket that holds the multipart upload.</param>
/// <param name="keyName">Object key of the upload.</param>
/// <param name="uploadId">Identifier of the multipart upload to complete.</param>
/// <param name="token">Token used to cancel the operation.</param>
/// <returns>The request that was sent to the service.</returns>
public async Task<CompleteMultipartUploadRequest> CompleteMultipartUploadAsync(string existingBucketName, string keyName, string uploadId, CancellationToken token)
{
    token.ThrowIfCancellationRequested();

    _log.Debug("Called CompleteMultipartUploadAsync with parameters keyName = \"" + keyName + "\" and uploadID = \"" + uploadId + "\".");

    try
    {
        var completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = keyName,
            UploadId = uploadId,
        };

        // Always list the parts so that an ETag can be supplied for every uploaded part.
        var uploadedParts = await this.ListPartsAsync(existingBucketName, keyName, uploadId, token).ConfigureAwait(false);
        var eTags = new List<PartETag>();
        foreach (var uploadedPart in uploadedParts)
        {
            eTags.Add(new PartETag(uploadedPart.PartNumber, uploadedPart.ETag));
        }
        completeRequest.AddPartETags(eTags);

        token.ThrowIfCancellationRequested();

        // NOTE(review): the service response is awaited but discarded; the method returns
        // the request object, matching its declared return type — confirm this is intended.
        CompleteMultipartUploadResponse completeResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest, token).ConfigureAwait(false);
        return completeRequest;
    }
    catch (Exception e)
    {
        // Cancellation is expected and not logged; everything else is logged with context.
        if (!(e is TaskCanceledException || e is OperationCanceledException))
        {
            string messagePart = " with parameters keyName = \"" + keyName + "\" and uploadID = \"" + uploadId + "\"";
            this.LogAmazonException(messagePart, e);
        }

        throw;
    }
}
/// <summary>
/// Copies an object within the bucket to a new key using multipart copy,
/// one 5 MB part at a time, and completes the upload.
/// </summary>
/// <param name="sourceKey">Key of the object to copy.</param>
/// <param name="destinationKey">Key of the copy to create.</param>
/// <param name="objectSize">Size of the source object in bytes.</param>
/// <param name="initiateRequest">Pre-built request used to start the multipart upload.</param>
/// <returns>The response from CompleteMultipartUpload.</returns>
private async Task<CompleteMultipartUploadResponse> MultipartCopy(string sourceKey, string destinationKey, long objectSize, InitiateMultipartUploadRequest initiateRequest)
{
    const long partSize = 5L * 1024 * 1024; // Part size is 5 MB.

    // Start the multipart upload on the destination key.
    var initResponse = await _s3Client.InitiateMultipartUploadAsync(initiateRequest);

    var copyResponses = new List<CopyPartResponse>();
    long bytePosition = 0;
    int partNumber = 1;
    while (bytePosition < objectSize)
    {
        // Byte ranges are inclusive; the last part ends at the final byte of the object.
        long lastByte = Math.Min(bytePosition + partSize - 1, objectSize - 1);

        var copyRequest = new CopyPartRequest
        {
            DestinationBucket = _bucket,
            DestinationKey = destinationKey,
            SourceBucket = _bucket,
            SourceKey = sourceKey,
            UploadId = initResponse.UploadId,
            FirstByte = bytePosition,
            LastByte = lastByte,
            PartNumber = partNumber,
        };
        copyResponses.Add(await _s3Client.CopyPartAsync(copyRequest));

        bytePosition += partSize;
        partNumber++;
    }

    // Assemble the copied parts into the destination object.
    var completeRequest = new CompleteMultipartUploadRequest
    {
        BucketName = _bucket,
        Key = destinationKey,
        UploadId = initResponse.UploadId
    };
    completeRequest.AddPartETags(copyResponses);

    return await _s3Client.CompleteMultipartUploadAsync(completeRequest);
}
/// <summary>
/// Completes a chunked S3 upload: collects the ETags of the successful chunks and
/// sends CompleteMultipartUpload. Always returns null (no result metadata).
/// </summary>
async Task<IDictionary<string, object>> IFileBackend.ChunkedUploadCompleteAsync(
    object context,
    string uploadKey,
    string id,
    IChunkStatus[] chunkStatuses,
    CancellationToken cancellationToken
)
{
    var started = DateTime.Now;
    var config = context as Context;
    IAmazonS3 s3Client = config.S3;

    Logger?.LogInformation($"Completing upload ...");
    Logger?.LogInformation($"[{DateTime.Now.Subtract(started).TotalMilliseconds}] Assembling parts");

    // Chunk indexes are zero-based while S3 part numbers start at 1, hence the +1.
    // If a chunk index appears more than once, the last reported State (the ETag) wins.
    var partList = new List<PartETag>();
    foreach (var chunkGroup in chunkStatuses.Where(c => c.Success).GroupBy(c => c.ChunkIndex))
    {
        partList.Add(new PartETag(chunkGroup.Key + 1, chunkGroup.Last().State));
    }

    var completeRequest = new CompleteMultipartUploadRequest
    {
        BucketName = config.BucketName,
        Key = id,
        UploadId = uploadKey,
        PartETags = partList
    };

    Logger?.LogInformation($"[{DateTime.Now.Subtract(started).TotalMilliseconds}] calling complete");
    var completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);
    Logger?.LogInformation($"[{DateTime.Now.Subtract(started).TotalMilliseconds}] CompleteMultipartUpload finished");
    Logger?.LogInformation($"Upload done.");

    return null;
}
/// <summary>
/// Uploads the file at the static <c>filePath</c> to S3 as a multipart upload using
/// 5 MB parts; on any failure the multipart upload is aborted.
/// </summary>
private static async Task UploadObjectAsync()
{
    // Create list to store upload part responses.
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();

    // Setup information required to initiate the multipart upload.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = keyName
    };

    // Initiate the upload.
    InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest);

    // Upload parts.
    long contentLength = new FileInfo(filePath).Length;
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB

    try
    {
        Console.WriteLine("Uploading parts");

        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++)
        {
            UploadPartRequest uploadRequest = new UploadPartRequest
            {
                BucketName = bucketName,
                Key = keyName,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                PartSize = partSize,
                FilePosition = filePosition,
                FilePath = filePath
            };

            // Track upload progress.
            uploadRequest.StreamTransferProgress += new EventHandler<StreamTransferProgressArgs>(UploadPartProgressEventCallback);

            // Upload a part and add the response to our list.
            uploadResponses.Add(await s3Client.UploadPartAsync(uploadRequest));

            filePosition += partSize;
        }

        // Setup to complete the upload.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        completeRequest.AddPartETags(uploadResponses);

        // Complete the upload.
        CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (Exception exception)
    {
        // BUG FIX: "{ 0}" is not a valid composite-format item — the original WriteLine
        // threw a FormatException inside the catch, masking the real error and
        // skipping the abort below.
        Console.WriteLine("An AmazonS3Exception was thrown: {0}", exception.Message);

        // Abort the upload so the orphaned parts don't accrue storage charges.
        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
    }
}
/// <summary>
/// Provides the actual implementation to move or copy an S3 object. Uses a single
/// CopyObject for small objects and a parallel multipart copy for large ones, then
/// optionally deletes the source.
/// </summary>
/// <param name="client">S3 client the operation runs against.</param>
/// <param name="request">Copy request describing source and destination.</param>
/// <param name="partSize">Part size in bytes for a multipart copy.</param>
/// <param name="deleteSource">If true, the source object is deleted after a successful copy (move semantics).</param>
/// <param name="useMulitpart">Predicate (objectSize, partSize) deciding whether to use multipart copy.</param>
/// <returns>The request/response pair on success; null on any failure (errors are swallowed — see note at the end).</returns>
private static async Task<CopyObjectRequestResponse> CopyOrMoveObjectAsync(this IAmazonS3 client, CopyObjectRequest request, long partSize, bool deleteSource, Func<long, long, bool> useMulitpart)
{
    // Retry wrapper: only OperationCanceledExceptions are retried with exponential backoff.
    ExponentialBackoffAndRetryClient backoffClient = new ExponentialBackoffAndRetryClient(4, 100, 1000)
    {
        ExceptionHandlingLogic = (ex) => ex is OperationCanceledException
    };

    try
    {
        ParameterTests.NonNull(request, "request");
        ParameterTests.OutOfRange(partSize >= Constants.MINIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size must be at least {Constants.MINIMUM_MULTIPART_PART_SIZE} bytes.");
        ParameterTests.OutOfRange(partSize <= Constants.MAXIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size cannot exceed {Constants.MAXIMUM_MULTIPART_PART_SIZE} bytes.");

        if (request.SourceKey == request.DestinationKey &&
            request.SourceBucket != null &&
            request.SourceBucket.Equals(request.DestinationBucket, StringComparison.OrdinalIgnoreCase))
        {
            throw new SourceDestinationSameException("The source and destination of the copy operation cannot be the same.", new CopyObjectRequest[] { request });
        }

        // Get the size of the object.
        GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest
        {
            BucketName = request.SourceBucket,
            Key = request.SourceKey
        };

        // BUG FIX: the original wrapped this call in `catch (Exception e) { throw e; }`,
        // which only destroyed the stack trace; let exceptions propagate naturally.
        GetObjectMetadataResponse metadataResponse = await backoffClient.RunAsync(() => client.GetObjectMetadataAsync(metadataRequest));
        long objectSize = metadataResponse.ContentLength; // Length in bytes.

        CopyObjectResponse response = null;

        if (UseMultipart(objectSize, partSize))
        {
            // If it takes more than a 5 GiB part to make 10000 or fewer parts, this
            // operation isn't supported for an object of this size.
            if (objectSize / partSize > Constants.MAXIMUM_PARTS)
            {
                throw new NotSupportedException($"The object size, {objectSize}, cannot be broken into fewer than {Constants.MAXIMUM_PARTS} parts using a part size of {partSize} bytes.");
            }

            List<Task<CopyPartResponse>> copyResponses = new List<Task<CopyPartResponse>>();

            // ObjectLockRetainUntilDate has a nullable backing field that, when set to
            // anything non-null, causes the x-amz-object-lock-retain-until-date header
            // to be sent, which in turn throws if the bucket has no ObjectLockConfiguration
            // — so that property is excluded from the conversion.
            InitiateMultipartUploadRequest initiateRequest = request.ConvertTo<InitiateMultipartUploadRequest>("ObjectLockRetainUntilDate");
            initiateRequest.BucketName = request.DestinationBucket;
            initiateRequest.Key = request.DestinationKey;

            InitiateMultipartUploadResponse initiateResponse = await backoffClient.RunAsync(() => client.InitiateMultipartUploadAsync(initiateRequest));

            try
            {
                long bytePosition = 0;
                int counter = 1;

                // Launch all of the part copies without awaiting them individually.
                while (bytePosition < objectSize)
                {
                    CopyPartRequest copyRequest = request.ConvertTo<CopyPartRequest>("ObjectLockRetainUntilDate");
                    copyRequest.UploadId = initiateResponse.UploadId;
                    copyRequest.FirstByte = bytePosition;
                    // On the last part the last byte is objectSize - 1; otherwise it is
                    // partSize - 1 past the current byte position (ranges are inclusive).
                    copyRequest.LastByte = ((bytePosition + partSize - 1) >= objectSize) ? objectSize - 1 : bytePosition + partSize - 1;
                    copyRequest.PartNumber = counter++;

                    copyResponses.Add(backoffClient.RunAsync(() => client.CopyPartAsync(copyRequest)));

                    bytePosition += partSize;
                }

                IEnumerable<CopyPartResponse> responses = (await Task.WhenAll(copyResponses)).OrderBy(x => x.PartNumber);

                // Set up to complete the copy.
                CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
                {
                    BucketName = request.DestinationBucket,
                    Key = request.DestinationKey,
                    UploadId = initiateResponse.UploadId
                };
                completeRequest.AddPartETags(responses);

                // Complete the copy.
                CompleteMultipartUploadResponse completeUploadResponse = await backoffClient.RunAsync(() => client.CompleteMultipartUploadAsync(completeRequest));

                response = completeUploadResponse.CopyProperties<CopyObjectResponse>();
                response.SourceVersionId = metadataResponse.VersionId;
            }
            catch (AmazonS3Exception)
            {
                // Abandon the multipart upload so the copied parts don't accrue charges.
                AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
                {
                    BucketName = request.DestinationBucket,
                    Key = request.DestinationKey,
                    UploadId = initiateResponse.UploadId
                };

                await backoffClient.RunAsync(() => client.AbortMultipartUploadAsync(abortRequest));

                // BUG FIX: rethrow with `throw;` (the original `throw e;` reset the stack trace).
                throw;
            }
        }
        else
        {
            response = await backoffClient.RunAsync(() => client.CopyObjectAsync(request));
        }

        if (response.HttpStatusCode != HttpStatusCode.OK)
        {
            throw new AmazonS3Exception($"Could not copy object from s3://{request.SourceBucket}/{request.SourceKey} to s3://{request.DestinationBucket}/{request.DestinationKey}. Received response : {(int)response.HttpStatusCode}");
        }
        else
        {
            // We already checked that the source and destination aren't the same object,
            // so it's safe to delete the source object here.
            if (deleteSource)
            {
                DeleteObjectRequest deleteRequest = new DeleteObjectRequest()
                {
                    BucketName = request.SourceBucket,
                    Key = request.SourceKey
                };

                DeleteObjectResponse deleteResponse = await backoffClient.RunAsync(() => client.DeleteObjectAsync(deleteRequest));

                if (deleteResponse.HttpStatusCode != HttpStatusCode.NoContent)
                {
                    throw new AmazonS3Exception($"Could not delete s3://{request.SourceBucket}/{request.SourceKey}. Received response : {(int)deleteResponse.HttpStatusCode}");
                }
            }

            return new CopyObjectRequestResponse(request, response);
        }
    }
    catch (Exception)
    {
        // NOTE(review): all failures are swallowed and surfaced to the caller as null,
        // matching the original behavior — consider logging or rethrowing instead.
        return null;
    }
}
/// <summary>
/// Forwards a CompleteMultipartUpload request to the underlying S3 client.
/// </summary>
/// <param name="request">The completion request to send.</param>
/// <returns>The service response.</returns>
public async Task<CompleteMultipartUploadResponse> CompleteMultipartUploadAsync(CompleteMultipartUploadRequest request)
{
    return await _amazonS3.CompleteMultipartUploadAsync(request);
}
/// <summary>
/// Uploads <paramref name="sourceStream"/> to S3 as a multipart upload, optionally
/// validating the whole-stream MD5 hash against <paramref name="sourceMD5"/>; each
/// part also carries its own MD5 digest. The upload is aborted on any failure.
/// </summary>
/// <param name="sourceStream">Stream to upload.</param>
/// <param name="sourceMD5">Expected MD5 of the full stream; null/empty disables validation.</param>
/// <param name="bucketName">Destination bucket.</param>
/// <param name="keyName">Destination object key.</param>
/// <param name="cancellationToken">Token used to cancel the transfer.</param>
private async Task MultiPartUploadAsync(Stream sourceStream, string sourceMD5, string bucketName, string keyName, CancellationToken cancellationToken)
{
    // Create list to store upload part responses.
    var uploadResponses = new List<UploadPartResponse>();

    // Setup information required to initiate the multipart upload.
    var initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = keyName
    };

    var validateMD5 = !string.IsNullOrEmpty(sourceMD5);
    if (validateMD5)
    {
        initiateRequest.Metadata.Add("Content-MD5", sourceMD5);
    }

    // Initiate the upload.
    var initResponse = await _amazonS3.InitiateMultipartUploadAsync(initiateRequest, cancellationToken);

    try
    {
        var blobHasher = validateMD5 ? CopyUtils.GetMD5Hasher() : null;
        var partSize = 10 * 1024 * 1024; // todo: config
        // NOTE: partNo is incremented before first use, so part numbers start at 2.
        // S3 only requires unique ascending part numbers, so this still works.
        var partNo = 1;
        await CopyUtils.CopyAsync(
            (buffer, ct) => CopyUtils.ReadStreamMaxBufferAsync(buffer, sourceStream, ct),
            async (buffer, count, cancellationToken2) =>
            {
                var blockMD5Hash = CopyUtils.GetMD5HashString(buffer, 0, count);
                if (validateMD5)
                {
                    CopyUtils.AppendMDHasherData(blobHasher, buffer, 0, count);
                }
                using (var ms = new MemoryStream(buffer, 0, count))
                {
                    ms.Position = 0;
                    partNo++;
                    var uploadRequest = new UploadPartRequest
                    {
                        BucketName = bucketName,
                        Key = keyName,
                        UploadId = initResponse.UploadId,
                        PartNumber = partNo,
                        PartSize = count,
                        InputStream = ms,
                        MD5Digest = blockMD5Hash
                    };

                    // Upload a part and add the response to our list.
                    var uploadResponse = await _amazonS3.UploadPartAsync(uploadRequest, cancellationToken);
                    uploadResponses.Add(uploadResponse);
                }
            },
            partSize,
            cancellationToken
        );

        if (validateMD5)
        {
            var blobHash = CopyUtils.GetMD5HashString(blobHasher);
            if ((!string.IsNullOrEmpty(sourceMD5)) && (sourceMD5 != blobHash))
            {
                throw new Exception("Invalid destination MD5");
            }
        }

        // Setup to complete the upload.
        var completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        completeRequest.AddPartETags(uploadResponses);

        // Complete the upload.
        var completeUploadResponse = await _amazonS3.CompleteMultipartUploadAsync(completeRequest, cancellationToken);
    }
    catch (Exception exception)
    {
        // BUG FIX: "{ 0}" is an invalid composite-format item — the original WriteLine
        // threw a FormatException inside the catch, which skipped the abort below and
        // surfaced the wrong exception to the caller.
        Console.WriteLine("An AmazonS3Exception was thrown: {0}", exception.Message);

        // Abort the upload so the already-uploaded parts are cleaned up server-side.
        var abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        await _amazonS3.AbortMultipartUploadAsync(abortMPURequest, cancellationToken);
    }
}
/// <summary>
/// Copies an SSE-C encrypted object to a new key using multipart copy; the same
/// customer-provided key encrypts the source reads and the target writes.
/// On failure the multipart upload is aborted.
/// </summary>
/// <param name="s3Client">Client used for all S3 calls.</param>
/// <param name="base64Key">Base64-encoded customer-provided encryption key (SSE-C).</param>
private static async Task CopyObjectAsync(IAmazonS3 s3Client, string base64Key)
{
    List<CopyPartResponse> uploadResponses = new List<CopyPartResponse>();

    // 1. Initialize.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = existingBucketName,
        Key = targetKeyName,
        ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
        ServerSideEncryptionCustomerProvidedKey = base64Key,
    };

    InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest);

    // 2. Upload Parts.
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB
    long firstByte = 0;
    long lastByte = partSize;

    try
    {
        // First find the source object size. Because the object is stored encrypted with a
        // customer-provided key, the encryption information must accompany the request.
        GetObjectMetadataRequest getObjectMetadataRequest = new GetObjectMetadataRequest()
        {
            BucketName = existingBucketName,
            Key = sourceKeyName,
            ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
            ServerSideEncryptionCustomerProvidedKey = base64Key
        };

        GetObjectMetadataResponse getObjectMetadataResponse = await s3Client.GetObjectMetadataAsync(getObjectMetadataRequest);

        long filePosition = 0;
        for (int i = 1; filePosition < getObjectMetadataResponse.ContentLength; i++)
        {
            CopyPartRequest copyPartRequest = new CopyPartRequest
            {
                UploadId = initResponse.UploadId,
                // Source.
                SourceBucket = existingBucketName,
                SourceKey = sourceKeyName,
                // Source object is stored using SSE-C. Provide encryption information.
                CopySourceServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                CopySourceServerSideEncryptionCustomerProvidedKey = base64Key,
                FirstByte = firstByte,
                // BUG FIX: byte ranges are inclusive and zero-based, so a full part ends at
                // firstByte + partSize - 1. The original used `lastByte` unadjusted, making
                // adjacent parts overlap by one byte (corrupting the copy) and sending an
                // out-of-range LastByte when the object size was an exact multiple of partSize.
                LastByte = Math.Min(lastByte, getObjectMetadataResponse.ContentLength) - 1,
                // Target.
                DestinationBucket = existingBucketName,
                DestinationKey = targetKeyName,
                PartNumber = i,
                // Encryption information for the target object.
                ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                ServerSideEncryptionCustomerProvidedKey = base64Key
            };
            uploadResponses.Add(await s3Client.CopyPartAsync(copyPartRequest));

            filePosition += partSize;
            firstByte += partSize;
            lastByte += partSize;
        }

        // Step 3: complete.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = targetKeyName,
            UploadId = initResponse.UploadId,
        };
        completeRequest.AddPartETags(uploadResponses);

        CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (Exception exception)
    {
        Console.WriteLine("Exception occurred: {0}", exception.Message);

        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = targetKeyName,
            UploadId = initResponse.UploadId
        };
        // BUG FIX: await the async abort; the original called the synchronous
        // AbortMultipartUpload, which blocks inside an async method and is not
        // available on all target frameworks.
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
    }
}
/// <summary>
/// Creates a sample object by multipart-uploading the file at the static <c>filePath</c>
/// with SSE-C (customer-provided key) encryption on every part; aborts on failure.
/// </summary>
/// <param name="base64Key">Base64-encoded customer-provided encryption key.</param>
/// <param name="s3Client">Client used for all S3 calls.</param>
private static async Task CreateSampleObjUsingClientEncryptionKeyAsync(string base64Key, IAmazonS3 s3Client)
{
    // Collected part responses; their ETags are required to complete the upload.
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();

    // 1. Initialize the multipart upload with the customer-provided encryption key.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = existingBucketName,
        Key = sourceKeyName,
        ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
        ServerSideEncryptionCustomerProvidedKey = base64Key
    };

    InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest);

    // 2. Upload the file in 5 MB parts.
    long contentLength = new FileInfo(filePath).Length;
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB

    try
    {
        long offset = 0;
        int partNumber = 1;
        while (offset < contentLength)
        {
            UploadPartRequest uploadRequest = new UploadPartRequest
            {
                BucketName = existingBucketName,
                Key = sourceKeyName,
                UploadId = initResponse.UploadId,
                PartNumber = partNumber,
                PartSize = partSize,
                FilePosition = offset,
                FilePath = filePath,
                ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                ServerSideEncryptionCustomerProvidedKey = base64Key
            };

            uploadResponses.Add(await s3Client.UploadPartAsync(uploadRequest));

            offset += partSize;
            partNumber++;
        }

        // 3. Complete the upload with the collected part ETags.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = sourceKeyName,
            UploadId = initResponse.UploadId,
        };
        completeRequest.AddPartETags(uploadResponses);

        CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (Exception exception)
    {
        Console.WriteLine("Exception occurred: {0}", exception.Message);

        // On any failure, abort so the partial upload is cleaned up server-side.
        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = sourceKeyName,
            UploadId = initResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
    }
}
// Integration test: multipart-uploads a generated 15 MB file through the encryption
// client (three parts read sequentially from one shared stream), verifies ListParts,
// completes the upload, then downloads through the decryption client and compares the
// round-tripped file and its metadata. Temp files are always cleaned up.
private static async System.Threading.Tasks.Task MultipartEncryptionTestAsync(IAmazonS3 s3EncryptionClient, IAmazonS3 s3DecryptionClient, string bucketName)
{
    var guid = Guid.NewGuid();
    // Temp files: the generated source and the copy retrieved back from S3.
    var filePath = Path.Combine(Path.GetTempPath(), $"multi-{guid}.txt");
    var retrievedFilepath = Path.Combine(Path.GetTempPath(), $"retrieved-{guid}.txt");
    var totalSize = MegaByteSize * 15;

    UtilityMethods.GenerateFile(filePath, totalSize);
    var key = $"key-{guid}";

    // One stream is shared by all three upload parts; each part reads from the
    // current stream position, so part order must not be changed.
    Stream inputStream = File.OpenRead(filePath);
    try
    {
        var initRequest = new InitiateMultipartUploadRequest()
        {
            BucketName = bucketName,
            Key = key,
            StorageClass = S3StorageClass.OneZoneInfrequentAccess,
            ContentType = "text/html"
        };

        var initResponse = await s3EncryptionClient.InitiateMultipartUploadAsync(initRequest).ConfigureAwait(false);

        // Upload part 1
        var uploadRequest = new UploadPartRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 1,
            PartSize = 5 * MegaByteSize,
            InputStream = inputStream
        };

        var up1Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false);

        // Upload part 2
        uploadRequest = new UploadPartRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 2,
            PartSize = 5 * MegaByteSize,
            InputStream = inputStream
        };

        var up2Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false);

        // Upload part 3 (no PartSize: consumes the remainder of the stream; IsLastPart
        // lets the encryption client finalize its cipher stream).
        uploadRequest = new UploadPartRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 3,
            InputStream = inputStream,
            IsLastPart = true
        };

        var up3Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false);

        var listPartRequest = new ListPartsRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };

        // All three parts should be listed with the ETags returned at upload time.
        var listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false);
        Assert.Equal(3, listPartResponse.Parts.Count);
        Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber);
        Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag);
        Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber);
        Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag);
        Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber);
        Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag);

        // Paging: limiting MaxParts to 1 must return a single part.
        listPartRequest.MaxParts = 1;
        listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false);
        Assert.Equal(1, listPartResponse.Parts.Count);

        // Complete the response
        var compRequest = new CompleteMultipartUploadRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        compRequest.AddPartETags(up1Response, up2Response, up3Response);

        var compResponse = await s3EncryptionClient.CompleteMultipartUploadAsync(compRequest).ConfigureAwait(false);
        Assert.Equal(bucketName, compResponse.BucketName);
        Assert.NotNull(compResponse.ETag);
        Assert.Equal(key, compResponse.Key);
        Assert.NotNull(compResponse.Location);

        // Get the file back from S3 and make sure it is still the same.
        var getRequest = new GetObjectRequest()
        {
            BucketName = bucketName,
            Key = key
        };

        var getResponse = await s3DecryptionClient.GetObjectAsync(getRequest).ConfigureAwait(false);
        getResponse.WriteResponseStreamToFile(retrievedFilepath);
        UtilityMethods.CompareFiles(filePath, retrievedFilepath);

        // The content type supplied at initiation must survive the round trip.
        var metaDataRequest = new GetObjectMetadataRequest()
        {
            BucketName = bucketName,
            Key = key
        };
        var metaDataResponse = await s3DecryptionClient.GetObjectMetadataAsync(metaDataRequest).ConfigureAwait(false);
        Assert.Equal("text/html", metaDataResponse.Headers.ContentType);
    }
    finally
    {
        // Clean up the open stream and both temp files regardless of outcome.
        inputStream.Close();
        if (File.Exists(filePath))
        {
            File.Delete(filePath);
        }
        if (File.Exists(retrievedFilepath))
        {
            File.Delete(retrievedFilepath);
        }
    }
}
/// <summary>
/// Writes <paramref name="inStream"/> to the input bucket as a multipart upload in
/// 3 MiB chunks, appending per-part sizes and the final status to <c>retStr</c>.
/// Aborts the upload (and records the error) on failure.
/// </summary>
static async Task S3BucketIOWorker(MemoryStream inStream, string fileName)
{
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();

    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = inBucketName,
        Key = fileName
    };

    InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest);

    try
    {
        const long chunkSize = 3145728; // 3 MiB per part
        long sLen = inStream.Length;

        if (sLen > 0)
        {
            int partNumber = 1;
            while (sLen > (partNumber - 1) * chunkSize)
            {
                long partSize;
                if (sLen <= partNumber * chunkSize)
                {
                    // Final chunk: only the remainder is uploaded; if the stream length
                    // divides evenly there is nothing left to send.
                    partSize = sLen % chunkSize;
                    if (partSize == 0)
                    {
                        break;
                    }
                }
                else
                {
                    partSize = chunkSize;
                }

                UploadPartRequest uploadRequest = new UploadPartRequest
                {
                    BucketName = inBucketName,
                    Key = fileName,
                    UploadId = initResponse.UploadId,
                    PartNumber = partNumber,
                    PartSize = partSize,
                    InputStream = inStream,
                };

                retStr += partSize.ToString() + "\n";
                uploadResponses.Add(await s3Client.UploadPartAsync(uploadRequest));
                partNumber++;
            }
        }

        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = inBucketName,
            Key = fileName,
            UploadId = initResponse.UploadId,
        };
        completeRequest.AddPartETags(uploadResponses);

        CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);
        retStr += completeUploadResponse.HttpStatusCode.ToString();
    }
    catch (Exception exception)
    {
        retStr += "An AmazonS3Exception was thrown: " + exception.Message;

        // Abort the upload.
        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = inBucketName,
            Key = fileName,
            UploadId = initResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
    }
}
/// <summary>
/// Uploads a file to S3 with resume support: looks up an existing upload record via the
/// API, initiates a fresh multipart upload when there is none (or the previous transfer
/// finished), skips parts S3 already has, then completes the upload and marks the record
/// as Uploaded. All failures are logged and swallowed.
/// </summary>
/// <param name="filePath">Path of the local file to upload.</param>
private static async Task UploadFileAsync(string filePath)
{
    try
    {
        List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();
        var fileInfo = new FileInfo(filePath);

        //do some logic...
        var uploadToApi = await ffApi.GetUploadOnFileName(fileInfo.Name);
        string uploadId = uploadToApi.S3Id;

        // No usable record yet, or the previous transfer already completed: start fresh.
        if (uploadToApi.Id == null || uploadToApi.Status == UploadType.Uploaded)
        {
            string key = generateID();
            var request = new InitiateMultipartUploadRequest
            {
                BucketName = bucketName,
                Key = key,
                CannedACL = S3CannedACL.AuthenticatedRead
            };
            request.Metadata.Add("fname", fileInfo.Name);
            request.Metadata.Add("transfer-created", DateTime.UtcNow.ToString());
            request.Metadata.Add("up-version", "0.1;closed");

            InitiateMultipartUploadResponse response = await s3Client.InitiateMultipartUploadAsync(request);
            uploadId = response.UploadId;
            uploadToApi = await ffApi.CreateUpload(new Upload { filename = fileInfo.Name, S3Id = uploadId, Key = key, Status = UploadType.Initialized });
        }

        Console.WriteLine("uploadid: " + uploadId);

        // Upload parts.
        long contentLength = new FileInfo(filePath).Length;
        long partSize = 50000000; // 50 mb
        Console.WriteLine("part size: " + partSize + " bytes");
        Console.WriteLine("Finding parts");

        // Ask S3 which parts already exist so the transfer can resume where it left off.
        ListPartsRequest listPartsRequest = new ListPartsRequest
        {
            BucketName = bucketName,
            Key = uploadToApi.Key,
            UploadId = uploadId
        };
        var listParts = await s3Client.ListPartsAsync(listPartsRequest);
        int currentPart = Math.Max(listParts.NextPartNumberMarker, 0);

        long filePosition = currentPart * partSize;
        for (int partNumber = currentPart + 1; filePosition < contentLength; partNumber++)
        {
            Console.WriteLine("Uploading part index: " + partNumber);
            UploadPartRequest uploadRequest = new UploadPartRequest
            {
                BucketName = bucketName,
                Key = uploadToApi.Key,
                UploadId = uploadId,
                PartNumber = partNumber,
                PartSize = partSize,
                FilePosition = filePosition,
                FilePath = filePath
            };

            // Track upload progress.
            uploadRequest.StreamTransferProgress += new EventHandler<StreamTransferProgressArgs>(UploadPartProgressEventCallback);

            // Upload a part.
            var res = await s3Client.UploadPartAsync(uploadRequest);
            filePosition += partSize;
        }

        // Re-list the parts and build the ETag list required to complete the upload.
        listParts = await s3Client.ListPartsAsync(listPartsRequest);
        List<PartETag> partETags = new List<PartETag>();
        foreach (var p in listParts.Parts)
        {
            partETags.Add(new PartETag { ETag = p.ETag, PartNumber = p.PartNumber });
        }

        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = uploadToApi.Key,
            UploadId = uploadId
        };
        completeRequest.AddPartETags(partETags);

        // Complete the upload and record success via the API.
        CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest);
        uploadToApi.Status = UploadType.Uploaded;
        var updated = await ffApi.UpdateUpload(uploadToApi, uploadId);
    }
    catch (Exception x)
    {
        Log.Error(x.ToString());
    }
}
/// <summary>
/// Copies an object from the source bucket to the target bucket using the
/// server-side multipart copy API (CopyPart), which avoids downloading the
/// data locally and supports objects larger than the 5 GB single-copy limit.
/// On any failure the multipart upload is aborted so its parts are discarded.
/// </summary>
private static async Task MPUCopyObjectAsync()
{
    // Responses of the per-part copies; their ETags complete the upload.
    List<CopyPartResponse> copyResponses = new List<CopyPartResponse>();

    // Setup information required to initiate the multipart upload.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = targetBucket,
        Key = targetObjectKey
    };

    // Initiate the upload and save the upload ID.
    InitiateMultipartUploadResponse initResponse =
        await s3Client.InitiateMultipartUploadAsync(initiateRequest);
    String uploadId = initResponse.UploadId;

    try
    {
        // Get the size of the source object so we know how many parts to copy.
        GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest
        {
            BucketName = sourceBucket,
            Key = sourceObjectKey
        };
        GetObjectMetadataResponse metadataResponse =
            await s3Client.GetObjectMetadataAsync(metadataRequest);
        long objectSize = metadataResponse.ContentLength; // Length in bytes.

        // Copy the parts.
        long partSize = 5 * (long)Math.Pow(2, 20); // Part size is 5 MB.
        long bytePosition = 0;
        for (int i = 1; bytePosition < objectSize; i++)
        {
            CopyPartRequest copyRequest = new CopyPartRequest
            {
                DestinationBucket = targetBucket,
                DestinationKey = targetObjectKey,
                SourceBucket = sourceBucket,
                SourceKey = sourceObjectKey,
                UploadId = uploadId,
                FirstByte = bytePosition,
                // Clamp the final part to the last byte of the object.
                LastByte = bytePosition + partSize - 1 >= objectSize
                    ? objectSize - 1
                    : bytePosition + partSize - 1,
                PartNumber = i
            };
            copyResponses.Add(await s3Client.CopyPartAsync(copyRequest));
            bytePosition += partSize;
        }

        // Set up to complete the copy.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = targetBucket,
            Key = targetObjectKey,
            UploadId = initResponse.UploadId
        };
        completeRequest.AddPartETags(copyResponses);

        // Complete the copy.
        await s3Client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (AmazonS3Exception e)
    {
        Console.WriteLine("Error encountered on server. Message:'{0}' when writing an object", e.Message);
        // BUGFIX: the original only logged, leaving a dangling multipart
        // upload whose stored parts are billed until explicitly aborted.
        await s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest
        {
            BucketName = targetBucket,
            Key = targetObjectKey,
            UploadId = uploadId
        });
    }
    catch (Exception e)
    {
        Console.WriteLine("Unknown encountered on server. Message:'{0}' when writing an object", e.Message);
        // Same cleanup for unexpected failures.
        await s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest
        {
            BucketName = targetBucket,
            Key = targetObjectKey,
            UploadId = uploadId
        });
    }
}
public async Task <IActionResult> CompleteMultiPartUpload([FromBody] CompleteMultipartUploadRequest request) { await _s3.CompleteMultipartUploadAsync(request); return(new OkResult()); }
/// <summary>
/// Creates and uploads an object using a multi-part upload, encrypting every
/// part with the caller-supplied SSE-C key. On any failure the multipart
/// upload is aborted so S3 discards the orphaned parts.
/// </summary>
/// <param name="client">The initialized Amazon S3 object used to
/// initialize and perform the multi-part upload.</param>
/// <param name="existingBucketName">The name of the bucket to which
/// the object will be uploaded.</param>
/// <param name="sourceKeyName">The source object name.</param>
/// <param name="filePath">The location of the source object.</param>
/// <param name="base64Key">The encryption key to use with the upload.</param>
public static async Task CreateSampleObjUsingClientEncryptionKeyAsync(
    IAmazonS3 client,
    string existingBucketName,
    string sourceKeyName,
    string filePath,
    string base64Key)
{
    var partResponses = new List<UploadPartResponse>();

    // Every request in the upload (initiate, parts, complete) must carry the
    // same customer-provided encryption settings.
    var startRequest = new InitiateMultipartUploadRequest
    {
        BucketName = existingBucketName,
        Key = sourceKeyName,
        ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
        ServerSideEncryptionCustomerProvidedKey = base64Key,
    };
    InitiateMultipartUploadResponse startResponse =
        await client.InitiateMultipartUploadAsync(startRequest);

    long fileLength = new FileInfo(filePath).Length;
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB

    try
    {
        long offset = 0;
        int partNumber = 1;
        while (offset < fileLength)
        {
            var partRequest = new UploadPartRequest
            {
                BucketName = existingBucketName,
                Key = sourceKeyName,
                UploadId = startResponse.UploadId,
                PartNumber = partNumber,
                PartSize = partSize,
                FilePosition = offset,
                FilePath = filePath,
                ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                ServerSideEncryptionCustomerProvidedKey = base64Key,
            };

            // Upload the part and keep its response for the ETag list.
            partResponses.Add(await client.UploadPartAsync(partRequest));

            offset += partSize;
            partNumber++;
        }

        var finishRequest = new CompleteMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = sourceKeyName,
            UploadId = startResponse.UploadId,
        };
        finishRequest.AddPartETags(partResponses);
        await client.CompleteMultipartUploadAsync(finishRequest);
    }
    catch (Exception exception)
    {
        Console.WriteLine($"Exception occurred: {exception.Message}");

        // If there was an error, abort the multi-part upload.
        var abortRequest = new AbortMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = sourceKeyName,
            UploadId = startResponse.UploadId,
        };
        await client.AbortMultipartUploadAsync(abortRequest);
    }
}
/// <summary>
/// Streams data of unknown length to S3 via the low-level multipart API,
/// buffering roughly PART_SIZE bytes per part, and returns a pre-signed GET
/// URL (valid 24 hours) for the uploaded object. Aborts the multipart upload
/// and rethrows on any failure.
/// </summary>
/// <param name="stream">Source stream; disposed by this method.</param>
/// <param name="callback">Optional progress callback, invoked after every
/// read and after every completed part.</param>
/// <returns>A pre-signed URL for reading the new object.</returns>
public async Task<string> UploadFileAsync(Stream stream, Action<UploadEvent> callback)
{
    var objectKey = $"{_appOptions.UploadBucketPrefix}/{Guid.NewGuid()}";
    _logger.LogInformation($"Start uploading to {objectKey}");

    var initateResponse = await _s3Client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
    {
        BucketName = _appOptions.PhotoStorageBucket,
        Key = objectKey
    });
    _logger.LogInformation($"Initiated multi part upload with id {initateResponse.UploadId}");

    try
    {
        using var inputStream = stream;
        var partETags = new List<PartETag>();
        // Pooled buffers: one for reads, one big enough for a full part plus
        // some read slack (a part flushes only after exceeding PART_SIZE).
        var readBuffer = ArrayPool<byte>.Shared.Rent(READ_BUFFER_SIZE);
        var partBuffer = ArrayPool<byte>.Shared.Rent(PART_SIZE + READ_BUFFER_SIZE * 3);
        var callbackEvent = new UploadEvent();
        var nextUploadBuffer = new MemoryStream(partBuffer);
        try
        {
            var partNumber = 1;
            int readCount;
            while ((readCount = await inputStream.ReadAsync(readBuffer, 0, readBuffer.Length)) != 0)
            {
                callbackEvent.UploadBytes += readCount;
                callback?.Invoke(callbackEvent);

                await nextUploadBuffer.WriteAsync(readBuffer, 0, readCount);
                if (PART_SIZE < nextUploadBuffer.Position)
                {
                    // BUGFIX: the original used readCount == READ_BUFFER_SIZE,
                    // which is inverted — a FULL read suggests more data
                    // remains. A short read is the end-of-stream hint. Note
                    // this is still a heuristic: Stream.Read may legally
                    // return a short read mid-stream.
                    var isLastPart = readCount < READ_BUFFER_SIZE;
                    var partSize = nextUploadBuffer.Position;
                    nextUploadBuffer.Position = 0;
                    var partResponse = await _s3Client.UploadPartAsync(new UploadPartRequest
                    {
                        BucketName = _appOptions.PhotoStorageBucket,
                        Key = objectKey,
                        UploadId = initateResponse.UploadId,
                        InputStream = nextUploadBuffer,
                        PartSize = partSize,
                        PartNumber = partNumber,
                        IsLastPart = isLastPart
                    });
                    // (Also fixed: the log message was missing its closing parenthesis.)
                    _logger.LogInformation(
                        $"Uploaded part {partNumber}. (Last part = {isLastPart}, Part size = {partSize}, Upload Id: {initateResponse.UploadId})");

                    partETags.Add(new PartETag { PartNumber = partResponse.PartNumber, ETag = partResponse.ETag });

                    partNumber++;
                    // Reuse the pooled backing array for the next part; safe
                    // because the previous UploadPart has fully completed.
                    nextUploadBuffer = new MemoryStream(partBuffer);
                    callbackEvent.UploadParts++;
                    callback?.Invoke(callbackEvent);
                }
            }

            // Flush whatever remains as the final (possibly undersized) part.
            if (nextUploadBuffer.Position != 0)
            {
                var partSize = nextUploadBuffer.Position;
                nextUploadBuffer.Position = 0;
                var partResponse = await _s3Client.UploadPartAsync(new UploadPartRequest
                {
                    BucketName = _appOptions.PhotoStorageBucket,
                    Key = objectKey,
                    UploadId = initateResponse.UploadId,
                    InputStream = nextUploadBuffer,
                    PartSize = partSize,
                    PartNumber = partNumber,
                    IsLastPart = true
                });
                _logger.LogInformation(
                    $"Uploaded final part. (Part size = {partSize}, Upload Id: {initateResponse.UploadId})");
                partETags.Add(new PartETag { PartNumber = partResponse.PartNumber, ETag = partResponse.ETag });

                callbackEvent.UploadParts++;
                callback?.Invoke(callbackEvent);
            }
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(partBuffer);
            ArrayPool<byte>.Shared.Return(readBuffer);
        }

        await _s3Client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
        {
            BucketName = _appOptions.PhotoStorageBucket,
            Key = objectKey,
            UploadId = initateResponse.UploadId,
            PartETags = partETags
        });
        _logger.LogInformation(
            $"Completed multi part upload. (Part count: {partETags.Count}, Upload Id: {initateResponse.UploadId})");
    }
    catch (Exception e)
    {
        // Abort so S3 discards the orphaned parts, then surface the error.
        await _s3Client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest
        {
            BucketName = _appOptions.PhotoStorageBucket,
            Key = objectKey,
            UploadId = initateResponse.UploadId
        });
        _logger.LogError($"Error uploading to S3 with error: {e.Message}");
        throw;
    }

    return _s3Client.GetPreSignedURL(new GetPreSignedUrlRequest
    {
        BucketName = _appOptions.PhotoStorageBucket,
        Key = objectKey,
        Verb = HttpVerb.GET,
        Expires = DateTime.UtcNow.AddDays(1)
    });
}
public StorageModel Create(StorageModel model) { var result = fileRepo.Create(new FileDto() { Id = Guid.NewGuid().ToString(), Length = model.data.Length, Filename = model.Filename }); var data = model.data.ToList(); List <byte[]> items = new List <byte[]>(); var chunkSize = 1000000; while (data.Any()) { items.Add(data.Take(chunkSize).ToArray()); data = data.Skip(chunkSize).ToList(); } var dataPieces = items.Select(x => { var hashbytes = System.Security.Cryptography.SHA1.Create().ComputeHash(x); var hashString = ""; var hash = hashbytes.Select(z => hashString += String.Format("{0:x2}", z)).ToArray(); return(new FilePieceModel() { Id = Guid.NewGuid().ToString(), Length = x.Length, Hash = hashString, Bytes = x }); }) .ToList() .Select(x => { try { var DoesExist = s3Client.GetObjectAsync(DefaultBucket, x.Hash).Result; return(piece.Where(z => z.Hash == x.Hash).FirstOrDefault()); } catch { var initResult = s3Client.InitiateMultipartUploadAsync(DefaultBucket, x.Hash).Result; var requ = new UploadPartRequest() { InputStream = new MemoryStream(x.Bytes), BucketName = DefaultBucket, Key = x.Hash }; try{ var response = s3Client.UploadPartAsync(requ).Result; if (true) { } } catch {} s3Client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest() { Key = x.Hash, BucketName = DefaultBucket }); return(piece.Create(new FilePieceDto() { Id = x.Id, Length = x.Length, Hash = x.Hash })); } }) .ToList(); int index = 0; dataPieces.Select(x => { var connect = new FilePiecesDto() { Id = Guid.NewGuid().ToString(), FilePieceId = x.Id, FileId = result.Id, PieceNumber = index++ }; return(pieces.Create(connect)); }).ToList(); model.data = null; model.Id = result.Id; return(model); }
/// <summary>
/// Appends <paramref name="stream"/> to an in-progress S3 multipart upload,
/// reading fixed-size blocks and uploading each block as one part. The state
/// needed to resume (provider upload id, block number, serialized part ETags,
/// uploaded length) is persisted on <paramref name="upload"/> after every
/// part. When the uploaded length reaches the expected total length, the
/// multipart upload is completed on S3.
/// </summary>
/// <param name="upload">Tracking entity; mutated here and saved via DbContext.</param>
/// <param name="stream">Source data to append.</param>
/// <param name="cancellationToken">Checked between blocks; a block already
/// sent to S3 is always recorded in the database before cancellation takes
/// effect.</param>
/// <returns>The number of bytes written to S3 during this call.</returns>
public override async Task<long> AppendToUploadAsync(Upload upload, Stream stream, CancellationToken cancellationToken)
{
    try
    {
        // First call for this upload: start counting blocks at zero.
        if (!upload.BlockNumber.HasValue)
        {
            upload.BlockNumber = 0;
        }

        var blobName = upload.Id + upload.Extension;

        // Nothing left to do — the upload already reached its full length.
        if (upload.Length == upload.UploadedLength)
        {
            return (0);
        }

        int bytesRead = 0;
        long bytesWritten = 0;

        // No S3 upload yet: initiate one and remember its id for resumption.
        if (upload.ProviderUploadId == null)
        {
            // Setup information required to initiate the multipart upload.
            var initiateRequest = new InitiateMultipartUploadRequest
            {
                BucketName = _bucket,
                Key = blobName,
            };

            // Initiate the upload.
            InitiateMultipartUploadResponse initResponse =
                await _s3.InitiateMultipartUploadAsync(initiateRequest, cancellationToken);
            upload.ProviderUploadId = initResponse.UploadId;
        }

        // Read the stream block by block; each full (or final partial) block
        // becomes one S3 part.
        do
        {
            // Honor cancellation only at block boundaries so already-uploaded
            // parts are never left unrecorded.
            if (cancellationToken.IsCancellationRequested)
            {
                _logger.LogDebug("Request to append cancelled for file '{id}'", blobName);
                break;
            }

            var buffer = new byte[blockSize];

            // Checking for last block (it will never reach 5MB exactly)
            int lastBytesRead = 0;
            bytesRead = 0;
            do
            {
                // Keep reading until the block buffer is full or the stream
                // ends — a single Read may return fewer bytes than requested.
                lastBytesRead = await stream.ReadAsync(
                    buffer,
                    bytesRead,
                    // Ensure we don't overread the buffer
                    blockSize - bytesRead,
                    cancellationToken);

                bytesRead += lastBytesRead;
            }
            while (bytesRead < blockSize && lastBytesRead > 0);

            // Stream exhausted with no new data: stop.
            if (bytesRead == 0)
            {
                break;
            }

            using (MemoryStream memoryBufferStream = new MemoryStream(buffer, 0, bytesRead))
            {
                var uploadPartRequest = new UploadPartRequest
                {
                    BucketName = _bucket,
                    Key = blobName,
                    UploadId = upload.ProviderUploadId,
                    PartNumber = upload.BlockNumber.Value + 1, // Amazon S3 part uploads start at one.
                    PartSize = bytesRead,
                    InputStream = memoryBufferStream
                };

                var result = await _s3.UploadPartAsync(uploadPartRequest, cancellationToken);

                // Persist the part's ETag as base64-wrapped JSON appended to a
                // single space-separated string column, so completion can
                // rebuild the full PartETag list later (possibly in another
                // process).
                PartETag partETag = new PartETag(result.PartNumber, result.ETag);
                string serialised = JsonConvert.SerializeObject(partETag);
                string eTag = Convert.ToBase64String(Encoding.UTF8.GetBytes(serialised));
                upload.BlockIds += $"{eTag} ";
            }

            bytesWritten += bytesRead;
            upload.BlockNumber++;
            upload.UploadedLength += bytesRead;

            _logger.LogDebug("Read bytes {bytesRead}, written {bytesWritten}, block number {blockNumber} on file {fileId}",
                bytesRead, bytesWritten, upload.BlockNumber, blobName);

            // note: cancellation token *not* supplied as this must finish because we have sucessfully uploaded the latest block to S3.
            await DbContext.SaveChangesAsync();
        }
        while (bytesRead != 0);

        // All expected bytes are on S3: assemble the parts into the final object.
        if (upload.Length == upload.UploadedLength)
        {
            var blockIds = upload.BlockIds.Split(new char[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);

            var completeRequest = new CompleteMultipartUploadRequest
            {
                BucketName = _bucket,
                Key = blobName,
                UploadId = upload.ProviderUploadId,
            };

            // Rebuild the PartETag list from the persisted base64/JSON entries.
            List<PartETag> partETags = new List<PartETag>();
            foreach (var blockId in blockIds)
            {
                string serialised = Encoding.UTF8.GetString(Convert.FromBase64String(blockId));
                PartETag partETag = JsonConvert.DeserializeObject<PartETag>(serialised);
                partETags.Add(partETag);
            }

            completeRequest.AddPartETags(partETags);

            await _s3.CompleteMultipartUploadAsync(completeRequest);
            await DbContext.SaveChangesAsync();
        }

        return (bytesWritten);
    }
    catch (Exception e)
    {
        // Log and rethrow; persisted state still allows a later resume.
        _logger.LogError(e, "Failed to append data");
        throw;
    }
}