/// <summary>
/// Sends an InitiateMultipartUploadRequest and returns the service response.
/// </summary>
/// <param name="existingBucketName">Bucket that will receive the upload.</param>
/// <param name="keyName">Object key the upload is created under.</param>
/// <param name="token">Token used to cancel the operation.</param>
/// <returns>The response containing the new upload id.</returns>
public async Task<InitiateMultipartUploadResponse> InitiateMultipartUploadAsync(string existingBucketName, string keyName, CancellationToken token)
{
    token.ThrowIfCancellationRequested();
    _log.Debug("Called InitiateMultipartUploadAsync with parameter keyName = \"" + keyName + "\".");

    try
    {
        var request = new InitiateMultipartUploadRequest
        {
            BucketName = existingBucketName,
            Key = keyName
        };

        return await s3Client.InitiateMultipartUploadAsync(request, token).ConfigureAwait(false);
    }
    catch (Exception e)
    {
        // Log with the same context fragment the entry log used, then rethrow.
        this.LogAmazonException(" with parameter keyName = \"" + keyName + "\"", e);
        throw;
    }
}
/// <summary>
/// Uploads a single 5 MB part manually with SSE and Standard-IA settings,
/// verifies they are echoed back, completes the upload, and reads the object back.
/// </summary>
public async Task ManualSinglePartUpload()
{
    string key = nameof(ManualSinglePartUpload);

    InitiateMultipartUploadResponse init = await ObjectClient.InitiateMultipartUploadAsync(BucketName, key, request =>
    {
        request.SseAlgorithm = SseAlgorithm.Aes256;
        request.StorageClass = StorageClass.StandardIa;
    }).ConfigureAwait(false);

    Assert.True(init.IsSuccess);
    Assert.Equal(BucketName, init.Bucket);
    Assert.Equal(key, init.Key);
    Assert.NotNull(init.UploadId);

    byte[] data = new byte[1024 * 1024 * 5];
    data[0] = (byte)'a';

    UploadPartResponse part = await ObjectClient.UploadPartAsync(BucketName, key, 1, init.UploadId, new MemoryStream(data)).ConfigureAwait(false);
    Assert.True(part.IsSuccess);
    Assert.NotNull(part.ETag);
    Assert.Equal(SseAlgorithm.Aes256, part.SseAlgorithm);
    Assert.Equal(StorageClass.StandardIa, part.StorageClass);

    CompleteMultipartUploadResponse complete = await ObjectClient.CompleteMultipartUploadAsync(BucketName, key, init.UploadId, new[] { part }).ConfigureAwait(false);
    Assert.True(complete.IsSuccess);
    Assert.NotNull(complete.ETag);

    // Fetch the object and confirm the content round-tripped.
    GetObjectResponse get = await ObjectClient.GetObjectAsync(BucketName, key).ConfigureAwait(false);
    Assert.True(get.IsSuccess);
    Assert.Equal(data, await get.Content.AsDataAsync().ConfigureAwait(false));
}
/// <summary>
/// Exercises standard AWS S3 operations: a delimited object listing and an
/// initiate/abort multipart upload round-trip.
/// </summary>
public void TestStandardAwsStuff()
{
    var listRequest = new ListObjectsRequest
    {
        BucketName = temp_bucket,
        Prefix = "folder1/sub2/",
        Delimiter = "/",
        MaxKeys = 1
    };
    client.ListObjects(listRequest);

    var initRequest = new InitiateMultipartUploadRequest
    {
        BucketName = temp_bucket,
        Key = "mpu-1",
    };
    InitiateMultipartUploadResponse initResponse = client.InitiateMultipartUpload(initRequest);

    // Abort immediately so no incomplete upload is left behind.
    var abortRequest = new AbortMultipartUploadRequest
    {
        BucketName = temp_bucket,
        Key = "mpu-1",
        UploadId = initResponse.UploadId
    };
    client.AbortMultipartUpload(abortRequest);
}
/// <summary>
/// Starts a multipart upload, uploads one part without completing, and verifies
/// the upload appears in the bucket's multipart-upload listing.
/// </summary>
public async Task ListIncompleteParts()
{
    await CreateTempBucketAsync(async bucket =>
    {
        InitiateMultipartUploadResponse init = await ObjectClient.InitiateMultipartUploadAsync(bucket, nameof(ListIncompleteParts)).ConfigureAwait(false);

        byte[] data = new byte[5 * 1024];
        using (MemoryStream ms = new MemoryStream(data))
            await ObjectClient.UploadPartAsync(bucket, nameof(ListIncompleteParts), 1, init.UploadId, ms).ConfigureAwait(false);

        ListMultipartUploadsResponse listResp = await BucketClient.ListMultipartUploadsAsync(bucket).ConfigureAwait(false);
        Assert.Equal(bucket, listResp.Bucket);
        Assert.Equal("ListIncompleteParts", listResp.NextKeyMarker);
        Assert.NotEmpty(listResp.NextUploadIdMarker);
        Assert.Equal(1000, listResp.MaxUploads);
        Assert.False(listResp.IsTruncated);

        // Exactly one in-progress upload, matching the markers returned above.
        S3Upload upload = Assert.Single(listResp.Uploads);
        Assert.Equal(listResp.NextKeyMarker, upload.Name);
        Assert.Equal(listResp.NextUploadIdMarker, upload.UploadId);
        Assert.Equal(TestConstants.TestUsername, upload.Initiator.Name);
        Assert.Equal(StorageClass.Standard, upload.StorageClass);
        Assert.Equal(DateTime.UtcNow, upload.Initiated.DateTime, TimeSpan.FromSeconds(5));
    }).ConfigureAwait(false);
}
/// <summary>
/// Uses the S3 protocol to upload the local file at <paramref name="filePath"/>
/// to the server at <paramref name="uploadTarget"/> via a multipart upload.
/// </summary>
/// <param name="uploadTarget">Destination descriptor of the upload.</param>
/// <param name="filePath">Path of the local file to send.</param>
public static void TranslocateFile(string uploadTarget, string filePath)
{
    AmazonS3Client s3Client = Common.CreateS3Client(uploadTarget);

    // Open the multipart upload and remember its id for the remaining calls.
    InitiateMultipartUploadResponse initResponse = Common.OpenUpload(s3Client, uploadTarget, filePath);
    string uploadId = initResponse.UploadId;

    // Send the file in 5 MiB parts.
    long partSize = 5 * Common.MiB;
    List<UploadPartResponse> partResponses = Common.UploadParts(s3Client, uploadTarget, filePath, uploadId, partSize);

    // Tell the server the upload is complete.
    CompleteMultipartUploadResponse completeResponse = Common.CloseUpload(s3Client, uploadTarget, filePath, uploadId, partResponses);
}
/// <summary>
/// Starts a multipart upload for the selected profile/region/bucket/key.
/// BUGFIX(review): the original had an unconditional "return;" after the control
/// reset, making the entire upload path unreachable dead code. Restructured as a
/// guard clause; the presumed intent (ignore clicks while a previous upload is
/// still running) should be confirmed.
/// </summary>
private async void BtnStart_Click(object sender, EventArgs e)
{
    // Ignore clicks while an upload is already in progress.
    if (_uploading)
    {
        return;
    }

    // Adjust controls for a new upload.
    resetUploadCtrls(true);

    // Resolve the selected profile and fetch its credentials.
    var profile = ComboProfiles.SelectedItem as AWSCredentialsProfile;
    _ctsUpload = new CancellationTokenSource();
    ImmutableCredentials creds = await profile.Credentials.GetCredentialsAsync();

    if (_ctsUpload.IsCancellationRequested)
    {
        return;
    }

    var region = ComboRegions.SelectedItem as RegionEndpoint;
    var bucket = ComboBuckets.SelectedItem as S3Bucket;
    string key = TxtKey.Text;

    _uploadRequest.BucketName = bucket.BucketName;
    _uploadRequest.Key = key;

    // Initiate the multipart upload. (A duplicate, unused GetCredentialsAsync()
    // call from the original was removed.)
    var s3 = new AmazonS3Client(creds.AccessKey, creds.SecretKey, region);
    InitiateMultipartUploadResponse response = await s3.InitiateMultipartUploadAsync(_uploadRequest, _ctsUpload.Token);
}
/// <summary>
/// Verifies that only fully uploaded objects appear in a recursive listing,
/// while an initiated-but-incomplete multipart upload is listed separately.
/// </summary>
public async Task ListObjects()
{
    await CreateTempBucketAsync(async bucket =>
    {
        // Create 3 objects in the bucket, one of them an incomplete multipart upload.
        await UploadAsync(bucket, "resource1").ConfigureAwait(false);
        await UploadAsync(bucket, "resource2").ConfigureAwait(false);
        InitiateMultipartUploadResponse initResp = await ObjectClient.InitiateMultipartUploadAsync(bucket, "multipart").ConfigureAwait(false);

        // Only the 2 completed objects should be listed.
        List<S3Object> objects = await BucketClient.GetBucketRecursiveAsync(bucket, true).ToListAsync().ConfigureAwait(false);
        Assert.Equal(2, objects.Count);
        Assert.Equal("resource1", objects[0].Name);
        Assert.Equal("resource2", objects[1].Name);

        // The incomplete upload shows up in the multipart listing instead.
        List<S3Upload> uploads = await BucketClient.ListAllMultipartUploadsAsync(bucket).ToListAsync().ConfigureAwait(false);
        S3Upload upload = Assert.Single(uploads);
        Assert.Equal("multipart", upload.Name);
    }).ConfigureAwait(false);
}
/// <summary>
/// Copies an S3 object between buckets with the multipart copy API: initiates
/// an upload on the target, issues CopyPart requests in 5 MB byte ranges, then
/// completes the upload. On failure the upload is aborted so no orphaned parts
/// remain on the server.
/// </summary>
static void Main(string[] args)
{
    IAmazonS3 s3Client = new AmazonS3Client(Amazon.RegionEndpoint.USEast1);

    // List to store upload part responses.
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();
    List<CopyPartResponse> copyResponses = new List<CopyPartResponse>();

    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = targetBucket,
        Key = targetObjectKey
    };

    InitiateMultipartUploadResponse initResponse = s3Client.InitiateMultipartUpload(initiateRequest);
    String uploadId = initResponse.UploadId;

    try
    {
        // Get the source object size to know how many parts to copy.
        GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest
        {
            BucketName = sourceBucket,
            Key = sourceObjectKey
        };

        GetObjectMetadataResponse metadataResponse = s3Client.GetObjectMetadata(metadataRequest);
        long objectSize = metadataResponse.ContentLength; // in bytes

        // Copy the object in 5 MB ranges; the final range may be shorter.
        long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB
        long bytePosition = 0;
        for (int i = 1; bytePosition < objectSize; i++)
        {
            CopyPartRequest copyRequest = new CopyPartRequest
            {
                DestinationBucket = targetBucket,
                DestinationKey = targetObjectKey,
                SourceBucket = sourceBucket,
                SourceKey = sourceObjectKey,
                UploadId = uploadId,
                FirstByte = bytePosition,
                LastByte = bytePosition + partSize - 1 >= objectSize ? objectSize - 1 : bytePosition + partSize - 1,
                PartNumber = i
            };

            copyResponses.Add(s3Client.CopyPart(copyRequest));

            bytePosition += partSize;
        }

        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = targetBucket,
            Key = targetObjectKey,
            UploadId = uploadId // consistency: use the captured uploadId throughout
        };
        completeRequest.AddPartETags(copyResponses);
        CompleteMultipartUploadResponse completeUploadResponse = s3Client.CompleteMultipartUpload(completeRequest);
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
        // BUGFIX: abort the initiated upload on failure; otherwise the already
        // copied parts linger server-side and continue to accrue storage charges.
        s3Client.AbortMultipartUpload(new AbortMultipartUploadRequest
        {
            BucketName = targetBucket,
            Key = targetObjectKey,
            UploadId = uploadId
        });
    }
}
/// <summary>
/// Uploads two 2 MB parts — each below the 5 MB minimum for non-final parts —
/// and verifies that completing the upload fails with HTTP 400.
/// See https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
/// A single part would count as "the last part" and may be of any size, which
/// is why this test uses two parts.
/// </summary>
public async Task TooSmallUpload()
{
    string key = nameof(TooSmallUpload);

    InitiateMultipartUploadResponse init = await ObjectClient.InitiateMultipartUploadAsync(BucketName, key).ConfigureAwait(false);
    Assert.Equal(BucketName, init.Bucket);
    Assert.Equal(key, init.Key);
    Assert.NotNull(init.UploadId);

    // 4 MB split in half gives two 2 MB parts.
    byte[] file = new byte[1024 * 1024 * 4];
    byte[][] halves = file.Chunk(file.Length / 2).Select(x => x.ToArray()).ToArray();

    UploadPartResponse part1 = await ObjectClient.UploadPartAsync(BucketName, key, 1, init.UploadId, new MemoryStream(halves[0])).ConfigureAwait(false);
    Assert.True(part1.IsSuccess);
    Assert.NotNull(part1.ETag);

    UploadPartResponse part2 = await ObjectClient.UploadPartAsync(BucketName, key, 2, init.UploadId, new MemoryStream(halves[1])).ConfigureAwait(false);
    Assert.True(part2.IsSuccess);
    Assert.NotNull(part2.ETag);

    CompleteMultipartUploadResponse complete = await ObjectClient.CompleteMultipartUploadAsync(BucketName, key, init.UploadId, new[] { part1, part2 }).ConfigureAwait(false);
    Assert.False(complete.IsSuccess);
    Assert.Equal(400, complete.StatusCode);
}
/// <summary>
/// Builds an InitiateMultipartUploadResponse from the JSON context, advancing
/// the reader once and delegating field population to UnmarshallResult.
/// </summary>
public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
{
    var result = new InitiateMultipartUploadResponse();
    context.Read();
    UnmarshallResult(context, result);
    return result;
}
/// <summary>
/// Builds an InitiateMultipartUploadResponse from the JSON context, unmarshalling
/// the nested InitiateMultipartUploadResult payload.
/// </summary>
public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
{
    var result = new InitiateMultipartUploadResponse();
    context.Read();
    result.InitiateMultipartUploadResult = InitiateMultipartUploadResultUnmarshaller.GetInstance().Unmarshall(context);
    return result;
}
// Populates the response from the XML body (Bucket/Key/UploadId elements) and
// from well-known response headers (SSE, abort lifecycle, request-charged).
// NOTE(review): this looks like decompiled code — the get_* accessor calls are
// kept exactly as-is.
private static void UnmarshallResult(XmlUnmarshallerContext context, InitiateMultipartUploadResponse response)
{
    int currentDepth = context.get_CurrentDepth();
    int num = currentDepth + 1;
    // At document start the payload elements sit two levels deeper.
    if (context.get_IsStartOfDocument())
    {
        num += 2;
    }
    while (context.Read())
    {
        if (context.get_IsStartElement() || context.get_IsAttribute())
        {
            if (context.TestExpression("Bucket", num))
            {
                response.BucketName = StringUnmarshaller.GetInstance().Unmarshall(context);
            }
            else if (context.TestExpression("Key", num))
            {
                response.Key = StringUnmarshaller.GetInstance().Unmarshall(context);
            }
            else if (context.TestExpression("UploadId", num))
            {
                response.UploadId = StringUnmarshaller.GetInstance().Unmarshall(context);
            }
        }
        else if (context.get_IsEndElement() && context.get_CurrentDepth() < currentDepth)
        {
            // Left the element we started in: the payload is fully consumed.
            return;
        }
    }
    // Header-carried fields — only set when the header is present.
    IWebResponseData responseData = context.get_ResponseData();
    if (responseData.IsHeaderPresent("x-amz-server-side-encryption"))
    {
        response.ServerSideEncryptionMethod = S3Transforms.ToString(responseData.GetHeaderValue("x-amz-server-side-encryption"));
    }
    if (responseData.IsHeaderPresent("x-amz-server-side-encryption-aws-kms-key-id"))
    {
        response.ServerSideEncryptionKeyManagementServiceKeyId = S3Transforms.ToString(responseData.GetHeaderValue("x-amz-server-side-encryption-aws-kms-key-id"));
    }
    if (responseData.IsHeaderPresent("x-amz-abort-date"))
    {
        response.AbortDate = S3Transforms.ToDateTime(responseData.GetHeaderValue("x-amz-abort-date"));
    }
    if (responseData.IsHeaderPresent("x-amz-abort-rule-id"))
    {
        response.AbortRuleId = S3Transforms.ToString(responseData.GetHeaderValue("x-amz-abort-rule-id"));
    }
    if (responseData.IsHeaderPresent(S3Constants.AmzHeaderRequestCharged))
    {
        response.RequestCharged = RequestCharged.FindValue(responseData.GetHeaderValue(S3Constants.AmzHeaderRequestCharged));
    }
}
/// <summary>
/// Copies the Location and x-amz-multipart-upload-id response headers onto the
/// response when they are present.
/// </summary>
private static void UnmarshallResult(JsonUnmarshallerContext context, InitiateMultipartUploadResponse response)
{
    string location = context.ResponseData.GetHeaderValue("Location");
    if (location != null)
    {
        response.Location = location;
    }

    string uploadId = context.ResponseData.GetHeaderValue("x-amz-multipart-upload-id");
    if (uploadId != null)
    {
        response.UploadId = uploadId;
    }
}
/// <summary>
/// Initializes the S3 client and starts a multipart upload for the given key.
/// </summary>
/// <param name="filePath">Object key under which the upload is created.</param>
public void Initialize(string filePath)
{
    _s3 = new AmazonS3Client(accessKey, secretKey, RegionEndpoint.APSouth1);
    _uploadResponses = new List<UploadPartResponse>();

    var initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = _s3BucketName,
        Key = filePath
    };

    // FIX: block via GetAwaiter().GetResult() instead of .Result so failures
    // surface as the original exception rather than an AggregateException.
    // NOTE(review): sync-over-async still risks deadlocks on a UI/ASP.NET
    // sync context — consider exposing an InitializeAsync instead.
    _initialResponse = _s3.InitiateMultipartUploadAsync(initiateRequest).GetAwaiter().GetResult();
}
/// <summary>
/// Populates the response from the XML payload (Bucket, Key, UploadId) and from
/// the server-side-encryption response headers when present.
/// </summary>
private static void UnmarshallResult(XmlUnmarshallerContext context, InitiateMultipartUploadResponse response)
{
    int originalDepth = context.CurrentDepth;
    // At document start the payload elements sit two levels deeper.
    int targetDepth = context.IsStartOfDocument ? originalDepth + 3 : originalDepth + 1;

    while (context.Read())
    {
        if (context.IsStartElement || context.IsAttribute)
        {
            if (context.TestExpression("Bucket", targetDepth))
            {
                response.BucketName = StringUnmarshaller.GetInstance().Unmarshall(context);
            }
            else if (context.TestExpression("Key", targetDepth))
            {
                response.Key = StringUnmarshaller.GetInstance().Unmarshall(context);
            }
            else if (context.TestExpression("UploadId", targetDepth))
            {
                response.UploadId = StringUnmarshaller.GetInstance().Unmarshall(context);
            }
        }
        else if (context.IsEndElement && context.CurrentDepth < originalDepth)
        {
            // Left the element we started in: payload fully consumed.
            return;
        }
    }

    IWebResponseData responseData = context.ResponseData;
    if (responseData.IsHeaderPresent("x-amz-server-side-encryption"))
    {
        response.ServerSideEncryptionMethod = S3Transforms.ToString(responseData.GetHeaderValue("x-amz-server-side-encryption"));
    }
    if (responseData.IsHeaderPresent(HeaderKeys.XAmzServerSideEncryptionAwsKmsKeyIdHeader))
    {
        response.ServerSideEncryptionKeyManagementServiceKeyId = S3Transforms.ToString(responseData.GetHeaderValue(HeaderKeys.XAmzServerSideEncryptionAwsKmsKeyIdHeader));
    }
}
/// <summary>
/// Step 1 of the upload: initiate the multipart upload, making the target
/// object publicly readable via the x-amz-acl header.
/// </summary>
private void InitializePartToCloud()
{
    uploadPartETags = new List<PartETag>();

    var initRequest = new InitiateMultipartUploadRequest
    {
        BucketName = this.DestinationBucket,
        Key = this.DestinationFilename
    };

    // make it public
    initRequest.AddHeader("x-amz-acl", "public-read");

    initResponse = client.InitiateMultipartUpload(initRequest);
}
/// <summary>
/// Starts a Glacier multipart upload for the configured vault and returns the
/// upload id used by subsequent part uploads.
/// </summary>
string InitiateMultipartUpload(AmazonGlacier client)
{
    var request = new InitiateMultipartUploadRequest
    {
        VaultName = vaultName,
        PartSize = partSize,
        ArchiveDescription = archiveDescription
    };

    InitiateMultipartUploadResponse response = client.InitiateMultipartUpload(request);
    return response.InitiateMultipartUploadResult.UploadId;
}
/// <summary>
/// Starts a Glacier multipart upload for the configured vault/archive with the
/// given part size and returns the upload id.
/// </summary>
/// <param name="packetSize">Size of each upload part in bytes.</param>
/// <returns>The upload id for subsequent part uploads.</returns>
public string InitiateMultiPartUpload(int packetSize)
{
    var request = new InitiateMultipartUploadRequest
    {
        ArchiveDescription = _archive,
        PartSize = packetSize,
        VaultName = _vault
    };

    InitiateMultipartUploadResponse response = _amazonGlacierClient.InitiateMultipartUpload(request);
    return response.InitiateMultipartUploadResult.UploadId;
}
// Drives the XML reader and delegates field population to UnmarshallResult at
// each start element. NOTE(review): decompiled-style get_* accessor kept as-is.
public override AmazonWebServiceResponse Unmarshall(XmlUnmarshallerContext context)
{
    InitiateMultipartUploadResponse initiateMultipartUploadResponse = new InitiateMultipartUploadResponse();
    while (context.Read())
    {
        if (context.get_IsStartElement())
        {
            UnmarshallResult(context, initiateMultipartUploadResponse);
        }
    }
    return (initiateMultipartUploadResponse);
}
/// <summary>
/// Initiates a multipart upload and verifies the response echoes the bucket/key
/// and that the upload appears in the in-progress multipart listing.
/// </summary>
public void TestOrdinary()
{
    InitiateMultipartUploadResponse response = this.client.InitiateMultipartUpload(this.bucketName, "test");
    // FIX: Assert.AreEqual takes (expected, actual); the original had the
    // arguments swapped, which produces misleading failure messages.
    Assert.AreEqual(this.bucketName, response.Bucket);
    Assert.AreEqual("test", response.Key);
    String uploadId = response.UploadId;

    List<MultipartUploadSummary> uploads = this.client.ListMultipartUploads(this.bucketName).Uploads;
    Assert.AreEqual(1, uploads.Count);
    Assert.AreEqual(uploadId, uploads[0].UploadId);
}
/// <summary>
/// Unmarshalls the service response into an InitiateMultipartUploadResponse,
/// reading the Location and upload-id response headers when present.
/// </summary>
/// <param name="context">Response context to unmarshall from.</param>
/// <returns>The populated response object.</returns>
public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
{
    var response = new InitiateMultipartUploadResponse();
    var headers = context.ResponseData;

    if (headers.IsHeaderPresent("Location"))
        response.Location = headers.GetHeaderValue("Location");

    if (headers.IsHeaderPresent("x-amz-multipart-upload-id"))
        response.UploadId = headers.GetHeaderValue("x-amz-multipart-upload-id");

    return response;
}
/// <summary>
/// Initiates a multipart upload and immediately aborts it, verifying the abort
/// succeeds with HTTP 204.
/// </summary>
public async Task AbortIncompleteUpload()
{
    string key = nameof(AbortIncompleteUpload);

    InitiateMultipartUploadResponse init = await ObjectClient.InitiateMultipartUploadAsync(BucketName, key).ConfigureAwait(false);
    Assert.Equal(BucketName, init.Bucket);
    Assert.Equal(key, init.Key);
    Assert.NotNull(init.UploadId);

    AbortMultipartUploadResponse abort = await ObjectClient.AbortMultipartUploadAsync(BucketName, key, init.UploadId).ConfigureAwait(false);
    Assert.True(abort.IsSuccess);
    Assert.Equal(204, abort.StatusCode);
}
/// <summary>
/// Initiates the multipart upload and prepares the abort request and the part
/// response list used by the rest of the transfer.
/// </summary>
private void Init()
{
    this.initResponse = client.InitiateMultipartUpload(new InitiateMultipartUploadRequest
    {
        BucketName = this.bucketName,
        Key = this.fileName
    });

    // Pre-built abort request so the upload can be cancelled later on.
    this.abortMPURequest = new AbortMultipartUploadRequest
    {
        BucketName = this.bucketName,
        Key = this.fileName,
        UploadId = this.initResponse.UploadId
    };

    this.uploadResponses = new List<UploadPartResponse>();
}
/// <summary>
/// Starts a chunked (multipart) upload for the given id and returns the upload
/// id used by subsequent chunk uploads.
/// </summary>
/// <param name="context">Backend context; must be of type Context.</param>
/// <param name="id">Object key to create the upload under.</param>
/// <returns>The S3 upload id.</returns>
async Task<string> IFileBackend.ChunkedUploadStartAsync(object context, string id)
{
    // FIX: the original "as" cast without a null check would surface a wrong
    // context type as a confusing NullReferenceException further down.
    if (!(context is Context config))
    {
        throw new ArgumentException("Expected context of type Context", nameof(context));
    }

    IAmazonS3 s3Client = config.S3;

    var initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = config.BucketName,
        Key = id
    };

    // ConfigureAwait(false): library code, no need to resume on the caller's context.
    InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest).ConfigureAwait(false);
    return initResponse.UploadId;
}
/// <summary>
/// Initiates a multipart upload with the Standard-IA storage class and verifies
/// the class is reported back in the multipart-upload listing.
/// </summary>
public void TestInitiateIAMultiUpload()
{
    var request = new InitiateMultipartUploadRequest();
    request.BucketName = this.bucketName;
    request.Key = "test";
    request.ObjectMetadata = new ObjectMetadata();
    request.ObjectMetadata.StorageClass = BosConstants.StorageClass.StandardInfrequentAccess;

    InitiateMultipartUploadResponse response = this.client.InitiateMultipartUpload(request);
    // FIX: Assert.AreEqual takes (expected, actual); the original had the
    // arguments swapped, which produces misleading failure messages.
    Assert.AreEqual(this.bucketName, response.Bucket);
    Assert.AreEqual("test", response.Key);
    String uploadId = response.UploadId;

    List<MultipartUploadSummary> uploads = this.client.ListMultipartUploads(this.bucketName).Uploads;
    Assert.AreEqual(1, uploads.Count);
    Assert.AreEqual(uploadId, uploads[0].UploadId);
    Assert.AreEqual(BosConstants.StorageClass.StandardInfrequentAccess, uploads[0].StorageClass);
}
/// <summary>
/// Initiates an OBS multipart upload, stores the returned upload id in the
/// shared uploadId field, and reports OBS errors to the console.
/// </summary>
static void InitiateMultipartUpload()
{
    try
    {
        var request = new InitiateMultipartUploadRequest
        {
            BucketName = bucketName,
            ObjectKey = objectName,
        };

        InitiateMultipartUploadResponse response = client.InitiateMultipartUpload(request);
        Console.WriteLine("Initiate multipart upload response: {0}", response.StatusCode);
        Console.WriteLine("upload id: {0}", response.UploadId);
        uploadId = response.UploadId;
    }
    catch (ObsException ex)
    {
        Console.WriteLine("Exception errorcode: {0}, when initiate multipart upload.", ex.ErrorCode);
        Console.WriteLine("Exception errormessage: {0}", ex.ErrorMessage);
    }
}
/// <summary>
/// Streams <paramref name="data"/> to S3 as a multipart upload, running up to
/// <paramref name="numParallelParts"/> part uploads concurrently, yielding each
/// part response as it completes, then finishing with CompleteMultipartUpload.
/// </summary>
/// <param name="req">Initiate request identifying bucket/resource; registered wrappers may transform the stream.</param>
/// <param name="data">Stream whose Length determines the number of parts.</param>
/// <param name="partSize">Size of each part in bytes (the last part may be smaller).</param>
/// <param name="numParallelParts">Maximum number of concurrently uploading parts.</param>
/// <param name="token">Cancellation token; cancelling ends enumeration early.</param>
public async IAsyncEnumerable<UploadPartResponse> MultipartUploadAsync(InitiateMultipartUploadRequest req, Stream data, int partSize = 16777216, int numParallelParts = 4, [EnumeratorCancellation] CancellationToken token = default)
{
    Validator.RequireNotNull(req);
    Validator.RequireNotNull(data);

    if (RequestWrappers != null)
    {
        foreach (IRequestWrapper wrapper in RequestWrappers)
        {
            if (wrapper.IsSupported(req))
            {
                data = wrapper.Wrap(data, req);
            }
        }
    }

    string bucket = req.BucketName;
    string resource = req.Resource;

    InitiateMultipartUploadResponse initResp = await InitiateMultipartUploadAsync(req, token).ConfigureAwait(false);

    if (token.IsCancellationRequested)
    {
        yield break;
    }

    if (!initResp.IsSuccess)
    {
        throw new RequestException(initResp.StatusCode, "InitiateMultipartUploadRequest was unsuccessful");
    }

    Queue<Task<UploadPartResponse>> uploads = new Queue<Task<UploadPartResponse>>();

    using (SemaphoreSlim semaphore = new SemaphoreSlim(numParallelParts))
    {
        long offset = 0;

        for (int i = 1; offset < data.Length; i++)
        {
            await semaphore.WaitAsync(token).ConfigureAwait(false);

            if (token.IsCancellationRequested)
            {
                break;
            }

            long remaining = data.Length - offset;
            long bufferSize = Math.Min(remaining, partSize);

            byte[] partData = new byte[bufferSize];

            // BUGFIX: Stream.ReadAsync may return fewer bytes than requested; the
            // original issued a single read and could enqueue a partially filled
            // buffer. Loop until the part buffer is full (or the stream ends).
            int filled = 0;
            while (filled < partData.Length)
            {
                int read = await data.ReadAsync(partData, filled, partData.Length - filled, token).ConfigureAwait(false);

                if (read == 0)
                {
                    break; // Unexpected end of stream; upload what we have.
                }

                filled += read;
            }

            uploads.Enqueue(UploadPartAsync(bucket, resource, partData, i, initResp.UploadId, semaphore, token));

            offset += partSize;
        }

        Queue<UploadPartResponse> responses = new Queue<UploadPartResponse>(uploads.Count);

        while (uploads.TryDequeue(out Task<UploadPartResponse> task))
        {
            if (token.IsCancellationRequested)
            {
                yield break;
            }

            UploadPartResponse response = await task.ConfigureAwait(false);
            responses.Enqueue(response);

            yield return response;
        }

        CompleteMultipartUploadRequest completeReq = new CompleteMultipartUploadRequest(bucket, resource, initResp.UploadId, responses);
        CompleteMultipartUploadResponse completeResp = await CompleteMultipartUploadAsync(completeReq, token).ConfigureAwait(false);

        if (!completeResp.IsSuccess)
        {
            throw new RequestException(completeResp.StatusCode, "CompleteMultipartUploadRequest was unsuccessful");
        }
    }
}
/// <summary>
/// Runs the multipart upload: initiates the upload, queues one UploadPartRequest
/// per part, executes them via an invoker pool, then completes the upload — or
/// shuts the upload down and rethrows on failure.
/// </summary>
public override void Execute()
{
    // A request Timeout of 0 means "use the config default".
    int timeout = this._config.DefaultTimeout;
    if (this._fileTransporterRequest.Timeout != 0)
    {
        timeout = this._fileTransporterRequest.Timeout;
    }

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
        .WithBucketName(this._fileTransporterRequest.BucketName)
        .WithKey(this._fileTransporterRequest.Key)
        .WithCannedACL(this._fileTransporterRequest.CannedACL)
        .WithContentType(determineContentType())
        .WithStorageClass(this._fileTransporterRequest.StorageClass)
        .WithBeforeRequestHandler(RequestEventHandler) as InitiateMultipartUploadRequest;

    // Optional metadata and headers are only attached when present.
    if (this._fileTransporterRequest.metadata != null && this._fileTransporterRequest.metadata.Count > 0)
    {
        initRequest.WithMetaData(this._fileTransporterRequest.metadata);
    }
    if (this._fileTransporterRequest.Headers != null && this._fileTransporterRequest.Headers.Count > 0)
    {
        initRequest.AddHeaders(this._fileTransporterRequest.Headers);
    }

    InitiateMultipartUploadResponse initResponse = this._s3Client.InitiateMultipartUpload(initRequest);
    this._logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId);

    try
    {
        this._logger.DebugFormat("Queue up the UploadPartRequests to be executed");
        long filePosition = 0;
        for (int i = 1; filePosition < this._contentLength; i++)
        {
            UploadPartRequest uploadRequest = new UploadPartRequest()
                .WithBucketName(this._fileTransporterRequest.BucketName)
                .WithKey(this._fileTransporterRequest.Key)
                .WithUploadId(initResponse.UploadId)
                .WithTimeout(timeout)
                .WithPartNumber(i)
                .WithPartSize(this._partSize)
                .WithSubscriber(new EventHandler<UploadPartProgressArgs>(this.uploadPartProgressEventCallback))
                .WithBeforeRequestHandler(RequestEventHandler) as UploadPartRequest;

            // Parts come either from positions within a file on disk, or from a
            // single shared input stream.
            if (this._fileTransporterRequest.IsSetFilePath())
            {
                uploadRequest
                    .WithFilePosition(filePosition)
                    .WithFilePath(this._fileTransporterRequest.FilePath);
            }
            else
            {
                uploadRequest.InputStream = this._fileTransporterRequest.InputStream;
            }

            this._partsToUpload.Enqueue(uploadRequest);
            filePosition += this._partSize;
        }

        this._totalNumberOfParts = this._partsToUpload.Count;

        this._logger.DebugFormat("Starting threads to execute the {0} UploadPartRequests in the queue", this._totalNumberOfParts);
        startInvokerPool();

        this._logger.DebugFormat("Waiting for threads to complete. ({0})", initResponse.UploadId);
        waitTillAllThreadsComplete();

        this._logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId);
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
            .WithBucketName(this._fileTransporterRequest.BucketName)
            .WithKey(this._fileTransporterRequest.Key)
            .WithUploadId(initResponse.UploadId)
            .WithPartETags(this._uploadResponses)
            .WithBeforeRequestHandler(RequestEventHandler) as CompleteMultipartUploadRequest;
        this._s3Client.CompleteMultipartUpload(compRequest);
        this._logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId);
    }
    catch (Exception e)
    {
        this._logger.Error(string.Format("Exception while uploading. ({0})", initResponse.UploadId), e);
        // shutdown(uploadId) presumably aborts the server-side upload — confirm.
        shutdown(initResponse.UploadId);
        throw;
    }
    finally
    {
        // Close the input stream only when the upload came from a stream
        // rather than a file path.
        if (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.IsSetFilePath())
        {
            this._fileTransporterRequest.InputStream.Close();
        }
    }
}
/// <summary>
/// Console entry point: multi-part uploads a local file to the S3 bucket in
/// 2 MB chunks, aborting the upload (and reporting the error) on failure.
/// </summary>
public static void Main(string[] args)
{
    // create the AWS S3 client
    AmazonS3Client s3 = AWSS3Factory.getS3Client();

    // retrieve the object key/value from user
    Console.Write("Enter the object key: ");
    string key = Console.ReadLine();
    Console.Write("Enter the file location: ");
    string filePath = Console.ReadLine();

    // grab the start time of upload
    DateTime startDate = DateTime.Now;

    // part size for chunking in multi-part
    long partSize = 1024 * 1024 * 2; // 2 MB

    // list of upload part response objects for each part that is uploaded
    List<PartETag> partETags = new List<PartETag>();

    // Step 1: Initialize
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        BucketName = AWSS3Factory.S3_BUCKET,
        Key = key,
    };
    InitiateMultipartUploadResponse initResponse = s3.InitiateMultipartUpload(initRequest);

    // get the file and file length
    long contentLength = new FileInfo(filePath).Length;

    Console.WriteLine(string.Format("Starting multi-part upload for object {0}/{1} with file path {2} and size {3} in {4} MB size chunks",
        AWSS3Factory.S3_BUCKET, key, filePath, Convert.ToString(contentLength), partSize / 1024 / 1024));

    try
    {
        // Step 2: Upload parts
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++)
        {
            // get the size of the chunk. Note - the last part can be less than the chunk size
            partSize = Math.Min(partSize, (contentLength - filePosition));
            Console.WriteLine(string.Format("Sending chunk {0} starting at position {1}", i, filePosition));

            // create request to upload a part
            UploadPartRequest uploadRequest = new UploadPartRequest()
            {
                BucketName = AWSS3Factory.S3_BUCKET,
                Key = key,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                FilePosition = filePosition,
                FilePath = filePath,
                PartSize = partSize
            };

            UploadPartResponse partResponse = s3.UploadPart(uploadRequest);
            partETags.Add(new PartETag(partResponse.PartNumber, partResponse.ETag));

            // FIX: was "filePosition = filePosition += partSize;" — a redundant
            // double assignment (same result, but confusing); simplified.
            filePosition += partSize;
        }

        // Step 3: complete
        Console.WriteLine("Waiting for completion of multi-part upload");
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        {
            BucketName = AWSS3Factory.S3_BUCKET,
            Key = key,
            UploadId = initResponse.UploadId,
            PartETags = partETags
        };
        s3.CompleteMultipartUpload(compRequest);
    }
    catch (Exception e)
    {
        // FIX: the exception was silently discarded; report it before aborting
        // the upload so no orphaned parts remain on the server.
        Console.WriteLine(string.Format("Upload failed: {0}", e.Message));
        s3.AbortMultipartUpload(new AbortMultipartUploadRequest()
        {
            BucketName = AWSS3Factory.S3_BUCKET,
            Key = key,
            UploadId = initResponse.UploadId
        });
    }

    // grab the end time of upload
    DateTime endDate = DateTime.Now;

    Console.WriteLine(string.Format("Completed multi-part upload for object {0}/{1} with file path: {2}", AWSS3Factory.S3_BUCKET, key, filePath));
    Console.WriteLine(string.Format("Process took: {0} seconds.", (endDate - startDate).TotalSeconds.ToString()));
    Console.ReadLine();
}
/// <summary>
/// Add multipart UploadId and encryption context to the current known multipart operations.
/// Encryption context is used to decide the encryption instructions for the next UploadPartRequest.
/// </summary>
/// <param name="initiateMultiPartUploadRequest">InitiateMultipartUploadRequest whose encryption context needs to be saved</param>
/// <param name="initiateMultiPartResponse">InitiateMultipartUploadResponse whose UploadId needs to be saved</param>
/// <exception cref="AmazonServiceException">Thrown when no encryption context was registered for the request.</exception>
protected void AddMultipartUploadEncryptionContext(InitiateMultipartUploadRequest initiateMultiPartUploadRequest, InitiateMultipartUploadResponse initiateMultiPartResponse)
{
    // FIX: use TryGetValue instead of ContainsKey + indexer. Besides avoiding a
    // double lookup, the dictionary supports TryRemove/TryAdd (concurrent-style
    // API), so a separate check-then-read had a window in which the entry could
    // be removed between the two calls.
    if (!EncryptionClient.AllMultiPartUploadRequestContexts.TryGetValue(initiateMultiPartUploadRequest, out var encryptionContext))
    {
        throw new AmazonServiceException($"Failed to find encryption context required to start multipart uploads for request {initiateMultiPartUploadRequest}");
    }

    EncryptionClient.CurrentMultiPartUploadKeys.TryAdd(initiateMultiPartResponse.UploadId, encryptionContext);

    // It is safe to remove the request as it has been already added to the CurrentMultiPartUploadKeys
    EncryptionClient.AllMultiPartUploadRequestContexts.TryRemove(initiateMultiPartUploadRequest, out _);
}