/// <summary>
/// Completes a multipart upload.
/// </summary>
/// <param name="key">Key identifying the object within the bucket</param>
/// <param name="uploadId">ID identifying the multipart upload as a whole</param>
/// <param name="partETags">Sequence of ETags identifying the individual uploaded parts</param>
public async Task CompleteMultiPartUploadAsync(string key, string uploadId, IEnumerable<PartETag> partETags)
{
    var compRequest = new CompleteMultipartUploadRequest
    {
        BucketName = bucket,
        Key = key,
        UploadId = uploadId,
        PartETags = partETags.ToList(),
    };
    var compResponse = await client.CompleteMultipartUploadAsync(compRequest);
}
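For context, a minimal sketch of how a helper like this is typically driven; the `client` and `bucket` fields come from the snippet above, but the part-upload loop shown here is an assumption, not part of the original:

// Hypothetical caller: initiate the upload, collect part ETags, then complete it with the helper above.
var initResponse = await client.InitiateMultipartUploadAsync(
    new InitiateMultipartUploadRequest { BucketName = bucket, Key = key });

var partETags = new List<PartETag>();
// ... upload each part with client.UploadPartAsync and record its ETag:
// partETags.Add(new PartETag(partNumber, uploadPartResponse.ETag));

await CompleteMultiPartUploadAsync(key, initResponse.UploadId, partETags);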
public Task<CompleteMultipartUploadResponse> CompleteMultipartUploadAsync(
    string bucketName, string key, string uploadId, IEnumerable<PartETag> partETags,
    CancellationToken cancellationToken = default(CancellationToken))
    => _S3Client.CompleteMultipartUploadAsync(
        new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = uploadId,
            PartETags = partETags.ToList(),
        },
        cancellationToken).EnsureSuccessAsync();
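`EnsureSuccessAsync` is a project-specific extension that is not shown here. A plausible sketch, assuming it merely surfaces non-success HTTP status codes as exceptions (the class name and message are hypothetical):

// Hypothetical sketch of the EnsureSuccessAsync extension used above.
public static class S3ResponseExtensions
{
    public static async Task<T> EnsureSuccessAsync<T>(this Task<T> responseTask)
        where T : Amazon.Runtime.AmazonWebServiceResponse
    {
        var response = await responseTask.ConfigureAwait(false);
        if ((int)response.HttpStatusCode >= 300)
            throw new InvalidOperationException($"S3 request failed with status {response.HttpStatusCode}.");
        return response;
    }
}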
public async Task<string> Complete()
{
    var responses = await Task.WhenAll(_blocks).ConfigureAwait(false);
    var req = new CompleteMultipartUploadRequest
    {
        BucketName = _bucket,
        Key = _key,
        UploadId = await _uploadId.ConfigureAwait(false),
    };
    req.AddPartETags(responses);
    var r = await _client.CompleteMultipartUploadAsync(req, _ct).ConfigureAwait(false);
    return r.VersionId;
}
private static async Task UploadObjectAsync(AmazonS3Client client, string filePath)
{
    Console.WriteLine($"Uploading {filePath}");

    // Strip the drive prefix (e.g. "C:\") and normalize separators to build the S3 key.
    var keyName = filePath.Substring(3, filePath.Length - 3);
    keyName = keyName.Replace(@"\", "/");

    // Create list to store upload part responses.
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();

    // Setup information required to initiate the multipart upload.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = keyName,
    };

    // Initiate the upload.
    InitiateMultipartUploadResponse initResponse = await client.InitiateMultipartUploadAsync(initiateRequest);

    // Upload parts.
    long contentLength = new FileInfo(filePath).Length;
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB

    try
    {
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++)
        {
            UploadPartRequest uploadRequest = new UploadPartRequest
            {
                BucketName = bucketName,
                Key = keyName,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                PartSize = partSize,
                FilePosition = filePosition,
                FilePath = filePath
            };

            // Track upload progress.
            uploadRequest.StreamTransferProgress += new EventHandler<StreamTransferProgressArgs>(UploadPartProgressEventCallback);

            // Upload a part and add the response to our list.
            uploadResponses.Add(await client.UploadPartAsync(uploadRequest));

            filePosition += partSize;
        }

        // Setup to complete the upload.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        completeRequest.AddPartETags(uploadResponses);

        // Complete the upload.
        CompleteMultipartUploadResponse completeUploadResponse = await client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (Exception exception)
    {
        Console.WriteLine("An exception was thrown: {0}", exception.Message);

        // Abort the upload so incomplete parts do not keep accruing storage charges.
        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = keyName,
            UploadId = initResponse.UploadId
        };
        await client.AbortMultipartUploadAsync(abortMPURequest);
    }
}
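`UploadPartProgressEventCallback` is referenced but not defined in this sample. A minimal sketch, assuming it only needs to log progress (the body is an assumption; `StreamTransferProgressArgs` is the SDK's progress type):

// Hypothetical progress handler for the StreamTransferProgress event wired up above.
private static void UploadPartProgressEventCallback(object sender, StreamTransferProgressArgs e)
{
    // StreamTransferProgressArgs exposes TransferredBytes, TotalBytes, and PercentDone.
    Console.WriteLine($"{e.TransferredBytes}/{e.TotalBytes} bytes transferred ({e.PercentDone}%)");
}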
public void SSEMultipartUploadTest()
{
    var testKey = "test.tst";
    var size = 1 * 1024 * 1024;
    var data = new byte[size];
    new Random().NextBytes(data);

    Aes aesEncryption = Aes.Create();
    aesEncryption.KeySize = 256;
    aesEncryption.GenerateKey();
    var encKey = Convert.ToBase64String(aesEncryption.Key);

    Assert.DoesNotThrowAsync(async () =>
    {
        var initiateRequest = new InitiateMultipartUploadRequest
        {
            BucketName = TestBucket,
            Key = testKey,
            ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
            ServerSideEncryptionCustomerProvidedKey = encKey
        };
        var initResponse = await _s3Client.InitiateMultipartUploadAsync(initiateRequest);
        // Assert.IsNotNull(initResponse.ServerSideEncryptionCustomerMethod);
        // Assert.IsNotNull(initResponse.ServerSideEncryptionCustomerProvidedKeyMD5);

        try
        {
            UploadPartResponse uploadResponse;
            await using (var stream = new MemoryStream(data))
            {
                UploadPartRequest uploadRequest = new UploadPartRequest
                {
                    BucketName = TestBucket,
                    Key = testKey,
                    UploadId = initResponse.UploadId,
                    PartNumber = 1,
                    PartSize = size,
                    FilePosition = 0,
                    InputStream = stream,
                    ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
                    ServerSideEncryptionCustomerProvidedKey = encKey,
                };
                uploadResponse = await _s3Client.UploadPartAsync(uploadRequest);
            }

            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
            {
                BucketName = TestBucket,
                Key = testKey,
                UploadId = initResponse.UploadId
            };
            completeRequest.AddPartETags(uploadResponse);
            var completeUploadResponse = await _s3Client.CompleteMultipartUploadAsync(completeRequest);
        }
        catch
        {
            var abortMPURequest = new AbortMultipartUploadRequest
            {
                BucketName = TestBucket,
                Key = testKey,
                UploadId = initResponse.UploadId
            };
            await _s3Client.AbortMultipartUploadAsync(abortMPURequest);
            throw;
        }
    });
}
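A follow-up worth noting: SSE-C objects can only be read back with the same customer-provided key. A minimal retrieval sketch reusing the test's `TestBucket`, `testKey`, and `encKey` (this GET is not part of the original test):

// Hypothetical follow-up GET; the same AES-256 customer key must accompany the request.
var getResponse = await _s3Client.GetObjectAsync(new GetObjectRequest
{
    BucketName = TestBucket,
    Key = testKey,
    ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
    ServerSideEncryptionCustomerProvidedKey = encKey,
});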
/// <summary>
/// This method uses the passed client object to perform a multipart
/// copy operation.
/// </summary>
/// <param name="client">An Amazon S3 client object that will be used
/// to perform the copy.</param>
public static async Task MPUCopyObjectAsync(AmazonS3Client client)
{
    // Create lists to store the upload and copy part responses.
    var uploadResponses = new List<UploadPartResponse>();
    var copyResponses = new List<CopyPartResponse>();

    // Setup information required to initiate the multipart upload.
    var initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = TargetBucket,
        Key = TargetObjectKey,
    };

    // Initiate the upload.
    InitiateMultipartUploadResponse initResponse = await client.InitiateMultipartUploadAsync(initiateRequest);

    // Save the upload ID.
    string uploadId = initResponse.UploadId;

    try
    {
        // Get the size of the object.
        var metadataRequest = new GetObjectMetadataRequest
        {
            BucketName = SourceBucket,
            Key = SourceObjectKey,
        };
        GetObjectMetadataResponse metadataResponse = await client.GetObjectMetadataAsync(metadataRequest);
        var objectSize = metadataResponse.ContentLength; // Length in bytes.

        // Copy the parts.
        var partSize = 5 * (long)Math.Pow(2, 20); // Part size is 5 MB.
        long bytePosition = 0;
        for (int i = 1; bytePosition < objectSize; i++)
        {
            var copyRequest = new CopyPartRequest
            {
                DestinationBucket = TargetBucket,
                DestinationKey = TargetObjectKey,
                SourceBucket = SourceBucket,
                SourceKey = SourceObjectKey,
                UploadId = uploadId,
                FirstByte = bytePosition,
                LastByte = bytePosition + partSize - 1 >= objectSize ? objectSize - 1 : bytePosition + partSize - 1,
                PartNumber = i,
            };
            copyResponses.Add(await client.CopyPartAsync(copyRequest));
            bytePosition += partSize;
        }

        // Set up to complete the copy.
        var completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = TargetBucket,
            Key = TargetObjectKey,
            UploadId = initResponse.UploadId,
        };
        completeRequest.AddPartETags(copyResponses);

        // Complete the copy.
        CompleteMultipartUploadResponse completeUploadResponse = await client.CompleteMultipartUploadAsync(completeRequest);
    }
    catch (AmazonS3Exception e)
    {
        Console.WriteLine($"Error encountered on server. Message:'{e.Message}' when writing an object");
    }
    catch (Exception e)
    {
        Console.WriteLine($"Unknown error encountered on server. Message:'{e.Message}' when writing an object");
    }
}
public async Task PutObjectAsync(string key, Stream stream, Dictionary<string, string> metadata)
{
    //TestConnection();

    var streamSize = new Size(stream.Length, SizeUnit.Bytes);
    if (streamSize > TotalBlocksSizeLimit)
    {
        throw new InvalidOperationException($@"Can't upload more than 5TB to AWS S3, current upload size: {streamSize}");
    }

    var streamLength = streamSize.GetValue(SizeUnit.Bytes);
    try
    {
        _progress?.UploadProgress.SetTotal(streamLength);

        if (streamSize > MaxUploadPutObject)
        {
            _progress?.UploadProgress.ChangeType(UploadType.Chunked);

            var multipartRequest = new InitiateMultipartUploadRequest
            {
                Key = key,
                BucketName = _bucketName
            };
            FillMetadata(multipartRequest.Metadata, metadata);

            var initiateResponse = await _client.InitiateMultipartUploadAsync(multipartRequest, _cancellationToken);
            var partNumber = 1;
            var partEtags = new List<PartETag>();

            while (stream.Position < streamLength)
            {
                var leftToUpload = streamLength - stream.Position;
                var toUpload = Math.Min(MinOnePartUploadSizeLimit.GetValue(SizeUnit.Bytes), leftToUpload);

                var uploadResponse = await _client
                    .UploadPartAsync(new UploadPartRequest
                    {
                        Key = key,
                        BucketName = _bucketName,
                        InputStream = stream,
                        PartNumber = partNumber++,
                        PartSize = toUpload,
                        UploadId = initiateResponse.UploadId,
                        StreamTransferProgress = (_, args) =>
                        {
                            _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                            _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                            _progress?.OnUploadProgress?.Invoke();
                        }
                    }, _cancellationToken);

                partEtags.Add(new PartETag(uploadResponse.PartNumber, uploadResponse.ETag));
            }

            await _client.CompleteMultipartUploadAsync(
                new CompleteMultipartUploadRequest
                {
                    UploadId = initiateResponse.UploadId,
                    BucketName = _bucketName,
                    Key = key,
                    PartETags = partEtags
                }, _cancellationToken);

            return;
        }

        var request = new PutObjectRequest
        {
            Key = key,
            BucketName = _bucketName,
            InputStream = stream,
            StreamTransferProgress = (_, args) =>
            {
                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                _progress?.OnUploadProgress?.Invoke();
            }
        };
        FillMetadata(request.Metadata, metadata);

        await _client.PutObjectAsync(request, _cancellationToken);
    }
    catch (AmazonS3Exception e)
    {
        await MaybeHandleExceptionAsync(e);
        throw;
    }
    finally
    {
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}
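`FillMetadata` is a private helper not shown here. A plausible sketch, with its shape inferred from the call sites (the body is an assumption):

// Hypothetical sketch of the FillMetadata helper used above.
private static void FillMetadata(MetadataCollection requestMetadata, Dictionary<string, string> metadata)
{
    if (metadata == null)
        return;

    foreach (var kvp in metadata)
        requestMetadata[kvp.Key] = kvp.Value; // The SDK prefixes keys with "x-amz-meta-" automatically.
}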
private async Task MultipartUploadAsync(AmazonS3Client s3, Stream stream, string key, IOperationExecutionContext context)
{
    var uploadResponse = await s3.InitiateMultipartUploadAsync(
        new InitiateMultipartUploadRequest
        {
            BucketName = this.BucketName,
            Key = key,
            CannedACL = this.CannedACL,
            StorageClass = this.StorageClass,
            ServerSideEncryptionMethod = this.EncryptionMethod
        },
        context.CancellationToken
    );

    try
    {
        var parts = this.GetParts(stream.Length);
        var completedParts = new List<PartETag>(parts.Count);

        for (int i = 0; i < parts.Count; i++)
        {
            var partResponse = await s3.UploadPartAsync(
                new UploadPartRequest
                {
                    BucketName = this.BucketName,
                    Key = key,
                    InputStream = stream,
                    UploadId = uploadResponse.UploadId,
                    PartSize = parts[i].Length,
                    FilePosition = parts[i].StartOffset,
                    PartNumber = i + 1,
                    StreamTransferProgress = (s, e) => Interlocked.Add(ref this.uploadedBytes, e.IncrementTransferred)
                },
                context.CancellationToken
            );
            completedParts.Add(new PartETag(i + 1, partResponse.ETag));
        }

        await s3.CompleteMultipartUploadAsync(
            new CompleteMultipartUploadRequest
            {
                BucketName = this.BucketName,
                Key = key,
                UploadId = uploadResponse.UploadId,
                PartETags = completedParts
            },
            context.CancellationToken
        );
    }
    catch
    {
        await s3.AbortMultipartUploadAsync(
            new AbortMultipartUploadRequest
            {
                BucketName = this.BucketName,
                Key = key,
                UploadId = uploadResponse.UploadId
            }
        );
        throw;
    }
}
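`GetParts` is not shown either. A minimal sketch of what it might look like, assuming a simple `PartInfo` offset/length pair and fixed 5 MB parts (both assumptions):

// Hypothetical sketch of the GetParts helper used above.
private sealed class PartInfo
{
    public long StartOffset { get; set; }
    public long Length { get; set; }
}

private List<PartInfo> GetParts(long totalLength)
{
    const long partSize = 5 * 1024 * 1024; // S3's minimum part size, except for the last part.
    var parts = new List<PartInfo>();
    for (long offset = 0; offset < totalLength; offset += partSize)
        parts.Add(new PartInfo { StartOffset = offset, Length = Math.Min(partSize, totalLength - offset) });
    return parts;
}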
private async Task<bool> UploadObjectBandWidthThrottlingAsync(string bucketName, string localFullFilename, string key, CancellationToken token, int sleepMiliSec, bool publicReadTrueOrFalse = false)
{
    bool bReturn = true;
    List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>();

    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = key
    };
    if (publicReadTrueOrFalse)
    {
        initiateRequest.CannedACL = S3CannedACL.PublicRead;
    }

    IAmazonS3 s3Client = new AmazonS3Client(AccessKey, SecretKey, new AmazonS3Config
    {
        ServiceURL = ServiceUrl,
        BufferSize = 64 * (int)Math.Pow(2, 10), // 64 KB
        ProgressUpdateInterval = this.ProgressUpdateInterval,
        Timeout = new TimeSpan(1, 0, 0, 0, 0) // 1 day
    });

    InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest, token);

    long contentLength = new FileInfo(localFullFilename).Length;
    ContentTotalBytes = contentLength;
    long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB

    try
    {
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++)
        {
            if (token.IsCancellationRequested)
            {
                bReturn = false;
                break;
            }

            // Throttle bandwidth by pausing between parts.
            await Task.Delay(sleepMiliSec);

            UploadPartRequest uploadRequest = new UploadPartRequest
            {
                BucketName = bucketName,
                Key = key,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                PartSize = partSize,
                FilePosition = filePosition,
                FilePath = localFullFilename
            };
            uploadRequest.StreamTransferProgress += ProgressEventCallback;

            uploadResponses.Add(await s3Client.UploadPartAsync(uploadRequest, token));

            filePosition += partSize;
            CurrentTransferredBytes = filePosition;
        }

        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        completeRequest.AddPartETags(uploadResponses);

        CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest, token);
    }
    catch (Exception)
    {
        AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMPURequest);
        throw;
    }
    finally
    {
        s3Client.Dispose();
    }

    return bReturn;
}
public async static Task MainAsync(string[] args)
{
    // Welcome user
    Console.BackgroundColor = ConsoleColor.Blue;
    Console.ForegroundColor = ConsoleColor.White;
    Console.WriteLine("Digital Ocean Spaces Manager");
    Console.ResetColor();

    #region Keys
    Console.Write("Enter DO Access Key: ");
    ConsoleKeyInfo key;
    KeyManager.ACCESS_KEY.Clear();
    do
    {
        key = Console.ReadKey(true);
        if (key.Key != ConsoleKey.Backspace && key.Key != ConsoleKey.Enter)
        {
            KeyManager.ACCESS_KEY.AppendChar(key.KeyChar);
            Console.Write("*");
        }
        else if (key.Key == ConsoleKey.Backspace && KeyManager.ACCESS_KEY.Length > 0)
        {
            KeyManager.ACCESS_KEY.RemoveAt(KeyManager.ACCESS_KEY.Length - 1);
            Console.Write("\b \b");
        }
    } while (key.Key != ConsoleKey.Enter);
    Console.Write("\n");

    Console.Write("Enter DO Secret Key: ");
    KeyManager.SECRET_KEY.Clear();
    do
    {
        key = Console.ReadKey(true);
        if (key.Key != ConsoleKey.Backspace && key.Key != ConsoleKey.Enter)
        {
            KeyManager.SECRET_KEY.AppendChar(key.KeyChar);
            Console.Write("*");
        }
        else if (key.Key == ConsoleKey.Backspace && KeyManager.SECRET_KEY.Length > 0)
        {
            // Backspace must edit SECRET_KEY here (the original mistakenly edited ACCESS_KEY).
            KeyManager.SECRET_KEY.RemoveAt(KeyManager.SECRET_KEY.Length - 1);
            Console.Write("\b \b");
        }
    } while (key.Key != ConsoleKey.Enter);
    Console.Write("\n");
    #endregion

    var client = new AmazonS3Client(
        KeyManager.SecureStringToString(KeyManager.ACCESS_KEY),
        KeyManager.SecureStringToString(KeyManager.SECRET_KEY),
        new AmazonS3Config { ServiceURL = "https://nyc3.digitaloceanspaces.com" });
    client.ExceptionEvent += Client_ExceptionEvent;

    string filePath, uploadName, spaceName, contentType = string.Empty;
    Console.Write("Enter Space name to use: ");
    spaceName = Console.ReadLine();

    bool fileExists = false;
    do
    {
        Console.Write("Enter file location: ");
        filePath = Console.ReadLine();
        if (File.Exists(filePath))
        {
            contentType = MimeGuesser.GuessFileType(filePath).MimeType;
            fileExists = true;
        }
        else
        {
            fileExists = false;
            Console.WriteLine("File does not exist. Please enter again.");
        }
    } while (!fileExists);

    Console.Write("Enter name to use when uploaded: ");
    uploadName = Console.ReadLine();

    Console.Write("Wipe away previous attempts? (Y/n): ");
    var wipeAway = Console.ReadLine();
    if (wipeAway == "Y")
    {
        var currentMultiParts = await client.ListMultipartUploadsAsync(spaceName);
        foreach (var multiPart in currentMultiParts.MultipartUploads)
        {
            try
            {
                await client.AbortMultipartUploadAsync(currentMultiParts.BucketName, multiPart.Key, multiPart.UploadId);
            }
            catch (Exception)
            {
            }
        }
        Console.WriteLine("Wiped away previous upload attempts");
    }

    var fileInfo = new FileInfo(filePath);
    var multiPartStart = await client.InitiateMultipartUploadAsync(new Amazon.S3.Model.InitiateMultipartUploadRequest
    {
        BucketName = spaceName,
        ContentType = contentType,
        Key = uploadName
    });

    try
    {
        var i = 0L;
        var n = 1;
        Dictionary<string, int> parts = new Dictionary<string, int>();
        while (i < fileInfo.Length)
        {
            Console.WriteLine($"Starting upload for Part #{n}");
            long partSize = 6000000;
            var lastPart = (i + partSize) >= fileInfo.Length;
            if (lastPart)
            {
                partSize = fileInfo.Length - i;
            }

            bool complete = false;
            int retry = 0;
            Amazon.S3.Model.UploadPartResponse partResp = null;
            do
            {
                retry++;
                try
                {
                    partResp = await client.UploadPartAsync(new Amazon.S3.Model.UploadPartRequest
                    {
                        BucketName = spaceName,
                        FilePath = filePath,
                        FilePosition = i,
                        IsLastPart = lastPart,
                        PartSize = partSize,
                        PartNumber = n,
                        UploadId = multiPartStart.UploadId,
                        Key = uploadName
                    });
                    complete = true;
                }
                catch (Exception)
                {
                    Console.WriteLine($"Failed to upload part {n} on try #{retry}...");
                }
            } while (!complete && retry <= 3);

            if (!complete || partResp == null)
            {
                throw new Exception($"Unable to upload part {n}... Failing");
            }

            parts.Add(partResp.ETag, n);
            i += partSize;
            n++;
            Console.WriteLine($"Uploading {(((float)i / (float)fileInfo.Length) * 100).ToString("N2")} ({i}/{fileInfo.Length})");
        }

        Console.WriteLine("Done uploading! Completing upload");
        var completePart = await client.CompleteMultipartUploadAsync(new Amazon.S3.Model.CompleteMultipartUploadRequest
        {
            UploadId = multiPartStart.UploadId,
            BucketName = spaceName,
            Key = uploadName,
            PartETags = parts.Select(p => new Amazon.S3.Model.PartETag(p.Value, p.Key)).ToList()
        });
        Console.WriteLine("Successfully uploaded!");
    }
    catch (Exception ex)
    {
        var abortPart = await client.AbortMultipartUploadAsync(spaceName, uploadName, multiPartStart.UploadId);
        Console.WriteLine("Error while uploading! " + ex.Message);
        await Task.Delay(10000);
    }

    Console.WriteLine("Press enter to exit");
    Console.ReadLine();
}
public static async Task MultipartEncryptionTestAsync(AmazonS3Client s3EncryptionClient, AmazonS3Client s3DecryptionClient, string bucketName)
{
    var filePath = Path.GetTempFileName();
    var retrievedFilepath = Path.GetTempFileName();
    var totalSize = MegaBytesSize * 15;

    UtilityMethods.GenerateFile(filePath, totalSize);
    var key = Guid.NewGuid().ToString();

    Stream inputStream = File.OpenRead(filePath);
    try
    {
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = key,
            StorageClass = S3StorageClass.OneZoneInfrequentAccess,
            ContentType = "text/html",
        };
        InitiateMultipartUploadResponse initResponse =
            await s3EncryptionClient.InitiateMultipartUploadAsync(initRequest).ConfigureAwait(false);

        // Upload part 1
        UploadPartRequest uploadRequest = new UploadPartRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 1,
            PartSize = 5 * MegaBytesSize,
            InputStream = inputStream,
        };
        UploadPartResponse up1Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false);

        // Upload part 2
        uploadRequest = new UploadPartRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 2,
            PartSize = 5 * MegaBytesSize,
            InputStream = inputStream
        };
        UploadPartResponse up2Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false);

        // Upload part 3
        uploadRequest = new UploadPartRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 3,
            InputStream = inputStream,
            IsLastPart = true
        };
        UploadPartResponse up3Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false);

        ListPartsRequest listPartRequest = new ListPartsRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        ListPartsResponse listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false);
        Assert.Equal(3, listPartResponse.Parts.Count);
        Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber);
        Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag);
        Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber);
        Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag);
        Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber);
        Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag);

        listPartRequest.MaxParts = 1;
        listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false);
        Assert.Single(listPartResponse.Parts);

        // Complete the upload
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        compRequest.AddPartETags(up1Response, up2Response, up3Response);
        CompleteMultipartUploadResponse compResponse =
            await s3EncryptionClient.CompleteMultipartUploadAsync(compRequest).ConfigureAwait(false);
        Assert.Equal(bucketName, compResponse.BucketName);
        Assert.NotNull(compResponse.ETag);
        Assert.Equal(key, compResponse.Key);
        Assert.NotNull(compResponse.Location);

        // Get the file back from S3 and make sure it is still the same.
        GetObjectRequest getRequest = new GetObjectRequest
        {
            BucketName = bucketName,
            Key = key
        };
        GetObjectResponse getResponse = await s3DecryptionClient.GetObjectAsync(getRequest).ConfigureAwait(false);
        await getResponse.WriteResponseStreamToFileAsync(retrievedFilepath, false, CancellationToken.None);
        UtilityMethods.CompareFiles(filePath, retrievedFilepath);

        GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest
        {
            BucketName = bucketName,
            Key = key
        };
        GetObjectMetadataResponse metaDataResponse =
            await s3DecryptionClient.GetObjectMetadataAsync(metaDataRequest).ConfigureAwait(false);
        Assert.Equal("text/html", metaDataResponse.Headers.ContentType);
    }
    finally
    {
        inputStream.Dispose();
        if (File.Exists(filePath))
        {
            File.Delete(filePath);
        }
        if (File.Exists(retrievedFilepath))
        {
            File.Delete(retrievedFilepath);
        }
    }
}
/// <summary>
/// Copies a large file as chunks from Azure BLOB storage to Amazon S3.
/// </summary>
/// <returns></returns>
public async Task CopyLargeFileFromAzureBlobToAwsS3()
{
    AmazonS3Client s3Client = new AmazonS3Client(AwsAccessKeyId, AwsSecretKey, Amazon.RegionEndpoint.APSouth1);

    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(StorageAccount); // Create storage account reference.
    CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient(); // Create the blob client.
    CloudBlobContainer container = blobClient.GetContainerReference(ContainerName); // Retrieve reference to a container.
    container.CreateIfNotExists();
    CloudBlockBlob blob = container.GetBlockBlobReference(BlobFileName); // Create blob reference.
    blob.FetchAttributes(); // Populate blob properties to get the file length.

    var remainingBytes = blob.Properties.Length;
    long readPosition = 0; // Offset/position from where to start reading from the BLOB.

    InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest
    {
        BucketName = AwsS3BucketName,
        Key = TargetFileName
    };

    // Will use UploadId from this response.
    InitiateMultipartUploadResponse initiateMultipartUploadResponse =
        await s3Client.InitiateMultipartUploadAsync(initiateMultipartUploadRequest).ConfigureAwait(false);
    List<UploadPartResponse> uploadPartResponses = new List<UploadPartResponse>();
    Stopwatch stopwatch = Stopwatch.StartNew();

    try
    {
        int partCounter = 0; // Incremented on each part read and used as the part number.
        while (remainingBytes > 0)
        {
            // Will be PartSize for every block except the final one, which may be smaller.
            long bytesToCopy = Math.Min(PartSize, remainingBytes);

            using (MemoryStream memoryStream = new MemoryStream())
            {
                // Download a part from the BLOB.
                await blob.DownloadRangeToStreamAsync(memoryStream, readPosition, bytesToCopy).ConfigureAwait(false);
                memoryStream.Position = 0;
                partCounter++;

                UploadPartRequest uploadRequest = new UploadPartRequest
                {
                    BucketName = AwsS3BucketName,
                    Key = TargetFileName,
                    UploadId = initiateMultipartUploadResponse.UploadId,
                    PartNumber = partCounter,
                    PartSize = bytesToCopy,
                    InputStream = memoryStream
                };
                UploadPartResponse uploadPartResponse = await s3Client.UploadPartAsync(uploadRequest).ConfigureAwait(false);
                uploadPartResponses.Add(uploadPartResponse);

                remainingBytes -= bytesToCopy;
                readPosition += bytesToCopy;
                this.logger.WriteLine($"Uploaded part number {partCounter}, size {bytesToCopy} bytes; {remainingBytes} bytes remaining.");
            }
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest
        {
            BucketName = AwsS3BucketName,
            Key = TargetFileName,
            UploadId = initiateMultipartUploadResponse.UploadId
        };
        completeMultipartUploadRequest.AddPartETags(uploadPartResponses);

        CompleteMultipartUploadResponse completeMultipartUploadResponse =
            await s3Client.CompleteMultipartUploadAsync(completeMultipartUploadRequest).ConfigureAwait(false);
    }
    catch (Exception exception)
    {
        this.logger.WriteLine($"Exception : {exception.Message}");
        AbortMultipartUploadRequest abortMultipartUploadRequest = new AbortMultipartUploadRequest
        {
            BucketName = AwsS3BucketName,
            Key = TargetFileName,
            UploadId = initiateMultipartUploadResponse.UploadId
        };
        await s3Client.AbortMultipartUploadAsync(abortMultipartUploadRequest).ConfigureAwait(false);
    }
    finally
    {
        stopwatch.Stop();
        this.logger.WriteLine($"Execution time in mins: {stopwatch.Elapsed.TotalMinutes}");
    }
}
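The `PartSize` constant and naming fields are declared elsewhere in the class; plausible declarations, with the values being assumptions for illustration:

// Hypothetical declarations matching the usage above.
private const long PartSize = 5 * 1024 * 1024;          // S3 parts must be at least 5 MB, except the last.
private const string AwsS3BucketName = "target-bucket"; // assumed value
private const string TargetFileName = "large-file.bin"; // assumed value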
public async Task<BlobInfoV1> EndBlobWriteAsync(string correlationId, string token, byte[] buffer)
{
    CheckOpened(correlationId);

    var tokens = (token ?? "").Split(';');
    if (tokens.Length < 2)
    {
        throw new BadRequestException(
            correlationId, "BAD_TOKEN", "Token " + token + " is invalid"
        ).WithDetails("token", token);
    }

    var id = tokens[0];

    // Check if a temp file exists
    var size = _storage.GetChunksSize(correlationId, id);

    // Upload the temp file or the chunk directly
    if (size > 0)
    {
        // If some chunks are already stored in a temp file, append the final chunk and upload the entire file
        if (buffer != null)
        {
            _storage.AppendChunk(correlationId, id, buffer);
        }
        token = await UploadAndDeleteChunksAsync(correlationId, token);
    }
    else if (buffer != null)
    {
        // If it's the first chunk, upload it without writing to a temp file
        var stream = new MemoryStream(buffer);
        token = await UploadPartAsync(correlationId, token, stream);
    }

    // Complete the upload
    tokens = (token ?? "").Split(';');
    var parts = new List<PartETag>();
    for (var index = 2; index < tokens.Length; index++)
    {
        parts.Add(new PartETag { ETag = tokens[index], PartNumber = index - 1 });
    }

    await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
    {
        BucketName = _bucket,
        Key = id,
        UploadId = tokens[1],
        PartETags = parts
    });

    return await GetBlobByIdAsync(correlationId, id);
}
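The parsing above implies a token of the form "<blobId>;<uploadId>;<etag1>;<etag2>;...". A sketch of how the `UploadPartAsync` helper called earlier might append each part's ETag (the helper's name comes from the call site; its body is an assumption):

// Hypothetical sketch: each part upload appends its ETag to the "id;uploadId;etag..." token.
private async Task<string> UploadPartAsync(string correlationId, string token, Stream stream)
{
    var tokens = token.Split(';');
    var partNumber = tokens.Length - 1; // two leading fields, then one ETag per part already uploaded

    var response = await _client.UploadPartAsync(new UploadPartRequest
    {
        BucketName = _bucket,
        Key = tokens[0],
        UploadId = tokens[1],
        PartNumber = partNumber,
        InputStream = stream,
    });

    return token + ";" + response.ETag;
}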