/// <summary>
/// Completes a previously started multipart upload by submitting the
/// collected part ETag (a single part in this example) to the OBS service.
/// Relies on the enclosing scope for <c>client</c>, <c>bucketName</c>,
/// <c>objectName</c>, <c>uploadId</c> and <c>etag</c>.
/// </summary>
static void CompleteMultipartUpload()
{
    try
    {
        // Build the part manifest: this example has exactly one part.
        var firstPart = new PartETag
        {
            PartNumber = 1,
            ETag = etag
        };
        var manifest = new List<PartETag> { firstPart };

        var completeRequest = new CompleteMultipartUploadRequest()
        {
            BucketName = bucketName,
            ObjectKey = objectName,
            UploadId = uploadId,
            PartETags = manifest
        };

        CompleteMultipartUploadResponse completeResponse = client.CompleteMultipartUpload(completeRequest);
        Console.WriteLine("Complete multipart upload response: {0}", completeResponse.StatusCode);
    }
    catch (ObsException ex)
    {
        // Report the service error code and message; do not rethrow (sample code).
        Console.WriteLine("Exception errorcode: {0}, when complete multipart upload.", ex.ErrorCode);
        Console.WriteLine("Exception errormessage: {0}", ex.ErrorMessage);
    }
}
/**
 * Multipart upload example: splits a local file into 5 MB parts, uploads each
 * part to KS3, collects the part ETags into an XML manifest, then sends a
 * complete request so the server merges the parts into one object.
 *
 * Returns true when the upload completed (exceptions propagate to the caller).
 */
private static bool uploadPart()
{
    string path = @"you file path"; // local file path, e.g. E:\tool\aa.rar
    InitiateMultipartUploadResult result = multipartUp();
    FileInfo file = new FileInfo(path);
    const int partSize = 5 * 1024 * 1024; // 5 MB per part
    long numBytesToRead = file.Length;    // fix: was int — truncated files > 2 GB
    int i = 0;
    // XML manifest submitted with the complete request so the server can merge the parts.
    XElement root = new XElement("CompleteMultipartUpload");

    // fix: open read-only; this code never writes to the file
    using (FileStream fs = new FileStream(path, FileMode.Open, FileAccess.Read))
    {
        while (numBytesToRead > 0)
        {
            UploadPartRequest request = new UploadPartRequest(
                result.getBucket(), result.getKey(), result.getUploadId(), i + 1);

            // Read up to 5 MB; the final part may be smaller than partSize.
            long offset = (long)i * partSize; // fix: was int math — overflowed past ~2 GB
            int count = (int)Math.Min(partSize, file.Length - offset);
            byte[] data = new byte[count];
            int n = fs.Read(data, 0, count);
            if (n == 0)
            {
                // fix: bail out BEFORE uploading; original checked n == 0 only after
                // uploadPart, so an empty part could be sent and omitted from the manifest.
                break;
            }

            // fix: wrap only the bytes actually read — original sent the whole buffer
            // even when fs.Read returned fewer than count bytes.
            request.setInputStream(new MemoryStream(data, 0, n));
            ProgressListener sampleListener = new SampleListener(count); // progress callback; replace with your own implementation
            request.setProgressListener(sampleListener);

            PartETag tag = ks3Client.uploadPart(request); // upload this part
            Console.WriteLine(tag.ToString());

            numBytesToRead -= n;
            XElement partE = new XElement("Part");
            partE.Add(new XElement("PartNumber", i + 1));
            partE.Add(new XElement("ETag", tag.geteTag()));
            root.Add(partE);
            i++;
        }
    }

    // All parts uploaded: send the complete request so the server merges them.
    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
        result.getBucket(), result.getKey(), result.getUploadId());
    completeRequest.setContent(new MemoryStream(System.Text.Encoding.Default.GetBytes(root.ToString())));
    CompleteMultipartUploadResult completeResult = ks3Client.completeMultipartUpload(completeRequest);
    return true;
}
/// <summary>
/// Console entry point: prompts for an object key and a local file path, then
/// uploads the file to S3 as a multipart upload (initiate, upload 2 MB parts,
/// complete) and prints timing information. Aborts the upload on any failure.
/// </summary>
public static void Main(string[] args)
{
    // create the AWS S3 client
    AmazonS3Client s3 = AWSS3Factory.getS3Client();

    // retrieve the object key/value from user
    Console.Write("Enter the object key: ");
    string key = Console.ReadLine();
    Console.Write("Enter the file location: ");
    string filePath = Console.ReadLine();

    // grab the start time of upload
    DateTime startDate = DateTime.Now;

    // part size for chunking in multi-part
    long partSize = 1024 * 1024 * 2; // 2 MB

    // list of upload part response objects for each part that is uploaded
    List<PartETag> partETags = new List<PartETag>();

    // Step 1: Initialize
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        BucketName = AWSS3Factory.S3_BUCKET,
        Key = key,
    };
    InitiateMultipartUploadResponse initResponse = s3.InitiateMultipartUpload(initRequest);

    // get the file and file length
    long contentLength = new FileInfo(filePath).Length;
    Console.WriteLine(string.Format("Starting multi-part upload for object {0}/{1} with file path {2} and size {3} in {4} MB size chunks",
        AWSS3Factory.S3_BUCKET, key, filePath, Convert.ToString(contentLength), partSize / 1024 / 1024));

    try
    {
        // Step 2: Upload parts
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++)
        {
            // Size of this chunk; the last part can be less than the chunk size.
            // fix: use a local instead of mutating partSize so the configured
            // chunk size is never clobbered by a short final part.
            long chunkSize = Math.Min(partSize, contentLength - filePosition);
            Console.WriteLine(string.Format("Sending chunk {0} starting at position {1}", i, filePosition));

            // create request to upload a part
            UploadPartRequest uploadRequest = new UploadPartRequest()
            {
                BucketName = AWSS3Factory.S3_BUCKET,
                Key = key,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                FilePosition = filePosition,
                FilePath = filePath,
                PartSize = chunkSize
            };
            UploadPartResponse partResponse = s3.UploadPart(uploadRequest);
            PartETag eTagPart = new PartETag(partResponse.PartNumber, partResponse.ETag);
            partETags.Add(eTagPart);

            // fix: was "filePosition = filePosition += partSize;" — a redundant
            // double assignment; a plain compound assignment is equivalent and clear.
            filePosition += chunkSize;
        }

        // Step 3: complete
        Console.WriteLine("Waiting for completion of multi-part upload");
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        {
            BucketName = AWSS3Factory.S3_BUCKET,
            Key = key,
            UploadId = initResponse.UploadId,
            PartETags = partETags
        };
        s3.CompleteMultipartUpload(compRequest);
    }
    catch (Exception e)
    {
        // fix: report the failure instead of silently swallowing it (the caught
        // exception was previously unused), then abort so S3 frees the parts.
        Console.WriteLine(string.Format("Multi-part upload failed: {0}. Aborting upload.", e.Message));
        s3.AbortMultipartUpload(new AbortMultipartUploadRequest()
        {
            BucketName = AWSS3Factory.S3_BUCKET,
            Key = key,
            UploadId = initResponse.UploadId
        });
    }

    // grab the end time of upload
    DateTime endDate = DateTime.Now;
    Console.WriteLine(string.Format("Completed multi-part upload for object {0}/{1} with file path: {2}",
        AWSS3Factory.S3_BUCKET, key, filePath));
    Console.WriteLine(string.Format("Process took: {0} seconds.", (endDate - startDate).TotalSeconds.ToString()));
    Console.ReadLine();
}
/// <summary>
/// Streams one chunk of a multi-part S3 upload. On the first chunk it
/// initiates the upload and stores the upload id on the stream; every call
/// uploads the stream's buffered data as one part; on the last chunk it
/// completes the upload so S3 assembles the parts.
/// </summary>
/// <param name="client">S3 client used for all calls.</param>
/// <param name="key">Object key being written.</param>
/// <param name="stream">Chunk buffer carrying upload state (part number, ETags, upload id).</param>
internal static void CreateMultiPartS3Blob(AmazonS3Client client, string key, S3CopyMemoryStream stream)
{
    // First chunk: initiate the multipart upload and remember the upload id.
    if (stream.InitiatingPart)
    {
        InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest()
            .WithBucketName("static.getbrickpile.com")
            .WithCannedACL(S3CannedACL.PublicRead)
            .WithKey(key);
        InitiateMultipartUploadResponse initiateResponse = client.InitiateMultipartUpload(initiateMultipartUploadRequest);
        stream.UploadPartId = initiateResponse.UploadId;
    }

    // fix: capture the buffered length BEFORE rewinding. The original code set
    // stream.Position = 0 and then passed stream.Position to WithPartSize, so
    // every part was registered with a size of 0.
    long partSize = stream.Position;
    stream.Position = 0;

    UploadPartRequest uploadPartRequest = new UploadPartRequest()
        .WithBucketName("static.getbrickpile.com")
        .WithKey(key)
        .WithPartNumber(stream.WriteCount)
        .WithPartSize(partSize)
        .WithUploadId(stream.UploadPartId)
        .WithInputStream(stream) as UploadPartRequest;
    UploadPartResponse response = client.UploadPart(uploadPartRequest);

    // Record this part's ETag; S3 requires the full list to complete the upload.
    PartETag etag = new PartETag(response.PartNumber, response.ETag);
    stream.PartETagCollection.Add(etag);

    // Last chunk: complete the upload so S3 merges the parts into the object.
    if (stream.EndOfPart)
    {
        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .WithBucketName("static.getbrickpile.com")
            .WithKey(key)
            .WithPartETags(stream.PartETagCollection)
            .WithUploadId(stream.UploadPartId);
        CompleteMultipartUploadResponse completeMultipartUploadResponse = client.CompleteMultipartUpload(completeMultipartUploadRequest);
        string loc = completeMultipartUploadResponse.Location;
    }
}
/// <summary>
/// Appends data from <paramref name="stream"/> to a resumable S3 multipart
/// upload, reading fixed-size blocks and persisting progress (block number,
/// uploaded length, part ETags) to the database after every uploaded part so
/// the upload can resume after an interruption. When the uploaded length
/// reaches the declared total length, completes the multipart upload.
/// </summary>
/// <param name="upload">Upload record tracking progress; mutated and saved as parts complete.</param>
/// <param name="stream">Source data to append.</param>
/// <param name="cancellationToken">Checked between blocks; an in-flight part is still finished.</param>
/// <returns>The number of bytes written to S3 during this call.</returns>
public override async Task <long> AppendToUploadAsync(Upload upload, Stream stream, CancellationToken cancellationToken)
{
    try
    {
        // First call for this upload: start counting parts from block 0.
        if (!upload.BlockNumber.HasValue)
        {
            upload.BlockNumber = 0;
        }
        var blobName = upload.Id + upload.Extension;

        // Nothing left to append — the upload already reached its declared length.
        if (upload.Length == upload.UploadedLength)
        {
            return(0);
        }

        int bytesRead = 0;
        long bytesWritten = 0;

        // No provider-side upload yet: initiate the S3 multipart upload and
        // persist its id so later calls (and resumes) reuse it.
        if (upload.ProviderUploadId == null)
        {
            // Setup information required to initiate the multipart upload.
            var initiateRequest = new InitiateMultipartUploadRequest
            {
                BucketName = _bucket,
                Key = blobName,
            };

            // Initiate the upload.
            InitiateMultipartUploadResponse initResponse =
                await _s3.InitiateMultipartUploadAsync(initiateRequest, cancellationToken);
            upload.ProviderUploadId = initResponse.UploadId;
        }

        do
        {
            // Cancellation is honored only at block boundaries; partial work is
            // never abandoned mid-part.
            if (cancellationToken.IsCancellationRequested)
            {
                _logger.LogDebug("Request to append cancelled for file '{id}'", blobName);
                break;
            }

            var buffer = new byte[blockSize];

            // Fill the buffer up to blockSize. A single ReadAsync may return
            // fewer bytes than requested, so loop until the buffer is full or
            // the source is exhausted (the last block will be short — it will
            // never reach 5MB exactly).
            int lastBytesRead = 0;
            bytesRead = 0;
            do
            {
                lastBytesRead = await stream.ReadAsync(
                    buffer,
                    bytesRead,
                    // Ensure we don't overread the buffer
                    blockSize - bytesRead,
                    cancellationToken);
                bytesRead += lastBytesRead;
            } while (bytesRead < blockSize && lastBytesRead > 0);

            // Source exhausted with nothing buffered: stop.
            if (bytesRead == 0)
            {
                break;
            }

            using (MemoryStream memoryBufferStream = new MemoryStream(buffer, 0, bytesRead))
            {
                var uploadPartRequest = new UploadPartRequest
                {
                    BucketName = _bucket,
                    Key = blobName,
                    UploadId = upload.ProviderUploadId,
                    PartNumber = upload.BlockNumber.Value + 1, // Amazon S3 part uploads start at one.
                    PartSize = bytesRead,
                    InputStream = memoryBufferStream
                };

                var result = await _s3.UploadPartAsync(uploadPartRequest, cancellationToken);

                // Persist the PartETag as base64(JSON) in the space-separated
                // BlockIds string so CompleteMultipartUpload can rebuild the
                // full part list later, even across process restarts.
                PartETag partETag = new PartETag(result.PartNumber, result.ETag);
                string serialised = JsonConvert.SerializeObject(partETag);
                string eTag = Convert.ToBase64String(Encoding.UTF8.GetBytes(serialised));
                upload.BlockIds += $"{eTag} ";
            }

            // Record progress for this block.
            bytesWritten += bytesRead;
            upload.BlockNumber++;
            upload.UploadedLength += bytesRead;

            _logger.LogDebug("Read bytes {bytesRead}, written {bytesWritten}, block number {blockNumber} on file {fileId}",
                bytesRead, bytesWritten, upload.BlockNumber, blobName);

            // note: cancellation token *not* supplied as this must finish because we
            // have successfully uploaded the latest block to S3.
            await DbContext.SaveChangesAsync();
        } while (bytesRead != 0);

        // All expected bytes received: complete the multipart upload so S3
        // assembles the parts into the final object.
        if (upload.Length == upload.UploadedLength)
        {
            var blockIds = upload.BlockIds.Split(new char[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);

            var completeRequest = new CompleteMultipartUploadRequest
            {
                BucketName = _bucket,
                Key = blobName,
                UploadId = upload.ProviderUploadId,
            };

            // Rebuild the PartETag list from the persisted base64(JSON) entries.
            List <PartETag> partETags = new List <PartETag>();
            foreach (var blockId in blockIds)
            {
                string serialised = Encoding.UTF8.GetString(Convert.FromBase64String(blockId));
                PartETag partETag = JsonConvert.DeserializeObject <PartETag>(serialised);
                partETags.Add(partETag);
            }
            completeRequest.AddPartETags(partETags);

            await _s3.CompleteMultipartUploadAsync(completeRequest);
            await DbContext.SaveChangesAsync();
        }

        return(bytesWritten);
    }
    catch (Exception e)
    {
        // Log and rethrow — callers are expected to handle/report the failure.
        _logger.LogError(e, "Failed to append data");
        throw;
    }
}