private UploadPartCommand(IServiceClient client, Uri endpoint, ExecutionContext context, IDeserializer <ServiceResponse, UploadPartResult> deserializer, UploadPartRequest uploadPartRequest) : base(client, endpoint, context, deserializer) { _uploadPartRequest = uploadPartRequest; }
/// <summary>
/// Uploads a resource.
/// </summary>
/// <param name="fileName">File name.</param>
/// <param name="mimeType">MIME type of the file.</param>
/// <param name="stream">Content stream to upload.</param>
public void Upload(string fileName, string mimeType, Stream stream)
{
    // Work out the number of multipart chunks, using 2 MB per part.
    int num = (int)(stream.Length / 0x200000L);
    if (num > 0 && (stream.Length % 0x200000L) != 0)
    {
        num += 1;
    }
    ObjectMetadata metadata = new ObjectMetadata() { ContentType = mimeType };
    try
    {
        if (num > 1)
        {
            InitiateMultipartUploadRequest initiateMultipartUploadRequest =
                new InitiateMultipartUploadRequest(_bucketName, fileName) { ObjectMetadata = metadata };
            InitiateMultipartUploadResult result = _ossClient.InitiateMultipartUpload(initiateMultipartUploadRequest);
            Console.WriteLine("UploadId:" + result.UploadId);
            List<PartETag> collection = new List<PartETag>();
            for (int i = 0; i < num; i++)
            {
                // Seek to the start of this part; use a long offset so large files do not overflow.
                long begin = 0x200000L * i;
                stream.Seek(begin, SeekOrigin.Begin);
                long size = 0x200000L < (stream.Length - begin) ? 0x200000L : stream.Length - begin;
                UploadPartRequest uploadPartRequest = new UploadPartRequest(_bucketName, fileName, result.UploadId)
                {
                    InputStream = stream,
                    PartSize = size,
                    PartNumber = i + 1
                };
                var partResult = _ossClient.UploadPart(uploadPartRequest);
                collection.Add(partResult.PartETag);
            }
            var completeMultipartUploadRequest = new CompleteMultipartUploadRequest(_bucketName, fileName, result.UploadId);
            ((List<PartETag>)completeMultipartUploadRequest.PartETags).AddRange(collection);
            _ossClient.CompleteMultipartUpload(completeMultipartUploadRequest);
        }
        else
        {
            _ossClient.PutObject(_bucketName, fileName, stream, metadata);
        }
    }
    catch (WebException exception)
    {
        throw new Exception("Upload failed, please retry", exception);
    }
}
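A minimal usage sketch for the Upload method above, assuming it is called from inside the same class (where _ossClient and _bucketName are already initialized); the file path and MIME type are illustrative placeholders, not values from the original sample:
// Hypothetical caller: any stream whose Length is known works, e.g. a FileStream.
using (var stream = File.OpenRead(@"C:\temp\sample.mp4"))
{
    // Files larger than roughly 2 MB are split into parts; smaller ones go through PutObject.
    Upload("sample.mp4", "video/mp4", stream);
}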
static void UploadPart() { try { UploadPartRequest request = new UploadPartRequest() { BucketName = bucketName, ObjectKey = objectName, FilePath = filePath, PartNumber = 1, PartSize = partSize, UploadId = uploadId, Offset = 100, }; UploadPartResponse response = client.UploadPart(request); Console.WriteLine("UploadPart response: {0}", response.StatusCode); Console.WriteLine("ETag: {0}", response.ETag); etag = response.ETag; } catch (ObsException ex) { Console.WriteLine("Exception errorcode: {0}, when upload part.", ex.ErrorCode); Console.WriteLine("Exception errormessage: {0}", ex.ErrorMessage); } }
private async Task<UploadPartResponse> UploadPart(UploadPartRequest request, SemaphoreSlim throttler)
{
    if (!request.UploadPartNum.HasValue)
    {
        throw new ArgumentException("UploadPartRequest.UploadPartNum cannot be null");
    }
    int partNum = request.UploadPartNum.Value;
    Manifest.RegisterTransfer(partNum);
    try
    {
        var response = await OSClient.UploadPart(request, _retryConfiguration, _cancellationToken).ConfigureAwait(false);
        Manifest.RegisterSuccess(partNum, response);
        _logger.Debug($"Part {partNum} has been successfully uploaded");
        return response;
    }
    catch (Exception e)
    {
        _logger.Error($"Failure while uploading part {request.UploadPartNum}, message: {e.Message}");
        Manifest.RegisterFailure(partNum);
        return null;
    }
    finally
    {
        throttler.Release();
    }
}
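Only the release side of the SemaphoreSlim appears above; a minimal sketch of how a caller inside the same class might pair WaitAsync with this method to cap concurrent part uploads. The partRequests collection and the concurrency limit of 4 are assumptions, and the loop is expected to run inside an async method:
// Assumed driver loop: limits concurrent part uploads to 4 and waits for all of them.
var throttler = new SemaphoreSlim(4);
var uploadTasks = new List<Task<UploadPartResponse>>();
foreach (UploadPartRequest partRequest in partRequests) // partRequests is a hypothetical list of prepared requests
{
    await throttler.WaitAsync().ConfigureAwait(false);
    uploadTasks.Add(UploadPart(partRequest, throttler)); // Release happens in the finally block above
}
// Failed parts come back as null (see the catch block above), so the caller can retry them.
UploadPartResponse[] responses = await Task.WhenAll(uploadTasks).ConfigureAwait(false);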
/// <summary>
/// Updates the request where the input stream contains the encrypted object contents.
/// </summary>
/// <param name="request"></param>
private void GenerateEncryptedUploadPartRequest(UploadPartRequest request)
{
    string uploadID = request.UploadId;
    UploadPartEncryptionContext contextForEncryption = this.EncryptionClient.CurrentMultiPartUploadKeys[uploadID];
    byte[] envelopeKey = contextForEncryption.EnvelopeKey;
    byte[] IV = contextForEncryption.NextIV;
    EncryptionInstructions instructions = new EncryptionInstructions(EncryptionClient.EncryptionMaterials.MaterialsDescription, envelopeKey, IV);
    if (request.IsLastPart == false)
    {
        if (contextForEncryption.IsFinalPart == true)
        {
            throw new AmazonClientException("Last part has already been processed, cannot upload this as the last part");
        }
        if (request.PartNumber < contextForEncryption.PartNumber)
        {
            throw new AmazonClientException("Upload parts must be in the correct sequence");
        }
        request.InputStream = EncryptionUtils.EncryptUploadPartRequestUsingInstructions(request.InputStream, instructions);
        contextForEncryption.PartNumber = request.PartNumber;
    }
    else
    {
        request.InputStream = EncryptionUtils.EncryptRequestUsingInstruction(request.InputStream, instructions);
        contextForEncryption.IsFinalPart = true;
    }
    ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)request).RequestState.Add(AmazonS3EncryptionClient.S3CryptoStream, request.InputStream);
}
private UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initResponse) { var uploadRequest = new UploadPartRequest() { BucketName = this._fileTransporterRequest.BucketName, Key = this._fileTransporterRequest.Key, UploadId = initResponse.UploadId, PartNumber = partNumber, PartSize = this._partSize, ServerSideEncryptionCustomerMethod = this._fileTransporterRequest.ServerSideEncryptionCustomerMethod, ServerSideEncryptionCustomerProvidedKey = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKey, ServerSideEncryptionCustomerProvidedKeyMD5 = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKeyMD5, #if (BCL && !BCL45) Timeout = ClientConfig.GetTimeoutValue(this._config.DefaultTimeout, this._fileTransporterRequest.Timeout) #endif }; #if BCL if ((filePosition + this._partSize >= this._contentLength) && _s3Client is Amazon.S3.Encryption.AmazonS3EncryptionClient ) { uploadRequest.IsLastPart = true; uploadRequest.PartSize = 0; } #endif var progressHandler = new ProgressHandler(this.UploadPartProgressEventCallback); uploadRequest.StreamUploadProgressCallback += progressHandler.OnTransferProgress; uploadRequest.BeforeRequestEvent += this.RequestEventHandler; #if BCL if (this._fileTransporterRequest.IsSetFilePath()) { uploadRequest.FilePosition = filePosition; uploadRequest.FilePath = this._fileTransporterRequest.FilePath; } #elif WIN_RT || WINDOWS_PHONE if (this._fileTransporterRequest.IsSetStorageFile()) { uploadRequest.FilePosition = filePosition; uploadRequest.StorageFile = this._fileTransporterRequest.StorageFile; } #endif else { uploadRequest.InputStream = this._fileTransporterRequest.InputStream; } // If the InitiateMultipartUploadResponse indicates that this upload is // using KMS, force SigV4 for each UploadPart request bool useSigV4 = initResponse.ServerSideEncryptionMethod == ServerSideEncryptionMethod.AWSKMS; if (useSigV4) { uploadRequest.UseSigV4 = true; } return(uploadRequest); }
public static void AsynUploadParts(COSXML.CosXml cosXml, string bucket, string key, string uploadId, int partNumber, string srcPath)
{
    UploadPartRequest request = new UploadPartRequest(bucket, key, partNumber, uploadId, srcPath);
    // Set how long the request signature stays valid
    request.SetSign(TimeUtils.GetCurrentTime(TimeUnit.SECONDS), 600);
    request.SetCosProgressCallback(delegate(long completed, long total)
    {
        Console.WriteLine(String.Format("progress = {0} / {1} : {2:##.##}%", completed, total, completed * 100.0 / total));
    });
    // Execute the request asynchronously
    cosXml.UploadPart(request, delegate(CosResult result)
    {
        UploadPartResult getObjectResult = result as UploadPartResult;
        Console.WriteLine(getObjectResult.GetResultInfo());
        QLog.D("XIAO", result.GetResultInfo());
    }, delegate(CosClientException clientEx, CosServerException serverEx)
    {
        if (clientEx != null)
        {
            QLog.D("XIAO", clientEx.Message);
            Console.WriteLine("CosClientException: " + clientEx.StackTrace);
        }
        if (serverEx != null)
        {
            QLog.D("XIAO", serverEx.Message);
            Console.WriteLine("CosServerException: " + serverEx.GetInfo());
        }
    });
}
internal void Execute() { UploadPartRequest request = null; while ((request = getNextPartRequest()) != null) { this._lastException = null; try { this._uploader.addResponse(this._s3Client.UploadPart(request)); } catch (ThreadAbortException) { throw; } catch (Exception e) { this._lastException = e; lock (this._uploader.WAIT_FOR_COMPLETION_LOCK) { Monitor.Pulse(this._uploader.WAIT_FOR_COMPLETION_LOCK); } break; } } }
internal bool UploadNextPart()
{
    if (this.uploadedBytes == this.fileSize)
    {
        return false;
    }
    UploadPartRequest uploadRequest = this.BuildUploadPartRequest();
    long requestTotalBytes = 0;
    // Always record the part's total bytes so uploadedBytes advances even when no
    // external progress handler is registered.
    uploadRequest.StreamTransferProgress += ((object sender, StreamTransferProgressArgs e) =>
    {
        requestTotalBytes = e.TotalBytes;
        if (this.uploadProgressChanged != null)
        {
            uploadProgressChanged(this.uploadedBytes + e.TransferredBytes);
        }
    });
    var uploadPartResponse = this.client.UploadPartAsync(uploadRequest, this.cancellationToken);
    uploadPartResponse.Wait(this.cancellationToken);
    this.uploadResponses.Add(uploadPartResponse.Result);
    this.currentPartNumber++;
    this.uploadedBytes += requestTotalBytes;
    return true;
}
public static void UploadParts(COSXML.CosXml cosXml, string bucket, string key, string uploadId, int partNumber, string srcPath)
{
    try
    {
        UploadPartRequest request = new UploadPartRequest(bucket, key, partNumber, uploadId, srcPath);
        // Set how long the request signature stays valid
        request.SetSign(TimeUtils.GetCurrentTime(TimeUnit.SECONDS), 600);
        // Set the progress callback
        request.SetCosProgressCallback(delegate(long completed, long total)
        {
            Console.WriteLine(String.Format("{0} progress = {1} / {2} : {3:##.##}%", DateTime.Now.ToString(), completed, total, completed * 100.0 / total));
        });
        // Execute the request
        UploadPartResult result = cosXml.UploadPart(request);
        Console.WriteLine(result.GetResultInfo());
        QLog.D("XIAO", result.GetResultInfo());
    }
    catch (COSXML.CosException.CosClientException clientEx)
    {
        QLog.D("XIAO", clientEx.Message);
        Console.WriteLine("CosClientException: " + clientEx.StackTrace);
    }
    catch (COSXML.CosException.CosServerException serverEx)
    {
        QLog.D("XIAO", serverEx.Message);
        Console.WriteLine("CosServerException: " + serverEx.GetInfo());
    }
}
public static UploadPartCommand Create(IServiceClient client, Uri endpoint, ExecutionContext context, UploadPartRequest uploadPartRequest) { OssUtils.CheckBucketName(uploadPartRequest.BucketName); OssUtils.CheckObjectKey(uploadPartRequest.Key); if (string.IsNullOrEmpty(uploadPartRequest.UploadId)) { throw new ArgumentException("uploadId should be specified"); } if (!uploadPartRequest.PartNumber.HasValue) { throw new ArgumentException("partNumber should be specified"); } if (!uploadPartRequest.PartSize.HasValue) { throw new ArgumentException("partSize should be specified"); } if (uploadPartRequest.InputStream == null) { throw new ArgumentException("inputStream should be specified"); } if (uploadPartRequest.PartSize < 0 || uploadPartRequest.PartSize > OssUtils.MaxFileSize) { throw new ArgumentException("partSize not live in valid range"); } if (!OssUtils.IsPartNumberInRange(uploadPartRequest.PartNumber)) { throw new ArgumentException("partNumber not live in valid range"); } var conf = OssUtils.GetClientConfiguration(client); var originalStream = uploadPartRequest.InputStream; var streamLength = uploadPartRequest.PartSize.Value; // wrap input stream in PartialWrapperStream originalStream = new PartialWrapperStream(originalStream, streamLength); // setup progress var callback = uploadPartRequest.StreamTransferProgress; if (callback != null) { originalStream = OssUtils.SetupProgressListeners(originalStream, conf.ProgressUpdateInterval, client, callback); uploadPartRequest.InputStream = originalStream; } // wrap input stream in MD5Stream if (conf.EnalbeMD5Check) { var hashStream = new MD5Stream(originalStream, null, streamLength); uploadPartRequest.InputStream = hashStream; context.ResponseHandlers.Add(new MD5DigestCheckHandler(hashStream)); } return(new UploadPartCommand(client, endpoint, context, DeserializerFactory.GetFactory().CreateUploadPartResultDeserializer(uploadPartRequest.PartNumber.Value), uploadPartRequest)); }
private List <PartETag> UploadParts(string filePath, string objectKey, int partCount, string uploadId) { var fi = new FileInfo(filePath); var fileSize = fi.Length; var partSize = fileSize / partCount; if (fileSize % partSize != 0) { partCount++; } var partETags = new List <PartETag>(); using (var fs = File.Open(filePath, FileMode.Open)) { for (var i = 0; i < partCount; i++) { var skipBytes = (long)partSize * i; fs.Seek(skipBytes, 0); var size = (partSize < fileSize - skipBytes) ? partSize : (fileSize - skipBytes); var request = new UploadPartRequest(bucketName, objectKey, uploadId) { InputStream = fs, PartSize = size, PartNumber = i + 1 }; var result = client.UploadPart(request); partETags.Add(result.PartETag); } } return(partETags); }
/// <inheritdoc/> protected override void UpdateMultipartUploadEncryptionContext(UploadPartRequest uploadPartRequest) { string uploadID = uploadPartRequest.UploadId; UploadPartEncryptionContext encryptedUploadedContext = null; if (!EncryptionClient.CurrentMultiPartUploadKeys.TryGetValue(uploadID, out encryptedUploadedContext)) { throw new AmazonS3Exception("Encryption context for multipart upload not found"); } if (!uploadPartRequest.IsLastPart) { object stream = null; if (!((IAmazonWebServiceRequest)uploadPartRequest).RequestState.TryGetValue(AmazonS3EncryptionClient.S3CryptoStream, out stream)) { throw new AmazonS3Exception("Cannot retrieve S3 crypto stream from request state, hence cannot get Initialization vector for next uploadPart "); } var encryptionStream = stream as AESEncryptionUploadPartStream; if (encryptionStream != null) { encryptedUploadedContext.NextIV = encryptionStream.InitializationVector; } var aesGcmEncryptStream = stream as AesGcmEncryptStream; if (aesGcmEncryptStream != null) { encryptedUploadedContext.CryptoStream = aesGcmEncryptStream; } } }
/// <inheritdoc/> protected override void GenerateEncryptedUploadPartRequest(UploadPartRequest request) { string uploadID = request.UploadId; var contextForEncryption = this.EncryptionClient.CurrentMultiPartUploadKeys[uploadID]; var envelopeKey = contextForEncryption.EnvelopeKey; var IV = contextForEncryption.NextIV; var instructions = new EncryptionInstructions(EncryptionMaterials.MaterialsDescription, envelopeKey, IV); if (request.IsLastPart == false) { if (contextForEncryption.IsFinalPart) { throw new AmazonClientException("Last part has already been processed, cannot upload this as the last part"); } if (request.PartNumber < contextForEncryption.PartNumber) { throw new AmazonClientException($"Upload Parts must be in correct sequence. Request part number {request.PartNumber} must be >= to {contextForEncryption.PartNumber}"); } UpdateRequestInputStream(request, contextForEncryption, instructions); contextForEncryption.PartNumber = request.PartNumber; } else { UpdateRequestInputStream(request, contextForEncryption, instructions); contextForEncryption.IsFinalPart = true; } ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)request).RequestState.Add(AmazonS3EncryptionClient.S3CryptoStream, request.InputStream); }
public Task <UploadPartResponse> UploadPartAsync( string bucketName, string key, string uploadId, int partNumber, int partSize, Stream inputStream, Action <object, StreamTransferProgressArgs> progress = null, CancellationToken cancellationToken = default(CancellationToken)) { if (partSize > DefaultPartSize) { throw new ArgumentException($"Part size in multipart upload can't exceed {DefaultPartSize} B, but was {partSize} B, bucket: {bucketName}, key: {key}, part: {partNumber}"); } var request = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = uploadId, PartNumber = partNumber, PartSize = partSize, InputStream = inputStream, }; if (progress != null) { request.StreamTransferProgress += new EventHandler <StreamTransferProgressArgs>(progress); } return(_S3Client.UploadPartAsync(request, cancellationToken).EnsureSuccessAsync()); }
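A hedged sketch of a caller that drives the wrapper above over a whole stream. The InitiateMultipartUploadAsync/CompleteMultipartUploadAsync bookends, the MemoryStream buffering, and the bucketName/key/sourceStream/cancellationToken locals are assumptions layered on top of the standard AWS SDK, not part of the wrapper itself:
// Assumed driver (illustrative names): reads the source stream in DefaultPartSize chunks
// and uploads each chunk as one part, collecting the responses for completion.
var init = await _S3Client.InitiateMultipartUploadAsync(
    new InitiateMultipartUploadRequest { BucketName = bucketName, Key = key }, cancellationToken);
var partResponses = new List<UploadPartResponse>();
var buffer = new byte[DefaultPartSize];
int bytesRead;
int partNumber = 1;
// Note: a production reader would loop until the buffer is completely filled before
// uploading a part, so that only the final part can be smaller than the part size.
while ((bytesRead = await sourceStream.ReadAsync(buffer, 0, buffer.Length, cancellationToken)) > 0)
{
    using (var partStream = new MemoryStream(buffer, 0, bytesRead))
    {
        partResponses.Add(await UploadPartAsync(bucketName, key, init.UploadId, partNumber++, bytesRead, partStream, cancellationToken: cancellationToken));
    }
}
var completeRequest = new CompleteMultipartUploadRequest { BucketName = bucketName, Key = key, UploadId = init.UploadId };
completeRequest.AddPartETags(partResponses); // copies PartNumber/ETag pairs from the responses
await _S3Client.CompleteMultipartUploadAsync(completeRequest, cancellationToken);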
public void MultipartUploadAbortInMiddleTest()
{
    var sourceFile = Config.MultiUploadSampleFile;
    //get target object name
    var targetObjectKey = OssTestUtils.GetObjectKey(_className);
    var initRequest = new InitiateMultipartUploadRequest(_bucketName, targetObjectKey);
    var initResult = _ossClient.InitiateMultipartUpload(initRequest);
    // Set each part to 1 MB
    const int partSize = 1024 * 1024 * 1;
    var partFile = new FileInfo(sourceFile);
    // Calculate the number of parts
    var partCount = OssTestUtils.CalculatePartCount(partFile.Length, partSize);
    Assert.IsTrue(partCount > 1, "Source file is too small to perform multipart upload");
    LogUtility.LogMessage("File {0} is split into {1} parts for multipart upload", sourceFile, partCount);
    // Create a list to hold the ETag and PartNumber returned for each uploaded part
    var partETags = new List<PartETag>();
    //upload the file
    using (var fs = new FileStream(partFile.FullName, FileMode.Open))
    {
        //use partCount - 1, so that the last part is left
        for (var i = 0; i < partCount - 1; i++)
        {
            // Skip to the start of this part
            long skipBytes = partSize * i;
            fs.Position = skipBytes;
            // Calculate the size of this part
            var size = partSize < partFile.Length - skipBytes ? partSize : partFile.Length - skipBytes;
            // Create an UploadPartRequest and upload the part
            var uploadPartRequest = new UploadPartRequest(_bucketName, targetObjectKey, initResult.UploadId);
            uploadPartRequest.InputStream = fs;
            uploadPartRequest.PartSize = size;
            uploadPartRequest.PartNumber = (i + 1);
            var uploadPartResult = _ossClient.UploadPart(uploadPartRequest);
            // Save the returned PartETag into the list.
            partETags.Add(uploadPartResult.PartETag);
            //list parts which are uploaded
            var listPartsRequest = new ListPartsRequest(_bucketName, targetObjectKey, initResult.UploadId);
            var listPartsResult = _ossClient.ListParts(listPartsRequest);
            //there should be only 1 part left that was not uploaded
            Assert.AreEqual(i + 1, listPartsResult.Parts.Count(), "uploaded parts count is not expected");
        }
    }
    //abort the upload
    var abortRequest = new AbortMultipartUploadRequest(_bucketName, targetObjectKey, initResult.UploadId);
    _ossClient.AbortMultipartUpload(abortRequest);
}
/// <summary> /// Uploads one part of a multi-part content stream using file id obtained from <see cref="StartLargeFileResponse"/>. /// </summary> /// <param name="uploadUrl">The url used to upload this file.</param> /// <param name="partNumber">The part number of the file.</param> /// <param name="authorizationToken">The authorization token that must be used when uploading files.</param> /// <param name="content"> The content stream of the content payload.</param> /// <param name="progress">A progress action which fires every time the write buffer is cycled.</param> /// <exception cref="AuthenticationException">Thrown when authentication fails.</exception> /// <exception cref="CapExceededExecption">Thrown when a cap is exceeded or an account in bad standing.</exception> /// <exception cref="InvalidHashException">Thrown when a checksum hash is not valid.</exception> /// <exception cref="ApiException">Thrown when an error occurs during client operation.</exception> async Task <IApiResults <UploadPartResponse> > IStorageParts.UploadAsync (Uri uploadUrl, int partNumber, string authorizationToken, Stream content, IProgress <ICopyProgress> progress) { var request = new UploadPartRequest(uploadUrl, partNumber, authorizationToken); return(await _client.UploadPartAsync(request, content, progress, _cancellationToken)); }
protected virtual void HandleException(IExecutionContext executionContext, Exception exception)
{
    PutObjectRequest putObjectRequest = executionContext.RequestContext.OriginalRequest as PutObjectRequest;
    if (putObjectRequest != null)
    {
        HashStream val = putObjectRequest.InputStream as HashStream;
        if (val != null)
        {
            putObjectRequest.InputStream = val.GetNonWrapperBaseStream();
        }
    }
    UploadPartRequest uploadPartRequest = executionContext.RequestContext.OriginalRequest as UploadPartRequest;
    if (uploadPartRequest != null)
    {
        HashStream val2 = uploadPartRequest.InputStream as HashStream;
        if (val2 != null)
        {
            uploadPartRequest.InputStream = val2.GetNonWrapperBaseStream();
        }
    }
    if (executionContext.RequestContext.Request != null)
    {
        AmazonS3Client.CleanupRequest(executionContext.RequestContext.Request);
    }
}
public override string UploadChunk(string domain, string path, string uploadId, Stream stream, int chunkNumber, long chunkLength) { var request = new UploadPartRequest { BucketName = _bucket, Key = MakePath(domain, path), UploadId = uploadId, PartNumber = chunkNumber, InputStream = stream }; try { using (var s3 = GetClient()) { var response = s3.UploadPart(request); return(response.ETag); } } catch (AmazonS3Exception error) { if (error.ErrorCode == "NoSuchUpload") { AbortChunkedUpload(domain, path, uploadId); } throw; } }
public void MultipartUploadAbortInMiddleTest()
{
    var sourceFile = Config.MultiUploadTestFile;
    //get target object name
    var targetObjectKey = OssTestUtils.GetObjectKey(_className);
    var initRequest = new InitiateMultipartUploadRequest(_bucketName, targetObjectKey);
    var initResult = _ossClient.InitiateMultipartUpload(initRequest);
    // Set the part size
    const int partSize = 1024 * 1024 * 1;
    var partFile = new FileInfo(sourceFile);
    // Calculate the part count
    var partCount = OssTestUtils.CalculatePartCount(partFile.Length, partSize);
    Assert.IsTrue(partCount > 1, "Source file is too small to perform multipart upload");
    LogUtility.LogMessage("File {0} is split into {1} parts for multipart upload", sourceFile, partCount);
    // Create a list to save the results
    var partETags = new List<PartETag>();
    //upload the file
    using (var fs = new FileStream(partFile.FullName, FileMode.Open))
    {
        //use partCount - 1, so that the last part is left
        for (var i = 0; i < partCount - 1; i++)
        {
            // Skip to the start position
            long skipBytes = partSize * i;
            fs.Position = skipBytes;
            // Calculate the part size
            var size = partSize < partFile.Length - skipBytes ? partSize : partFile.Length - skipBytes;
            // Create an UploadPartRequest and upload the part
            var uploadPartRequest = new UploadPartRequest(_bucketName, targetObjectKey, initResult.UploadId);
            uploadPartRequest.InputStream = fs;
            uploadPartRequest.PartSize = size;
            uploadPartRequest.PartNumber = (i + 1);
            var uploadPartResult = _ossClient.UploadPart(uploadPartRequest);
            // Save the result
            partETags.Add(uploadPartResult.PartETag);
            //list parts which are uploaded
            var listPartsRequest = new ListPartsRequest(_bucketName, targetObjectKey, initResult.UploadId);
            var listPartsResult = _ossClient.ListParts(listPartsRequest);
            //there should be only 1 part left that was not uploaded
            Assert.AreEqual(i + 1, OssTestUtils.ToArray<Part>(listPartsResult.Parts).Count, "uploaded parts count is not expected");
        }
    }
    //abort the upload
    var abortRequest = new AbortMultipartUploadRequest(_bucketName, targetObjectKey, initResult.UploadId);
    _ossClient.AbortMultipartUpload(abortRequest);
}
/// <summary>
/// Uploads one large file stored in <paramref name="stream" /> to Amazon S3 storage in smaller parts.
/// </summary>
/// <param name="key">Unique identifier for an object within a bucket.</param>
/// <param name="bucket">Existing Amazon S3 bucket.</param>
/// <param name="stream">Stream with data to upload. The supplied stream always has its position reset to the origin.</param>
/// <returns>
/// Response from Amazon S3 storage after finishing the multipart upload.
/// The response contains metadata of the uploaded file.
/// </returns>
private CompleteMultipartUploadResponse MultiPartUploadFromStream(string key, string bucket, Stream stream)
{
    var uploadPartResponseList = new List<UploadPartResponse>();
    string uploadId = this.InitMultiPartUpload(key, bucket);
    UploadPartRequest uploadPartRequest = this.CreateUploadPartRequest(key, bucket, uploadId);
    stream.Seek(0L, SeekOrigin.Begin);
    Stream stream1 = stream;
    try
    {
        uploadPartRequest.PartNumber = 1;
        while (uploadPartRequest.FilePosition < stream1.Length)
        {
            uploadPartRequest.InputStream = stream1;
            uploadPartResponseList.Add(this.mS3Client.UploadPart(uploadPartRequest));
            uploadPartRequest.FilePosition += uploadPartRequest.PartSize;
            ++uploadPartRequest.PartNumber;
        }
        return this.CompleteMultiPartUploadProcess(uploadPartRequest.Key, uploadPartRequest.BucketName, uploadPartRequest.UploadId, uploadPartResponseList);
    }
    catch (AmazonS3Exception ex)
    {
        EventLogProvider.LogException("AmazonStorage", "MULTIPARTUPLOAD", ex, 0);
        this.AbortMultiPartUpload(uploadPartRequest.Key, uploadPartRequest.BucketName, uploadPartRequest.UploadId);
        throw;
    }
}
/// <summary> /// Add the next part to the upload. Parts will be committed in the order submitted. /// </summary> /// <param name="stream"></param> /// <param name="contentLength"></param> /// <param name="partNumber"></param> /// <returns>Part Number</returns> public async Task <int> AddPart(Stream stream, long contentLength, int partNumber) { ValidateState(); MultipartUtils.ValidatePartNumber(partNumber); var request = new UploadPartRequest() { BucketName = _bucketName, NamespaceName = _namespaceName, ObjectName = _objectName, ContentLength = contentLength, UploadId = _multipartManifest.UploadId, UploadPartNum = partNumber, UploadPartBody = stream, OpcClientRequestId = CreateClientRequestId($"Part{partNumber}") }; if (_enforceContentMD5Upload) { request.ContentMD5 = MultipartUtils.CalculateMd5(stream); if (stream.CanSeek) { stream.Position = 0; _logger.Info($"MD5: {request.ContentMD5}"); } else { throw new NotSupportedException("Stream cannot be seeked, Please re-try with re-readable streams."); } } await _transferManager.StartTransfer(request).ConfigureAwait(false); return(partNumber); }
private static UploadPartRequest GetUploadPartRequest(string bucketName, string zippedKey, string uploadId, int partNumber, bool lastFilePart, MemoryStream inputStream) { UploadPartRequest uploadPartRequest; if (!lastFilePart) { uploadPartRequest = new UploadPartRequest { BucketName = bucketName, Key = zippedKey, UploadId = uploadId, PartNumber = partNumber, PartSize = inputStream.Length, InputStream = inputStream }; } else { uploadPartRequest = new UploadPartRequest { BucketName = bucketName, Key = zippedKey, UploadId = uploadId, PartNumber = partNumber, InputStream = inputStream, }; } return(uploadPartRequest); }
/// <summary> /// Send up the file in chunks of partSize bytes /// </summary> /// <param name="s3Client">S3 client object.</param> /// <param name="uploadTarget">Upload target returned from the Upload REST API.</param> /// <param name="fileName">Name of the file to be uploaded.</param> /// <param name="uploadId">Upload ID returned from the S3 server.</param> /// <param name="partSize">Size of chunks to upload.</param> /// <returns>List of upload part responses from the server.</returns> public static List <UploadPartResponse> UploadParts( AmazonS3Client s3Client, string uploadTarget, string fileName, string uploadId, long partSize) { long fileSize = new System.IO.FileInfo(fileName).Length; string fileKey = GetFileKey(uploadTarget, fileName); List <UploadPartResponse> uploadResponses = new List <UploadPartResponse>(); long filePosition = 0; for (int i = 1; filePosition < fileSize; i++) { UploadPartRequest uploadRequest = new UploadPartRequest { BucketName = Common.UploadBucketName, Key = fileKey, UploadId = uploadId, PartNumber = i, PartSize = partSize, FilePosition = filePosition, FilePath = fileName }; // add the response to the list since it will be needed to complete the upload uploadResponses.Add(s3Client.UploadPart(uploadRequest)); filePosition += partSize; } return(uploadResponses); }
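The doc comment above notes that the collected responses are needed to complete the upload. A hedged follow-up sketch, assuming the same s3Client, the sample's Common.UploadBucketName and GetFileKey helpers, and the standard AWS SDK completion/abort calls; none of this is part of the original sample:
// Assumed completion step: finish the upload, or abort it so the already-uploaded
// parts do not keep accruing storage charges.
try
{
    List<UploadPartResponse> partResponses = UploadParts(s3Client, uploadTarget, fileName, uploadId, partSize);
    var completeRequest = new CompleteMultipartUploadRequest
    {
        BucketName = Common.UploadBucketName,
        Key = GetFileKey(uploadTarget, fileName),
        UploadId = uploadId
    };
    completeRequest.AddPartETags(partResponses); // copies PartNumber/ETag pairs from the responses
    s3Client.CompleteMultipartUpload(completeRequest);
}
catch (AmazonS3Exception)
{
    s3Client.AbortMultipartUpload(new AbortMultipartUploadRequest
    {
        BucketName = Common.UploadBucketName,
        Key = GetFileKey(uploadTarget, fileName),
        UploadId = uploadId
    });
    throw;
}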
public Task <UploadPartResponse> UploadPartAsync(string bucketName, string resource, int partNumber, string uploadId, Stream content, Action <UploadPartRequest> config = null, CancellationToken token = default) { UploadPartRequest req = new UploadPartRequest(bucketName, resource, partNumber, uploadId, content); config?.Invoke(req); return(_operations.UploadPartAsync(req, token)); }
public void Construct() { var a = new UploadPartRequest("s3.amazon.com", "bucket", "key", "uploadId", 1); Assert.Equal(new Uri("https://s3.amazon.com/bucket/key?partNumber=1&uploadId=uploadId"), a.RequestUri); Assert.Equal("uploadId", a.UploadId); Assert.Equal(1, a.PartNumber); }
private static async Task<PartETag> ProcessChunk(UploadPartRequest upr)
{
    Console.WriteLine(string.Format("Sending chunk {0} starting at position {1}", upr.PartNumber, upr.FilePosition));
    // upload the chunk and return a new PartETag when the upload completes
    UploadPartResponse response = await AWSS3Factory.getS3Client().UploadPartAsync(upr, new System.Threading.CancellationToken());
    return new PartETag(response.PartNumber, response.ETag);
}
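A hedged sketch of how ProcessChunk might be fanned out over several prepared requests, with the resulting PartETags used to finish the upload. The BuildChunkRequests helper and the bucketName/objectKey/uploadId variables are assumptions, and the code presumes System.Linq is imported:
// Assumed orchestration: upload all chunks concurrently, then complete the multipart upload.
List<UploadPartRequest> chunkRequests = BuildChunkRequests(); // hypothetical helper producing one request per chunk
PartETag[] partETags = await Task.WhenAll(chunkRequests.Select(ProcessChunk));
var completeRequest = new CompleteMultipartUploadRequest
{
    BucketName = bucketName,
    Key = objectKey,
    UploadId = uploadId,
    PartETags = partETags.ToList()
};
await AWSS3Factory.getS3Client().CompleteMultipartUploadAsync(completeRequest, new System.Threading.CancellationToken());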
/// <summary>
/// Ends an asynchronous upload-part request.
/// </summary>
/// <param name="ar">The result of the asynchronous request.</param>
/// <returns>The response of the upload-part request.</returns>
public UploadPartResponse EndUploadPart(IAsyncResult ar)
{
    UploadPartResponse response = this.EndDoRequest<UploadPartRequest, UploadPartResponse>(ar);
    HttpObsAsyncResult result = ar as HttpObsAsyncResult;
    UploadPartRequest request = result.AdditionalState as UploadPartRequest;
    response.PartNumber = request.PartNumber;
    return response;
}
private UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initResponse) { var uploadRequest = new UploadPartRequest() { BucketName = this._fileTransporterRequest.BucketName, Key = this._fileTransporterRequest.Key, UploadId = initResponse.UploadId, PartNumber = partNumber, PartSize = this._partSize, ServerSideEncryptionCustomerMethod = this._fileTransporterRequest.ServerSideEncryptionCustomerMethod, ServerSideEncryptionCustomerProvidedKey = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKey, ServerSideEncryptionCustomerProvidedKeyMD5 = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKeyMD5, #if (BCL && !BCL45) Timeout = ClientConfig.GetTimeoutValue(this._config.DefaultTimeout, this._fileTransporterRequest.Timeout), #endif DisableMD5Stream = this._fileTransporterRequest.DisableMD5Stream, DisablePayloadSigning = this._fileTransporterRequest.DisablePayloadSigning, ChecksumAlgorithm = this._fileTransporterRequest.ChecksumAlgorithm }; if ((filePosition + this._partSize >= this._contentLength) && _s3Client is Amazon.S3.Internal.IAmazonS3Encryption) { uploadRequest.IsLastPart = true; uploadRequest.PartSize = 0; } var progressHandler = new ProgressHandler(this.UploadPartProgressEventCallback); ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).StreamUploadProgressCallback += progressHandler.OnTransferProgress; ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).AddBeforeRequestHandler(this.RequestEventHandler); if (this._fileTransporterRequest.IsSetFilePath()) { uploadRequest.FilePosition = filePosition; uploadRequest.FilePath = this._fileTransporterRequest.FilePath; } else { uploadRequest.InputStream = this._fileTransporterRequest.InputStream; } // If the InitiateMultipartUploadResponse indicates that this upload is // using KMS, force SigV4 for each UploadPart request bool useSigV4 = initResponse.ServerSideEncryptionMethod == ServerSideEncryptionMethod.AWSKMS; if (useSigV4) { ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).SignatureVersion = SignatureVersion.SigV4; } uploadRequest.CalculateContentMD5Header = this._fileTransporterRequest.CalculateContentMD5Header; return(uploadRequest); }
/// <summary> /// Sample code to contrast uploading a file using Amazon S3's Multi-Part Upload API /// </summary> /// <param name="s3Client"></param> /// <param name="bucketName"></param> /// <param name="fileName"></param> static void UploadUsingMultiPartAPI(IAmazonS3 s3Client, string bucketName, string fileName) { const string objectKey = "multipart/myobject"; // tell S3 we're going to upload an object in multiple parts and receive an upload ID // in return var initializeUploadRequest = new InitiateMultipartUploadRequest { BucketName = bucketName, Key = objectKey }; var initializeUploadResponse = s3Client.InitiateMultipartUpload(initializeUploadRequest); // this ID must accompany all parts and the final 'completed' call var uploadID = initializeUploadResponse.UploadId; // Send the file (synchronously) using 4*5MB parts - note we pass the upload id // with each call. For each part we need to log the returned etag value to pass // to the completion call var partETags = new List <PartETag>(); var partSize = 5 * ONE_MEG; // this is the minimum part size allowed for (var partNumber = 0; partNumber < 4; partNumber++) { // part numbers must be between 1 and 1000 var logicalPartNumber = partNumber + 1; var uploadPartRequest = new UploadPartRequest { BucketName = bucketName, Key = objectKey, UploadId = uploadID, PartNumber = logicalPartNumber, PartSize = partSize, FilePosition = partNumber * partSize, FilePath = fileName }; var partUploadResponse = s3Client.UploadPart(uploadPartRequest); partETags.Add(new PartETag { PartNumber = logicalPartNumber, ETag = partUploadResponse.ETag }); } var completeUploadRequest = new CompleteMultipartUploadRequest { BucketName = bucketName, Key = objectKey, UploadId = uploadID, PartETags = partETags }; s3Client.CompleteMultipartUpload(completeUploadRequest); }
private static List<PartETag> UploadParts(String bucketName, String objectName, String fileToUpload, String uploadId, int partSize) { var fi = new FileInfo(fileToUpload); var fileSize = fi.Length; var partCount = fileSize / partSize; if (fileSize % partSize != 0) { partCount++; } var partETags = new List<PartETag>(); for (var i = 0; i < partCount; i++) { using (var fs = File.Open(fileToUpload, FileMode.Open)) { var skipBytes = (long)partSize * i; fs.Seek(skipBytes, 0); var size = (partSize < fileSize - skipBytes) ? partSize : (fileSize - skipBytes); var request = new UploadPartRequest(bucketName, objectName, uploadId) { InputStream = fs, PartSize = size, PartNumber = i + 1 }; var result = _ossClient.UploadPart(request); partETags.Add(result.PartETag); } } return partETags; }
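To finish the upload, the ETags collected above feed a CompleteMultipartUploadRequest; a short assumed sketch that follows the same PartETags cast pattern used in the other OSS snippets in this collection (bucketName, objectName, fileToUpload, uploadId and partSize are the same values passed to UploadParts):
// Assumed completion step for the OSS sample above.
var partETags = UploadParts(bucketName, objectName, fileToUpload, uploadId, partSize);
var completeRequest = new CompleteMultipartUploadRequest(bucketName, objectName, uploadId);
((List<PartETag>)completeRequest.PartETags).AddRange(partETags);
_ossClient.CompleteMultipartUpload(completeRequest);
Console.WriteLine("Multipart upload completed.");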
private static void AsyncUploadParts(String bucketName, String objectName, String fileToUpload, String uploadId, int partSize) { var fi = new FileInfo(fileToUpload); var fileSize = fi.Length; var partCount = fileSize / partSize; if (fileSize % partSize != 0) { partCount++; } var ctx = new UploadPartContext() { BucketName = bucketName, ObjectName = objectName, UploadId = uploadId, TotalParts = partCount, CompletedParts = 0, SyncLock = new object(), PartETags = new List<PartETag>(), WaitEvent = new ManualResetEvent(false) }; for (var i = 0; i < partCount; i++) { var fs = new FileStream(fileToUpload, FileMode.Open, FileAccess.Read, FileShare.Read); var skipBytes = (long)partSize * i; fs.Seek(skipBytes, 0); var size = (partSize < fileSize - skipBytes) ? partSize : (fileSize - skipBytes); var request = new UploadPartRequest(bucketName, objectName, uploadId) { InputStream = fs, PartSize = size, PartNumber = i + 1 }; _ossClient.BeginUploadPart(request, UploadPartCallback, new UploadPartContextWrapper(ctx, fs, i + 1)); } ctx.WaitEvent.WaitOne(); }