/// <summary>
/// Uploads a single local file to S3 as a resumable multipart upload, then
/// records the update and submits the content for search indexing.
/// Files under a top-level "shared" folder are attributed to the user named by
/// the second path segment.
/// </summary>
/// <param name="filePath">Absolute path of the local file to upload.</param>
/// <param name="bucketKey">Name of the destination S3 bucket.</param>
private void Addobject(string filePath, string bucketKey)
{
    try
    {
        var attribute = File.GetAttributes(filePath);
        string relativePath = filePath.Replace(Utilities.Path + "\\", "").Replace("\\", "/");

        // NOTE(review): equality comparisons on a [Flags] enum only match these
        // exact combinations — kept as-is to preserve behavior, but confirm the
        // intent was not HasFlag-style checks.
        if (attribute != FileAttributes.Directory && attribute != (FileAttributes.Archive | FileAttributes.Hidden))
        {
            if (File.Exists(filePath) && !Utilities.IsFileUsedbyAnotherProcess(filePath))
            {
                bool isSharedFile = false;
                string username = string.Empty;

                // "shared/<user>/<path>" entries belong to another user: strip the
                // prefix and remember the owner so the update is attributed to them.
                if (relativePath.ToLower().IndexOf("shared") == 0)
                {
                    isSharedFile = true;
                    var folders = relativePath.Split('/');
                    int firstSegment;
                    if (folders.Length > 1)
                    {
                        username = folders[1];
                        firstSegment = 2;
                    }
                    else
                    {
                        firstSegment = 1;
                    }
                    relativePath = "";
                    for (int i = firstSegment; i < folders.Length; i++)
                        relativePath += folders[i] + "/";
                    // BUGFIX: TrimEnd instead of Substring(0, Length - 1); the
                    // original threw ArgumentOutOfRangeException for a bare
                    // "shared" entry (empty rebuilt path), silently aborting the
                    // upload via the outer catch.
                    relativePath = relativePath.TrimEnd('/');
                }

                var appUpdateInfo = new AppUpdateInfo
                {
                    Key = relativePath.Replace("\\", "/"),
                    LastModifiedTime = new FileInfo(filePath).LastWriteTime,
                    Status = UpdateStatus.Update
                };
                _processingFiles.Add(filePath);
                try
                {
                    var uploadResponses = new List<UploadPartResponse>();
                    byte[] bytes;
                    long contentLength;
                    // FileShare.ReadWrite: the watched file may still be open in
                    // the authoring application, so take no exclusive lock.
                    using (var fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                    {
                        contentLength = fileStream.Length;
                        bytes = new byte[contentLength];
                        // BUGFIX: Stream.Read may return fewer bytes than asked;
                        // loop until the whole file is buffered (the original
                        // single Read call could silently truncate).
                        int offset = 0;
                        while (offset < contentLength)
                        {
                            int read = fileStream.Read(bytes, offset, (int)(contentLength - offset));
                            if (read == 0)
                                break;
                            offset += read;
                        }
                    }

                    if (contentLength > 0)
                    {
                        // Resume support: continue after the last part already
                        // uploaded for this key, if any.
                        var lastUploadedPartdetail = GetLastUploadedPartResponse(relativePath, bucketKey, uploadResponses);
                        int alreadyUploadedParts = lastUploadedPartdetail.LastPartNumber;
                        string uploadId;
                        if (string.IsNullOrEmpty(lastUploadedPartdetail.UploadId))
                        {
                            InitiateMultipartUploadRequest initiateRequest =
                                new InitiateMultipartUploadRequest().WithBucketName(bucketKey).WithKey(relativePath);
                            InitiateMultipartUploadResponse initResponse = _amazons3.InitiateMultipartUpload(initiateRequest);
                            uploadId = initResponse.UploadId;
                        }
                        else
                            uploadId = lastUploadedPartdetail.UploadId;

                        try
                        {
                            long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB (S3 minimum part size)
                            long filePosition = partSize * alreadyUploadedParts;
                            for (int i = alreadyUploadedParts + 1; filePosition < contentLength; i++)
                            {
                                // Abort if the user modified the file mid-upload:
                                // the buffered bytes no longer match the disk.
                                if (File.Exists(filePath) && appUpdateInfo.LastModifiedTime == new FileInfo(filePath).LastWriteTime)
                                {
                                    long chunkSize = Math.Min(partSize, contentLength - filePosition);
                                    byte[] bytesToStream = new byte[chunkSize];
                                    Array.Copy(bytes, filePosition, bytesToStream, 0, chunkSize);
                                    Stream stream = new MemoryStream(bytesToStream);

                                    UploadPartRequest uploadRequest = new UploadPartRequest()
                                        .WithBucketName(bucketKey)
                                        .WithKey(relativePath)
                                        .WithUploadId(uploadId)
                                        .WithPartNumber(i)
                                        .WithPartSize(partSize)
                                        .WithFilePosition(filePosition)
                                        .WithTimeout(1000000000);
                                    uploadRequest.WithInputStream(stream);

                                    // Upload the part and persist the response so
                                    // an interrupted upload can resume later.
                                    var response = _amazons3.UploadPart(uploadRequest);
                                    WriteResponseToFile(relativePath, bucketKey, uploadId, appUpdateInfo.LastModifiedTime, response);
                                    uploadResponses.Add(response);
                                    filePosition += partSize;
                                    ModifySyncStatus("Uploaded",
                                        contentLength <= filePosition ? contentLength : filePosition,
                                        contentLength, relativePath);
                                }
                                else
                                {
                                    // File changed or vanished: abort the upload
                                    // and drop all resume bookkeeping.
                                    _processingFiles.Remove(filePath);
                                    RemoveConfig(relativePath, lastUploadedPartdetail.BucketKey);
                                    _amazons3.AbortMultipartUpload(new AbortMultipartUploadRequest()
                                        .WithBucketName(bucketKey)
                                        .WithKey(relativePath)
                                        .WithUploadId(uploadId));
                                    return;
                                }
                            }

                            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest()
                                .WithBucketName(bucketKey)
                                .WithKey(relativePath)
                                .WithUploadId(uploadId)
                                .WithPartETags(uploadResponses);
                            CompleteMultipartUploadResponse completeUploadResponse = _amazons3.CompleteMultipartUpload(completeRequest);
                            RemoveConfig(relativePath, completeUploadResponse.BucketName);
                            SetAcltoObject(relativePath);
                            if (isSharedFile)
                            {
                                ProcessApplicationUpdates(appUpdateInfo, true, username);
                            }
                            else
                                ProcessApplicationUpdates(appUpdateInfo, false, string.Empty);
                            _processingFiles.Remove(filePath);
                            UploadContentForSearch(filePath, relativePath);
                        }
                        catch (Exception)
                        {
                            // Clean up the failed attempt and requeue the file
                            // for a fresh upload.
                            _processingFiles.Remove(filePath);
                            _amazons3.AbortMultipartUpload(new AbortMultipartUploadRequest()
                                .WithBucketName(bucketKey)
                                .WithKey(relativePath)
                                .WithUploadId(uploadId));
                            RemoveConfig(relativePath, Utilities.MyConfig.BucketKey);
                            // BUGFIX: the original called Add in BOTH branches of
                            // the ContainsKey check; the else branch always threw
                            // ArgumentException (key already present). Enqueue
                            // only when not already queued.
                            if (!_fileQueue.ContainsKey(filePath))
                                _fileQueue.Add(filePath, new FileQueue { Type = WatcherChangeTypes.Created, Name = Path.GetFileName(filePath) });
                        }
                    }
                }
                catch (Exception)
                {
                    return;
                }
            }
        }
    }
    catch (Exception)
    {
        return;
    }
}
/// <summary>
/// Captures a snapshot of one part's upload progress.
/// </summary>
/// <param name="req">The part request whose progress changed (supplies the part number).</param>
/// <param name="e">Progress data reported by the S3 client.</param>
public PartUploadProgressChangedArgs(UploadPartRequest req, UploadPartProgressArgs e)
{
    TotalBytes = e.TotalBytes;
    TransferredBytes = e.TransferredBytes;
    PercentDone = e.PercentDone;
    PartNumber = req.PartNumber;
}
/// <summary>
/// <para>Uploads a part in a multipart upload.</para>
/// </summary>
/// <remarks>
/// <para>
/// After you initiate a multipart upload and upload one or more parts, you must either complete or abort
/// the multipart upload in order to stop getting charged for storage of the uploaded parts. Once you
/// complete or abort the multipart upload, Amazon S3 will release the stored parts and stop charging you
/// for their storage.
/// </para>
/// </remarks>
/// <param name="request">Container for the necessary parameters to execute the UploadPart service method on AmazonS3.</param>
///
/// <returns>The response from the UploadPart service method, as returned by AmazonS3.</returns>
public UploadPartResponse UploadPart(UploadPartRequest request)
{
    var task = UploadPartAsync(request);
    try
    {
        return task.Result;
    }
    catch (AggregateException e)
    {
        // BUGFIX: "throw e.InnerException" resets the inner exception's stack
        // trace. ExceptionDispatchInfo rethrows it with the original trace
        // intact, matching the pattern used by the other sync UploadPart
        // wrapper in this codebase.
        System.Runtime.ExceptionServices.ExceptionDispatchInfo.Capture(e.InnerException).Throw();
        return null; // unreachable: Throw() always throws
    }
}
/// <summary>
/// <para>Uploads a part in a multipart upload.</para>
/// </summary>
///
/// <param name="uploadPartRequest">Container for the necessary parameters to execute the UploadPart service method on AmazonS3.</param>
///
/// <returns>The response from the UploadPart service method, as returned by AmazonS3.</returns>
/// <param name="cancellationToken">
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
public async Task<UploadPartResponse> UploadPartAsync(UploadPartRequest uploadPartRequest, CancellationToken cancellationToken = default(CancellationToken))
{
    // Marshal the typed request onto the wire and parse the service reply.
    var requestMarshaller = new UploadPartRequestMarshaller();
    var responseUnmarshaller = UploadPartResponseUnmarshaller.GetInstance();
    return await Invoke<IRequest, UploadPartRequest, UploadPartResponse>(
            uploadPartRequest, requestMarshaller, responseUnmarshaller, signer, cancellationToken)
        .ConfigureAwait(continueOnCapturedContext: false);
}
/// <summary>
/// <para>Uploads a part in a multipart upload.</para>
/// </summary>
/// <remarks>
/// <para>
/// After you initiate a multipart upload and upload one or more parts, you must either complete or abort
/// the multipart upload in order to stop getting charged for storage of the uploaded parts. Once you
/// complete or abort the multipart upload, Amazon S3 will release the stored parts and stop charging you
/// for their storage.
/// </para>
/// </remarks>
/// <param name="request">Container for the necessary parameters to execute the UploadPart service method on AmazonS3.</param>
///
/// <returns>The response from the UploadPart service method, as returned by AmazonS3.</returns>
public UploadPartResponse UploadPart(UploadPartRequest request)
{
    var uploadTask = UploadPartAsync(request);
    try
    {
        return uploadTask.Result;
    }
    catch (AggregateException e)
    {
        // Rethrow the real service exception with its original stack trace.
        ExceptionDispatchInfo.Capture(e.InnerException).Throw();
        return null; // never reached: Throw() rethrows
    }
}
/// <summary>
/// Runs the multipart upload: initiates the upload, queues one
/// UploadPartRequest per part, executes them on a worker-thread pool, then
/// completes the upload. On any failure the upload is shut down and the
/// exception rethrown.
/// </summary>
public override void Execute()
{
    // Initiate the multipart upload, carrying over the caller's ACL,
    // content type, storage class and encryption settings.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
    {
        BucketName = this._fileTransporterRequest.BucketName,
        Key = this._fileTransporterRequest.Key,
        CannedACL = this._fileTransporterRequest.CannedACL,
        ContentType = determineContentType(),
        StorageClass = this._fileTransporterRequest.StorageClass,
        ServerSideEncryptionMethod = this._fileTransporterRequest.ServerSideEncryptionMethod
    };
    initRequest.BeforeRequestEvent += this.RequestEventHandler;
    // Only copy metadata/headers when the caller actually supplied some.
    if (this._fileTransporterRequest.Metadata != null && this._fileTransporterRequest.Metadata.Count > 0)
        initRequest.Metadata = this._fileTransporterRequest.Metadata;
    if (this._fileTransporterRequest.Headers != null && this._fileTransporterRequest.Headers.Count > 0)
        initRequest.Headers = this._fileTransporterRequest.Headers;
    InitiateMultipartUploadResponse initResponse = this._s3Client.InitiateMultipartUpload(initRequest);
    _logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId);
    try
    {
        _logger.DebugFormat("Queue up the UploadPartRequests to be executed");
        long filePosition = 0;
        // One request per part; part numbers are 1-based.
        for (int i = 1; filePosition < this._contentLength; i++)
        {
            UploadPartRequest uploadRequest = new UploadPartRequest()
            {
                BucketName = this._fileTransporterRequest.BucketName,
                Key = this._fileTransporterRequest.Key,
                UploadId = initResponse.UploadId,
                PartNumber = i,
                PartSize = this._partSize,
#if (BCL && !BCL45)
                Timeout = ClientConfig.GetTimeoutValue(this._config.DefaultTimeout,this._fileTransporterRequest.Timeout)
#endif
            };
            // For the encryption client, the final part is marked IsLastPart
            // with PartSize = 0 — presumably so the client can flush its final
            // cipher block; confirm against the encryption client's contract.
            if ((filePosition + this._partSize >= this._contentLength)
                && _s3Client is AmazonS3EncryptionClient)
            {
                uploadRequest.IsLastPart = true;
                uploadRequest.PartSize = 0;
            }
            uploadRequest.StreamUploadProgressCallback += this.uploadPartProgressEventCallback;
            uploadRequest.BeforeRequestEvent += this.RequestEventHandler;
            // File-backed uploads give each part its own offset; stream-backed
            // uploads share the single input stream.
            if (this._fileTransporterRequest.IsSetFilePath())
            {
                uploadRequest.FilePosition = filePosition;
                uploadRequest.FilePath = this._fileTransporterRequest.FilePath;
            }
            else
            {
                uploadRequest.InputStream = this._fileTransporterRequest.InputStream;
            }
            this._partsToUpload.Enqueue(uploadRequest);
            filePosition += this._partSize;
        }
        this._totalNumberOfParts = this._partsToUpload.Count;
        _logger.DebugFormat("Starting threads to execute the {0} UploadPartRequests in the queue", this._totalNumberOfParts);
        startInvokerPool();
        _logger.DebugFormat("Waiting for threads to complete. ({0})", initResponse.UploadId);
        waitTillAllThreadsComplete();
        _logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId);
        // All parts are up: stitch them together with their collected ETags.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        {
            BucketName = this._fileTransporterRequest.BucketName,
            Key = this._fileTransporterRequest.Key,
            UploadId = initResponse.UploadId
        };
        compRequest.AddPartETags(this._uploadResponses);
        compRequest.BeforeRequestEvent += this.RequestEventHandler;
        this._s3Client.CompleteMultipartUpload(compRequest);
        _logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId);
    }
    catch (Exception e)
    {
        // Abort/clean up the server-side upload before propagating.
        _logger.Error(e, "Exception while uploading. ({0})", initResponse.UploadId);
        shutdown(initResponse.UploadId);
        throw;
    }
    finally
    {
        // Close a caller-supplied stream only when asked to (AutoCloseStream)
        // and only when we were not reading from a file path.
        if (this._fileTransporterRequest.InputStream != null
            && !this._fileTransporterRequest.IsSetFilePath()
            && this._fileTransporterRequest.AutoCloseStream)
        {
            this._fileTransporterRequest.InputStream.Close();
        }
        if (_logger != null)
        {
            _logger.Flush();
        }
    }
}
/// <summary>
/// Command-line entry point: performs a resumable multipart upload to S3.
/// Invoke as "bucket filename" to start a new upload, or with no arguments to
/// resume the upload recorded in upload.dat.
/// </summary>
public static void Main(string[] args)
{
    XmlSerializerFactory factory = new XmlSerializerFactory();
    XmlSerializer serializer = factory.CreateSerializer(typeof(UploadState));
    UploadState us;
    if (args.Length == 2)
    {
        us = new UploadState(args[1], args[0]);
    }
    else if (args.Length == 0 && new FileInfo("upload.dat").Exists)
    {
        // No arguments: resume the previously saved upload.
        using (FileStream fs = new FileStream("upload.dat", FileMode.Open))
        {
            us = (UploadState)serializer.Deserialize(fs);
        }
    }
    else
    {
        Console.Error.WriteLine("Usage: bucket filename");
        return;
    }
    try
    {
        NameValueCollection appConfig = ConfigurationManager.AppSettings;
        AmazonS3 s3Client = AWSClientFactory.CreateAmazonS3Client(
            appConfig["AWSAccessKey"], appConfig["AWSSecretKey"]);
        // First run for this file: start the multipart upload and persist the
        // id immediately so a crash can resume it.
        if (string.IsNullOrEmpty(us.UploadId))
        {
            InitiateMultipartUploadRequest req = new InitiateMultipartUploadRequest()
                .WithBucketName(us.BucketName)
                .WithKey(us.Key);
            us.UploadId = s3Client.InitiateMultipartUpload(req).UploadId;
            SaveState(serializer, us);
        }
        while (us.FilePosition < us.FileLength)
        {
            try
            {
                Console.WriteLine("Uploading part {0} of {1}", us.PartNumber, us.NumChunks);
                UploadPartRequest ureq = new UploadPartRequest()
                    .WithBucketName(us.BucketName)
                    .WithFilePath(us.FileName)
                    .WithFilePosition(us.FilePosition)
                    .WithPartNumber(us.PartNumber)
                    // Last part may be shorter than the standard chunk size.
                    .WithPartSize(us.FileLength - us.FilePosition > us.ChunkSize
                        ? us.ChunkSize
                        : us.FileLength - us.FilePosition)
                    .WithGenerateChecksum(true)
                    .WithKey(us.Key)
                    .WithUploadId(us.UploadId)
                    .WithSubscriber(new EventHandler<UploadPartProgressArgs>(ShowProgress));
                // Record (or overwrite, when retrying) the ETag for this part.
                if (us.Responses.Count > us.PartNumber - 1)
                {
                    us.Responses[us.PartNumber - 1] = new PartETag(us.PartNumber, s3Client.UploadPart(ureq).ETag);
                }
                else
                {
                    us.Responses.Insert(us.PartNumber - 1, new PartETag(us.PartNumber, s3Client.UploadPart(ureq).ETag));
                }
                us.PartNumber++;
                us.FilePosition += us.ChunkSize;
                SaveState(serializer, us);
            }
            catch (System.Net.WebException e)
            {
                // Transient network failure: log it and retry the same part
                // (the original swallowed the error silently).
                Console.Error.WriteLine("Retrying part {0}: {1}", us.PartNumber, e.Message);
            }
        }
        CompleteMultipartUploadRequest creq = new CompleteMultipartUploadRequest()
            .WithPartETags(us.Responses)
            .WithBucketName(us.BucketName)
            .WithUploadId(us.UploadId)
            .WithKey(us.Key);
        CompleteMultipartUploadResponse cresp = s3Client.CompleteMultipartUpload(creq);
        System.Console.WriteLine("File available at {0}", cresp.Location);
        File.Delete("upload.dat");
    }
    catch (AmazonS3Exception e)
    {
        Console.Error.WriteLine(e);
    }
}

/// <summary>
/// Persists the upload state so an interrupted run can resume.
/// BUGFIX: uses FileMode.Create (truncate-then-write); the original
/// FileMode.OpenOrCreate left stale trailing bytes behind whenever the new
/// XML was shorter than the old, corrupting the state file.
/// </summary>
private static void SaveState(XmlSerializer serializer, UploadState us)
{
    using (FileStream fs = new FileStream("upload.dat", FileMode.Create))
    {
        serializer.Serialize(fs, us);
    }
}
/// <summary>
/// Builds the request for the next part of the in-progress multipart upload,
/// reading from the shared file stream at the current uploaded-bytes offset.
/// </summary>
/// <returns>A fully populated part request for the current part number.</returns>
private UploadPartRequest BuildUploadPartRequest()
{
    var request = new UploadPartRequest
    {
        UploadId = this.initResponse.UploadId,
        BucketName = this.bucketName,
        Key = this.fileName,
        InputStream = this.fileStream,
        FilePosition = this.uploadedBytes,
        PartNumber = this.currentPartNumber,
        PartSize = this.partSize,
    };
    return request;
}
/// <summary>
/// Uploads one part of an in-progress multipart upload by writing
/// <paramref name="contents"/> to a temp file and sending that file to S3.
/// </summary>
/// <param name="bucket">Destination bucket name.</param>
/// <param name="key">Destination object key.</param>
/// <param name="uploadId">Id of the multipart upload this part belongs to.</param>
/// <param name="uploadPart">Part number (must be non-zero).</param>
/// <param name="contents">Part payload as text.</param>
/// <param name="encoding">Optional encoding used when writing the temp file.</param>
/// <param name="cancellationToken">Token to cancel the upload; falls back to the instance token when defaulted.</param>
/// <returns>The service response for the uploaded part.</returns>
public async Task<UploadPartResponse> MultipartUploadUploadPartAsync(string bucket, string key, string uploadId, int uploadPart, string contents, Encoding encoding = null, CancellationToken cancellationToken = default)
{
    this.Logger.LogDebug($"[{nameof(this.MultipartUploadUploadPartAsync)}]");
    this.Logger.LogTrace(JsonConvert.SerializeObject(new { bucket, key, uploadId, uploadPart, encoding }));

    if (string.IsNullOrWhiteSpace(bucket))
    {
        throw new ArgumentNullException(nameof(bucket));
    }

    if (string.IsNullOrWhiteSpace(key))
    {
        throw new ArgumentNullException(nameof(key));
    }

    if (string.IsNullOrWhiteSpace(uploadId))
    {
        throw new ArgumentNullException(nameof(uploadId));
    }

    if (uploadPart == 0)
    {
        throw new ArgumentException(nameof(uploadPart));
    }

    if (string.IsNullOrWhiteSpace(contents))
    {
        throw new ArgumentNullException(nameof(contents));
    }

    // Size check against the configured part limits. PERF FIX: GetByteCount
    // measures the UTF-16 payload without materializing it; the original also
    // called Encoding.Unicode.GetBytes(contents).LongCount(), allocating the
    // entire byte array and walking it with LINQ to obtain the same number.
    var partSizeInBytes = (long)Encoding.Unicode.GetByteCount(contents);
    if (partSizeInBytes < Text.MinimumPartSize || partSizeInBytes > Text.MaximumPartSize)
    {
        throw new ArgumentOutOfRangeException(nameof(contents), Text.InvalidPartSize);
    }

    var filePath = this.CreateTempFile(contents: contents, encoding: encoding);
    var request = new Amazon.S3.Model.UploadPartRequest
    {
        BucketName = bucket,
        FilePath = filePath,
        Key = key,
        PartNumber = uploadPart,
        UploadId = uploadId,
    };
    this.Logger.LogTrace(JsonConvert.SerializeObject(value: request));

    // Prefer the caller's token; fall back to the instance-level token.
    var response = await this.Repository.UploadPartAsync(
        request: request,
        cancellationToken: cancellationToken == default ? this.CancellationToken.Token : cancellationToken);
    this.Logger.LogTrace(JsonConvert.SerializeObject(value: response));
    return response;
}
/// <summary>
/// Uploads one chunk of a chunked (multipart) upload and returns its ETag.
/// If S3 reports the upload id no longer exists, the local chunked-upload
/// state is aborted before the exception is rethrown.
/// </summary>
public override string UploadChunk(string domain, string path, string uploadId, Stream stream, int chunkNumber, long chunkLength)
{
    var partRequest = new UploadPartRequest
    {
        BucketName = _bucket,
        Key = MakePath(domain, path),
        UploadId = uploadId,
        PartNumber = chunkNumber,
        InputStream = stream
    };
    try
    {
        using (var client = GetClient())
        {
            return client.UploadPart(partRequest).ETag;
        }
    }
    catch (AmazonS3Exception error)
    {
        // S3 no longer knows this upload id: discard our chunk state too.
        if (error.ErrorCode == "NoSuchUpload")
        {
            AbortChunkedUpload(domain, path, uploadId);
        }
        throw;
    }
}
/// <summary>
/// Integration test: uploads a ~15 MB generated file to S3 in three parts
/// through the file-mode encryption client, verifies the part listing,
/// completes the upload, then downloads the object and compares it
/// byte-for-byte with the original.
/// </summary>
public void MultipartEncryptionTestInstructionFile()
{
    string filePath = @"C:\temp\Upload15MegFileIn3PartsViaStream.txt";
    string retrievedFilepath = @"C:\temp\Upload15MegFileIn3PartsViaStreamRetreived.txt";
    // "MEG" is 2^20 plus 4001 bytes — presumably so part boundaries do not
    // fall on exact megabyte edges; confirm intent.
    int MEG_SIZE = (int)Math.Pow(2, 20) + 4001;
    long totalSize = (long)(15 * MEG_SIZE);
    UtilityMethods.GenerateFile(filePath, totalSize);
    string key = "MultipartEncryptionTestInstrcutionFile" + random.Next();
    s3EncryptionClientFileMode.PutBucket(new PutBucketRequest() { BucketName = bucketName });
    Stream inputStream = File.OpenRead(filePath);
    try
    {
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
        {
            BucketName = bucketName,
            Key = key,
            StorageClass = S3StorageClass.ReducedRedundancy,
            ContentType = "text/html",
            CannedACL = S3CannedACL.PublicRead
        };
        InitiateMultipartUploadResponse initResponse =
            s3EncryptionClientFileMode.InitiateMultipartUpload(initRequest);

        // Upload part 1
        UploadPartRequest uploadRequest = new UploadPartRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 1,
            PartSize = 5 * MEG_SIZE,
            InputStream = inputStream
        };
        UploadPartResponse up1Response = s3EncryptionClientFileMode.UploadPart(uploadRequest);

        // Upload part 2
        uploadRequest = new UploadPartRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 2,
            PartSize = 5 * MEG_SIZE + 4001,
            InputStream = inputStream
        };
        UploadPartResponse up2Response = s3EncryptionClientFileMode.UploadPart(uploadRequest);

        // Upload part 3: no PartSize, so the remainder of the stream is sent;
        // IsLastPart is set for the encryption client's final part.
        uploadRequest = new UploadPartRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId,
            PartNumber = 3,
            InputStream = inputStream,
            IsLastPart = true
        };
        //uploadRequest.setLastPart();
        UploadPartResponse up3Response = s3EncryptionClientFileMode.UploadPart(uploadRequest);

        // The part listing should contain all three parts, in order, with
        // matching ETags.
        ListPartsRequest listPartRequest = new ListPartsRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        ListPartsResponse listPartResponse =
            s3EncryptionClientFileMode.ListParts(listPartRequest);
        Assert.AreEqual(3, listPartResponse.Parts.Count);
        Assert.AreEqual(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber);
        Assert.AreEqual(up1Response.ETag, listPartResponse.Parts[0].ETag);
        Assert.AreEqual(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber);
        Assert.AreEqual(up2Response.ETag, listPartResponse.Parts[1].ETag);
        Assert.AreEqual(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber);
        Assert.AreEqual(up3Response.ETag, listPartResponse.Parts[2].ETag);

        // MaxParts should page the listing down to a single entry.
        listPartRequest.MaxParts = 1;
        listPartResponse = s3EncryptionClientFileMode.ListParts(listPartRequest);
        Assert.AreEqual(1, listPartResponse.Parts.Count);

        // Complete the response
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        {
            BucketName = bucketName,
            Key = key,
            UploadId = initResponse.UploadId
        };
        compRequest.AddPartETags(up1Response, up2Response, up3Response);
        CompleteMultipartUploadResponse compResponse =
            s3EncryptionClientFileMode.CompleteMultipartUpload(compRequest);
        Assert.AreEqual(bucketName, compResponse.BucketName);
        Assert.IsNotNull(compResponse.ETag);
        Assert.AreEqual(key, compResponse.Key);
        Assert.IsNotNull(compResponse.Location);

        // Get the file back from S3 and make sure it is still the same.
        GetObjectRequest getRequest = new GetObjectRequest()
        {
            BucketName = bucketName,
            Key = key
        };
        GetObjectResponse getResponse = s3EncryptionClientFileMode.GetObject(getRequest);
        getResponse.WriteResponseStreamToFile(retrievedFilepath);
        UtilityMethods.CompareFiles(filePath, retrievedFilepath);

        // The content type set at initiation should survive the round trip.
        GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest()
        {
            BucketName = bucketName,
            Key = key
        };
        GetObjectMetadataResponse metaDataResponse =
            s3EncryptionClientFileMode.GetObjectMetadata(metaDataRequest);
        Assert.AreEqual("text/html", metaDataResponse.Headers.ContentType);
    }
    finally
    {
        // Always release the local stream and scrub temp files, pass or fail.
        inputStream.Close();
        if (File.Exists(filePath))
            File.Delete(filePath);
        if (File.Exists(retrievedFilepath))
            File.Delete(retrievedFilepath);
    }
}
/// <summary>
/// Uploads a single part, cancelling the sibling part uploads (via
/// <paramref name="internalCts"/>) when this part fails for any reason other
/// than cancellation itself.
/// </summary>
/// <param name="uploadRequest">The part to upload.</param>
/// <param name="internalCts">Shared source used to cancel all parallel part uploads.</param>
/// <param name="asyncThrottler">Semaphore limiting concurrent part uploads; released on exit.</param>
private async Task<UploadPartResponse> UploadPartAsync(UploadPartRequest uploadRequest, CancellationTokenSource internalCts, SemaphoreSlim asyncThrottler)
{
    try
    {
        var partResponse = await _s3Client.UploadPartAsync(uploadRequest, internalCts.Token)
            .ConfigureAwait(continueOnCapturedContext: false);
        return partResponse;
    }
    catch (Exception exception)
    {
        // A genuine failure (not a cancellation echo) stops further scheduling
        // and cancels the other UploadPart requests running in parallel.
        if (!(exception is OperationCanceledException))
        {
            internalCts.Cancel();
        }
        throw;
    }
    finally
    {
        // Always free the throttler slot so other parts can proceed.
        asyncThrottler.Release();
    }
}
/// <summary>
/// Recursively uploads every file under <paramref name="directoryPath"/>,
/// keying each object under the given prefix plus the directory name.
/// </summary>
/// <param name="prefix">Key prefix accumulated from parent directories.</param>
/// <param name="directoryPath">Local directory to upload.</param>
/// <param name="notifier">Optional callback invoked with a message per file.</param>
private void UploadDirectory(string prefix, string directoryPath, Action<string> notifier = null)
{
    if (!Directory.Exists(directoryPath))
    {
        throw new DirectoryNotFoundException(
            "Source directory does not exist or could not be found: " + directoryPath);
    }
    string currentPrefix = prefix + Path.GetFileName(directoryPath) + kDirectoryDelimeterForS3;
    foreach (string file in Directory.GetFiles(directoryPath))
    {
        string fileName = Path.GetFileName(file);
        if (notifier != null)
        {
            notifier(string.Format("Uploading {0}", fileName));
        }
        var request = new UploadPartRequest()
        {
            BucketName = _bucketName,
            FilePath = file,
            IsLastPart = true,
            Key = currentPrefix + fileName
        };
        _amazonS3.UploadPart(request);
    }
    // Descend into subdirectories, extending the prefix as we go.
    foreach (string subdir in Directory.GetDirectories(directoryPath))
    {
        UploadDirectory(currentPrefix, subdir, notifier);
    }
}
/// <summary>
/// Initiates the asynchronous execution of the UploadPart operation.
/// <seealso cref="Amazon.S3.IAmazonS3.UploadPart"/>
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the UploadPart operation.</param>
/// <param name="cancellationToken">
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
/// <returns>The task object representing the asynchronous operation.</returns>
public Task<UploadPartResponse> UploadPartAsync(UploadPartRequest request, CancellationToken cancellationToken = default(CancellationToken))
{
    // Serialize the request and parse the reply via the generated (un)marshallers.
    var requestMarshaller = new UploadPartRequestMarshaller();
    var responseUnmarshaller = UploadPartResponseUnmarshaller.GetInstance();
    return Invoke<IRequest, UploadPartRequest, UploadPartResponse>(
        request, requestMarshaller, responseUnmarshaller, signer, cancellationToken);
}
/// <summary>
/// Initiates the asynchronous execution of the UploadPart operation.
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the UploadPart operation.</param>
/// <param name="cancellationToken">
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
/// <returns>The task object representing the asynchronous operation.</returns>
public Task<UploadPartResponse> UploadPartAsync(UploadPartRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
    // Serialize the request and parse the reply via the generated (un)marshallers.
    var requestMarshaller = new UploadPartRequestMarshaller();
    var responseUnmarshaller = UploadPartResponseUnmarshaller.Instance;
    return InvokeAsync<UploadPartRequest, UploadPartResponse>(
        request, requestMarshaller, responseUnmarshaller, cancellationToken);
}
/// <summary>
/// Streams one buffered chunk of <paramref name="stream"/> to S3 as a
/// multipart-upload part, initiating the upload on the first chunk and
/// completing it on the last.
/// </summary>
/// <param name="client">S3 client used for all calls.</param>
/// <param name="key">Destination object key.</param>
/// <param name="stream">Accumulating copy stream carrying the chunk, part counter and collected ETags.</param>
internal static void CreateMultiPartS3Blob(AmazonS3Client client, string key, S3CopyMemoryStream stream)
{
    if (stream.InitiatingPart)
    {
        InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest()
            .WithBucketName("static.getbrickpile.com")
            .WithCannedACL(S3CannedACL.PublicRead)
            .WithKey(key);
        InitiateMultipartUploadResponse initiateResponse =
            client.InitiateMultipartUpload(initiateMultipartUploadRequest);
        stream.UploadPartId = initiateResponse.UploadId;
    }
    // BUGFIX: capture the buffered length BEFORE rewinding. The original read
    // stream.Position AFTER setting it to 0, so every part was sent with
    // PartSize = 0.
    long partSize = stream.Position;
    stream.Position = 0;
    UploadPartRequest uploadPartRequest = new UploadPartRequest()
        .WithBucketName("static.getbrickpile.com")
        .WithKey(key)
        .WithPartNumber(stream.WriteCount)
        .WithPartSize(partSize)
        .WithUploadId(stream.UploadPartId)
        .WithInputStream(stream) as UploadPartRequest;
    UploadPartResponse response = client.UploadPart(uploadPartRequest);
    PartETag etag = new PartETag(response.PartNumber, response.ETag);
    stream.PartETagCollection.Add(etag);
    if (stream.EndOfPart)
    {
        // Final chunk: stitch all parts together with the collected ETags.
        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .WithBucketName("static.getbrickpile.com")
            .WithKey(key)
            .WithPartETags(stream.PartETagCollection)
            .WithUploadId(stream.UploadPartId);
        client.CompleteMultipartUpload(completeMultipartUploadRequest);
    }
}
/// <summary>
/// Builds the UploadPartRequest for one part of the transfer, propagating the
/// caller's SSE-C settings and wiring progress and request event handlers.
/// </summary>
/// <param name="partNumber">Part number for this request.</param>
/// <param name="filePosition">Byte offset in the source at which this part starts.</param>
/// <param name="initResponse">Response from InitiateMultipartUpload (supplies the UploadId).</param>
/// <returns>The configured part request.</returns>
private UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initResponse)
{
    var uploadRequest = new UploadPartRequest()
    {
        BucketName = this._fileTransporterRequest.BucketName,
        Key = this._fileTransporterRequest.Key,
        UploadId = initResponse.UploadId,
        PartNumber = partNumber,
        PartSize = this._partSize,
        ServerSideEncryptionCustomerMethod = this._fileTransporterRequest.ServerSideEncryptionCustomerMethod,
        ServerSideEncryptionCustomerProvidedKey = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKey,
        ServerSideEncryptionCustomerProvidedKeyMD5 = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKeyMD5,
#if (BCL && !BCL45)
        Timeout = ClientConfig.GetTimeoutValue(this._config.DefaultTimeout, this._fileTransporterRequest.Timeout)
#endif
    };
#if BCL
    // Final part through the encryption client is marked IsLastPart with
    // PartSize = 0 — presumably so the client can flush its final cipher
    // block; confirm against the encryption client's contract.
    if ((filePosition + this._partSize >= this._contentLength)
        && _s3Client is Amazon.S3.Encryption.AmazonS3EncryptionClient
        )
    {
        uploadRequest.IsLastPart = true;
        uploadRequest.PartSize = 0;
    }
#endif
    var progressHandler = new ProgressHandler(this.UploadPartProgressEventCallback);
    uploadRequest.StreamUploadProgressCallback += progressHandler.OnTransferProgress;
    uploadRequest.BeforeRequestEvent += this.RequestEventHandler;
    // NOTE: the `else` below pairs with whichever `if` the preprocessor keeps;
    // when neither BCL nor WIN_RT/WINDOWS_PHONE is defined this would not
    // compile, so one of these symbols is expected to always be set.
#if BCL
    if (this._fileTransporterRequest.IsSetFilePath())
    {
        uploadRequest.FilePosition = filePosition;
        uploadRequest.FilePath = this._fileTransporterRequest.FilePath;
    }
#elif WIN_RT || WINDOWS_PHONE
    if (this._fileTransporterRequest.IsSetStorageFile())
    {
        uploadRequest.FilePosition = filePosition;
        uploadRequest.StorageFile = this._fileTransporterRequest.StorageFile;
    }
#endif
    else
    {
        uploadRequest.InputStream = this._fileTransporterRequest.InputStream;
    }
    return uploadRequest;
}
/// <summary>
/// Executes the UploadPart service call synchronously using the generated
/// request marshaller and response unmarshaller.
/// </summary>
/// <param name="request">The part-upload request to send.</param>
/// <returns>The UploadPart service response.</returns>
internal UploadPartResponse UploadPart(UploadPartRequest request)
{
    var requestMarshaller = new UploadPartRequestMarshaller();
    var responseUnmarshaller = UploadPartResponseUnmarshaller.Instance;
    return Invoke<UploadPartRequest, UploadPartResponse>(request, requestMarshaller, responseUnmarshaller);
}
/// <summary>
/// Create an UploadPartRequest for one part of a file-backed multipart upload.
/// </summary>
/// <param name="existingBucketName">Destination bucket.</param>
/// <param name="keyName">Destination object key.</param>
/// <param name="uploadID">Id of the multipart upload the part belongs to.</param>
/// <param name="partNumber">Part number for this request.</param>
/// <param name="partSize">Number of bytes in this part.</param>
/// <param name="filePosition">Byte offset in the file where the part starts.</param>
/// <param name="filePath">Path of the local source file.</param>
/// <returns>The populated request.</returns>
public UploadPartRequest CreateUploadPartRequest(string existingBucketName, string keyName, string uploadID, int partNumber, long partSize, long filePosition, string filePath)
{
    var request = new UploadPartRequest
    {
        FilePath = filePath,
        FilePosition = filePosition,
        PartSize = partSize,
        PartNumber = partNumber,
        UploadId = uploadID,
        Key = keyName,
        BucketName = existingBucketName
    };
    return request;
}
/// <summary>
/// Send an UploadPartRequest request and return its response. This call uploads part data,
/// retrying transient failures and reporting aggregate transfer progress
/// across all parts of the object.
/// </summary>
/// <param name="uploadPartRequest">The part to upload.</param>
/// <param name="multipartUploadProgress">Shared map of part number to bytes transferred, summed to report whole-object progress.</param>
/// <param name="token">Cancellation token; cancelling ends the retry loop.</param>
/// <param name="progress">Optional sink receiving (key, total transferred bytes) tuples.</param>
/// <returns>The UploadPart service response.</returns>
public async Task<UploadPartResponse> UploadPartAsync(UploadPartRequest uploadPartRequest, Dictionary<int, long> multipartUploadProgress, CancellationToken token, IProgress<Tuple<string,long>> progress = null)
{
    token.ThrowIfCancellationRequested();
    _log.Debug("Called UploadPartAsync with UploadPartRequest properties: KeyName = \"" + uploadPartRequest.Key +
        "\", PartNumber = " + uploadPartRequest.PartNumber + ", PartSize = " + uploadPartRequest.PartSize +
        ", UploadId = \"" + uploadPartRequest.UploadId + ", FilePath = \"" + uploadPartRequest.FilePath + "\".");
    // Remaining attempts: the first failure decrements to 1 and retries; the
    // second failure decrements to 0 and propagates.
    int retries = 2;
    // Progress callback: record this part's transferred bytes and report the
    // sum across all parts.
    uploadPartRequest.StreamTransferProgress += (sender, eventArgs) =>
    {
        if (progress != null)
        {
            multipartUploadProgress[uploadPartRequest.PartNumber] = eventArgs.TransferredBytes;
            var multiPartProgress = multipartUploadProgress.Sum(x => x.Value);
            progress.Report(Tuple.Create(uploadPartRequest.Key, multiPartProgress));
        }
        // NOTE(review): direct console output from library code — looks like a
        // debugging leftover; confirm whether it should stay.
        Console.WriteLine(uploadPartRequest.Key + " - part " + uploadPartRequest.PartNumber + ": " +
            eventArgs.TransferredBytes + " / " + eventArgs.TotalBytes + " bytes.");
    };
    while (true)
    {
        try
        {
            token.ThrowIfCancellationRequested();
            // Upload part and return response.
            var uploadPartResponse = await s3Client.UploadPartAsync(uploadPartRequest, token).ConfigureAwait(false);
            _log.Debug("Successfully uploaded KeyName = \"" + uploadPartRequest.Key +
                "\", PartNumber = " + uploadPartRequest.PartNumber + "\".");
            return uploadPartResponse;
        }
        catch (Exception e)
        {
            if (!(e is TaskCanceledException || e is OperationCanceledException))
            {
                var messagePart = " with UploadPartRequest properties: KeyName = \"" + uploadPartRequest.Key +
                    "\", PartNumber = " + uploadPartRequest.PartNumber + ", PartSize = " + uploadPartRequest.PartSize +
                    ", UploadId = \"" + uploadPartRequest.UploadId + ", FilePath = \"" + uploadPartRequest.FilePath + "\"";
                this.LogAmazonException(messagePart, e);
                // if the exception is AmazonS3Exception or AmazonServiceException with error type Sender,
                // which means that the client is responsible for the error,
                // then throw do not retry.
                if (e is AmazonS3Exception)
                {
                    var ae = e as AmazonS3Exception;
                    if (ae.ErrorType == ErrorType.Sender)
                        throw;
                }
                if (e is AmazonServiceException)
                {
                    var ae = e as AmazonServiceException;
                    if (ae.ErrorType == ErrorType.Sender)
                        throw;
                }
                // Retry budget exhausted: propagate the last failure.
                if (--retries == 0)
                    throw;
            }
            // if the action is paused, throw an exception. The loop is broken.
            else
                throw;
        }
    }
}
/// <summary>
/// Shared APM-style dispatcher for UploadPart: marshals the request and hands
/// it to the generic Invoke pipeline, optionally blocking until complete.
/// </summary>
/// <param name="uploadPartRequest">The part-upload request to send.</param>
/// <param name="callback">Callback invoked when the operation completes (may be null).</param>
/// <param name="state">User state passed through to the callback.</param>
/// <param name="synchronized">True to run the invocation synchronously.</param>
IAsyncResult invokeUploadPart(UploadPartRequest uploadPartRequest, AsyncCallback callback, object state, bool synchronized)
{
    var requestMarshaller = new UploadPartRequestMarshaller();
    var responseUnmarshaller = UploadPartResponseUnmarshaller.GetInstance();
    return Invoke<IRequest, UploadPartRequest>(uploadPartRequest, callback, state, synchronized, requestMarshaller, responseUnmarshaller, this.signer);
}
/// <summary>
/// Updates the request where the input stream contains the encrypted object contents.
/// </summary>
/// <param name="request">The part request whose input stream is replaced with its encrypted form.</param>
private void GenerateEncryptedUploadPartRequest(UploadPartRequest request)
{
    string uploadID = request.UploadId;
    UploadPartEncryptionContext encryptionContext = currentMultiPartUploadKeys[uploadID];
    byte[] envelopeKey = encryptionContext.EnvelopeKey;
    byte[] IV = encryptionContext.NextIV;
    EncryptionInstructions instructions =
        new EncryptionInstructions(EncryptionMaterials.EmptyMaterialsDescription, envelopeKey, IV);

    if (request.IsLastPart)
    {
        // Final part uses the whole-request encryption path and seals the
        // upload's encryption context.
        request.InputStream = EncryptionUtils.EncryptRequestUsingInstruction(request.InputStream, instructions);
        encryptionContext.IsFinalPart = true;
    }
    else
    {
        // No further parts may arrive after the final one, and parts must not
        // arrive with a number lower than the last one processed.
        if (encryptionContext.IsFinalPart)
            throw new AmazonClientException("Last part has already been processed, cannot upload this as the last part");
        if (request.PartNumber < encryptionContext.PartNumber)
            throw new AmazonClientException("Upload Parts must in correct sequence");
        request.InputStream = EncryptionUtils.EncryptUploadPartRequestUsingInstructions(request.InputStream, instructions);
        encryptionContext.PartNumber = request.PartNumber;
    }

    // Remember the crypto stream in the request state so it can be disposed
    // after the part is sent.
    request.RequestState.Add(S3CryptoStream, request.InputStream);
}
/// <summary>
/// <para>Uploads a part in a multipart upload.</para>
/// </summary>
/// <remarks>
/// <para>
/// After you initiate a multipart upload and upload one or more parts, you must either complete or abort
/// the multipart upload in order to stop getting charged for storage of the uploaded parts. Once you
/// complete or abort the multipart upload, Amazon S3 will release the stored parts and stop charging you
/// for their storage.
/// </para>
/// </remarks>
/// <param name="uploadPartRequest">Container for the necessary parameters to execute the UploadPart service method on AmazonS3.</param>
///
/// <returns>The response from the UploadPart service method, as returned by AmazonS3.</returns>
public UploadPartResponse UploadPart(UploadPartRequest uploadPartRequest)
{
    // Synchronous call expressed as begin + end over the shared invoke path.
    return EndUploadPart(invokeUploadPart(uploadPartRequest, null, null, true));
}
/// <summary>
/// Initiates the asynchronous execution of the UploadPart operation.
/// <seealso cref="Amazon.S3.IAmazonS3.UploadPart"/>
/// </summary>
/// <remarks>
/// <para>
/// After you initiate a multipart upload and upload one or more parts, you must either complete or abort
/// the multipart upload in order to stop getting charged for storage of the uploaded parts. Once you
/// complete or abort the multipart upload, Amazon S3 will release the stored parts and stop charging you
/// for their storage.
/// </para>
/// </remarks>
/// <param name="uploadPartRequest">Container for the necessary parameters to execute the UploadPart operation on AmazonS3.</param>
/// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
/// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
/// procedure using the AsyncState property.</param>
///
/// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndUploadPart
/// operation.</returns>
public IAsyncResult BeginUploadPart(UploadPartRequest uploadPartRequest, AsyncCallback callback, object state)
{
    // Asynchronous entry point: same invoke path as UploadPart, but not synchronized.
    var pending = invokeUploadPart(uploadPartRequest, callback, state, false);
    return pending;
}
/// <summary>
/// Runs the multipart upload: initiates the upload, queues one UploadPartRequest
/// per part, executes them on a thread pool, then completes the upload.
/// </summary>
public override void Execute()
{
    // A per-request timeout overrides the configured default (0 means "not set").
    int timeout = this._config.DefaultTimeout;
    if (this._fileTransporterRequest.Timeout != 0)
        timeout = this._fileTransporterRequest.Timeout;

    // Step 1: initiate the multipart upload to obtain an UploadId.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest()
        .WithBucketName(this._fileTransporterRequest.BucketName)
        .WithKey(this._fileTransporterRequest.Key)
        .WithCannedACL(this._fileTransporterRequest.CannedACL)
        .WithContentType(determineContentType())
        .WithStorageClass(this._fileTransporterRequest.StorageClass)
        .WithBeforeRequestHandler(RequestEventHandler) as InitiateMultipartUploadRequest;

    // Forward caller-supplied metadata and headers, if any were provided.
    if (this._fileTransporterRequest.metadata != null && this._fileTransporterRequest.metadata.Count > 0)
        initRequest.WithMetaData(this._fileTransporterRequest.metadata);
    if (this._fileTransporterRequest.Headers != null && this._fileTransporterRequest.Headers.Count > 0)
        initRequest.AddHeaders(this._fileTransporterRequest.Headers);

    InitiateMultipartUploadResponse initResponse = this._s3Client.InitiateMultipartUpload(initRequest);
    this._logger.DebugFormat("Initiated upload: {0}", initResponse.UploadId);

    try
    {
        // Step 2: queue one UploadPartRequest per _partSize slice of the content.
        this._logger.DebugFormat("Queue up the UploadPartRequests to be executed");
        long filePosition = 0;
        for (int i = 1; filePosition < this._contentLength; i++)
        {
            UploadPartRequest uploadRequest = new UploadPartRequest()
                .WithBucketName(this._fileTransporterRequest.BucketName)
                .WithKey(this._fileTransporterRequest.Key)
                .WithUploadId(initResponse.UploadId)
                .WithTimeout(timeout)
                .WithPartNumber(i)
                .WithPartSize(this._partSize)
                .WithSubscriber(new EventHandler<UploadPartProgressArgs>(this.uploadPartProgressEventCallback))
                .WithBeforeRequestHandler(RequestEventHandler) as UploadPartRequest;

            // File-path uploads seek by position; stream uploads share the single
            // caller-provided input stream across all queued parts.
            if (this._fileTransporterRequest.IsSetFilePath())
            {
                uploadRequest
                    .WithFilePosition(filePosition)
                    .WithFilePath(this._fileTransporterRequest.FilePath);
            }
            else
            {
                uploadRequest.InputStream = this._fileTransporterRequest.InputStream;
            }

            this._partsToUpload.Enqueue(uploadRequest);
            filePosition += this._partSize;
        }

        // Step 3: drain the queue on worker threads and block until every part is done.
        this._totalNumberOfParts = this._partsToUpload.Count;
        this._logger.DebugFormat("Starting threads to execute the {0} UploadPartRequests in the queue", this._totalNumberOfParts);
        startInvokerPool();
        this._logger.DebugFormat("Waiting for threads to complete. ({0})", initResponse.UploadId);
        waitTillAllThreadsComplete();

        // Step 4: complete the upload with the collected part responses (ETags).
        this._logger.DebugFormat("Beginning completing multipart. ({0})", initResponse.UploadId);
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
            .WithBucketName(this._fileTransporterRequest.BucketName)
            .WithKey(this._fileTransporterRequest.Key)
            .WithUploadId(initResponse.UploadId)
            .WithPartETags(this._uploadResponses)
            .WithBeforeRequestHandler(RequestEventHandler) as CompleteMultipartUploadRequest;
        this._s3Client.CompleteMultipartUpload(compRequest);
        this._logger.DebugFormat("Done completing multipart. ({0})", initResponse.UploadId);
    }
    catch (Exception e)
    {
        // Shut down the upload on any failure; otherwise S3 keeps charging for
        // the parts already stored.
        this._logger.Error(string.Format("Exception while uploading. ({0})", initResponse.UploadId), e);
        shutdown(initResponse.UploadId);
        throw;
    }
    finally
    {
        // Only close a caller-provided stream; file-path uploads manage their own streams.
        if (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.IsSetFilePath())
        {
            this._fileTransporterRequest.InputStream.Close();
        }
    }
}
/// <summary>
/// Initiates the asynchronous execution of the UploadPart operation.
/// This API is supported only when AWSConfigs.HttpClient is set to AWSConfigs.HttpClientOption.UnityWebRequest, the default value for this configuration option is AWSConfigs.HttpClientOption.UnityWWW
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the UploadPart operation on AmazonS3Client.</param>
/// <param name="callback">An Action delegate that is invoked when the operation completes.</param>
/// <param name="options">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
/// procedure using the AsyncState property.</param>
public void UploadPartAsync(UploadPartRequest request, AmazonServiceCallback<UploadPartRequest, UploadPartResponse> callback, AsyncOptions options = null)
{
    // Guard: the UnityWWW transport cannot be used for this operation (see message).
    if (AWSConfigs.HttpClient == AWSConfigs.HttpClientOption.UnityWWW)
    {
        throw new InvalidOperationException("UploadPart is only allowed with AWSConfigs.HttpClientOption.UnityWebRequest API option");
    }

    // Idiomatic null-coalescing instead of "options == null ? new AsyncOptions() : options".
    options = options ?? new AsyncOptions();

    var marshaller = new UploadPartRequestMarshaller();
    var unmarshaller = UploadPartResponseUnmarshaller.Instance;

    // Adapt the typed callback to the untyped pipeline signature, packaging the
    // request, response, exception and async state into an AmazonServiceResult.
    Action<AmazonWebServiceRequest, AmazonWebServiceResponse, Exception, AsyncOptions> callbackHelper = null;
    if (callback != null)
    {
        callbackHelper = (AmazonWebServiceRequest req, AmazonWebServiceResponse res, Exception ex, AsyncOptions ao) =>
        {
            var responseObject = new AmazonServiceResult<UploadPartRequest, UploadPartResponse>(
                (UploadPartRequest)req, (UploadPartResponse)res, ex, ao.State);
            callback(responseObject);
        };
    }
    BeginInvoke<UploadPartRequest>(request, marshaller, unmarshaller, options, callbackHelper);
}
// Builds the UploadPartRequest for one part of the multipart upload, wiring in
// SSE-C settings, progress/request handlers, the data source (file position or
// stream), and SigV4 when the upload uses KMS encryption.
private UploadPartRequest ConstructUploadPartRequest(int partNumber, long filePosition, InitiateMultipartUploadResponse initResponse)
{
    var uploadRequest = new UploadPartRequest()
    {
        BucketName = this._fileTransporterRequest.BucketName,
        Key = this._fileTransporterRequest.Key,
        UploadId = initResponse.UploadId,
        PartNumber = partNumber,
        PartSize = this._partSize,
        // Customer-provided-key (SSE-C) settings must accompany every part request.
        ServerSideEncryptionCustomerMethod = this._fileTransporterRequest.ServerSideEncryptionCustomerMethod,
        ServerSideEncryptionCustomerProvidedKey = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKey,
        ServerSideEncryptionCustomerProvidedKeyMD5 = this._fileTransporterRequest.ServerSideEncryptionCustomerProvidedKeyMD5,
#if (BCL && !BCL45)
        Timeout = ClientConfig.GetTimeoutValue(this._config.DefaultTimeout, this._fileTransporterRequest.Timeout)
#endif
    };
#if BCL
    // Encryption client special case: the part that reaches the end of the content is
    // flagged as the last part, and its PartSize is zeroed (the encryption client
    // presumably derives the real size itself — confirm against the crypto pipeline).
    if ((filePosition + this._partSize >= this._contentLength)
        && _s3Client is Amazon.S3.Encryption.AmazonS3EncryptionClient)
    {
        uploadRequest.IsLastPart = true;
        uploadRequest.PartSize = 0;
    }
#endif
    // Attach transfer-progress and before-request hooks to the underlying request.
    var progressHandler = new ProgressHandler(this.UploadPartProgressEventCallback);
    ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).StreamUploadProgressCallback += progressHandler.OnTransferProgress;
    ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).AddBeforeRequestHandler(this.RequestEventHandler);
    // Data source: seek into the file by position, or share the caller's stream.
    if (this._fileTransporterRequest.IsSetFilePath())
    {
        uploadRequest.FilePosition = filePosition;
        uploadRequest.FilePath = this._fileTransporterRequest.FilePath;
    }
    else
    {
        uploadRequest.InputStream = this._fileTransporterRequest.InputStream;
    }

    // If the InitiateMultipartUploadResponse indicates that this upload is
    // using KMS, force SigV4 for each UploadPart request.
    bool useSigV4 = initResponse.ServerSideEncryptionMethod == ServerSideEncryptionMethod.AWSKMS;
    if (useSigV4)
        ((Amazon.Runtime.Internal.IAmazonWebServiceRequest)uploadRequest).UseSigV4 = true;

    return uploadRequest;
}
/// <summary>
/// Sample code to contrast uploading a file using Amazon S3's Multi-Part Upload API.
/// The number of parts is derived from the file's actual length (the original version
/// hard-coded four 5 MB parts, silently assuming a 20 MB file), and the final part is
/// sized to the remaining bytes as the multipart API requires.
/// </summary>
/// <param name="s3Client">Client used to issue the multipart calls.</param>
/// <param name="bucketName">Destination bucket name.</param>
/// <param name="fileName">Path of the local file to upload.</param>
static void UploadUsingMultiPartAPI(IAmazonS3 s3Client, string bucketName, string fileName)
{
    const string objectKey = "multipart/myobject";

    // tell S3 we're going to upload an object in multiple parts and receive an upload ID
    // in return
    var initializeUploadRequest = new InitiateMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = objectKey
    };
    var initializeUploadResponse = s3Client.InitiateMultipartUpload(initializeUploadRequest);

    // this ID must accompany all parts and the final 'completed' call
    var uploadID = initializeUploadResponse.UploadId;

    // Send the file (synchronously) in 5 MB parts — the minimum size allowed for every
    // part except the last. For each part we record the returned etag to pass to the
    // completion call.
    var partETags = new List<PartETag>();
    long partSize = 5 * ONE_MEG; // this is the minimum part size
    long fileLength = new System.IO.FileInfo(fileName).Length;
    var partCount = (int)((fileLength + partSize - 1) / partSize); // ceiling division

    for (var partNumber = 0; partNumber < partCount; partNumber++)
    {
        // part numbers must be between 1 and 1000
        var logicalPartNumber = partNumber + 1;
        long filePosition = partNumber * partSize;

        // The last part carries only whatever bytes remain.
        long currentPartSize = partSize;
        if (filePosition + currentPartSize > fileLength)
            currentPartSize = fileLength - filePosition;

        var uploadPartRequest = new UploadPartRequest
        {
            BucketName = bucketName,
            Key = objectKey,
            UploadId = uploadID,
            PartNumber = logicalPartNumber,
            PartSize = currentPartSize,
            FilePosition = filePosition,
            FilePath = fileName
        };
        var partUploadResponse = s3Client.UploadPart(uploadPartRequest);
        partETags.Add(new PartETag
        {
            PartNumber = logicalPartNumber,
            ETag = partUploadResponse.ETag
        });
    }

    var completeUploadRequest = new CompleteMultipartUploadRequest
    {
        BucketName = bucketName,
        Key = objectKey,
        UploadId = uploadID,
        PartETags = partETags
    };
    s3Client.CompleteMultipartUpload(completeUploadRequest);
}
/// <summary>
/// Initiates the asynchronous execution of the UploadPart operation.
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the UploadPart operation on AmazonS3Client.</param>
/// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
/// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
/// procedure using the AsyncState property.</param>
///
/// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndUploadPart
/// operation.</returns>
public IAsyncResult BeginUploadPart(UploadPartRequest request, AsyncCallback callback, object state)
{
    // Hand the marshaller pair straight to BeginInvoke — no intermediates needed.
    return BeginInvoke<UploadPartRequest>(
        request,
        new UploadPartRequestMarshaller(),
        UploadPartResponseUnmarshaller.Instance,
        callback,
        state);
}
/// <summary>
/// Lists in-progress multipart uploads for the bucket (serialized to JSON as a
/// demonstration), then uploads a local file to S3 in 5 MB parts and completes the
/// multipart upload — aborting it on any failure so stored parts are released.
/// </summary>
static void Main()
{
    _accessKeyId = Utilities.AwsAccessKey;
    _secretAccessKey = Utilities.AwsSecretKey;
    AmazonS3 s3Client = new AmazonS3Client(_accessKeyId, _secretAccessKey);

    // Build one Object entry per distinct in-progress UploadId, adding a Part entry
    // each additional time the same UploadId is seen.
    ListMultipartUploadsRequest allMultipartUploadsRequest = new ListMultipartUploadsRequest().WithBucketName(ExistingBucketName);
    ListMultipartUploadsResponse mpUploadsResponse = s3Client.ListMultipartUploads(allMultipartUploadsRequest);
    var objects = new List<Object>();
    foreach (MultipartUpload multipartUpload in mpUploadsResponse.MultipartUploads)
    {
        bool isObjectFound = false;
        foreach (Object o in objects)
        {
            if (o.UploadId == multipartUpload.UploadId)
            {
                o.Parts.Add(new Part { PartId = o.Parts.Count, Etag = "" });
                isObjectFound = true;
            }
        }
        if (!isObjectFound)
        {
            objects.Add(new Object
            {
                Parts = new List<Part> { new Part() { Etag = "", PartId = 1 } },
                UploadId = multipartUpload.UploadId
            });
        }
    }

    // JSON round-trip is demonstration-only; objs is not used further.
    var result = JsonConvert.SerializeObject(objects);
    var objs = JsonConvert.DeserializeObject<List<Object>>(result);
    //return;

    // List to store upload part responses.
    var uploadResponses = new List<UploadPartResponse>();

    // Read the whole file into memory. Stream.Read is not guaranteed to fill the
    // buffer in a single call, so loop until every byte has been read (the original
    // ignored the return value, risking a silently short buffer).
    byte[] bytes;
    long contentLength = 0;
    using (var fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
    {
        contentLength = fileStream.Length;
        bytes = new byte[contentLength];
        int offset = 0;
        while (offset < contentLength)
        {
            int read = fileStream.Read(bytes, offset, (int)(contentLength - offset));
            if (read == 0)
                throw new EndOfStreamException("Unexpected end of file while reading " + FilePath);
            offset += read;
        }
    }

    // 1. Initialize.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest().WithBucketName(ExistingBucketName).WithKey(KeyName);
    InitiateMultipartUploadResponse initResponse = s3Client.InitiateMultipartUpload(initiateRequest);

    try
    {
        // 2. Upload Parts.
        long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB
        long filePosition = 0;

        // Reuse a single MD5 instance instead of leaking one undisposed instance per part.
        using (var md5 = MD5.Create())
        {
            for (int i = 1; filePosition < contentLength; i++)
            {
                // Slice the next part from the buffer; the final part may be smaller.
                byte[] bytesToStream;
                if (filePosition + partSize < contentLength)
                {
                    bytesToStream = new byte[partSize];
                    Array.Copy(bytes, filePosition, bytesToStream, 0, partSize);
                }
                else
                {
                    bytesToStream = new byte[contentLength - filePosition];
                    Array.Copy(bytes, filePosition, bytesToStream, 0, contentLength - filePosition);
                }
                Stream stream = new MemoryStream(bytesToStream);

                // Create request to upload a part.
                UploadPartRequest uploadRequest = new UploadPartRequest()
                    .WithBucketName(ExistingBucketName)
                    .WithKey(KeyName)
                    .WithUploadId(initResponse.UploadId)
                    .WithPartNumber(i)
                    .WithPartSize(partSize)
                    .WithFilePosition(filePosition)
                    .WithTimeout(1000000000)
                    .WithMD5Digest(Convert.ToBase64String(md5.ComputeHash(bytesToStream)));
                uploadRequest.WithInputStream(stream);

                // Upload part and add response to our list.
                uploadResponses.Add(s3Client.UploadPart(uploadRequest));
                filePosition += partSize;
            }
        }

        // Step 3: complete.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest()
            .WithBucketName(ExistingBucketName)
            .WithKey(KeyName)
            .WithUploadId(initResponse.UploadId)
            .WithPartETags(uploadResponses);
        CompleteMultipartUploadResponse completeUploadResponse = s3Client.CompleteMultipartUpload(completeRequest);
        Console.WriteLine(completeUploadResponse.ETag);
    }
    catch (Exception exception)
    {
        // Abort so S3 releases the already-stored parts and stops charging for them.
        Console.WriteLine("Exception occurred: {0}", exception.Message);
        s3Client.AbortMultipartUpload(new AbortMultipartUploadRequest()
            .WithBucketName(ExistingBucketName)
            .WithKey(KeyName)
            .WithUploadId(initResponse.UploadId));
    }
}
/// <summary>
/// Demonstrates the object-level S3 operations: listing objects, getting objects and
/// their metadata, putting and deleting objects, copying, listing versions, and a
/// manual multipart upload. Fixes the ListVersions sample's versioning status, which
/// used "Enable" — S3 only accepts "Enabled" or "Suspended".
/// </summary>
public void ObjectSamples()
{
    {
        #region ListObjects Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // List all objects
        ListObjectsRequest listRequest = new ListObjectsRequest
        {
            BucketName = "SampleBucket",
        };

        ListObjectsResponse listResponse;
        do
        {
            // Get a list of objects
            listResponse = client.ListObjects(listRequest);
            foreach (S3Object obj in listResponse.S3Objects)
            {
                Console.WriteLine("Object - " + obj.Key);
                Console.WriteLine(" Size - " + obj.Size);
                Console.WriteLine(" LastModified - " + obj.LastModified);
                Console.WriteLine(" Storage class - " + obj.StorageClass);
            }

            // Set the marker property
            listRequest.Marker = listResponse.NextMarker;
        } while (listResponse.IsTruncated);

        #endregion
    }

    {
        #region GetObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a GetObject request
        GetObjectRequest request = new GetObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request and remember to dispose of the response
        using (GetObjectResponse response = client.GetObject(request))
        {
            using (StreamReader reader = new StreamReader(response.ResponseStream))
            {
                string contents = reader.ReadToEnd();
                Console.WriteLine("Object - " + response.Key);
                Console.WriteLine(" Version Id - " + response.VersionId);
                Console.WriteLine(" Contents - " + contents);
            }
        }

        #endregion
    }

    {
        #region GetObjectMetadata Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a GetObjectMetadata request
        GetObjectMetadataRequest request = new GetObjectMetadataRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request and view the response
        GetObjectMetadataResponse response = client.GetObjectMetadata(request);
        Console.WriteLine("Content Length - " + response.ContentLength);
        Console.WriteLine("Content Type - " + response.Headers.ContentType);
        if (response.Expiration != null)
        {
            Console.WriteLine("Expiration Date - " + response.Expiration.ExpiryDate);
            Console.WriteLine("Expiration Rule Id - " + response.Expiration.RuleId);
        }

        #endregion
    }

    {
        #region PutObject Sample 1

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request with inline content
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            ContentBody = "This is sample content..."
        };

        // Put object
        PutObjectResponse response = client.PutObject(request);

        #endregion
    }

    {
        #region PutObject Sample 2

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request sourced from a file path
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            FilePath = "contents.txt"
        };

        // Put object
        PutObjectResponse response = client.PutObject(request);

        #endregion
    }

    {
        #region PutObject Sample 3

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a PutObject request sourced from a caller-owned stream
        PutObjectRequest request = new PutObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
        };

        using (FileStream stream = new FileStream("contents.txt", FileMode.Open))
        {
            request.InputStream = stream;

            // Put object
            PutObjectResponse response = client.PutObject(request);
        }

        #endregion
    }

    {
        #region DeleteObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a DeleteObject request
        DeleteObjectRequest request = new DeleteObjectRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };

        // Issue request
        client.DeleteObject(request);

        #endregion
    }

    {
        #region DeleteObjects Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a DeleteObjects request covering plain, versioned, and nested keys
        DeleteObjectsRequest request = new DeleteObjectsRequest
        {
            BucketName = "SampleBucket",
            Objects = new List<KeyVersion>
            {
                new KeyVersion() { Key = "Item1" },
                // Versioned item
                new KeyVersion() { Key = "Item2", VersionId = "Rej8CiBxcZKVK81cLr39j27Y5FVXghDK", },
                // Item in subdirectory
                new KeyVersion() { Key = "Logs/error.txt" }
            }
        };

        try
        {
            // Issue request
            DeleteObjectsResponse response = client.DeleteObjects(request);
        }
        catch (DeleteObjectsException doe)
        {
            // Catch error and list error details
            DeleteObjectsResponse errorResponse = doe.Response;

            foreach (DeletedObject deletedObject in errorResponse.DeletedObjects)
            {
                Console.WriteLine("Deleted item " + deletedObject.Key);
            }
            foreach (DeleteError deleteError in errorResponse.DeleteErrors)
            {
                Console.WriteLine("Error deleting item " + deleteError.Key);
                Console.WriteLine(" Code - " + deleteError.Code);
                Console.WriteLine(" Message - " + deleteError.Message);
            }
        }

        #endregion
    }

    {
        #region CopyObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a CopyObject request
        CopyObjectRequest request = new CopyObjectRequest
        {
            SourceBucket = "SampleBucket",
            SourceKey = "Item1",
            DestinationBucket = "AnotherBucket",
            DestinationKey = "Copy1",
            CannedACL = S3CannedACL.PublicRead
        };

        // Issue request
        client.CopyObject(request);

        #endregion
    }

    {
        // NOTE(review): this region duplicates the previous CopyObject sample verbatim;
        // kept as-is in case the docs extraction relies on both regions being present.
        #region CopyObject Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Create a CopyObject request
        CopyObjectRequest request = new CopyObjectRequest
        {
            SourceBucket = "SampleBucket",
            SourceKey = "Item1",
            DestinationBucket = "AnotherBucket",
            DestinationKey = "Copy1",
            CannedACL = S3CannedACL.PublicRead
        };

        // Issue request
        client.CopyObject(request);

        #endregion
    }

    {
        #region ListVersions Sample

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Turn versioning on for a bucket.
        // "Enabled" is the status value S3 accepts (the original sample's "Enable" is invalid).
        client.PutBucketVersioning(new PutBucketVersioningRequest
        {
            BucketName = "SampleBucket",
            VersioningConfig = new S3BucketVersioningConfig { Status = "Enabled" }
        });

        // Populate bucket with multiple items, each with multiple versions
        PopulateBucket(client, "SampleBucket");

        // Get versions
        ListVersionsRequest request = new ListVersionsRequest
        {
            BucketName = "SampleBucket"
        };

        // Make paged ListVersions calls
        ListVersionsResponse response;
        do
        {
            response = client.ListVersions(request);

            // View information about versions
            foreach (var version in response.Versions)
            {
                Console.WriteLine("Key = {0}, Version = {1}, IsLatest = {2}, LastModified = {3}, Size = {4}",
                    version.Key,
                    version.VersionId,
                    version.IsLatest,
                    version.LastModified,
                    version.Size);
            }

            request.KeyMarker = response.NextKeyMarker;
            request.VersionIdMarker = response.NextVersionIdMarker;
        } while (response.IsTruncated);

        #endregion
    }

    {
        #region Multipart Upload Sample

        int MB = (int)Math.Pow(2, 20);

        // Create a client
        AmazonS3Client client = new AmazonS3Client();

        // Define input stream
        Stream inputStream = Create13MBDataStream();

        // Initiate multipart upload
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1"
        };
        InitiateMultipartUploadResponse initResponse = client.InitiateMultipartUpload(initRequest);

        // Upload part 1
        UploadPartRequest uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 1,
            PartSize = 5 * MB,
            InputStream = inputStream
        };
        UploadPartResponse up1Response = client.UploadPart(uploadRequest);

        // Upload part 2
        uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 2,
            PartSize = 5 * MB,
            InputStream = inputStream
        };
        UploadPartResponse up2Response = client.UploadPart(uploadRequest);

        // Upload part 3 (no PartSize: the final part takes the rest of the stream)
        uploadRequest = new UploadPartRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartNumber = 3,
            InputStream = inputStream
        };
        UploadPartResponse up3Response = client.UploadPart(uploadRequest);

        // List parts for current upload
        ListPartsRequest listPartRequest = new ListPartsRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId
        };
        ListPartsResponse listPartResponse = client.ListParts(listPartRequest);
        Debug.Assert(listPartResponse.Parts.Count == 3);

        // Complete the multipart upload
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest
        {
            BucketName = "SampleBucket",
            Key = "Item1",
            UploadId = initResponse.UploadId,
            PartETags = new List<PartETag>
            {
                new PartETag { ETag = up1Response.ETag, PartNumber = 1 },
                new PartETag { ETag = up2Response.ETag, PartNumber = 2 },
                new PartETag { ETag = up3Response.ETag, PartNumber = 3 }
            }
        };
        CompleteMultipartUploadResponse compResponse = client.CompleteMultipartUpload(compRequest);

        #endregion
    }
}