internal AbortMultipartUploadsCommand(IAmazonS3 s3Client, string bucketName, DateTime initiateDate, TransferUtilityConfig config)
{
    this._s3Client = s3Client;
    this._bucketName = bucketName;
    this._initiatedDate = initiateDate;
    this._config = config;
}
/// <summary>
/// Initializes a new instance of the <see cref="MultipartUploadCommand"/> class.
/// </summary>
/// <param name="s3Client">The s3 client.</param>
/// <param name="config">The config object that has the number of threads to use.</param>
/// <param name="fileTransporterRequest">The file transporter request.</param>
internal MultipartUploadCommand(AmazonS3 s3Client, TransferUtilityConfig config, TransferUtilityUploadRequest fileTransporterRequest)
{
    this._config = config;

    if (fileTransporterRequest.IsSetFilePath())
    {
        this._logger.DebugFormat("Beginning upload of file {0}.", fileTransporterRequest.FilePath);
    }
    else
    {
        this._logger.DebugFormat("Beginning upload of stream.");
    }

    this._s3Client = s3Client;
    this._fileTransporterRequest = fileTransporterRequest;
    this._contentLength = this._fileTransporterRequest.ContentLength;

    if (fileTransporterRequest.IsSetPartSize())
        this._partSize = fileTransporterRequest.PartSize;
    else
        this._partSize = calculatePartSize(this._contentLength);

    this._logger.DebugFormat("Upload part size {0}.", this._partSize);
}
/// <summary>
/// Initializes a new instance of the <see cref="MultipartUploadCommand"/> class.
/// </summary>
/// <param name="s3Client">The s3 client.</param>
/// <param name="config">The config object that has the number of threads to use.</param>
/// <param name="fileTransporterRequest">The file transporter request.</param>
internal MultipartUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config, TransferUtilityUploadRequest fileTransporterRequest)
{
    this._config = config;

    if (fileTransporterRequest.IsSetFilePath())
    {
        _logger.DebugFormat("Beginning upload of file {0}.", fileTransporterRequest.FilePath);
    }
    else
    {
        _logger.DebugFormat("Beginning upload of stream.");
    }

    this._s3Client = s3Client;
    this._fileTransporterRequest = fileTransporterRequest;
    this._contentLength = this._fileTransporterRequest.ContentLength;

    if (fileTransporterRequest.IsSetPartSize())
        this._partSize = fileTransporterRequest.PartSize;
    else
        this._partSize = calculatePartSize(this._contentLength);

    if (fileTransporterRequest.InputStream != null)
    {
        if (fileTransporterRequest.AutoResetStreamPosition && fileTransporterRequest.InputStream.CanSeek)
        {
            fileTransporterRequest.InputStream.Seek(0, SeekOrigin.Begin);
        }
    }

    _logger.DebugFormat("Upload part size {0}.", this._partSize);
}
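For reference, a minimal caller-side sketch of the two settings most relevant to this command: TransferUtilityConfig.MinSizeBeforePartUpload controls whether the multipart path is used at all, and an explicit PartSize on the request overrides the calculated part size. Bucket, key, and file names below are hypothetical.

var transferConfig = new TransferUtilityConfig
{
    MinSizeBeforePartUpload = 16 * 1024 * 1024    // objects of 16 MB or more take the multipart upload path
};
var transferUtility = new TransferUtility(new AmazonS3Client(), transferConfig);

var uploadRequest = new TransferUtilityUploadRequest
{
    BucketName = "example-bucket",                // hypothetical bucket
    Key = "large-object.bin",                     // hypothetical key
    FilePath = @"C:\temp\large-object.bin",       // hypothetical file
    PartSize = 8 * 1024 * 1024                    // optional: use 8 MB parts instead of the calculated size
};
transferUtility.Upload(uploadRequest);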
public void TestMultipartUploadStreamViaTransferUtility()
{
    var transferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 6000000
    };
    var transfer = new TransferUtility(Client, transferConfig);

    var content = new string('a', 7000000);
    var contentStream = new MemoryStream(Encoding.UTF8.GetBytes(content));
    var uploadRequest = new TransferUtilityUploadRequest
    {
        BucketName = bucketName,
        Key = UtilityMethods.GenerateName(nameof(ObjectLockConfigurationTests)),
        CalculateContentMD5Header = true,
        InputStream = contentStream
    };
    transfer.Upload(uploadRequest);

    using (var getResponse = Client.GetObject(bucketName, uploadRequest.Key))
    {
        var getBody = new StreamReader(getResponse.ResponseStream).ReadToEnd();
        Assert.AreEqual(content, getBody);
    }
}
internal SimpleUploadCommand(IAmazonS3 s3Client, TransferUtilityConfig config, TransferUtilityUploadRequest fileTransporterRequest)
{
    this._s3Client = s3Client;
    this._config = config;
    this._fileTransporterRequest = fileTransporterRequest;

    var fileName = fileTransporterRequest.FilePath;
}
public async Task UploadFileAsync(string filePath, string bucketName, string keyname)
{
    CognitoAWSCredentials credentials = new CognitoAWSCredentials(
        "us-east-1:220800bd-8233-4785-b80e-7f440926f503", // identity pool ID
        RegionEndpoint.USEast1                            // region
    );

    var config = new TransferUtilityConfig();
    config.ConcurrentServiceRequests = 10;
    config.MinSizeBeforePartUpload = 16 * 1024 * 1024;

    var s3Client = new AmazonS3Client(credentials, bucketRegion);
    transferUtility = new TransferUtility(s3Client); //, config);

    try
    {
        await transferUtility.UploadAsync(filePath, bucketName, keyname);
    }
    catch (Exception e)
    {
        Debug.WriteLine("AWSS3 upload file exception = " + e);
    }
}
public void TestMultipartUploadFileViaTransferUtility()
{
    var transferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 6000000
    };
    var transfer = new TransferUtility(Client, transferConfig);

    var content = new string('a', 7000000);
    var key = UtilityMethods.GenerateName(nameof(ObjectLockConfigurationTests));
    var filePath = Path.Combine(Path.GetTempPath(), key + ".txt");

    // Create the file
    using (StreamWriter writer = File.CreateText(filePath))
    {
        writer.Write(content);
    }

    var uploadRequest = new TransferUtilityUploadRequest
    {
        BucketName = bucketName,
        Key = key,
        CalculateContentMD5Header = true,
        FilePath = filePath
    };
    transfer.Upload(uploadRequest);

    using (var getResponse = Client.GetObject(bucketName, uploadRequest.Key))
    {
        var getBody = new StreamReader(getResponse.ResponseStream).ReadToEnd();
        Assert.AreEqual(content, getBody);
    }
}
public void TestSimpleUploadFileFailViaTransferUtility()
{
    var transferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 6000000
    };
    var transfer = new TransferUtility(Client, transferConfig);

    var content = new string('a', 2000000);
    var key = UtilityMethods.GenerateName(nameof(ObjectLockConfigurationTests));
    var filePath = Path.Combine(Path.GetTempPath(), key + ".txt");

    // Create the file
    using (StreamWriter writer = File.CreateText(filePath))
    {
        writer.Write(content);
    }

    // Do not set CalculateContentMD5Header as true which should cause upload to fail.
    var uploadRequest = new TransferUtilityUploadRequest
    {
        BucketName = bucketName,
        Key = key,
        FilePath = filePath
    };
    transfer.Upload(uploadRequest);
}
/// <summary>
///
/// <para>BFileServiceAWS: Parametered Constructor:</para>
///
/// <para><paramref name="_AccessKey"/> AWS Access Key</para>
/// <para><paramref name="_SecretKey"/> AWS Secret Key</para>
/// <para><paramref name="_Region"/> AWS Region that the S3 Client will connect to (i.e. eu-west-1)</para>
/// <para><paramref name="_ErrorMessageAction"/> Error messages will be pushed to this action</para>
///
/// </summary>
public BFileServiceAWS(
    string _AccessKey,
    string _SecretKey,
    string _Region,
    Action<string> _ErrorMessageAction = null)
{
    try
    {
        S3Client = new AmazonS3Client(new Amazon.Runtime.BasicAWSCredentials(_AccessKey, _SecretKey), Amazon.RegionEndpoint.GetBySystemName(_Region));

        TransferUtilityConfig TransferUtilConfig = new TransferUtilityConfig
        {
            ConcurrentServiceRequests = 10,
        };
        TransferUtil = new TransferUtility(S3Client, TransferUtilConfig);

        bInitializationSucceed = true;
    }
    catch (Exception e)
    {
        _ErrorMessageAction?.Invoke("BFileServiceAWS->Constructor: " + e.Message + ", Trace: " + e.StackTrace);
        bInitializationSucceed = false;
    }
}
private void makeFileTransport(AmazonS3Client _client)
{
    if (fileTransferUtility == null)
    {
        TransferUtilityConfig config = new TransferUtilityConfig();
        config.ConcurrentServiceRequests = 20;
        fileTransferUtility = new TransferUtility(_client, config);
    }
}
/// <summary>
/// Creates an Amazon S3 storage provider for a custom S3-compatible storage server
/// </summary>
/// <param name="factory">Factory reference</param>
/// <param name="accessKeyId">Access key ID</param>
/// <param name="secretAccessKey">Secret access key</param>
/// <param name="sessionToken">Optional. Only required when using session credentials.</param>
/// <param name="bucketName">Bucket name</param>
/// <param name="clientConfig">S3 client configuration</param>
/// <param name="transferUtilityConfig">S3 transfer utility configuration</param>
/// <returns>A reference to the created storage</returns>
public static IBlobStorage AwsS3(this IBlobStorageFactory factory,
    string accessKeyId,
    string secretAccessKey,
    string sessionToken,
    string bucketName,
    AmazonS3Config clientConfig,
    TransferUtilityConfig transferUtilityConfig = null)
{
    return new AwsS3BlobStorage(accessKeyId, secretAccessKey, sessionToken, bucketName, clientConfig, transferUtilityConfig);
}
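A hedged usage sketch of the factory extension above, assuming a Storage.Net-style StorageFactory.Blobs entry point; credentials, bucket, and endpoint are placeholders.

IBlobStorage storage = StorageFactory.Blobs.AwsS3(
    "AKIAEXAMPLE",                                                      // placeholder access key id
    "secret",                                                           // placeholder secret access key
    null,                                                               // no session token
    "example-bucket",
    new AmazonS3Config { ServiceURL = "https://s3.example.internal" },  // custom S3-compatible endpoint
    new TransferUtilityConfig { ConcurrentServiceRequests = 10 });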
private Task UploadDirectoryAsync(string directoryName, long size, DirectoryProgressValidator<UploadDirectoryProgressArgs> progressValidator, bool validate = true, bool concurrent = true)
{
    var directoryPath = Path.Combine(basePath, directoryName);

    for (int i = 0; i < 5; i++)
    {
        var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt");
        //MultipartUploadTests.UtilityMethods.GenerateFile(filePath, (i % 2 == 0) ? size : size / 2);
        UtilityMethods.GenerateFile(filePath, size);
    }

    var config = new TransferUtilityConfig
    {
        ConcurrentServiceRequests = 10,
    };
    var transferUtility = new TransferUtility(Client, config);
    var request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = testBucketName,
        Directory = directoryPath,
        KeyPrefix = directoryName,
        SearchPattern = "*",
        SearchOption = SearchOption.AllDirectories,
    };

    //if (concurrent)
    //    request.UploadFilesConcurrently = true;

    if (progressValidator != null)
    {
        request.UploadDirectoryProgressEvent += progressValidator.OnProgressEvent;
    }

    HashSet<string> files = new HashSet<string>();
    request.UploadDirectoryProgressEvent += (s, e) =>
    {
        files.Add(e.CurrentFile);
        Console.WriteLine("Progress callback = " + e.ToString());
    };

    transferUtility.UploadDirectory(request);
    Assert.Equal(5, files.Count);

    if (validate)
    {
        return ValidateDirectoryContents(testBucketName, directoryName, directoryPath);
    }
    else
    {
        return Task.FromResult<object>(null);
    }
}
public AmazonS3StorageProvider(IAmazonS3StorageConfiguration amazonS3StorageConfiguration)
{
    _amazonS3StorageConfiguration = amazonS3StorageConfiguration;

    var cred = new BasicAWSCredentials(_amazonS3StorageConfiguration.AWSAccessKey, _amazonS3StorageConfiguration.AWSSecretKey);

    //TODO: aws region to config
    _client = new AmazonS3Client(cred, RegionEndpoint.USEast1);

    var config = new TransferUtilityConfig();
    _transferUtility = new TransferUtility(_client, config);
}
void UploadDirectory(string directoryName, long size, TransferUtilityTests.DirectoryProgressValidator<UploadDirectoryProgressArgs> progressValidator, bool validate = true)
{
    var directoryPath = Path.Combine(basePath, directoryName);

    for (int i = 0; i < 5; i++)
    {
        var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt");
        UtilityMethods.GenerateFile(filePath, size);
    }

    var config = new TransferUtilityConfig
    {
        ConcurrentServiceRequests = 10,
    };
    var transferUtility = new TransferUtility(Client, config);
    var request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = bucketName,
        Directory = directoryPath,
        KeyPrefix = directoryName,
        SearchPattern = "*",
        SearchOption = SearchOption.AllDirectories,
    };

    if (progressValidator != null)
    {
        request.UploadDirectoryProgressEvent += progressValidator.OnProgressEvent;
    }

    HashSet<string> files = new HashSet<string>();
    request.UploadDirectoryProgressEvent += (s, e) =>
    {
        files.Add(e.CurrentFile);
        Console.WriteLine("Progress callback = " + e.ToString());
    };
    request.UploadDirectoryFileRequestEvent += (s, e) =>
    {
        var uploadRequest = e.UploadRequest;
        var key = uploadRequest.Key;
        keysToValidate.Add(key);
        SetMetadataAndHeaders(uploadRequest);
    };

    transferUtility.UploadDirectory(request);
    Assert.AreEqual(5, files.Count);

    if (validate)
    {
        TransferUtilityTests.ValidateDirectoryContents(bucketName, directoryName, directoryPath);
    }
}
void UploadDirectory(long size, TransferUtilityTests.DirectoryProgressValidator<UploadDirectoryProgressArgs> progressValidator, bool validate = true)
{
    var directory = TransferUtilityTests.CreateTestDirectory(size);
    var directoryPath = directory.FullName;
    var keyPrefix = directory.Name;

    var config = new TransferUtilityConfig
    {
        ConcurrentServiceRequests = 10,
    };
    var transferUtility = new TransferUtility(Client, config);
    var request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = bucketName,
        Directory = directoryPath,
        KeyPrefix = keyPrefix,
        SearchPattern = "*",
        SearchOption = SearchOption.AllDirectories,
    };

    if (progressValidator != null)
    {
        request.UploadDirectoryProgressEvent += progressValidator.OnProgressEvent;
    }

    HashSet<string> files = new HashSet<string>();
    request.UploadDirectoryProgressEvent += (s, e) =>
    {
        files.Add(e.CurrentFile);
        Console.WriteLine("Progress callback = " + e.ToString());
    };
    request.UploadDirectoryFileRequestEvent += (s, e) =>
    {
        var uploadRequest = e.UploadRequest;
        var key = uploadRequest.Key;
        keysToValidate.Add(key);
        SetMetadataAndHeaders(uploadRequest);
    };

    transferUtility.UploadDirectory(request);
    Assert.AreEqual(5, files.Count);

    if (validate)
    {
        TransferUtilityTests.ValidateDirectoryContents(Client, bucketName, keyPrefix, directory);
    }
}
DirectoryInfo UploadDirectory(long size, DirectoryProgressValidator<UploadDirectoryProgressArgs> progressValidator, bool validate = true, bool concurrent = true)
{
    var directory = CreateTestDirectory(size);
    var keyPrefix = directory.Name;
    var directoryPath = directory.FullName;

    var config = new TransferUtilityConfig
    {
        ConcurrentServiceRequests = 10,
    };
    var transferUtility = new TransferUtility(Client, config);
    var request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = bucketName,
        Directory = directoryPath,
        KeyPrefix = keyPrefix,
        ContentType = plainTextContentType,
        SearchPattern = "*",
        SearchOption = SearchOption.AllDirectories,
    };

    //if (concurrent)
    //    request.UploadFilesConcurrently = true;

    if (progressValidator != null)
    {
        request.UploadDirectoryProgressEvent += progressValidator.OnProgressEvent;
    }

    HashSet<string> files = new HashSet<string>();
    request.UploadDirectoryProgressEvent += (s, e) =>
    {
        files.Add(e.CurrentFile);
        Console.WriteLine("Progress callback = " + e.ToString());
    };

    transferUtility.UploadDirectory(request);
    Assert.AreEqual(5, files.Count);

    if (validate)
    {
        ValidateDirectoryContents(Client, bucketName, keyPrefix, directory, plainTextContentType);
    }

    return directory;
}
void Upload(string fileName, long size, TransferProgressValidator<UploadProgressArgs> progressValidator, AmazonS3Client client = null)
{
    var key = fileName;
    Client.DeleteObject(new DeleteObjectRequest
    {
        BucketName = bucketName,
        Key = key
    });

    var path = Path.Combine(basePath, fileName);
    UtilityMethods.GenerateFile(path, size);

    var config = new TransferUtilityConfig
    {
        //ConcurrentServiceRequests = 1,
        //MinSizeBeforePartUpload = MEG_SIZE
    };
    var transferUtility = client != null ? new TransferUtility(client, config) : new TransferUtility(Client, config);
    var request = new TransferUtilityUploadRequest
    {
        BucketName = bucketName,
        FilePath = path,
        Key = key,
        ContentType = octetStreamContentType
    };

    if (progressValidator != null)
    {
        request.UploadProgressEvent += progressValidator.OnProgressEvent;
    }

    transferUtility.Upload(request);

    var metadata = Client.GetObjectMetadata(new GetObjectMetadataRequest
    {
        BucketName = bucketName,
        Key = key
    });
    Console.WriteLine("Expected Size: {0} , Actual Size {1}", size, metadata.ContentLength);
    Assert.AreEqual(octetStreamContentType, metadata.Headers.ContentType);
    Assert.AreEqual(size, metadata.ContentLength);

    ValidateFileContents(bucketName, key, path);
}
public void TestUploadDirectoryViaTransferUtility()
{
    var transferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 6000000
    };
    var transfer = new TransferUtility(Client, transferConfig);

    var directoryKey = UtilityMethods.GenerateName(nameof(ObjectLockConfigurationTests));
    var directoryPath = Path.Combine(Path.GetTempPath(), directoryKey);

    Dictionary<string, int> filesWithSize = new Dictionary<string, int>()
    {
        { directoryKey + "_1.txt", 7000000 }, // MultipartUpload
        { directoryKey + "_2.txt", 2000000 }, // SimpleUpload
        { directoryKey + "_3.txt", 4000000 }, // SimpleUpload
    };

    // Create directory with files.
    Directory.CreateDirectory(directoryPath);
    foreach (var file in filesWithSize)
    {
        var filePath = Path.Combine(directoryPath, file.Key);
        var content = new string('a', file.Value);

        // Create the file
        using (StreamWriter writer = File.CreateText(filePath))
        {
            writer.Write(content);
        }
    }

    var uploadDirectoryRequest = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = bucketName,
        Directory = directoryPath,
        CalculateContentMD5Header = true
    };
    transfer.UploadDirectory(uploadDirectoryRequest);

    // Verify the files
    foreach (var file in filesWithSize)
    {
        using (var getResponse = Client.GetObject(bucketName, file.Key))
        {
            var getBody = new StreamReader(getResponse.ResponseStream).ReadToEnd();
            Assert.AreEqual(new string('a', file.Value), getBody);
        }
    }
}
public PersistentService()
{
    _settings = Settings.Default;

    var credentials = new BasicAWSCredentials(_settings.AWSAccessKey, _settings.AWSSecretKey);
    var region = RegionEndpoint.GetBySystemName(_settings.AWSRegion);
    _s3Client = new AmazonS3Client(credentials, region);

    _config = new TransferUtilityConfig
    {
        // Use 5 concurrent requests.
        ConcurrentServiceRequests = 5,
        // Use multipart upload for files larger than 20 MB.
        MinSizeBeforePartUpload = 20 * MB_SIZE,
    };
}
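A hypothetical helper (method and parameter names are illustrative, not part of the original class) showing how the client and config initialized above would typically be consumed:

public async Task UploadFileAsync(string filePath, string bucketName, string key)
{
    // TransferUtility chooses simple vs. multipart upload based on _config.MinSizeBeforePartUpload.
    using (var transferUtility = new TransferUtility(_s3Client, _config))
    {
        await transferUtility.UploadAsync(filePath, bucketName, key);
    }
}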
private async Task UploadAsync(string fileName, long size, TransferProgressValidator<UploadProgressArgs> progressValidator, AmazonS3Client client = null)
{
    var key = fileName;
    await Client.DeleteObjectAsync(new DeleteObjectRequest
    {
        BucketName = testBucketName,
        Key = key
    }).ConfigureAwait(false);

    var path = Path.Combine(basePath, fileName);
    UtilityMethods.GenerateFile(path, size);

    var config = new TransferUtilityConfig
    {
        //ConcurrentServiceRequests = 1,
        //MinSizeBeforePartUpload = MEG_SIZE
    };
    var transferUtility = client != null ? new TransferUtility(client, config) : new TransferUtility(Client, config);
    var request = new TransferUtilityUploadRequest
    {
        BucketName = testBucketName,
        FilePath = path,
        Key = key,
        ContentType = OCTET_STREAM_CONTENT_TYPE
    };

    if (progressValidator != null)
    {
        request.UploadProgressEvent += progressValidator.OnProgressEvent;
    }

    transferUtility.Upload(request);

    var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
    {
        BucketName = testBucketName,
        Key = key
    }).ConfigureAwait(false);
    Console.WriteLine("Expected Size: {0} , Actual Size {1}", size, metadata.ContentLength);
    Assert.Equal(OCTET_STREAM_CONTENT_TYPE, metadata.Headers.ContentType);
    Assert.Equal(size, metadata.ContentLength);

    await ValidateFileContentsAsync(testBucketName, key, path).ConfigureAwait(false);
}
/// <summary>
/// AWS Credential Version
/// </summary>
/// <param name="credential"></param>
public S3Client(S3ClientOption option, AWSCredentials credential)
{
    S3Config = new AmazonS3Config
    {
        RegionEndpoint = !string.IsNullOrEmpty(option.Region)
            ? RegionEndpoint.GetBySystemName(option.Region)
            : defaultEndPoint,
    };
    TransferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 1024 * 1024 * 16, // 16MB
        ConcurrentServiceRequests = Environment.ProcessorCount * 2,
    };

    Client = new AmazonS3Client(credential, S3Config);
    Transfer = new TransferUtility(Client);
    Transfer2 = new TransferUtility(Client, TransferConfig);
    Option = option;
}
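A short, assumed usage of the two utilities constructed above: Transfer keeps the SDK defaults, while Transfer2 applies the tuned 16 MB multipart threshold and higher request concurrency. The helper below is hypothetical.

// Hypothetical helper; prefers the tuned utility for uploads expected to be large.
public Task UploadAsync(string filePath, string bucket, string key, bool expectLargeFile)
{
    var utility = expectLargeFile ? Transfer2 : Transfer;
    return utility.UploadAsync(filePath, bucket, key);
}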
public AmazonS3DataBusStorage(
    AmazonS3Config amazonS3Config,
    AmazonS3DataBusOptions options,
    TransferUtilityConfig transferUtilityConfig,
    IRebusLoggerFactory rebusLoggerFactory,
    IRebusTime rebusTime)
{
    _amazonS3Config = amazonS3Config ?? throw new ArgumentNullException(nameof(amazonS3Config));
    _transferUtilityConfig = transferUtilityConfig ?? throw new ArgumentNullException(nameof(transferUtilityConfig));
    _rebusTime = rebusTime ?? throw new ArgumentNullException(nameof(rebusTime));
    _options = options ?? throw new ArgumentNullException(nameof(options));
    _log = rebusLoggerFactory?.GetLogger<AmazonS3DataBusStorage>() ?? throw new ArgumentNullException(nameof(rebusLoggerFactory));
    _metadataCollectionFactory = new S3MetadataCollectionFactory(options);

    if (options.AutoCreateBucket)
    {
        EnsureBucketExistsAsync().GetAwaiter().GetResult();
    }
}
public void TestSimpleUploadStreamFailViaTransferUtility()
{
    var transferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 6000000
    };
    var transfer = new TransferUtility(Client, transferConfig);

    var content = new string('a', 2000000);
    var contentStream = new MemoryStream(Encoding.UTF8.GetBytes(content));

    // Do not set CalculateContentMD5Header as true which should cause upload to fail.
    var uploadRequest = new TransferUtilityUploadRequest
    {
        BucketName = bucketName,
        Key = UtilityMethods.GenerateName(nameof(ObjectLockConfigurationTests)),
        InputStream = contentStream
    };
    transfer.Upload(uploadRequest);
}
private void TestUploadDirectory(string bucketName, string keyId)
{
    var directoryName = UtilityMethods.GenerateName("UploadDirectoryTest");
    var directoryPath = Path.Combine(basePath, directoryName);

    for (int i = 0; i < 5; i++)
    {
        var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt");
        UtilityMethods.WriteFile(filePath, fileContents);
    }

    var config = new TransferUtilityConfig
    {
        ConcurrentServiceRequests = 10,
    };
    var transferUtility = new TransferUtility(Client, config);
    var request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = bucketName,
        Directory = directoryPath,
        KeyPrefix = directoryName,
        SearchPattern = "*",
        SearchOption = SearchOption.AllDirectories,
        ServerSideEncryptionMethod = ServerSideEncryptionMethod.AWSKMS,
        ServerSideEncryptionKeyManagementServiceKeyId = keyId
    };

    HashSet<string> keys = new HashSet<string>();
    request.UploadDirectoryFileRequestEvent += (s, e) =>
    {
        keys.Add(e.UploadRequest.Key);
    };

    transferUtility.UploadDirectory(request);
    Assert.AreEqual(5, keys.Count);

    foreach (var key in keys)
    {
        VerifyObject(bucketName, key, keyId);
    }
}
/// <summary>
/// Creates a new instance of <see cref="AwsS3BlobStorage"/> for a given S3 client configuration
/// </summary>
public AwsS3BlobStorage(string accessKeyId, string secretAccessKey, string sessionToken, string bucketName, AmazonS3Config clientConfig, TransferUtilityConfig transferUtilityConfig)
{
    if (accessKeyId == null)
    {
        throw new ArgumentNullException(nameof(accessKeyId));
    }
    if (secretAccessKey == null)
    {
        throw new ArgumentNullException(nameof(secretAccessKey));
    }
    _bucketName = bucketName ?? throw new ArgumentNullException(nameof(bucketName));

    AWSCredentials awsCreds = (sessionToken == null)
        ? (AWSCredentials)new BasicAWSCredentials(accessKeyId, secretAccessKey)
        : new SessionAWSCredentials(accessKeyId, secretAccessKey, sessionToken);

    _client = new AmazonS3Client(awsCreds, clientConfig);
    _fileTransferUtility = new TransferUtility(_client, transferUtilityConfig ?? new TransferUtilityConfig());
}
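An illustrative construction of the class above (all values are placeholders): passing null for the session token selects BasicAWSCredentials, and a null transfer utility config falls back to the SDK defaults.

var storage = new AwsS3BlobStorage(
    "AKIAEXAMPLE",                                                   // placeholder access key id
    "secret",                                                        // placeholder secret access key
    null,                                                            // null -> BasicAWSCredentials
    "example-bucket",
    new AmazonS3Config { RegionEndpoint = RegionEndpoint.EUWest1 },
    transferUtilityConfig: null);                                    // null -> new TransferUtilityConfig()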
public ActionResult SummaryImage(HttpPostedFileBase file)
{
    var dbid = Session["patientId"].ToString();
    int db_id = Convert.ToInt32(dbid);
    var db_logic = new DatabaseLogic(connection, db_id);

    if (file.ContentLength > 0)
    {
        // decide where to save the image in the bucket, filename based on patient mrn number
        string savepath = Path.Combine(Server.MapPath("~/Resources/PatientImages/Patient" + Session["mrn"].ToString() + ".png"));
        string bucketName = "ehr-prod";
        string keyName = "Patient" + Session["mrn"].ToString() + ".png";

        // path to image uploaded by user
        var fileName = Path.GetFileName(file.FileName);
        var path = Path.Combine(Server.MapPath("~/Resources/PatientImages"), fileName);

        // temporarily save the image locally
        file.SaveAs(savepath);

        IAmazonS3 client;
        client = new AmazonS3Client(awsAccessKey, awsSecretKey, RegionEndpoint.USWest2);
        TransferUtilityConfig config = new TransferUtilityConfig();

        PutObjectRequest request = new PutObjectRequest()
        {
            BucketName = bucketName,
            Key = keyName,
            FilePath = savepath,
            CannedACL = S3CannedACL.PublicReadWrite
        };

        // save image to S3 bucket
        PutObjectResponse response = client.PutObject(request);
    }

    return RedirectToAction("Summary", new { databaseId = db_id });
}
public void TestMultipartUploadViaTransferUtility()
{
    var transferConfig = new TransferUtilityConfig
    {
        MinSizeBeforePartUpload = 6000000
    };
    var transfer = new TransferUtility(Client, transferConfig);

    var content = new string('a', 7000000);
    var body = new MemoryStream(System.Text.UTF8Encoding.UTF8.GetBytes(content));
    var uploadRequest = new TransferUtilityUploadRequest
    {
        BucketName = _bucketName,
        Key = "a-lot-of-as.txt",
        InputStream = body
    };
    transfer.Upload(uploadRequest);

    using (var getResponse = Client.GetObject(_accesspointArn, uploadRequest.Key))
    {
        var getBody = new StreamReader(getResponse.ResponseStream).ReadToEnd();
        Assert.AreEqual(content, getBody);
    }
}
internal UploadDirectoryCommand(TransferUtility utility, TransferUtilityConfig config, TransferUtilityUploadDirectoryRequest request)
{
    this._utility = utility;
    this._request = request;
    this._config = config;
}
internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDirectoryRequest request, TransferUtilityConfig config)
{
    this._s3Client = s3Client;
    this._request = request;
    this._config = config;
}
private CmdletOutput UploadFileToS3(ExecutorContext context)
{
    System.IO.Stream _Stream = null;

    try
    {
        var cmdletContext = context as CmdletContext;

        var request = new TransferUtilityUploadRequest
        {
            BucketName = cmdletContext.BucketName,
            Key = cmdletContext.Key
        };

        if (!string.IsNullOrEmpty(cmdletContext.File))
        {
            request.FilePath = cmdletContext.File;
        }
        else if (cmdletContext.Stream != null)
        {
            _Stream = Amazon.PowerShell.Common.StreamParameterConverter.TransformToStream(cmdletContext.Stream);
            request.InputStream = _Stream;
        }

        if (cmdletContext.CannedACL != null)
        {
            request.CannedACL = cmdletContext.CannedACL.Value;
        }
        if (!string.IsNullOrEmpty(cmdletContext.ContentType))
        {
            request.ContentType = cmdletContext.ContentType;
        }
        if (cmdletContext.StorageClass != null)
        {
            request.StorageClass = cmdletContext.StorageClass.Value;
        }
        if (cmdletContext.ServerSideEncryptionMethod != null)
        {
            request.ServerSideEncryptionMethod = cmdletContext.ServerSideEncryptionMethod.Value;
        }
        if (cmdletContext.ServerSideEncryptionCustomerMethod != null)
        {
            request.ServerSideEncryptionCustomerMethod = cmdletContext.ServerSideEncryptionCustomerMethod;
        }
        if (cmdletContext.ServerSideEncryptionCustomerProvidedKey != null)
        {
            request.ServerSideEncryptionCustomerProvidedKey = cmdletContext.ServerSideEncryptionCustomerProvidedKey;
        }
        if (cmdletContext.ServerSideEncryptionCustomerProvidedKeyMD5 != null)
        {
            request.ServerSideEncryptionCustomerProvidedKeyMD5 = cmdletContext.ServerSideEncryptionCustomerProvidedKeyMD5;
        }
        if (cmdletContext.ServerSideEncryptionKeyManagementServiceKeyId != null)
        {
            request.ServerSideEncryptionKeyManagementServiceKeyId = cmdletContext.ServerSideEncryptionKeyManagementServiceKeyId;
        }
        if (cmdletContext.TagSet != null)
        {
            request.TagSet = new List<Tag>(cmdletContext.TagSet);
        }

        var transferUtilityConfig = new TransferUtilityConfig();
        if (cmdletContext.ConcurrentServiceRequests.HasValue)
        {
            transferUtilityConfig.ConcurrentServiceRequests = cmdletContext.ConcurrentServiceRequests.Value;
        }

        AmazonS3Helper.SetMetadataAndHeaders(request, cmdletContext.Metadata, cmdletContext.Headers);

        CmdletOutput output;

        using (var tu = new TransferUtility(Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint), transferUtilityConfig))
        {
            Utils.Common.WriteVerboseEndpointMessage(this, Client.Config, "Amazon S3 object upload APIs");

            var runner = new ProgressRunner(this);
            string fileName = string.IsNullOrEmpty(cmdletContext.File) ? cmdletContext.Key : cmdletContext.File;
            var tracker = new UploadFileProgressTracker(runner, handler => request.UploadProgressEvent += handler, fileName);

            output = runner.SafeRun(() => tu.Upload(request), tracker);
        }

        return output;
    }
    finally
    {
        if (_Stream != null)
        {
            _Stream.Dispose();
        }
    }
}
/// <summary>
/// Configures the storage of subscriptions in Amazon S3
/// </summary>
public static void StoreInAmazonS3(this StandardConfigurer<ISubscriptionStorage> configurer, string accessKeyId, string secretAccessKey, RegionEndpoint regionEndpoint, string bucketName, TransferUtilityConfig transferUtilityConfig = null)
{
    if (configurer == null)
    {
        throw new ArgumentNullException(nameof(configurer));
    }
    if (accessKeyId == null)
    {
        throw new ArgumentNullException(nameof(accessKeyId));
    }
    if (secretAccessKey == null)
    {
        throw new ArgumentNullException(nameof(secretAccessKey));
    }
    if (regionEndpoint == null)
    {
        throw new ArgumentNullException(nameof(regionEndpoint));
    }

    AmazonS3DataBusOptions options = (bucketName != null)
        ? new AmazonS3DataBusOptions(bucketName)
        : null;

    Configure(configurer, new BasicAWSCredentials(accessKeyId, secretAccessKey), new AmazonS3Config { RegionEndpoint = regionEndpoint }, options);
}