/// <summary>
/// Initializes tenant-scoped S3 storage from the module configuration:
/// the domain list, per-domain expiration times, and per-domain/module ACLs.
/// </summary>
// NOTE(review): handlerConfig is accepted but never read here — confirm it is
// required by the storage-factory contract or remove it.
public S3Storage(string tenant, HandlerConfigurationElement handlerConfig, ModuleConfigurationElement moduleConfig)
{
    _tenant = tenant;
    _modulename = moduleConfig.Name;
    _dataList = new DataList(moduleConfig);
    // Domains are stored with a trailing slash so they can be used directly
    // as key prefixes.
    _domains.AddRange(
        moduleConfig.Domains.Cast <DomainConfigurationElement>().Select(x => string.Format("{0}/", x.Name)));
    // Build the expiration map: only domains with a non-zero Expires get an
    // entry; the empty-string key holds the module-wide default.
    _domainsExpires =
        moduleConfig.Domains.Cast <DomainConfigurationElement>().Where(x => x.Expires != TimeSpan.Zero).
        ToDictionary(x => x.Name, y => y.Expires);
    _domainsExpires.Add(string.Empty, moduleConfig.Expires);
    // Per-domain canned ACLs, plus the module-level fallback ACL.
    _domainsAcl = moduleConfig.Domains.Cast <DomainConfigurationElement>().ToDictionary(x => x.Name, y => GetS3Acl(y.Acl));
    _moduleAcl = GetS3Acl(moduleConfig.Acl);
}
/// <summary>
/// Uploads a local file to S3 under the given key with either a public-read
/// or private canned ACL. Content type/encoding headers are applied only
/// when explicitly supplied by the caller.
/// </summary>
private void StoreFile(string file, string key, string bucketName, bool isPublicRead, string contentType = null, string contentEncoding = null)
{
    var request = new PutObjectRequest()
    {
        CannedACL = isPublicRead ? S3CannedACL.PublicRead : S3CannedACL.Private,
        FilePath = file,
        BucketName = bucketName,
        Key = key
    };

    // Leave the headers untouched unless the caller asked for them —
    // explicitly assigning null might not be equivalent to not setting them.
    if (contentType != null)
    {
        request.ContentType = contentType;
    }

    if (contentEncoding != null)
    {
        request.Headers.ContentEncoding = contentEncoding;
    }

    Client.PutObject(request);
}
/// <summary>
/// Uploads a stream to the "shriners_rms" bucket under "folder/{uploadAsFileName}"
/// with the given canned ACL and storage class.
/// Best-effort: returns true on success, false on any failure (never throws).
/// </summary>
public bool UploadFileToS3(string uploadAsFileName, Stream ImageStream, S3CannedACL filePermission, S3StorageClass storageType)
{
    try
    {
        // SECURITY FIX: the original hard-coded an AWS access key and secret in
        // source. Credentials now come from the environment; prefer an SDK
        // credential provider / instance profile where available.
        string accessKey = Environment.GetEnvironmentVariable("AWS_ACCESS_KEY_ID");
        string secretKey = Environment.GetEnvironmentVariable("AWS_SECRET_ACCESS_KEY");

        // `using` guarantees the client is disposed even when PutObject throws
        // (the original leaked it on the exception path).
        using (AmazonS3 client = Amazon.AWSClientFactory.CreateAmazonS3Client(accessKey, secretKey))
        {
            PutObjectRequest request = new PutObjectRequest();
            request.WithKey("folder" + "/" + uploadAsFileName);
            request.WithInputStream(ImageStream);
            request.WithBucketName("shriners_rms");
            request.CannedACL = filePermission;
            request.StorageClass = storageType;
            client.PutObject(request);
        }
    }
    catch
    {
        // Deliberate swallow: the caller only receives a boolean success flag.
        return false;
    }
    return true;
}
/// <summary>
/// Delivers a message body to S3. The destination URI has the form
/// s3://&lt;credentialName&gt;@&lt;bucketname&gt;/filename, where the path may be a
/// format string with placeholders {0}=MessageId, {1}=MessageGroupId,
/// {2}=CorrelationId. A query parameter may override the default canned ACL.
/// </summary>
public async Task Deliver(Uri destination, MsgNThenMessage message)
{
    //https://s3.us-east-2.amazonaws.com/my-bucket-name/filename
    //s3://john.doe@my-bucket-name/filename[
    //s3://<credentialName>@<bucketname>/filename
    var credentials = GetCredentials(destination);
    var bucketName = destination.Host;
    // NOTE(review): PathAndQuery includes the query string, so the formatted
    // key may contain "?..." — verify that is intended.
    var pathAndQuery = Uri.UnescapeDataString(destination.PathAndQuery).TrimStart('/');
    var messageId = message.Headers[HeaderConstants.MessageId];
    var messageGroupId = message.Headers[HeaderConstants.MessageGroupId];
    var correlationId = message.Headers[HeaderConstants.CorrelationId];
    // The object key may embed any of the three header values via {0}/{1}/{2}.
    var fileKey = string.Format(pathAndQuery, messageId, messageGroupId, correlationId);
    var queryDictionary = QueryHelpers.ParseQuery(destination.Query);
    // Rewind the body when possible so the full payload is uploaded.
    if (message.Body.CanSeek)
    {
        message.Body.Position = 0;
    }
    // NOTE(review): region is hard-coded to us-east-1 — confirm this is intended.
    using (var client = new AmazonS3Client(credentials.awsAccessKeyId, credentials.awsSecretAccessKey, RegionEndpoint.USEast1))
    {
        var uploadRequest = new TransferUtilityUploadRequest
        {
            InputStream = message.Body,
            Key = fileKey,
            BucketName = bucketName,
            CannedACL = S3CannedACL.BucketOwnerFullControl
        };
        // The destination query string may override the default ACL; unknown
        // values are ignored and the default kept.
        if (queryDictionary.TryGetValue(QueryConstants.S3CannedACL, out var val))
        {
            var cannedAcl = S3CannedACL.FindValue(val);
            if (cannedAcl != null)
            {
                uploadRequest.CannedACL = cannedAcl;
            }
        }
        var fileTransferUtility = new TransferUtility(client);
        await fileTransferUtility.UploadAsync(uploadRequest);
    }
}
/// <summary>
/// Puts <paramref name="contents"/> into <paramref name="bucket"/> at
/// <paramref name="key"/>, defaulting the canned ACL to
/// BucketOwnerFullControl when none is supplied.
/// </summary>
public async Task <PutObjectResponse> PutObjectAsync(string bucket, string key, Stream contents, S3CannedACL s3CannedAcl = null, CancellationToken cancellationToken = default)
{
    this.Logger.LogDebug($"[{nameof(this.PutObjectAsync)}]");
    this.Logger.LogTrace(JsonConvert.SerializeObject(new { bucket, key, s3CannedAcl }));

    if (string.IsNullOrWhiteSpace(bucket))
    {
        throw new ArgumentNullException(nameof(bucket));
    }

    if (string.IsNullOrWhiteSpace(key))
    {
        throw new ArgumentNullException(nameof(key));
    }

    if (contents == null)
    {
        throw new ArgumentNullException(nameof(contents));
    }

    // Default the ACL without mutating the logged parameter.
    var effectiveAcl = s3CannedAcl ?? S3CannedACL.BucketOwnerFullControl;

    var putRequest = new Amazon.S3.Model.PutObjectRequest
    {
        BucketName = bucket,
        CannedACL = effectiveAcl,
        InputStream = contents,
        Key = key,
    };

    return await this.PutObjectAsync(request: putRequest, cancellationToken: cancellationToken);
}
/// <summary>
/// Starts a multipart upload for the given bucket/key (canned ACL defaults
/// to BucketOwnerFullControl) and returns the new upload id.
/// </summary>
public async Task <string> MultipartUploadStartAsync(string bucket, string key, S3CannedACL s3CannedAcl = null, CancellationToken cancellationToken = default)
{
    this.Logger.LogDebug($"[{nameof(this.MultipartUploadStartAsync)}]");
    this.Logger.LogTrace(JsonConvert.SerializeObject(new { bucket, key, s3CannedAcl }));

    if (string.IsNullOrWhiteSpace(bucket))
    {
        throw new ArgumentNullException(nameof(bucket));
    }

    if (string.IsNullOrWhiteSpace(key))
    {
        throw new ArgumentNullException(nameof(key));
    }

    var initiateRequest = new Amazon.S3.Model.InitiateMultipartUploadRequest
    {
        BucketName = bucket,
        CannedACL = s3CannedAcl ?? S3CannedACL.BucketOwnerFullControl,
        Key = key,
    };

    this.Logger.LogTrace(JsonConvert.SerializeObject(value: initiateRequest));

    // Fall back to the instance-level token when the caller did not supply one.
    var effectiveToken = cancellationToken == default ? this.CancellationToken.Token : cancellationToken;
    var response = await this.Repository.InitiateMultipartUploadAsync(request: initiateRequest, cancellationToken: effectiveToken);

    this.Logger.LogTrace(JsonConvert.SerializeObject(value: response));

    return response.UploadId;
}
/// <summary>
/// Uploads the posted file to the given bucket (object key = original file
/// name) and returns "ok" on success. S3/SDK exceptions propagate unchanged.
/// </summary>
public static string UploadFile(
    string bucketname,
    string bucketUrl,
    S3CannedACL permissions,
    S3StorageClass storageclass,
    HttpPostedFileBase file)
{
    var S3Config = new AmazonS3Config
    {
        RegionEndpoint = RegionEndpoint.USEast1, //its default region set by amazon
    };

    // Dispose the client deterministically (the original leaked it), and drop
    // the original `catch (Exception) { throw; }` which was a no-op.
    using (var s3Client = new AmazonS3Client(AppConfig.AWSAccessKey, AppConfig.AWSSecretKey, S3Config))
    {
        PutObjectRequest putRequest = new PutObjectRequest
        {
            BucketName = bucketname,
            Key = file.FileName,
            StorageClass = storageclass,
            CannedACL = permissions,
            ContentType = file.ContentType,
            InputStream = file.InputStream
        };

        s3Client.PutObject(putRequest);
        return "ok";
    }
}
/// <summary>
/// Applies the given canned ACL to an existing object via a PUT ACL request.
/// Exceptions propagate to the caller unchanged.
/// </summary>
public async Task PutACLAsync(string bucketName, string key, S3CannedACL s3CannedACL, CancellationToken token)
{
    // The original wrapped this in `catch (Exception) { throw; }` — a pure
    // no-op — and stored the task in a local before awaiting; both removed.
    using (IAmazonS3 client = new AmazonS3Client(AccessKey, SecretKey, new AmazonS3Config { ServiceURL = ServiceUrl }))
    {
        await client.PutACLAsync(new PutACLRequest
        {
            BucketName = bucketName,
            Key = key,
            CannedACL = s3CannedACL
        }, token);
    }
}
/// <summary>
/// Creates a bucket-backed file system: parses host name / key prefix /
/// canned ACL from their string forms and wires up a region-specific S3
/// client factory plus default logging and MIME resolution.
/// </summary>
/// <exception cref="ArgumentNullException">When bucketName is null or empty.</exception>
public BucketFileSystem(
    string bucketName,
    string bucketHostName,
    string bucketKeyPrefix,
    string region,
    string cannedACL)
{
    if (string.IsNullOrEmpty(bucketName))
    {
        // nameof instead of the magic string "bucketName" (refactor-safe).
        throw new ArgumentNullException(nameof(bucketName));
    }

    BucketName = bucketName;
    BucketHostName = BucketExtensions.ParseBucketHostName(bucketHostName);
    BucketPrefix = BucketExtensions.ParseBucketPrefix(bucketKeyPrefix);
    ACL = AclExtensions.ParseCannedAcl(cannedACL);

    // A fresh client per call, bound to the resolved region endpoint.
    var regionEndpoint = RegionEndpoint.GetBySystemName(region);
    ClientFactory = () => new AmazonS3Client(regionEndpoint);

    LogHelper = new LogHelperWrapper();
    MimeTypeResolver = new DefaultMimeTypeResolver();
}
/// <summary>
/// Buffers an uploaded form file into memory and sends it to S3 via
/// TransferUtility; returns the object file name (unique when requested).
/// </summary>
public async Task <string> Upload(string bucket, string[] path, IFormFile file, RegionEndpoint region, S3CannedACL acl, bool unique)
{
    using (var client = _client(region))
    using (var buffer = new MemoryStream())
    {
        // Copy asynchronously (the original blocked with CopyTo inside an
        // async method) and rewind: after the copy the stream position sits
        // at the end, which risks uploading zero bytes.
        await file.CopyToAsync(buffer);
        buffer.Position = 0;

        var fileName = unique ? file.GetUniqueFileName() : file.FileName;
        var uploadRequest = new TransferUtilityUploadRequest
        {
            BucketName = bucket,
            Key = fileName.ToBucketDirectory(path),
            InputStream = buffer,
            CannedACL = acl
        };

        var fileTransferUtility = new TransferUtility(client);
        await fileTransferUtility.UploadAsync(uploadRequest);
        return fileName;
    }
}
/// <summary>
/// Uploads a local file to S3 via TransferUtility with the given storage
/// class and canned ACL. Any failure is recorded in the error fields; the
/// return value is true exactly when no error was recorded.
/// </summary>
public bool FileUpload(string bucketname, string dataname, string filepath, S3StorageClass storageClass, S3CannedACL s3CannedACL)
{
    // Reset error info
    ClearErrorInfo();

    try
    {
        var uploadRequest = new TransferUtilityUploadRequest
        {
            BucketName = bucketname,
            FilePath = filepath,
            StorageClass = storageClass,
            PartSize = 6291456, // 6 MB.
            Key = dataname,
            ContentType = "binary/octet-stream",
            CannedACL = s3CannedACL
        };
        fileTransferUtility.Upload(uploadRequest);
    }
    catch (Exception ex)
    {
        // Record the failure instead of throwing; callers check the flag.
        ErrorCode = e_Exception;
        ErrorMessage = ex.Message + "::" + ex.InnerException;
    }

    return ErrorCode == 0;
}
/// <summary>
/// Convenience overload: uploads the file with the ReducedRedundancy
/// storage class. See the five-argument overload for details.
/// </summary>
public bool FileUpload(string bucketname, string dataname, string filepath, S3CannedACL s3CannedACL)
{
    var uploaded = FileUpload(bucketname, dataname, filepath, S3StorageClass.ReducedRedundancy, s3CannedACL);
    return uploaded;
}
/// <summary>
/// Clears any canned access control list (ACL) previously set for the
/// uploaded object, so no canned ACL is applied.
/// </summary>
public void RemoveCannedACL()
{
    cannedACL = null;
}
/// <summary>
/// Builds an InitiateMultipartUploadRequest from the current state of the
/// form controls: storage class, redirect/payer options, metadata grid,
/// standard headers, access control (canned ACL or explicit grants), and
/// server-side encryption settings.
/// </summary>
private InitiateMultipartUploadRequest requestFromCtrls()
{
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest();

    // Set storage class from control values (radio buttons are mutually
    // exclusive; Standard is the fallback).
    if (RadioReducedRedund.Checked)
    {
        request.StorageClass = S3StorageClass.ReducedRedundancy;
    }
    else if (RadioStandardIA.Checked)
    {
        request.StorageClass = S3StorageClass.StandardInfrequentAccess;
    }
    else
    {
        request.StorageClass = S3StorageClass.Standard;
    }

    // Set other fields from control values
    request.WebsiteRedirectLocation = TxtWebsite.Text;
    request.RequestPayer = (ChkRequestPayer.Checked ? RequestPayer.Requester : null);

    // Set metadata from control values; the grid's "new row" placeholder is skipped.
    IEnumerable <DataGridViewRow> metaRows = DgvMetadata.Rows.Cast <DataGridViewRow>().Where(r => !r.IsNewRow);
    foreach (DataGridViewRow row in metaRows)
    {
        string key = row.Cells[DgvColMetadataKey.Index].Value.ToString();
        string val = row.Cells[DgvColMetadataValue.Index].Value.ToString();
        request.Metadata.Add(key, val);
    }

    // Set headers from control values; Expires is normalized to UTC.
    request.Headers.ContentType = TxtType.Text;
    request.Headers.ContentDisposition = TxtDisposition.Text;
    request.Headers.ContentEncoding = TxtEncoding.Text;
    request.Headers.Expires = DatePickerExpires.Value.ToUniversalTime();

    // Initialize access control: canned ACL and explicit grants are mutually
    // exclusive, so whichever mode is unselected is nulled/skipped.
    bool useCannedAcl = ChkUseCannedACLs.Checked;
    request.CannedACL = (useCannedAcl ? S3CannedACL.FindValue(ComboAcl.Text) : null);
    if (useCannedAcl)
    {
        request.Grants = null;
    }
    else
    {
        IEnumerable <S3Grant> grants = DgvGrants.Rows.Cast <DataGridViewRow>()
                                       .Where(r => !r.IsNewRow)
                                       .SelectMany(r => grantsFromRow(r));
        request.Grants.AddRange(grants);
    }

    // Initialize server side encryption method: KMS, a new AES-256 key, or none.
    bool kms = RadioSseKms.Checked;
    bool newKey = RadioSseNewKey.Checked;
    if (kms)
    {
        request.ServerSideEncryptionMethod = ServerSideEncryptionMethod.AWSKMS;
    }
    else if (newKey)
    {
        request.ServerSideEncryptionMethod = ServerSideEncryptionMethod.AES256;
    }
    else
    {
        request.ServerSideEncryptionMethod = ServerSideEncryptionMethod.None;
    }

    // Set server side encryption from control values; customer-provided key
    // fields only apply in the "new key" mode, the KMS key id only in KMS mode.
    request.ServerSideEncryptionKeyManagementServiceKeyId = (kms ? TxtSseKeyId.Text : null);
    request.ServerSideEncryptionCustomerMethod = (newKey ? ServerSideEncryptionCustomerMethod.AES256 : ServerSideEncryptionCustomerMethod.None);
    request.ServerSideEncryptionCustomerProvidedKey = (newKey ? TxtSseCustomerKey.Text : null);
    request.ServerSideEncryptionCustomerProvidedKeyMD5 = (newKey ? TxtSseCustomerKeyMd5.Text : null);

    return(request);
}
/// <summary>
/// Constructs a new empty BucketObjectsWindow.
/// </summary>
/// <param name="regionBucketAndPrefix">The region, bucket, and prefix, in the following form: [region:]bucket/prefix.</param>
/// <param name="batchIdCounter">The <see cref="BatchIdCounter"/> for this window.</param>
/// <param name="unprefixedStartAtKey">The key to start at or <b>null</b> to start at the beginning.</param>
/// <param name="unprefixedStopAtKey">The key to stop at or <b>null</b> to start at the beginning.</param>
/// <param name="cannedAcl">A <see cref="S3CannedACL"/> to use for the target file.</param>
/// <param name="grant">A <see cref="S3Grant"/> indicating rights grants to apply to the target file.</param>
public BucketObjectsWindow(string regionBucketAndPrefix, BatchIdCounter batchIdCounter, string unprefixedStartAtKey = null, string unprefixedStopAtKey = null, S3CannedACL cannedAcl = null, S3Grant grant = null, ServerSideEncryptionMethod targetEncryptionMethod = null)
{
    _batchIdCounter = batchIdCounter;
    // Split "[region:]bucket/prefix" into (region, bucket, prefix).
    Tuple <string, string, string> parsedRegionBucketAndPrefix = ParseRegionBucketAndPrefix(regionBucketAndPrefix);
    // When no region is given, resolve it from the bucket itself.
    Amazon.RegionEndpoint region = Amazon.RegionEndpoint.GetBySystemName(string.IsNullOrEmpty(parsedRegionBucketAndPrefix.Item1) ? GetBucketRegion(parsedRegionBucketAndPrefix.Item2) : parsedRegionBucketAndPrefix.Item1);
    _s3 = new AmazonS3Client(region);
    _bucket = parsedRegionBucketAndPrefix.Item2;
    _prefix = parsedRegionBucketAndPrefix.Item3;
    _grant = grant;
    _grantCannedAcl = cannedAcl;
    _queue = new ConcurrentQueue <Batch>();
    // Start/stop keys are stored with the prefix prepended; the "unprefixed"
    // fields track the user-visible bounds.
    if (!string.IsNullOrEmpty(unprefixedStartAtKey))
    {
        _startAtKey = _prefix + unprefixedStartAtKey;
        _unprefixedLeastKey = unprefixedStartAtKey;
    }
    else
    {
        _unprefixedLeastKey = string.Empty;
    }
    if (!string.IsNullOrEmpty(unprefixedStopAtKey))
    {
        _stopAtKey = _prefix + unprefixedStopAtKey;
    }
    _unprefixedGreatestKey = string.Empty;
    _targetEncryptionMethod = targetEncryptionMethod;
}
/// <summary>
/// Creates a new bucket under the specified account if a bucket with the
/// same name does not already exist, blocking on the async implementation.
/// </summary>
/// <param name="options">The AWS S3 Storage cache options.</param>
/// <param name="acl">
/// Specifies whether data in the bucket may be accessed publicly and the level of access.
/// <see cref="S3CannedACL.PublicRead"/> specifies full public read access for bucket
/// and object data. <see cref="S3CannedACL.Private"/> specifies that the bucket
/// data is private to the account owner.
/// </param>
/// <returns>
/// A <see cref="PutBucketResponse"/> describing the newly created bucket when it
/// did not already exist; otherwise <see langword="null"/>.
/// </returns>
public static PutBucketResponse CreateIfNotExists(
    AWSS3StorageCacheOptions options,
    S3CannedACL acl)
{
    // Synchronous wrapper over the async counterpart.
    return AsyncHelper.RunSync(() => CreateIfNotExistsAsync(options, acl));
}
/// <summary>
/// Fluent setter: stores <paramref name="acl"/> as the canned ACL the copied
/// S3 object will receive, then returns this request for call chaining.
/// </summary>
/// <param name="acl">The Canned ACL to be set on the object</param>
/// <returns>The request with the CannedACL set</returns>
public CopyObjectRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}
/// <summary>
/// Resets any previously set canned ACL on this object back to
/// <see cref="S3CannedACL.NoACL"/>.
/// </summary>
public void RemoveCannedACL()
{
    cannedACL = S3CannedACL.NoACL;
}
/// <summary>
/// Stub implementation: performs no storage work and always resolves to the
/// literal string "Success".
/// </summary>
public Task <string> StoreFileWithACLAsync(string filePath, string fileName, S3CannedACL cannedACL)
{
    // Already-completed task; no I/O is performed here.
    var completed = Task.FromResult <string>("Success");
    return completed;
}
/// <summary>
/// Constructs a new access policy condition that compares an Amazon S3
/// canned ACL with the canned ACL specified by an incoming request.
/// <para>
/// Use this condition to require that objects uploaded to a bucket carry a
/// specific canned ACL.
/// </para>
/// </summary>
/// <param name="cannedAcl">The Amazon S3 canned ACL to compare against.</param>
/// <returns>A new access control policy condition that compares the Amazon S3
/// canned ACL specified in incoming requests against the value
/// specified.</returns>
public static Condition NewCannedACLCondition(S3CannedACL cannedAcl)
{
    // Translate the canned ACL enum value into its canonical header string.
    var aclHeader = S3Constants.CannedAcls[(int)cannedAcl];
    return ConditionFactory.NewCondition(StringComparisonType.StringEquals,
                                         S3_CANNED_ACL_CONDITION_KEY,
                                         aclHeader);
}
/// <summary>
/// Changes the canned ACL on an existing S3 object. S3-specific errors are
/// logged and swallowed; any other exception propagates to the caller.
/// </summary>
/// <remarks>
/// Fixed: was `async void`, so exceptions were unobservable and callers could
/// not await completion; now returns <see cref="Task"/> (call sites remain
/// source-compatible). The client is also disposed now.
/// </remarks>
public static async Task ChangeFilePermission(string awsAccessKeyId, string awsSecretAccessKey, RegionEndpoint region, string bucketName, string keyName, S3CannedACL permission)
{
    try
    {
        using (IAmazonS3 s3Client = new AmazonS3Client(awsAccessKeyId, awsSecretAccessKey, region))
        {
            PutACLRequest request = new PutACLRequest()
            {
                CannedACL = permission,
                BucketName = bucketName,
                Key = keyName
            };
            await s3Client.PutACLAsync(request);
            Debug.WriteLine("Change Permission Complete");
        }
    }
    catch (AmazonS3Exception ex)
    {
        Debug.WriteLine("Exception occur when Change Permission: '{0}'", ex.Message);
    }
}
/// <summary>
/// Uploads a byte array to S3 (optionally under a sub-directory) and returns
/// the object's URL. Exceptions propagate to the caller unchanged.
/// </summary>
public string UploadFile(string sourceBucket, string subDirectoryInBucket, byte[] file, string fileName, S3CannedACL permission)
{
    // The original try/catch either rethrew with `throw s3Exception;` (which
    // destroys the stack trace) or `throw;` (a no-op) — removed entirely so
    // exceptions propagate with their original stack traces.
    var client = new AmazonS3Client(AwsAccessKey, AwsSecretAccessKey, RegionEndpoint.USEast1);
    var stream = new MemoryStream(file);
    fileName = (string.IsNullOrEmpty(subDirectoryInBucket)) ? fileName : subDirectoryInBucket + @"/" + fileName;
    var request = new PutObjectRequest()
    {
        BucketName = sourceBucket,
        Key = fileName,
        InputStream = stream,
        CannedACL = permission
    };
    // NOTE(review): the upload is fire-and-forget — the URL is returned before
    // the put completes, and the client/stream are intentionally left
    // undisposed so the in-flight upload is not cut off. Confirm this is the
    // intended contract, and consider awaiting the task instead.
    PutObjectAsync(client, request);
    return GetUrlFile(sourceBucket, fileName);
}
/// <summary>
/// Creates a file system backed by the given S3 client and bucket; the
/// supplied canned ACL is applied to objects it stores.
/// </summary>
public S3FileSystem(IAmazonS3 client, string bucket, S3CannedACL acl)
{
    _acl = acl;
    _bucket = bucket;
    _client = client;
}
/// <summary>
/// Fluent setter: stores <paramref name="acl"/> as the canned ACL the S3
/// object will receive when the multipart upload completes, then returns
/// this request for call chaining. See
/// <see cref="T:Amazon.S3.Model.S3CannedACL"/> for details on canned ACLs.
/// </summary>
/// <param name="acl">The Canned ACL to be set on the object</param>
/// <returns>The request with the CannedACL set</returns>
public InitiateMultipartUploadRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}
/// <summary>
/// Decodes a base64 payload and uploads it to <paramref name="Path"/> in the
/// given bucket. Returns a JSON result message plus a success flag and error
/// text; never throws.
/// </summary>
public (JsonResult result, bool Succeeded, string Error) UploadFile(string bucketName, string base64String, string Path, S3CannedACL s3CannedACL)
{
    try
    {
        // NOTE(review): sync-over-async (.GetAwaiter().GetResult()) — acceptable
        // in a synchronous API, but a deadlock risk on a UI / legacy ASP.NET
        // synchronization context; confirm the hosting environment.
        if (AmazonS3Util.DoesS3BucketExistV2Async(_client, bucketName).GetAwaiter().GetResult())
        {
            // Fixes: removed the unused PutObjectRequest local; assign the
            // S3CannedACL enum directly instead of its .Value string; dispose
            // the MemoryStream.
            byte[] bytes = Convert.FromBase64String(base64String);
            using (var fileToUpload = new MemoryStream(bytes))
            {
                var fileTransferUtility = new TransferUtility(_client);
                TransferUtilityUploadRequest request = new TransferUtilityUploadRequest
                {
                    BucketName = bucketName,
                    CannedACL = s3CannedACL,
                    InputStream = fileToUpload,
                    Key = Path
                };
                fileTransferUtility.Upload(request);
            }
            return (new JsonResult("File uploaded successfully."), true, "");
        }
        else
        {
            return (new JsonResult("Bucket not exist."), false, "Bucket not exist.");
        }
    }
    catch (Exception)
    {
        // Best-effort by design: failures surface only through the tuple.
        return (new JsonResult("File upload failed."), false, "File upload failed.");
    }
}
/// <summary>
/// Uploads a stream to "{filePath}/{fileName}" in the bucket configured for
/// <paramref name="s3OptionName"/>, using customer-provided AES-256
/// server-side encryption. Returns the object key on HTTP 200, otherwise null.
/// </summary>
public async Task <string> UploadDynamicFileWithNoGuidAsync(string s3OptionName, string filePath, string fileName, Stream stream, S3CannedACL s3CannedACL = null)
{
    var option = _s3Options[s3OptionName];

    using (var client = CreateClient(option))
    {
        var key = $"{filePath}/{fileName}";

        var putRequest = new PutObjectRequest()
        {
            BucketName = option.BucketName,
            Key = key,
            InputStream = stream,
            ServerSideEncryptionMethod = ServerSideEncryptionMethod.AES256,
            ServerSideEncryptionCustomerProvidedKey = option.AesKey,
        };

        // Only set an ACL when the caller asked for one; otherwise the
        // bucket default applies.
        if (s3CannedACL != null)
        {
            putRequest.CannedACL = s3CannedACL;
        }

        var response = await client.PutObjectAsync(putRequest);
        return response.HttpStatusCode == System.Net.HttpStatusCode.OK ? key : null;
    }
}
/// <summary>
/// Fluent setter: stores <paramref name="acl"/> as the canned ACL to apply
/// to the target S3 object or bucket, then returns this request for chaining.
/// </summary>
/// <param name="acl">The Canned ACL to be set on the object</param>
/// <returns>The request with the CannedACL set</returns>
public SetACLRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}
/// <summary>
/// Fluent setter: stores the canned access control list (ACL) that will be
/// applied to every object uploaded by this directory request, then returns
/// this instance so further calls can be chained. See
/// <see cref="T:Amazon.S3.Model.S3CannedACL"/> for details on canned ACLs.
/// </summary>
/// <param name="acl">
/// The canned access control list (ACL) for the uploaded objects.
/// </param>
/// <returns>
/// This object instance, enabling additional method calls to be chained together.
/// </returns>
public TransferUtilityUploadDirectoryRequest WithCannedACL(S3CannedACL acl)
{
    _cannedACL = acl;
    return this;
}
/// <summary>
/// Uploads an image to S3 with width/height metadata and an MD5 integrity
/// check against the returned ETag, then optionally archives the same bytes
/// to an AWS Glacier vault. All failures are captured in the returned
/// model's Exception property rather than thrown.
/// </summary>
public async Task <ImageUploadedModel> UploadImage(
    string bucketName,
    string bucketUrl,
    string objectKey,
    S3StorageClass storageClass,
    S3CannedACL permissions,
    string glacierVaultName,
    ImageInfo image)
{
    ImageUploadedModel model = new ImageUploadedModel();
    try
    {
        // Stream is kept open (AutoCloseStream = false) because it is reused
        // for the Glacier archive below.
        PutObjectRequest putRequest = new PutObjectRequest
        {
            BucketName = bucketName,
            Key = objectKey,
            StorageClass = storageClass,
            CannedACL = permissions,
            ContentType = image.MimeType,
            AutoCloseStream = false
        };
        putRequest.Metadata.Add("width", image.Width.ToString());
        putRequest.Metadata.Add("height", image.Height.ToString());
        putRequest.InputStream = image.Image;
        // Supply the MD5 digest so S3 validates the payload on receipt.
        byte[] md5Hash = image.Image.Md5Hash();
        putRequest.MD5Digest = md5Hash.ToBase64String();
        PutObjectResponse response = await S3Client.PutObjectAsync(putRequest);
        // Compare the returned ETag with the locally computed one to detect
        // in-transit corruption (valid for non-multipart, non-SSE-KMS puts).
        string eTag = response.ETag.Trim('"').ToLowerInvariant();
        string expectedETag = md5Hash.ToS3ETagString();
        if (eTag != expectedETag)
        {
            throw new Exception("The eTag received from S3 doesn't match the eTag computed before uploading. This usually indicates that the image has been corrupted in transit.");
        }
        // upload to Glacier if needed
        if (!string.IsNullOrWhiteSpace(glacierVaultName))
        {
            // The archive description records enough to identify the object later.
            ArchiveDescription description = new ArchiveDescription
            {
                ObjectKey = objectKey,
                ContentType = image.MimeType,
                Width = image.Width,
                Height = image.Height
            };
            // reset stream position in image
            image.Image.Position = 0;
            UploadArchiveRequest glacierRequest = new UploadArchiveRequest
            {
                ArchiveDescription = JsonConvert.SerializeObject(description, Formatting.None),
                Body = image.Image,
                VaultName = glacierVaultName,
                Checksum = TreeHashGenerator.CalculateTreeHash(image.Image)
            };
            UploadArchiveResponse glacierResponse = await GlacierClient.UploadArchiveAsync(glacierRequest);
            model.ArchiveId = glacierResponse.ArchiveId;
        }
        model.ObjectKey = objectKey;
        model.ETag = eTag;
        model.ObjectLocation = bucketUrl + objectKey;
        model.VersionId = response.VersionId;
    }
    catch (Exception ex)
    {
        // Errors are reported via the model, not rethrown.
        model.Exception = ex;
    }
    return(model);
}
/// <summary>
/// Fluent setter: records <paramref name="acl"/> as the canned ACL for the
/// target S3 object or bucket and hands the request back for chaining.
/// </summary>
/// <param name="acl">The Canned ACL to be set on the object</param>
/// <returns>The request with the CannedACL set</returns>
public SetACLRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}
/// <summary>
/// Fluent setter: records <paramref name="acl"/> as the canned ACL the S3
/// object will carry once the multipart upload completes, then returns this
/// request for chaining. See <see cref="T:Amazon.S3.Model.S3CannedACL"/> for
/// details on canned ACLs.
/// </summary>
/// <param name="acl">The Canned ACL to be set on the object</param>
/// <returns>The request with the CannedACL set</returns>
public InitiateMultipartUploadRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}
/// <summary>
/// Fluent setter: records the canned access control list (ACL) applied to
/// every object uploaded by this directory request and returns this
/// instance for chaining. See <see cref="T:Amazon.S3.Model.S3CannedACL"/>
/// for details on canned ACLs.
/// </summary>
/// <param name="acl">
/// The canned access control list (ACL) for the uploaded objects.
/// </param>
/// <returns>
/// This object instance, enabling additional method calls to be chained together.
/// </returns>
public TransferUtilityUploadDirectoryRequest WithCannedACL(S3CannedACL acl)
{
    _cannedACL = acl;
    return this;
}
/// <summary>
/// Adds an object to an Amazon S3 bucket
/// </summary>
/// <param name="bucketName">Name of an existed S3 bucket</param>
/// <param name="key">Name of the object in the bucket. Can include subfolders: folder/file1.jpg </param>
/// <param name="stream">Content of the object to upload.</param>
/// <param name="acl">Access control lists https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl</param>
/// <param name="cancellationToken">Token used to cancel the underlying PutObject call.</param>
/// <returns>Url to the object on success; a failure Result (never a thrown exception) otherwise.</returns>
public async Task <Result <string> > Add(string bucketName, string key, Stream stream, S3CannedACL acl, CancellationToken cancellationToken = default)
{
    var putObjectRequest = new PutObjectRequest
    {
        Key = key,
        InputStream = stream,
        BucketName = bucketName,
        CannedACL = acl
    };
    try
    {
        _logger.LogAddObjectToS3Request(GetLogMessage(bucketName, key));
        var putObjectResponse = await _s3Client.PutObjectAsync(putObjectRequest, cancellationToken);
        _logger.LogAddObjectToS3Response(
            $"{GetLogMessage(bucketName, key)}, {nameof(putObjectResponse.ContentLength)}: {putObjectResponse.ContentLength}, {nameof(putObjectResponse.HttpStatusCode)}: {putObjectResponse.HttpStatusCode}");
        // Only a literal 200 counts as success; any other status becomes a
        // failure Result rather than an exception.
        if (putObjectResponse.HttpStatusCode == HttpStatusCode.OK)
        {
            return(Result.Success(GetUrlPath(bucketName, key)));
        }
        return(Result.Failure <string>(
                   $"Failed to upload the object '{key}'. {nameof(putObjectResponse.HttpStatusCode)} is '{putObjectResponse.HttpStatusCode}'"));
    }
    catch (Exception ex)
    {
        // Attach the object key to the exception data, log, and convert the
        // failure to a Result so callers never see a throw.
        AddObjectKey(ex, key);
        _logger.LogS3RequestException(ex);
        return(Result.Failure <string>(ex.ToString()));
    }
}
/// <summary>
/// Resets the canned ACL on this object back to <see cref="S3CannedACL.NoACL"/>.
/// </summary>
public void RemoveCannedACL()
{
    cannedACL = S3CannedACL.NoACL;
}
/// <summary>
/// Fluent setter: stores <paramref name="acl"/> as the canned ACL the
/// uploaded S3 object will receive, then returns this request for chaining.
/// </summary>
/// <param name="acl">The Canned ACL to be set on the object</param>
/// <returns>The request with the CannedACL set</returns>
public PutObjectRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}
/// <summary>
/// Applies a canned access control list (ACL) to the new bucket and returns
/// the request for call chaining.
/// </summary>
/// <remarks>You can use either a canned ACL or specify access permissions explicitly. You cannot do both.</remarks>
/// <param name="acl">The canned ACL to apply; you can specify only one canned ACL in your request</param>
/// <returns>The request with the Grants set.</returns>
public PutBucketRequest WithCannedACL(S3CannedACL acl)
{
    cannedACL = acl;
    return this;
}