public async Task <bool> RenameFileAsync(string path, string newPath, CancellationToken cancellationToken = default(CancellationToken))
{
    if (String.IsNullOrEmpty(path))
    {
        throw new ArgumentNullException(nameof(path));
    }
    if (String.IsNullOrEmpty(newPath))
    {
        throw new ArgumentNullException(nameof(newPath));
    }

    // S3 has no native rename: copy to the new key, then delete the original.
    // Normalize Windows-style separators to S3 key separators up front.
    string sourceKey = path.Replace('\\', '/');
    string destinationKey = newPath.Replace('\\', '/');

    var copyRequest = new CopyObjectRequest
    {
        CannedACL = _cannedAcl,
        SourceBucket = _bucket,
        SourceKey = sourceKey,
        DestinationBucket = _bucket,
        DestinationKey = destinationKey
    };

    var copyResponse = await _client.CopyObjectAsync(copyRequest, cancellationToken).AnyContext();
    if (!copyResponse.HttpStatusCode.IsSuccessful())
    {
        // Copy failed; leave the original in place.
        return false;
    }

    var deleteRequest = new DeleteObjectRequest
    {
        BucketName = _bucket,
        Key = sourceKey
    };

    var deleteResponse = await _client.DeleteObjectAsync(deleteRequest, cancellationToken).AnyContext();
    return deleteResponse.HttpStatusCode.IsSuccessful();
}
protected async Task ResetCloudBlobCacheControl(S3Object s3Object, int cacheControlMaxAge)
{
    // Copying an object onto itself with the REPLACE directive is how S3
    // rewrites headers server-side; here we reset Cache-Control.
    var request = new CopyObjectRequest
    {
        SourceBucket = _bucketName,
        SourceKey = s3Object.Key,
        DestinationBucket = _bucketName,
        DestinationKey = s3Object.Key,
        MetadataDirective = S3MetadataDirective.REPLACE
    };

    request.Headers.CacheControl = $"public, max-age={cacheControlMaxAge}";

    await _client.CopyObjectAsync(request);
}
/// <summary>
/// Updates the stored metadata/headers for an existing blob by copying the
/// S3 object onto itself with replaced metadata, then returns the item.
/// </summary>
/// <param name="correlationId">Caller correlation id (not used by S3 itself).</param>
/// <param name="item">Blob descriptor; Group/Name are encoded in place.</param>
public async Task <BlobInfoV1> UpdateBlobInfoAsync(string correlationId, BlobInfoV1 item)
{
    item.Group = EncodeString(item.Group);
    item.Name = EncodeString(item.Name);
    // Fall back to "<id>.dat" when the blob has no explicit name.
    var filename = item.Name ?? (item.Id + ".dat");

    var request = new CopyObjectRequest
    {
        SourceBucket = _bucket,
        SourceKey = item.Id,
        DestinationBucket = _bucket,
        DestinationKey = item.Id,
        // BUGFIX: copying an object onto itself requires MetadataDirective.REPLACE;
        // without it S3 rejects the request ("illegal copy ... without changing the
        // object's metadata") and the headers/metadata set below would be ignored.
        MetadataDirective = S3MetadataDirective.REPLACE,
        CannedACL = S3CannedACL.PublicRead,
        ContentType = item.ContentType,
        StorageClass = _reducedRedundancy ? S3StorageClass.ReducedRedundancy : S3StorageClass.Standard
    };

    request.Headers.ContentDisposition = "inline; filename=" + filename;
    if (item.ExpireTime != null)
    {
        request.Headers.Expires = item.ExpireTime;
    }

    request.Metadata.Add("name", item.Name);
    request.Metadata.Add("group", item.Group);
    request.Metadata.Add("completed", StringConverter.ToString(item.Completed));

    await _client.CopyObjectAsync(request);
    return(item);
}
/// <summary>
/// Synchronously copies an object between buckets (or within one bucket when
/// <paramref name="destinationBucket"/> is null/empty). Returns true on success;
/// S3 failures propagate as exceptions.
/// </summary>
public bool CopyFile(string sourceBucket, string destinationBucket, string sourceFile, string destinationFile)
{
    try
    {
        using (var client = new AmazonS3Client(AwsAccessKey, AwsSecretAccessKey, RegionEndpoint.USEast1))
        {
            var request = new CopyObjectRequest
            {
                SourceBucket = sourceBucket,
                SourceKey = sourceFile,
                // Empty destination bucket means "copy within the source bucket".
                DestinationBucket = string.IsNullOrEmpty(destinationBucket) ? sourceBucket : destinationBucket,
                DestinationKey = destinationFile
            };

            // BUGFIX: was task.Wait()/.Result, which wraps failures in
            // AggregateException so the AmazonS3Exception catch never matched.
            // GetAwaiter().GetResult() surfaces the original exception type.
            client.CopyObjectAsync(request).GetAwaiter().GetResult();
        }
        return(true);
    }
    catch
    {
        // BUGFIX: was "throw s3Exception;", which resets the stack trace;
        // a bare rethrow preserves it. The separate AmazonS3Exception branch
        // did nothing different and has been folded in.
        throw;
    }
}
/// <summary>
/// Copies an object from one bucket/key to another in the ap-south-1 region.
/// S3 errors are logged to the console rather than rethrown.
/// </summary>
/// <param name="sourceBucketName">Source bucket name.</param>
/// <param name="sourceKey">Source key.</param>
/// <param name="destinationBucketName">Destination bucket name.</param>
/// <param name="destinationKey">Destination key.</param>
public async Task CopyingObjectAsync(string sourceBucketName, string sourceKey, string destinationBucketName, string destinationKey)
{
    try
    {
        using (var client = new AmazonS3Client(Amazon.RegionEndpoint.APSouth1))
        {
            // Build and issue the copy in one step; the response is not needed.
            await client.CopyObjectAsync(new CopyObjectRequest
            {
                SourceBucket = sourceBucketName,
                SourceKey = sourceKey,
                DestinationBucket = destinationBucketName,
                DestinationKey = destinationKey
            });
        }
    }
    catch (AmazonS3Exception e)
    {
        Console.WriteLine("Error encountered on server. Message:'{0}' when CopyObjectAsync an object", e.Message);
    }
    catch (Exception e)
    {
        Console.WriteLine("Unknown encountered on server. Message:'{0}' when CopyObjectAsync an object", e.Message);
    }
}
/// <summary>
/// Copies an object, defaulting the source bucket to the configured bucket and
/// the destination bucket to the (resolved) source bucket.
/// </summary>
/// <param name="sourceVersionId">Optional specific version to copy.</param>
public async Task Copy(string sourceKey, string desitnationKey, string sourceBucket = null, string desitnationBucket = null, string sourceVersionId = null)
{
    string sourceBucketName = sourceBucket ?? awsS3Options.Bucket;
    string destinationBucketName = desitnationBucket ?? sourceBucketName;

    using (AmazonS3Client client = CreateClient())
    {
        // BUGFIX: removed a TransferUtility that was constructed here but never
        // used (and never disposed) — a pointless allocation/resource leak.
        await client.CopyObjectAsync(new CopyObjectRequest()
        {
            SourceBucket = sourceBucketName,
            DestinationBucket = destinationBucketName,
            SourceKey = sourceKey,
            DestinationKey = desitnationKey,
            SourceVersionId = sourceVersionId
        });
    }
}
/// <summary>
/// Replaces the stored metadata for an object by copying it onto itself with
/// the REPLACE directive (the only way S3 allows a metadata update), then
/// deletes the superseded version and returns the freshly-read metadata.
/// Returns null when the object does not exist.
/// </summary>
public async Task <Metadata> SaveMetadata(StoreLocation location, Metadata metadata)
{
    // Copy so that we are not modifying original!
    metadata = new Metadata(metadata);

    // Do not change the audit information!
    var current = await GetMetadata(location).ConfigureAwait(false);
    metadata.Audit = current.Audit;

    var key = GetObjectKey(location);
    var request = new CopyObjectRequest
    {
        SourceBucket = _bucket,
        SourceKey = key,
        DestinationBucket = _bucket,
        DestinationKey = key,
        MetadataDirective = S3MetadataDirective.REPLACE
    };
    // NOTE(review): the SDK's MetadataCollection normally manages the
    // "x-amz-meta-" prefix itself — confirm the explicit prefix here is
    // intentional and does not double up.
    foreach (var m in metadata)
    {
        request.Metadata.Add("x-amz-meta-" + m.Key, m.Value);
    }

    // Copy the object (only way to update metadata)
    string versionToRemove;
    try
    {
        var copyResponse = await _client.CopyObjectAsync(request).ConfigureAwait(false);
        // The source version is now superseded by the copy's new version.
        versionToRemove = copyResponse.SourceVersionId;
    }
    catch (AmazonS3Exception e)
    {
        // Missing object is a soft failure: signal "not found" with null.
        if (e.StatusCode == HttpStatusCode.NotFound)
        {
            return(null);
        }
        throw;
    }

    // We will be grabbing the metadata
    var metadataTask = GetMetadata(location);
    var tasks = new List <Task> { metadataTask };

    // Remove the double up so that we don't get heaps of extra snapshots...
    // (version delete runs in parallel with the metadata re-read)
    if (versionToRemove != null)
    {
        tasks.Add(_client.DeleteObjectAsync(_bucket, key, versionToRemove));
    }
    await Task.WhenAll(tasks).ConfigureAwait(false);
    return(metadataTask.Result);
}
/// <summary>
/// Lambda handler: notifies the resource manager of progress, copies the
/// input file into the repository bucket under a date-prefixed GUID key
/// (preserving the extension), and returns an S3Locator for the copy.
/// </summary>
public async Task <JToken> Handler(JToken @event, ILambdaContext context)
{
    var resourceManager = AwsEnvironment.GetAwsV4ResourceManager();

    // Progress notification is best-effort: failures are logged, not fatal.
    try
    {
        var jobData = new JobBase
        {
            Status = "RUNNING",
            Progress = 9
        };
        await resourceManager.SendNotificationAsync(jobData, @event["notificationEndpoint"].ToMcmaObject <NotificationEndpoint>());
    }
    catch (Exception error)
    {
        Logger.Error("Failed to send notification: {0}", error);
    }

    var inputFile = @event["input"]["inputFile"].ToMcmaObject <S3Locator>();

    // Destination key: "<yyyymmdd>/<guid>" plus the source file's extension
    // (if it has one).
    var s3Bucket = REPOSITORY_BUCKET;
    var s3Key = yyyymmdd() + "/" + Guid.NewGuid();
    var idxLastDot = inputFile.AwsS3Key.LastIndexOf(".");
    if (idxLastDot > 0)
    {
        s3Key += inputFile.AwsS3Key.Substring(idxLastDot);
    }

    try
    {
        var s3Client = new AmazonS3Client();
        // The copy must be issued against the destination bucket's region;
        // an empty GetBucketLocation result means us-east-1.
        var destBucketLocation = await s3Client.GetBucketLocationAsync(s3Bucket);
        var regionEndpoint = RegionEndpoint.GetBySystemName(!string.IsNullOrWhiteSpace(destBucketLocation.Location) ? (string)destBucketLocation.Location : "us-east-1");
        var copyClient = new AmazonS3Client(regionEndpoint);
        await copyClient.CopyObjectAsync(new CopyObjectRequest
        {
            SourceBucket = inputFile.AwsS3Bucket,
            SourceKey = inputFile.AwsS3Key,
            DestinationBucket = s3Bucket,
            DestinationKey = s3Key
        });
    }
    catch (Exception error)
    {
        throw new Exception("Unable to read input file in bucket '" + inputFile.AwsS3Bucket + "' with key '" + inputFile.AwsS3Key + "' due to error: " + error);
    }

    return(new S3Locator
    {
        AwsS3Bucket = s3Bucket,
        AwsS3Key = s3Key
    }.ToMcmaJson());
}
/// <summary>
/// Downloads an object, parses it as an Excel stream, then marks it processed
/// by copying it to "processed/&lt;key with .processed extension&gt;" and
/// deleting the original. Errors are logged to the console, not rethrown.
/// </summary>
static async Task ReadObjectDataAsync(Amazon.RegionEndpoint awsRegion, string bucketName, string keyName)
{
    IAmazonS3 client = new AmazonS3Client(awsRegion);
    try
    {
        GetObjectRequest request = new GetObjectRequest
        {
            BucketName = bucketName,
            Key = keyName
        };

        using (GetObjectResponse response = await client.GetObjectAsync(request))
        using (Stream responseStream = response.ResponseStream)
        using (MemoryStream memStream = new MemoryStream())
        {
            // Buffer the whole object in memory before parsing.
            // BUGFIX: use CopyToAsync instead of the blocking CopyTo inside
            // an async method. (Dead commented-out reader code removed.)
            await responseStream.CopyToAsync(memStream);
            ParseExcelStream(memStream);
        }

        // Mark as processed: copy under "processed/" with a ".processed"
        // extension, then delete the original.
        CopyObjectRequest cpreq = new CopyObjectRequest
        {
            SourceBucket = bucketName,
            SourceKey = keyName,
            DestinationBucket = bucketName,
            DestinationKey = "processed/" + keyName.Replace(".xlsx", ".processed")
        };
        CopyObjectResponse cpresp = await client.CopyObjectAsync(cpreq);

        var req2 = new DeleteObjectRequest
        {
            BucketName = bucketName,
            Key = keyName
        };
        var resp2 = await client.DeleteObjectAsync(req2);
    }
    catch (AmazonS3Exception e)
    {
        Console.WriteLine("Error encountered ***. Message:'{0}' when reading an object", e.Message);
    }
    catch (Exception e)
    {
        Console.WriteLine("Unknown encountered on server. Message:'{0}' when reading an object", e.Message);
    }
}
// Polls the demo bucket for Step Functions task-request objects, writes a
// response object, reports task success back to Step Functions when a task
// token is present, then archives the request under "Completed" and deletes it.
static async System.Threading.Tasks.Task Main(string[] args)
{
    var s3Client = new AmazonS3Client();
    var stepFunctionclient = new AmazonStepFunctionsClient();
    var bucket = Environment.GetEnvironmentVariable("STEP_FUNCTION_DEMO_BUCKET");

    // All pending requests live under the "TaskRequest" prefix.
    var requestObjects = await s3Client.ListObjectsAsync(new ListObjectsRequest
    {
        BucketName = bucket,
        Prefix = "TaskRequest"
    });

    foreach (var obj in requestObjects.S3Objects)
    {
        var s3object = await s3Client.GetObjectAsync(new GetObjectRequest
        {
            BucketName = bucket,
            Key = obj.Key
        });

        using var sr = new StreamReader(s3object.ResponseStream);
        var data = JsonSerializer.Deserialize <JobWithTaskToken>(await sr.ReadToEndAsync());

        // Demo resolution rule: a job whose State.Data equals 10 is resolved.
        if (data.State.Data == 10)
        {
            data.State.Resolved = true;
        }

        // Persist the (possibly updated) job as a timestamped response object.
        await s3Client.PutObjectAsync(new PutObjectRequest
        {
            BucketName = bucket,
            Key = $"TaskResponse/Response-{System.DateTime.Now.ToString("MMddyyyyhhmmss")}.json",
            ContentBody = JsonSerializer.Serialize(data)
        });

        // Only jobs carrying a task token are waiting on Step Functions.
        if (!string.IsNullOrEmpty(data.TaskToken))
        {
            await stepFunctionclient.SendTaskSuccessAsync(new SendTaskSuccessRequest
            {
                TaskToken = data.TaskToken,
                Output = JsonSerializer.Serialize(data.State)
            });
        }

        // Archive: copy the request under "Completed", then delete the original.
        await s3Client.CopyObjectAsync(new CopyObjectRequest
        {
            SourceBucket = obj.BucketName,
            DestinationBucket = bucket,
            SourceKey = obj.Key,
            DestinationKey = obj.Key.Replace("TaskRequest", "Completed")
        });
        await s3Client.DeleteObjectAsync(bucket, obj.Key);
    }
}
/// <summary>
/// Copies an object to a new key within the same bucket and logs the result.
/// </summary>
/// <param name="objectKey">Key of the source object.</param>
/// <param name="destObjectKey">Key to copy the object to.</param>
public async Task CopyingObjectAsync(string objectKey, string destObjectKey)
{
    CopyObjectRequest request = new CopyObjectRequest
    {
        SourceBucket = bucketName,
        SourceKey = objectKey,
        DestinationBucket = bucketName,
        DestinationKey = destObjectKey
    };
    CopyObjectResponse response = await client.CopyObjectAsync(request);

    // BUGFIX: log-message typo "copid" -> "copied".
    logger.Info($"Object {objectKey} was copied to {destObjectKey}.");
}
/// <summary>
///
/// <para>CopyFile:</para>
///
/// <para>Copy a file from a bucket and relative location to another in File Service, caller thread will be blocked before it is done</para>
///
/// <para>Check <seealso cref="IBFileServiceInterface.CopyFile"/> for detailed documentation</para>
///
/// </summary>
public bool CopyFile(
    string _SourceBucketName,
    string _SourceKeyInBucket,
    string _DestinationBucketName,
    string _DestinationKeyInBucket,
    EBRemoteFileReadPublicity _RemoteFileReadAccess = EBRemoteFileReadPublicity.AuthenticatedRead,
    Action <string> _ErrorMessageAction = null)
{
    if (S3Client == null)
    {
        _ErrorMessageAction?.Invoke("BFileServiceAWS->CopyFile: S3Client is null.");
        return(false);
    }

    CopyObjectRequest Request = new CopyObjectRequest
    {
        SourceBucket = _SourceBucketName,
        SourceKey = _SourceKeyInBucket,
        DestinationBucket = _DestinationBucketName,
        DestinationKey = _DestinationKeyInBucket,
        // FIX: the original three-way if/else mapped both ProjectWideProtectedRead
        // and the default case to AuthenticatedRead (duplicate branches), so the
        // mapping collapses to a single conditional with identical behavior.
        CannedACL = _RemoteFileReadAccess == EBRemoteFileReadPublicity.PublicRead
            ? S3CannedACL.PublicRead
            : S3CannedACL.AuthenticatedRead
    };

    try
    {
        // Blocking wrapper by design (see interface docs): wait for the copy.
        using (var CreatedTask = S3Client.CopyObjectAsync(Request))
        {
            CreatedTask.Wait();
        }
    }
    catch (Exception e)
    {
        _ErrorMessageAction?.Invoke("BFileServiceAWS->CopyFile: " + e.Message + ", Trace: " + e.StackTrace);
        return(false);
    }
    return(true);
}
// Maps the service's copy request onto the AWS SDK request and performs the
// copy synchronously.
// NOTE(review): the SDK response is computed but discarded and this method
// always returns null — confirm callers never dereference the return value,
// or map the SDK response into the service's CopyObjectResponse.
// NOTE(review): ".Result" blocks on async work — deadlock risk under a
// synchronization context; consider an async variant.
public CopyObjectResponse CopyObject(CopyObjectRequest request)
{
    var s3CopyObjectRequest = new Amazon.S3.Model.CopyObjectRequest()
    {
        SourceBucket = request.SourceBucket,
        SourceKey = request.SourceKey,
        DestinationBucket = request.Bucket,
        DestinationKey = request.Key,
    };
    var result = _amazonS3Client.CopyObjectAsync(s3CopyObjectRequest).Result;
    return(null);
}
/// <summary>
/// Rewrites an S3 object's metadata by copying the object onto itself with
/// MetadataDirective.REPLACE — S3 has no in-place metadata update, and the
/// server-side copy avoids downloading the blob.
/// </summary>
public static async Task UpdateMetadataAsync(
    AmazonS3Client client,
    Blob blob,
    string bucketName,
    string key)
{
    CopyObjectRequest request = new CopyObjectRequest
    {
        SourceBucket = bucketName,
        DestinationBucket = bucketName,
        SourceKey = key,
        DestinationKey = key,
        MetadataDirective = S3MetadataDirective.REPLACE
    };

    foreach (KeyValuePair <string, string> keyValuePair in blob.Metadata)
    {
        request.Metadata[keyValuePair.Key] = keyValuePair.Value;
    }

    // ConfigureAwait(false): library helper, no caller context needed —
    // consistent with the other UpdateMetadataAsync overload in this file.
    await client.CopyObjectAsync(request).ConfigureAwait(false);
}
// Moves (copies) a specific version of a file to a new file id/name.
// NOTE(review): the source key is built from (id, versionId, name) but the
// target key passes targetFileId for BOTH the id and version slots — confirm
// this is intentional and not a copy/paste of the wrong argument.
private async Task MoveFileAsync(string sourceFileId, string sourceFileName, string sourceFileVersionId, string targetFileId, string targetFileName)
{
    var sourceFileKey = CreateFileKey(sourceFileId, sourceFileVersionId, sourceFileName);
    var targetFileKey = CreateFileKey(targetFileId, targetFileId, targetFileName);

    var copyRequest = new CopyObjectRequest
    {
        SourceBucket = RootFolderName,
        DestinationBucket = RootFolderName,
        SourceKey = sourceFileKey,
        DestinationKey = targetFileKey,
        SourceVersionId = sourceFileVersionId
    };
    CopyObjectResponse copyResponse = await client.CopyObjectAsync(copyRequest);

    // Source deletion is currently disabled, so this is a copy rather than a
    // true move — the commented call below is the intended cleanup step.
    if (copyResponse.HttpStatusCode == System.Net.HttpStatusCode.OK || copyResponse.HttpStatusCode == System.Net.HttpStatusCode.Created)
    {
        // await DeleteFileAsync(sourceFileId, new string[] { sourceFileVersionId }, sourceFileName, AccessModifier.Public);
    }
}
/// <summary>
/// Copy the new S3 file from the Met Office bucket into our ingest bucket.
/// </summary>
private async Task ProcessMessageAsync(SQSEvent.SQSMessage sqsMessage, ILambdaContext context)
{
    // The SQS body wraps an SNS envelope; the actual file notification is the
    // JSON payload inside the SNS "Message" field.
    JObject snsMessage = JObject.Parse(sqsMessage.Body);
    JObject fileNotification = JObject.Parse(snsMessage["Message"].ToString());

    string bucket = fileNotification["bucket"].ToString();
    string key = fileNotification["key"].ToString();
    context.Logger.LogLine($"Received SQS notification new file available: s3://{bucket}/{key}");

    // Copy the object into the ingest bucket under the same key.
    AmazonS3Client s3 = new AmazonS3Client();
    CopyObjectResponse result = await s3.CopyObjectAsync(bucket, key, "bigwind-ingest", key);

    int statusCode = (int)result.HttpStatusCode;
    if (statusCode < 200 || statusCode > 299)
    {
        throw new System.Exception($"S3 copy request failed with HttpStatusCode: {result.HttpStatusCode}");
    }
}
/// <summary>
/// Moves an object between buckets/keys (copy then delete). Returns false if
/// the source does not exist or the copy does not succeed; backslashes in
/// keys are normalized to forward slashes.
/// </summary>
public static async Task <bool> MoveObjectAsync(AmazonS3Client client, string srcBucket, string dstBucket, string src, string dst)
{
    // Normalize Windows-style separators to S3 key separators.
    src = src.Replace("\\", "/");
    dst = dst.Replace("\\", "/");

    if (!await FileExistsAsync(client, srcBucket, src))
    {
        return(false);
    }

    var request = new CopyObjectRequest
    {
        SourceBucket = srcBucket,
        DestinationBucket = dstBucket,
        SourceKey = src,
        DestinationKey = dst
    };
    var response = await client.CopyObjectAsync(request);

    // BUGFIX: the copy response was previously ignored and the source deleted
    // unconditionally — a failed copy would silently lose the object. Only
    // delete the source after a successful copy.
    int statusCode = (int)response.HttpStatusCode;
    if (statusCode < 200 || statusCode > 299)
    {
        return(false);
    }

    await DeleteFileAsync(client, srcBucket, src);
    return(true);
}
public static async Task UpdateMetadataAsync(AmazonS3Client client, Blob blob, string bucketName, string key)
{
    // S3 offers no metadata-update API; the only option is to recreate the
    // object. Copying the object onto itself with the REPLACE directive does
    // exactly that server-side, rewriting the metadata without the client
    // ever downloading the blob.
    var selfCopy = new CopyObjectRequest
    {
        SourceBucket = bucketName,
        DestinationBucket = bucketName,
        SourceKey = key,
        DestinationKey = key,
        MetadataDirective = S3MetadataDirective.REPLACE
    };

    foreach (KeyValuePair <string, string> entry in blob.Metadata)
    {
        selfCopy.Metadata[entry.Key] = entry.Value;
    }

    await client.CopyObjectAsync(selfCopy).ConfigureAwait(false);
}
public async Task <IFileInformation> CopyFileToSameServiceType(string identifier, IStorageService destinationService)
{
    var destination = (S3StorageService)destinationService;

    // The copy gets a fresh unique identifier in the destination bucket.
    var uniqueName = Guid.NewGuid().ToString();

    using var client = new AmazonS3Client(S3Settings.AccessKeyId, S3Settings.SecretAccessKey, _region);

    var copyRequest = new CopyObjectRequest()
    {
        SourceBucket = S3Settings.Bucket,
        SourceKey = identifier,
        DestinationBucket = destination.S3Settings.Bucket,
        DestinationKey = uniqueName
    };
    await client.CopyObjectAsync(copyRequest);

    return(new S3FileInformation
    {
        StorageIdentifier = uniqueName
    });
}
private async Task MoveMessage(SimpleEmailMessage mail, string bucket, string sourceKey, string destinationKey)
{
    // Idempotency guard: an existing destination means this message was moved
    // before (or is a duplicate), so there is nothing left to do.
    if (await CheckDestinationExists(bucket, destinationKey))
    {
        Log($"Destination already exists, message was moved previously or possible duplicate. Proceeding...");
        return;
    }

    var copyConfig = new CopyObjectRequest
    {
        SourceBucket = bucket,
        DestinationBucket = bucket,
        SourceKey = sourceKey,
        DestinationKey = destinationKey
    };
    SetMetadata(copyConfig, mail);

    // S3 has no atomic move: copy to the destination, then delete the source.
    await _s3client.CopyObjectAsync(copyConfig);
    await _s3client.DeleteObjectAsync(bucket, sourceKey);

    Log($"Moved message from {sourceKey} to {destinationKey}");
}
// Thin wrapper over the SDK's bucket/key convenience overload of CopyObjectAsync.
public async Task CopyFile(string sourceBucket, string sourceKey, string destinationBucket, string destinationKey)
{
    await client.CopyObjectAsync(sourceBucket, sourceKey, destinationBucket, destinationKey);
}
/// <summary>
/// Lambda handler: resolves the proxy essence (the transform job's output, or
/// the original essence when no transform ran), copies it into the website
/// bucket under "media/&lt;guid&gt;&lt;ext&gt;", and returns an S3Locator with
/// a public HTTP endpoint for the copy.
/// </summary>
public async Task <S3Locator> Handler(JToken @event, ILambdaContext context)
{
    var resourceManager = AwsEnvironment.GetAwsV4ResourceManager();

    // Progress notification is best-effort: failures are logged, not fatal.
    try
    {
        var jobData = new JobBase
        {
            Status = "RUNNING",
            Progress = 72
        };
        await resourceManager.SendNotificationAsync(jobData, @event["notificationEndpoint"].ToMcmaObject <NotificationEndpoint>());
    }
    catch (Exception error)
    {
        Logger.Error("Failed to send notification: {0}", error);
    }

    var transformJobId = GetTransformJobId(@event);
    S3Locator outputFile;
    if (transformJobId == null)
    {
        // No transform ran: fall back to the original essence's first location.
        Logger.Debug("Transform job ID is null. Transform was not done. Using original essence as proxy.");
        var bme = await resourceManager.ResolveAsync <BMEssence>(@event["data"]["bmEssence"]?.ToString());
        outputFile = (S3Locator)bme.Locations[0];
    }
    else
    {
        Logger.Debug($"Getting proxy location from transform job {transformJobId}.");
        var transformJob = await resourceManager.ResolveAsync <TransformJob>(transformJobId);
        outputFile = transformJob.JobOutput.Get <S3Locator>(nameof(outputFile));
    }

    // Destination key: "media/<guid>" plus the source's extension (if any).
    var s3Bucket = WEBSITE_BUCKET;
    var s3Key = "media/" + Guid.NewGuid();
    var idxLastDot = outputFile.AwsS3Key.LastIndexOf(".");
    if (idxLastDot > 0)
    {
        s3Key += outputFile.AwsS3Key.Substring(idxLastDot);
    }

    var s3 = new AmazonS3Client();
    // Bucket location also determines the s3 subdomain used for the public URL
    // below; an empty location means us-east-1.
    var data = await s3.GetBucketLocationAsync(s3Bucket);
    try
    {
        var copyParams = new CopyObjectRequest
        {
            SourceBucket = outputFile.AwsS3Bucket,
            SourceKey = outputFile.AwsS3Key,
            DestinationBucket = s3Bucket,
            DestinationKey = s3Key
        };
        // The copy is issued against the destination bucket's region.
        var regionEndpoint = RegionEndpoint.GetBySystemName(!string.IsNullOrWhiteSpace(data.Location) ? (string)data.Location : "us-east-1");
        var destS3 = new AmazonS3Client(regionEndpoint);
        await destS3.CopyObjectAsync(copyParams);
    }
    catch (Exception error)
    {
        // NOTE(review): this message names the DESTINATION bucket/key but says
        // "input file" — the sibling handler's wording references the source;
        // confirm which is intended.
        throw new Exception("Unable to read input file in bucket '" + s3Bucket + "' with key '" + s3Key + "' due to error: " + error);
    }

    // Build a public virtual-hosted-style URL for the copied object.
    var s3SubDomain = !string.IsNullOrWhiteSpace(data.Location) ?
                      $"s3-{data.Location}" : "s3";
    var httpEndpoint = "https://" + s3SubDomain + ".amazonaws.com/" + s3Bucket + "/" + s3Key;

    return(new S3Locator
    {
        AwsS3Bucket = s3Bucket,
        AwsS3Key = s3Key,
        HttpEndpoint = httpEndpoint
    });
}
// Copies an S3 object described by the request model after verifying that the
// source exists and the destination does not. Results are reported through the
// out parameters (hsc = HTTP status, status = human-readable reason); the
// finally block logs the outcome regardless of the path taken.
public static void Execute(
    request m,
    out HttpStatusCode hsc,
    out string status,
    string awsAccessKey,
    string awsSecretKey,
    Microsoft.AspNetCore.Http.HttpContext hc = null,
    CancellationToken?ct = null
    )
{
    // Pessimistic defaults; overwritten on success.
    hsc = HttpStatusCode.BadRequest;
    status = "";
    try
    {
        // Source must exist before we attempt the copy.
        if (!Exists.Execute(
                key: m.sourceKey,
                bucketName: m.sourceBucketName,
                url: out string sourceUrl,
                re: m.re,
                awsAccessKey: awsAccessKey,
                awsSecretKey: awsSecretKey
                ))
        {
            status = "source did not exist";
            hsc = HttpStatusCode.BadRequest;
            return;
        }

        //validate dest doesn't already exist, fail if it does bc we aren't validating that it is different? maybe shar eeach in future?
        if (Exists.Execute(
                key: m.destKey,
                bucketName: m.destBucketName,
                url: out string destUrl,
                re: m.re,
                awsAccessKey: awsAccessKey,
                awsSecretKey: awsSecretKey
                ))
        {
            status = "dest existed already";
            hsc = HttpStatusCode.BadRequest;
            return;
        }

        //copy
        using (var s3c = new AmazonS3Client(
                   awsAccessKeyId: awsAccessKey,
                   awsSecretAccessKey: awsSecretKey,
                   region: m.re
                   ))
        {
            var request = new CopyObjectRequest
            {
                SourceBucket = m.sourceBucketName,
                SourceKey = m.sourceKey,
                DestinationBucket = m.destBucketName,
                DestinationKey = m.destKey,
                CannedACL = m.destAcl
            };
            // Overriding the content type requires replacing metadata wholesale.
            if (!string.IsNullOrWhiteSpace(m.contentType))
            {
                request.MetadataDirective = S3MetadataDirective.REPLACE;
                request.ContentType = m.contentType;
            }
            // Synchronous by design (void API); .Result blocks until the copy completes.
            var response = s3c.CopyObjectAsync(request, cancellationToken: ct.HasValue ? ct.Value : CancellationToken.None).Result;
            hsc = response.HttpStatusCode;
            return;

            //fileLengthBytes = cor.
        }
    }
    catch (Exception ex)
    {
        LogIt.E(ex);
        hsc = HttpStatusCode.InternalServerError;
        // NOTE(review): "unexecpected" typo in the status value — left as-is
        // here since it is a runtime string consumers may match on.
        status = "unexecpected error";
        return;
    }
    finally
    {
        // Always log the final outcome plus the request model.
        LogIt.I(JsonConvert.SerializeObject(
                    new
        {
            hsc,
            status,
            m,
            //ipAddress = GetPublicIpAddress.Execute(hc),
            //executedBy = GetExecutingUsername.Execute()
        }, Formatting.Indented));
    }
}
/// <summary>
/// Copies a file within the configured bucket. Throws ArgumentException when
/// source and destination are identical, and FileStoreException when the
/// source is missing, the destination already exists, or the copy fails.
/// </summary>
public async Task CopyFileAsync(string srcPath, string dstPath)
{
    if (srcPath == dstPath)
    {
        throw new ArgumentException(
            $"The values for {nameof(srcPath)} and {nameof(dstPath)} must not be the same.");
    }

    // The source must exist.
    try
    {
        await _s3Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
        {
            BucketName = _s3Settings.S3BucketName,
            Key = GetCompletePath(srcPath)
        });
    }
    catch (Exception e)
    {
        throw new FileStoreException($"Cannot copy file from '{srcPath}' because it does not exists, with message {e.Message}.");
    }

    // The destination must NOT exist.
    // BUGFIX: previously the "already exists" FileStoreException was thrown
    // inside the same try whose bare catch swallowed everything, so the guard
    // never fired. Probe and throw are now separated.
    bool destinationExists;
    try
    {
        await _s3Client.GetObjectMetadataAsync(new GetObjectMetadataRequest
        {
            BucketName = _s3Settings.S3BucketName,
            Key = GetCompletePath(dstPath)
        });
        destinationExists = true;
    }
    catch
    {
        // Metadata lookup failed — treat the destination as absent.
        destinationExists = false;
    }
    if (destinationExists)
    {
        throw new FileStoreException($"Cannot copy file to '{dstPath}' because it already exists.");
    }

    try
    {
        var request = new CopyObjectRequest
        {
            SourceBucket = _s3Settings.S3BucketName,
            SourceKey = GetCompletePath(srcPath),
            DestinationBucket = _s3Settings.S3BucketName,
            DestinationKey = GetCompletePath(dstPath)
        };
        var response = await _s3Client.CopyObjectAsync(request);

        // BUGFIX: the old check was "if (response.LastModified != null) throw",
        // which reported a SUCCESSFUL copy (LastModified populated) as a
        // failure. Check the HTTP status code instead.
        int statusCode = (int)response.HttpStatusCode;
        if (statusCode < 200 || statusCode > 299)
        {
            throw new FileStoreException(
                $"Error while copying file '{srcPath}'; copy operation failed with status {response.HttpStatusCode}.");
        }
    }
    catch (FileStoreException)
    {
        // Already in the caller's contract — don't re-wrap.
        throw;
    }
    catch (AmazonS3Exception e)
    {
        throw new FileStoreException(
            $"Error while copying file '{srcPath}'; copy operation failed with exception {e.Message}.");
    }
    catch (Exception e)
    {
        throw new FileStoreException(
            $"Error while copying file '{srcPath}'; copy operation failed with exception {e.Message}.");
    }
}