private void UploadToCdn() { try { // one thread only if (Interlocked.CompareExchange(ref work, 1, 0) == 0) { var @continue = false; try { CdnItem item; if (queue.TryDequeue(out item)) { @continue = true; var cdnpath = GetCdnPath(item.Bundle.Path); var key = new Uri(cdnpath).PathAndQuery.TrimStart('/'); var contentMD5 = Hasher.Base64Hash(item.Response.Content, HashAlg.MD5); var compressed = new MemoryStream(); if (ClientSettings.GZipEnabled) { using (var compression = new GZipStream(compressed, CompressionMode.Compress, true)) { new MemoryStream(Encoding.UTF8.GetBytes(item.Response.Content)).CopyTo(compression); } contentMD5 = Hasher.Base64Hash(compressed.GetCorrectBuffer(), HashAlg.MD5); } var config = new AmazonS3Config { RegionEndpoint = RegionEndpoint.GetBySystemName(s3region), UseHttp = true }; using (var s3 = new AmazonS3Client(s3publickey, s3privatekey, config)) { var upload = false; try { var request = new GetObjectMetadataRequest { BucketName = s3bucket, Key = key }; var response = s3.GetObjectMetadata(request); upload = !string.Equals(contentMD5, response.Metadata["x-amz-meta-etag"], StringComparison.InvariantCultureIgnoreCase); } catch (AmazonS3Exception ex) { if (ex.StatusCode == HttpStatusCode.NotFound) { upload = true; } else { throw; } } if (upload) { var request = new PutObjectRequest { BucketName = s3bucket, CannedACL = S3CannedACL.PublicRead, Key = key, ContentType = AmazonS3Util.MimeTypeFromExtension(Path.GetExtension(key).ToLowerInvariant()) }; if (ClientSettings.GZipEnabled) { compressed.Position = 0; /* rewind so the upload starts at the beginning of the gzipped data */ request.InputStream = compressed; request.Headers.ContentEncoding = "gzip"; } else { request.ContentBody = item.Response.Content; } var cache = TimeSpan.FromDays(365); request.Headers.CacheControl = string.Format("public, max-age={0}", (int)cache.TotalSeconds); request.Headers.Expires = DateTime.UtcNow.Add(cache); request.Headers.ContentMD5 = contentMD5; request.Headers["x-amz-meta-etag"] = contentMD5; //request.AddHeader("Last-Modified", DateTime.UtcNow.ToString("R")); s3.PutObject(request); } item.Bundle.CdnPath = cdnpath; } } } catch (Exception err) { log.Error(err); } finally { work = 0; if (@continue) { Action upload = () => UploadToCdn(); upload.BeginInvoke(null, null); } } } } catch (Exception fatal) { log.Fatal(fatal); } }
public void MultipartEncryptionTestInstructionFile() { string filePath = @"C:\temp\Upload15MegFileIn3PartsViaStream.txt"; string retrievedFilepath = @"C:\temp\Upload15MegFileIn3PartsViaStreamRetreived.txt"; int MEG_SIZE = (int)Math.Pow(2, 20) + 4001; long totalSize = (long)(15 * MEG_SIZE); UtilityMethods.GenerateFile(filePath, totalSize); string key = "MultipartEncryptionTestInstrcutionFile" + random.Next(); s3EncryptionClientFileMode.PutBucket(new PutBucketRequest() { BucketName = bucketName }); Stream inputStream = File.OpenRead(filePath); try { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.ReducedRedundancy, ContentType = "text/html", CannedACL = S3CannedACL.PublicRead }; InitiateMultipartUploadResponse initResponse = s3EncryptionClientFileMode.InitiateMultipartUpload(initRequest); // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MEG_SIZE, InputStream = inputStream }; UploadPartResponse up1Response = s3EncryptionClientFileMode.UploadPart(uploadRequest); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MEG_SIZE + 4001, InputStream = inputStream }; UploadPartResponse up2Response = s3EncryptionClientFileMode.UploadPart(uploadRequest); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; //uploadRequest.setLastPart(); UploadPartResponse up3Response = s3EncryptionClientFileMode.UploadPart(uploadRequest); ListPartsRequest listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = s3EncryptionClientFileMode.ListParts(listPartRequest); Assert.AreEqual(3, listPartResponse.Parts.Count); Assert.AreEqual(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.AreEqual(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.AreEqual(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.AreEqual(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.AreEqual(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.AreEqual(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = s3EncryptionClientFileMode.ListParts(listPartRequest); Assert.AreEqual(1, listPartResponse.Parts.Count); // Complete the response CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); CompleteMultipartUploadResponse compResponse = s3EncryptionClientFileMode.CompleteMultipartUpload(compRequest); Assert.AreEqual(bucketName, compResponse.BucketName); Assert.IsNotNull(compResponse.ETag); Assert.AreEqual(key, compResponse.Key); Assert.IsNotNull(compResponse.Location); // Get the file back from S3 and make sure it is still the same. 
GetObjectRequest getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; GetObjectResponse getResponse = s3EncryptionClientFileMode.GetObject(getRequest); getResponse.WriteResponseStreamToFile(retrievedFilepath); UtilityMethods.CompareFiles(filePath, retrievedFilepath); GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; GetObjectMetadataResponse metaDataResponse = s3EncryptionClientFileMode.GetObjectMetadata(metaDataRequest); Assert.AreEqual("text/html", metaDataResponse.Headers.ContentType); } finally { inputStream.Close(); if (File.Exists(filePath)) { File.Delete(filePath); } if (File.Exists(retrievedFilepath)) { File.Delete(retrievedFilepath); } } }
public Task <GetObjectMetadataResponse> GetObjectMetadataAsync(GetObjectMetadataRequest request, CancellationToken cancellationToken = default(CancellationToken)) { throw new NotImplementedException(); }
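Stubs like the one above typically come from hand-rolled test doubles. A minimal sketch of a fake that returns a canned response instead of throwing, so callers can be unit-tested without touching S3 (the class and field names are illustrative, not taken from the code above):

using System.Threading;
using System.Threading.Tasks;
using Amazon.S3.Model;

// Hypothetical test double: hands back a prepared response for any request.
public class FakeObjectMetadataProvider
{
    private readonly GetObjectMetadataResponse _canned;

    public FakeObjectMetadataProvider(GetObjectMetadataResponse canned)
    {
        _canned = canned;
    }

    public Task<GetObjectMetadataResponse> GetObjectMetadataAsync(GetObjectMetadataRequest request, CancellationToken cancellationToken = default(CancellationToken))
    {
        // No network call; return the prepared metadata.
        return Task.FromResult(_canned);
    }
}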
/// <summary> /// Gets a value indicating whether the image is new or updated in an asynchronous manner. /// </summary> /// <returns> /// The asynchronous <see cref="Task"/> returning the value. /// </returns> public override async Task <bool> IsNewOrUpdatedAsync() { string cachedFileName = await this.CreateCachedFileNameAsync(); // Collision rate of about 1 in 10000 for the folder structure. // That gives us massive scope to store millions of files. string pathFromKey = string.Join("\\", cachedFileName.ToCharArray().Take(6)); this.CachedPath = Path.Combine(this.cachedCdnRoot, this.imageProcessorCachePrefix, pathFromKey, cachedFileName) .Replace(@"\", "/"); // TODO: What is the S3 version of the following lines? The Above doesn't match what I expect //this.CachedPath = Path.Combine(this.cloudCachedBlobContainer.Uri.ToString(), pathFromKey, cachedFileName).Replace(@"\", "/"); //this.cachedRewritePath = Path.Combine(this.cachedCdnRoot, this.cloudCachedBlobContainer.Name, pathFromKey, cachedFileName).Replace(@"\", "/"); bool isUpdated = false; CachedImage cachedImage = CacheIndexer.Get(this.CachedPath); if (new Uri(this.CachedPath).IsFile) { FileInfo fileInfo = new FileInfo(this.CachedPath); if (fileInfo.Exists) { // Pull the latest info. fileInfo.Refresh(); cachedImage = new CachedImage { Key = Path.GetFileNameWithoutExtension(this.CachedPath), Path = this.CachedPath, CreationTimeUtc = fileInfo.CreationTimeUtc }; CacheIndexer.Add(cachedImage); } } if (cachedImage == null) { try { string path = this.GetFolderStructureForAmazon(this.CachedPath); string filename = Path.GetFileName(this.CachedPath); string key = this.GetKey(path, filename); GetObjectMetadataRequest objectMetaDataRequest = new GetObjectMetadataRequest { BucketName = this.awsBucketName, Key = key, }; GetObjectMetadataResponse response = await this.amazonS3ClientCache.GetObjectMetadataAsync(objectMetaDataRequest); if (response != null) { cachedImage = new CachedImage { Key = key, Path = this.CachedPath, CreationTimeUtc = response.LastModified.ToUniversalTime() }; CacheIndexer.Add(cachedImage); } } catch (AmazonS3Exception) { // Nothing in S3 so we should return true. isUpdated = true; } } if (cachedImage == null) { // Nothing in the cache so we should return true. isUpdated = true; } else { // Check to see if the cached image is set to expire. if (this.IsExpired(cachedImage.CreationTimeUtc)) { CacheIndexer.Remove(this.CachedPath); isUpdated = true; } } return(isUpdated); }
static void Main(string[] args) { IAmazonS3 s3Client = new AmazonS3Client(Amazon.RegionEndpoint.USEast1); // List to store upload part responses. List <UploadPartResponse> uploadResponses = new List <UploadPartResponse>(); List <CopyPartResponse> copyResponses = new List <CopyPartResponse>(); InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest { BucketName = targetBucket, Key = targetObjectKey }; InitiateMultipartUploadResponse initResponse = s3Client.InitiateMultipartUpload(initiateRequest); String uploadId = initResponse.UploadId; try { // Get object size. GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest { BucketName = sourceBucket, Key = sourceObjectKey }; GetObjectMetadataResponse metadataResponse = s3Client.GetObjectMetadata(metadataRequest); long objectSize = metadataResponse.ContentLength; // in bytes // Copy parts. long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB long bytePosition = 0; for (int i = 1; bytePosition < objectSize; i++) { CopyPartRequest copyRequest = new CopyPartRequest { DestinationBucket = targetBucket, DestinationKey = targetObjectKey, SourceBucket = sourceBucket, SourceKey = sourceObjectKey, UploadId = uploadId, FirstByte = bytePosition, LastByte = bytePosition + partSize - 1 >= objectSize ? objectSize - 1 : bytePosition + partSize - 1, PartNumber = i }; copyResponses.Add(s3Client.CopyPart(copyRequest)); bytePosition += partSize; } CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest { BucketName = targetBucket, Key = targetObjectKey, UploadId = initResponse.UploadId }; completeRequest.AddPartETags(copyResponses); CompleteMultipartUploadResponse completeUploadResponse = s3Client.CompleteMultipartUpload(completeRequest); } catch (Exception e) { Console.WriteLine(e.Message); } }
private void UploadToCdn() { try { // one thread only if (Interlocked.CompareExchange(ref work, 1, 0) == 0) { var @continue = false; try { CdnItem item; if (queue.TryDequeue(out item)) { @continue = true; var cdnpath = GetCdnPath(item.Bundle.Path); var key = new Uri(cdnpath).PathAndQuery.TrimStart('/'); var etag = AmazonS3Util.GenerateChecksumForContent(item.Response.Content, false).ToLowerInvariant(); var config = new AmazonS3Config { ServiceURL = "s3.amazonaws.com", CommunicationProtocol = Protocol.HTTP }; using (var s3 = new AmazonS3Client(s3publickey, s3privatekey, config)) { var upload = false; try { var request = new GetObjectMetadataRequest { BucketName = s3bucket, Key = key, }; using (var response = s3.GetObjectMetadata(request)) { upload = !string.Equals(etag, response.ETag.Trim('"'), StringComparison.InvariantCultureIgnoreCase); } } catch (AmazonS3Exception ex) { if (ex.StatusCode == HttpStatusCode.NotFound) { upload = true; } else { throw; } } if (upload) { var request = new PutObjectRequest { BucketName = s3bucket, CannedACL = S3CannedACL.PublicRead, Key = key, ContentType = AmazonS3Util.MimeTypeFromExtension(Path.GetExtension(key).ToLowerInvariant()), }; if (ClientSettings.GZipEnabled) { var compressed = new MemoryStream(); using (var compression = new GZipStream(compressed, CompressionMode.Compress, true)) { new MemoryStream(Encoding.UTF8.GetBytes(item.Response.Content)).CopyTo(compression); } compressed.Position = 0; /* rewind so the upload starts at the beginning of the gzipped data */ request.InputStream = compressed; request.AddHeader("Content-Encoding", "gzip"); } else { request.ContentBody = item.Response.Content; } var cache = TimeSpan.FromDays(365); request.AddHeader("Cache-Control", string.Format("public, max-age={0}", (int)cache.TotalSeconds)); request.AddHeader("Expires", DateTime.UtcNow.Add(cache).ToString("R")); request.AddHeader("Etag", etag); request.AddHeader("Last-Modified", DateTime.UtcNow.ToString("R")); using (s3.PutObject(request)) { } } item.Bundle.CdnPath = cdnpath; } } } catch (Exception err) { log.Error(err); } finally { work = 0; if (@continue) { Action upload = () => UploadToCdn(); upload.BeginInvoke(null, null); } } } } catch (Exception fatal) { log.Fatal(fatal); } }
public async Task <CreateImageFromImageResult> CreateImageFromExistingImage(FolderIdType folderId, FileIdType fileId, FileIdType newFileId, int quality, int width, bool asProgressive = true) { var fileName = $"{_prefix}/{folderId}/{fileId}"; var newFileName = $"{_prefix}/{folderId}/{newFileId}"; var newGetRequest = new GetObjectMetadataRequest { BucketName = _bucketName, Key = newFileName }; try { var newGetResponse = await _client.GetObjectMetadataAsync(newGetRequest); var newUri = GetUri(fileName); throw new Exception($"The blob you want to create already exists - {newUri}!"); } catch (AmazonS3Exception ex) { if (ex.ErrorCode == "NotFound") { var oldGetRequest = new GetObjectRequest { BucketName = _bucketName, Key = fileName }; var oldRequest = await _client.GetObjectAsync(oldGetRequest); var oldUri = GetUri(fileName); if (oldRequest.HttpStatusCode == System.Net.HttpStatusCode.OK) { byte[] bytes; using (var ms = new MemoryStream()) { await oldRequest.ResponseStream.CopyToAsync(ms); ms.Position = 0; bytes = ms.ToArray(); } var imageResult = await _imageService.GetImage(bytes, width, quality, asProgressive); var uri = await Upload(folderId, newFileId, imageResult.Bytes, oldRequest.Headers.ContentType); var thumbNailImageResult = await _imageService.GetImage(bytes, 250, 60, true); var thumbnailUri = await Upload(folderId, newFileId, thumbNailImageResult.Bytes, oldRequest.Headers.ContentType, true); return(new CreateImageFromImageResult { ImageProcessResult = imageResult, Uri = uri, ThumbnailUri = thumbnailUri }); } else { throw new Exception($"The blob you want to copy doesn't exists - {oldUri}!"); } } else { throw; } } }
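Several examples in this listing (the CDN uploaders above and this image service) decide what to do by probing the object with GetObjectMetadata and treating a 404 as "the object does not exist". That pattern on its own, as a minimal sketch:

using System.Net;
using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Model;

public static class S3ObjectProbe
{
    // Returns true when the HEAD-style GetObjectMetadata call succeeds,
    // false on a 404, and rethrows any other S3 error.
    public static async Task<bool> ObjectExistsAsync(IAmazonS3 client, string bucketName, string key)
    {
        try
        {
            await client.GetObjectMetadataAsync(new GetObjectMetadataRequest
            {
                BucketName = bucketName,
                Key = key
            });
            return true;
        }
        catch (AmazonS3Exception ex) when (ex.StatusCode == HttpStatusCode.NotFound)
        {
            return false;
        }
    }
}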
private static async Task CopyObjectAsync(IAmazonS3 s3Client, string base64Key) { List <CopyPartResponse> uploadResponses = new List <CopyPartResponse>(); // 1. Initialize. InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest { BucketName = existingBucketName, Key = targetKeyName, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key, }; InitiateMultipartUploadResponse initResponse = await s3Client.InitiateMultipartUploadAsync(initiateRequest); // 2. Upload Parts. long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB long firstByte = 0; long lastByte = partSize; try { // First find source object size. Because object is stored encrypted with // customer provided key you need to provide encryption information in your request. GetObjectMetadataRequest getObjectMetadataRequest = new GetObjectMetadataRequest() { BucketName = existingBucketName, Key = sourceKeyName, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key // " * **source object encryption key ***" }; GetObjectMetadataResponse getObjectMetadataResponse = await s3Client.GetObjectMetadataAsync(getObjectMetadataRequest); long filePosition = 0; for (int i = 1; filePosition < getObjectMetadataResponse.ContentLength; i++) { CopyPartRequest copyPartRequest = new CopyPartRequest { UploadId = initResponse.UploadId, // Source. SourceBucket = existingBucketName, SourceKey = sourceKeyName, // Source object is stored using SSE-C. Provide encryption information. CopySourceServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, CopySourceServerSideEncryptionCustomerProvidedKey = base64Key, //"***source object encryption key ***", FirstByte = firstByte, // If the last part is smaller then our normal part size then use the remaining size. LastByte = lastByte > getObjectMetadataResponse.ContentLength ? getObjectMetadataResponse.ContentLength - 1 : lastByte, // Target. DestinationBucket = existingBucketName, DestinationKey = targetKeyName, PartNumber = i, // Encryption information for the target object. ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key }; uploadResponses.Add(await s3Client.CopyPartAsync(copyPartRequest)); filePosition += partSize; firstByte += partSize; lastByte += partSize; } // Step 3: complete. CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest { BucketName = existingBucketName, Key = targetKeyName, UploadId = initResponse.UploadId, }; completeRequest.AddPartETags(uploadResponses); CompleteMultipartUploadResponse completeUploadResponse = await s3Client.CompleteMultipartUploadAsync(completeRequest); } catch (Exception exception) { Console.WriteLine("Exception occurred: {0}", exception.Message); AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest { BucketName = existingBucketName, Key = targetKeyName, UploadId = initResponse.UploadId }; s3Client.AbortMultipartUpload(abortMPURequest); } }
public void MultipartEncryptionTestAPM(AmazonS3EncryptionClient s3EncryptionClient) { var nextRandom = random.Next(); var filePath = @"C:\temp\multi-" + nextRandom + ".txt"; var retrievedFilepath = @"C:\temp\retreived-" + nextRandom + ".txt"; var totalSize = MegSize * 15; UtilityMethods.GenerateFile(filePath, totalSize); string key = "key-" + random.Next(); Stream inputStream = File.OpenRead(filePath); try { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.ReducedRedundancy, ContentType = "text/html", CannedACL = S3CannedACL.PublicRead }; InitiateMultipartUploadResponse initResponse = null; if (IsKMSEncryptionClient(s3EncryptionClient)) { initResponse = s3EncryptionClient.InitiateMultipartUpload(initRequest); } else { initResponse = s3EncryptionClient.EndInitiateMultipartUpload( s3EncryptionClient.BeginInitiateMultipartUpload(initRequest, null, null)); } // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MegSize, InputStream = inputStream, }; UploadPartResponse up1Response = s3EncryptionClient.EndUploadPart( s3EncryptionClient.BeginUploadPart(uploadRequest, null, null)); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MegSize, InputStream = inputStream, }; UploadPartResponse up2Response = s3EncryptionClient.EndUploadPart( s3EncryptionClient.BeginUploadPart(uploadRequest, null, null)); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; UploadPartResponse up3Response = s3EncryptionClient.EndUploadPart( s3EncryptionClient.BeginUploadPart(uploadRequest, null, null)); ListPartsRequest listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = s3EncryptionClient.EndListParts( s3EncryptionClient.BeginListParts(listPartRequest, null, null)); Assert.AreEqual(3, listPartResponse.Parts.Count); Assert.AreEqual(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.AreEqual(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.AreEqual(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.AreEqual(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.AreEqual(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.AreEqual(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = s3EncryptionClient.EndListParts( s3EncryptionClient.BeginListParts(listPartRequest, null, null)); Assert.AreEqual(1, listPartResponse.Parts.Count); // Complete the response CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); CompleteMultipartUploadResponse compResponse = s3EncryptionClient.EndCompleteMultipartUpload( s3EncryptionClient.BeginCompleteMultipartUpload(compRequest, null, null)); Assert.AreEqual(bucketName, compResponse.BucketName); Assert.IsNotNull(compResponse.ETag); Assert.AreEqual(key, compResponse.Key); Assert.IsNotNull(compResponse.Location); // Get the file back from S3 and make sure it 
is still the same. GetObjectRequest getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; GetObjectResponse getResponse = null; if (IsKMSEncryptionClient(s3EncryptionClient)) { getResponse = s3EncryptionClient.GetObject(getRequest); } else { getResponse = s3EncryptionClient.EndGetObject( s3EncryptionClient.BeginGetObject(getRequest, null, null)); } getResponse.WriteResponseStreamToFile(retrievedFilepath); UtilityMethods.CompareFiles(filePath, retrievedFilepath); GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; GetObjectMetadataResponse metaDataResponse = s3EncryptionClient.EndGetObjectMetadata( s3EncryptionClient.BeginGetObjectMetadata(metaDataRequest, null, null)); Assert.AreEqual("text/html", metaDataResponse.Headers.ContentType); } finally { inputStream.Close(); if (File.Exists(filePath)) { File.Delete(filePath); } if (File.Exists(retrievedFilepath)) { File.Delete(retrievedFilepath); } } }
public void ObjectSamples() { { #region ListObjects Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // List all objects ListObjectsRequest listRequest = new ListObjectsRequest { BucketName = "SampleBucket", }; ListObjectsResponse listResponse; do { // Get a list of objects listResponse = client.ListObjects(listRequest); foreach (S3Object obj in listResponse.S3Objects) { Console.WriteLine("Object - " + obj.Key); Console.WriteLine(" Size - " + obj.Size); Console.WriteLine(" LastModified - " + obj.LastModified); Console.WriteLine(" Storage class - " + obj.StorageClass); } // Set the marker property listRequest.Marker = listResponse.NextMarker; } while (listResponse.IsTruncated); #endregion } { #region GetObject Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a GetObject request GetObjectRequest request = new GetObjectRequest { BucketName = "SampleBucket", Key = "Item1" }; // Issue request and remember to dispose of the response using (GetObjectResponse response = client.GetObject(request)) { using (StreamReader reader = new StreamReader(response.ResponseStream)) { string contents = reader.ReadToEnd(); Console.WriteLine("Object - " + response.Key); Console.WriteLine(" Version Id - " + response.VersionId); Console.WriteLine(" Contents - " + contents); } } #endregion } { #region GetObject WriteResponseStreamToFile Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a GetObject request GetObjectRequest request = new GetObjectRequest { BucketName = "SampleBucket", Key = "Item1" }; // Issue request and remember to dispose of the response using (GetObjectResponse response = client.GetObject(request)) { // Save object to local file response.WriteResponseStreamToFile("Item1.txt"); } #endregion } { #region GetObjectMetadata Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a GetObjectMetadata request GetObjectMetadataRequest request = new GetObjectMetadataRequest { BucketName = "SampleBucket", Key = "Item1" }; // Issue request and view the response GetObjectMetadataResponse response = client.GetObjectMetadata(request); Console.WriteLine("Content Length - " + response.ContentLength); Console.WriteLine("Content Type - " + response.Headers.ContentType); if (response.Expiration != null) { Console.WriteLine("Expiration Date - " + response.Expiration.ExpiryDate); Console.WriteLine("Expiration Rule Id - " + response.Expiration.RuleId); } #endregion } { #region PutObject Sample 1 // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a PutObject request PutObjectRequest request = new PutObjectRequest { BucketName = "SampleBucket", Key = "Item1", ContentBody = "This is sample content..." 
}; // Put object PutObjectResponse response = client.PutObject(request); #endregion } { #region PutObject Sample 2 // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a PutObject request PutObjectRequest request = new PutObjectRequest { BucketName = "SampleBucket", Key = "Item1", FilePath = "contents.txt" }; // Put object PutObjectResponse response = client.PutObject(request); #endregion } { #region PutObject Sample 3 // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a PutObject request PutObjectRequest request = new PutObjectRequest { BucketName = "SampleBucket", Key = "Item1", }; using (FileStream stream = new FileStream("contents.txt", FileMode.Open)) { request.InputStream = stream; // Put object PutObjectResponse response = client.PutObject(request); } #endregion } { #region DeleteObject Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a DeleteObject request DeleteObjectRequest request = new DeleteObjectRequest { BucketName = "SampleBucket", Key = "Item1" }; // Issue request client.DeleteObject(request); #endregion } { #region DeleteObjects Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a DeleteObjects request DeleteObjectsRequest request = new DeleteObjectsRequest { BucketName = "SampleBucket", Objects = new List <KeyVersion> { new KeyVersion() { Key = "Item1" }, // Versioned item new KeyVersion() { Key = "Item2", VersionId = "Rej8CiBxcZKVK81cLr39j27Y5FVXghDK", }, // Item in subdirectory new KeyVersion() { Key = "Logs/error.txt" } } }; try { // Issue request DeleteObjectsResponse response = client.DeleteObjects(request); } catch (DeleteObjectsException doe) { // Catch error and list error details DeleteObjectsResponse errorResponse = doe.Response; foreach (DeletedObject deletedObject in errorResponse.DeletedObjects) { Console.WriteLine("Deleted item " + deletedObject.Key); } foreach (DeleteError deleteError in errorResponse.DeleteErrors) { Console.WriteLine("Error deleting item " + deleteError.Key); Console.WriteLine(" Code - " + deleteError.Code); Console.WriteLine(" Message - " + deleteError.Message); } } #endregion } { #region CopyObject Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Create a CopyObject request CopyObjectRequest request = new CopyObjectRequest { SourceBucket = "SampleBucket", SourceKey = "Item1", DestinationBucket = "AnotherBucket", DestinationKey = "Copy1", CannedACL = S3CannedACL.PublicRead }; // Issue request client.CopyObject(request); #endregion } { #region ListVersions Sample // Create a client AmazonS3Client client = new AmazonS3Client(); // Turn versioning on for a bucket client.PutBucketVersioning(new PutBucketVersioningRequest { BucketName = "SampleBucket", VersioningConfig = new S3BucketVersioningConfig { Status = VersionStatus.Enabled } }); // Populate bucket with multiple items, each with multiple versions PopulateBucket(client, "SampleBucket"); // Get versions ListVersionsRequest request = new ListVersionsRequest { BucketName = "SampleBucket" }; // Make paged ListVersions calls ListVersionsResponse response; do { response =
client.ListVersions(request); // View information about versions foreach (var version in response.Versions) { Console.WriteLine("Key = {0}, Version = {1}, IsLatest = {2}, LastModified = {3}, Size = {4}", version.Key, version.VersionId, version.IsLatest, version.LastModified, version.Size); } request.KeyMarker = response.NextKeyMarker; request.VersionIdMarker = response.NextVersionIdMarker; } while (response.IsTruncated); #endregion } { #region Multipart Upload Sample int MB = (int)Math.Pow(2, 20); // Create a client AmazonS3Client client = new AmazonS3Client(); // Define input stream Stream inputStream = Create13MBDataStream(); // Initiate multipart upload InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest { BucketName = "SampleBucket", Key = "Item1" }; InitiateMultipartUploadResponse initResponse = client.InitiateMultipartUpload(initRequest); // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest { BucketName = "SampleBucket", Key = "Item1", UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MB, InputStream = inputStream }; UploadPartResponse up1Response = client.UploadPart(uploadRequest); // Upload part 2 uploadRequest = new UploadPartRequest { BucketName = "SampleBucket", Key = "Item1", UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MB, InputStream = inputStream }; UploadPartResponse up2Response = client.UploadPart(uploadRequest); // Upload part 3 uploadRequest = new UploadPartRequest { BucketName = "SampleBucket", Key = "Item1", UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream }; UploadPartResponse up3Response = client.UploadPart(uploadRequest); // List parts for current upload ListPartsRequest listPartRequest = new ListPartsRequest { BucketName = "SampleBucket", Key = "Item1", UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = client.ListParts(listPartRequest); Debug.Assert(listPartResponse.Parts.Count == 3); // Complete the multipart upload CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest { BucketName = "SampleBucket", Key = "Item1", UploadId = initResponse.UploadId, PartETags = new List <PartETag> { new PartETag { ETag = up1Response.ETag, PartNumber = 1 }, new PartETag { ETag = up2Response.ETag, PartNumber = 2 }, new PartETag { ETag = up3Response.ETag, PartNumber = 3 } } }; CompleteMultipartUploadResponse compResponse = client.CompleteMultipartUpload(compRequest); #endregion } }
private void UpdateConfig() { try { using (var client = HasAwsCredentials() ? new AmazonS3Client(_awsAccessKey, _awsSecretKey, RegionEndpoint.USEast1) : new AmazonS3Client(RegionEndpoint.USEast1)) { var getObjectMetadataRequest = new GetObjectMetadataRequest { BucketName = _s3Bucket, Key = _s3Key }; var cancelToken = new CancellationToken(); var getObjectMetadataResponse = client.GetObjectMetadataAsync(getObjectMetadataRequest, cancelToken); var fileLastModified = getObjectMetadataResponse.Result.LastModified; // Check if the file has been modified if (!ModifiedSinceLastUpdate(fileLastModified)) { return; } string fileContents; var objectRequestCancelToken = new CancellationToken(); var getObjectRequest = new GetObjectRequest { BucketName = _s3Bucket, Key = _s3Key }; // Read the S3 file to a string try { using (var getObjectResponse = client.GetObjectAsync(getObjectRequest, objectRequestCancelToken) ) using (var responseStream = getObjectResponse.Result.ResponseStream) using (var reader = new StreamReader(responseStream)) { fileContents = reader.ReadToEnd(); } } catch (Exception ex) { Log.Error(string.Format("Unable to read s3 file {0} {1}", _s3Bucket, _s3Key), ex); return; } // De-serialize the string try { var newConfig = new JsonSerializer <CountersConfig>().DeserializeFromString(fileContents); if (newConfig != null && newConfig.Counters != null) { if (ConfigUpdated != null) { ConfigUpdated(this, new CounterConfigEventArgs(newConfig.Counters)); } } DataLastModified = fileLastModified; } catch (Exception ex) { Log.Error("Unable load new counter config", ex); } } } catch (Exception ex) { Log.Error("Unable to update config", ex); } }
public static void Main(string[] args) { System.Net.ServicePointManager.ServerCertificateValidationCallback = ((sender, certificate, chain, sslPolicyErrors) => true); // create the AWS S3 client AmazonS3Client s3 = AWSS3Factory.getS3Client(); String bucketName = String.Join("-", AWSS3Factory.S3_BUCKET, DateTime.Now.ToString("yyyyMMddHHmmss")); //********************// // 1. Create a bucket // //********************// Console.Write(string.Format(" [*] Creating bucket '{0}'... ", bucketName)); PutBucketResponse pbRes = s3.PutBucket(bucketName); if (pbRes.HttpStatusCode != System.Net.HttpStatusCode.OK) { Console.WriteLine("fail"); Console.ReadLine(); System.Environment.Exit(1); } Console.WriteLine("done"); //************************************// // 2. Create and upload object // //************************************// String objectKey = "object-" + DateTime.Now.ToString("yyyyMMddHHmmssffff"); Console.Write(string.Format(" [*] Creating a new object with key '{0}'... ", objectKey)); PutObjectRequest poRequest = new PutObjectRequest() { BucketName = bucketName, ContentBody = "Lorem ipsum dolor sit amet, consectetur adipiscing elit...", Key = objectKey }; PutObjectResponse poResponse = s3.PutObject(poRequest); if (poResponse.HttpStatusCode != System.Net.HttpStatusCode.OK) { Console.WriteLine("fail"); Console.ReadLine(); System.Environment.Exit(1); } Console.WriteLine("done"); //****************************************// // 3. Obtain object metadata // //****************************************// Console.Write(string.Format(" [*] Obtaining object size and other metadata for object '{0}'... ", objectKey)); GetObjectMetadataRequest request = new GetObjectMetadataRequest() { BucketName = bucketName, Key = objectKey }; // get object metadata - not actual content (HEAD request not GET). GetObjectMetadataResponse gomResponse = s3.GetObjectMetadata(request); if (gomResponse.HttpStatusCode != System.Net.HttpStatusCode.OK) { Console.WriteLine("fail"); Console.ReadLine(); System.Environment.Exit(1); } Console.WriteLine("done"); // Obtain the object size in Bytes long objectSize = gomResponse.Headers.ContentLength; Console.WriteLine(string.Format(" [x] Object size is: {0} bytes", objectSize)); Console.WriteLine(" [x] Other headers and metadata:"); ICollection <string> headers = gomResponse.Headers.Keys; foreach (string header in headers) { Console.WriteLine("[x] {0}: {1}", header, gomResponse.Headers[header]); } ICollection <string> metaKeys = gomResponse.Metadata.Keys; foreach (string metaKey in metaKeys) { Console.WriteLine("[x] {0}: {1}", metaKey, gomResponse.Metadata[metaKey]); } //*******************************************// // 4. Delete the object // //*******************************************// Console.Write(string.Format(" [*] Deleting object '{0}'... ", objectKey)); // create the request object DeleteObjectRequest doRequest = new DeleteObjectRequest() { BucketName = bucketName, Key = objectKey }; // delete the object in the demo bucket DeleteObjectResponse doResponse = s3.DeleteObject(doRequest); if (doResponse.HttpStatusCode != System.Net.HttpStatusCode.NoContent) { Console.WriteLine("fail"); Console.ReadLine(); System.Environment.Exit(1); } Console.WriteLine("done"); //***********************// // 5. Delete the bucket // //***********************// Console.Write(String.Format(" [*] Deleting bucket '{0}' (sleeping 5 seconds)... 
", bucketName)); System.Threading.Thread.Sleep(5000); DeleteBucketResponse dbRes = s3.DeleteBucket(bucketName); if (dbRes.HttpStatusCode != System.Net.HttpStatusCode.NoContent) { Console.WriteLine("fail"); Console.ReadLine(); System.Environment.Exit(1); } Console.WriteLine("done"); Console.WriteLine(" [*] Example is completed. Press any key to exit..."); Console.ReadLine(); }
private async Task MultipartEncryptionTestAsync(AmazonS3EncryptionClient s3EncryptionClient) { var nextRandom = random.Next(); var filePath = @"C:\temp\multi-" + nextRandom + ".txt"; var retrievedFilepath = @"C:\temp\retreived-" + nextRandom + ".txt"; var totalSize = MegSize * 15; UtilityMethods.GenerateFile(filePath, totalSize); string key = "key-" + random.Next(); Stream inputStream = File.OpenRead(filePath); try { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.ReducedRedundancy, ContentType = "text/html", CannedACL = S3CannedACL.PublicRead }; InitiateMultipartUploadResponse initResponse = await s3EncryptionClient.InitiateMultipartUploadAsync(initRequest).ConfigureAwait(false); // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MegSize, InputStream = inputStream, }; UploadPartResponse up1Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MegSize, InputStream = inputStream, }; UploadPartResponse up2Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; UploadPartResponse up3Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false); ListPartsRequest listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false); Assert.Equal(3, listPartResponse.Parts.Count); Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false); Assert.Equal(1, listPartResponse.Parts.Count); // Complete the response CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); CompleteMultipartUploadResponse compResponse = await s3EncryptionClient.CompleteMultipartUploadAsync(compRequest).ConfigureAwait(false); Assert.Equal(bucketName, compResponse.BucketName); Assert.NotNull(compResponse.ETag); Assert.Equal(key, compResponse.Key); Assert.NotNull(compResponse.Location); // Get the file back from S3 and make sure it is still the same. 
GetObjectRequest getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; GetObjectResponse getResponse = await s3EncryptionClient.GetObjectAsync(getRequest).ConfigureAwait(false); await getResponse.WriteResponseStreamToFileAsync(retrievedFilepath, false, System.Threading.CancellationToken.None); UtilityMethods.CompareFiles(filePath, retrievedFilepath); GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; GetObjectMetadataResponse metaDataResponse = await s3EncryptionClient.GetObjectMetadataAsync(metaDataRequest).ConfigureAwait(false); Assert.Equal("text/html", metaDataResponse.Headers.ContentType); } finally { inputStream.Dispose(); if (File.Exists(filePath)) { File.Delete(filePath); } if (File.Exists(retrievedFilepath)) { File.Delete(retrievedFilepath); } } }
public async Task CopyBlobAsync(string sourceContainerName, string sourceBlobName, string destinationContainerName, string destinationBlobName = null) { if (string.IsNullOrEmpty(sourceContainerName)) { throw new StorageException(StorageErrorCode.InvalidName, $"Invalid {nameof(sourceContainerName)}"); } if (string.IsNullOrEmpty(sourceBlobName)) { throw new StorageException(StorageErrorCode.InvalidName, $"Invalid {nameof(sourceBlobName)}"); } if (string.IsNullOrEmpty(destinationContainerName)) { throw new StorageException(StorageErrorCode.InvalidName, $"Invalid {nameof(destinationContainerName)}"); } if (destinationBlobName == string.Empty) { throw new StorageException(StorageErrorCode.InvalidName, $"Invalid {nameof(destinationBlobName)}"); } var sourceKey = GenerateKeyName(sourceContainerName, sourceBlobName); var destinationKey = GenerateKeyName(destinationContainerName, destinationBlobName ?? sourceBlobName); try { // Get the size of the object. var metadataRequest = new GetObjectMetadataRequest { BucketName = _bucket, Key = sourceKey, }; AmazonWebServiceResponse response; var metadataResponse = await _s3Client.GetObjectMetadataAsync(metadataRequest); var objectSize = metadataResponse.ContentLength; // Length in bytes. var limit = 5 * (long)Math.Pow(2, 30); // CopyObject size limit 5 GB. if (objectSize >= limit) { var request = new InitiateMultipartUploadRequest { BucketName = _bucket, Key = destinationKey, ContentType = metadataResponse.Headers.ContentType, // CannedACL = metadataResponse.Headers.. GetCannedACL(properties), ServerSideEncryptionMethod = metadataResponse.ServerSideEncryptionMethod, }; request.Headers.ContentDisposition = metadataResponse.Headers.ContentDisposition; request.Metadata.AddMetadata(metadataResponse.Metadata.ToMetadata()); response = await MultipartCopy(sourceKey, destinationKey, objectSize, request); } else { var request = new CopyObjectRequest { SourceBucket = _bucket, SourceKey = sourceKey, DestinationBucket = _bucket, DestinationKey = destinationKey, ServerSideEncryptionMethod = _serverSideEncryptionMethod }; response = await _s3Client.CopyObjectAsync(request); } if (response.HttpStatusCode != HttpStatusCode.OK) { throw new StorageException(StorageErrorCode.GenericException, "Copy failed."); } } catch (AmazonS3Exception asex) { throw asex.ToStorageException(); } }
/// <summary> /// Gets a value indicating whether the image is new or updated in an asynchronous manner. /// </summary> /// <returns> /// The asynchronous <see cref="Task"/> returning the value. /// </returns> public override async Task <bool> IsNewOrUpdatedAsync() { // TODO: Before this check is performed it should be throttled. For example, only perform this check // if the last time it was checked is greater than 5 seconds. This would be much better for perf // if there is a high throughput of image requests. string cachedFileName = await this.CreateCachedFileNameAsync().ConfigureAwait(false); // Collision rate of about 1 in 10000 for the folder structure. // That gives us massive scope to store millions of files. string pathFromKey = string.Join("\\", cachedFileName.ToCharArray().Take(6)); this.CachedPath = Path.Combine(cachedCdnRoot, awsCacheKeyPrefix, pathFromKey, cachedFileName) .Replace(@"\", "/"); this.cachedRewritePath = this.CachedPath; bool isUpdated = false; CachedImage cachedImage = CacheIndexer.Get(this.CachedPath); if (new Uri(this.CachedPath).IsFile) { var fileInfo = new FileInfo(this.CachedPath); if (fileInfo.Exists) { // Pull the latest info. fileInfo.Refresh(); cachedImage = new CachedImage { Key = Path.GetFileNameWithoutExtension(this.CachedPath), Path = this.CachedPath, CreationTimeUtc = fileInfo.CreationTimeUtc }; CacheIndexer.Add(cachedImage, this.ImageCacheMaxMinutes); } } if (cachedImage == null) { try { string path = this.GetFolderStructureForAmazon(this.CachedPath); string filename = Path.GetFileName(this.CachedPath); string key = this.GetKey(path, filename); var objectMetaDataRequest = new GetObjectMetadataRequest { BucketName = awsBucketName, Key = key, }; GetObjectMetadataResponse response = await amazonS3ClientCache.GetObjectMetadataAsync(objectMetaDataRequest).ConfigureAwait(false); if (response != null) { cachedImage = new CachedImage { Key = key, Path = this.CachedPath, CreationTimeUtc = response.LastModified.ToUniversalTime() }; CacheIndexer.Add(cachedImage, this.ImageCacheMaxMinutes); } } catch (AmazonS3Exception) { // Nothing in S3 so we should return true. isUpdated = true; } } if (cachedImage == null) { // Nothing in the cache so we should return true. isUpdated = true; } else { // Check to see if the cached image is set to expire // or a new file with the same name has replaced our current image if (this.IsExpired(cachedImage.CreationTimeUtc) || await this.IsUpdatedAsync(cachedImage.CreationTimeUtc).ConfigureAwait(false)) { CacheIndexer.Remove(this.CachedPath); isUpdated = true; } } return(isUpdated); }
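Both cache examples above call GetFolderStructureForAmazon and GetKey, which are not shown in this listing. Assuming the cached path is a forward-slash path rooted at the CDN root and that keys are simply "folder/filename", those helpers could look roughly like this (a sketch under those assumptions, not the original implementation):

// Sketch only: derive the key parts from the computed cache path.
private string GetFolderStructureForAmazon(string cachedPath)
{
    // Strip the CDN root and keep the folder portion, including the trailing slash.
    string relative = cachedPath.Replace(this.cachedCdnRoot, string.Empty).TrimStart('/');
    int lastSlash = relative.LastIndexOf('/');
    return lastSlash < 0 ? string.Empty : relative.Substring(0, lastSlash + 1);
}

private string GetKey(string path, string fileName)
{
    // Keys in these examples are plain "path/filename" strings with forward slashes.
    return string.Concat(path, fileName).Replace(@"\", "/");
}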
public static void MultipartEncryptionTest(AmazonS3Client s3EncryptionClient, IAmazonS3 s3DecryptionClient, string bucketName) { var guid = Guid.NewGuid(); var filePath = Path.Combine(Path.GetTempPath(), $"multi-{guid}.txt"); var retrievedFilepath = Path.Combine(Path.GetTempPath(), $"retrieved-{guid}.txt"); var totalSize = MegaByteSize * 15; UtilityMethods.GenerateFile(filePath, totalSize); var key = $"key-{guid}"; Stream inputStream = File.OpenRead(filePath); try { var initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.OneZoneInfrequentAccess, ContentType = "text/html", }; var initResponse = s3EncryptionClient.InitiateMultipartUpload(initRequest); // Upload part 1 var uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MegaByteSize, InputStream = inputStream }; var up1Response = s3EncryptionClient.UploadPart(uploadRequest); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MegaByteSize, InputStream = inputStream }; var up2Response = s3EncryptionClient.UploadPart(uploadRequest); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; var up3Response = s3EncryptionClient.UploadPart(uploadRequest); var listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; var listPartResponse = s3EncryptionClient.ListParts(listPartRequest); Assert.Equal(3, listPartResponse.Parts.Count); Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = s3EncryptionClient.ListParts(listPartRequest); Assert.Equal(1, listPartResponse.Parts.Count); // Complete the response var compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); var compResponse = s3EncryptionClient.CompleteMultipartUpload(compRequest); Assert.Equal(bucketName, compResponse.BucketName); Assert.NotNull(compResponse.ETag); Assert.Equal(key, compResponse.Key); Assert.NotNull(compResponse.Location); // Get the file back from S3 and make sure it is still the same. 
var getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; var getResponse = s3DecryptionClient.GetObject(getRequest); getResponse.WriteResponseStreamToFile(retrievedFilepath); UtilityMethods.CompareFiles(filePath, retrievedFilepath); var metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; var metaDataResponse = s3DecryptionClient.GetObjectMetadata(metaDataRequest); Assert.Equal("text/html", metaDataResponse.Headers.ContentType); } finally { inputStream.Close(); if (File.Exists(filePath)) { File.Delete(filePath); } if (File.Exists(retrievedFilepath)) { File.Delete(retrievedFilepath); } } #if ASYNC_AWAIT // run the async version of the same test WaitForAsyncTask(MultipartEncryptionTestAsync(s3EncryptionClient, s3DecryptionClient, bucketName)); #elif AWS_APM_API // run the APM version of the same test MultipartEncryptionTestAPM(s3EncryptionClient, s3DecryptionClient, bucketName); #endif }
public static async Task MultipartEncryptionTestAsync(AmazonS3Client s3EncryptionClient, AmazonS3Client s3DecryptionClient, string bucketName) { var filePath = Path.GetTempFileName(); var retrievedFilepath = Path.GetTempFileName(); var totalSize = MegaBytesSize * 15; UtilityMethods.GenerateFile(filePath, totalSize); var key = Guid.NewGuid().ToString(); Stream inputStream = File.OpenRead(filePath); try { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.OneZoneInfrequentAccess, ContentType = "text/html", }; InitiateMultipartUploadResponse initResponse = await s3EncryptionClient.InitiateMultipartUploadAsync(initRequest).ConfigureAwait(false); // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MegaBytesSize, InputStream = inputStream, }; UploadPartResponse up1Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MegaBytesSize, InputStream = inputStream }; UploadPartResponse up2Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; UploadPartResponse up3Response = await s3EncryptionClient.UploadPartAsync(uploadRequest).ConfigureAwait(false); ListPartsRequest listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false); Assert.Equal(3, listPartResponse.Parts.Count); Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = await s3EncryptionClient.ListPartsAsync(listPartRequest).ConfigureAwait(false); Assert.Single(listPartResponse.Parts); // Complete the response CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); CompleteMultipartUploadResponse compResponse = await s3EncryptionClient.CompleteMultipartUploadAsync(compRequest).ConfigureAwait(false); Assert.Equal(bucketName, compResponse.BucketName); Assert.NotNull(compResponse.ETag); Assert.Equal(key, compResponse.Key); Assert.NotNull(compResponse.Location); // Get the file back from S3 and make sure it is still the same. 
GetObjectRequest getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; GetObjectResponse getResponse = await s3DecryptionClient.GetObjectAsync(getRequest).ConfigureAwait(false); await getResponse.WriteResponseStreamToFileAsync(retrievedFilepath, false, CancellationToken.None); UtilityMethods.CompareFiles(filePath, retrievedFilepath); GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; GetObjectMetadataResponse metaDataResponse = await s3DecryptionClient.GetObjectMetadataAsync(metaDataRequest).ConfigureAwait(false); Assert.Equal("text/html", metaDataResponse.Headers.ContentType); } finally { inputStream.Dispose(); if (File.Exists(filePath)) { File.Delete(filePath); } if (File.Exists(retrievedFilepath)) { File.Delete(retrievedFilepath); } } }
public static GetObjectMetadataResponse GetObjectMetadata(this IAmazonS3 client, GetObjectMetadataRequest request) { return client.GetObjectMetadataAsync(request).GetResult(); }
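This extension lets legacy synchronous callers block on the async SDK call; in environments with a synchronization context (classic ASP.NET, UI threads) that kind of blocking can deadlock, so awaiting the call directly is preferable where the caller can be made async. A small sketch of the awaited form (the class name and parameters are illustrative):

using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Model;

public static class ObjectMetadataUsage
{
    // Preferred: await the SDK call rather than blocking on it.
    public static async Task<long> GetContentLengthAsync(IAmazonS3 client, string bucket, string key)
    {
        GetObjectMetadataResponse response = await client.GetObjectMetadataAsync(
            new GetObjectMetadataRequest { BucketName = bucket, Key = key });
        return response.ContentLength;
    }
}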
private static void UploadFiles(IAmazonS3 client) { try { TransferUtility transferUtility = new TransferUtility(client); var filePaths = Directory.GetFiles(sourceDirectory, "*", SearchOption.AllDirectories); foreach (var filePath in filePaths) { var lastWrite = File.GetLastWriteTime(filePath).ToUniversalTime(); var key = filePath.Replace(sourceDirectory, @""); if (key.StartsWith(@"\")) { key = key.Substring(1); } key = key.Replace(@"\", @"/"); bool fileExists = false, fileNewer = false; var fileInfo = new S3FileInfo(client, bucketName, key); if ((fileExists = fileInfo.Exists)) { var getObjectMetadataRequest = new GetObjectMetadataRequest { BucketName = bucketName, Key = key }; var getObjectMetadataResponse = client.GetObjectMetadata(getObjectMetadataRequest); fileNewer = lastWrite > getObjectMetadataResponse.LastModified; } if (!fileExists || fileNewer) { var uploadRequest = new TransferUtilityUploadRequest { BucketName = bucketName, FilePath = filePath, Key = key, CannedACL = S3CannedACL.PublicRead }; uploadRequest.UploadProgressEvent += new EventHandler <UploadProgressArgs>(UploadRequest_UploadPartProgressEvent); transferUtility.Upload(uploadRequest); } } } catch (AmazonS3Exception amazonS3Exception) { if (amazonS3Exception.ErrorCode != null && (amazonS3Exception.ErrorCode.Equals("InvalidAccessKeyId") || amazonS3Exception.ErrorCode.Equals("InvalidSecurity"))) { Console.WriteLine("Check the provided AWS Credentials."); Console.WriteLine("For service sign up go to http://aws.amazon.com/s3"); } else { Console.WriteLine("Error occurred. Message:'{0}' when writing an object", amazonS3Exception.Message); } } }
/// <summary> /// This method uses the passed client object to perform a multi-part /// copy operation. /// </summary> /// <param name="client">An Amazon S3 client object that will be used /// to perform the copy.</param> public static async Task MPUCopyObjectAsync(AmazonS3Client client) { // Create a list to store the upload part responses. var uploadResponses = new List <UploadPartResponse>(); var copyResponses = new List <CopyPartResponse>(); // Setup information required to initiate the multipart upload. var initiateRequest = new InitiateMultipartUploadRequest { BucketName = TargetBucket, Key = TargetObjectKey, }; // Initiate the upload. InitiateMultipartUploadResponse initResponse = await client.InitiateMultipartUploadAsync(initiateRequest); // Save the upload ID. string uploadId = initResponse.UploadId; try { // Get the size of the object. var metadataRequest = new GetObjectMetadataRequest { BucketName = SourceBucket, Key = SourceObjectKey, }; GetObjectMetadataResponse metadataResponse = await client.GetObjectMetadataAsync(metadataRequest); var objectSize = metadataResponse.ContentLength; // Length in bytes. // Copy the parts. var partSize = 5 * (long)Math.Pow(2, 20); // Part size is 5 MB. long bytePosition = 0; for (int i = 1; bytePosition < objectSize; i++) { var copyRequest = new CopyPartRequest { DestinationBucket = TargetBucket, DestinationKey = TargetObjectKey, SourceBucket = SourceBucket, SourceKey = SourceObjectKey, UploadId = uploadId, FirstByte = bytePosition, LastByte = bytePosition + partSize - 1 >= objectSize ? objectSize - 1 : bytePosition + partSize - 1, PartNumber = i, }; copyResponses.Add(await client.CopyPartAsync(copyRequest)); bytePosition += partSize; } // Set up to complete the copy. var completeRequest = new CompleteMultipartUploadRequest { BucketName = TargetBucket, Key = TargetObjectKey, UploadId = initResponse.UploadId, }; completeRequest.AddPartETags(copyResponses); // Complete the copy. CompleteMultipartUploadResponse completeUploadResponse = await client.CompleteMultipartUploadAsync(completeRequest); } catch (AmazonS3Exception e) { Console.WriteLine($"Error encountered on server. Message:'{e.Message}' when writing an object"); } catch (Exception e) { Console.WriteLine($"Unknown encountered on server. Message:'{e.Message}' when writing an object"); } }
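The copy loops here and in the earlier Main example derive each part's byte range from a running position, clamping the final range to the end of the object. The same arithmetic pulled out into a small helper, as a sketch (the name and tuple shape are illustrative):

using System;
using System.Collections.Generic;

public static class PartRanges
{
    // Yields inclusive (FirstByte, LastByte) pairs covering objectSize bytes in partSize chunks;
    // the last range is clamped to the final byte of the object.
    public static IEnumerable<(long FirstByte, long LastByte)> For(long objectSize, long partSize)
    {
        for (long position = 0; position < objectSize; position += partSize)
        {
            yield return (position, Math.Min(position + partSize - 1, objectSize - 1));
        }
    }
}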
public void GetObjectMetadataAsync(GetObjectMetadataRequest request, AmazonServiceCallback <GetObjectMetadataRequest, GetObjectMetadataResponse> callback, AsyncOptions options = null) { throw new System.NotImplementedException(); }
public GetObjectMetadataResponse GetObjectMetadata(GetObjectMetadataRequest request) { throw new NotImplementedException(); }
public void ServerSideEncryptionBYOKPutAndGet() { var bucketName = S3TestUtils.CreateBucket(Client); try { Aes aesEncryption = Aes.Create(); aesEncryption.KeySize = 256; aesEncryption.GenerateKey(); string base64Key = Convert.ToBase64String(aesEncryption.Key); string base64KeyMd5 = ComputeEncodedMD5FromEncodedString(base64Key); PutObjectRequest putRequest = new PutObjectRequest { BucketName = bucketName, Key = key, ContentBody = "The Data To Encrypt in S3", ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key, ServerSideEncryptionCustomerProvidedKeyMD5 = base64KeyMd5 }; Client.PutObject(putRequest); GetObjectMetadataRequest getObjectMetadataRequest = new GetObjectMetadataRequest { BucketName = bucketName, Key = key, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key, ServerSideEncryptionCustomerProvidedKeyMD5 = base64KeyMd5 }; GetObjectMetadataResponse getObjectMetadataResponse = Client.GetObjectMetadata(getObjectMetadataRequest); Assert.AreEqual(ServerSideEncryptionCustomerMethod.AES256, getObjectMetadataResponse.ServerSideEncryptionCustomerMethod); GetObjectRequest getObjectRequest = new GetObjectRequest { BucketName = bucketName, Key = key, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key, ServerSideEncryptionCustomerProvidedKeyMD5 = base64KeyMd5 }; using (GetObjectResponse getResponse = Client.GetObject(getObjectRequest)) using (StreamReader reader = new StreamReader(getResponse.ResponseStream)) { string content = reader.ReadToEnd(); Assert.AreEqual(putRequest.ContentBody, content); Assert.AreEqual(ServerSideEncryptionCustomerMethod.AES256, getResponse.ServerSideEncryptionCustomerMethod); } GetPreSignedUrlRequest getPresignedUrlRequest = new GetPreSignedUrlRequest { BucketName = bucketName, Key = key, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, Expires = DateTime.Now.AddMinutes(5) }; var url = Client.GetPreSignedURL(getPresignedUrlRequest); var webRequest = HttpWebRequest.Create(url); webRequest.Headers.Add("x-amz-server-side-encryption-customer-algorithm", "AES256"); webRequest.Headers.Add("x-amz-server-side-encryption-customer-key", base64Key); webRequest.Headers.Add("x-amz-server-side-encryption-customer-key-MD5", base64KeyMd5); using (var response = webRequest.GetResponse()) using (var reader = new StreamReader(response.GetResponseStream())) { var contents = reader.ReadToEnd(); Assert.AreEqual(putRequest.ContentBody, contents); } aesEncryption.GenerateKey(); string copyBase64Key = Convert.ToBase64String(aesEncryption.Key); CopyObjectRequest copyRequest = new CopyObjectRequest { SourceBucket = bucketName, SourceKey = key, DestinationBucket = bucketName, DestinationKey = "EncryptedObject_Copy", CopySourceServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, CopySourceServerSideEncryptionCustomerProvidedKey = base64Key, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = copyBase64Key }; Client.CopyObject(copyRequest); getObjectMetadataRequest = new GetObjectMetadataRequest { BucketName = bucketName, Key = "EncryptedObject_Copy", ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = copyBase64Key }; getObjectMetadataResponse = 
Client.GetObjectMetadata(getObjectMetadataRequest); Assert.AreEqual(ServerSideEncryptionCustomerMethod.AES256, getObjectMetadataResponse.ServerSideEncryptionCustomerMethod); } finally { AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); } }
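ComputeEncodedMD5FromEncodedString is referenced above but its implementation is not shown. SSE-C expects the base64-encoded MD5 digest of the raw key bytes, so a plausible sketch of such a helper (an assumption, not the original implementation) looks like this:

// Sketch: MD5 of the decoded key bytes, returned base64-encoded, as the SSE-C key-MD5 headers expect.
private static string ComputeEncodedMD5FromEncodedString(string base64EncodedKey)
{
    byte[] keyBytes = Convert.FromBase64String(base64EncodedKey);
    using (var md5 = System.Security.Cryptography.MD5.Create())
    {
        return Convert.ToBase64String(md5.ComputeHash(keyBytes));
    }
}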
public async Task <IList <BlobDescriptor> > ListBlobsAsync(string containerName) { var descriptors = new List <BlobDescriptor>(); var objectsRequest = new ListObjectsRequest { BucketName = _bucket, Prefix = containerName, MaxKeys = 100000 }; try { do { var objectsResponse = await _s3Client.ListObjectsAsync(objectsRequest); foreach (S3Object entry in objectsResponse.S3Objects) { var objectMetaRequest = new GetObjectMetadataRequest() { BucketName = _bucket, Key = entry.Key }; var objectMetaResponse = await _s3Client.GetObjectMetadataAsync(objectMetaRequest); var objectAclRequest = new GetACLRequest() { BucketName = _bucket, Key = entry.Key }; var objectAclResponse = await _s3Client.GetACLAsync(objectAclRequest); var isPublic = objectAclResponse.AccessControlList.Grants .Where(x => x.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers").Count() > 0; descriptors.Add(new BlobDescriptor { Name = entry.Key.Remove(0, containerName.Length + 1), Container = containerName, Length = entry.Size, ETag = entry.ETag, ContentMD5 = entry.ETag, ContentType = objectMetaResponse.Headers.ContentType, LastModified = entry.LastModified, Security = isPublic ? BlobSecurity.Public : BlobSecurity.Private }); } // If response is truncated, set the marker to get the next set of keys. if (objectsResponse.IsTruncated) { objectsRequest.Marker = objectsResponse.NextMarker; } else { objectsRequest = null; } } while (objectsRequest != null); return(descriptors); } catch (AmazonS3Exception asex) { if (IsInvalidAccessException(asex)) { throw new StorageException(1000.ToStorageError(), asex); } else { throw new StorageException(1001.ToStorageError(), asex); } } }
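Note that S3 returns at most 1,000 keys per ListObjects call regardless of MaxKeys, which is why the IsTruncated/NextMarker loop above is required. On SDK versions that expose the V2 listing API, the same pagination can be written with continuation tokens; a minimal sketch, assuming the surrounding _s3Client and _bucket fields:

// Sketch: V2 listing pagination with continuation tokens (AWSSDK.S3).
var listRequest = new ListObjectsV2Request { BucketName = _bucket, Prefix = containerName };
ListObjectsV2Response listResponse;
do
{
    listResponse = await _s3Client.ListObjectsV2Async(listRequest);
    foreach (var entry in listResponse.S3Objects)
    {
        // Process entry.Key, entry.Size, entry.ETag, entry.LastModified here.
    }
    listRequest.ContinuationToken = listResponse.NextContinuationToken;
} while (listResponse.IsTruncated);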
public void ObjectBasicTest() { //Get Case var key = OssTestUtils.GetObjectKey(_className); _ossClient.PutObject(_bucketName, key, new MemoryStream(Encoding.ASCII.GetBytes("hello world"))); var getRequest = new GetObjectRequest(_bucketName, key); Assert.AreEqual(getRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.GetObject(getRequest); Assert.Fail("should not be here."); } catch (OssException e) { Assert.AreEqual(e.ErrorCode, "AccessDenied"); } getRequest.RequestPayer = RequestPayer.Requester; var getResult = _ossPayerClient.GetObject(getRequest); Assert.AreEqual(getResult.ContentLength, 11); _ossClient.DeleteObject(_bucketName, key); //Put Case key = OssTestUtils.GetObjectKey(_className); var content = new MemoryStream(Encoding.ASCII.GetBytes("hello world")); var putRequest = new PutObjectRequest(_bucketName, key, content); Assert.AreEqual(putRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.PutObject(putRequest); Assert.Fail("should not be here."); } catch (OssException e) { Assert.AreEqual(e.ErrorCode, "AccessDenied"); } content = new MemoryStream(Encoding.ASCII.GetBytes("hello world")); putRequest = new PutObjectRequest(_bucketName, key, content); putRequest.RequestPayer = RequestPayer.Requester; var putResult = _ossPayerClient.PutObject(putRequest); Assert.AreEqual(putResult.HttpStatusCode, HttpStatusCode.OK); //head object var headRequest = new GetObjectMetadataRequest(_bucketName, key); Assert.AreEqual(headRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.GetObjectMetadata(headRequest); Assert.Fail("should not be here."); } catch (Exception e) { Assert.IsTrue(true, e.Message); } headRequest.RequestPayer = RequestPayer.Requester; var headResult = _ossPayerClient.GetObjectMetadata(headRequest); Assert.AreEqual(headResult.ContentLength, 11); //Delete Case Assert.IsTrue(_ossClient.DoesObjectExist(_bucketName, key)); var delRequest = new DeleteObjectRequest(_bucketName, key); Assert.AreEqual(delRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.DeleteObject(delRequest); Assert.Fail("should not be here."); } catch (OssException e) { Assert.AreEqual(e.ErrorCode, "AccessDenied"); } delRequest = new DeleteObjectRequest(_bucketName, key); delRequest.RequestPayer = RequestPayer.Requester; _ossPayerClient.DeleteObject(delRequest); Assert.IsFalse(_ossClient.DoesObjectExist(_bucketName, key)); //delete objects var keys = new List <string>(); keys.Add(key); var delsRequest = new DeleteObjectsRequest(_bucketName, keys); Assert.AreEqual(delsRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.DeleteObjects(delsRequest); Assert.Fail("should not be here."); } catch (OssException e) { Assert.AreEqual(e.ErrorCode, "AccessDenied"); } delsRequest.RequestPayer = RequestPayer.Requester; _ossPayerClient.DeleteObjects(delsRequest); //delete object versions var objects = new List <ObjectIdentifier>(); objects.Add(new ObjectIdentifier(key)); var delvsRequest = new DeleteObjectVersionsRequest(_bucketName, objects); Assert.AreEqual(delvsRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.DeleteObjectVersions(delvsRequest); Assert.Fail("should not be here."); } catch (OssException e) { Assert.AreEqual(e.ErrorCode, "AccessDenied"); } delvsRequest.RequestPayer = RequestPayer.Requester; _ossPayerClient.DeleteObjectVersions(delvsRequest); //list objects var lsRequest = new ListObjectsRequest(_bucketName); Assert.AreEqual(lsRequest.RequestPayer, RequestPayer.BucketOwner); try { _ossPayerClient.ListObjects(lsRequest); Assert.Fail("should not be here."); } catch (OssException e) { Assert.AreEqual(e.ErrorCode, "AccessDenied"); } lsRequest.RequestPayer = RequestPayer.Requester; _ossPayerClient.ListObjects(lsRequest); }
public async Task <List <FileMetadata> > List(string prefix = null) { var descriptors = new List <FileMetadata>(); var objectsRequest = new ListObjectsRequest { BucketName = _bucketName, Prefix = prefix, MaxKeys = 100000 }; do { var objectsResponse = await _client.ListObjectsAsync(objectsRequest); foreach (S3Object entry in objectsResponse.S3Objects) { var objectMetaRequest = new GetObjectMetadataRequest { BucketName = _bucketName, Key = entry.Key }; var objectMetaResponse = await _client.GetObjectMetadataAsync(objectMetaRequest); var objectAclRequest = new GetACLRequest { BucketName = _bucketName, Key = entry.Key }; var objectAclResponse = await _client.GetACLAsync(objectAclRequest); var isPublic = objectAclResponse.AccessControlList.Grants.Any(x => x.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers"); descriptors.Add(new FileMetadata { Name = entry.Key, BucketName = _bucketName, Length = entry.Size, ETag = entry.ETag, ContentMD5 = entry.ETag, ContentType = objectMetaResponse.Headers.ContentType, LastModifiedOn = entry.LastModified, //Security = isPublic ? FileSecurity.Public : FileSecurity.Private, ContentDisposition = objectMetaResponse.Headers.ContentDisposition, Metadata = objectMetaResponse.Metadata.ToMetadata(), }); } // If response is truncated, set the marker to get the next set of keys. if (objectsResponse.IsTruncated) { objectsRequest.Marker = objectsResponse.NextMarker; } else { objectsRequest = null; } } while (objectsRequest != null); return(descriptors); }
public async Task MultipartEncryptionTestInstructionFile() { string filePath = Path.Combine(Path.GetTempPath(), "MulitpartEncryptionTestInstructionFile_upload.txt"); string retrievedFilepath = Path.Combine(Path.GetTempPath(), "MulitpartEncryptionTestInstructionFile_download.txt"); int MEG_SIZE = (int)Math.Pow(2, 20); long totalSize = (long)(15 * MEG_SIZE) + 4001; UtilityMethods.GenerateFile(filePath, totalSize); _filesToDelete.Add(filePath); string key = "MultipartEncryptionTestInstrcutionFile" + random.Next(); using (Stream inputStream = File.OpenRead(filePath)) { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.ReducedRedundancy, ContentType = "text/html", CannedACL = S3CannedACL.PublicRead }; InitiateMultipartUploadResponse initResponse = await s3EncryptionClientFileMode.InitiateMultipartUploadAsync(initRequest); // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MEG_SIZE, InputStream = inputStream }; UploadPartResponse up1Response = await s3EncryptionClientFileMode.UploadPartAsync(uploadRequest); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MEG_SIZE + 4001, InputStream = inputStream }; UploadPartResponse up2Response = await s3EncryptionClientFileMode.UploadPartAsync(uploadRequest); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; //uploadRequest.setLastPart(); UploadPartResponse up3Response = await s3EncryptionClientFileMode.UploadPartAsync(uploadRequest); ListPartsRequest listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = await s3EncryptionClientFileMode.ListPartsAsync(listPartRequest); Assert.Equal(3, listPartResponse.Parts.Count); Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = await s3EncryptionClientFileMode.ListPartsAsync(listPartRequest); Assert.Equal(1, listPartResponse.Parts.Count); // Complete the response CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); CompleteMultipartUploadResponse compResponse = await s3EncryptionClientFileMode.CompleteMultipartUploadAsync(compRequest); Assert.Equal(bucketName, compResponse.BucketName); Assert.NotNull(compResponse.ETag); Assert.Equal(key, compResponse.Key); Assert.NotNull(compResponse.Location); // Get the file back from S3 and make sure it is still the same. 
GetObjectRequest getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; GetObjectResponse getResponse = await s3EncryptionClientFileMode.GetObjectAsync(getRequest); await getResponse.WriteResponseStreamToFileAsync(retrievedFilepath, false, System.Threading.CancellationToken.None); _filesToDelete.Add(retrievedFilepath); UtilityMethods.CompareFiles(filePath, retrievedFilepath); GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; GetObjectMetadataResponse metaDataResponse = await s3EncryptionClientFileMode.GetObjectMetadataAsync(metaDataRequest); Assert.Equal("text/html", metaDataResponse.Headers.ContentType); } }
/// <summary> /// Provides the actual implementation to move or copy an S3 object. /// </summary> /// <param name="client">The Amazon S3 client used to perform the copy.</param> /// <param name="request">The copy request describing the source and destination objects.</param> /// <param name="partSize">The part size, in bytes, to use if a multipart copy is performed.</param> /// <param name="deleteSource">If true, the source object is deleted after a successful copy (move semantics).</param> /// <param name="useMultipart">Delegate intended to decide, from the object size and part size, whether a multipart copy should be used.</param> /// <returns>A response pairing the original request with the copy result.</returns> private static async Task <CopyObjectRequestResponse> CopyOrMoveObjectAsync(this IAmazonS3 client, CopyObjectRequest request, long partSize, bool deleteSource, Func <long, long, bool> useMultipart) { // Handle operation-cancelled exceptions with retries. ExponentialBackoffAndRetryClient backoffClient = new ExponentialBackoffAndRetryClient(4, 100, 1000) { ExceptionHandlingLogic = (ex) => { if (ex is OperationCanceledException) { return(true); } else { return(false); } } }; try { ParameterTests.NonNull(request, "request"); ParameterTests.OutOfRange(partSize >= Constants.MINIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size must be at least {Constants.MINIMUM_MULTIPART_PART_SIZE} bytes."); ParameterTests.OutOfRange(partSize <= Constants.MAXIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size cannot exceed {Constants.MAXIMUM_MULTIPART_PART_SIZE} bytes."); if (request.SourceKey == request.DestinationKey && request.SourceBucket != null && request.SourceBucket.Equals(request.DestinationBucket, StringComparison.OrdinalIgnoreCase)) { throw new SourceDestinationSameException("The source and destination of the copy operation cannot be the same.", new CopyObjectRequest[] { request }); } // Get the size of the object. GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest { BucketName = request.SourceBucket, Key = request.SourceKey }; long objectSize; GetObjectMetadataResponse metadataResponse; try { metadataResponse = await backoffClient.RunAsync(() => client.GetObjectMetadataAsync(metadataRequest)); objectSize = metadataResponse.ContentLength; // Length in bytes.
} catch (Exception) { throw; } CopyObjectResponse response = null; if (UseMultipart(objectSize, partSize)) { // If it would take parts larger than 5 GiB to stay within 10,000 parts, then this operation // isn't supported for an object this size if (objectSize / partSize > Constants.MAXIMUM_PARTS) { throw new NotSupportedException($"The object size, {objectSize}, cannot be copied in {Constants.MAXIMUM_PARTS} or fewer parts using a part size of {partSize} bytes."); } List <Task <CopyPartResponse> > copyResponses = new List <Task <CopyPartResponse> >(); // This property has a nullable backing private field that when set to // anything non-null causes the x-amz-object-lock-retain-until-date // header to be sent, which in turn results in an exception being thrown // that the bucket is missing an ObjectLockConfiguration InitiateMultipartUploadRequest initiateRequest = request.ConvertTo <InitiateMultipartUploadRequest>("ObjectLockRetainUntilDate"); initiateRequest.BucketName = request.DestinationBucket; initiateRequest.Key = request.DestinationKey; InitiateMultipartUploadResponse initiateResponse = await backoffClient.RunAsync(() => client.InitiateMultipartUploadAsync(initiateRequest)); try { long bytePosition = 0; int counter = 1; // Launch all of the copy parts while (bytePosition < objectSize) { CopyPartRequest copyRequest = request.ConvertTo <CopyPartRequest>("ObjectLockRetainUntilDate"); copyRequest.UploadId = initiateResponse.UploadId; copyRequest.FirstByte = bytePosition; // If we're on the last part, the last byte is the object size minus 1, otherwise the last byte is the part size minus one // added to the current byte position copyRequest.LastByte = ((bytePosition + partSize - 1) >= objectSize) ? objectSize - 1 : bytePosition + partSize - 1; copyRequest.PartNumber = counter++; copyResponses.Add(backoffClient.RunAsync(() => client.CopyPartAsync(copyRequest))); bytePosition += partSize; } IEnumerable <CopyPartResponse> responses = (await Task.WhenAll(copyResponses)).OrderBy(x => x.PartNumber); // Set up to complete the copy. CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest { BucketName = request.DestinationBucket, Key = request.DestinationKey, UploadId = initiateResponse.UploadId }; completeRequest.AddPartETags(responses); // Complete the copy. CompleteMultipartUploadResponse completeUploadResponse = await backoffClient.RunAsync(() => client.CompleteMultipartUploadAsync(completeRequest)); response = completeUploadResponse.CopyProperties <CopyObjectResponse>(); response.SourceVersionId = metadataResponse.VersionId; } catch (AmazonS3Exception) { // Abort the multipart upload so incomplete parts are not left behind, then rethrow. AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest() { BucketName = request.DestinationBucket, Key = request.DestinationKey, UploadId = initiateResponse.UploadId }; await backoffClient.RunAsync(() => client.AbortMultipartUploadAsync(abortRequest)); throw; } } else { response = await backoffClient.RunAsync(() => client.CopyObjectAsync(request)); } if (response.HttpStatusCode != HttpStatusCode.OK) { throw new AmazonS3Exception($"Could not copy object from s3://{request.SourceBucket}/{request.SourceKey} to s3://{request.DestinationBucket}/{request.DestinationKey}. Received response: {(int)response.HttpStatusCode}"); } else { // We already checked to make sure the source and destination weren't the same // and it's safe to delete the source object if (deleteSource) { DeleteObjectRequest deleteRequest = new DeleteObjectRequest() { BucketName = request.SourceBucket, Key = request.SourceKey }; DeleteObjectResponse deleteResponse = await backoffClient.RunAsync(() => client.DeleteObjectAsync(deleteRequest)); if (deleteResponse.HttpStatusCode != HttpStatusCode.NoContent) { throw new AmazonS3Exception($"Could not delete s3://{request.SourceBucket}/{request.SourceKey}. Received response: {(int)deleteResponse.HttpStatusCode}"); } } return(new CopyObjectRequestResponse(request, response)); } } catch (Exception e) { // Any failure is swallowed here and surfaced to the caller as a null result. return(null); } }
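The guard above rejects copies that would need more than Constants.MAXIMUM_PARTS parts at the requested part size. A caller can avoid hitting that guard by deriving a part size from the object size first; a rough sketch under the usual S3 limits (10,000 parts, 5 MiB to 5 GiB per part; the helper name is illustrative, not part of the original extension class):

// Sketch: pick a part size that keeps a multipart copy within the part-count limit.
private static long ChoosePartSize(long objectSize, long maximumParts = 10000)
{
    const long MinPartSize = 5L * 1024 * 1024;        // 5 MiB
    const long MaxPartSize = 5L * 1024 * 1024 * 1024; // 5 GiB
    long size = (objectSize + maximumParts - 1) / maximumParts; // ceiling division
    return Math.Min(MaxPartSize, Math.Max(MinPartSize, size));
}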
public static void MultipartEncryptionTestAPM(IAmazonS3 s3EncryptionClient, IAmazonS3 s3DecryptionClient, string bucketName) { var guid = Guid.NewGuid(); var filePath = Path.Combine(Path.GetTempPath(), $"multi-{guid}.txt"); var retrievedFilepath = Path.Combine(Path.GetTempPath(), $"retrieved-{guid}.txt"); var totalSize = MegaByteSize * 15; UtilityMethods.GenerateFile(filePath, totalSize); string key = $"key-{guid}"; Stream inputStream = File.OpenRead(filePath); try { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest() { BucketName = bucketName, Key = key, StorageClass = S3StorageClass.OneZoneInfrequentAccess, ContentType = "text/html" }; InitiateMultipartUploadResponse initResponse = null; if (IsKMSEncryptionClient(s3EncryptionClient)) { initResponse = s3EncryptionClient.InitiateMultipartUpload(initRequest); } else { initResponse = s3EncryptionClient.EndInitiateMultipartUpload( s3EncryptionClient.BeginInitiateMultipartUpload(initRequest, null, null)); } // Upload part 1 UploadPartRequest uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 1, PartSize = 5 * MegaByteSize, InputStream = inputStream, }; UploadPartResponse up1Response = s3EncryptionClient.EndUploadPart( s3EncryptionClient.BeginUploadPart(uploadRequest, null, null)); // Upload part 2 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 2, PartSize = 5 * MegaByteSize, InputStream = inputStream, }; UploadPartResponse up2Response = s3EncryptionClient.EndUploadPart( s3EncryptionClient.BeginUploadPart(uploadRequest, null, null)); // Upload part 3 uploadRequest = new UploadPartRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId, PartNumber = 3, InputStream = inputStream, IsLastPart = true }; UploadPartResponse up3Response = s3EncryptionClient.EndUploadPart( s3EncryptionClient.BeginUploadPart(uploadRequest, null, null)); ListPartsRequest listPartRequest = new ListPartsRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; ListPartsResponse listPartResponse = s3EncryptionClient.EndListParts( s3EncryptionClient.BeginListParts(listPartRequest, null, null)); Assert.Equal(3, listPartResponse.Parts.Count); Assert.Equal(up1Response.PartNumber, listPartResponse.Parts[0].PartNumber); Assert.Equal(up1Response.ETag, listPartResponse.Parts[0].ETag); Assert.Equal(up2Response.PartNumber, listPartResponse.Parts[1].PartNumber); Assert.Equal(up2Response.ETag, listPartResponse.Parts[1].ETag); Assert.Equal(up3Response.PartNumber, listPartResponse.Parts[2].PartNumber); Assert.Equal(up3Response.ETag, listPartResponse.Parts[2].ETag); listPartRequest.MaxParts = 1; listPartResponse = s3EncryptionClient.EndListParts( s3EncryptionClient.BeginListParts(listPartRequest, null, null)); Assert.Equal(1, listPartResponse.Parts.Count); // Complete the response CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest() { BucketName = bucketName, Key = key, UploadId = initResponse.UploadId }; compRequest.AddPartETags(up1Response, up2Response, up3Response); CompleteMultipartUploadResponse compResponse = s3EncryptionClient.EndCompleteMultipartUpload( s3EncryptionClient.BeginCompleteMultipartUpload(compRequest, null, null)); Assert.Equal(bucketName, compResponse.BucketName); Assert.NotNull(compResponse.ETag); Assert.Equal(key, compResponse.Key); Assert.NotNull(compResponse.Location); // Get the file back from S3 and make 
sure it is still the same. GetObjectRequest getRequest = new GetObjectRequest() { BucketName = bucketName, Key = key }; GetObjectResponse getResponse = null; if (IsKMSEncryptionClient(s3EncryptionClient)) { getResponse = s3DecryptionClient.GetObject(getRequest); } else { getResponse = s3DecryptionClient.EndGetObject( s3DecryptionClient.BeginGetObject(getRequest, null, null)); } getResponse.WriteResponseStreamToFile(retrievedFilepath); UtilityMethods.CompareFiles(filePath, retrievedFilepath); GetObjectMetadataRequest metaDataRequest = new GetObjectMetadataRequest() { BucketName = bucketName, Key = key }; GetObjectMetadataResponse metaDataResponse = s3DecryptionClient.EndGetObjectMetadata( s3DecryptionClient.BeginGetObjectMetadata(metaDataRequest, null, null)); Assert.Equal("text/html", metaDataResponse.Headers.ContentType); } finally { inputStream.Close(); if (File.Exists(filePath)) { File.Delete(filePath); } if (File.Exists(retrievedFilepath)) { File.Delete(retrievedFilepath); } } }
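The Begin/End pairs above follow the classic APM pattern; where a Task-based wrapper is more convenient, Task.Factory.FromAsync can bridge the same calls. A small sketch, assuming the same s3DecryptionClient and getRequest variables used above:

// Sketch: wrap an APM Begin/End pair in a Task so it can be awaited.
GetObjectResponse apmResponse = await Task.Factory.FromAsync(
    s3DecryptionClient.BeginGetObject,
    s3DecryptionClient.EndGetObject,
    getRequest,
    state: null);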
private void UploadToCdn() { try { // one thread only if (Interlocked.CompareExchange(ref work, 1, 0) == 0) { var @continue = false; try { CdnItem item; if (queue.TryDequeue(out item)) { @continue = true; var cdnpath = GetCdnPath(item.Bundle.Path); var key = new Uri(cdnpath).PathAndQuery.TrimStart('/'); var content = Encoding.UTF8.GetBytes(item.Response.Content); var inputStream = new MemoryStream(); if (ClientSettings.GZipEnabled) { using (var zip = new GZipStream(inputStream, CompressionMode.Compress, true)) { zip.Write(content, 0, content.Length); zip.Flush(); } } else { inputStream.Write(content, 0, content.Length); } var checksum = AmazonS3Util.GenerateChecksumForContent(item.Response.Content, true); var config = new AmazonS3Config { RegionEndpoint = RegionEndpoint.GetBySystemName(s3region), UseHttp = true }; using (var s3 = new AmazonS3Client(s3publickey, s3privatekey, config)) { var upload = false; try { var request = new GetObjectMetadataRequest { BucketName = s3bucket, Key = key, }; var response = s3.GetObjectMetadata(request); upload = !string.Equals(checksum, response.Metadata["x-amz-meta-etag"], StringComparison.InvariantCultureIgnoreCase); } catch (AmazonS3Exception ex) { if (ex.StatusCode == HttpStatusCode.NotFound) { upload = true; } else { throw; } } if (upload) { var request = new PutObjectRequest { BucketName = s3bucket, CannedACL = S3CannedACL.PublicRead, AutoCloseStream = true, AutoResetStreamPosition = true, Key = key, ContentType = AmazonS3Util.MimeTypeFromExtension(Path.GetExtension(key).ToLowerInvariant()), InputStream = inputStream }; if (ClientSettings.GZipEnabled) { request.Headers.ContentEncoding = "gzip"; } var cache = TimeSpan.FromDays(365); request.Headers.CacheControl = string.Format("public, max-age={0}", (int)cache.TotalSeconds); request.Headers.ExpiresUtc = DateTime.UtcNow.Add(cache); request.Headers["x-amz-meta-etag"] = checksum; s3.PutObject(request); } else { inputStream.Close(); } item.Bundle.CdnPath = cdnpath; } } } catch (Exception err) { log.Error(err); } finally { work = 0; if (@continue) { Action upload = () => UploadToCdn(); upload.BeginInvoke(null, null); } } } } catch (Exception fatal) { log.Fatal(fatal); } }
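The CDN upload builds its gzip payload inline; a small factored-out sketch of that step (the helper name is illustrative) makes the leaveOpen/rewind details explicit: the stream must be readable from position 0 when the SDK sends it, unless AutoResetStreamPosition is set as it is above. Also note that Cache-Control uses the max-age directive; a misspelled maxage directive is ignored by caches.

// Sketch: gzip a string payload into a MemoryStream that is rewound and ready to upload.
private static MemoryStream GZipToStream(string content)
{
    var output = new MemoryStream();
    using (var zip = new GZipStream(output, CompressionMode.Compress, leaveOpen: true))
    {
        var bytes = Encoding.UTF8.GetBytes(content);
        zip.Write(bytes, 0, bytes.Length);
    }
    // Rewind so the S3 client reads the compressed bytes from the start.
    output.Position = 0;
    return output;
}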
// Always returns an object even if file doesn't exist public FileMetadata FileMetadata( string name ) { FileMetadata results = new FileMetadata { Name = name }; using ( AmazonS3 client = AWSClientFactory.CreateAmazonS3Client( this.amazonKey, this.amazonSecret ) ) { GetObjectMetadataRequest request = new GetObjectMetadataRequest { BucketName = this.amazonBucket, Key = name }; try { GetObjectMetadataResponse response = client.GetObjectMetadata( request ); results.Exists = true; // else AWSSDK threw results.Length = response.ContentLength; results.LastModified = response.LastModified; } catch ( AmazonS3Exception ex ) { if ( ex.ErrorCode == "NoSuchKey" ) { results.Exists = false; // File doesn't exist } else { throw; } } } return results; }
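A usage sketch for the helper above; the storage variable is hypothetical and stands in for an instance of the surrounding class. One caveat: because GetObjectMetadata is a HEAD-style call, a missing key can surface as a 404 with ErrorCode "NotFound" rather than "NoSuchKey", so also checking ex.StatusCode == HttpStatusCode.NotFound is a more robust guard.

// Hypothetical usage of FileMetadata.
FileMetadata meta = storage.FileMetadata("reports/summary.txt");
if (meta.Exists)
{
    Console.WriteLine("{0}: {1} bytes, last modified {2:u}", meta.Name, meta.Length, meta.LastModified);
}
else
{
    Console.WriteLine("{0} does not exist in the bucket.", meta.Name);
}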