/// <summary>
/// Uploads <paramref name="stream"/> to the S3 bucket configured in <paramref name="settings"/>,
/// under a key built from the remote folder, <paramref name="folderName"/> and <paramref name="fileName"/>.
/// The upload is cancellable via TaskCancelToken and reports progress through <paramref name="progress"/>.
/// </summary>
/// <param name="archiveDescription">Stored as the "Description" metadata entry on the S3 object.</param>
private async Task UploadToS3(S3Settings settings, Stream stream, string folderName, string fileName, Progress progress, string archiveDescription)
{
    using (var client = new RavenAwsS3Client(settings.AwsAccessKey, settings.AwsSecretKey, settings.AwsRegionName, settings.BucketName, progress, TaskCancelToken.Token))
    {
        var key = CombinePathAndKey(settings.RemoteFolderName, folderName, fileName);
        await client.PutObject(key, stream, new Dictionary<string, string>
        {
            { "Description", archiveDescription }
        });

        if (_logger.IsInfoEnabled)
        {
            // Fix: the interpolated message was previously passed through string.Format,
            // which re-parses '{' and '}' and throws FormatException when fileName or key
            // contains a brace. Log the interpolated string directly.
            _logger.Info($"Successfully uploaded backup file '{fileName}' " +
                         $"to S3 bucket named: {settings.BucketName}, " +
                         $"with key: {key}");
        }
    }
}
/// <summary>
/// Round-trip smoke test: stores a small object with two metadata entries in the
/// "ravendb" bucket, then reads it back and verifies body and metadata survived.
/// Requires real AWS credentials to be filled in before running.
/// </summary>
public void PutObject()
{
    const string bucketName = "ravendb";
    const string key = "testKey";

    using (var client = new RavenAwsS3Client("<aws_access_key>", "<aws_secret_key>", "<aws_region_for_bucket>"))
    {
        var payload = new MemoryStream(Encoding.UTF8.GetBytes("321"));
        var metadata = new Dictionary<string, string>
        {
            { "property1", "value1" },
            { "property2", "value2" }
        };

        client.PutObject(bucketName, key, payload, metadata, 60 * 60);

        var stored = client.GetObject(bucketName, key);
        Assert.NotNull(stored);

        using (var reader = new StreamReader(stored.Data))
            Assert.Equal("321", reader.ReadToEnd());

        // Metadata keys come back with a provider prefix, so locate them by substring.
        var metaKey1 = stored.Metadata.Keys.Single(x => x.Contains("property1"));
        var metaKey2 = stored.Metadata.Keys.Single(x => x.Contains("property2"));
        Assert.Equal("value1", stored.Metadata[metaKey1]);
        Assert.Equal("value2", stored.Metadata[metaKey2]);
    }
}
/// <summary>
/// Verifies that uploading through a client configured with the wrong AWS region
/// fails with an InvalidOperationException whose message names both the configured
/// region and the bucket's actual region.
/// </summary>
public void can_get_correct_error_s3()
{
    var settings = GetS3Settings();
    string region1 = settings.AwsRegionName;
    // Deliberately misconfigure the region before creating the client.
    string region2 = settings.AwsRegionName = WestRegion2;
    var bucketName = settings.BucketName;

    using (var clientRegion2 = new RavenAwsS3Client(settings, DefaultConfiguration))
    {
        // 1MB payload; new string(char, count) replaces the previous
        // million-iteration StringBuilder.Append loop.
        var payload = new string('a', 1 * 1024 * 1024);

        var blobs = GenerateBlobNames(settings, 1, out _);
        Assert.Equal(1, blobs.Count);
        var key = blobs[0];

        var error2 = Assert.Throws<InvalidOperationException>(() =>
        {
            using (var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(payload)))
            {
                clientRegion2.PutObject(key, memoryStream, new Dictionary<string, string>());
            }
        });

        Assert.Equal($"AWS location is set to '{region2}', but the bucket named: '{bucketName}' is located in: {region1}", error2.Message);
    }
}
/// <summary>
/// Round-trip test against the configured S3 settings: writes a small object with
/// two GUID-valued metadata entries, reads it back, and checks body and metadata.
/// </summary>
public async Task put_object()
{
    var settings = GetS3Settings();

    using (var client = new RavenAwsS3Client(settings, DefaultConfiguration))
    {
        var blobs = GenerateBlobNames(settings, 1, out _);
        Assert.Equal(1, blobs.Count);
        var key = blobs[0];

        var value1 = Guid.NewGuid().ToString();
        var value2 = Guid.NewGuid().ToString();
        var metadata = new Dictionary<string, string>
        {
            { "property1", value1 },
            { "property2", value2 }
        };

        client.PutObject(key, new MemoryStream(Encoding.UTF8.GetBytes("231")), metadata);

        var stored = await client.GetObjectAsync(key);
        Assert.NotNull(stored);

        using (var reader = new StreamReader(stored.Data))
            Assert.Equal("231", reader.ReadToEnd());

        // Metadata keys are returned with a provider prefix; match by substring.
        var metaKey1 = stored.Metadata.Keys.Single(x => x.Contains("property1"));
        var metaKey2 = stored.Metadata.Keys.Single(x => x.Contains("property2"));
        Assert.Equal(value1, stored.Metadata[metaKey1]);
        Assert.Equal(value2, stored.Metadata[metaKey2]);
    }
}
/// <summary>
/// Verifies the two error paths around bucket location: uploading to a missing bucket
/// throws BucketNotFoundException, and uploading through a client configured with the
/// wrong region throws InvalidOperationException naming both regions.
/// </summary>
/// <param name="region1">The region where the bucket is actually created.</param>
/// <param name="region2">A different region used to provoke the mismatch error.</param>
public async Task can_get_correct_error_s3(string region1, string region2)
{
    var bucketName = $"testing-{Guid.NewGuid()}";
    var key = Guid.NewGuid().ToString();

    using (var clientRegion1 = new RavenAwsS3Client(AwsAccessKey, AwsSecretKey, region1, bucketName))
    using (var clientRegion2 = new RavenAwsS3Client(AwsAccessKey, AwsSecretKey, region2, bucketName))
    {
        // make sure that the bucket doesn't exist
        await clientRegion1.DeleteBucket();

        try
        {
            // 1MB payload; new string(char, count) replaces the previous
            // million-iteration StringBuilder.Append loop (built once, used twice).
            var payload = new string('a', 1 * 1024 * 1024);

            var error1 = await Assert.ThrowsAsync<BucketNotFoundException>(async () =>
            {
                using (var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(payload)))
                {
                    await clientRegion1.PutObject(key, memoryStream, new Dictionary<string, string>());
                }
            });
            Assert.Equal($"Bucket name '{bucketName}' doesn't exist!", error1.Message);

            await clientRegion1.PutBucket();

            var error2 = await Assert.ThrowsAsync<InvalidOperationException>(async () =>
            {
                using (var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(payload)))
                {
                    await clientRegion2.PutObject(key, memoryStream, new Dictionary<string, string>());
                }
            });
            // NOTE(review): "as set to" reads like a typo for "is set to", but this assertion
            // presumably mirrors the exact production error message — confirm before changing.
            Assert.Equal($"AWS location as set to {region2}, but the bucket named: '{bucketName}' is located in: {region1}", error2.Message);
        }
        finally
        {
            await clientRegion1.DeleteBucket();
        }
    }
}
/// <summary>
/// Uploads the backup file at <paramref name="backupPath"/> to the S3 bucket from the
/// periodic-export configuration, using the file name as the object key and tagging the
/// object with a full/incremental description.
/// </summary>
private void UploadToS3(string backupPath, PeriodicExportSetup localExportConfigs, bool isFullBackup)
{
    var region = localExportConfigs.AwsRegionEndpoint ?? RavenAwsClient.DefaultRegion;

    using (var client = new RavenAwsS3Client(awsAccessKey, awsSecretKey, region))
    using (var fileStream = File.OpenRead(backupPath))
    {
        var key = Path.GetFileName(backupPath);
        var metadata = new Dictionary<string, string>
        {
            { "Description", GetArchiveDescription(isFullBackup) }
        };

        client.PutObject(localExportConfigs.S3BucketName, key, fileStream, metadata, 60 * 60);

        logger.Info(string.Format("Successfully uploaded backup {0} to S3 bucket {1}, with key {2}",
            key, localExportConfigs.S3BucketName, key));
    }
}
/// <summary>
/// End-to-end bucket lifecycle test for a given region: creates a fresh bucket, stores an
/// object with metadata, confirms a non-empty bucket cannot be deleted, reads the object
/// back, and cleans up the object and bucket afterwards.
/// </summary>
public async Task put_object(string region)
{
    var bucketName = $"testing-{Guid.NewGuid()}";
    var key = $"test-key-{Guid.NewGuid()}";

    using (var client = new RavenAwsS3Client(AwsAccessKey, AwsSecretKey, region, bucketName))
    {
        // make sure that the bucket doesn't exist
        await client.DeleteBucket();

        try
        {
            await client.PutBucket();

            var value1 = Guid.NewGuid().ToString();
            var value2 = Guid.NewGuid().ToString();
            var metadata = new Dictionary<string, string>
            {
                { "property1", value1 },
                { "property2", value2 }
            };

            await client.PutObject(key, new MemoryStream(Encoding.UTF8.GetBytes("231")), metadata);

            // can't delete a bucket with existing objects
            var e = await Assert.ThrowsAsync<StorageException>(async () => await client.DeleteBucket());
            Assert.True(e.Message.Contains("The bucket you tried to delete is not empty"));

            var stored = await client.GetObject(key);
            Assert.NotNull(stored);

            using (var reader = new StreamReader(stored.Data))
                Assert.Equal("231", reader.ReadToEnd());

            // Metadata keys are returned with a provider prefix; match by substring.
            var metaKey1 = stored.Metadata.Keys.Single(x => x.Contains("property1"));
            var metaKey2 = stored.Metadata.Keys.Single(x => x.Contains("property2"));
            Assert.Equal(value1, stored.Metadata[metaKey1]);
            Assert.Equal(value2, stored.Metadata[metaKey2]);
        }
        finally
        {
            await client.DeleteObject(key);
            await client.DeleteBucket();
        }
    }
}
/// <summary>
/// Uploads <paramref name="stream"/> to S3 under the configured remote folder key,
/// tags it with an archive description, logs the result, and then runs the S3
/// retention-policy pass with the same client.
/// </summary>
private void UploadToS3(S3Settings settings, Stream stream, Progress progress)
{
    using (var client = new RavenAwsS3Client(settings, _settings.Configuration, progress, TaskCancelToken.Token))
    {
        var key = CombinePathAndKey(settings.RemoteFolderName);
        var metadata = new Dictionary<string, string>
        {
            { "Description", GetArchiveDescription() }
        };

        client.PutObject(key, stream, metadata);

        if (_logger.IsInfoEnabled)
        {
            _logger.Info($"{ReportSuccess(S3Name)} bucket named: {settings.BucketName}, with key: {key}");
        }

        // Retention runs inside the using so it can reuse the live client.
        new S3RetentionPolicyRunner(_retentionPolicyParameters, client).Execute();
    }
}
/// <summary>
/// Uploads the backup file at <paramref name="backupPath"/> to the configured S3 bucket.
/// Fails fast if the stored AWS credentials could not be decrypted (common under IIS
/// without "load user profile").
/// </summary>
private void UploadToS3(string backupPath, PeriodicExportSetup localExportConfigs, bool isFullBackup)
{
    // Guard clause: refuse to proceed with undecryptable credentials.
    if (awsAccessKey == Constants.DataCouldNotBeDecrypted ||
        awsSecretKey == Constants.DataCouldNotBeDecrypted)
    {
        throw new InvalidOperationException("Could not decrypt the AWS access settings, if you are running on IIS, make sure that load user profile is set to true.");
    }

    var region = localExportConfigs.AwsRegionEndpoint ?? RavenAwsClient.DefaultRegion;

    using (var client = new RavenAwsS3Client(awsAccessKey, awsSecretKey, region))
    using (var fileStream = File.OpenRead(backupPath))
    {
        var key = Path.GetFileName(backupPath);
        var metadata = new Dictionary<string, string>
        {
            { "Description", GetArchiveDescription(isFullBackup) }
        };

        client.PutObject(localExportConfigs.S3BucketName, key, fileStream, metadata, 60 * 60);

        logger.Info(string.Format("Successfully uploaded backup {0} to S3 bucket {1}, with key {2}",
            key, localExportConfigs.S3BucketName, key));
    }
}
/// <summary>
/// Uploads the export file at <paramref name="exportPath"/> to the configured S3 bucket,
/// keyed under the remote folder + <paramref name="fileName"/>. Fails fast if the stored
/// AWS credentials could not be decrypted (common under IIS without "load user profile").
/// </summary>
private async Task UploadToS3(string exportPath, string fileName, bool isFullExport)
{
    // Guard clause: refuse to proceed with undecryptable credentials.
    if (_awsAccessKey == Constants.DataCouldNotBeDecrypted ||
        _awsSecretKey == Constants.DataCouldNotBeDecrypted)
    {
        throw new InvalidOperationException("Could not decrypt the AWS access settings, if you are running on IIS, make sure that load user profile is set to true.");
    }

    var region = _configuration.AwsRegionName ?? RavenAwsClient.DefaultRegion;

    using (var client = new RavenAwsS3Client(_awsAccessKey, _awsSecretKey, region))
    using (var fileStream = File.OpenRead(exportPath))
    {
        var key = CombinePathAndKey(_configuration.S3RemoteFolderName, fileName);
        var metadata = new Dictionary<string, string>
        {
            { "Description", GetArchiveDescription(isFullExport) }
        };

        await client.PutObject(_configuration.S3BucketName, key, fileStream, metadata, 60 * 60);

        if (_logger.IsInfoEnabled)
        {
            _logger.Info(string.Format("Successfully uploaded export {0} to S3 bucket {1}, with key {2}",
                fileName, _configuration.S3BucketName, key));
        }
    }
}
// ReSharper disable once InconsistentNaming
/// <summary>
/// Uploads a payload of <paramref name="sizeInMB"/> megabytes (with the client's
/// single-put and minimum-part thresholds lowered via reflection so multi-part paths
/// trigger at small sizes), reads it back, and verifies body, metadata, and the
/// reported upload progress/type.
/// </summary>
/// <param name="testBlobKeyAsFolder">When true, uses a key containing folder separators.</param>
private static async Task PutObject(string region, int sizeInMB, bool testBlobKeyAsFolder, UploadType uploadType)
{
    var bucketName = $"testing-{Guid.NewGuid()}";
    var key = testBlobKeyAsFolder
        ? $"{Guid.NewGuid()}/folder/testKey"
        : Guid.NewGuid().ToString();

    var uploadProgress = new UploadProgress();

    // Private fields are tuned through reflection-built setters so the multi-part
    // code paths can be exercised with small payloads.
    var maxUploadPutObjectInBytesSetter = ExpressionHelper.CreateFieldSetter<RavenAwsS3Client, int>("MaxUploadPutObjectSizeInBytes");
    var minOnePartUploadSizeLimitInBytesSetter = ExpressionHelper.CreateFieldSetter<RavenAwsS3Client, int>("MinOnePartUploadSizeLimitInBytes");

    using (var client = new RavenAwsS3Client(AwsAccessKey, AwsSecretKey, region, bucketName, uploadProgress))
    {
        maxUploadPutObjectInBytesSetter(client, 10 * 1024 * 1024); // 10MB
        minOnePartUploadSizeLimitInBytesSetter(client, 7 * 1024 * 1024); // 7MB

        // make sure that the bucket doesn't exist
        await client.DeleteBucket();

        try
        {
            await client.PutBucket();

            var value1 = Guid.NewGuid().ToString();
            var value2 = Guid.NewGuid().ToString();

            // new string(char, count) replaces the previous per-character
            // StringBuilder.Append loop.
            var payload = new string('a', sizeInMB * 1024 * 1024);

            long streamLength;
            using (var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(payload)))
            {
                streamLength = memoryStream.Length;
                await client.PutObject(key, memoryStream, new Dictionary<string, string>
                {
                    { "property1", value1 },
                    { "property2", value2 }
                });
            }

            var @object = await client.GetObject(key);
            Assert.NotNull(@object);

            using (var reader = new StreamReader(@object.Data))
                Assert.Equal(payload, reader.ReadToEnd());

            var property1 = @object.Metadata.Keys.Single(x => x.Contains("property1"));
            var property2 = @object.Metadata.Keys.Single(x => x.Contains("property2"));
            Assert.Equal(value1, @object.Metadata[property1]);
            Assert.Equal(value2, @object.Metadata[property2]);

            Assert.Equal(UploadState.Done, uploadProgress.UploadState);
            Assert.Equal(uploadType, uploadProgress.UploadType);
            Assert.Equal(streamLength, uploadProgress.TotalInBytes);
            Assert.Equal(streamLength, uploadProgress.UploadedInBytes);
        }
        finally
        {
            await client.DeleteObject(key);
            await client.DeleteBucket();
        }
    }
}
// ReSharper disable once InconsistentNaming
/// <summary>
/// Uploads a payload of <paramref name="sizeInMB"/> megabytes with the client's
/// single-put and minimum-part thresholds lowered (so multi-part paths trigger at
/// small sizes), reads it back, and verifies body, metadata, and the reported
/// upload progress/type.
/// </summary>
/// <param name="testBlobKeyAsFolder">When true, appends "/" so the key acts as a folder.</param>
private async Task PutObject(int sizeInMB, bool testBlobKeyAsFolder, UploadType uploadType)
{
    var settings = GetS3Settings();

    var blobs = GenerateBlobNames(settings, 1, out _);
    Assert.Equal(1, blobs.Count);

    var key = $"{blobs[0]}";
    if (testBlobKeyAsFolder)
    {
        key += "/";
    }

    var progress = new Progress();
    using (var client = new RavenAwsS3Client(settings, DefaultConfiguration, progress))
    {
        // Lower the thresholds so multi-part upload is exercised with small payloads.
        client.MaxUploadPutObject = new Sparrow.Size(10, SizeUnit.Megabytes);
        client.MinOnePartUploadSizeLimit = new Sparrow.Size(7, SizeUnit.Megabytes);

        var value1 = Guid.NewGuid().ToString();
        var value2 = Guid.NewGuid().ToString();

        // new string(char, count) replaces the previous per-character
        // StringBuilder.Append loop.
        var payload = new string('a', sizeInMB * 1024 * 1024);

        long streamLength;
        using (var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(payload)))
        {
            streamLength = memoryStream.Length;
            client.PutObject(key, memoryStream, new Dictionary<string, string>
            {
                { "property1", value1 },
                { "property2", value2 }
            });
        }

        var @object = await client.GetObjectAsync(key);
        Assert.NotNull(@object);

        using (var reader = new StreamReader(@object.Data))
            Assert.Equal(payload, reader.ReadToEnd());

        var property1 = @object.Metadata.Keys.Single(x => x.Contains("property1"));
        var property2 = @object.Metadata.Keys.Single(x => x.Contains("property2"));
        Assert.Equal(value1, @object.Metadata[property1]);
        Assert.Equal(value2, @object.Metadata[property2]);

        Assert.Equal(UploadState.Done, progress.UploadProgress.UploadState);
        Assert.Equal(uploadType, progress.UploadProgress.UploadType);
        Assert.Equal(streamLength, progress.UploadProgress.TotalInBytes);
        Assert.Equal(streamLength, progress.UploadProgress.UploadedInBytes);
    }
}
// ReSharper disable once InconsistentNaming
/// <summary>
/// Uploads a payload of <paramref name="sizeInMB"/> megabytes with lowered multi-part
/// thresholds, reads it back, and verifies body, metadata, and upload progress.
/// When <paramref name="noAsciiDbName"/> is true, the key and one metadata pair contain
/// non-ASCII characters, and the read-back check expects them percent-encoded.
/// </summary>
/// <param name="testBlobKeyAsFolder">When true, appends "/" so the key acts as a folder.</param>
private async Task PutObject(int sizeInMB, bool testBlobKeyAsFolder, UploadType uploadType, bool noAsciiDbName)
{
    var settings = GetS3Settings();

    var blobs = GenerateBlobNames(settings, 1, out _);
    Assert.Equal(1, blobs.Count);

    var key = "";
    var progress = new Progress();
    using (var client = new RavenAwsS3Client(settings, DefaultConfiguration, progress))
    {
        // Lower the thresholds so multi-part upload is exercised with small payloads.
        client.MaxUploadPutObject = new Sparrow.Size(10, SizeUnit.Megabytes);
        client.MinOnePartUploadSizeLimit = new Sparrow.Size(7, SizeUnit.Megabytes);

        var property1 = "property1";
        var property2 = "property2";
        var value1 = Guid.NewGuid().ToString();
        var value2 = Guid.NewGuid().ToString();

        if (noAsciiDbName)
        {
            // Non-ASCII key and metadata exercise the client's escaping logic.
            string dateStr = DateTime.Now.ToString("yyyy-MM-dd-HH-mm-ss");
            key = $"{dateStr}.ravendb-żżżרייבן-A-backup/{dateStr}.ravendb-full-backup";
            property1 = "Description-żżרייבן";
            value1 = "ravendb-żżżרייבן-A-backup";
        }
        else
        {
            key = $"{blobs[0]}";
        }

        if (testBlobKeyAsFolder)
        {
            key += "/";
        }

        // new string(char, count) replaces the previous per-character
        // StringBuilder.Append loop.
        var payload = new string('a', sizeInMB * 1024 * 1024);

        long streamLength;
        using (var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(payload)))
        {
            streamLength = memoryStream.Length;
            client.PutObject(key, memoryStream, new Dictionary<string, string>
            {
                { property1, value1 },
                { property2, value2 }
            });
        }

        var @object = await client.GetObjectAsync(key);
        Assert.NotNull(@object);

        using (var reader = new StreamReader(@object.Data))
            Assert.Equal(payload, reader.ReadToEnd());

        // Non-ASCII metadata comes back percent-encoded (and lower-cased in the key).
        var property1check = @object.Metadata.Keys.Single(x => x.Contains(Uri.EscapeDataString(property1).ToLower()));
        var property2check = @object.Metadata.Keys.Single(x => x.Contains(property2));
        Assert.Equal(Uri.EscapeDataString(value1), @object.Metadata[property1check]);
        Assert.Equal(value2, @object.Metadata[property2check]);

        Assert.Equal(UploadState.Done, progress.UploadProgress.UploadState);
        Assert.Equal(uploadType, progress.UploadProgress.UploadType);
        Assert.Equal(streamLength, progress.UploadProgress.TotalInBytes);
        Assert.Equal(streamLength, progress.UploadProgress.UploadedInBytes);
    }
}