public static void SetBucketLifecycle()
{
    try
    {
        var setBucketLifecycleRequest = new SetBucketLifecycleRequest(bucketName);
        LifecycleRule lcr1 = new LifecycleRule()
        {
            ID = "delete obsoleted files",
            Prefix = "obsoleted/",
            Status = RuleStatus.Enabled,
            ExpriationDays = 3
        };
        LifecycleRule lcr2 = new LifecycleRule()
        {
            ID = "delete temporary files",
            Prefix = "temporary/",
            Status = RuleStatus.Enabled,
            ExpirationTime = DateTime.Parse("2022-10-12T00:00:00.000Z")
        };
        setBucketLifecycleRequest.AddLifecycleRule(lcr1);
        setBucketLifecycleRequest.AddLifecycleRule(lcr2);
        client.SetBucketLifecycle(setBucketLifecycleRequest);
    }
    catch (OssException ex)
    {
        Console.WriteLine("Failed with error code: {0}; Error info: {1}. \nRequestID:{2}\tHostID:{3}",
            ex.ErrorCode, ex.Message, ex.RequestId, ex.HostId);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Failed with error info: {0}", ex.Message);
    }
}
public static void InvalidLifecycleRule(this IGuardClause guardClause, LifecycleRule lifecycleRule)
{
    if (lifecycleRule.DaysFromHidingToDeleting == 0)
    {
        throw new B2InvalidLifecycleRuleException("Setting zero days to delete files is not allowed.");
    }
    if (lifecycleRule.DaysFromUploadingToHiding == 0)
    {
        throw new B2InvalidLifecycleRuleException("Setting zero days to hide files is not allowed.");
    }
    // Both day counts unset would make the rule a no-op; the original check compared the same
    // property twice, so it could never fire.
    if (lifecycleRule.DaysFromHidingToDeleting == null && lifecycleRule.DaysFromUploadingToHiding == null)
    {
        throw new B2InvalidLifecycleRuleException("Setting both days to delete and days to hide to null is not allowed.");
    }
}
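A minimal call-site sketch for the guard extension above, assuming the Ardalis.GuardClauses Guard.Against entry point and only the two nullable day properties referenced in the check; treat it as an illustration, not the library's own example.

// Sketch: validate a rule before applying it to a bucket.
// Guard.Against is assumed to come from Ardalis.GuardClauses; the rule values are placeholders.
var rule = new LifecycleRule
{
    DaysFromUploadingToHiding = 30, // hide current versions after 30 days
    DaysFromHidingToDeleting = 7    // delete hidden versions a week later
};
Guard.Against.InvalidLifecycleRule(rule); // throws B2InvalidLifecycleRuleException if the day settings are invalid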
public void LifecycleBasicSettingTest()
{
    LifecycleRule rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "test";
    rule.Status = RuleStatus.Enabled;
    rule.ExpriationDays = 200;
    Test(rule);

    rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "object";
    rule.Status = RuleStatus.Disabled;
    rule.ExpriationDays = 365;
    Test(rule);
}
private static void TestLifecycleFilterPredicate(LifecycleFilterPredicate predicate, AmazonS3Client client = null)
{
    var filter = new LifecycleFilter()
    {
        LifecycleFilterPredicate = predicate
    };
    var rule = new LifecycleRule
    {
        Filter = filter,
        Status = LifecycleRuleStatus.Enabled,
        Transitions = new List<LifecycleTransition>()
        {
            new LifecycleTransition
            {
                Days = 1,
                StorageClass = S3StorageClass.Glacier
            }
        }
    };
    if (client == null)
        client = Client;

    client.PutLifecycleConfiguration(new PutLifecycleConfigurationRequest
    {
        BucketName = bucketName,
        Configuration = new LifecycleConfiguration
        {
            Rules = new List<LifecycleRule> { rule }
        }
    });

    var actualConfig = S3TestUtils.WaitForConsistency(() =>
    {
        var res = client.GetLifecycleConfiguration(bucketName);
        return res.Configuration?.Rules?.Count == 1 ? res.Configuration : null;
    });

    Assert.IsNotNull(actualConfig);
    Assert.IsNotNull(actualConfig.Rules);
    Assert.AreEqual(1, actualConfig.Rules.Count);
    AssertRulesAreEqual(rule, actualConfig.Rules[0]);
}
private static void SetBucketLifecycle()
{
    try
    {
        SetBucketLifecycleRequest request = new SetBucketLifecycleRequest()
        {
            BucketName = bucketName,
            Configuration = new LifecycleConfiguration(),
        };
        LifecycleRule rule1 = new LifecycleRule();
        rule1.Id = "rule1";
        rule1.Prefix = "prefix";
        rule1.Status = RuleStatusEnum.Enabled;
        rule1.Expiration.Days = 30;

        Transition transition = new Transition()
        {
            Date = new DateTime(2018, 12, 30, 0, 0, 0),
            StorageClass = StorageClassEnum.Warm
        };
        rule1.Transitions.Add(transition);

        NoncurrentVersionTransition noncurrentVersionTransition = new NoncurrentVersionTransition()
        {
            NoncurrentDays = 30,
            StorageClass = StorageClassEnum.Cold,
        };
        rule1.NoncurrentVersionTransitions.Add(noncurrentVersionTransition);
        rule1.NoncurrentVersionExpiration.NoncurrentDays = 30;

        request.Configuration.Rules.Add(rule1);
        SetBucketLifecycleResponse response = client.SetBucketLifecycle(request);
        Console.WriteLine("Set bucket lifecycle response: {0}", response.StatusCode);
    }
    catch (ObsException ex)
    {
        Console.WriteLine("Exception errorcode: {0}, when set bucket lifecycle.", ex.ErrorCode);
        Console.WriteLine("Exception errormessage: {0}", ex.ErrorMessage);
    }
}
public MyStack(Construct scope, string id, IStackProps props) : base(scope, id, props)
{
    Bucket bucket = new Bucket(this, "MyFirstBucket", new BucketProps
    {
        Versioned = true,
        BucketName = bn,
        Encryption = BucketEncryption.KmsManaged
    });
    var lifecycleRule = new LifecycleRule
    {
        Id = ruleName,
        ExpirationInDays = 30
    };
    bucket.AddLifecycleRule(lifecycleRule);
}
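A hedged variant of the rule above, scoped to a prefix and cleaning up abandoned multipart uploads. It reuses the Duration-based CDK LifecycleRule properties that appear in the benchmark stack later in this section; the prefix and day counts are placeholders.

// Sketch only: prefix-scoped expiration plus multipart-upload cleanup on the same bucket.
var prefixRule = new LifecycleRule
{
    Enabled = true,
    Prefix = "reports/",                                   // placeholder prefix
    Expiration = Duration.Days(30),                        // expire current objects after 30 days
    AbortIncompleteMultipartUploadAfter = Duration.Days(1) // drop stalled multipart uploads after a day
};
bucket.AddLifecycleRule(prefixRule);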
private static void AssertRulesAreEqual(LifecycleRule expected, LifecycleRule actual)
{
    Assert.IsFalse(string.IsNullOrEmpty(actual.Id));
#pragma warning disable 618
    Assert.AreEqual(expected.Prefix, actual.Prefix);
#pragma warning restore 618
    AssertFiltersAreEqual(expected.Filter, actual.Filter);
    Assert.AreEqual(expected.Transitions.Count, actual.Transitions.Count);
    Assert.AreEqual(expected.NoncurrentVersionTransitions.Count, actual.NoncurrentVersionTransitions.Count);
    if (expected.AbortIncompleteMultipartUpload == null)
    {
        Assert.IsNull(actual.AbortIncompleteMultipartUpload);
    }
    else
    {
        Assert.AreEqual(expected.AbortIncompleteMultipartUpload.DaysAfterInitiation,
            actual.AbortIncompleteMultipartUpload.DaysAfterInitiation);
    }
    Assert.AreEqual(expected.Status, actual.Status);
    if (expected.Expiration == null)
    {
        Assert.IsNull(actual.Expiration);
    }
    else
    {
        Assert.AreEqual(expected.Expiration.Days, actual.Expiration.Days);
        Assert.AreEqual(expected.Expiration.ExpiredObjectDeleteMarker, actual.Expiration.ExpiredObjectDeleteMarker);
    }
#pragma warning disable 618
    Assert.AreEqual(expected.Transition.Days, actual.Transition.Days);
    if (expected.NoncurrentVersionTransition == null)
    {
        Assert.IsNull(actual.NoncurrentVersionTransition);
    }
    else
    {
        Assert.AreEqual(expected.NoncurrentVersionTransition.NoncurrentDays,
            actual.NoncurrentVersionTransition.NoncurrentDays);
    }
#pragma warning restore 618
}
public void CreateExpirationRule(string bucket, string prefix, int expirationDays, string description)
{
    var rule = new LifecycleRule
    {
        Id = description,
        Prefix = prefix,
        Status = LifecycleRuleStatus.Enabled,
        Expiration = new LifecycleRuleExpiration { Days = expirationDays }
    };
    var lifecycleConfiguration = new LifecycleConfiguration
    {
        Rules = new List<LifecycleRule> { rule }
    };
    _s3Client.PutLifecycleConfiguration(bucket, lifecycleConfiguration);
}
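A companion sketch for checking that the rule was stored, using the same GetLifecycleConfiguration call exercised by the filter-predicate test earlier in this section; _s3Client and the hypothetical PrintLifecycleRules name are assumptions.

// Sketch: read the lifecycle configuration back and list the rules that were applied.
public void PrintLifecycleRules(string bucket)
{
    var response = _s3Client.GetLifecycleConfiguration(bucket);
    foreach (var rule in response.Configuration.Rules)
    {
        Console.WriteLine("Rule '{0}' is {1}", rule.Id, rule.Status);
    }
}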
public static void SetBucketLifecycle(string bucketName)
{
    try
    {
        var setBucketLifecycleRequest = new SetBucketLifecycleRequest(bucketName);
        // Rule 1: expire objects under "obsoleted/" three days after their last modification.
        LifecycleRule lcr1 = new LifecycleRule()
        {
            ID = "delete obsoleted files",
            Prefix = "obsoleted/",
            Status = RuleStatus.Enabled,
            ExpriationDays = 3
        };
        // Rule 2: expire objects under "temporary/" at a fixed date.
        LifecycleRule lcr2 = new LifecycleRule()
        {
            ID = "delete temporary files",
            Prefix = "temporary/",
            Status = RuleStatus.Enabled,
            ExpirationTime = DateTime.Parse("2022-10-12T00:00:00.000Z")
        };
        setBucketLifecycleRequest.AddLifecycleRule(lcr1);
        setBucketLifecycleRequest.AddLifecycleRule(lcr2);
        client.SetBucketLifecycle(setBucketLifecycleRequest);
        Console.WriteLine("Set bucket:{0} Lifecycle succeeded ", bucketName);
    }
    catch (OssException ex)
    {
        Console.WriteLine("Failed with error code: {0}; Error info: {1}. \nRequestID:{2}\tHostID:{3}",
            ex.ErrorCode, ex.Message, ex.RequestId, ex.HostId);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Failed with error info: {0}", ex.Message);
    }
}
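A hedged companion to the setter above: listing the active rules and then clearing the whole configuration, using the GetBucketLifecycle and DeleteBucketLifecycle calls that the versioning test later in this section relies on. The client field and the PrintAndClearLifecycle name are assumptions.

// Sketch: inspect the rules that were just applied, then remove the lifecycle configuration entirely.
public static void PrintAndClearLifecycle(string bucketName)
{
    var rules = client.GetBucketLifecycle(bucketName);
    foreach (var rule in rules)
    {
        Console.WriteLine("Rule {0}: prefix '{1}', status {2}", rule.ID, rule.Prefix, rule.Status);
    }
    client.DeleteBucketLifecycle(bucketName); // removes every lifecycle rule from the bucket
}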
internal CdkStack(Construct scope, string id, IStackProps props = null) : base(scope, id, props)
{
    var stackProps = ReportStackProps.ParseOrDefault(props);
    var dframeWorkerLogGroup = "MagicOnionBenchWorkerLogGroup";
    var dframeMasterLogGroup = "MagicOnionBenchMasterLogGroup";
    var benchNetwork = stackProps.GetBenchNetwork();
    var recreateMagicOnionTrigger = stackProps.GetBenchmarkServerBinariesHash();

    // s3
    var s3 = new Bucket(this, "Bucket", new BucketProps
    {
        AutoDeleteObjects = true,
        RemovalPolicy = RemovalPolicy.DESTROY,
        AccessControl = BucketAccessControl.PRIVATE,
    });
    var lifecycleRule = new LifecycleRule
    {
        Enabled = true,
        Prefix = "reports/",
        Expiration = Duration.Days(stackProps.DaysKeepReports),
        AbortIncompleteMultipartUploadAfter = Duration.Days(1),
    };
    s3.AddLifecycleRule(lifecycleRule);
    s3.AddToResourcePolicy(new PolicyStatement(new PolicyStatementProps
    {
        Sid = "AllowPublicRead",
        Effect = Effect.ALLOW,
        Principals = new[] { new AnyPrincipal() },
        Actions = new[] { "s3:GetObject*" },
        Resources = new[] { $"{s3.BucketArn}/html/*" },
    }));
    s3.AddToResourcePolicy(new PolicyStatement(new PolicyStatementProps
    {
        Sid = "AllowAwsAccountAccess",
        Effect = Effect.ALLOW,
        Principals = new[] { new AccountRootPrincipal() },
        Actions = new[] { "s3:*" },
        Resources = new[] { $"{s3.BucketArn}/*" },
    }));

    // s3 deploy
    var masterDllDeployment = new BucketDeployment(this, "DeployMasterDll", new BucketDeploymentProps
    {
        DestinationBucket = s3,
        Sources = new[] { Source.Asset(Path.Combine(Directory.GetCurrentDirectory(), $"out/linux/server")) },
        DestinationKeyPrefix = $"assembly/linux/server"
    });
    var userdataDeployment = new BucketDeployment(this, "UserData", new BucketDeploymentProps
    {
        DestinationBucket = s3,
        Sources = new[] { Source.Asset(Path.Combine(Directory.GetCurrentDirectory(), "userdata/")) },
        DestinationKeyPrefix = "userdata/"
    });

    // docker deploy
    var dockerImage = new DockerImageAsset(this, "dframeWorkerImage", new DockerImageAssetProps
    {
        Directory = Path.Combine(Directory.GetCurrentDirectory(), "app"),
        File = "ConsoleAppEcs/Dockerfile.Ecs",
    });
    var dframeImage = ContainerImage.FromDockerImageAsset(dockerImage);

    // network
    var vpc = new Vpc(this, "Vpc", new VpcProps
    {
        MaxAzs = 2,
        NatGateways = 0,
        SubnetConfiguration = new[] { new SubnetConfiguration { Name = "public", SubnetType = SubnetType.PUBLIC } },
    });
    var allsubnets = new SubnetSelection { Subnets = vpc.PublicSubnets };
    var singleSubnets = new SubnetSelection { Subnets = new[] { vpc.PublicSubnets.First() } };
    var sg = new SecurityGroup(this, "MasterSg", new SecurityGroupProps
    {
        AllowAllOutbound = true,
        Vpc = vpc,
    });
    foreach (var subnet in vpc.PublicSubnets)
    {
        sg.AddIngressRule(Peer.Ipv4(vpc.VpcCidrBlock), Port.AllTcp(), "VPC", true);
    }

    // service discovery
    var serviceDiscoveryDomain = "local";
    var serverMapName = "server";
    var dframeMapName = "dframe-master";
    var ns = new PrivateDnsNamespace(this, "Namespace", new PrivateDnsNamespaceProps
    {
        Vpc = vpc,
        Name = serviceDiscoveryDomain,
    });
    var serviceDiscoveryServer = ns.CreateService("server", new DnsServiceProps
    {
        Name = serverMapName,
        DnsRecordType = DnsRecordType.A,
        RoutingPolicy = RoutingPolicy.MULTIVALUE,
    });

    // alb
    var albDnsName = "benchmark-alb";
    var benchToMagicOnionDnsName = benchNetwork.RequireAlb
        ? $"{benchNetwork.EndpointScheme}://{albDnsName}.{stackProps.AlbDomain.domain}"
        : $"{benchNetwork.EndpointScheme}://{serverMapName}.{serviceDiscoveryDomain}";
    IApplicationTargetGroup grpcTargetGroup = null;
    IApplicationTargetGroup httpsTargetGroup = null;
    if (benchNetwork.RequireAlb)
    {
        // route53
        var hostedZone = HostedZone.FromHostedZoneAttributes(this, "HostedZone", new HostedZoneAttributes
        {
            HostedZoneId = stackProps.AlbDomain.zoneId,
            ZoneName = stackProps.AlbDomain.domain,
        });
        // acm
        var certificate = new DnsValidatedCertificate(this, "certificate", new DnsValidatedCertificateProps
        {
            DomainName = $"{albDnsName}.{hostedZone.ZoneName}",
            HostedZone = hostedZone,
        });
        // alb
        var lb = new ApplicationLoadBalancer(this, "LB", new ApplicationLoadBalancerProps
        {
            Vpc = vpc,
            VpcSubnets = allsubnets,
            SecurityGroup = new SecurityGroup(this, "AlbSg", new SecurityGroupProps
            {
                AllowAllOutbound = true,
                Vpc = vpc,
            }),
            InternetFacing = false,
            Http2Enabled = true,
        });
        grpcTargetGroup = AddGrpcTargetGroup(benchNetwork, vpc, certificate, lb);
        httpsTargetGroup = AddHttpsTargetGroup(benchNetwork, vpc, certificate, lb);
        // Dns Record
        _ = new CnameRecord(this, "alb-alias-record", new CnameRecordProps
        {
            RecordName = $"{albDnsName}.{stackProps.AlbDomain.domain}",
            Ttl = Duration.Seconds(60),
            Zone = hostedZone,
            DomainName = lb.LoadBalancerDnsName,
        });
    }

    // iam
    var iamEc2MagicOnionRole = GetIamEc2MagicOnionRole(s3, serviceDiscoveryServer);
    var iamEcsTaskExecuteRole = GetIamEcsTaskExecuteRole(new[] { dframeWorkerLogGroup, dframeMasterLogGroup });
    var iamDFrameTaskDefRole = GetIamEcsDframeTaskDefRole(s3);
    var iamWorkerTaskDefRole = GetIamEcsWorkerTaskDefRole(s3);

    // secrets
    var ddToken = stackProps.UseEc2DatadogAgentProfiler || stackProps.UseFargateDatadogAgentProfiler
        ? Amazon.CDK.AWS.SecretsManager.Secret.FromSecretNameV2(this, "dd-token", "magiconion-benchmark-datadog-token")
        : null;

    // MagicOnion
    var asg = new AutoScalingGroup(this, "MagicOnionAsg", new AutoScalingGroupProps
    {
        // Monitoring is default DETAILED.
        SpotPrice = "1.0", // 0.0096 for spot price average for m3.medium
        Vpc = vpc,
        SecurityGroup = sg,
        VpcSubnets = singleSubnets,
        InstanceType = stackProps.MagicOnionInstanceType,
        DesiredCapacity = 1,
        MaxCapacity = 1,
        MinCapacity = 0,
        AssociatePublicIpAddress = true,
        MachineImage = new AmazonLinuxImage(new AmazonLinuxImageProps
        {
            CpuType = AmazonLinuxCpuType.X86_64,
            Generation = AmazonLinuxGeneration.AMAZON_LINUX_2,
            Storage = AmazonLinuxStorage.GENERAL_PURPOSE,
            Virtualization = AmazonLinuxVirt.HVM,
        }),
        AllowAllOutbound = true,
        GroupMetrics = new[] { GroupMetrics.All() },
        Role = iamEc2MagicOnionRole,
        UpdatePolicy = UpdatePolicy.ReplacingUpdate(),
        Signals = Signals.WaitForCount(1, new SignalsOptions
        {
            Timeout = Duration.Minutes(10),
        }),
    });
    asg.AddSecretsReadGrant(ddToken, () => stackProps.UseEc2DatadogAgentProfiler);
    var userdata = GetUserData(recreateMagicOnionTrigger, s3.BucketName, stackProps.BenchmarkBinaryNames,
        serviceDiscoveryServer.ServiceId, stackProps.UseEc2CloudWatchAgentProfiler, stackProps.UseEc2DatadogAgentProfiler);
    asg.AddUserData(userdata);
    asg.UserData.AddSignalOnExitCommand(asg);
    asg.Node.AddDependency(masterDllDeployment);
    asg.Node.AddDependency(userdataDeployment);

    if (stackProps.EnableMagicOnionScaleInCron)
    {
        asg.ScaleOnSchedule("ScheduleOut", new BasicScheduledActionProps
        {
            DesiredCapacity = 1,
            MaxCapacity = 1,
            // AM9:00 (JST+9) on Monday to Wednesday
            Schedule = Schedule.Expression("0 0 * 1-3 *"),
        });
        asg.ScaleOnSchedule("ScheduleIn", new BasicScheduledActionProps
        {
            DesiredCapacity = 0,
            MaxCapacity = 0,
            // PM9:00 (JST+9) on Everyday
            Schedule = Schedule.Expression("0 12 * 1-7 *"),
        });
    }

    if (benchNetwork.RequireAlb)
    {
        asg.AttachToApplicationTargetGroup(grpcTargetGroup);
        asg.AttachToApplicationTargetGroup(httpsTargetGroup);
    }

    // ECS
    var cluster = new Cluster(this, "WorkerCluster", new ClusterProps
    {
        Vpc = vpc,
    });
    cluster.Node.AddDependency(asg); // wait until asg is up

    // dframe-worker
    var dframeWorkerContainerName = "worker";
    var dframeWorkerTaskDef = new FargateTaskDefinition(this, "DFrameWorkerTaskDef", new FargateTaskDefinitionProps
    {
        ExecutionRole = iamEcsTaskExecuteRole,
        TaskRole = iamWorkerTaskDefRole,
        Cpu = stackProps.WorkerFargate.CpuSize,
        MemoryLimitMiB = stackProps.WorkerFargate.MemorySize,
    });
    dframeWorkerTaskDef.AddContainer(dframeWorkerContainerName, new ContainerDefinitionOptions
    {
        Image = dframeImage,
        Command = new[] { "--worker-flag" },
        Environment = new Dictionary<string, string>
        {
            { "DFRAME_MASTER_CONNECT_TO_HOST", $"{dframeMapName}.{serviceDiscoveryDomain}" },
            { "DFRAME_MASTER_CONNECT_TO_PORT", "12345" },
            { "BENCH_SERVER_HOST", benchToMagicOnionDnsName },
            { "BENCH_REPORTID", stackProps.ReportId },
            { "BENCH_S3BUCKET", s3.BucketName },
        },
        Logging = LogDriver.AwsLogs(new AwsLogDriverProps
        {
            LogGroup = new LogGroup(this, "WorkerLogGroup", new LogGroupProps
            {
                LogGroupName = dframeWorkerLogGroup,
                RemovalPolicy = RemovalPolicy.DESTROY,
                Retention = RetentionDays.TWO_WEEKS,
            }),
            StreamPrefix = dframeWorkerLogGroup,
        }),
    });
    dframeWorkerTaskDef.AddDatadogContainer($"{dframeWorkerContainerName}-datadog", ddToken, () => stackProps.UseFargateDatadogAgentProfiler);
    var dframeWorkerService = new FargateService(this, "DFrameWorkerService", new FargateServiceProps
    {
        ServiceName = "DFrameWorkerService",
        DesiredCount = 0,
        Cluster = cluster,
        TaskDefinition = dframeWorkerTaskDef,
        VpcSubnets = singleSubnets,
        SecurityGroups = new[] { sg },
        PlatformVersion = FargatePlatformVersion.VERSION1_4,
        MinHealthyPercent = 0,
        AssignPublicIp = true,
    });

    // dframe-master
    var dframeMasterTaskDef = new FargateTaskDefinition(this, "DFrameMasterTaskDef", new FargateTaskDefinitionProps
    {
        ExecutionRole = iamEcsTaskExecuteRole,
        TaskRole = iamDFrameTaskDefRole,
        Cpu = stackProps.MasterFargate.CpuSize,
        MemoryLimitMiB = stackProps.MasterFargate.MemorySize,
    });
    dframeMasterTaskDef.AddContainer("dframe", new ContainerDefinitionOptions
    {
        Image = dframeImage,
        Environment = new Dictionary<string, string>
        {
            { "DFRAME_CLUSTER_NAME", cluster.ClusterName },
            { "DFRAME_MASTER_SERVICE_NAME", "DFrameMasterService" },
            { "DFRAME_WORKER_CONTAINER_NAME", dframeWorkerContainerName },
            { "DFRAME_WORKER_SERVICE_NAME", dframeWorkerService.ServiceName },
            { "DFRAME_WORKER_TASK_NAME", Fn.Select(1, Fn.Split("/", dframeWorkerTaskDef.TaskDefinitionArn)) },
            { "DFRAME_WORKER_IMAGE", dockerImage.ImageUri },
            { "BENCH_REPORTID", stackProps.ReportId },
            { "BENCH_S3BUCKET", s3.BucketName },
        },
        Logging = LogDriver.AwsLogs(new AwsLogDriverProps
        {
            LogGroup = new LogGroup(this, "MasterLogGroup", new LogGroupProps
            {
                LogGroupName = dframeMasterLogGroup,
                RemovalPolicy = RemovalPolicy.DESTROY,
                Retention = RetentionDays.TWO_WEEKS,
            }),
            StreamPrefix = dframeMasterLogGroup,
        }),
    });
    dframeMasterTaskDef.AddDatadogContainer($"dframe-datadog", ddToken, () => stackProps.UseFargateDatadogAgentProfiler);
    var dframeMasterService = new FargateService(this, "DFrameMasterService", new FargateServiceProps
    {
        ServiceName = "DFrameMasterService",
        DesiredCount = 1,
        Cluster = cluster,
        TaskDefinition = dframeMasterTaskDef,
        VpcSubnets = singleSubnets,
        SecurityGroups = new[] { sg },
        PlatformVersion = FargatePlatformVersion.VERSION1_4,
        MinHealthyPercent = 0,
        AssignPublicIp = true,
    });
    dframeMasterService.EnableCloudMap(new CloudMapOptions
    {
        CloudMapNamespace = ns,
        Name = dframeMapName,
        DnsRecordType = DnsRecordType.A,
        DnsTtl = Duration.Seconds(300),
    });

    // output
    new CfnOutput(this, "ReportUrl", new CfnOutputProps { Value = $"https://{s3.BucketRegionalDomainName}/html/{stackProps.ReportId}/index.html" });
    new CfnOutput(this, "EndPointStyle", new CfnOutputProps { Value = stackProps.BenchmarkEndpoint.ToString() });
    new CfnOutput(this, "AsgName", new CfnOutputProps { Value = asg.AutoScalingGroupName });
    new CfnOutput(this, "EcsClusterName", new CfnOutputProps { Value = cluster.ClusterName });
    new CfnOutput(this, "DFrameWorkerEcsTaskdefImage", new CfnOutputProps { Value = dockerImage.ImageUri });
}
public void LifecycleAdvancedSettingTest()
{
    LifecycleRule rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "test";
    rule.Status = RuleStatus.Enabled;
    rule.ExpriationDays = 400;
    rule.AbortMultipartUpload = new LifecycleRule.LifeCycleExpiration()
    {
        CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(400)
    };
    rule.Transitions = new LifecycleRule.LifeCycleTransition[2]
    {
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.IA },
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.Archive }
    };
    rule.Transitions[0].LifeCycleExpiration.Days = 180;
    rule.Transitions[1].LifeCycleExpiration.Days = 365;
    Test(rule);

    rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "object";
    rule.Status = RuleStatus.Disabled;
    rule.ExpriationDays = 365;
    rule.AbortMultipartUpload = new LifecycleRule.LifeCycleExpiration() { Days = 200 };
    rule.Transitions = new LifecycleRule.LifeCycleTransition[1]
    {
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.Archive }
    };
    rule.Transitions[0].LifeCycleExpiration.Days = 250;
    Test(rule);

    rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "object";
    rule.Status = RuleStatus.Disabled;
    rule.CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(365);
    rule.AbortMultipartUpload = new LifecycleRule.LifeCycleExpiration()
    {
        CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(200)
    };
    Test(rule);
}
public void LifecycleWithVersioningSettingTest()
{
    var bucketName = _bucketName;

    LifecycleRule rule1 = new LifecycleRule();
    rule1.ID = "rule1";
    rule1.Prefix = "test1";
    rule1.Status = RuleStatus.Enabled;
    rule1.ExpriationDays = 200;

    LifecycleRule rule2 = new LifecycleRule();
    rule2.ID = "rule2";
    rule2.Prefix = "test2";
    rule2.Status = RuleStatus.Enabled;
    rule2.ExpriationDays = 400;
    rule2.Transitions = new LifecycleRule.LifeCycleTransition[2]
    {
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.IA },
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.Archive }
    };
    rule2.Transitions[0].LifeCycleExpiration.Days = 180;
    rule2.Transitions[1].LifeCycleExpiration.Days = 365;

    LifecycleRule rule3 = new LifecycleRule();
    rule3.ID = "delete example";
    rule3.Prefix = "object";
    rule3.Status = RuleStatus.Disabled;
    rule3.ExpiredObjectDeleteMarker = true;
    rule3.NoncurrentVersionExpiration = new LifecycleRule.LifeCycleNoncurrentVersionExpiration() { NoncurrentDays = 200 };
    rule3.NoncurrentVersionTransitions = new LifecycleRule.LifeCycleNoncurrentVersionTransition[2]
    {
        new LifecycleRule.LifeCycleNoncurrentVersionTransition() { StorageClass = StorageClass.IA },
        new LifecycleRule.LifeCycleNoncurrentVersionTransition() { StorageClass = StorageClass.Archive }
    };
    rule3.NoncurrentVersionTransitions[0].NoncurrentDays = 90;
    rule3.NoncurrentVersionTransitions[1].NoncurrentDays = 180;

    SetBucketLifecycleRequest req = new SetBucketLifecycleRequest(bucketName);
    req.AddLifecycleRule(rule1);
    req.AddLifecycleRule(rule2);
    req.AddLifecycleRule(rule3);
    _ossClient.SetBucketLifecycle(req);
    OssTestUtils.WaitForCacheExpire(1);
    var rules = _ossClient.GetBucketLifecycle(bucketName);
    Assert.IsTrue(rules.Count == 3);
    Assert.AreEqual(rules[0], rule1);
    Assert.AreEqual(rules[1], rule2);
    Assert.AreEqual(rules[2], rule3);

    // Only ExpiredObjectDeleteMarker
    LifecycleRule rule4 = new LifecycleRule();
    rule4.ID = "only delete marker";
    rule4.Prefix = "test1";
    rule4.Status = RuleStatus.Enabled;
    rule4.ExpiredObjectDeleteMarker = true;
    req = new SetBucketLifecycleRequest(bucketName);
    req.AddLifecycleRule(rule4);
    _ossClient.SetBucketLifecycle(req);
    OssTestUtils.WaitForCacheExpire(1);
    rules = _ossClient.GetBucketLifecycle(bucketName);
    Assert.IsTrue(rules.Count == 1);
    Assert.AreEqual(rules[0], rule4);
    Assert.AreEqual(rules[0].ExpiredObjectDeleteMarker, true);

    // Only NoncurrentVersionTransition
    LifecycleRule rule5 = new LifecycleRule();
    rule5.ID = "only NoncurrentVersionTransition";
    rule5.Prefix = "test1";
    rule5.Status = RuleStatus.Enabled;
    rule5.NoncurrentVersionTransitions = new LifecycleRule.LifeCycleNoncurrentVersionTransition[2]
    {
        new LifecycleRule.LifeCycleNoncurrentVersionTransition() { StorageClass = StorageClass.IA, NoncurrentDays = 90 },
        new LifecycleRule.LifeCycleNoncurrentVersionTransition() { StorageClass = StorageClass.Archive, NoncurrentDays = 180 }
    };
    req = new SetBucketLifecycleRequest(bucketName);
    req.AddLifecycleRule(rule5);
    _ossClient.SetBucketLifecycle(req);
    OssTestUtils.WaitForCacheExpire(1);
    rules = _ossClient.GetBucketLifecycle(bucketName);
    Assert.IsTrue(rules.Count == 1);
    Assert.AreEqual(rules[0], rule5);
    Assert.AreEqual(rules[0].ExpiredObjectDeleteMarker.HasValue, false);
    Assert.AreEqual(rules[0].NoncurrentVersionTransitions[0].NoncurrentDays, 90);
    Assert.AreEqual(rules[0].NoncurrentVersionTransitions[0].StorageClass, StorageClass.IA);
    Assert.AreEqual(rules[0].NoncurrentVersionTransitions[1].NoncurrentDays, 180);
    Assert.AreEqual(rules[0].NoncurrentVersionTransitions[1].StorageClass, StorageClass.Archive);

    // Only NoncurrentVersionExpiration
    LifecycleRule rule6 = new LifecycleRule();
    rule6.ID = "only NoncurrentVersionExpiration";
    rule6.Prefix = "test1";
    rule6.Status = RuleStatus.Enabled;
    rule6.NoncurrentVersionExpiration = new LifecycleRule.LifeCycleNoncurrentVersionExpiration() { NoncurrentDays = 100 };
    req = new SetBucketLifecycleRequest(bucketName);
    req.AddLifecycleRule(rule6);
    _ossClient.SetBucketLifecycle(req);
    OssTestUtils.WaitForCacheExpire(1);
    rules = _ossClient.GetBucketLifecycle(bucketName);
    Assert.IsTrue(rules.Count == 1);
    Assert.AreEqual(rules[0], rule6);
    Assert.AreEqual(rules[0].NoncurrentVersionExpiration.NoncurrentDays, 100);

    _ossClient.DeleteBucketLifecycle(bucketName);
}
public void LifecycleWithTagsAdvancedSettingTest()
{
    LifecycleRule rule1 = new LifecycleRule();
    rule1.ID = "StandardExpireRule" + Guid.NewGuid();
    rule1.Prefix = "test";
    rule1.Status = RuleStatus.Enabled;
    rule1.ExpriationDays = 200;
    rule1.AbortMultipartUpload = new LifecycleRule.LifeCycleExpiration()
    {
        CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(400)
    };

    LifecycleRule rule2 = new LifecycleRule();
    rule2.ID = "StandardExpireRule" + Guid.NewGuid();
    rule2.Prefix = "test2";
    rule2.Status = RuleStatus.Enabled;
    rule2.ExpriationDays = 400;
    rule2.Transitions = new LifecycleRule.LifeCycleTransition[2]
    {
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.IA },
        new LifecycleRule.LifeCycleTransition() { StorageClass = StorageClass.Archive }
    };
    rule2.Transitions[0].LifeCycleExpiration.Days = 180;
    rule2.Transitions[1].LifeCycleExpiration.Days = 365;
    rule2.Tags = new Tag[2]
    {
        new Tag() { Key = "key1", Value = "value1" },
        new Tag() { Key = "key2", Value = "value2" }
    };

    LifecycleRule rule3 = new LifecycleRule();
    rule3.ID = "StandardExpireRule" + Guid.NewGuid();
    rule3.Prefix = "object";
    rule3.Status = RuleStatus.Disabled;
    rule3.CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(365);
    rule3.Tags = new Tag[3]
    {
        new Tag() { Key = "key3-1", Value = "value3-1" },
        new Tag() { Key = "key3-2", Value = "value3-2" },
        new Tag() { Key = "key3-3", Value = "value3-3" }
    };

    SetBucketLifecycleRequest req = new SetBucketLifecycleRequest(_bucketName);
    req.AddLifecycleRule(rule1);
    req.AddLifecycleRule(rule2);
    req.AddLifecycleRule(rule3);
    _ossClient.SetBucketLifecycle(req);
    OssTestUtils.WaitForCacheExpire();
    var rules = _ossClient.GetBucketLifecycle(_bucketName);
    Assert.IsTrue(rules.Count == 3);
    Assert.AreEqual(rules[0], rule1);
    Assert.AreEqual(rules[1], rule2);
    Assert.AreEqual(rules[2], rule3);
    Assert.AreEqual(rules[0].Tags, null);
    Assert.AreEqual(rules[1].Tags[0].Key, "key1");
    Assert.AreEqual(rules[1].Tags[0].Value, "value1");
    Assert.AreEqual(rules[2].Tags[1].Key, "key3-2");
    Assert.AreEqual(rules[2].Tags[1].Value, "value3-2");
    _ossClient.DeleteBucketLifecycle(_bucketName);
}
public void LifecycleWithTagsSettingTest()
{
    LifecycleRule rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "test";
    rule.Status = RuleStatus.Enabled;
    rule.ExpriationDays = 200;
    rule.Tags = new Tag[2]
    {
        new Tag() { Key = "key1", Value = "value1" },
        new Tag() { Key = "key2", Value = "value2" }
    };
    Test(rule);

    rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "object";
    rule.Status = RuleStatus.Disabled;
    rule.ExpriationDays = 365;
    Test(rule);

    rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "object";
    rule.Status = RuleStatus.Enabled;
    rule.CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(200);
    rule.Tags = new Tag[1]
    {
        new Tag() { Key = "key1", Value = "value1" }
    };
    Test(rule);

    rule = new LifecycleRule();
    rule.ID = "StandardExpireRule" + Guid.NewGuid();
    rule.Prefix = "object";
    rule.Status = RuleStatus.Disabled;
    rule.CreatedBeforeDate = DateTime.UtcNow.Date.AddDays(365);
    rule.Tags = new Tag[3]
    {
        new Tag() { Key = "key1", Value = "value1" },
        new Tag() { Key = "key2", Value = "value1" },
        new Tag() { Key = "key3", Value = "value1" }
    };
    Test(rule);
}