public IDatabaseInstance CreateDatabase(DeputyBase databaseEngineVersion, string identification, string databaseName, double?port, string userName, string secretName, StorageType storageType, InstanceClass instanceClass, InstanceSize instanceSize, IVpc vpc, string securityId, string securityGroupId, string parameterGroupId = null, IRole[] roles = null, double?allocatedStorageGb = 5, RemovalPolicy removalPolicy = RemovalPolicy.DESTROY, bool deleteAutomatedBackups = false, int backupRetentionDays = 1, bool deletionProtection = false, SubnetType subnetType = SubnetType.PRIVATE_ISOLATED, string defaultSubnetDomainSeparator = ",", string subnets = "", bool multiAZEnabled = true, bool autoMinorVersionUpgrade = false, bool?storageEncrypted = true) { BasicDatabaseInfra(vpc, secretName, securityId, securityGroupId, subnetType, defaultSubnetDomainSeparator, subnets, out var securityGroup, out var secret, out var subnetSelection); var engine = GetInstanceEngine(databaseEngineVersion); return(new DatabaseInstance(Scope, identification, new DatabaseInstanceProps { Engine = engine, RemovalPolicy = removalPolicy, DeletionProtection = deletionProtection, Credentials = Credentials.FromPassword(userName, secret.SecretValue), StorageType = storageType, DatabaseName = databaseName, Port = port, VpcSubnets = subnetSelection, Vpc = vpc, SecurityGroups = new[] { securityGroup }, DeleteAutomatedBackups = deleteAutomatedBackups, BackupRetention = Duration.Days(backupRetentionDays), AllocatedStorage = allocatedStorageGb, InstanceType = InstanceType.Of(instanceClass, instanceSize), ParameterGroup = CreateClusterParameterGroup(parameterGroupId, engine, roles), MultiAz = multiAZEnabled, AutoMinorVersionUpgrade = autoMinorVersionUpgrade, StorageEncrypted = storageEncrypted })); }
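The CreateDatabase helper above wraps Amazon.CDK.AWS.RDS.DatabaseInstance. A hypothetical call is sketched below; the handler instance, engine version, identifiers and numeric values are placeholders, not taken from the original source.

var ordersDatabase = databaseHandler.CreateDatabase(
    databaseEngineVersion: PostgresEngineVersion.VER_12_3, // engine version classes derive from DeputyBase
    identification: "orders-db",
    databaseName: "orders",
    port: 5432,
    userName: "dbadmin",
    secretName: "orders-db-password",
    storageType: StorageType.GP2,
    instanceClass: InstanceClass.BURSTABLE3,
    instanceSize: InstanceSize.SMALL,
    vpc: vpc,
    securityId: "orders-db-sg",
    securityGroupId: "sg-0123456789abcdef0",
    backupRetentionDays: 7);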
/// <summary> /// Creates an RDS database cluster with the given engine, credentials, networking and backup configuration. /// </summary> /// <param name="databaseEngineVersion"></param> /// <param name="identification"></param> /// <param name="clusterIdentifier"></param> /// <param name="instanceIdentifierBase"></param> /// <param name="databaseName"></param> /// <param name="port"></param> /// <param name="instances"></param> /// <param name="userName"></param> /// <param name="secretName"></param> /// <param name="vpc"></param> /// <param name="instanceClass"></param> /// <param name="instanceSize"></param> /// <param name="securityId"></param> /// <param name="securityGroupId"></param> /// <param name="parameterGroupId"></param> /// <param name="roles"></param> /// <param name="storageEncrypted"></param> /// <param name="subnetType"></param> /// <param name="defaultSubnetDomainSeparator"></param> /// <param name="subnets"></param> /// <param name="removalPolicy"></param> /// <param name="backupRetentionDays"></param> /// <param name="deletionProtection"></param> public IDatabaseCluster CreateDatabaseCluster(DeputyBase databaseEngineVersion, string identification, string clusterIdentifier, string instanceIdentifierBase, string databaseName, double? port, double? instances, string userName, string secretName, IVpc vpc, InstanceClass instanceClass, InstanceSize instanceSize, string securityId, string securityGroupId, string parameterGroupId = null, IRole[] roles = null, bool storageEncrypted = true, SubnetType subnetType = SubnetType.PRIVATE_ISOLATED, string defaultSubnetDomainSeparator = ",", string subnets = "", RemovalPolicy removalPolicy = RemovalPolicy.DESTROY, int backupRetentionDays = 1, bool deletionProtection = false) { BasicDatabaseInfra(vpc, secretName, securityId, securityGroupId, subnetType, defaultSubnetDomainSeparator, subnets, out var securityGroup, out var secret, out var subnetSelection); var engine = GetClusterEngine(databaseEngineVersion); return(new DatabaseCluster(Scope, identification, new DatabaseClusterProps { ClusterIdentifier = clusterIdentifier, InstanceIdentifierBase = instanceIdentifierBase, Engine = engine, RemovalPolicy = removalPolicy, DeletionProtection = deletionProtection, Port = port, InstanceProps = new Amazon.CDK.AWS.RDS.InstanceProps { InstanceType = InstanceType.Of(instanceClass, instanceSize), VpcSubnets = subnetSelection, Vpc = vpc, SecurityGroups = new[] { securityGroup } }, StorageEncrypted = storageEncrypted, Instances = instances, Credentials = Credentials.FromPassword(userName, secret.SecretValue), DefaultDatabaseName = databaseName, ParameterGroup = CreateClusterParameterGroup(parameterGroupId, engine, roles), Backup = new BackupProps { Retention = Duration.Days(backupRetentionDays) } })); }
public void IncludeExcludeTest() { DateAdd dateAdd = new DateAdd(); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 17), new DateTime(2011, 4, 20))); // setup some periods to exclude dateAdd.ExcludePeriods.Add(new TimeRange( new DateTime(2011, 3, 22), new DateTime(2011, 3, 25))); dateAdd.ExcludePeriods.Add(new TimeRange( new DateTime(2011, 4, 1), new DateTime(2011, 4, 7))); dateAdd.ExcludePeriods.Add(new TimeRange( new DateTime(2011, 4, 15), new DateTime(2011, 4, 16))); // positive DateTime periodStart = new DateTime(2011, 3, 19); Assert.Equal(dateAdd.Add(periodStart, Duration.Hours(1)), new DateTime(2011, 3, 19, 1, 0, 0)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(4)), new DateTime(2011, 3, 26, 0, 0, 0)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(17)), new DateTime(2011, 4, 14)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(20)), new DateTime(2011, 4, 18)); Assert.Null(dateAdd.Add(periodStart, Duration.Days(22))); // negative DateTime periodEnd = new DateTime(2011, 4, 18); Assert.Equal(dateAdd.Add(periodEnd, Duration.Hours(-1)), new DateTime(2011, 4, 17, 23, 0, 0)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-4)), new DateTime(2011, 4, 13)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-17)), new DateTime(2011, 3, 22)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-20)), new DateTime(2011, 3, 19)); Assert.Null(dateAdd.Add(periodEnd, Duration.Days(-22))); } // IncludeExcludeTest
public void CalendarGetGapTest() { // simulation of some reservations TimePeriodCollection periods = new TimePeriodCollection(); periods.Add(new Days(2011, 3, 7, 2)); periods.Add(new Days(2011, 3, 16, 2)); // the overall search range CalendarTimeRange limits = new CalendarTimeRange(new DateTime(2011, 3, 4), new DateTime(2011, 3, 21)); Days days = new Days(limits.Start, limits.Duration.Days + 1); ITimePeriodCollection dayList = days.GetDays(); foreach (Day day in dayList) { if (!limits.HasInside(day)) { continue; // outside of the search scope } if (day.DayOfWeek == DayOfWeek.Saturday || day.DayOfWeek == DayOfWeek.Sunday) { periods.Add(day); // exclude weekend day } } TimeGapCalculator <TimeRange> gapCalculator = new TimeGapCalculator <TimeRange>(new TimeCalendar()); ITimePeriodCollection gaps = gapCalculator.GetGaps(periods, limits); Assert.Equal(4, gaps.Count); Assert.True(gaps[0].IsSamePeriod(new TimeRange(new DateTime(2011, 3, 4), Duration.Days(1)))); Assert.True(gaps[1].IsSamePeriod(new TimeRange(new DateTime(2011, 3, 9), Duration.Days(3)))); Assert.True(gaps[2].IsSamePeriod(new TimeRange(new DateTime(2011, 3, 14), Duration.Days(2)))); Assert.True(gaps[3].IsSamePeriod(new TimeRange(new DateTime(2011, 3, 18), Duration.Days(1)))); } // CalendarGetGapTest
public IDatabaseInstance CreateDatabase(DeputyBase databaseEngineVersion, string identification, string databaseName, double?port, string userName, ISecret passwordSecret, StorageType storageType, InstanceClass instanceClass, InstanceSize instanceSize, IVpc vpc, ISecurityGroup securityGroup, ISubnetGroup subnetGroup, IParameterGroup parameterGroup = null, double?allocatedStorageGb = 5, RemovalPolicy removalPolicy = RemovalPolicy.DESTROY, bool deleteAutomatedBackups = false, int backupRetentionDays = 1, bool deletionProtection = false, string[] logTypes = null, bool?storageEncrypted = null, bool?enableIamAuthentication = false, Duration enhancedMonitoringInterval = null, bool multiAZEnabled = true, bool autoMinorVersionUpgrade = false) { BasicDatabaseInfra(vpc); var engine = GetInstanceEngine(databaseEngineVersion); return(new DatabaseInstance(Scope, identification, new DatabaseInstanceProps { Engine = engine, RemovalPolicy = removalPolicy, DeletionProtection = deletionProtection, Credentials = Credentials.FromPassword(userName, passwordSecret.SecretValue), StorageType = storageType, DatabaseName = databaseName, Port = port, SubnetGroup = subnetGroup, Vpc = vpc, SecurityGroups = new[] { securityGroup }, DeleteAutomatedBackups = deleteAutomatedBackups, BackupRetention = Duration.Days(backupRetentionDays), AllocatedStorage = allocatedStorageGb, InstanceType = InstanceType.Of(instanceClass, instanceSize), ParameterGroup = parameterGroup, CloudwatchLogsExports = logTypes, StorageEncrypted = storageEncrypted, IamAuthentication = enableIamAuthentication, MonitoringInterval = enhancedMonitoringInterval, MultiAz = multiAZEnabled, AutoMinorVersionUpgrade = autoMinorVersionUpgrade })); }
public IDatabaseInstance CreateDatabaseSqlServer(DeputyBase databaseEngineVersion, string identification, string databaseName, string userName, string password, StorageType storageType, InstanceClass instanceClass, string instanceSize, IVpc vpc, ISecurityGroup security, string securityGroupId, string parameterGroupId = null, IRole[] roles = null, double?allocatedStorageGb = 5, RemovalPolicy removalPolicy = RemovalPolicy.DESTROY, bool deleteAutomatedBackups = false, int backupRetentionDays = 1, bool?deletionProtection = false, SubnetType subnetType = SubnetType.PRIVATE_ISOLATED, string defaultSubnetDomainSeparator = ",", string subnets = "", bool multiAZEnabled = true, bool?autoMinorVersionUpgrade = false, bool?storageEncrypted = true, string licenseOption = "LICENSE_INCLUDED", string edition = "ex") { BasicDatabaseInfraWithHardcodedPassword(vpc, subnetType, defaultSubnetDomainSeparator, subnets, out var subnetSelection); var engine = GetInstanceEngine(databaseEngineVersion, edition); return(new DatabaseInstance(Scope, identification, new DatabaseInstanceProps { Engine = engine, RemovalPolicy = removalPolicy, DeletionProtection = deletionProtection, Credentials = Credentials.FromPassword(userName, SecretValue.PlainText(password)), StorageType = storageType, DatabaseName = licenseOption == LicenseModel.LICENSE_INCLUDED.ToString() ? null : databaseName, VpcSubnets = subnetSelection, Vpc = vpc, SecurityGroups = new[] { security }, DeleteAutomatedBackups = deleteAutomatedBackups, BackupRetention = Duration.Days(backupRetentionDays), AllocatedStorage = allocatedStorageGb, InstanceType = InstanceType.Of(instanceClass, GetInstanceSize(instanceSize)), ParameterGroup = CreateClusterParameterGroup(parameterGroupId, engine, roles), MultiAz = multiAZEnabled, AutoMinorVersionUpgrade = autoMinorVersionUpgrade, StorageEncrypted = storageEncrypted, LicenseModel = GetLicenseModel(licenseOption) })); }
public DotNetLambdaWithApiGetway(Stack scope, string id, DotNetLambdaWithApiGetwayProps props) : base(scope, id) { // domain and certificate have been created manually on AWS Console for security purposes var snsTopic = new Topic(this, "WorkSplitRequest", new TopicProps() { TopicName = "work-split-request", DisplayName = "Work Split Request" }); snsTopic.AddSubscription(new EmailSubscription("*****@*****.**", new EmailSubscriptionProps())); var queue = new Queue(this, "QueueProcessor", new QueueProps() { RetentionPeriod = Duration.Days(2) }); snsTopic.AddSubscription(new SqsSubscription(queue)); var dotnetWebApiLambda = new Function(this, "WebLambda", new FunctionProps { Runtime = Runtime.DOTNET_CORE_3_1, Code = props.Code, Handler = "Web::Web.LambdaEntryPoint::FunctionHandlerAsync" }); dotnetWebApiLambda.AddEnvironment("Region", scope.Region); dotnetWebApiLambda.AddEnvironment("SnsTopic", snsTopic.TopicArn); snsTopic.GrantPublish(dotnetWebApiLambda); var fullDomain = $"{props.SiteSubDomain}.{props.DomainName}"; var apiGetway = new LambdaRestApi(this, fullDomain, new LambdaRestApiProps() { Handler = dotnetWebApiLambda }); var apiDomain = apiGetway.AddDomainName("customDomain", new DomainNameOptions() { DomainName = fullDomain, Certificate = props.Certificate, SecurityPolicy = SecurityPolicy.TLS_1_2, EndpointType = EndpointType.EDGE }); new ARecord(this, "ApiGateway-ARecord", new ARecordProps() { Zone = props.Zone, RecordName = fullDomain, Target = RecordTarget.FromAlias(new ApiGateway(apiGetway)), }); scope.Log($"ApiGateway Url {id}", apiGetway.Url); scope.Log($"ApiGateway public domain {id}", apiDomain.DomainNameAliasDomainName); }
private void CreateBackerNotifyParticipantFinishedQueue(string environmentName) { new Queue(this, "participantfinishedqueue", new QueueProps { QueueName = $"{environmentName}ParticipantFinishedQueue", ReceiveMessageWaitTime = Duration.Seconds(20), DeliveryDelay = Duration.Seconds(0), VisibilityTimeout = Duration.Seconds(60), RetentionPeriod = Duration.Days(7) }); }
// ---------------------------------------------------------------------- public void DateAddSample() { DateAdd dateAdd = new DateAdd(); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 17), new DateTime(2011, 4, 20))); // setup some periods to exclude dateAdd.ExcludePeriods.Add(new TimeRange( new DateTime(2011, 3, 22), new DateTime(2011, 3, 25))); dateAdd.ExcludePeriods.Add(new TimeRange( new DateTime(2011, 4, 1), new DateTime(2011, 4, 7))); dateAdd.ExcludePeriods.Add(new TimeRange( new DateTime(2011, 4, 15), new DateTime(2011, 4, 16))); // positive DateTime dateDiffPositive = new DateTime(2011, 3, 19); DateTime?positive1 = dateAdd.Add(dateDiffPositive, Duration.Hours(1)); Console.WriteLine("DateAdd Positive1: {0}", positive1); // > DateAdd Positive1: 19.03.2011 01:00:00 DateTime?positive2 = dateAdd.Add(dateDiffPositive, Duration.Days(4)); Console.WriteLine("DateAdd Positive2: {0}", positive2); // > DateAdd Positive2: 26.03.2011 00:00:00 DateTime?positive3 = dateAdd.Add(dateDiffPositive, Duration.Days(17)); Console.WriteLine("DateAdd Positive3: {0}", positive3); // > DateAdd Positive3: 14.04.2011 00:00:00 DateTime?positive4 = dateAdd.Add(dateDiffPositive, Duration.Days(20)); Console.WriteLine("DateAdd Positive4: {0}", positive4); // > DateAdd Positive4: 18.04.2011 00:00:00 // negative DateTime dateDiffNegative = new DateTime(2011, 4, 18); DateTime?negative1 = dateAdd.Add(dateDiffNegative, Duration.Hours(-1)); Console.WriteLine("DateAdd Negative1: {0}", negative1); // > DateAdd Negative1: 17.04.2011 23:00:00 DateTime?negative2 = dateAdd.Add(dateDiffNegative, Duration.Days(-4)); Console.WriteLine("DateAdd Negative2: {0}", negative2); // > DateAdd Negative2: 13.04.2011 00:00:00 DateTime?negative3 = dateAdd.Add(dateDiffNegative, Duration.Days(-17)); Console.WriteLine("DateAdd Negative3: {0}", negative3); // > DateAdd Negative3: 22.03.2011 00:00:00 DateTime?negative4 = dateAdd.Add(dateDiffNegative, Duration.Days(-20)); Console.WriteLine("DateAdd Negative4: {0}", negative4); // > DateAdd Negative4: 19.03.2011 00:00:00 } // DateAddSample
public DatabaseStack(Construct scope, string name, Vpc vpc, StackProps props = null) : base(scope, $"database-{name}", props) { // pricing - rds // 750 hours of Amazon RDS Single-AZ db.t2.micro instance usage for running // MySQL, MariaDB, PostgreSQL, Oracle BYOL or SQL Server (running SQL Server Express Edition) // // 20 GB of SSD database storage // // 20 GB of backup storage for your automated database backups and // any user-initiated database snapshots // // pricing - secrets manager // USD 0.40 per secret per month. Secrets stored for less than a month // are prorated (based on the number of hours). // USD 0.05 per 10,000 API calls. var secret = new Secret(this, $"database-{name}-secret", new SecretProps() { Description = $"Database {name} password", SecretName = $"database-{name}-secret" }); var databaseSecret = new DatabaseSecret(this, $"database-{name}-databasesecret", new DatabaseSecretProps() { Username = "******", MasterSecret = secret, ExcludeCharacters = "{}[]()'\"/\\" }); _databaseInstance = new DatabaseInstance(this, $"database-{name}-cluster", new DatabaseInstanceProps() { InstanceIdentifier = name + "-instance", DatabaseName = name, Credentials = Credentials.FromSecret(databaseSecret), Engine = DatabaseInstanceEngine.Mysql(new MySqlInstanceEngineProps() { Version = MysqlEngineVersion.VER_8_0_21 }), InstanceType = new InstanceType("t2.micro"), Vpc = vpc, VpcSubnets = new SubnetSelection() { SubnetType = SubnetType.ISOLATED } }); _databaseInstance.AddRotationSingleUser(new RotationSingleUserOptions() { AutomaticallyAfter = Duration.Days(7), ExcludeCharacters = "!@#$%^&*" }); }
public void IncludeExclude6Test() { DateAdd dateAdd = new DateAdd(); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 10), new DateTime(2011, 3, 20))); dateAdd.ExcludePeriods.Add(new TimeRange(new DateTime(2011, 3, 5), new DateTime(2011, 3, 12))); dateAdd.ExcludePeriods.Add(new TimeRange(new DateTime(2011, 3, 18), new DateTime(2011, 3, 30))); DateTime test = new DateTime(2011, 3, 10); Assert.Equal(dateAdd.Add(test, TimeSpan.Zero), new DateTime(2011, 3, 12)); Assert.Equal(dateAdd.Add(test, Duration.Days(1)), new DateTime(2011, 3, 13)); } // IncludeExclude6Test
public ILifecycleRule CreateLifecycleRule(string description, int maxImageAgeDays, int maxImageNumber, int priorityOrder, List <string> tagPrefixList, string tagStatus) { GetLifecyleTagStatus(tagStatus, out var tagStatusEnum); return(new LifecycleRule { Description = description, MaxImageAge = Duration.Days(maxImageAgeDays), MaxImageCount = maxImageNumber, RulePriority = priorityOrder, TagPrefixList = tagPrefixList.ToArray(), TagStatus = tagStatusEnum }); }
public void IncludeExclude4Test() { DateAdd dateAdd = new DateAdd(); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 10), new DateTime(2011, 3, 20))); dateAdd.ExcludePeriods.Add(new TimeRange(new DateTime(2011, 3, 10), new DateTime(2011, 3, 15))); dateAdd.ExcludePeriods.Add(new TimeRange(new DateTime(2011, 3, 15), new DateTime(2011, 3, 20))); DateTime test = new DateTime(2011, 3, 10); Assert.Null(dateAdd.Add(test, TimeSpan.Zero)); Assert.Null(dateAdd.Add(test, Duration.Days(1))); Assert.Null(dateAdd.Add(test, Duration.Days(5))); } // IncludeExclude4Test
public ConsumerStack(Construct scope, string name, double memory, Vpc vpc, StackProps props = null) : base(scope, $"consumer-{name}", props) { // pricing - lambda // 1 million free requests per month and // 400,000 GB-seconds of compute time per month. // // pricing - sqs // First 1 million requests/month - standard: Free - FIFO: Free // From 1 million to 100 billion requests/month - standard: USD 0.40 - FIFO: USD 0.50 // From 100 billion to 200 billion requests/month - standard: USD 0.30 - FIFO: USD 0.40 // Over 200 billion requests/month - standard: USD 0.24 - FIFO: USD 0.35 _function = new Function(this, $"consumer-{name}-lambda", new FunctionProps() { FunctionName = name, MemorySize = memory, Runtime = Runtime.DOTNET_CORE_3_1, Handler = "Lambda::Lambda.Function::Handler", Code = Code.FromAsset("../tools/consumer"), Timeout = Duration.Seconds(20), LogRetention = RetentionDays.ONE_DAY, Vpc = vpc, VpcSubnets = new SubnetSelection() { SubnetType = SubnetType.PRIVATE } }); _deadletter = new Queue(this, $"consumer-{name}-deadletter", new QueueProps() { QueueName = name + "-deadletter", VisibilityTimeout = Duration.Seconds(30), RetentionPeriod = Duration.Days(10) }); _queue = new Queue(this, $"consumer-{name}-queue", new QueueProps() { QueueName = name, VisibilityTimeout = Duration.Seconds(30), RetentionPeriod = Duration.Days(10), DeadLetterQueue = new DeadLetterQueue() { MaxReceiveCount = 3, Queue = _deadletter } }); _function.AddEventSource(new SqsEventSource(_queue)); }
public void DayTest() { Assert.AreEqual(Duration.Day, new TimeSpan(1, 0, 0, 0)); Assert.AreEqual(Duration.Days(0), TimeSpan.Zero); Assert.AreEqual(Duration.Days(1), new TimeSpan(1, 0, 0, 0)); Assert.AreEqual(Duration.Days(2), new TimeSpan(2, 0, 0, 0)); Assert.AreEqual(Duration.Days(-1), new TimeSpan(-1, 0, 0, 0)); Assert.AreEqual(Duration.Days(-2), new TimeSpan(-2, 0, 0, 0)); Assert.AreEqual(Duration.Days(1, 23), new TimeSpan(1, 23, 0, 0)); Assert.AreEqual(Duration.Days(1, 23, 22), new TimeSpan(1, 23, 22, 0)); Assert.AreEqual(Duration.Days(1, 23, 22, 18), new TimeSpan(1, 23, 22, 18)); Assert.AreEqual(Duration.Days(1, 23, 22, 18, 875), new TimeSpan(1, 23, 22, 18, 875)); } // DayTest
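In DayTest and the DateAdd tests above, Duration is the Time Period Library helper rather than the Amazon.CDK Duration used in the infrastructure snippets; Duration.Days simply builds a TimeSpan. A minimal sketch, assuming the library's Itenso.TimePeriod namespace is referenced:

using System;
using Itenso.TimePeriod;

class DurationDaysSketch
{
    static void Main()
    {
        TimeSpan fourDays = Duration.Days(4);            // equivalent to TimeSpan.FromDays(4)
        TimeSpan dayAndHours = Duration.Days(1, 23);     // 1 day and 23 hours
        Console.WriteLine(fourDays);                     // 4.00:00:00
        Console.WriteLine(dayAndHours);                  // 1.23:00:00
    }
}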
private Repository CreateDockerImageRepo() { return(new Repository(this, "DockerImageRepository", new RepositoryProps { RepositoryName = this.settings.DockerImageRepository, // RemovalPolicy = RemovalPolicy.DESTROY, // Destroy can only destroy empty repos. Ones with images will cause stack deletion to fail, requiring more manual cleanup. LifecycleRules = new [] { new LifecycleRule { Description = $"Expire untagged images in {this.settings.UntaggedImageExpirationDays} days", TagStatus = TagStatus.UNTAGGED, MaxImageAge = Duration.Days(this.settings.UntaggedImageExpirationDays), } } } )); }
public void IncludeExclude2Test() { DateAdd dateAdd = new DateAdd(); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 1), new DateTime(2011, 3, 5))); dateAdd.ExcludePeriods.Add(new TimeRange(new DateTime(2011, 3, 5), new DateTime(2011, 3, 10))); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 10), new DateTime(2011, 3, 15))); dateAdd.ExcludePeriods.Add(new TimeRange(new DateTime(2011, 3, 15), new DateTime(2011, 3, 20))); dateAdd.IncludePeriods.Add(new TimeRange(new DateTime(2011, 3, 20), new DateTime(2011, 3, 25))); DateTime periodStart = new DateTime(2011, 3, 1); DateTime periodEnd = new DateTime(2011, 3, 25); // add from start Assert.Equal(dateAdd.Add(periodStart, Duration.Days(1)), new DateTime(2011, 3, 2)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(4)), new DateTime(2011, 3, 10)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(5)), new DateTime(2011, 3, 11)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(9)), new DateTime(2011, 3, 20)); Assert.Equal(dateAdd.Add(periodStart, Duration.Days(10)), new DateTime(2011, 3, 21)); Assert.Null(dateAdd.Add(periodStart, Duration.Days(15))); // add from end Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-1)), new DateTime(2011, 3, 24)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-5)), new DateTime(2011, 3, 15)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-6)), new DateTime(2011, 3, 14)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-10)), new DateTime(2011, 3, 5)); Assert.Equal(dateAdd.Add(periodEnd, Duration.Days(-11)), new DateTime(2011, 3, 4)); Assert.Null(dateAdd.Add(periodEnd, Duration.Days(-15))); // subtract from end Assert.Equal(dateAdd.Subtract(periodEnd, Duration.Days(1)), new DateTime(2011, 3, 24)); Assert.Equal(dateAdd.Subtract(periodEnd, Duration.Days(5)), new DateTime(2011, 3, 15)); Assert.Equal(dateAdd.Subtract(periodEnd, Duration.Days(6)), new DateTime(2011, 3, 14)); Assert.Equal(dateAdd.Subtract(periodEnd, Duration.Days(10)), new DateTime(2011, 3, 5)); Assert.Equal(dateAdd.Subtract(periodEnd, Duration.Days(11)), new DateTime(2011, 3, 4)); Assert.Null(dateAdd.Subtract(periodStart, Duration.Days(15))); // subtract from start Assert.Equal(dateAdd.Subtract(periodStart, Duration.Days(-1)), new DateTime(2011, 3, 2)); Assert.Equal(dateAdd.Subtract(periodStart, Duration.Days(-4)), new DateTime(2011, 3, 10)); Assert.Equal(dateAdd.Subtract(periodStart, Duration.Days(-5)), new DateTime(2011, 3, 11)); Assert.Equal(dateAdd.Subtract(periodStart, Duration.Days(-9)), new DateTime(2011, 3, 20)); Assert.Equal(dateAdd.Subtract(periodStart, Duration.Days(-10)), new DateTime(2011, 3, 21)); Assert.Null(dateAdd.Subtract(periodStart, Duration.Days(-15))); } // IncludeExclude2Test
private ILifecycleRule[] GetLifeCycleRules(IList <ILifecycleRule> bucketLifecycleRules, int bucketExpirationDays) { var result = new List <ILifecycleRule>(); if (bucketExpirationDays > 0) { result.Add(new LifecycleRule { Expiration = Duration.Days(bucketExpirationDays) }); } if (bucketLifecycleRules != null && bucketLifecycleRules.Any()) { result.AddRange(bucketLifecycleRules); } return(result.Any() ? result.ToArray() : null); }
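A hypothetical use of the GetLifeCycleRules helper above when declaring a bucket from the same handler class (assuming its Scope construct); the bucket name and the 90-day expiration are placeholders.

var logsBucket = new Bucket(Scope, "logs-bucket", new BucketProps
{
    LifecycleRules = GetLifeCycleRules(
        bucketLifecycleRules: null,   // no explicitly configured rules
        bucketExpirationDays: 90)     // adds a single 90-day expiration rule
});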
internal AssetStack(Construct scope, string id, AssetStackProps props) : base(scope, id, props) { var censusBucket = new Bucket(this, "Aussie-Stats-Asset-Bucket", new BucketProps { BucketName = props.AssetBucket, PublicReadAccess = false, Encryption = BucketEncryption.UNENCRYPTED, RemovalPolicy = RemovalPolicy.RETAIN, BlockPublicAccess = new BlockPublicAccess(new BlockPublicAccessOptions { BlockPublicAcls = true, IgnorePublicAcls = true, BlockPublicPolicy = true, RestrictPublicBuckets = true }), LifecycleRules = new ILifecycleRule[] { new LifecycleRule { Enabled = true, Transitions = new ITransition[] { new Transition { StorageClass = StorageClass.INFREQUENT_ACCESS, TransitionAfter = Duration.Days(30) } } } } }); new Bucket(this, "Aussie-Stats-Lambda-Artifacts", new BucketProps { BucketName = props.LambdaAritifactBucket, PublicReadAccess = true, Encryption = BucketEncryption.UNENCRYPTED, RemovalPolicy = RemovalPolicy.RETAIN }); BuildUserAccessTokens(censusBucket); }
internal LambdaApiSolutionStack(Construct scope, string id, IStackProps props = null) : base(scope, id, props) { DockerImageCode dockerImageCode = DockerImageCode.FromImageAsset("src/LambdaApiSolution.DockerFunction/src/LambdaApiSolution.DockerFunction"); DockerImageFunction dockerImageFunction = new DockerImageFunction(this, "LambdaFunction", new DockerImageFunctionProps() { Code = dockerImageCode, Description = ".NET 5 Docker Lambda function" }); HttpApi httpApi = new HttpApi(this, "APIGatewayForLambda", new HttpApiProps() { ApiName = "APIGatewayForLambda", CreateDefaultStage = true, CorsPreflight = new CorsPreflightOptions() { AllowMethods = new[] { HttpMethod.GET }, AllowOrigins = new[] { "*" }, MaxAge = Duration.Days(10) } }); LambdaProxyIntegration lambdaProxyIntegration = new LambdaProxyIntegration(new LambdaProxyIntegrationProps() { Handler = dockerImageFunction, PayloadFormatVersion = PayloadFormatVersion.VERSION_2_0 }); httpApi.AddRoutes(new AddRoutesOptions() { Path = "/casing", Integration = lambdaProxyIntegration, Methods = new[] { HttpMethod.POST } }); string guid = Guid.NewGuid().ToString(); CfnOutput apiUrl = new CfnOutput(this, "APIGatewayURLOutput", new CfnOutputProps() { ExportName = $"APIGatewayEndpointURL-{guid}", Value = httpApi.ApiEndpoint }); }
public DatabaseInstance Create(Amazon.CDK.AWS.EC2.Vpc vpc, IConfigSettings configSettings, SecurityGroup[] securityGroups) { var db = new DatabaseInstance(this, $"{configSettings.Rds.Name}", new DatabaseInstanceProps { // todo change all properties based on config settings Engine = DatabaseInstanceEngine.Mysql(new MySqlInstanceEngineProps { //todo change based on config settings Version = MysqlEngineVersion.VER_5_7, }), Credentials = GetCredentials(configSettings), InstanceType = InstanceType.Of(InstanceClass.BURSTABLE2, InstanceSize.SMALL), VpcSubnets = new SubnetSelection { SubnetType = SubnetType.ISOLATED }, Vpc = vpc, MultiAz = configSettings.Rds.MultiAz, BackupRetention = Duration.Days(configSettings.Rds.BackupRetentionInDays), StorageEncrypted = configSettings.Rds.StorageEncrypted, AutoMinorVersionUpgrade = configSettings.Rds.AutoMinorVersionUpgrade, // todo StorageType = StorageType.GP2, SecurityGroups = securityGroups, InstanceIdentifier = configSettings.Rds.Name, DeletionProtection = configSettings.Rds.DeletionProtection, }); // rotate the master password (use this when storing it in secrets manager) //db.AddRotationSingleUser(); //EaSdRDpAgGjGKd0AL-uI2fwSJ,znW5 DBInstance = db; return(db); }
private IBucket CreateBucket(BucketEntity bucket) { if (string.IsNullOrEmpty(bucket.WebSiteRedirectHost)) { return(new Bucket(Scope, bucket.BucketName, new BucketProps { Versioned = bucket.Versioned, RemovalPolicy = bucket.RemovalPolicy, Encryption = bucket.Encryption, LifecycleRules = new ILifecycleRule[] { new LifecycleRule { Expiration = Duration.Days(bucket.ExpirationDays) } } })); } return(new Bucket(Scope, bucket.BucketName, new BucketProps { Versioned = bucket.Versioned, RemovalPolicy = bucket.RemovalPolicy, Encryption = bucket.Encryption, LifecycleRules = new ILifecycleRule[] { new LifecycleRule { Expiration = Duration.Days(bucket.ExpirationDays) } }, WebsiteRedirect = new RedirectTarget { HostName = bucket.WebSiteRedirectHost } })); }
public ILifecycleRule CreateLifecycleRule(string id, int expirationTime, string expirationTagName, string expirationTagValue, bool isVersionedBucket, int?previousVersionsExpirationDays = null) { Duration nonCurrentVersionExpirationDuration = null; if (isVersionedBucket) { if (previousVersionsExpirationDays.HasValue) { nonCurrentVersionExpirationDuration = Duration.Days(previousVersionsExpirationDays.Value); } else { throw new ArgumentException($"The LifeCycle rule {id} belongs to a versioned bucket and no previousVersionsExpirationDays has been established"); } } return(new LifecycleRule { Id = id, Expiration = Duration.Days(expirationTime), TagFilters = CreateTagFilters(expirationTagName, expirationTagValue), NoncurrentVersionExpiration = nonCurrentVersionExpirationDuration }); }
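A hypothetical call to the CreateLifecycleRule helper above, attaching the resulting rule to a versioned bucket; the handler, scope, ids, tag values and day counts are placeholders, not from the original source.

var expireRule = bucketHandler.CreateLifecycleRule(
    id: "expire-temp-objects",
    expirationTime: 30,                     // current versions expire after 30 days
    expirationTagName: "lifecycle",
    expirationTagValue: "temp",
    isVersionedBucket: true,
    previousVersionsExpirationDays: 7);     // noncurrent versions expire after 7 days

var tempBucket = new Bucket(scope, "temp-bucket", new BucketProps
{
    Versioned = true,
    LifecycleRules = new[] { expireRule }
});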
public QaStage(Construct scope, string id, QaStageProps props) : base(scope, id, props) { var mainStack = new Stack(this, "main-stack", new StackProps { Env = Constants.DefaultEnv }); var credentials = new GitHubSourceCredentials(mainStack, "github-source-credentials", new GitHubSourceCredentialsProps { AccessToken = SecretValue.SecretsManager("github/oauth/token") }); var vpc = new Vpc(mainStack, "main-vpc", new VpcProps { Cidr = "10.0.0.0/16" }); //should change this to Aurora Serverless!!! //https://dev.to/cjjenkinson/how-to-create-an-aurora-serverless-rds-instance-on-aws-with-cdk-5bb0 var db = new PostgresStack(this, "postgres-db-stack", new DatabaseInstanceProps { Vpc = vpc, Engine = DatabaseInstanceEngine.Postgres(new PostgresInstanceEngineProps { Version = PostgresEngineVersion.VER_12_3 }), AllocatedStorage = 5, BackupRetention = Duration.Days(0), DeletionProtection = false, InstanceType = InstanceType.Of(InstanceClass.BURSTABLE2, InstanceSize.MICRO), MasterUsername = "******", MultiAz = false, DatabaseName = "postgres", RemovalPolicy = RemovalPolicy.DESTROY, AllowMajorVersionUpgrade = false }, new StackProps { Env = Constants.DefaultEnv }); var containerEnvVars = new Dictionary <string, string> { { "DB__ADDRESS", db.Instance.InstanceEndpoint.SocketAddress } }; var containerSecrets = new Dictionary <string, Secret> { { "DatabaseConnection", Secret.FromSecretsManager(db.Instance.Secret) } }; var accountMetadataTable = new Table(mainStack, "AccountMetadata", new TableProps { TableName = "AccountMetadata", PartitionKey = new Attribute { Name = "UserId", Type = AttributeType.NUMBER }, SortKey = new Attribute { Name = "AccountId", Type = AttributeType.NUMBER }, Stream = StreamViewType.NEW_IMAGE }); var ecsCluster = new Cluster(mainStack, "app-cluster", new ClusterProps { Vpc = vpc, ClusterName = "app-cluster", ContainerInsights = true }); var fargateSslCertArn = SecretValue.SecretsManager("fargateSslCertArn").ToString(); var albCert = Certificate.FromCertificateArn(mainStack, "alb-cert", fargateSslCertArn); var sandbankBuildInfra = this.CreateApiBuildStack("SandBank", vpc); var sandbankApi = this.CreateApiStack("SandBank", ecsCluster, vpc, sandbankBuildInfra.EcrRepository, "sandbank-api", props.HostedZoneName, props.HostedZoneId, albCert, containerEnvVars, containerSecrets); accountMetadataTable.GrantFullAccess(sandbankApi.FargateService.TaskDefinition.TaskRole); var cloudfrontCertArn = SecretValue.SecretsManager("cloudfrontcertarn").ToString(); var cert = Certificate.FromCertificateArn(mainStack, "cloudfront-cert", cloudfrontCertArn); var sandbankSpa = new SpaStack(this, "sandbank-spa-stack", new SpaStackProps { Env = Constants.DefaultEnv, Vpc = vpc, ServiceName = "sandbank-spa", SubDomain = "sandbank", HostedZoneName = props.HostedZoneName, HostedZoneId = props.HostedZoneId, CloudFrontCert = cert, GitHubSourceProps = Constants.GithubRepo, BuildSpecFile = Constants.NpmBuildSpec, SpaDirectory = "App/FrontEnd/sandbank.spa", ApiUrl = $"{sandbankApi.ApiUrl}/api" //maybe should use CfnOutput instead }); //lambda //SandBank.Lambda.ConfigAuditTrail::SandBank.Lambda.ConfigAuditTrail.Function::FunctionHandler }
internal CdkStack(Construct scope, string id, IStackProps props = null) : base(scope, id, props) { var stackProps = ReportStackProps.ParseOrDefault(props); var dframeWorkerLogGroup = "MagicOnionBenchWorkerLogGroup"; var dframeMasterLogGroup = "MagicOnionBenchMasterLogGroup"; var benchNetwork = stackProps.GetBenchNetwork(); var recreateMagicOnionTrigger = stackProps.GetBenchmarkServerBinariesHash(); // s3 var s3 = new Bucket(this, "Bucket", new BucketProps { AutoDeleteObjects = true, RemovalPolicy = RemovalPolicy.DESTROY, AccessControl = BucketAccessControl.PRIVATE, }); var lifecycleRule = new LifecycleRule { Enabled = true, Prefix = "reports/", Expiration = Duration.Days(stackProps.DaysKeepReports), AbortIncompleteMultipartUploadAfter = Duration.Days(1), }; s3.AddLifecycleRule(lifecycleRule); s3.AddToResourcePolicy(new PolicyStatement(new PolicyStatementProps { Sid = "AllowPublicRead", Effect = Effect.ALLOW, Principals = new[] { new AnyPrincipal() }, Actions = new[] { "s3:GetObject*" }, Resources = new[] { $"{s3.BucketArn}/html/*" }, })); s3.AddToResourcePolicy(new PolicyStatement(new PolicyStatementProps { Sid = "AllowAwsAccountAccess", Effect = Effect.ALLOW, Principals = new[] { new AccountRootPrincipal() }, Actions = new[] { "s3:*" }, Resources = new[] { $"{s3.BucketArn}/*" }, })); // s3 deploy var masterDllDeployment = new BucketDeployment(this, "DeployMasterDll", new BucketDeploymentProps { DestinationBucket = s3, Sources = new[] { Source.Asset(Path.Combine(Directory.GetCurrentDirectory(), $"out/linux/server")) }, DestinationKeyPrefix = $"assembly/linux/server" }); var userdataDeployment = new BucketDeployment(this, "UserData", new BucketDeploymentProps { DestinationBucket = s3, Sources = new[] { Source.Asset(Path.Combine(Directory.GetCurrentDirectory(), "userdata/")) }, DestinationKeyPrefix = "userdata/" }); // docker deploy var dockerImage = new DockerImageAsset(this, "dframeWorkerImage", new DockerImageAssetProps { Directory = Path.Combine(Directory.GetCurrentDirectory(), "app"), File = "ConsoleAppEcs/Dockerfile.Ecs", }); var dframeImage = ContainerImage.FromDockerImageAsset(dockerImage); // network var vpc = new Vpc(this, "Vpc", new VpcProps { MaxAzs = 2, NatGateways = 0, SubnetConfiguration = new[] { new SubnetConfiguration { Name = "public", SubnetType = SubnetType.PUBLIC } }, }); var allsubnets = new SubnetSelection { Subnets = vpc.PublicSubnets }; var singleSubnets = new SubnetSelection { Subnets = new[] { vpc.PublicSubnets.First() } }; var sg = new SecurityGroup(this, "MasterSg", new SecurityGroupProps { AllowAllOutbound = true, Vpc = vpc, }); foreach (var subnet in vpc.PublicSubnets) { sg.AddIngressRule(Peer.Ipv4(vpc.VpcCidrBlock), Port.AllTcp(), "VPC", true); } // service discovery var serviceDiscoveryDomain = "local"; var serverMapName = "server"; var dframeMapName = "dframe-master"; var ns = new PrivateDnsNamespace(this, "Namespace", new PrivateDnsNamespaceProps { Vpc = vpc, Name = serviceDiscoveryDomain, }); var serviceDiscoveryServer = ns.CreateService("server", new DnsServiceProps { Name = serverMapName, DnsRecordType = DnsRecordType.A, RoutingPolicy = RoutingPolicy.MULTIVALUE, }); // alb var albDnsName = "benchmark-alb"; var benchToMagicOnionDnsName = benchNetwork.RequireAlb ? 
$"{benchNetwork.EndpointScheme}://{albDnsName}.{stackProps.AlbDomain.domain}" : $"{benchNetwork.EndpointScheme}://{serverMapName}.{serviceDiscoveryDomain}"; IApplicationTargetGroup grpcTargetGroup = null; IApplicationTargetGroup httpsTargetGroup = null; if (benchNetwork.RequireAlb) { // route53 var hostedZone = HostedZone.FromHostedZoneAttributes(this, "HostedZone", new HostedZoneAttributes { HostedZoneId = stackProps.AlbDomain.zoneId, ZoneName = stackProps.AlbDomain.domain, }); // acm var certificate = new DnsValidatedCertificate(this, "certificate", new DnsValidatedCertificateProps { DomainName = $"{albDnsName}.{hostedZone.ZoneName}", HostedZone = hostedZone, }); // alb var lb = new ApplicationLoadBalancer(this, "LB", new ApplicationLoadBalancerProps { Vpc = vpc, VpcSubnets = allsubnets, SecurityGroup = new SecurityGroup(this, "AlbSg", new SecurityGroupProps { AllowAllOutbound = true, Vpc = vpc, }), InternetFacing = false, Http2Enabled = true, }); grpcTargetGroup = AddGrpcTargetGroup(benchNetwork, vpc, certificate, lb); httpsTargetGroup = AddHttpsTargetGroup(benchNetwork, vpc, certificate, lb); // Dns Record _ = new CnameRecord(this, "alb-alias-record", new CnameRecordProps { RecordName = $"{albDnsName}.{stackProps.AlbDomain.domain}", Ttl = Duration.Seconds(60), Zone = hostedZone, DomainName = lb.LoadBalancerDnsName, }); } // iam var iamEc2MagicOnionRole = GetIamEc2MagicOnionRole(s3, serviceDiscoveryServer); var iamEcsTaskExecuteRole = GetIamEcsTaskExecuteRole(new[] { dframeWorkerLogGroup, dframeMasterLogGroup }); var iamDFrameTaskDefRole = GetIamEcsDframeTaskDefRole(s3); var iamWorkerTaskDefRole = GetIamEcsWorkerTaskDefRole(s3); // secrets var ddToken = stackProps.UseEc2DatadogAgentProfiler || stackProps.UseFargateDatadogAgentProfiler ? Amazon.CDK.AWS.SecretsManager.Secret.FromSecretNameV2(this, "dd-token", "magiconion-benchmark-datadog-token") : null; // MagicOnion var asg = new AutoScalingGroup(this, "MagicOnionAsg", new AutoScalingGroupProps { // Monitoring is default DETAILED. 
SpotPrice = "1.0", // 0.0096 for spot price average for m3.medium Vpc = vpc, SecurityGroup = sg, VpcSubnets = singleSubnets, InstanceType = stackProps.MagicOnionInstanceType, DesiredCapacity = 1, MaxCapacity = 1, MinCapacity = 0, AssociatePublicIpAddress = true, MachineImage = new AmazonLinuxImage(new AmazonLinuxImageProps { CpuType = AmazonLinuxCpuType.X86_64, Generation = AmazonLinuxGeneration.AMAZON_LINUX_2, Storage = AmazonLinuxStorage.GENERAL_PURPOSE, Virtualization = AmazonLinuxVirt.HVM, }), AllowAllOutbound = true, GroupMetrics = new[] { GroupMetrics.All() }, Role = iamEc2MagicOnionRole, UpdatePolicy = UpdatePolicy.ReplacingUpdate(), Signals = Signals.WaitForCount(1, new SignalsOptions { Timeout = Duration.Minutes(10), }), }); asg.AddSecretsReadGrant(ddToken, () => stackProps.UseEc2DatadogAgentProfiler); var userdata = GetUserData(recreateMagicOnionTrigger, s3.BucketName, stackProps.BenchmarkBinaryNames, serviceDiscoveryServer.ServiceId, stackProps.UseEc2CloudWatchAgentProfiler, stackProps.UseEc2DatadogAgentProfiler); asg.AddUserData(userdata); asg.UserData.AddSignalOnExitCommand(asg); asg.Node.AddDependency(masterDllDeployment); asg.Node.AddDependency(userdataDeployment); if (stackProps.EnableMagicOnionScaleInCron) { asg.ScaleOnSchedule("ScheduleOut", new BasicScheduledActionProps { DesiredCapacity = 1, MaxCapacity = 1, // AM9:00 (JST+9) on Monday to Wednesday Schedule = Schedule.Expression("0 0 * 1-3 *"), }); asg.ScaleOnSchedule("ScheduleIn", new BasicScheduledActionProps { DesiredCapacity = 0, MaxCapacity = 0, // PM9:00 (JST+9) on Everyday Schedule = Schedule.Expression("0 12 * 1-7 *"), }); } if (benchNetwork.RequireAlb) { asg.AttachToApplicationTargetGroup(grpcTargetGroup); asg.AttachToApplicationTargetGroup(httpsTargetGroup); } // ECS var cluster = new Cluster(this, "WorkerCluster", new ClusterProps { Vpc = vpc, }); cluster.Node.AddDependency(asg); // wait until asg is up // dframe-worker var dframeWorkerContainerName = "worker"; var dframeWorkerTaskDef = new FargateTaskDefinition(this, "DFrameWorkerTaskDef", new FargateTaskDefinitionProps { ExecutionRole = iamEcsTaskExecuteRole, TaskRole = iamWorkerTaskDefRole, Cpu = stackProps.WorkerFargate.CpuSize, MemoryLimitMiB = stackProps.WorkerFargate.MemorySize, }); dframeWorkerTaskDef.AddContainer(dframeWorkerContainerName, new ContainerDefinitionOptions { Image = dframeImage, Command = new[] { "--worker-flag" }, Environment = new Dictionary <string, string> { { "DFRAME_MASTER_CONNECT_TO_HOST", $"{dframeMapName}.{serviceDiscoveryDomain}" }, { "DFRAME_MASTER_CONNECT_TO_PORT", "12345" }, { "BENCH_SERVER_HOST", benchToMagicOnionDnsName }, { "BENCH_REPORTID", stackProps.ReportId }, { "BENCH_S3BUCKET", s3.BucketName }, }, Logging = LogDriver.AwsLogs(new AwsLogDriverProps { LogGroup = new LogGroup(this, "WorkerLogGroup", new LogGroupProps { LogGroupName = dframeWorkerLogGroup, RemovalPolicy = RemovalPolicy.DESTROY, Retention = RetentionDays.TWO_WEEKS, }), StreamPrefix = dframeWorkerLogGroup, }), }); dframeWorkerTaskDef.AddDatadogContainer($"{dframeWorkerContainerName}-datadog", ddToken, () => stackProps.UseFargateDatadogAgentProfiler); var dframeWorkerService = new FargateService(this, "DFrameWorkerService", new FargateServiceProps { ServiceName = "DFrameWorkerService", DesiredCount = 0, Cluster = cluster, TaskDefinition = dframeWorkerTaskDef, VpcSubnets = singleSubnets, SecurityGroups = new[] { sg }, PlatformVersion = FargatePlatformVersion.VERSION1_4, MinHealthyPercent = 0, AssignPublicIp = true, }); // dframe-master var 
dframeMasterTaskDef = new FargateTaskDefinition(this, "DFrameMasterTaskDef", new FargateTaskDefinitionProps { ExecutionRole = iamEcsTaskExecuteRole, TaskRole = iamDFrameTaskDefRole, Cpu = stackProps.MasterFargate.CpuSize, MemoryLimitMiB = stackProps.MasterFargate.MemorySize, }); dframeMasterTaskDef.AddContainer("dframe", new ContainerDefinitionOptions { Image = dframeImage, Environment = new Dictionary <string, string> { { "DFRAME_CLUSTER_NAME", cluster.ClusterName }, { "DFRAME_MASTER_SERVICE_NAME", "DFrameMasterService" }, { "DFRAME_WORKER_CONTAINER_NAME", dframeWorkerContainerName }, { "DFRAME_WORKER_SERVICE_NAME", dframeWorkerService.ServiceName }, { "DFRAME_WORKER_TASK_NAME", Fn.Select(1, Fn.Split("/", dframeWorkerTaskDef.TaskDefinitionArn)) }, { "DFRAME_WORKER_IMAGE", dockerImage.ImageUri }, { "BENCH_REPORTID", stackProps.ReportId }, { "BENCH_S3BUCKET", s3.BucketName }, }, Logging = LogDriver.AwsLogs(new AwsLogDriverProps { LogGroup = new LogGroup(this, "MasterLogGroup", new LogGroupProps { LogGroupName = dframeMasterLogGroup, RemovalPolicy = RemovalPolicy.DESTROY, Retention = RetentionDays.TWO_WEEKS, }), StreamPrefix = dframeMasterLogGroup, }), }); dframeMasterTaskDef.AddDatadogContainer($"dframe-datadog", ddToken, () => stackProps.UseFargateDatadogAgentProfiler); var dframeMasterService = new FargateService(this, "DFrameMasterService", new FargateServiceProps { ServiceName = "DFrameMasterService", DesiredCount = 1, Cluster = cluster, TaskDefinition = dframeMasterTaskDef, VpcSubnets = singleSubnets, SecurityGroups = new[] { sg }, PlatformVersion = FargatePlatformVersion.VERSION1_4, MinHealthyPercent = 0, AssignPublicIp = true, }); dframeMasterService.EnableCloudMap(new CloudMapOptions { CloudMapNamespace = ns, Name = dframeMapName, DnsRecordType = DnsRecordType.A, DnsTtl = Duration.Seconds(300), }); // output new CfnOutput(this, "ReportUrl", new CfnOutputProps { Value = $"https://{s3.BucketRegionalDomainName}/html/{stackProps.ReportId}/index.html" }); new CfnOutput(this, "EndPointStyle", new CfnOutputProps { Value = stackProps.BenchmarkEndpoint.ToString() }); new CfnOutput(this, "AsgName", new CfnOutputProps { Value = asg.AutoScalingGroupName }); new CfnOutput(this, "EcsClusterName", new CfnOutputProps { Value = cluster.ClusterName }); new CfnOutput(this, "DFrameWorkerEcsTaskdefImage", new CfnOutputProps { Value = dockerImage.ImageUri }); }
internal NorthwindCdkStack(Construct scope, string id, IStackProps props = null) : base(scope, id, props) { var vpc = new Vpc(this, "LabVpc", new VpcProps { MaxAzs = 2 }); // SQL Server var sg = new SecurityGroup(this, "NorthwindDatabaseSecurityGroup", new SecurityGroupProps { Vpc = vpc, SecurityGroupName = "Northwind-DB-SG", AllowAllOutbound = false }); // !!!!!!!!!! replace IP according to the instructions above sg.AddIngressRule(Peer.Ipv4("35.171.193.180/32"), Port.Tcp(1433)); // SQL Server // !!!!!!!!!! var sql = new DatabaseInstance(this, "NorthwindSQLServer", new DatabaseInstanceProps { Vpc = vpc, InstanceIdentifier = "northwind-sqlserver", Engine = DatabaseInstanceEngine.SqlServerEx(new SqlServerExInstanceEngineProps { Version = SqlServerEngineVersion.VER_14 }), // SQL Server Express Credentials = Credentials.FromUsername("adminuser", new CredentialsFromUsernameOptions() { Password = new SecretValue("Admin12345?") }), //MasterUsername = "******", //MasterUserPassword = new SecretValue("Admin12345?"), InstanceType = InstanceType.Of(InstanceClass.BURSTABLE3, InstanceSize.SMALL), // t3.small SecurityGroups = new ISecurityGroup[] { sg }, MultiAz = false, VpcSubnets = new SubnetSelection() { SubnetType = SubnetType.PUBLIC }, // public subnet DeletionProtection = false, // you need to be able to delete database DeleteAutomatedBackups = true, BackupRetention = Duration.Days(0), RemovalPolicy = RemovalPolicy.DESTROY // you need to be able to delete database });; new CfnOutput(this, "SQLServerEndpointAddress", new CfnOutputProps { Value = sql.DbInstanceEndpointAddress }); // SQL Server connection string in Systems Manager Parameter Store new StringParameter(this, "NorthwindDatabaseConnectionString", new StringParameterProps { ParameterName = "/Northwind/ConnectionStrings/NorthwindDatabase", Type = ParameterType.STRING, Description = "SQL Server connection string", StringValue = string.Format("Server={0},1433;Integrated Security=false;User ID=adminuser;Password=Admin12345?;Initial Catalog=NorthwindTraders;", sql.DbInstanceEndpointAddress) }); // PostgreSQL setup // !!!!!!!!!! add 2 rules when you use provided VM, add 1 rule when you use your computer sg.AddIngressRule(Peer.Ipv4("35.171.193.180/32"), Port.Tcp(5432)); // PostgreSQL sg.AddIngressRule(Peer.Ipv4("3.238.53.13/32"), Port.Tcp(5432)); // PostgreSQL // !!!!!!!!!! 
var postgreSql = new DatabaseCluster(this, "NorthwindPostgreSQL", new DatabaseClusterProps { InstanceProps = new Amazon.CDK.AWS.RDS.InstanceProps { Vpc = vpc, InstanceType = InstanceType.Of(InstanceClass.BURSTABLE3, InstanceSize.MEDIUM), // t3.medium SecurityGroups = new ISecurityGroup[] { sg }, VpcSubnets = new SubnetSelection() { SubnetType = SubnetType.PUBLIC }, // you need to access database from your developer PC ParameterGroup = ParameterGroup.FromParameterGroupName(this, "DBInstanceParameterGroup", "default.aurora-postgresql11"), }, ParameterGroup = ParameterGroup.FromParameterGroupName(this, "DBClusterParameterGroup", "default.aurora-postgresql11"), ClusterIdentifier = "northwind-postgresql", Engine = DatabaseClusterEngine.AuroraPostgres(new AuroraPostgresClusterEngineProps { Version = AuroraPostgresEngineVersion.VER_11_6 }), // Aurora PostgreSQL Credentials = Credentials.FromUsername("adminuser", new CredentialsFromUsernameOptions { Password = new SecretValue("Admin12345?") }), //MasterUser = new Login //{ // Username = "******", // Password = new SecretValue("Admin12345?") //}, Instances = 1, Port = 5432, Backup = new BackupProps { Retention = Duration.Days(1) // minimum is 1 }, DefaultDatabaseName = "NorthwindTraders", InstanceIdentifierBase = "northwind-postgresql-instance", RemovalPolicy = RemovalPolicy.DESTROY // you need to be able to delete database, });; new CfnOutput(this, "PostgreSQLEndpointAddress", new CfnOutputProps { Value = postgreSql.ClusterEndpoint.Hostname }); // Aurora PostgreSQL connection string in Systems Manager Parameter Store new StringParameter(this, "NorthwindPostgreSQLDatabaseConnectionString", new StringParameterProps { ParameterName = "/Northwind/ConnectionStrings/NorthwindPostgreDatabase", Type = ParameterType.STRING, Description = "PostgreSQL connection string", StringValue = string.Format("Server={0};Database=NorthwindTraders;Username=adminuser;Password=Admin12345?", postgreSql.ClusterEndpoint.Hostname) }); }
private ISecret SetDatabasePassword(DatabaseOptions databaseOption) { ISecret passwordSecret; if (string.IsNullOrWhiteSpace(databaseOption.Password)) { if (databaseOption.Secrets.ContainsKey(DatabaseOptionConsts.PasswordAttributeName)) { if (!StackResources.Lambdas.TryGetValue(databaseOption.RotationLambdaId, out var rotationLambda)) { throw new ArgumentException($"The database {databaseOption.Id} lambda id {databaseOption.RotationLambdaId} was not found"); } var rotationPeriod = databaseOption.PasswordRotationDaysPeriod.HasValue ? Duration.Days(databaseOption.PasswordRotationDaysPeriod.Value) : null; passwordSecret = AwsCdkHandler.AddSecret(databaseOption.Secrets[DatabaseOptionConsts.PasswordAttributeName], rotationPeriod: rotationPeriod, rotationLambda: rotationLambda); } else { throw new ArgumentException($"The database {databaseOption.DatabaseName} has no secret to store the password"); } } else { passwordSecret = null; } return(passwordSecret); }
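For reference, the secret-plus-rotation wiring that the AddSecret wrapper above encapsulates can also be expressed directly with the CDK Secrets Manager constructs; this is only a sketch with assumed names (scope and rotationLambda are placeholders), not the project's actual AddSecret implementation.

var passwordSecret = new Secret(scope, "database-password-secret", new SecretProps
{
    SecretName = "database-password-secret"
});
passwordSecret.AddRotationSchedule("password-rotation", new RotationScheduleOptions
{
    RotationLambda = rotationLambda,        // an existing IFunction
    AutomaticallyAfter = Duration.Days(30)  // rotate the secret every 30 days
});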