/// <summary>
/// Convert AzureHDInsightAutoscaleConfiguration to Autoscale.
/// </summary>
/// <returns>An <see cref="Autoscale"/> carrying the same capacity and recurrence settings;
/// either part is left null when not configured on this object.</returns>
public Autoscale ToAutoscale()
{
    var autoscale = new Autoscale();

    if (Capacity != null)
    {
        autoscale.Capacity = new AutoscaleCapacity(Capacity.MinInstanceCount, Capacity.MaxInstanceCount);
    }

    if (Recurrence != null)
    {
        // Condition may be null; the null-conditional keeps the schedule list null in that case.
        var schedules = Recurrence.Condition?.Select(condition => condition.ToAutoscaleSchedule()).ToList();
        autoscale.Recurrence = new AutoscaleRecurrence(Recurrence.TimeZone, schedules);
    }

    return autoscale;
}
/// <summary>
/// Validate that an actual autoscale configuration matches the expected one.
/// </summary>
/// <param name="expectedAutoscaleConfiguration">The autoscale configuration that was requested.</param>
/// <param name="actualAutoscaleConfiguration">The autoscale configuration returned by the service.</param>
public static void ValidateAutoScaleConfig(Autoscale expectedAutoscaleConfiguration, Autoscale actualAutoscaleConfiguration)
{
    Assert.NotNull(actualAutoscaleConfiguration);

    var expectedCapacity = expectedAutoscaleConfiguration.Capacity;
    var actualCapacity = actualAutoscaleConfiguration.Capacity;
    if (expectedCapacity == null || actualCapacity == null)
    {
        // Either side missing: both must be null (Assert.Equal fails on a mismatch).
        Assert.Equal(expectedCapacity, actualCapacity);
    }
    else
    {
        Assert.Equal(expectedCapacity.MinInstanceCount, actualCapacity.MinInstanceCount);
        Assert.Equal(expectedCapacity.MaxInstanceCount, actualCapacity.MaxInstanceCount);
    }

    var expectedRecurrence = expectedAutoscaleConfiguration.Recurrence;
    var actualRecurrence = actualAutoscaleConfiguration.Recurrence;
    if (expectedRecurrence == null || actualRecurrence == null)
    {
        // Either side missing: both must be null (Assert.Equal fails on a mismatch).
        Assert.Equal(expectedRecurrence, actualRecurrence);
        return;
    }

    Assert.Equal(expectedRecurrence.TimeZone, actualRecurrence.TimeZone);
    Assert.NotNull(expectedRecurrence.Schedule);
    Assert.NotNull(actualRecurrence.Schedule);
    Assert.Equal(expectedRecurrence.Schedule.Count, actualRecurrence.Schedule.Count);
    Assert.NotEmpty(expectedRecurrence.Schedule);

    for (int i = 0; i < expectedRecurrence.Schedule.Count; i++)
    {
        var expectedSchedule = expectedRecurrence.Schedule[i];
        var actualSchedule = actualRecurrence.Schedule[i];

        Assert.Equal(expectedSchedule.Days, actualSchedule.Days);
        Assert.NotNull(expectedSchedule.TimeAndCapacity);
        Assert.NotNull(actualSchedule.TimeAndCapacity);
        Assert.Equal(expectedSchedule.TimeAndCapacity.Time, actualSchedule.TimeAndCapacity.Time);
        Assert.Equal(expectedSchedule.TimeAndCapacity.MinInstanceCount, actualSchedule.TimeAndCapacity.MinInstanceCount);

        /*
         * Note: You may find that we don't compare expectedSchedule.TimeAndCapacity.MaxInstanceCount
         * with actualSchedule.TimeAndCapacity.MaxInstanceCount here.
         * This is not an error. We do this intentionally.
         * The reason is that now RP will not make use of the parameter
         * "expectedSchedule.TimeAndCapacity.MaxInstanceCount" when create cluster.
         * And the actualSchedule.TimeAndCapacity.MaxInstanceCount is equal with
         * expectedSchedule.TimeAndCapacity.MinInstanceCount now.
         * We are not sure whether RP will change this design or not in the future.
         * So we decided not to compare.
         */
    }
}
/// <summary>
/// Executes the cluster-create cmdlet: assembles legacy ClusterCreateParameters from
/// the bound cmdlet arguments and submits the create request, writing the resulting
/// cluster to the pipeline.
/// </summary>
public override void ExecuteCmdlet()
{
    // The gateway (HTTP) credential is mandatory for every cluster.
    parameters.UserName = HttpCredential.UserName;
    parameters.Password = HttpCredential.Password.ConvertToString();

    if (RdpCredential != null)
    {
        parameters.RdpUsername = RdpCredential.UserName;
        parameters.RdpPassword = RdpCredential.Password.ConvertToString();
    }

    if (SshCredential != null)
    {
        parameters.SshUserName = SshCredential.UserName;
        // Either an SSH password or a public key may be supplied; set whichever is present.
        if (!string.IsNullOrEmpty(SshCredential.Password.ConvertToString()))
        {
            parameters.SshPassword = SshCredential.Password.ConvertToString();
        }
        if (!string.IsNullOrEmpty(SshPublicKey))
        {
            parameters.SshPublicKey = SshPublicKey;
        }
    }

    // Default storage: Azure Storage (blob) unless Data Lake Store was explicitly requested.
    if (DefaultStorageAccountType == null || DefaultStorageAccountType == StorageType.AzureStorage)
    {
        parameters.DefaultStorageInfo = new AzureStorageInfo(DefaultStorageAccountName, DefaultStorageAccountKey, DefaultStorageContainer);
    }
    else
    {
        parameters.DefaultStorageInfo = new AzureDataLakeStoreInfo(DefaultStorageAccountName, DefaultStorageRootPath);
    }

    // Merge additional storage accounts not already present.
    foreach (var storageAccount in AdditionalStorageAccounts.Where(
        storageAccount => !parameters.AdditionalStorageAccounts.ContainsKey(storageAccount.Key)))
    {
        parameters.AdditionalStorageAccounts.Add(storageAccount.Key, storageAccount.Value);
    }

    // Merge configurations not already present.
    foreach (var config in Configurations.Where(config => !parameters.Configurations.ContainsKey(config.Key)))
    {
        parameters.Configurations.Add(config.Key, config.Value);
    }

    // BUG FIX: the filter previously kept only keys that were ALREADY present in
    // parameters.ScriptActions, so Add() either threw ArgumentException on the duplicate
    // key or never ran at all. Merge only missing keys, consistent with the other
    // dictionary merges in this method.
    foreach (var action in ScriptActions.Where(action => !parameters.ScriptActions.ContainsKey(action.Key)))
    {
        parameters.ScriptActions.Add(action.Key, action.Value.Select(a => a.GetScriptActionFromPSModel()).ToList());
    }

    // Merge component versions not already present.
    foreach (var component in ComponentVersion.Where(component => !parameters.ComponentVersion.ContainsKey(component.Key)))
    {
        parameters.ComponentVersion.Add(component.Key, component.Value);
    }

    if (OozieMetastore != null)
    {
        var metastore = OozieMetastore;
        parameters.OozieMetastore = new Metastore(metastore.SqlAzureServerName, metastore.DatabaseName,
            metastore.Credential.UserName, metastore.Credential.Password.ConvertToString());
    }

    if (HiveMetastore != null)
    {
        var metastore = HiveMetastore;
        parameters.HiveMetastore = new Metastore(metastore.SqlAzureServerName, metastore.DatabaseName,
            metastore.Credential.UserName, metastore.Credential.Password.ConvertToString());
    }

    // Certificate-based service principal (used for Data Lake access).
    if (!string.IsNullOrEmpty(CertificatePassword))
    {
        if (!string.IsNullOrEmpty(CertificateFilePath))
        {
            CertificateFileContents = File.ReadAllBytes(CertificateFilePath);
        }

        var servicePrincipal = new Management.HDInsight.Models.ServicePrincipal(
            GetApplicationId(ApplicationId), GetTenantId(AadTenantId), CertificateFileContents, CertificatePassword);
        parameters.Principal = servicePrincipal;
    }

    if (SecurityProfile != null)
    {
        parameters.SecurityProfile = new SecurityProfile()
        {
            DirectoryType = DirectoryType.ActiveDirectory,
            Domain = SecurityProfile.Domain,
            DomainUsername = SecurityProfile.DomainUserCredential != null ?
                SecurityProfile.DomainUserCredential.UserName : null,
            DomainUserPassword = SecurityProfile.DomainUserCredential != null && SecurityProfile.DomainUserCredential.Password != null ?
                SecurityProfile.DomainUserCredential.Password.ConvertToString() : null,
            OrganizationalUnitDN = SecurityProfile.OrganizationalUnitDN,
            LdapsUrls = SecurityProfile.LdapsUrls,
            ClusterUsersGroupDNs = SecurityProfile.ClusterUsersGroupDNs
        };
    }

    if (DisksPerWorkerNode > 0)
    {
        parameters.WorkerNodeDataDisksGroups = new List<DataDisksGroups>()
        {
            new DataDisksGroups()
            {
                DisksPerNode = DisksPerWorkerNode
            }
        };
    }

    // Customer-managed key disk encryption requires key name/version/vault AND a user-assigned identity.
    if (EncryptionKeyName != null && EncryptionKeyVersion != null && EncryptionVaultUri != null && AssignedIdentity != null)
    {
        parameters.ClusterIdentity = new ClusterIdentity
        {
            Type = ResourceIdentityType.UserAssigned,
            UserAssignedIdentities = new Dictionary<string, ClusterIdentityUserAssignedIdentitiesValue>
            {
                { AssignedIdentity, new ClusterIdentityUserAssignedIdentitiesValue() }
            }
        };
        parameters.DiskEncryptionProperties = new DiskEncryptionProperties()
        {
            KeyName = EncryptionKeyName,
            KeyVersion = EncryptionKeyVersion,
            VaultUri = EncryptionVaultUri,
            // RSA-OAEP is the service default when no algorithm is specified.
            EncryptionAlgorithm = EncryptionAlgorithm != null ? EncryptionAlgorithm : JsonWebKeyEncryptionAlgorithm.RSAOAEP,
            MsiResourceId = AssignedIdentity
        };
    }

    // Encryption-at-host can ride on the CMK properties or stand alone.
    if (EncryptionAtHost != null)
    {
        if (parameters.DiskEncryptionProperties != null)
        {
            parameters.DiskEncryptionProperties.EncryptionAtHost = EncryptionAtHost;
        }
        else
        {
            parameters.DiskEncryptionProperties = new DiskEncryptionProperties()
            {
                EncryptionAtHost = EncryptionAtHost
            };
        }
    }

    Autoscale autoscaleParameter = null;
    if (AutoscaleConfiguration != null)
    {
        autoscaleParameter = AutoscaleConfiguration.ToAutoscale();
    }

    var cluster = HDInsightManagementClient.CreateNewCluster(ResourceGroupName, ClusterName, OSType, parameters,
        MinSupportedTlsVersion, this.DefaultContext.Environment.ActiveDirectoryAuthority,
        this.DefaultContext.Environment.DataLakeEndpointResourceId, PublicNetworkAccessType,
        OutboundPublicNetworkAccessType, EncryptionInTransit, autoscaleParameter);

    if (cluster != null)
    {
        WriteObject(new AzureHDInsightCluster(cluster));
    }
}
/// <summary>
/// Executes the cluster-create cmdlet using the extended (ARM) create parameters:
/// builds profiles (OS, network, storage, compute, security), merges user-supplied
/// dictionaries, wires optional features (Kafka REST proxy, data disks, CMK,
/// encryption at host, autoscale), then submits the create request.
/// </summary>
public override void ExecuteCmdlet()
{
    // Merge component versions not already present.
    foreach (var component in ComponentVersion.Where(component => !clusterComponentVersion.ContainsKey(component.Key)))
    {
        clusterComponentVersion.Add(component.Key, component.Value);
    }

    // Construct Configurations
    foreach (var config in Configurations.Where(config => !clusterConfigurations.ContainsKey(config.Key)))
    {
        clusterConfigurations.Add(config.Key, config.Value);
    }

    // Add cluster username/password to gateway config.
    ClusterCreateHelper.AddClusterCredentialToGatewayConfig(HttpCredential, clusterConfigurations);

    // Construct OS Profile
    OsProfile osProfile = ClusterCreateHelper.CreateOsProfile(SshCredential, SshPublicKey);

    // Construct Virtual Network Profile
    VirtualNetworkProfile vnetProfile = ClusterCreateHelper.CreateVirtualNetworkProfile(VirtualNetworkId, SubnetName);

    // Handle storage account: Azure Storage is the default; ADLS Gen1 goes into core
    // configuration instead of the storage profile; ADLS Gen2 is a storage account entry.
    StorageProfile storageProfile = new StorageProfile() { Storageaccounts = new List<StorageAccount> { } };
    if (StorageAccountType == null || StorageAccountType == StorageType.AzureStorage)
    {
        var azureStorageAccount = ClusterCreateHelper.CreateAzureStorageAccount(ClusterName, StorageAccountResourceId,
            StorageAccountKey, StorageContainer, this.DefaultContext.Environment.StorageEndpointSuffix);
        storageProfile.Storageaccounts.Add(azureStorageAccount);
    }
    else if (StorageAccountType == StorageType.AzureDataLakeStore)
    {
        ClusterCreateHelper.AddAzureDataLakeStorageGen1ToCoreConfig(StorageAccountResourceId, StorageRootPath,
            this.DefaultContext.Environment.AzureDataLakeStoreFileSystemEndpointSuffix, clusterConfigurations);
    }
    else if (StorageAccountType == StorageType.AzureDataLakeStorageGen2)
    {
        var adlsgen2Account = ClusterCreateHelper.CreateAdlsGen2StorageAccount(ClusterName, StorageAccountResourceId,
            StorageAccountKey, StorageFileSystem, StorageAccountManagedIdentity, this.DefaultContext.Environment.StorageEndpointSuffix);
        storageProfile.Storageaccounts.Add(adlsgen2Account);
    }

    // Handle additional storage accounts
    foreach (var storageAccount in AdditionalStorageAccounts.Where(
        storageAccount => !clusterAdditionalStorageAccounts.ContainsKey(storageAccount.Key)))
    {
        clusterAdditionalStorageAccounts.Add(storageAccount.Key, storageAccount.Value);
    }
    ClusterCreateHelper.AddAdditionalStorageAccountsToCoreConfig(clusterAdditionalStorageAccounts, clusterConfigurations);

    // Handle script action.
    // BUG FIX: the filter previously kept only keys that were ALREADY present in
    // clusterScriptActions, so Add() either threw ArgumentException on the duplicate key
    // or never ran. Merge only missing keys, consistent with the other merges above.
    foreach (var action in ScriptActions.Where(action => !clusterScriptActions.ContainsKey(action.Key)))
    {
        clusterScriptActions.Add(action.Key, action.Value.Select(a => a.GetScriptActionFromPSModel()).ToList());
    }

    // Handle metastore
    if (OozieMetastore != null)
    {
        ClusterCreateHelper.AddOozieMetastoreToConfigurations(OozieMetastore, clusterConfigurations);
    }
    if (HiveMetastore != null)
    {
        ClusterCreateHelper.AddHiveMetastoreToConfigurations(HiveMetastore, clusterConfigurations);
    }

    // Handle ADLSGen1 identity
    if (!string.IsNullOrEmpty(CertificatePassword))
    {
        if (!string.IsNullOrEmpty(CertificateFilePath))
        {
            CertificateFileContents = File.ReadAllBytes(CertificateFilePath);
        }

        ClusterCreateHelper.AddDataLakeStorageGen1IdentityToIdentityConfig(
            GetApplicationId(ApplicationId), GetTenantId(AadTenantId), CertificateFileContents, CertificatePassword,
            clusterConfigurations, this.DefaultContext.Environment.ActiveDirectoryAuthority,
            this.DefaultContext.Environment.DataLakeEndpointResourceId);
    }

    // Handle Kafka Rest Proxy
    KafkaRestProperties kafkaRestProperties = null;
    if (KafkaClientGroupId != null && KafkaClientGroupName != null)
    {
        kafkaRestProperties = new KafkaRestProperties()
        {
            ClientGroupInfo = new ClientGroupInfo(KafkaClientGroupName, KafkaClientGroupId)
        };
    }

    // Compute profile contains headnode, workernode, zookeepernode, edgenode, kafkamanagementnode, idbrokernode, etc.
    ComputeProfile computeProfile = ClusterCreateHelper.CreateComputeProfile(osProfile, vnetProfile, clusterScriptActions,
        ClusterType, ClusterSizeInNodes, HeadNodeSize, WorkerNodeSize, ZookeeperNodeSize, EdgeNodeSize,
        KafkaManagementNodeSize, EnableIDBroker.IsPresent);

    // Handle SecurityProfile
    SecurityProfile securityProfile = ClusterCreateHelper.ConvertAzureHDInsightSecurityProfileToSecurityProfile(SecurityProfile, AssignedIdentity);

    // Handle DisksPerWorkerNode feature
    Role workerNode = Utils.ExtractRole(ClusterNodeType.WorkerNode.ToString(), computeProfile);
    if (DisksPerWorkerNode > 0)
    {
        workerNode.DataDisksGroups = new List<DataDisksGroups>()
        {
            new DataDisksGroups()
            {
                DisksPerNode = DisksPerWorkerNode
            }
        };
    }

    // Handle ClusterIdentity: collect user-assigned identities for CMK and/or ADLS Gen2 access.
    ClusterIdentity clusterIdentity = null;
    if (AssignedIdentity != null || StorageAccountManagedIdentity != null)
    {
        clusterIdentity = new ClusterIdentity
        {
            Type = ResourceIdentityType.UserAssigned,
            UserAssignedIdentities = new Dictionary<string, ClusterIdentityUserAssignedIdentitiesValue>()
        };
        if (AssignedIdentity != null)
        {
            clusterIdentity.UserAssignedIdentities.Add(AssignedIdentity, new ClusterIdentityUserAssignedIdentitiesValue());
        }
        if (StorageAccountManagedIdentity != null)
        {
            clusterIdentity.UserAssignedIdentities.Add(StorageAccountManagedIdentity, new ClusterIdentityUserAssignedIdentitiesValue());
        }
    }

    // Handle CMK (customer-managed key) feature
    DiskEncryptionProperties diskEncryptionProperties = null;
    if (EncryptionKeyName != null && EncryptionKeyVersion != null && EncryptionVaultUri != null)
    {
        diskEncryptionProperties = new DiskEncryptionProperties()
        {
            KeyName = EncryptionKeyName,
            KeyVersion = EncryptionKeyVersion,
            VaultUri = EncryptionVaultUri,
            // RSA-OAEP is the service default when no algorithm is specified.
            EncryptionAlgorithm = EncryptionAlgorithm != null ? EncryptionAlgorithm : JsonWebKeyEncryptionAlgorithm.RSAOAEP,
            MsiResourceId = AssignedIdentity
        };
    }

    // Handle encryption at host feature
    if (EncryptionAtHost != null)
    {
        if (diskEncryptionProperties != null)
        {
            diskEncryptionProperties.EncryptionAtHost = EncryptionAtHost;
        }
        else
        {
            diskEncryptionProperties = new DiskEncryptionProperties()
            {
                EncryptionAtHost = EncryptionAtHost
            };
        }
    }

    // Handle autoscale feature
    Autoscale autoscaleParameter = null;
    if (AutoscaleConfiguration != null)
    {
        autoscaleParameter = AutoscaleConfiguration.ToAutoscale();
        workerNode.AutoscaleConfiguration = autoscaleParameter;
    }

    // Construct cluster create parameter
    ClusterCreateParametersExtended createParams = new ClusterCreateParametersExtended
    {
        Location = Location,
        //Tags = Tags, //To Do add this Tags parameter
        Properties = new ClusterCreateProperties
        {
            Tier = ClusterTier,
            ClusterDefinition = new ClusterDefinition
            {
                Kind = ClusterType ?? "Hadoop",
                ComponentVersion = clusterComponentVersion,
                Configurations = clusterConfigurations
            },
            ClusterVersion = Version ?? "default",
            KafkaRestProperties = kafkaRestProperties,
            ComputeProfile = computeProfile,
            OsType = OSType,
            SecurityProfile = securityProfile,
            StorageProfile = storageProfile,
            DiskEncryptionProperties = diskEncryptionProperties,
            // Handle Encryption In Transit feature
            EncryptionInTransitProperties = EncryptionInTransit != null ? new EncryptionInTransitProperties()
            {
                IsEncryptionInTransitEnabled = EncryptionInTransit
            } : null,
            MinSupportedTlsVersion = MinSupportedTlsVersion
        },
        Identity = clusterIdentity
    };

    var cluster = HDInsightManagementClient.CreateCluster(ResourceGroupName, ClusterName, createParams);
    if (cluster != null)
    {
        WriteObject(new AzureHDInsightCluster(cluster));
    }
}
/// <summary>
/// Updates the autoscale configuration of an existing HDInsight cluster.
/// Resolves cluster name/resource group from -ResourceId or -InputObject when bound,
/// fetches the cluster's current worker-node autoscale settings, mutates them according
/// to the active parameter set (load-based, schedule-based, or whole-object), and
/// submits the update.
/// </summary>
public override void ExecuteCmdlet()
{
    // Resolve cluster identity from a resource ID if that parameter set was used.
    if (this.IsParameterBound(c => c.ResourceId))
    {
        var resourceIdentifier = new ResourceIdentifier(ResourceId);
        this.ClusterName = resourceIdentifier.ResourceName;
        this.ResourceGroupName = resourceIdentifier.ResourceGroupName;
    }
    // Or from a piped-in cluster object.
    if (this.IsParameterBound(c => c.InputObject))
    {
        this.ClusterName = this.InputObject.Name;
        this.ResourceGroupName = this.InputObject.ResourceGroup;
    }
    // Name-only invocation: look up the resource group for the cluster.
    if (ClusterName != null && ResourceGroupName == null)
    {
        ResourceGroupName = GetResourceGroupByAccountName(ClusterName);
    }

    // Start from the cluster's current worker-node autoscale config so partial
    // updates (e.g. only -MinWorkerNodeCount) preserve the other settings.
    var clusterBeforeUpdate = HDInsightManagementClient.Get(ResourceGroupName, ClusterName);
    Autoscale autoscaleConfig = Utils.ExtractWorkerNode(clusterBeforeUpdate)?.AutoscaleConfiguration;
    if (autoscaleConfig == null)
    {
        autoscaleConfig = new Autoscale();
    }

    switch (ParameterSetName)
    {
        case LoadAutoscaleByNameParameterSet:
        case LoadAutoscaleByResourceIdParameterSet:
        case LoadAutoscaleByInputObjectParameterSet:
            // override Recurrence to support switch from schedule to load
            autoscaleConfig.Recurrence = null;
            if (autoscaleConfig.Capacity == null)
            {
                autoscaleConfig.Capacity = new AutoscaleCapacity(MinWorkerNodeCount, MaxWorkerNodeCount);
            }
            else
            {
                // Only overwrite the bounds the caller explicitly bound.
                if (this.IsParameterBound(c => c.MinWorkerNodeCount))
                {
                    autoscaleConfig.Capacity.MinInstanceCount = MinWorkerNodeCount;
                }
                if (this.IsParameterBound(c => c.MaxWorkerNodeCount))
                {
                    autoscaleConfig.Capacity.MaxInstanceCount = MaxWorkerNodeCount;
                }
            }
            break;
        case ScheduleAutoscaleByNameParameterSet:
        case ScheduleAutoscaleByResourceIdParameterSet:
        case ScheduleAutoscaleByInputObjectParameterSet:
            // override Capacity to support switch from Load to Schedule
            autoscaleConfig.Capacity = null;
            if (autoscaleConfig.Recurrence == null)
            {
                var schedules = Condition?.Select(conditon => conditon.ToAutoscaleSchedule()).ToList();
                autoscaleConfig.Recurrence = new AutoscaleRecurrence(TimeZone, schedules);
            }
            else
            {
                // Only overwrite the pieces the caller explicitly bound.
                if (this.IsParameterBound(c => c.TimeZone))
                {
                    autoscaleConfig.Recurrence.TimeZone = TimeZone;
                }
                if (this.IsParameterBound(c => c.Condition))
                {
                    autoscaleConfig.Recurrence.Schedule = Condition?.Select(conditon => conditon.ToAutoscaleSchedule()).ToList();
                }
            }
            break;
        case AutoscaleConfigurationByNameParameterSet:
        case AutoscaleConfigurationByResourceIdParameterSet:
        case AutoscaleConfigurationByInputObjectParameterSet:
            // Replace wholesale with the supplied configuration object.
            // NOTE(review): a null AutoscaleConfiguration yields a null config here —
            // presumably this disables autoscale; confirm against the service behavior.
            autoscaleConfig = AutoscaleConfiguration?.ToAutoscale();
            break;
        default:
            break;
    }

    if (ShouldProcess(ClusterName))
    {
        HDInsightManagementClient.UpdateAutoScaleConfiguration(ResourceGroupName, ClusterName, new AutoscaleConfigurationUpdateParameter(autoscaleConfig));
        // Re-fetch so the object written to the pipeline reflects the applied update.
        Cluster cluster = HDInsightManagementClient.Get(ResourceGroupName, ClusterName);
        WriteObject(new AzureHDInsightCluster(cluster));
    }
}
/// <summary>
/// Creates a new HDInsight cluster from legacy <see cref="ClusterCreateParameters"/>,
/// converting them to the extended (ARM) form and layering on TLS, identity,
/// encryption-in-transit, network, and autoscale settings.
/// </summary>
/// <param name="resourceGroupName">Resource group to create the cluster in.</param>
/// <param name="clusterName">Name of the cluster.</param>
/// <param name="osType">Cluster operating system.</param>
/// <param name="parameters">Legacy create parameters to convert.</param>
/// <param name="minSupportedTlsVersion">Minimum TLS version, or null for the service default.</param>
/// <param name="cloudAadAuthority">AAD authority used when resetting the cluster identity.</param>
/// <param name="cloudDataLakeAudience">Data Lake audience used when resetting the cluster identity.</param>
/// <param name="PublicNetworkAccessType">Public network access setting, or null/empty to omit.</param>
/// <param name="OutboundOnlyNetworkAccessType">Outbound-only public network access setting, or null/empty to omit.</param>
/// <param name="EnableEncryptionInTransit">Whether to enable encryption in transit; null leaves it unset.</param>
/// <param name="autoscaleParameter">Autoscale configuration applied to the worker node role, or null.</param>
/// <returns>The created cluster.</returns>
public virtual Cluster CreateNewCluster(string resourceGroupName, string clusterName, OSType osType, ClusterCreateParameters parameters,
    string minSupportedTlsVersion = default(string), string cloudAadAuthority = default(string), string cloudDataLakeAudience = default(string),
    string PublicNetworkAccessType = default(string), string OutboundOnlyNetworkAccessType = default(string),
    bool? EnableEncryptionInTransit = default(bool?), Autoscale autoscaleParameter = null)
{
    var createParams = CreateParametersConverter.GetExtendedClusterCreateParameters(clusterName, parameters);
    createParams.Properties.OsType = osType;
    createParams.Properties.MinSupportedTlsVersion = minSupportedTlsVersion;
    ResetClusterIdentity(createParams, cloudAadAuthority, cloudDataLakeAudience);

    if (EnableEncryptionInTransit.HasValue)
    {
        createParams.Properties.EncryptionInTransitProperties = new EncryptionInTransitProperties()
        {
            IsEncryptionInTransitEnabled = EnableEncryptionInTransit
        };
    }

    if (!string.IsNullOrEmpty(PublicNetworkAccessType) || !string.IsNullOrEmpty(OutboundOnlyNetworkAccessType))
    {
        NetworkSettings networkSettings = new NetworkSettings()
        {
            PublicNetworkAccess = PublicNetworkAccessType,
            OutboundOnlyPublicNetworkAccessType = OutboundOnlyNetworkAccessType
        };
        createParams.Properties.NetworkSettings = networkSettings;
    }

    if (autoscaleParameter != null)
    {
        // BUG FIX: FirstOrDefault returns null when no "workernode" role exists; the
        // previous code dereferenced the result unconditionally and would throw
        // NullReferenceException. Guard before assigning.
        var workerNode = createParams.Properties.ComputeProfile.Roles.FirstOrDefault(role => role.Name.Equals("workernode"));
        if (workerNode != null)
        {
            workerNode.AutoscaleConfiguration = autoscaleParameter;
        }
    }

    return HdInsightManagementClient.Clusters.Create(resourceGroupName, clusterName, createParams);
}
/// <summary>
/// Initializes a new AzureHDInsightAutoscale from an SDK <see cref="Autoscale"/> model.
/// </summary>
/// <param name="autoscale">SDK autoscale model; may be null, in which case both
/// Capacity and Recurrence stay null.</param>
public AzureHDInsightAutoscale(Autoscale autoscale)
{
    if (autoscale?.Capacity != null)
    {
        Capacity = new AzureHDInsightAutoscaleCapacity(autoscale.Capacity);
    }
    else
    {
        Capacity = null;
    }

    if (autoscale?.Recurrence != null)
    {
        Recurrence = new AzureHDInsightAutoscaleRecurrence(autoscale.Recurrence);
    }
    else
    {
        Recurrence = null;
    }
}