/// <summary>
/// Verifies that an MLServices cluster type converts to the extended spec
/// regardless of the casing used for the cluster type string.
/// </summary>
public void CanConvertMLServicesCluster()
{
    ClusterCreateParameters parameters = GetClusterCreateParamsWithMinRequiredValues();
    parameters.ClusterType = "MlServicEs";
    ExtendedParameterValidators.ValidateSpecConversion(parameters);
}
/// <summary>
/// Verifies that an RServer cluster type converts to the extended spec
/// regardless of the casing used for the cluster type string.
/// </summary>
public void CanConvertRServerCluster()
{
    ClusterCreateParameters parameters = GetClusterCreateParamsWithMinRequiredValues();
    parameters.ClusterType = "RsErVeR";
    ExtendedParameterValidators.ValidateSpecConversion(parameters);
}
/// <summary>
/// Resolves the head node VM size: an explicitly requested size wins,
/// otherwise a default is chosen from the cluster type.
/// </summary>
/// <param name="clusterCreateParameters">The cluster create request.</param>
/// <returns>The VM size string to use for head nodes.</returns>
private static string GetHeadNodeSize(ClusterCreateParameters clusterCreateParameters)
{
    // Caller-specified size takes precedence over any type-based default.
    if (clusterCreateParameters.HeadNodeSize != null)
    {
        return clusterCreateParameters.HeadNodeSize;
    }

    if (clusterCreateParameters.ClusterType == HDInsightClusterType.Hadoop)
    {
        return "Standard_D3";
    }

    if (clusterCreateParameters.ClusterType == HDInsightClusterType.Spark)
    {
        return "Standard_D12";
    }

    // All other cluster types fall back to the legacy "Large" size.
    return "Large";
}
//Create a new HDI cluster
/// <summary>
/// Creates an HDInsight cluster using a management certificate looked up by
/// thumbprint, then lists the clusters on the subscription.
/// </summary>
public static void CreateCluster()
{
    // Find the management certificate in the user's certificate store.
    var store = new X509Store();
    store.Open(OpenFlags.ReadOnly);
    var cert = store.Certificates.Cast<X509Certificate2>().First(item => item.Thumbprint == Constants.thumbprint);
    // Bug fix: close the store once the certificate has been retrieved; the
    // original left the store handle open for the lifetime of the process.
    store.Close();

    var creds = new HDInsightCertificateCredential(Constants.subscriptionId, cert);
    var client = HDInsightClient.Connect(creds);

    //Cluster information
    var clusterInfo = new ClusterCreateParameters()
    {
        Name = "AutomatedHDICluster",
        Location = "West Europe",
        DefaultStorageAccountName = Constants.storageAccount,
        DefaultStorageAccountKey = Constants.storageAccountKey,
        DefaultStorageContainer = Constants.container,
        UserName = Constants.clusterUser,
        Password = Constants.clusterPassword,
        ClusterSizeInNodes = 2,
        Version = "2.1"
    };

    Console.Write("Creating cluster...");
    var clusterDetails = client.CreateCluster(clusterInfo);
    Console.Write("Done\n");
    ListClusters();
}
/// <summary>
/// End-to-end create test for a default (Humboldt) IaaS cluster.
/// </summary>
public void TestCreateHumboldtCluster()
{
    const string testName = "TestCreateHumboldtCluster";
    ClusterCreateParameters createParameters = ClusterCreateParametersHelpers.GetCustomCreateParametersIaas(testName);
    RunCreateClusterTestInNewResourceGroup(GetType().FullName, testName, "hdisdk-humboldt", createParameters);
}
/// <summary>
/// End-to-end create test for a cluster backed by Azure Data Lake storage.
/// </summary>
public void TestCreateWithDataLakeStorage()
{
    const string testName = "TestCreateWithDataLakeStorage";
    ClusterCreateParameters createParameters = ClusterCreateParametersHelpers.GetCustomCreateParametersForAdl(testName);
    RunCreateClusterTestInNewResourceGroup(GetType().FullName, testName, "hdisdk-adl", createParameters);
}
/// <summary>
/// Wraps the wire-format cluster create parameters in an RDFE resource input
/// for submission to the resource provider.
/// </summary>
/// <param name="wireCreateParameters">The wire create parameters.</param>
/// <param name="schemaVersion">The schema version for the RDFE resource.</param>
/// <returns>An RDFE resource whose intrinsic settings carry the serialized parameters.</returns>
/// <exception cref="System.ArgumentNullException">Thrown if either argument is null.</exception>
public static RDFEResource CreateRdfeResourceInputFromWireInput(ClusterCreateParameters wireCreateParameters, string schemaVersion)
{
    if (wireCreateParameters == null)
    {
        throw new ArgumentNullException("wireCreateParameters");
    }
    if (schemaVersion == null)
    {
        throw new ArgumentNullException("schemaVersion");
    }

    // Serialize the parameters to XML, then re-parse into a DOM so the
    // document element can be embedded as the resource's intrinsic settings.
    string serialized = wireCreateParameters.SerializeAndOptionallyWriteToStream();
    var document = new XmlDocument();
    using (var textReader = new StringReader(serialized))
    using (var xmlReader = XmlReader.Create(textReader))
    {
        document.Load(xmlReader);
    }

    return new RDFEResource
    {
        SchemaVersion = schemaVersion,
        IntrinsicSettings = new XmlNode[] { document.DocumentElement }
    };
}
/// <summary>
/// Initializes the cmdlet with empty parameter and collection state so
/// callers can populate entries without null checks.
/// </summary>
public NewAzureHDInsightClusterCommand()
{
    ScriptActions = new Dictionary<ClusterNodeType, List<AzureHDInsightScriptAction>>();
    Configurations = new Dictionary<string, Dictionary<string, string>>();
    AdditionalStorageAccounts = new Dictionary<string, string>();
    parameters = new ClusterCreateParameters();
}
/// <summary>
/// Verifies that ListByResourceGroup reflects clusters created in the group:
/// initially absent, then present after two clusters are created in parallel.
/// </summary>
public void TestListClustersInResourceGroup()
{
    string suiteName = GetType().FullName;
    string testName = "TestListClustersInResourceGroup";
    HDInsightManagementTestUtilities.RunTestInNewResourceGroup(suiteName, testName, (resClient, client, rgName) =>
    {
        string clusterName1 = "hdisdk-cluster1";
        string clusterName2 = "hdisdk-cluster2";
        try
        {
            // Sanity check: a fresh resource group must not contain either cluster.
            var list = client.Clusters.ListByResourceGroup(rgName);
            Assert.DoesNotContain(list, c => c.Name.Equals(clusterName1, StringComparison.OrdinalIgnoreCase));
            Assert.DoesNotContain(list, c => c.Name.Equals(clusterName2, StringComparison.OrdinalIgnoreCase));

            // Create one cluster with ADLS so both clusters aren't using the same storage account at the same time
            ClusterCreateParameters parameters1 = ClusterCreateParametersHelpers.GetCustomCreateParametersIaas(testName);
            ClusterCreateParameters parameters2 = ClusterCreateParametersHelpers.GetCustomCreateParametersForAdl(testName);
            Parallel.Invoke(
                () => client.Clusters.Create(rgName, clusterName1, parameters1),
                () => client.Clusters.Create(rgName, clusterName2, parameters2));

            // Both clusters must now be reported by the list operation.
            list = client.Clusters.ListByResourceGroup(rgName);
            Assert.Contains(list, c => c.Name.Equals(clusterName1, StringComparison.OrdinalIgnoreCase));
            Assert.Contains(list, c => c.Name.Equals(clusterName2, StringComparison.OrdinalIgnoreCase));
        }
        finally
        {
            // Fire-and-forget deletes so a failed assertion never leaks clusters.
            client.Clusters.BeginDelete(rgName, clusterName1);
            client.Clusters.BeginDelete(rgName, clusterName2);
        }
    });
}
/// <summary>
/// Resolves the head node VM size: an explicitly requested size wins,
/// otherwise a default is derived from the (case-insensitive) cluster type.
/// </summary>
/// <param name="clusterCreateParameters">The cluster create request.</param>
/// <returns>The VM size string to use for head nodes.</returns>
private static string GetHeadNodeSize(ClusterCreateParameters clusterCreateParameters)
{
    // Caller-specified size takes precedence over any type-based default.
    if (clusterCreateParameters.HeadNodeSize != null)
    {
        return clusterCreateParameters.HeadNodeSize;
    }

    string clusterType = clusterCreateParameters.ClusterType;
    if (clusterType.Equals("Hadoop", StringComparison.OrdinalIgnoreCase))
    {
        return "Standard_D3";
    }
    if (clusterType.Equals("Spark", StringComparison.OrdinalIgnoreCase))
    {
        return "Standard_D12";
    }

    // All other cluster types fall back to the legacy "Large" size.
    return "Large";
}
/// <summary>
/// Writes the default filesystem entry for the cluster's primary storage
/// account into core-site configuration. Supports WASB and ADLS Gen 1.
/// NOTE: may replace <paramref name="clusterCreateParameters"/>.DefaultStorageInfo
/// when a WASB account lacks a container (the cluster name is used).
/// </summary>
/// <param name="clusterName">Cluster name, used as the fallback container name.</param>
/// <param name="clusterCreateParameters">The create request; its DefaultStorageInfo may be rewritten.</param>
/// <param name="coreConfig">The core-site configuration dictionary to populate.</param>
private static void AddDefaultStorageAccountToCoreConfig(string clusterName, ClusterCreateParameters clusterCreateParameters, Dictionary<string, string> coreConfig)
{
    // HDI 2.1 clusters use the legacy Hadoop key name for the default filesystem.
    string coreConfigDefaultFSKey = "fs.defaultFS";
    string coreConfigDefaultFSKeyFor_2_1_Clusters = "fs.default.name";
    var defaultStorageAccountKey = (clusterCreateParameters.Version != null && clusterCreateParameters.Version.Equals("2.1"))
        ? coreConfigDefaultFSKeyFor_2_1_Clusters
        : coreConfigDefaultFSKey;

    // Exactly one of these casts succeeds depending on the configured storage kind.
    var azureStorageAccountInfo = clusterCreateParameters.DefaultStorageInfo as AzureStorageInfo;
    var azureDataLakeStorageInfo = clusterCreateParameters.DefaultStorageInfo as AzureDataLakeStoreInfo;
    if (azureStorageAccountInfo != null)
    {
        if (string.IsNullOrWhiteSpace(azureStorageAccountInfo.StorageContainer))
        {
            // No container specified: default the container to the cluster name and
            // swap the rewritten storage info back onto the create parameters.
            var storageInfoWithContainerName = new AzureStorageInfo(azureStorageAccountInfo.StorageAccountName, azureStorageAccountInfo.StorageAccountKey, clusterName);
            clusterCreateParameters.DefaultStorageInfo = storageInfoWithContainerName;
            coreConfig[defaultStorageAccountKey] = storageInfoWithContainerName.StorageAccountUri;
        }
        else
        {
            coreConfig[defaultStorageAccountKey] = azureStorageAccountInfo.StorageAccountUri;
        }
    }
    else if (azureDataLakeStorageInfo != null)
    {
        // setup the parameters required for DataLake containers
        coreConfig[defaultStorageAccountKey] = "adl://home";
        coreConfig["dfs.adls.home.hostname"] = azureDataLakeStorageInfo.StorageAccountName;
        coreConfig["dfs.adls.home.mountpoint"] = azureDataLakeStorageInfo.StorageRootPath;
    }
}
/// <summary>
/// Simulator implementation: records the request, fabricates a running
/// cluster from it, stores it, and returns the details synchronously.
/// </summary>
/// <param name="clusterCreateParameters">The create request to simulate.</param>
/// <returns>A completed task holding the simulated cluster details.</returns>
public Task<ClusterDetails> CreateClusterAsync(ClusterCreateParameters clusterCreateParameters)
{
    this.LogMessage("Creating cluster '{0}' in location {1}", clusterCreateParameters.Name, clusterCreateParameters.Location);
    LastCreateRequest = clusterCreateParameters;

    var details = new ClusterDetails
    {
        Name = clusterCreateParameters.Name,
        HttpPassword = clusterCreateParameters.Password,
        HttpUserName = clusterCreateParameters.UserName,
        Version = clusterCreateParameters.Version,
        Location = clusterCreateParameters.Location,
        State = ClusterState.Running,
        AdditionalStorageAccounts = clusterCreateParameters.AdditionalStorageAccounts,
        DefaultStorageAccount = new WabStorageAccountConfiguration(
            clusterCreateParameters.DefaultStorageAccountName,
            clusterCreateParameters.DefaultStorageAccountKey,
            clusterCreateParameters.DefaultStorageContainer)
    };

    // High availability adds two extra nodes in this simulator.
    if (clusterCreateParameters.EnsureHighAvailability)
    {
        details.ClusterSizeInNodes = clusterCreateParameters.ClusterSizeInNodes + 2;
    }

    Clusters.Add(new SimulatorClusterContainer { Cluster = details });
    return TaskEx2.FromResult(details);
}
/// <summary>
/// Builds the Linux OS profile for the cluster: SSH user/password plus an
/// SSH profile when a public key was supplied (null otherwise).
/// </summary>
/// <param name="createProperties">The cluster create request.</param>
/// <returns>The OS profile carrying the Linux operating system settings.</returns>
private static OsProfile GetOsProfile(ClusterCreateParameters createProperties)
{
    // Only materialize an SSH profile when a public key is present.
    SshProfile sshProfile = null;
    if (!string.IsNullOrEmpty(createProperties.SshPublicKey))
    {
        sshProfile = new SshProfile
        {
            PublicKeys = new[]
            {
                new SshPublicKey { CertificateData = createProperties.SshPublicKey }
            }
        };
    }

    return new OsProfile
    {
        LinuxOperatingSystemProfile = new LinuxOperatingSystemProfile
        {
            SshProfile = sshProfile,
            Password = createProperties.SshPassword,
            Username = createProperties.SshUserName
        }
    };
}
/// <summary>
/// Synchronous wrapper over <see cref="CreateClusterAsync"/>: blocks until
/// the async create completes and returns its result.
/// </summary>
public ClusterDetails CreateCluster(ClusterCreateParameters cluster)
{
    Task<ClusterDetails> pending = this.CreateClusterAsync(cluster);
    pending.Wait();
    return pending.Result;
}
/// <summary>
/// Verifies that a Sandbox cluster type converts to the extended spec
/// regardless of the casing used for the cluster type string.
/// </summary>
public void CanConvertSandboxCluster()
{
    ClusterCreateParameters parameters = GetClusterCreateParamsWithMinRequiredValues();
    parameters.ClusterType = "SaNdBoX";
    ExtendedParameterValidators.ValidateSpecConversion(parameters);
}
/// <summary>
/// Verifies the OMS monitoring extension lifecycle on a running Spark 3.6
/// cluster: enable, read back status, disable, and confirm it is cleared.
/// </summary>
public void TestOMSOnRunningCluster()
{
    string clusterName = "hdisdk-oms";
    string testName = "TestOMSOnRunningCluster";
    string suiteName = GetType().FullName;
    ClusterCreateParameters createParams = ClusterCreateParametersHelpers.GetCustomCreateParametersIaas(testName);
    // OMS monitoring requires a 3.6 Spark cluster.
    createParams.Version = "3.6";
    createParams.ClusterType = "Spark";
    HDInsightManagementTestUtilities.CreateClusterInNewResourceGroupAndRunTest(suiteName, testName, clusterName, createParams, (client, rgName) =>
    {
        // Enable monitoring against the test workspace and verify it reports enabled.
        ClusterMonitoringRequest request = new ClusterMonitoringRequest
        {
            WorkspaceId = WorkspaceId,
            PrimaryKey = PrimaryKey
        };
        client.Extensions.EnableMonitoring(rgName, clusterName, request);
        ClusterMonitoringResponse monitoringStatus = client.Extensions.GetMonitoringStatus(rgName, clusterName);
        Assert.True(monitoringStatus.ClusterMonitoringEnabled);
        Assert.Equal(monitoringStatus.WorkspaceId, WorkspaceId);

        // Disable and verify the status (including the workspace id) is cleared.
        client.Extensions.DisableMonitoring(rgName, clusterName);
        monitoringStatus = client.Extensions.GetMonitoringStatus(rgName, clusterName);
        Assert.False(monitoringStatus.ClusterMonitoringEnabled);
        Assert.Null(monitoringStatus.WorkspaceId);
    });
}
/// <summary>
/// Builds the storage profile for the create request.
/// Only ADLS Gen 2 accounts are surfaced here; WASB and ADLS Gen 1 accounts
/// are populated directly into cluster configurations instead, so any other
/// default storage kind yields null.
/// </summary>
/// <param name="createProperties">The cluster create request.</param>
/// <returns>A storage profile for ADLS Gen 2, or null for other storage kinds.</returns>
private static StorageProfile GetStorageProfile(ClusterCreateParameters createProperties)
{
    var gen2Info = createProperties.DefaultStorageInfo as AzureDataLakeStoreGen2Info;
    if (gen2Info == null)
    {
        return null;
    }

    var defaultAccount = new StorageAccount
    {
        Name = gen2Info.StorageAccountName,
        FileSystem = gen2Info.StorageFileSystem,
        Key = gen2Info.StorageAccountKey,
        IsDefault = true
    };

    return new StorageProfile { Storageaccounts = new[] { defaultAccount } };
}
/// <summary>
/// Asserts that the converted role collection matches the create parameters:
/// per-role OS/vnet/VM-size checks, then node-type-specific instance counts.
/// Sandbox clusters have only a head node; RServer/MLServices add an edge node.
/// </summary>
/// <param name="roleCollection">Roles produced by the spec conversion.</param>
/// <param name="createParams">The original create parameters to validate against.</param>
public static void ValidateRoles(IList<Role> roleCollection, ClusterCreateParameters createParams)
{
    Assert.NotNull(roleCollection);
    // Per-role validation: OS profile, vnet profile, VM size, and any script actions.
    foreach (Role role in roleCollection)
    {
        ValidateOsProfile(role.OsProfile, createParams);
        ValidateVnet(role.VirtualNetworkProfile, createParams);
        Assert.NotNull(role.HardwareProfile);
        // Role names are expected to parse (case-insensitively) as ClusterNodeType values.
        Assert.True(Enum.TryParse(role.Name, true, out ClusterNodeType nodeType));
        Assert.Equal(CreateParametersConverter.GetNodeSize(createParams, nodeType), role.HardwareProfile.VmSize);
        if (createParams.ScriptActions != null && createParams.ScriptActions.ContainsKey(nodeType))
        {
            Assert.Equal(createParams.ScriptActions[nodeType], role.ScriptActions);
        }
    }

    //Validate headnode. Sandbox clusters get 1 head node, everything else gets 2.
    Role headnode = roleCollection.FirstOrDefault(role => role.Name == "headnode");
    Assert.NotNull(headnode);
    int targetCount = createParams.ClusterType.Equals("Sandbox", StringComparison.OrdinalIgnoreCase) ? 1 : 2;
    Assert.Equal(targetCount, headnode.TargetInstanceCount);

    //Sandbox clusters only have 1 headnode. Return if Sandbox.
    if (createParams.ClusterType.Equals("Sandbox", StringComparison.OrdinalIgnoreCase))
    {
        Assert.Equal(1, roleCollection.Count);
        return;
    }

    //Validate workernode.
    Role workernode = roleCollection.FirstOrDefault(role => role.Name == "workernode");
    Assert.NotNull(workernode);
    Assert.Equal(createParams.ClusterSizeInNodes, workernode.TargetInstanceCount);

    //Validate zookeeper.
    Role zookeepernode = roleCollection.FirstOrDefault(role => role.Name == "zookeepernode");
    Assert.NotNull(zookeepernode);
    Assert.Equal(3, zookeepernode.TargetInstanceCount);

    //RServer & MLServices clusters contain an additional edge node. Return if not RServer or not MLServices.
    if (!new[] { "RServer", "MLServices" }.Contains(createParams.ClusterType, StringComparer.OrdinalIgnoreCase))
    {
        Assert.Equal(3, roleCollection.Count);
        return;
    }

    //Validate edgenode.
    Role edgenode = roleCollection.FirstOrDefault(role => role.Name == "edgenode");
    Assert.NotNull(edgenode);
    Assert.Equal(1, edgenode.TargetInstanceCount);
    Assert.Equal(4, roleCollection.Count);
}
/// <summary>
/// Converts the friendly create parameters into the extended (ARM) spec.
/// Works on a deep copy so the caller's object is never mutated.
/// </summary>
/// <param name="clusterName">The cluster name, used when generating configurations.</param>
/// <param name="createParameters">The friendly create parameters.</param>
/// <returns>The extended create parameters ready for the ARM API.</returns>
public static ClusterCreateParametersExtended GetExtendedClusterCreateParameters(string clusterName, ClusterCreateParameters createParameters)
{
    //Deep copy so the createParameters object isn't touched.
    ClusterCreateParameters copy = new ClusterCreateParameters(createParameters);

    // Build configurations first: GetConfigurations may normalize the copy's
    // default storage info, which GetStorageProfile reads afterwards.
    var clusterDefinition = new ClusterDefinition
    {
        Kind = copy.ClusterType,
        ComponentVersion = copy.ComponentVersion,
        Configurations = GetConfigurations(clusterName, copy)
    };

    var properties = new ClusterCreateProperties
    {
        ClusterDefinition = clusterDefinition,
        Tier = copy.ClusterTier,
        ClusterVersion = copy.Version,
        ComputeProfile = new ComputeProfile { Roles = GetRoleCollection(copy) },
        OsType = OSType.Linux,
        SecurityProfile = copy.SecurityProfile,
        StorageProfile = GetStorageProfile(copy),
        DiskEncryptionProperties = copy.DiskEncryptionProperties
    };

    return new ClusterCreateParametersExtended
    {
        Location = copy.Location,
        Tags = copy.Tags,
        Properties = properties,
        Identity = copy.ClusterIdentity
    };
}
/// <summary>
/// Converts the given create parameters to the extended spec and asserts that
/// every top-level property, configuration section, and role survived the
/// conversion intact.
/// </summary>
/// <param name="createParams">The friendly create parameters to convert and check.</param>
public static void ValidateSpecConversion(ClusterCreateParameters createParams)
{
    //Convert spec.
    ClusterCreateParametersExtended extendedParams = CreateParametersConverter.GetExtendedClusterCreateParameters("testCluster", createParams);

    //Validate properties carried over one-to-one from the source parameters.
    Assert.Equal(createParams.Tags, extendedParams.Tags);
    Assert.Equal(createParams.Location, extendedParams.Location);
    Assert.Equal(createParams.ClusterType, extendedParams.Properties.ClusterDefinition.Kind);
    Assert.Equal(createParams.ComponentVersion, extendedParams.Properties.ClusterDefinition.ComponentVersion);
    Assert.Equal(createParams.ClusterTier, extendedParams.Properties.Tier);
    Assert.Equal(createParams.Version, extendedParams.Properties.ClusterVersion);
    Assert.Equal(OSType.Linux, extendedParams.Properties.OsType);
    Assert.Equal(createParams.SecurityProfile, extendedParams.Properties.SecurityProfile);
    ValidateStorageProfile(createParams.DefaultStorageInfo, extendedParams.Properties.StorageProfile);

    //Validate configurations (storage, credentials, ADLS, and metastores).
    Dictionary<string, Dictionary<string, string>> configurations = extendedParams.Properties.ClusterDefinition.Configurations as Dictionary<string, Dictionary<string, string>>;
    Assert.NotNull(configurations);
    ValidateStorageConfigurations(configurations, createParams.DefaultStorageInfo, createParams.AdditionalStorageAccounts);
    ValidateClusterCredentialConfigurations(configurations);
    ValidateAdlConfigurations(configurations, createParams.Principal);
    ValidateHiveMetastoreConfigurations(configurations, createParams.HiveMetastore);
    ValidateOozieMetastoreConfigurations(configurations, createParams.OozieMetastore);

    //Validate roles.
    ValidateRoles(extendedParams.Properties.ComputeProfile.Roles, createParams);
}
/// <summary>
/// Applies virtual network settings from the V2 inputs to the cluster request.
/// Rejects a partial configuration (only one of vnet id / subnet name set);
/// when both are present, assigns the subnet to every cluster role.
/// </summary>
/// <param name="cluster">The cluster request to mutate.</param>
/// <param name="inputs">The V2 create parameters carrying vnet id and subnet name.</param>
/// <exception cref="ArgumentException">Thrown when only one of the two vnet settings is provided.</exception>
private static void ConfigVirtualNetwork(ClusterCreateParameters cluster, HDInsight.ClusterCreateParametersV2 inputs)
{
    // Check if the virtual network configuration is partially set
    if (string.IsNullOrEmpty(inputs.VirtualNetworkId) ^ string.IsNullOrEmpty(inputs.SubnetName))
    {
        // Bug fix: use IsNullOrEmpty to match the XOR test above; the original
        // checked "== null", which selected the wrong error message when the
        // vnet id was an empty string rather than null.
        if (string.IsNullOrEmpty(inputs.VirtualNetworkId))
        {
            throw new ArgumentException("Subnet name is set however virtual network GUID is not set.");
        }
        else
        {
            // Bug fix: corrected "newtork" typo in the error message.
            throw new ArgumentException("Virtual network GUID is set however subnet name is not set.");
        }
    }

    // Set virtual network configuration if is provided in the input
    if (!string.IsNullOrEmpty(inputs.VirtualNetworkId) && !string.IsNullOrEmpty(inputs.SubnetName))
    {
        VirtualNetworkConfiguration virtualNetworkConf = new VirtualNetworkConfiguration();
        virtualNetworkConf.VirtualNetworkSite = inputs.VirtualNetworkId;
        // Every role in the cluster is placed in the same subnet.
        foreach (var role in cluster.ClusterRoleCollection)
        {
            AddressAssignment aa = new AddressAssignment();
            Subnet subnet = new Subnet();
            subnet.Name = inputs.SubnetName;
            aa.Subnets.Add(subnet);
            aa.Role = role;
            virtualNetworkConf.AddressAssignments.Add(aa);
        }
        cluster.VirtualNetworkConfiguration = virtualNetworkConf;
    }
}
/// <summary>
/// Verifies that a cluster whose default storage is ADLS Gen 2 converts to
/// the extended spec correctly.
/// </summary>
public void CanConvertAdlsGen2Cluster()
{
    ClusterCreateParameters parameters = GetClusterCreateParamsWithMinRequiredValues();
    parameters.DefaultStorageInfo = new AzureDataLakeStoreGen2Info("adlsGen2StorageAccount", "key", "fileSystem");
    ExtendedParameterValidators.ValidateSpecConversion(parameters);
}
/// <summary>
/// End-to-end PowerShell pipeline test: creates a cluster with head node high
/// availability enabled, verifies the simulator saw EnsureHighAvailability and
/// the reported node count (5 workers + 2 HA head nodes = 7), then removes the
/// cluster and checks the subscription's cluster count returns to its baseline.
/// </summary>
public void ICanCreateAClusterUsingPowerShellAndConfig_New_Set__EnableHeadNodeHA()
{
    AzureTestCredentials creds = GetCredentials(TestCredentialsNames.Default);
    string dnsName = this.GetRandomClusterName();
    using (IRunspace runspace = this.GetPowerShellRunspace())
    {
        // Capture the baseline cluster count before creating anything.
        IGetAzureHDInsightClusterCommand getCommand = ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>().CreateGet();
        getCommand = ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>().CreateGet();
        getCommand.CurrentSubscription = GetCurrentSubscription();
        getCommand.EndProcessing();
        int expected = getCommand.Output.Count();

        // Create the cluster via the New-AzureHDInsightCluster cmdlet with HA enabled.
        IPipelineResult results = runspace.NewPipeline().AddCommand(CmdletConstants.NewAzureHDInsightCluster)
            // Ensure that the subscription Id can be accepted as a string as well as a guid.
            .WithParameter(CmdletConstants.Name, dnsName)
            .WithParameter(CmdletConstants.Location, CmdletConstants.EastUs)
            .WithParameter(CmdletConstants.DefaultStorageAccountName, TestCredentials.Environments[0].DefaultStorageAccount.Name)
            .WithParameter(CmdletConstants.DefaultStorageAccountKey, TestCredentials.Environments[0].DefaultStorageAccount.Key)
            .WithParameter(CmdletConstants.DefaultStorageContainerName, TestCredentials.Environments[0].DefaultStorageAccount.Container)
            .WithParameter(CmdletConstants.Credential, GetPSCredential("hadoop", this.GetRandomValidPassword()))
            .WithParameter(CmdletConstants.EnableHeadNodeHighAvailibility, null)
            .WithParameter(CmdletConstants.ClusterSizeInNodes, 5)
            .Invoke();

        // The simulator must have received the HA flag; the resulting cluster
        // reports 7 nodes (5 requested + 2 extra HA head nodes).
        ClusterCreateParameters request = AzureHDInsightClusterManagementClientSimulator.LastCreateRequest;
        Assert.IsTrue(request.EnsureHighAvailability);
        var testCluster = results.Results.ToEnumerable<AzureHDInsightCluster>().FirstOrDefault();
        Assert.IsNotNull(testCluster);
        Assert.AreEqual(dnsName, testCluster.Name);
        Assert.AreEqual(7, testCluster.ClusterSizeInNodes);

        // The cluster must be retrievable by name.
        getCommand = ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>().CreateGet();
        getCommand.CurrentSubscription = GetCurrentSubscription();
        getCommand.Name = dnsName;
        getCommand.EndProcessing();
        Assert.AreEqual(1, getCommand.Output.Count);
        Assert.AreEqual(dnsName, getCommand.Output.ElementAt(0).Name);

        // Remove the cluster via the cmdlet.
        results = runspace.NewPipeline().AddCommand(CmdletConstants.RemoveAzureHDInsightCluster)
            // Ensure that subscription id can be accepted as a string as well as a guid.
            .WithParameter(CmdletConstants.Name, dnsName)
            .Invoke();
        Assert.AreEqual(0, results.Results.Count);

        // The cluster count should return to the baseline captured above.
        getCommand = ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>().CreateGet();
        getCommand.CurrentSubscription = GetCurrentSubscription();
        getCommand.EndProcessing();
        Assert.AreEqual(expected, getCommand.Output.Count);
    }
}
/// <summary>
/// Test-server handler simulating RDFE cluster creation: deserializes the
/// create parameters from the RDFE resource's intrinsic settings, enforces
/// the schema-version gate for Spark, and records a running cluster for the
/// subscription.
/// </summary>
/// <param name="subscriptionId">Subscription the cluster is recorded under.</param>
/// <param name="cloudServiceName">Cloud service name (unused here).</param>
/// <param name="resourceNamespace">Resource namespace (unused here).</param>
/// <param name="dnsName">DNS name from the route (unused here; taken from the payload).</param>
/// <returns>201 Created on success.</returns>
/// <exception cref="NotSupportedException">Thrown for Spark requests without schema version 3.0.</exception>
public async Task<HttpResponseMessage> CreateCluster(string subscriptionId, string cloudServiceName, string resourceNamespace, string dnsName)
{
    var requestMessage = this.Request;
    var rdfeResource = await requestMessage.Content.ReadAsAsync<RDFEResource>();
    // The create parameters travel as an embedded XML node inside the RDFE payload;
    // round-trip it through a stream for the DataContractSerializer.
    // NOTE(review): stream and writer are never disposed — harmless for a
    // MemoryStream, but worth confirming this was intentional.
    XmlNode node = rdfeResource.IntrinsicSettings[0];
    MemoryStream stm = new MemoryStream();
    StreamWriter stw = new StreamWriter(stm);
    stw.Write(node.OuterXml);
    stw.Flush();
    stm.Position = 0;
    DataContractSerializer ser = new DataContractSerializer(typeof(ClusterCreateParameters));
    ClusterCreateParameters clusterCreateParams = (ClusterCreateParameters)ser.ReadObject(stm);

    // Spark cluster creation is introduced after schema version 3.0
    if (clusterCreateParams.Components.Any(c => c.GetType() == typeof(SparkComponent)))
    {
        if (!requestMessage.Headers.GetValues("SchemaVersion").Any(v => v.Equals("3.0")))
        {
            throw new NotSupportedException(ClustersTestConstants.NotSupportedBySubscriptionException);
        }
    }

    // Fabricate a running cluster from the request payload.
    var testCluster = new Cluster
    {
        ClusterRoleCollection = clusterCreateParams.ClusterRoleCollection,
        CreatedTime = DateTime.UtcNow,
        Error = null,
        FullyQualifiedDnsName = clusterCreateParams.DnsName,
        State = ClusterState.Running,
        UpdatedTime = DateTime.UtcNow,
        DnsName = clusterCreateParams.DnsName,
        Components = clusterCreateParams.Components,
        ExtensionData = clusterCreateParams.ExtensionData,
        Location = clusterCreateParams.Location,
        Version = ClusterVersionUtils.TryGetVersionNumber(clusterCreateParams.Version),
        VirtualNetworkConfiguration = clusterCreateParams.VirtualNetworkConfiguration
    };

    // Append to the subscription's cluster list, creating the entry on first use.
    List<Cluster> clusters;
    bool subExists = _clustersAvailable.TryGetValue(subscriptionId, out clusters);
    if (subExists)
    {
        clusters.Add(testCluster);
        _clustersAvailable[subscriptionId] = clusters;
    }
    else
    {
        _clustersAvailable.Add(
            new KeyValuePair<string, List<Cluster>>(subscriptionId, new List<Cluster> { testCluster }));
    }
    return (this.Request.CreateResponse(HttpStatusCode.Created));
}
/// <summary>
/// Verifies that virtual network id and subnet name survive the spec
/// conversion for a Hadoop cluster.
/// </summary>
public void CanConvertHadoopClusterWithCustomVnet()
{
    ClusterCreateParameters parameters = GetClusterCreateParamsWithMinRequiredValues();
    parameters.VirtualNetworkId = "vnetId";
    parameters.SubnetName = "subnet";
    ExtendedParameterValidators.ValidateSpecConversion(parameters);
}
/// <summary>
/// Begins creating a new HDInsight cluster with the specified parameters.
/// </summary>
/// <param name="operations">Reference to the
/// Microsoft.Azure.Management.HDInsight.IClusterOperations.</param>
/// <param name="resourceGroupName">Required. The name of the resource group.</param>
/// <param name="clusterName">Required. The name of the cluster.</param>
/// <param name="parameters">Required. The cluster create request.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>
/// The CreateCluster operation response body.
/// </returns>
public static async Task<Cluster> BeginCreatingAsync(this IClustersOperations operations, string resourceGroupName, string clusterName, ClusterCreateParameters parameters, CancellationToken cancellationToken = default(CancellationToken))
{
    // Dispose the wire response once its body has been extracted.
    using (var response = await operations.BeginCreatingAsync(resourceGroupName, clusterName, parameters, cancellationToken).ConfigureAwait(false))
    {
        return response.Body;
    }
}
/// <summary>
/// Cmdlet pipeline completion: builds the create request from cmdlet state,
/// subscribes to provisioning events, creates the cluster, and emits it.
/// </summary>
public override async Task EndProcessing()
{
    IHDInsightClient hdinsightClient = this.GetClient();
    hdinsightClient.ClusterProvisioning += this.ClientOnClusterProvisioning;
    ClusterCreateParameters request = this.GetClusterCreateParameters();
    var createdCluster = await hdinsightClient.CreateClusterAsync(request);
    this.Output.Add(new AzureHDInsightCluster(createdCluster));
}
/// <summary>
/// Translates the cmdlet's bound parameters and configuration state into an
/// SDK <c>ClusterCreateParameters</c> request.
/// </summary>
/// <returns>The populated cluster create request.</returns>
internal ClusterCreateParameters GetClusterCreateParameters()
{
    var createClusterRequest = new ClusterCreateParameters();
    createClusterRequest.Name = this.Name;
    createClusterRequest.Version = this.Version;
    createClusterRequest.Location = this.Location;
    // Copy the per-service Hadoop configuration collections verbatim.
    createClusterRequest.CoreConfiguration.AddRange(this.CoreConfiguration);
    createClusterRequest.YarnConfiguration.AddRange(this.YarnConfiguration);
    createClusterRequest.HdfsConfiguration.AddRange(this.HdfsConfiguration);
    createClusterRequest.MapReduceConfiguration.ConfigurationCollection.AddRange(this.MapReduceConfiguration.ConfigurationCollection);
    createClusterRequest.MapReduceConfiguration.CapacitySchedulerConfigurationCollection.AddRange(
        this.MapReduceConfiguration.CapacitySchedulerConfigurationCollection);
    createClusterRequest.HiveConfiguration.AdditionalLibraries = this.HiveConfiguration.AdditionalLibraries;
    createClusterRequest.HiveConfiguration.ConfigurationCollection.AddRange(this.HiveConfiguration.ConfigurationCollection);
    createClusterRequest.OozieConfiguration.ConfigurationCollection.AddRange(this.OozieConfiguration.ConfigurationCollection);
    createClusterRequest.OozieConfiguration.AdditionalSharedLibraries = this.OozieConfiguration.AdditionalSharedLibraries;
    createClusterRequest.OozieConfiguration.AdditionalActionExecutorLibraries = this.OozieConfiguration.AdditionalActionExecutorLibraries;
    createClusterRequest.StormConfiguration.AddRange(this.StormConfiguration);
    createClusterRequest.HBaseConfiguration.AdditionalLibraries = this.HBaseConfiguration.AdditionalLibraries;
    createClusterRequest.HBaseConfiguration.ConfigurationCollection.AddRange(this.HBaseConfiguration.ConfigurationCollection);
    // Node sizing, default storage, and HTTP credentials.
    createClusterRequest.HeadNodeSize = this.HeadNodeSize;
    createClusterRequest.DefaultStorageAccountName = this.DefaultStorageAccountName;
    createClusterRequest.DefaultStorageAccountKey = this.DefaultStorageAccountKey;
    createClusterRequest.DefaultStorageContainer = this.DefaultStorageContainerName;
    createClusterRequest.UserName = this.Credential.UserName;
    createClusterRequest.Password = this.Credential.GetCleartextPassword();
    createClusterRequest.ClusterSizeInNodes = this.ClusterSizeInNodes;
    createClusterRequest.ClusterType = this.ClusterType;
    // Virtual network settings are optional; only forward them when provided.
    if (!string.IsNullOrEmpty(this.VirtualNetworkId))
    {
        createClusterRequest.VirtualNetworkId = this.VirtualNetworkId;
    }
    if (!string.IsNullOrEmpty(this.SubnetName))
    {
        createClusterRequest.SubnetName = this.SubnetName;
    }
    createClusterRequest.AdditionalStorageAccounts.AddRange(
        this.AdditionalStorageAccounts.Select(act => new WabStorageAccountConfiguration(act.StorageAccountName, act.StorageAccountKey)));
    createClusterRequest.ConfigActions.AddRange(this.ConfigActions.Select(ca => ca.ToSDKConfigAction()));
    // Optional external metastores for Hive and Oozie.
    if (this.HiveMetastore.IsNotNull())
    {
        createClusterRequest.HiveMetastore = new Metastore(
            this.HiveMetastore.SqlAzureServerName,
            this.HiveMetastore.DatabaseName,
            this.HiveMetastore.Credential.UserName,
            this.HiveMetastore.Credential.GetCleartextPassword());
    }
    if (this.OozieMetastore.IsNotNull())
    {
        createClusterRequest.OozieMetastore = new Metastore(
            this.OozieMetastore.SqlAzureServerName,
            this.OozieMetastore.DatabaseName,
            this.OozieMetastore.Credential.UserName,
            this.OozieMetastore.Credential.GetCleartextPassword());
    }
    return (createClusterRequest);
}
/// <summary>
/// Verifies that custom Hive and Oozie metastore settings survive the spec
/// conversion for a Hadoop cluster.
/// </summary>
public void CanConvertHadoopClusterWithCustomMetastoresToMarchSpec()
{
    ClusterCreateParameters parameters = GetClusterCreateParamsWithMinRequiredValues();
    parameters.HiveMetastore = new Metastore("server.database.windows.net", "hiveDb", "username", "password");
    parameters.OozieMetastore = new Metastore("server.database.windows.net", "oozieDb", "username", "password");
    ExtendedParameterValidators.ValidateSpecConversion(parameters);
}
/// <summary>
/// End-to-end create test for an RServer cluster.
/// </summary>
public void TestCreateRServerCluster()
{
    const string testName = "TestCreateRServerCluster";
    ClusterCreateParameters createParameters = ClusterCreateParametersHelpers.GetCustomCreateParametersIaas(testName);
    createParameters.ClusterType = "RServer";
    RunCreateClusterTestInNewResourceGroup(GetType().FullName, testName, "hdisdk-rserver", createParameters);
}
/// <summary>
/// Finds the storage account (default or additional) that hosts the given
/// script action, matching the script URI's host against each account's
/// fully qualified name (case-insensitive). Returns null when no account matches.
/// </summary>
/// <param name="sa">The script action whose URI host is matched.</param>
/// <param name="details">The create request supplying the candidate accounts.</param>
/// <returns>The hosting account, or null if the script is hosted elsewhere.</returns>
private static WabStorageAccountConfiguration GetStorageAccountForScript(ScriptAction sa, ClusterCreateParameters details)
{
    // Collect the default account plus all additional accounts for a uniform lookup.
    var candidates = new List<WabStorageAccountConfiguration>
    {
        new WabStorageAccountConfiguration(
            details.DefaultStorageAccountName, details.DefaultStorageAccountKey, details.DefaultStorageContainer)
    };
    candidates.AddRange(details.AdditionalStorageAccounts);

    // Tests whether the host for the script is in the list of provided storage accounts.
    return candidates.FirstOrDefault(
        acct => GetFullyQualifiedStorageAccountName(acct.Name).Equals(sa.Uri.Host, StringComparison.OrdinalIgnoreCase));
}
/// <summary>
/// Validates, appends the FQDN suffix if required to storage accounts and creates the default cluster specified in <paramref name="details"/>.
/// </summary>
/// <param name="details">The details; its additional storage accounts and default account name are rewritten in place.</param>
/// <exception cref="InvalidOperationException">Thrown for a missing container, missing credentials, or duplicate account names.</exception>
public static void ValidateAndResolveAsvAccountsAndPrep(ClusterCreateParameters details)
{
    var defaultStorageAccount = new WabStorageAccountConfiguration(
        details.DefaultStorageAccountName, details.DefaultStorageAccountKey, details.DefaultStorageContainer);
    // Flattens all the configurations into a single list for more uniform validation
    var asvList = ResolveStorageAccounts(details.AdditionalStorageAccounts).ToList();
    asvList.Add(ResolveStorageAccount(defaultStorageAccount));

    // Basic validation on the ASV configurations
    if (string.IsNullOrEmpty(details.DefaultStorageContainer))
    {
        throw new InvalidOperationException("Invalid Container. Default Storage Account Container cannot be null or empty");
    }
    if (asvList.Any(asv => string.IsNullOrEmpty(asv.Name) || string.IsNullOrEmpty(asv.Key)))
    {
        throw new InvalidOperationException("Invalid Azure Configuration. Credentials cannot be null or empty");
    }
    // Reject duplicate account names across the default + additional accounts.
    if (asvList.GroupBy(asv => asv.Name).Count(group => group.Count() > 1) > 0)
    {
        throw new InvalidOperationException("Invalid Azure Storage credential. Duplicated values detected");
    }

    // Validates that we can establish the connection to the ASV Names and the default container
    var client = ServiceLocator.Instance.Locate<IAsvValidatorClientFactory>().Create();
    asvList.ForEach(asv => client.ValidateAccount(asv.Name, asv.Key).Wait());

    // Write the resolved (possibly FQDN-suffixed) account names back onto the request.
    var resolvedAccounts = ResolveStorageAccounts(details.AdditionalStorageAccounts);
    details.AdditionalStorageAccounts.Clear();
    foreach (var resolvedAccount in resolvedAccounts)
    {
        details.AdditionalStorageAccounts.Add(resolvedAccount);
    }
    var resolvedDefaultStorageAccount = ResolveStorageAccount(defaultStorageAccount);
    details.DefaultStorageAccountName = resolvedDefaultStorageAccount.Name;
    // Ensure the default container exists before provisioning proceeds.
    client.CreateContainerIfNotExists(details.DefaultStorageAccountName, details.DefaultStorageAccountKey, details.DefaultStorageContainer).Wait();
}
/// <summary>
/// Builds an RDFE resource input carrying the serialized wire create
/// parameters as its intrinsic settings.
/// </summary>
/// <param name="wireCreateParameters">The wire create parameters.</param>
/// <param name="schemaVersion">The schema version for the RDFE resource.</param>
/// <returns>An RDFE Resource input from wire create parameters.</returns>
/// <exception cref="System.ArgumentNullException">Thrown if wireCreateParameters or schemaVersion is null.</exception>
public static RDFEResource CreateRdfeResourceInputFromWireInput(ClusterCreateParameters wireCreateParameters, string schemaVersion)
{
    if (wireCreateParameters == null)
    {
        throw new ArgumentNullException("wireCreateParameters");
    }
    if (schemaVersion == null)
    {
        throw new ArgumentNullException("schemaVersion");
    }

    // Round-trip the parameters through XML so the document element can be
    // embedded in the resource.
    var xml = wireCreateParameters.SerializeAndOptionallyWriteToStream();
    var dom = new XmlDocument();
    using (var sr = new StringReader(xml))
    {
        using (var xr = XmlReader.Create(sr))
        {
            dom.Load(xr);
        }
    }

    var resource = new RDFEResource
    {
        SchemaVersion = schemaVersion,
        IntrinsicSettings = new XmlNode[] { dom.DocumentElement }
    };
    return resource;
}
/// <summary>
/// Simulates an asynchronous cluster creation: records the request, builds a
/// running ClusterDetails from the supplied parameters, stores it in the
/// in-memory cluster list, and returns it as a completed task.
/// </summary>
/// <param name="clusterCreateParameters">The creation parameters to simulate.</param>
/// <returns>A completed task holding the simulated cluster details.</returns>
public Task<ClusterDetails> CreateClusterAsync(ClusterCreateParameters clusterCreateParameters)
{
    this.LogMessage("Creating cluster '{0}' in location {1}", clusterCreateParameters.Name, clusterCreateParameters.Location);
    LastCreateRequest = clusterCreateParameters;

    var details = new ClusterDetails
    {
        Name = clusterCreateParameters.Name,
        HttpPassword = clusterCreateParameters.Password,
        HttpUserName = clusterCreateParameters.UserName,
        Version = clusterCreateParameters.Version,
        Location = clusterCreateParameters.Location,
        State = ClusterState.Running,
        AdditionalStorageAccounts = clusterCreateParameters.AdditionalStorageAccounts,
        DefaultStorageAccount = new WabStorageAccountConfiguration(
            clusterCreateParameters.DefaultStorageAccountName,
            clusterCreateParameters.DefaultStorageAccountKey,
            clusterCreateParameters.DefaultStorageContainer)
    };

    // High availability adds two extra nodes; otherwise the size is left at
    // its default value, matching the original simulator behavior.
    if (clusterCreateParameters.EnsureHighAvailability)
    {
        details.ClusterSizeInNodes = clusterCreateParameters.ClusterSizeInNodes + 2;
    }

    Clusters.Add(new SimulatorClusterContainer { Cluster = details });
    return TaskEx2.FromResult(details);
}
/// <summary>
/// Creates a cluster. NOTE: the <paramref name="timeout"/> argument is
/// ignored by this implementation — it delegates directly to
/// CreateCluster(cluster), which blocks until creation completes.
/// </summary>
/// <param name="cluster">The cluster creation parameters.</param>
/// <param name="timeout">Not honored by this overload.</param>
/// <returns>The created cluster's details.</returns>
public ClusterDetails CreateCluster(ClusterCreateParameters cluster, TimeSpan timeout) { return this.CreateCluster(cluster); }
/// <summary>
/// Synchronously creates a cluster by blocking on the asynchronous overload.
/// </summary>
/// <param name="cluster">The cluster creation parameters.</param>
/// <returns>The created cluster's details.</returns>
public ClusterDetails CreateCluster(ClusterCreateParameters cluster)
{
    // GetAwaiter().GetResult() rethrows the task's original exception.
    // The previous Wait()/.Result pattern wrapped any failure in an
    // AggregateException, breaking callers that catch specific exception types.
    return this.CreateClusterAsync(cluster).GetAwaiter().GetResult();
}
/// <summary>
/// Validates every script action URI on the create parameters and verifies it
/// is reachable — either through one of the configured storage accounts (WASB)
/// or over public HTTP. No-ops when there are no config actions.
/// </summary>
/// <param name="details">The cluster create parameters to validate; may be null.</param>
/// <exception cref="InvalidOperationException">Thrown when a script action URI is null or empty.</exception>
public static void ValidateAndResolveConfigActionEndpointUris(ClusterCreateParameters details)
{
    if (details == null || details.ConfigActions == null)
    {
        return;
    }

    foreach (ConfigAction configAction in details.ConfigActions)
    {
        var scriptAction = configAction as ScriptAction;
        if (scriptAction == null)
        {
            // Only script actions carry a URI worth validating.
            continue;
        }

        // Basic validation on the script action URI.
        if (scriptAction.Uri == null || string.IsNullOrEmpty(scriptAction.Uri.AbsoluteUri))
        {
            throw new InvalidOperationException("Invalid Container. Script action URI cannot be null or empty");
        }

        var scriptStorageAccount = GetStorageAccountForScript(scriptAction, details);
        if (scriptStorageAccount == null)
        {
            // Not hosted in a known storage account: check public HTTP reachability.
            ValidateAndResolveHttpScriptActionEndpointUri(scriptAction.Uri).Wait();
        }
        else
        {
            // URI belongs to one of the provided storage accounts: verify it is reachable there.
            ValidateAndResolveWasbScriptActionEndpointUri(scriptAction.Uri, scriptStorageAccount).Wait();
        }
    }
}
/// <summary>
/// Generate ClusterCreateParameters object for a 3.X cluster with only Hadoop.
/// Builds the three roles (head, worker, zookeeper), wires up the gateway,
/// YARN + MapReduce, Hive, optional config actions, Oozie, HDFS, Hadoop core,
/// and Zookeeper components, then applies the virtual network configuration.
/// </summary>
/// <param name="inputs">Cluster creation parameter inputs.</param>
/// <returns>The corresponding ClusterCreateParameters object.</returns>
/// <exception cref="ArgumentNullException">Thrown if inputs is null.</exception>
internal static ClusterCreateParameters Create3XClusterFromMapReduceTemplate(HDInsight.ClusterCreateParametersV2 inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }

    // RDP is enabled only when a username is supplied.
    // NOTE(review): the (DateTime) cast below will throw if RdpUsername is set
    // but RdpAccessExpiry is null — presumably validated upstream; confirm.
    var remoteDesktopSettings = (string.IsNullOrEmpty(inputs.RdpUsername))
        ? new RemoteDesktopSettings()
        {
            IsEnabled = false
        }
        : new RemoteDesktopSettings()
        {
            IsEnabled = true,
            AuthenticationCredential = new UsernamePasswordCredential()
            {
                Username = inputs.RdpUsername,
                Password = inputs.RdpPassword
            },
            RemoteAccessExpiry = (DateTime)inputs.RdpAccessExpiry
        };

    var cluster = new ClusterCreateParameters
    {
        DnsName = inputs.Name,
        Version = inputs.Version,
    };

    // Head node count is fixed at 2; worker count comes from the caller;
    // zookeeper quorum is fixed at 3 nodes, defaulting to the Small VM size.
    var headnodeRole = new ClusterRole
    {
        FriendlyName = "HeadNodeRole",
        InstanceCount = 2,
        VMSizeAsString = inputs.HeadNodeSize,
        RemoteDesktopSettings = remoteDesktopSettings
    };
    var workernodeRole = new ClusterRole
    {
        InstanceCount = inputs.ClusterSizeInNodes,
        FriendlyName = "WorkerNodeRole",
        VMSizeAsString = inputs.DataNodeSize,
        RemoteDesktopSettings = remoteDesktopSettings
    };
    var zookeeperRole = new ClusterRole
    {
        InstanceCount = 3,
        FriendlyName = "ZKRole",
        VMSizeAsString = inputs.ZookeeperNodeSize ?? VmSize.Small.ToString(),
        RemoteDesktopSettings = remoteDesktopSettings
    };
    cluster.ClusterRoleCollection.Add(headnodeRole);
    cluster.ClusterRoleCollection.Add(workernodeRole);
    cluster.ClusterRoleCollection.Add(zookeeperRole);

    // Gateway exposes the cluster's REST endpoint with the HTTP credentials.
    var gateway = new GatewayComponent
    {
        IsEnabled = true,
        RestAuthCredential = new UsernamePasswordCredential
        {
            Username = inputs.UserName,
            Password = inputs.Password
        }
    };
    cluster.Components.Add(gateway);
    cluster.Location = inputs.Location;

    // Add yarn component with the MapReduce application registered on it.
    YarnComponent yarn = new YarnComponent
    {
        ResourceManagerRole = headnodeRole,
        NodeManagerRole = workernodeRole,
    };
    ConfigYarnComponent(yarn, inputs);
    MapReduceApplication mapreduceApp = new MapReduceApplication();
    ConfigMapReduceApplication(mapreduceApp, inputs);
    yarn.Applications.Add(mapreduceApp);
    cluster.Components.Add(yarn);

    // Adding Hive component
    HiveComponent hive = new HiveComponent { HeadNodeRole = headnodeRole };
    ConfigHiveComponent(hive, inputs);
    cluster.Components.Add(hive);

    // Adding config action component if needed
    if (inputs.ConfigActions != null && inputs.ConfigActions.Count > 0)
    {
        CustomActionComponent configAction = new CustomActionComponent
        {
            HeadNodeRole = headnodeRole,
            WorkerNodeRole = workernodeRole
        };
        AddConfigActionComponent(configAction, inputs, headnodeRole, workernodeRole, zookeeperRole);
        cluster.Components.Add(configAction);
    }

    // Adding Oozie component
    OozieComponent oozie = new OozieComponent { HeadNodeRole = headnodeRole };
    ConfigOozieComponent(oozie, inputs);
    cluster.Components.Add(oozie);

    // Adding Hdfs component
    HdfsComponent hdfs = new HdfsComponent
    {
        HeadNodeRole = headnodeRole,
        WorkerNodeRole = workernodeRole
    };
    ConfigHdfsComponent(hdfs, inputs);
    cluster.Components.Add(hdfs);

    // Adding HadoopCore component
    HadoopCoreComponent hadoopCore = new HadoopCoreComponent();
    ConfigHadoopCoreComponent(hadoopCore, inputs);
    cluster.Components.Add(hadoopCore);

    // Adding Zookeeper component
    cluster.Components.Add(new ZookeeperComponent
    {
        ZookeeperRole = zookeeperRole
    });

    ConfigVirtualNetwork(cluster, inputs);

    return cluster;
}
/// <summary>
/// Applies the virtual network configuration from the inputs onto the cluster
/// create parameters. Virtual network GUID and subnet name must be provided
/// together; when both are present, every cluster role is assigned to the subnet.
/// </summary>
/// <param name="cluster">The cluster create parameters being built.</param>
/// <param name="inputs">The caller-supplied inputs carrying VirtualNetworkId and SubnetName.</param>
/// <exception cref="ArgumentException">Thrown when only one of the two virtual network settings is provided.</exception>
private static void ConfigVirtualNetwork(ClusterCreateParameters cluster, HDInsight.ClusterCreateParameters inputs)
{
    bool hasVirtualNetwork = !string.IsNullOrEmpty(inputs.VirtualNetworkId);
    bool hasSubnet = !string.IsNullOrEmpty(inputs.SubnetName);

    // Check if the virtual network configuration is partially set.
    // Both checks use IsNullOrEmpty: the previous "== null" test reported the
    // wrong error when VirtualNetworkId was empty-but-non-null.
    if (hasVirtualNetwork ^ hasSubnet)
    {
        if (!hasVirtualNetwork)
        {
            throw new ArgumentException("Subnet name is set however virtual network GUID is not set.");
        }
        else
        {
            // Fixed typo in the message ("newtork" -> "network").
            throw new ArgumentException("Virtual network GUID is set however subnet name is not set.");
        }
    }

    // Set virtual network configuration if it is provided in the input.
    if (hasVirtualNetwork && hasSubnet)
    {
        VirtualNetworkConfiguration virtualNetworkConf = new VirtualNetworkConfiguration();
        virtualNetworkConf.VirtualNetworkSite = inputs.VirtualNetworkId;

        // Every role in the cluster gets an address assignment on the same subnet.
        foreach (var role in cluster.ClusterRoleCollection)
        {
            AddressAssignment aa = new AddressAssignment();
            Subnet subnet = new Subnet();
            subnet.Name = inputs.SubnetName;
            aa.Subnets.Add(subnet);
            aa.Role = role;
            virtualNetworkConf.AddressAssignments.Add(aa);
        }

        cluster.VirtualNetworkConfiguration = virtualNetworkConf;
    }
}
/// <summary>
/// Generate ClusterCreateParameters object for a 2.X cluster with only Hadoop.
/// Builds the three roles (head, worker, zookeeper), wires up the gateway,
/// MapReduce, Hive, optional config actions, Oozie, HDFS, and Hadoop core
/// components, then applies the virtual network configuration.
/// </summary>
/// <param name="inputs">Cluster creation parameter inputs.</param>
/// <returns>The corresponding ClusterCreateParameters object.</returns>
/// <exception cref="ArgumentNullException">Thrown if inputs is null.</exception>
internal static ClusterCreateParameters Create2XClusterForMapReduceTemplate(HDInsight.ClusterCreateParameters inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }

    var cluster = new ClusterCreateParameters
    {
        DnsName = inputs.Name,
        Version = inputs.Version
    };

    // 2.X fixes the worker VM size at Large and the zookeeper size at Small;
    // only the head node size comes from the inputs. No RDP settings here,
    // unlike the 3.X template.
    var headnodeRole = new ClusterRole
    {
        FriendlyName = "HeadNodeRole",
        InstanceCount = 2,
        VMSize = inputs.HeadNodeSize.ToVmSize(),
    };
    var workernodeRole = new ClusterRole
    {
        InstanceCount = inputs.ClusterSizeInNodes,
        FriendlyName = "WorkerNodeRole",
        VMSize = VmSize.Large
    };
    var zookeeperRole = new ClusterRole
    {
        InstanceCount = 3,
        FriendlyName = "ZKRole",
        VMSize = VmSize.Small
    };
    cluster.ClusterRoleCollection.Add(headnodeRole);
    cluster.ClusterRoleCollection.Add(workernodeRole);
    cluster.ClusterRoleCollection.Add(zookeeperRole);

    // Gateway exposes the cluster's REST endpoint with the HTTP credentials.
    var gateway = new GatewayComponent
    {
        IsEnabled = true,
        RestAuthCredential = new UsernamePasswordCredential
        {
            Username = inputs.UserName,
            Password = inputs.Password
        }
    };
    cluster.Components.Add(gateway);
    cluster.Location = inputs.Location;

    // Adding MapReduce component (2.X uses MapReduce directly, not YARN).
    MapReduceComponent mapReduce = new MapReduceComponent
    {
        HeadNodeRole = headnodeRole,
        WorkerNodeRole = workernodeRole
    };
    ConfigMapReduceComponent(mapReduce, inputs);
    cluster.Components.Add(mapReduce);

    // Adding Hive component
    HiveComponent hive = new HiveComponent { HeadNodeRole = headnodeRole };
    ConfigHiveComponent(hive, inputs);
    cluster.Components.Add(hive);

    // Adding config action component if needed
    if (inputs.ConfigActions != null && inputs.ConfigActions.Count > 0)
    {
        CustomActionComponent configAction = new CustomActionComponent
        {
            HeadNodeRole = headnodeRole,
            WorkerNodeRole = workernodeRole
        };
        AddConfigActionComponent(configAction, inputs, headnodeRole, workernodeRole);
        cluster.Components.Add(configAction);
    }

    // Adding Oozie component
    OozieComponent oozie = new OozieComponent { HeadNodeRole = headnodeRole };
    ConfigOozieComponent(oozie, inputs);
    cluster.Components.Add(oozie);

    // Adding Hdfs component
    HdfsComponent hdfs = new HdfsComponent
    {
        HeadNodeRole = headnodeRole,
        WorkerNodeRole = workernodeRole
    };
    ConfigHdfsComponent(hdfs, inputs);
    cluster.Components.Add(hdfs);

    // Adding HadoopCore component
    HadoopCoreComponent hadoopCore = new HadoopCoreComponent();
    ConfigHadoopCoreComponent(hadoopCore, inputs);
    cluster.Components.Add(hadoopCore);

    ConfigVirtualNetwork(cluster, inputs);

    return cluster;
}