/// <summary>
/// Verifies that an IaaS (Linux) cluster can be created with a Standard_D12 head node
/// and subsequently shows up in the container listing.
/// </summary>
public async Task CanCreateIaasClusterWithD12Headnode()
{
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
    {
        Name = "D12HeadnodeCreationTest",
        DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
        DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
        DefaultStorageContainer = "D12HeadnodeCreationTest",
        ClusterSizeInNodes = 2,
        Location = "East US",
        UserName = "******",
        Password = "******",
        OSType = OSType.Linux,
        Version = "3.2",
        ClusterType = ClusterProvisioning.Data.ClusterType.Hadoop,
        HeadNodeSize = "Standard_D12"
    };
    await clustersPocoClient.CreateContainer(clusterCreateParameters);
    // Fixed: await instead of blocking on .Result inside an async method (deadlock risk).
    var containersList = await clustersPocoClient.ListContainers();
    // Fixed: MSTest convention is Assert.AreEqual(expected, actual).
    Assert.AreEqual(1, containersList.Count);
    Assert.IsNotNull(containersList.SingleOrDefault(cluster => cluster.Name.Equals("D12HeadnodeCreationTest")));
}
/// <summary>
/// Registers a simulated cluster (with no cluster capabilities) under the test
/// subscription in the simulator's cluster store.
/// </summary>
/// <param name="dnsName">DNS name for the simulated cluster.</param>
/// <param name="location">Azure region for the simulated cluster.</param>
internal void CreateClusterWithoutCapability(string dnsName, string location)
{
    var createParams = new HDInsight.ClusterCreateParametersV2
    {
        Name = dnsName,
        DefaultStorageAccountKey = "storageaccountkey",
        DefaultStorageAccountName = "teststorage",
        ClusterSizeInNodes = 2,
        Location = location,
        UserName = "******",
        Password = "******",
        Version = "3.1"
    };
    var simulatedCluster = CreateClusterFromCreateParameters(createParams);

    List<Cluster> existingClusters;
    if (RootHandlerSimulatorController._clustersAvailable.TryGetValue(TestSubscription, out existingClusters))
    {
        // Subscription already tracked: append and write the list back.
        existingClusters.Add(simulatedCluster);
        RootHandlerSimulatorController._clustersAvailable[TestSubscription] = existingClusters;
    }
    else
    {
        // First cluster for this subscription.
        RootHandlerSimulatorController._clustersAvailable.Add(
            new KeyValuePair<string, List<Cluster>>(TestSubscription, new List<Cluster> { simulatedCluster }));
    }
}
/// <summary>
/// Registers a simulated cluster that advertises the resize capability under the
/// test subscription in the simulator's cluster store.
/// </summary>
/// <param name="dnsName">DNS name for the simulated cluster.</param>
/// <param name="location">Azure region for the simulated cluster.</param>
internal void CreateCluster(string dnsName, string location)
{
    var createParams = new HDInsight.ClusterCreateParametersV2
    {
        Name = dnsName,
        DefaultStorageAccountKey = "storageaccountkey",
        DefaultStorageAccountName = "teststorage",
        ClusterSizeInNodes = 2,
        Location = location,
        UserName = "******",
        Password = "******",
        Version = "3.1"
    };
    var simulatedCluster = CreateClusterFromCreateParameters(createParams);
    // Mark the cluster as resizable so resize-path tests can exercise it.
    simulatedCluster.ClusterCapabilities = new List<string> { PaasClustersPocoClient.ResizeCapabilityEnabled };

    List<Cluster> existingClusters;
    if (RootHandlerSimulatorController._clustersAvailable.TryGetValue(TestSubscription, out existingClusters))
    {
        existingClusters.Add(simulatedCluster);
        RootHandlerSimulatorController._clustersAvailable[TestSubscription] = existingClusters;
    }
    else
    {
        RootHandlerSimulatorController._clustersAvailable.Add(
            new KeyValuePair<string, List<Cluster>>(TestSubscription, new List<Cluster> { simulatedCluster }));
    }
}
/// <summary>
/// Copies the virtual network settings from the create parameters onto the cluster
/// payload. VirtualNetworkId and SubnetName must either both be provided or both be
/// omitted; a partial configuration is rejected.
/// </summary>
/// <param name="cluster">Wire-contract cluster payload to configure.</param>
/// <param name="inputs">User-supplied cluster create parameters.</param>
/// <exception cref="ArgumentException">Thrown when only one of the two VNet values is set.</exception>
private static void ConfigVirtualNetwork(ClusterCreateParameters cluster, HDInsight.ClusterCreateParametersV2 inputs)
{
    // XOR: exactly one of the two values is set -> partial (invalid) configuration.
    if (string.IsNullOrEmpty(inputs.VirtualNetworkId) ^ string.IsNullOrEmpty(inputs.SubnetName))
    {
        // Fixed: use IsNullOrEmpty (not == null) so an empty-string network id takes
        // the correct branch and reports the right error message.
        if (string.IsNullOrEmpty(inputs.VirtualNetworkId))
        {
            throw new ArgumentException("Subnet name is set however virtual network GUID is not set.");
        }
        // Fixed typo in the message: "newtork" -> "network".
        throw new ArgumentException("Virtual network GUID is set however subnet name is not set.");
    }

    // Set virtual network configuration if it is fully provided in the input.
    if (!string.IsNullOrEmpty(inputs.VirtualNetworkId) && !string.IsNullOrEmpty(inputs.SubnetName))
    {
        var virtualNetworkConf = new VirtualNetworkConfiguration();
        virtualNetworkConf.VirtualNetworkSite = inputs.VirtualNetworkId;
        // Every role is assigned an address in the same subnet.
        foreach (var role in cluster.ClusterRoleCollection)
        {
            AddressAssignment aa = new AddressAssignment();
            Subnet subnet = new Subnet();
            subnet.Name = inputs.SubnetName;
            aa.Subnets.Add(subnet);
            aa.Role = role;
            virtualNetworkConf.AddressAssignments.Add(aa);
        }
        cluster.VirtualNetworkConfiguration = virtualNetworkConf;
    }
}
/// <summary>
/// Populates the Hive component from the create parameters: hive-site properties,
/// optional additional libraries, and an optional external SQL Azure metastore.
/// </summary>
private static void ConfigHiveComponent(HiveComponent hive, HDInsight.ClusterCreateParametersV2 inputs)
{
    // Copy user-supplied hive-site.xml key/value pairs.
    foreach (var prop in inputs.HiveConfiguration.ConfigurationCollection)
    {
        hive.HiveSiteXmlProperties.Add(new Property { Name = prop.Key, Value = prop.Value });
    }

    var libraries = inputs.HiveConfiguration.AdditionalLibraries;
    if (libraries != null)
    {
        // Extra libraries come from a caller-supplied blob container.
        hive.AdditionalLibraries = new BlobContainerCredentialBackedResource
        {
            AccountDnsName = libraries.Name,
            BlobContainerName = libraries.Container,
            Key = libraries.Key
        };
    }

    var metastore = inputs.HiveMetastore;
    if (metastore != null)
    {
        // External metastore replaces the default embedded one.
        hive.Metastore = new SqlAzureDatabaseCredentialBackedResource
        {
            SqlServerName = metastore.Server,
            Credentials = new UsernamePasswordCredential
            {
                Username = metastore.User,
                Password = metastore.Password
            },
            DatabaseName = metastore.Database
        };
    }
}
/// <summary>
/// Verifies that a cluster can be created with a private Hortonworks version string
/// ("3.2-hwx-trunk") when the V3 contract capabilities are present.
/// NOTE(review): an identically named test appears elsewhere in this source —
/// confirm they live in different test classes.
/// </summary>
public async Task CanCreateClusterWithHwxPrivateVersion()
{
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_1_SDK");
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_VERSION_3_SDK");
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
    {
        Name = "HwxVersionTest",
        DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
        DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
        DefaultStorageContainer = "HwxVersionTest",
        ClusterSizeInNodes = 2,
        Location = "East US",
        UserName = "******",
        Password = "******",
        Version = "3.2-hwx-trunk",
        ClusterType = ClusterType.Hadoop,
    };
    await clustersPocoClient.CreateContainer(clusterCreateParameters);
    // Fixed: await instead of blocking on .Result inside an async method.
    var containersList = await clustersPocoClient.ListContainers();
    // Fixed: MSTest convention is Assert.AreEqual(expected, actual).
    Assert.AreEqual(1, containersList.Count);
    Assert.IsNotNull(containersList.SingleOrDefault(cluster => cluster.Name.Equals("HwxVersionTest")));
}
/// <summary>
/// Populates the MapReduce component: mapred-site and capacity-scheduler
/// properties, the default storage account/container, and any additional
/// storage accounts from the create parameters.
/// </summary>
private static void ConfigMapReduceComponent(MapReduceComponent mapReduce, HDInsight.ClusterCreateParametersV2 inputs)
{
    // mapred-site.xml overrides.
    foreach (var prop in inputs.MapReduceConfiguration.ConfigurationCollection)
    {
        mapReduce.MapRedConfXmlProperties.Add(new Property { Name = prop.Key, Value = prop.Value });
    }

    // capacity-scheduler.xml overrides.
    foreach (var prop in inputs.MapReduceConfiguration.CapacitySchedulerConfigurationCollection)
    {
        mapReduce.CapacitySchedulerConfXmlProperties.Add(new Property { Name = prop.Key, Value = prop.Value });
    }

    // Default storage is always required.
    mapReduce.DefaultStorageAccountAndContainer = new BlobContainerCredentialBackedResource
    {
        AccountDnsName = inputs.DefaultStorageAccountName,
        BlobContainerName = inputs.DefaultStorageContainer,
        Key = inputs.DefaultStorageAccountKey
    };

    // Secondary storage accounts are optional.
    if (inputs.AdditionalStorageAccounts.Any())
    {
        foreach (var storageAccount in inputs.AdditionalStorageAccounts)
        {
            mapReduce.AdditionalStorageAccounts.Add(new BlobContainerCredentialBackedResource
            {
                AccountDnsName = storageAccount.Name,
                BlobContainerName = storageAccount.Container,
                Key = storageAccount.Key
            });
        }
    }
}
/// <summary>
/// Attempts to create an HBase cluster specifying the legacy VM-size names
/// (ExtraLarge/Large/Medium) for all node roles, with a script action attached.
/// A NotSupportedException from the client is tolerated.
/// NOTE(review): if the creation is *expected* to throw, an Assert.Fail after the
/// await is missing (compare the non-HBase negative test) — confirm intent.
/// </summary>
public async Task ICanCreateACluster_WithOldVmSizes_All_Specified()
{
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    try
    {
        var createParams = new HDInsight.ClusterCreateParametersV2
        {
            Name = "ConfigActionTest",
            DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
            DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
            DefaultStorageContainer = "ConfigActionTest",
            ClusterSizeInNodes = 2,
            Location = "East US",
            UserName = "******",
            Password = "******",
            Version = "3.1",
            HeadNodeSize = "ExtraLarge",
            DataNodeSize = "Large",
            ZookeeperNodeSize = "Medium",
            ClusterType = ClusterType.HBase,
        };

        // Attach a valid head-node script action.
        createParams.ConfigActions.Add(new ScriptAction(
            "TestScriptAction",
            new ClusterNodeType[] { ClusterNodeType.HeadNode },
            new Uri("http://www.microsoft.com"),
            null));

        await clustersPocoClient.CreateContainer(createParams);
    }
    catch (NotSupportedException ex)
    {
        Assert.IsNotNull(ex);
    }
}
/// <inheritdoc />
public async Task CreateContainer(HDInsight.ClusterCreateParametersV2 clusterCreateParameters)
{
    // Guard clauses: reject malformed requests before touching the service.
    if (clusterCreateParameters == null)
    {
        throw new ArgumentNullException("clusterCreateParameters");
    }
    if (string.IsNullOrEmpty(clusterCreateParameters.Name))
    {
        throw new ArgumentException("ClusterCreateParameters.Name cannot be null or empty", "clusterCreateParameters");
    }
    if (string.IsNullOrEmpty(clusterCreateParameters.Location))
    {
        throw new ArgumentException("ClusterCreateParameters.Location cannot be null or empty", "clusterCreateParameters");
    }
    if (clusterCreateParameters.ClusterSizeInNodes < 1)
    {
        throw new ArgumentException("clusterCreateParameters.ClusterSizeInNodes must be > 0");
    }

    try
    {
        // Both calls are idempotent when the subscription/service already exist.
        await this.RegisterSubscriptionIfExistsAsync();
        await this.CreateCloudServiceAsyncIfNotExists(clusterCreateParameters.Location);

        // TODO: fix hard-coded schema version
        string schemaVersion = "1.0";

        string subscriptionId = this.credentials.SubscriptionId.ToString();
        var clusterPayload = PayloadConverterIaasClusters.ConvertToIaasCluster(clusterCreateParameters, subscriptionId);
        var resource = PayloadConverterIaasClusters.CreateRdfeResource(clusterPayload, schemaVersion);

        await this.rdfeRestClient.CreateCluster(
            subscriptionId,
            this.GetCloudServiceName(clusterCreateParameters.Location),
            this.credentials.DeploymentNamespace,
            clusterCreateParameters.Name,
            resource,
            this.Context.CancellationToken);
    }
    catch (InvalidExpectedStatusCodeException iEx)
    {
        // Translate transport failures into HttpLayerException carrying the response body.
        // NOTE(review): .Result blocks inside catch — await in catch requires C# 6; confirm
        // the project's language version before changing this.
        var body = iEx.Response.Content == null ? string.Empty : iEx.Response.Content.ReadAsStringAsync().Result;
        throw new HttpLayerException(iEx.ReceivedStatusCode, body);
    }
}
/// <summary>
/// Converts an internal May2014 ClusterCreateParameters payload back into the
/// public V2 create-parameters shape, including role sizes, virtual network
/// settings, and component configuration.
/// </summary>
/// <param name="payloadObject">Internal wire-contract payload.</param>
/// <returns>The equivalent public create-parameters object.</returns>
private static HDInsight.ClusterCreateParametersV2 CreateClusterRequest_FromInternalV3(
    Microsoft.WindowsAzure.Management.HDInsight.Contracts.May2014.ClusterCreateParameters payloadObject)
{
    // Hoisted: the original called GetDefaultStorageAccountFromFromPayloadObject three
    // times and re-materialized ClusterRoleCollection.ToList() four times.
    var defaultStorageAccount = GetDefaultStorageAccountFromFromPayloadObject(payloadObject);
    var roles = payloadObject.ClusterRoleCollection.ToList();

    var cluster = new HDInsight.ClusterCreateParametersV2
    {
        Location = payloadObject.Location,
        Name = payloadObject.DnsName,
        UserName = GetClusterUsernameFromPayloadObject(payloadObject),
        Password = GetClusterPasswordFromPayloadObject(payloadObject),
        Version = payloadObject.Version,
        DefaultStorageAccountName = defaultStorageAccount.Name,
        DefaultStorageAccountKey = defaultStorageAccount.Key,
        DefaultStorageContainer = defaultStorageAccount.Container,
        // Exactly one worker-node role is expected in the payload.
        ClusterSizeInNodes = roles.Single(role => role.FriendlyName == WorkerNodeRoleName).InstanceCount,
    };

    // Node sizes are optional: set them only when the corresponding role exists.
    var headNodeRole = roles.FirstOrDefault(role => role.FriendlyName == HeadNodeRoleName);
    if (headNodeRole != null)
    {
        cluster.HeadNodeSize = headNodeRole.VMSizeAsString;
    }
    var dataNodeRole = roles.FirstOrDefault(role => role.FriendlyName == WorkerNodeRoleName);
    if (dataNodeRole != null)
    {
        cluster.DataNodeSize = dataNodeRole.VMSizeAsString;
    }
    var zookeeperNodeRole = roles.FirstOrDefault(role => role.FriendlyName == ZookeeperNodeRoleName);
    if (zookeeperNodeRole != null)
    {
        cluster.ZookeeperNodeSize = zookeeperNodeRole.VMSizeAsString;
    }

    if (payloadObject.VirtualNetworkConfiguration != null)
    {
        cluster.VirtualNetworkId = payloadObject.VirtualNetworkConfiguration.VirtualNetworkSite;
        cluster.SubnetName = payloadObject.VirtualNetworkConfiguration.AddressAssignments.First().Subnets.First().Name;
    }

    CopyConfigurationForCluster(payloadObject, cluster);
    return cluster;
}
/// <summary>
/// Populates the HBase component from the create parameters: hbase-site
/// properties and optional additional libraries.
/// </summary>
private static void ConfigHBaseComponent(HBaseComponent hbase, HDInsight.ClusterCreateParametersV2 inputs)
{
    // Copy user-supplied hbase-site.xml key/value pairs.
    foreach (var prop in inputs.HBaseConfiguration.ConfigurationCollection)
    {
        hbase.HBaseConfXmlProperties.Add(new Property { Name = prop.Key, Value = prop.Value });
    }

    var libraries = inputs.HBaseConfiguration.AdditionalLibraries;
    if (libraries != null)
    {
        // Extra libraries come from a caller-supplied blob container.
        hbase.AdditionalLibraries = new BlobContainerCredentialBackedResource
        {
            AccountDnsName = libraries.Name,
            BlobContainerName = libraries.Container,
            Key = libraries.Key
        };
    }
}
/// <summary>
/// Generate ClusterCreateParameters object for 1.X cluster with only Hadoop.
/// </summary>
/// <param name="inputs">The inputs.</param>
/// <returns>An instance of the cluster create parameters.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="inputs"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when a Large head node is requested (1.X supports ExtraLarge only).</exception>
internal static ClusterCreateParameters Create1XClusterForMapReduceTemplate(HDInsight.ClusterCreateParametersV2 inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }

    // Fixed: the original dereferenced inputs.HeadNodeSize directly and threw
    // NullReferenceException when the caller left the size unset. string.Equals
    // with Ordinal preserves the original case-sensitive comparison, null-safely.
    if (string.Equals(inputs.HeadNodeSize, VmSize.Large.ToString(), StringComparison.Ordinal))
    {
        throw new ArgumentException(string.Format(CultureInfo.InvariantCulture,
            "Version 1.X('{0}') clusters can only contain ExtraLarge headnodes.", inputs.Version));
    }

    var createParameters = Create2XClusterForMapReduceTemplate(inputs);
    var headNodeRole = createParameters.ClusterRoleCollection
        .Find(role => role.FriendlyName.Equals("HeadNodeRole", StringComparison.OrdinalIgnoreCase));

    // We do not support HA clusters for 1.X so we need to set the instance count to 1.
    headNodeRole.InstanceCount = 1;
    headNodeRole.VMSize = VmSize.ExtraLarge;
    return createParameters;
}
/// <summary>
/// Builds a simulated Cluster entity by round-tripping the create parameters
/// through the 3.X map-reduce request template and copying its fields.
/// </summary>
/// <param name="clusterCreateParameters">Create parameters to base the simulated cluster on.</param>
/// <returns>A Cluster in the Running state stamped with the current UTC time.</returns>
private static Cluster CreateClusterFromCreateParameters(HDInsight.ClusterCreateParametersV2 clusterCreateParameters)
{
    var request = HDInsightClusterRequestGenerator.Create3XClusterFromMapReduceTemplate(clusterCreateParameters);
    return new Cluster
    {
        ClusterRoleCollection = request.ClusterRoleCollection,
        CreatedTime = DateTime.UtcNow,
        Error = null,
        FullyQualifiedDnsName = request.DnsName,
        State = ClusterState.Running,
        UpdatedTime = DateTime.UtcNow,
        DnsName = request.DnsName,
        Components = request.Components,
        ExtensionData = request.ExtensionData,
        Location = request.Location,
        Version = request.Version,
        VirtualNetworkConfiguration = request.VirtualNetworkConfiguration
    };
}
/// <summary>
/// Negative test: specifying a Zookeeper node size for a non-HBase (Spark) cluster
/// must be rejected with an ArgumentException.
/// </summary>
public async Task ICanCreateACluster_WithVmSizes_All_Specified_NonHBase_Negative()
{
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    try
    {
        var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
        {
            Name = "ConfigActionTest",
            DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
            DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
            DefaultStorageContainer = "ConfigActionTest",
            ClusterSizeInNodes = 2,
            Location = "East US",
            UserName = "******",
            Password = "******",
            Version = "3.1",
            HeadNodeSize = "ExtraLarge",
            DataNodeSize = "Large",
            ZookeeperNodeSize = "Medium",
            ClusterType = ClusterType.Spark,
        };

        // Add in valid config action.
        clusterCreateParameters.ConfigActions.Add(new ScriptAction(
            "TestScriptAction",
            new ClusterNodeType[] { ClusterNodeType.HeadNode },
            new Uri("http://www.microsoft.com"),
            null));

        await clustersPocoClient.CreateContainer(clusterCreateParameters);

        // This should not work for non-HBase clusters.
        Assert.Fail("Zookeeper node size should not be settable for non-hbase clusters");
    }
    catch (ArgumentException aex)
    {
        // Fixed: MSTest convention is Assert.AreEqual(expected, actual).
        Assert.AreEqual("clusterCreateParameters.ZookeeperNodeSize must be null for Spark clusters.", aex.Message);
    }
}
/// <summary>
/// Verifies that an RDP user can be enabled on a cluster, is reflected in the
/// cluster listing, can be disabled again, and that the cluster can be deleted.
/// </summary>
public async Task CanEnableAndDisableRdpUser()
{
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_1_SDK");
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_VERSION_3_SDK");
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clusterDnsName = "rdpTestCluster";
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
    {
        Name = clusterDnsName,
        DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
        DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
        DefaultStorageContainer = "EnableDisableRdpTest",
        ClusterSizeInNodes = 2,
        Location = "East US",
        UserName = "******",
        Password = "******",
        Version = "3.1",
        ClusterType = ClusterType.Hadoop,
    };
    await clustersPocoClient.CreateContainer(clusterCreateParameters);

    // Fixed: await instead of blocking on .Result inside an async method (deadlock risk).
    var cluster = await clustersPocoClient.ListContainer(clusterDnsName);
    var rdpUsername = "******";
    await clustersPocoClient.EnableRdp(clusterDnsName, cluster.Location, rdpUsername, "Had00p!123", DateTime.Now.AddHours(1));

    cluster = await clustersPocoClient.ListContainer(clusterDnsName);
    var actualRdpUserName = cluster.RdpUserName;
    Assert.AreEqual(rdpUsername, actualRdpUserName);

    await clustersPocoClient.DisableRdp(clusterDnsName, cluster.Location);
    cluster = await clustersPocoClient.ListContainer(clusterDnsName);
    Assert.IsNull(cluster.RdpUserName);

    await clustersPocoClient.DeleteContainer(cluster.Name, cluster.Location);
}
/// <summary>
/// Round-trips a V3 create request that carries virtual network configuration
/// through the serializer/deserializer and verifies structural equality.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateHadoopClusterRequestWithVirtualNetworkConfigurationV3()
{
    var rng = new Random();
    var expected = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        Version = "3.0",
        ClusterSizeInNodes = rng.Next()
    };

    // Two extra storage accounts plus a VNet binding exercise the optional fields.
    expected.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    expected.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    expected.VirtualNetworkId = Guid.NewGuid().ToString();
    expected.SubnetName = "MySubnet";

    string payload = new PayloadConverter().SerializeClusterCreateRequestV3(expected);
    var actual = ServerSerializer.DeserializeClusterCreateRequestV3(payload);

    // The serializer fills in a default zookeeper size; align the expectation first.
    fixDefaultExpectedZookeeperSize(expected);
    Assert.IsTrue(Equals(expected, actual));
}
/// <summary>
/// Serializes a create request carrying a shared Hive/Oozie metastore and verifies
/// the resulting RDFE resource declares schema version 2.0.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequest_MayContracts()
{
    var cluster1 = new HDInsight.ClusterCreateParametersV2
    {
        Name = "bcarlson",
        ClusterSizeInNodes = 1,
        UserName = "******",
        Version = "default",
        Password = "******",
        Location = "East US"
    };
    cluster1.DefaultStorageAccountName = "storageaccount.blob.core.windows.net";
    cluster1.DefaultStorageContainer = "newcontainer";
    cluster1.DefaultStorageAccountKey = "fakekey";

    // One metastore instance shared by both Hive and Oozie.
    var metaStore = new Metastore("serverabcd.bigbean.windowsazure.mscds.com", "newmaytestdb", "bcarlson", "SuperPass1!");
    cluster1.HiveMetastore = cluster1.OozieMetastore = metaStore;

    string payload = new PayloadConverter().SerializeClusterCreateRequest(cluster1);
    var resource = ServerSerializer.DeserializeClusterCreateRequestIntoResource(payload);

    // Fixed: MSTest convention is Assert.AreEqual(expected, actual).
    Assert.AreEqual("2.0", resource.SchemaVersion);
}
/// <summary>
/// Round-trips an HBase V3 create request (with an explicit zookeeper size and
/// additional storage accounts) through the serializer and verifies equality.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequestV3()
{
    var rng = new Random();
    var cluster1 = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        Version = "3.0",
        ClusterSizeInNodes = rng.Next(),
        ClusterType = ClusterType.HBase,
        ZookeeperNodeSize = "Large",
    };

    // Exercise the optional additional-storage-account fields.
    cluster1.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    cluster1.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));

    string payload = new PayloadConverter().SerializeClusterCreateRequestV3(cluster1);
    var cluster2 = ServerSerializer.DeserializeClusterCreateRequestV3(payload);

    Assert.IsTrue(Equals(cluster1, cluster2));
}
/// <summary>
/// Copies core-site configuration key/value pairs onto the Hadoop core component.
/// </summary>
private static void ConfigHadoopCoreComponent(HadoopCoreComponent hadoopCore, HDInsight.ClusterCreateParametersV2 inputs)
{
    foreach (var prop in inputs.CoreConfiguration)
    {
        hadoopCore.CoreSiteXmlProperties.Add(new Property { Name = prop.Key, Value = prop.Value });
    }
}
/// <summary>
/// Generate ClusterCreateParameters object for 3.X cluster with only Hadoop.
/// </summary>
/// <param name="inputs">Cluster creation parameter inputs.</param>
/// <returns>The corresponding ClusterCreateParameter object.</returns>
internal static ClusterCreateParameters Create3XClusterFromMapReduceTemplate(HDInsight.ClusterCreateParametersV2 inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }

    var cluster = new ClusterCreateParameters { DnsName = inputs.Name, Version = inputs.Version };

    // Head node count is fixed at 2; worker count comes from the caller.
    var headnodeRole = new ClusterRole
    {
        FriendlyName = "HeadNodeRole",
        InstanceCount = 2,
        VMSizeAsString = inputs.HeadNodeSize,
    };
    var workernodeRole = new ClusterRole
    {
        InstanceCount = inputs.ClusterSizeInNodes,
        FriendlyName = "WorkerNodeRole",
        VMSizeAsString = inputs.DataNodeSize,
    };
    // Zookeeper quorum is always 3 nodes; falls back to Small when no size is given.
    var zookeeperRole = new ClusterRole
    {
        InstanceCount = 3,
        FriendlyName = "ZKRole",
        VMSizeAsString = inputs.ZookeeperNodeSize ?? VmSize.Small.ToString(),
    };
    cluster.ClusterRoleCollection.Add(headnodeRole);
    cluster.ClusterRoleCollection.Add(workernodeRole);
    cluster.ClusterRoleCollection.Add(zookeeperRole);

    // Gateway exposes the cluster's REST endpoint guarded by the user's credentials.
    var gateway = new GatewayComponent
    {
        IsEnabled = true,
        RestAuthCredential = new UsernamePasswordCredential { Username = inputs.UserName, Password = inputs.Password }
    };
    cluster.Components.Add(gateway);
    cluster.Location = inputs.Location;

    // Add yarn component (with a MapReduce application configured on it).
    YarnComponent yarn = new YarnComponent
    {
        ResourceManagerRole = headnodeRole,
        NodeManagerRole = workernodeRole,
    };
    ConfigYarnComponent(yarn, inputs);
    MapReduceApplication mapreduceApp = new MapReduceApplication();
    ConfigMapReduceApplication(mapreduceApp, inputs);
    yarn.Applications.Add(mapreduceApp);
    cluster.Components.Add(yarn);

    // Adding Hive component
    HiveComponent hive = new HiveComponent { HeadNodeRole = headnodeRole };
    ConfigHiveComponent(hive, inputs);
    cluster.Components.Add(hive);

    // Adding config action component if needed
    if (inputs.ConfigActions != null && inputs.ConfigActions.Count > 0)
    {
        // NOTE(review): this call passes only the head/worker roles, while a
        // five-parameter AddConfigActionComponent (including a zookeeper role)
        // exists elsewhere in this codebase — confirm a four-parameter overload
        // is defined, otherwise zookeeper-targeted script actions cannot resolve.
        CustomActionComponent configAction = new CustomActionComponent { HeadNodeRole = headnodeRole, WorkerNodeRole = workernodeRole };
        AddConfigActionComponent(configAction, inputs, headnodeRole, workernodeRole);
        cluster.Components.Add(configAction);
    }

    // Adding Oozie component
    OozieComponent oozie = new OozieComponent { HeadNodeRole = headnodeRole };
    ConfigOozieComponent(oozie, inputs);
    cluster.Components.Add(oozie);

    // Adding Hdfs component
    HdfsComponent hdfs = new HdfsComponent { HeadNodeRole = headnodeRole, WorkerNodeRole = workernodeRole };
    ConfigHdfsComponent(hdfs, inputs);
    cluster.Components.Add(hdfs);

    // Adding HadoopCore component
    HadoopCoreComponent hadoopCore = new HadoopCoreComponent();
    ConfigHadoopCoreComponent(hadoopCore, inputs);
    cluster.Components.Add(hadoopCore);

    // Adding Zookeeper component
    cluster.Components.Add(new ZookeeperComponent { ZookeeperRole = zookeeperRole });

    ConfigVirtualNetwork(cluster, inputs);
    return (cluster);
}
/// <summary>
/// Round-trips a create request carrying Hive configuration properties and
/// additional Hive libraries through the serializer and verifies equality.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequestWithHiveConfiguration_Resources()
{
    var rng = new Random();
    var expected = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version,
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        ClusterSizeInNodes = rng.Next()
    };

    // Hive settings plus an additional-libraries container exercise the optional fields.
    expected.HiveConfiguration.ConfigurationCollection.Add(new KeyValuePair<string, string>("my setting 1", "my value 1"));
    expected.HiveConfiguration.ConfigurationCollection.Add(new KeyValuePair<string, string>("my setting 2", "my value 2"));
    expected.HiveConfiguration.AdditionalLibraries = new WabStorageAccountConfiguration(
        Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString());

    string payload = new PayloadConverter().SerializeClusterCreateRequest(expected);
    var actual = ServerSerializer.DeserializeClusterCreateRequest(payload);

    Assert.IsTrue(Equals(expected, actual));
}
/// <summary>
/// Verifies that a cluster can be created with a private Hortonworks version string
/// ("3.2-hwx-trunk") when the V3 contract capabilities are present.
/// NOTE(review): an identically named test appears elsewhere in this source —
/// confirm they live in different test classes.
/// </summary>
public async Task CanCreateClusterWithHwxPrivateVersion()
{
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_1_SDK");
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_VERSION_3_SDK");
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
    {
        Name = "HwxVersionTest",
        DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
        DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
        DefaultStorageContainer = "HwxVersionTest",
        ClusterSizeInNodes = 2,
        Location = "East US",
        UserName = "******",
        Password = "******",
        Version = "3.2-hwx-trunk",
        ClusterType = ClusterType.Hadoop,
    };
    await clustersPocoClient.CreateContainer(clusterCreateParameters);
    // Fixed: await instead of blocking on .Result inside an async method.
    var containersList = await clustersPocoClient.ListContainers();
    // Fixed: MSTest convention is Assert.AreEqual(expected, actual).
    Assert.AreEqual(1, containersList.Count);
    Assert.IsNotNull(containersList.SingleOrDefault(cluster => cluster.Name.Equals("HwxVersionTest")));
}
/// <inheritdoc />
public async Task CreateContainer(HDInsight.ClusterCreateParametersV2 clusterCreateParameters)
{
    // Validate required inputs before touching the service.
    if (clusterCreateParameters == null)
    {
        throw new ArgumentNullException("clusterCreateParameters");
    }
    if (string.IsNullOrEmpty(clusterCreateParameters.Name))
    {
        throw new ArgumentException("ClusterCreateParameters.Name cannot be null or empty", "clusterCreateParameters");
    }
    if (string.IsNullOrEmpty(clusterCreateParameters.Location))
    {
        throw new ArgumentException("ClusterCreateParameters.Location cannot be null or empty", "clusterCreateParameters");
    }
    if (clusterCreateParameters.ClusterSizeInNodes < 1)
    {
        throw new ArgumentException("clusterCreateParameters.ClusterSizeInNodes must be > 0");
    }
    try
    {
        // Both calls are no-ops when the subscription/cloud service already exist.
        await this.RegisterSubscriptionIfExistsAsync();
        await this.CreateCloudServiceAsyncIfNotExists(clusterCreateParameters.Location);

        // TODO: fix hard-coded schema version
        string schemaVersion = "1.0";

        var iaasCluster = PayloadConverterIaasClusters.ConvertToIaasCluster(clusterCreateParameters, this.credentials.SubscriptionId.ToString());
        var rdfeResource = PayloadConverterIaasClusters.CreateRdfeResource(iaasCluster, schemaVersion);

        var resp = await this.rdfeRestClient.CreateCluster(
            this.credentials.SubscriptionId.ToString(),
            this.GetCloudServiceName(clusterCreateParameters.Location),
            this.credentials.DeploymentNamespace,
            clusterCreateParameters.Name,
            rdfeResource,
            this.Context.CancellationToken);

        // RDFE reports its async operation id via the x-ms-request-id response header.
        // If the header is absent, the method returns without waiting (best effort).
        IEnumerable<String> requestIds;
        if (resp.Headers.TryGetValues("x-ms-request-id", out requestIds))
        {
            Guid operationId;
            if (!Guid.TryParse(requestIds.First(), out operationId))
            {
                throw new InvalidOperationException("Could not retrieve a valid operation id for the PUT (cluster create) operation.");
            }

            // Wait for the operation specified by the request id to complete (succeed or fail).
            TimeSpan interval = TimeSpan.FromSeconds(1);
            TimeSpan timeout = TimeSpan.FromMinutes(5);
            await this.WaitForRdfeOperationToComplete(operationId, interval, timeout, Context.CancellationToken);
        }
    }
    catch (InvalidExpectedStatusCodeException iEx)
    {
        // Translate transport failures into HttpLayerException with the response body.
        // NOTE(review): .Result blocks inside a catch block — await in catch requires
        // C# 6; confirm the project's language version before changing this.
        string content = iEx.Response.Content != null ? iEx.Response.Content.ReadAsStringAsync().Result : string.Empty;
        throw new HttpLayerException(iEx.ReceivedStatusCode, content);
    }
}
/// <summary>
/// Converts the SDK-level config actions on <paramref name="inputs"/> into
/// wire-contract custom actions, resolves their target roles, and attaches them
/// to <paramref name="configAction"/>.
/// </summary>
/// <param name="configAction">Wire-contract component receiving the converted actions.</param>
/// <param name="inputs">Create parameters carrying the SDK config actions.</param>
/// <param name="headnodeRole">Role targeted by HeadNode actions.</param>
/// <param name="workernodeRole">Role targeted by DataNode actions.</param>
/// <param name="zookeperRole">Role targeted by ZookeperNode actions (HBase/Storm only).</param>
/// <exception cref="NotSupportedException">Thrown for unsupported action or node types.</exception>
private static void AddConfigActionComponent(CustomActionComponent configAction, HDInsight.ClusterCreateParametersV2 inputs, ClusterRole headnodeRole, ClusterRole workernodeRole, ClusterRole zookeperRole)
{
    configAction.CustomActions = new CustomActionList();

    // Converts config action from PS/SDK to wire contract.
    foreach (ConfigAction ca in inputs.ConfigActions)
    {
        CustomAction newConfigAction;

        // Only script actions are currently supported by the wire contract.
        ScriptAction sca = ca as ScriptAction;
        if (sca != null)
        {
            newConfigAction = new ScriptCustomAction { Name = ca.Name, Uri = sca.Uri, Parameters = sca.Parameters };
        }
        else
        {
            throw new NotSupportedException("No such config action supported.");
        }

        newConfigAction.ClusterRoleCollection = new ClusterRoleCollection();

        // Add in cluster role collection for each config action.
        foreach (ClusterNodeType clusterRoleType in ca.ClusterRoleCollection)
        {
            if (clusterRoleType == ClusterNodeType.HeadNode)
            {
                newConfigAction.ClusterRoleCollection.Add(headnodeRole);
            }
            else if (clusterRoleType == ClusterNodeType.DataNode)
            {
                newConfigAction.ClusterRoleCollection.Add(workernodeRole);
            }
            else if (clusterRoleType == ClusterNodeType.ZookeperNode)
            {
                // Zookeeper customization is only meaningful where dedicated ZK nodes exist.
                // Idiom fix: use == for enum comparison instead of boxing .Equals.
                if (inputs.ClusterType == ClusterType.HBase || inputs.ClusterType == ClusterType.Storm)
                {
                    newConfigAction.ClusterRoleCollection.Add(zookeperRole);
                }
                else
                {
                    // Fixed spelling in the user-facing error message ("zookeper" -> "zookeeper").
                    throw new NotSupportedException(string.Format("Customization of zookeeper nodes only supported for cluster types {0} and {1}", ClusterType.HBase, ClusterType.Storm));
                }
            }
            else
            {
                throw new NotSupportedException("No such node type supported.");
            }
        }

        configAction.CustomActions.Add(newConfigAction);
    }
}
/// <summary>
/// Copies hdfs-site configuration key/value pairs onto the HDFS component.
/// </summary>
private static void ConfigHdfsComponent(HdfsComponent hdfs, HDInsight.ClusterCreateParametersV2 inputs)
{
    foreach (var prop in inputs.HdfsConfiguration)
    {
        hdfs.HdfsSiteXmlProperties.Add(new Property { Name = prop.Key, Value = prop.Value });
    }
}
/// <summary>
/// Copies Spark configuration key/value pairs onto the Spark component.
/// </summary>
private static void ConfigSparkComponent(SparkComponent spark, HDInsight.ClusterCreateParametersV2 inputs)
{
    foreach (var prop in inputs.SparkConfiguration)
    {
        spark.SparkConfiguration.Add(new Property { Name = prop.Key, Value = prop.Value });
    }
}
/// <summary>
/// Generate ClusterCreateParameters object for 2.X cluster with only Hadoop.
/// </summary>
/// <param name="inputs">Cluster creation parameter inputs.</param>
/// <returns>The corresponding ClusterCreateParameter object.</returns>
internal static ClusterCreateParameters Create2XClusterForMapReduceTemplate(HDInsight.ClusterCreateParametersV2 inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }
    var cluster = new ClusterCreateParameters { DnsName = inputs.Name, Version = inputs.Version };
    // One RemoteDesktopSettings instance is shared by all roles; RDP is disabled entirely
    // when no RDP username was requested.
    // NOTE(review): the (DateTime) cast assumes RdpAccessExpiry is non-null whenever
    // RdpUsername is set — confirm upstream validation guarantees this.
    var remoteDesktopSettings = (string.IsNullOrEmpty(inputs.RdpUsername))
        ? new RemoteDesktopSettings() { IsEnabled = false }
        : new RemoteDesktopSettings()
        {
            IsEnabled = true,
            AuthenticationCredential = new UsernamePasswordCredential() { Username = inputs.RdpUsername, Password = inputs.RdpPassword },
            RemoteAccessExpiry = (DateTime)inputs.RdpAccessExpiry
        };
    // Role layout: fixed two head nodes, caller-chosen worker count, and three Small zookeeper nodes.
    var headnodeRole = new ClusterRole { FriendlyName = "HeadNodeRole", InstanceCount = 2, VMSizeAsString = inputs.HeadNodeSize, RemoteDesktopSettings = remoteDesktopSettings };
    var workernodeRole = new ClusterRole { InstanceCount = inputs.ClusterSizeInNodes, FriendlyName = "WorkerNodeRole", VMSizeAsString = inputs.DataNodeSize, RemoteDesktopSettings = remoteDesktopSettings };
    var zookeeperRole = new ClusterRole { InstanceCount = 3, FriendlyName = "ZKRole", VMSizeAsString = VmSize.Small.ToString(), RemoteDesktopSettings = remoteDesktopSettings };
    cluster.ClusterRoleCollection.Add(headnodeRole);
    cluster.ClusterRoleCollection.Add(workernodeRole);
    cluster.ClusterRoleCollection.Add(zookeeperRole);
    // Gateway exposes the cluster's REST endpoint, secured with the cluster credentials.
    var gateway = new GatewayComponent { IsEnabled = true, RestAuthCredential = new UsernamePasswordCredential { Username = inputs.UserName, Password = inputs.Password } };
    cluster.Components.Add(gateway);
    cluster.Location = inputs.Location;
    // Adding MapReduce component
    MapReduceComponent mapReduce = new MapReduceComponent { HeadNodeRole = headnodeRole, WorkerNodeRole = workernodeRole };
    ConfigMapReduceComponent(mapReduce, inputs);
    cluster.Components.Add(mapReduce);
    // Adding Hive component
    HiveComponent hive = new HiveComponent { HeadNodeRole = headnodeRole };
    ConfigHiveComponent(hive, inputs);
    cluster.Components.Add(hive);
    // Adding config action component only when the request actually carries config actions.
    if (inputs.ConfigActions != null && inputs.ConfigActions.Count > 0)
    {
        CustomActionComponent configAction = new CustomActionComponent { HeadNodeRole = headnodeRole, WorkerNodeRole = workernodeRole };
        AddConfigActionComponent(configAction, inputs, headnodeRole, workernodeRole, zookeeperRole);
        cluster.Components.Add(configAction);
    }
    // Adding Oozie component
    OozieComponent oozie = new OozieComponent { HeadNodeRole = headnodeRole };
    ConfigOozieComponent(oozie, inputs);
    cluster.Components.Add(oozie);
    // Adding Hdfs component
    HdfsComponent hdfs = new HdfsComponent { HeadNodeRole = headnodeRole, WorkerNodeRole = workernodeRole };
    ConfigHdfsComponent(hdfs, inputs);
    cluster.Components.Add(hdfs);
    // Adding HadoopCore component
    HadoopCoreComponent hadoopCore = new HadoopCoreComponent();
    ConfigHadoopCoreComponent(hadoopCore, inputs);
    cluster.Components.Add(hadoopCore);
    ConfigVirtualNetwork(cluster, inputs);
    return(cluster);
}
/// <summary>
/// Round-trips a Spark create request carrying Oozie/Hive metastores through V3
/// serialization and verifies the deserialized request equals the original.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequestWithMetastore_Spark()
{
    var original = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        Version = "3.0",
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        ClusterSizeInNodes = new Random().Next(),
        ClusterType = ClusterType.Spark
    };
    // Two extra storage accounts exercise the additional-accounts serialization path.
    original.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    original.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    original.OozieMetastore = new Metastore(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N"));
    original.HiveMetastore = new Metastore(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N"));

    string serialized = new PayloadConverter().SerializeClusterCreateRequestV3(original);
    var roundTripped = ServerSerializer.DeserializeClusterCreateRequestV3(serialized);

    // Mirror the server-side default zookeeper size on the expected object before comparing.
    fixDefaultExpectedZookeeperSize(original);
    Assert.IsTrue(Equals(original, roundTripped));
}
/// <summary>
/// Verifies that setting ZookeeperNodeSize on a non-HBase (Spark) cluster create request
/// is rejected with an ArgumentException.
/// </summary>
public async Task ICanCreateACluster_WithVmSizes_All_Specified_NonHBase_Negative()
{
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    try
    {
        var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
        {
            Name = "ConfigActionTest",
            DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
            DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
            DefaultStorageContainer = "ConfigActionTest",
            ClusterSizeInNodes = 2,
            Location = "East US",
            UserName = "******",
            Password = "******",
            Version = "3.1",
            HeadNodeSize = "ExtraLarge",
            DataNodeSize = "Large",
            ZookeeperNodeSize = "Medium", // invalid: Spark clusters must not set a ZK size
            ClusterType = ClusterType.Spark,
        };
        // Add in valid config action.
        clusterCreateParameters.ConfigActions.Add(new ScriptAction("TestScriptAction", new ClusterNodeType[] { ClusterNodeType.HeadNode }, new Uri("http://www.microsoft.com"), null));
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        // This should not work for non-hbase clusters.
        Assert.Fail("Zookeeper node size should not be settable for non-hbase clusters");
    }
    catch (ArgumentException aex)
    {
        // Fixed: MSTest's Assert.AreEqual takes (expected, actual) — the arguments were reversed,
        // which produces a misleading failure message.
        Assert.AreEqual("clusterCreateParameters.ZookeeperNodeSize must be null for Spark clusters.", aex.Message);
    }
}
/// <summary>
/// Round-trips an HBase create request carrying Hive and HBase configuration entries
/// through V3 serialization and verifies equality with the original.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequestWithHiveAndHBaseConfigurationV3()
{
    var original = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        Version = "3.0",
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        ClusterSizeInNodes = new Random().Next(),
        ClusterType = ClusterType.HBase
    };
    // Hive and HBase configuration entries exercise both component-config serialization paths.
    original.HiveConfiguration.ConfigurationCollection.Add(new KeyValuePair<string, string>("my setting 1", "my value 1"));
    original.HiveConfiguration.ConfigurationCollection.Add(new KeyValuePair<string, string>("my setting 2", "my value 2"));
    original.HBaseConfiguration.ConfigurationCollection.Add(new KeyValuePair<string, string>("my setting 3", "my value 3"));

    string serialized = new PayloadConverter().SerializeClusterCreateRequestV3(original);
    var roundTripped = ServerSerializer.DeserializeClusterCreateRequestV3(serialized);

    // Mirror the server-side default zookeeper size on the expected object before comparing.
    fixDefaultExpectedZookeeperSize(original);
    Assert.IsTrue(Equals(original, roundTripped));
}
/// <summary>
/// Generate ClusterCreateParameters object for 3.X cluster with Hadoop and Spark.
/// </summary>
/// <param name="inputs">Cluster creation parameter inputs.</param>
/// <returns>The corresponding ClusterCreateParameter object.</returns>
internal static ClusterCreateParameters Create3XClusterForMapReduceAndSparkTemplate(HDInsight.ClusterCreateParametersV2 inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }
    var cluster = Create3XClusterFromMapReduceTemplate(inputs);

    // Resolve the single Yarn component once instead of enumerating Components twice.
    var yarn = cluster.Components.OfType<YarnComponent>().Single();

    // Add Spark component: its master runs with the Yarn resource manager, its workers
    // with the Yarn node managers.
    SparkComponent spark = new SparkComponent
    {
        MasterRole = yarn.ResourceManagerRole,
        WorkerRole = yarn.NodeManagerRole
    };
    ConfigSparkComponent(spark, inputs);
    cluster.Components.Add(spark);
    return (cluster);
}
/// <summary>
/// Generate ClusterCreateParameters object for 3.X cluster with Hadoop and HBase.
/// </summary>
/// <param name="inputs">Cluster creation parameter inputs.</param>
/// <returns>The corresponding ClusterCreateParameter object.</returns>
internal static ClusterCreateParameters Create3XClusterForMapReduceAndHBaseTemplate(HDInsight.ClusterCreateParametersV2 inputs)
{
    if (inputs == null)
    {
        throw new ArgumentNullException("inputs");
    }
    var cluster = Create3XClusterFromMapReduceTemplate(inputs);
    // The HBase master server role is taken from the zookeeper component's role.
    // NOTE(review): presumably HBase masters are colocated with ZK nodes by design — confirm
    // against the wire contract.
    var hbaseMasterRole = cluster.Components.OfType <ZookeeperComponent>().Single().ZookeeperRole;
    // In case no ZK node size is set for hbase, default the role size to Medium.
    if (inputs.ZookeeperNodeSize == null)
    {
        hbaseMasterRole.VMSizeAsString = VmSize.Medium.ToString();
    }
    // Add HBase component; region servers run on the HDFS worker-node role.
    HBaseComponent hbase = new HBaseComponent
    {
        MasterServerRole = hbaseMasterRole,
        RegionServerRole = cluster.Components.OfType <HdfsComponent>().Single().WorkerNodeRole
    };
    ConfigHBaseComponent(hbase, inputs);
    cluster.Components.Add(hbase);
    return(cluster);
}
/// <summary>
/// Builds a randomized create-parameters object whose head node size is mapped from the
/// given wire-contract VM size.
/// </summary>
/// <param name="headNodeSize">Wire-contract VM size to translate into HeadNodeSize.</param>
/// <returns>A populated create-parameters object with two additional storage accounts.</returns>
private static HDInsight.ClusterCreateParametersV2 GetClusterCreateParametersForHeadNodeSize(NodeVMSize headNodeSize)
{
    var parameters = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version,
        ClusterSizeInNodes = new Random().Next(),
        HeadNodeSize = headNodeSize.ToVmSize().ToString(),
    };

    // Two extra storage accounts exercise the additional-accounts path downstream.
    parameters.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    parameters.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));

    return parameters;
}
/// <summary>
/// Verifies that cluster creation rejects incomplete RDP credential combinations: each of
/// RdpUsername, RdpPassword and RdpAccessExpiry must be present when any other is, and the
/// expiry must lie in the future.
/// </summary>
public async Task CanCannotClusterCreateWithInvalidRdpCredentials()
{
    var restClient = ServiceLocator.Instance.Locate <IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clusterDnsName = "rdpTestCluster";
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    // Case 1: RdpPassword and RdpAccessExpiry supplied but RdpUsername empty.
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2 { Name = clusterDnsName, DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key, DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name, DefaultStorageContainer = "EnableDisableRdpTest", ClusterSizeInNodes = 2, Location = "East US", UserName = "******", Password = "******", Version = "3.1", ClusterType = ClusterType.Hadoop, RdpUsername = "", RdpPassword = "******", RdpAccessExpiry = DateTime.Now.AddDays(6) };
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        // Reaching this line means validation failed to reject the request.
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpUsername cannot be null or empty in case either RdpPassword or RdpAccessExpiry is specified Parameter name: clusterCreateParameters");
    }
    // Case 2: RdpUsername and RdpAccessExpiry supplied but RdpPassword empty.
    clusterCreateParameters.RdpUsername = "******";
    clusterCreateParameters.RdpPassword = "";
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpPassword cannot be null or empty in case either RdpUsername or RdpAccessExpiry is specified Parameter name: clusterCreateParameters");
    }
    // Case 3: RdpUsername and RdpPassword supplied but RdpAccessExpiry missing.
    clusterCreateParameters.RdpPassword = "******";
    clusterCreateParameters.RdpAccessExpiry = null;
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpAccessExpiry cannot be null or empty in case either RdpUsername or RdpPassword is specified Parameter name: clusterCreateParameters");
    }
    // Case 4: expiry set to a time in the past.
    clusterCreateParameters.RdpAccessExpiry = DateTime.MinValue;
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpAccessExpiry should be a time in future. Parameter name: clusterCreateParameters");
    }
}
/// <summary>
/// Round-trips a basic create request (no metastores, no config actions) through
/// serialization and verifies the deserialized request equals the original.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequest()
{
    var original = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version,
        ClusterSizeInNodes = new Random().Next()
    };
    // Two extra storage accounts exercise the additional-accounts serialization path.
    original.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));
    original.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(Guid.NewGuid().ToString("N"), Guid.NewGuid().ToString("N")));

    string serialized = new PayloadConverter().SerializeClusterCreateRequest(original);
    var roundTripped = ServerSerializer.DeserializeClusterCreateRequest(serialized);

    Assert.IsTrue(Equals(original, roundTripped));
}
/// <summary>
/// Creates a cluster with RDP credentials, verifies the RDP user is reported back, then
/// disables RDP and verifies the user is cleared before deleting the cluster.
/// </summary>
public async Task CanCreateRdpUserDuringClusterCreate()
{
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_1_SDK");
    Capabilities.Add("CAPABILITY_FEATURE_CLUSTERS_CONTRACT_VERSION_3_SDK");
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clusterDnsName = "rdpTestCluster";
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
    {
        Name = clusterDnsName,
        DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
        DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
        DefaultStorageContainer = "EnableDisableRdpTest",
        ClusterSizeInNodes = 2,
        Location = "East US",
        UserName = "******",
        Password = "******",
        Version = "3.1",
        ClusterType = ClusterType.Hadoop,
        RdpUsername = "******",
        RdpPassword = "******",
        RdpAccessExpiry = DateTime.Now.AddDays(6)
    };
    await clustersPocoClient.CreateContainer(clusterCreateParameters);

    // Fixed: await the task instead of blocking on .Result — blocking inside an async method
    // risks deadlocks and wraps failures in AggregateException.
    var cluster = await clustersPocoClient.ListContainer(clusterDnsName);
    var rdpUsername = "******";
    var actualRdpUserName = cluster.RdpUserName;
    Assert.AreEqual(rdpUsername, actualRdpUserName);

    // Disabling RDP should clear the reported RDP user.
    await clustersPocoClient.DisableRdp(clusterDnsName, cluster.Location);
    cluster = await clustersPocoClient.ListContainer(clusterDnsName);
    Assert.IsNull(cluster.RdpUserName);

    await clustersPocoClient.DeleteContainer(cluster.Name, cluster.Location);
}
/// <summary>
/// Copies the user-supplied Yarn settings onto the wire component's configuration list.
/// </summary>
/// <param name="yarn">Wire-contract Yarn component to populate.</param>
/// <param name="inputs">SDK create parameters carrying the Yarn configuration.</param>
private static void ConfigYarnComponent(YarnComponent yarn, HDInsight.ClusterCreateParametersV2 inputs)
{
    foreach (var setting in inputs.YarnConfiguration)
    {
        yarn.Configuration.Add(new Property { Name = setting.Key, Value = setting.Value });
    }
}
/// <summary>
/// Reconstructs a <see cref="HDInsight.ClusterCreateParametersV2"/> from the internal
/// <see cref="ClusterContainer"/> payload returned by the server.
/// </summary>
/// <param name="payloadObject">Server-side cluster container payload.</param>
/// <returns>The equivalent SDK create-parameters object.</returns>
/// <exception cref="InvalidDataContractException">The server reported a head node VM size
/// that the SDK does not recognize.</exception>
private static HDInsight.ClusterCreateParametersV2 CreateClusterRequest_FromInternal(ClusterContainer payloadObject)
{
    var cluster = new HDInsight.ClusterCreateParametersV2
    {
        Location = payloadObject.Region,
        Name = payloadObject.ClusterName
    };
    cluster.UserName = payloadObject.Deployment.ClusterUsername;
    cluster.Password = payloadObject.Deployment.ClusterPassword;
    cluster.Version = payloadObject.Deployment.Version;

    // The first storage account in the list is the default account.
    cluster.DefaultStorageAccountName = payloadObject.StorageAccounts[0].AccountName;
    cluster.DefaultStorageAccountKey = payloadObject.StorageAccounts[0].Key;
    cluster.DefaultStorageContainer = payloadObject.StorageAccounts[0].BlobContainerName;

    var headnodeRole = payloadObject.Deployment.Roles.Single(r => r.RoleType == ClusterRoleType.HeadNode);
    // If headnode count is 1 and size XL, the server treats it as Default — leave HeadNodeSize unset.
    if (headnodeRole.VMSize == Microsoft.WindowsAzure.Management.HDInsight.Contracts.May2013.NodeVMSize.ExtraLarge && headnodeRole.Count == 1)
    {
        // Intentional no-op for ccpv2 (previously: cluster.HeadNodeSize = HDInsight.NodeVMSize.Default).
    }
    else
    {
        switch (headnodeRole.VMSize)
        {
            case Microsoft.WindowsAzure.Management.HDInsight.Contracts.May2013.NodeVMSize.ExtraLarge:
                cluster.HeadNodeSize = HDInsight.NodeVMSize.ExtraLarge.ToString();
                break;
            case Microsoft.WindowsAzure.Management.HDInsight.Contracts.May2013.NodeVMSize.Large:
                cluster.HeadNodeSize = HDInsight.NodeVMSize.Large.ToString();
                break;
            default:
                // Fixed: the format string was missing the closing quote after {0}.
                throw new InvalidDataContractException(string.Format("The server returned an unsupported value for head node VM size '{0}'", headnodeRole.VMSize));
        }
    }

    // Every storage account after the first is an additional (non-default) account.
    foreach (var asv in payloadObject.StorageAccounts.Skip(1))
    {
        cluster.AdditionalStorageAccounts.Add(new WabStorageAccountConfiguration(asv.AccountName, asv.Key));
    }

    if (payloadObject.Settings != null)
    {
        CopyConfiguration(payloadObject, cluster);
        // Oozie/Hive catalogs map back to their respective metastore settings.
        if (payloadObject.Settings.Oozie != null && payloadObject.Settings.Oozie.Catalog != null)
        {
            var oozieMetaStore = payloadObject.Settings.Oozie.Catalog;
            cluster.OozieMetastore = new Metastore(oozieMetaStore.Server, oozieMetaStore.DatabaseName, oozieMetaStore.Username, oozieMetaStore.Password);
        }
        if (payloadObject.Settings.Hive != null && payloadObject.Settings.Hive.Catalog != null)
        {
            var hiveMetaStore = payloadObject.Settings.Hive.Catalog;
            cluster.HiveMetastore = new Metastore(hiveMetaStore.Server, hiveMetaStore.DatabaseName, hiveMetaStore.Username, hiveMetaStore.Password);
        }
    }

    // Cluster size is the total data-node count summed across roles.
    cluster.ClusterSizeInNodes = payloadObject.Deployment.Roles.Where(r => r.RoleType == ClusterRoleType.DataNode)
        .Sum(role => role.Count);
    return (cluster);
}
/// <summary>
/// Serializes a create request whose Hive and Oozie share one metastore (May contracts)
/// and verifies the resulting resource reports schema version 2.0.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequest_MayContracts()
{
    var request = new HDInsight.ClusterCreateParametersV2
    {
        Name = "bcarlson",
        ClusterSizeInNodes = 1,
        UserName = "******",
        Version = "default",
        Password = "******",
        Location = "East US"
    };
    request.DefaultStorageAccountName = "hdicurrenteastus.blob.core.windows.net";
    request.DefaultStorageContainer = "newcontainer";
    request.DefaultStorageAccountKey = "jKe7cqoU0a9OmDFlwi3DHZLf7JoKwGOU2pV1iZdBKifxwQuDOKwZFyXMJrPSLtGgDV9b7pVKSGz6lbBWcfX2lA==";

    // A single metastore instance backs both Hive and Oozie.
    var sharedMetastore = new Metastore("lbl44y45cd.bigbean.windowsazure.mscds.com", "newmaytestdb", "bcarlson", "SuperPass1!");
    request.HiveMetastore = sharedMetastore;
    request.OozieMetastore = sharedMetastore;

    string serialized = new PayloadConverter().SerializeClusterCreateRequest(request);
    var resource = ServerSerializer.DeserializeClusterCreateRequestIntoResource(serialized);

    Assert.AreEqual(resource.SchemaVersion, "2.0");
}
/// <summary>
/// Copies every component-level configuration from the May2014 wire payload back onto the
/// SDK-level <see cref="HDInsight.ClusterCreateParametersV2"/> object.
/// </summary>
/// <param name="payloadObject">Wire-contract create parameters returned by the server.</param>
/// <param name="cluster">SDK create parameters to populate.</param>
private static void CopyConfigurationForCluster(
    Microsoft.WindowsAzure.Management.HDInsight.Contracts.May2014.ClusterCreateParameters payloadObject,
    HDInsight.ClusterCreateParametersV2 cluster)
{
    // Mandatory components: exactly one of each is expected in the payload (Single throws otherwise).
    var yarn = payloadObject.Components.OfType <YarnComponent>().Single();
    var mapreduce = yarn.Applications.OfType <MapReduceApplication>().Single();
    var hive = payloadObject.Components.OfType <HiveComponent>().Single();
    var oozie = payloadObject.Components.OfType <OozieComponent>().Single();
    var hdfs = payloadObject.Components.OfType <HdfsComponent>().Single();
    var hadoopCore = payloadObject.Components.OfType <HadoopCoreComponent>().Single();
    // Optional components: only present for the matching cluster type / feature; left null otherwise.
    HBaseComponent hbase = null;
    if (payloadObject.Components.OfType <HBaseComponent>().Count() == 1)
    {
        hbase = payloadObject.Components.OfType <HBaseComponent>().Single();
    }
    StormComponent storm = null;
    if (payloadObject.Components.OfType <StormComponent>().Count() == 1)
    {
        storm = payloadObject.Components.OfType <StormComponent>().Single();
    }
    SparkComponent spark = null;
    if (payloadObject.Components.OfType <SparkComponent>().Count() == 1)
    {
        spark = payloadObject.Components.OfType <SparkComponent>().Single();
    }
    CustomActionComponent configActions = null;
    if (payloadObject.Components.OfType <CustomActionComponent>().Count() == 1)
    {
        configActions = payloadObject.Components.OfType <CustomActionComponent>().Single();
    }
    // Copy each component's settings back into the SDK object, converting wire Property
    // entries into key/value pairs.
    if (hadoopCore.CoreSiteXmlProperties.Any())
    {
        cluster.CoreConfiguration.AddRange(
            hadoopCore.CoreSiteXmlProperties.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (hdfs.HdfsSiteXmlProperties.Any())
    {
        cluster.HdfsConfiguration.AddRange(hdfs.HdfsSiteXmlProperties.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (mapreduce.MapRedSiteXmlProperties.Any())
    {
        cluster.MapReduceConfiguration.ConfigurationCollection.AddRange(
            mapreduce.MapRedSiteXmlProperties.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (mapreduce.CapacitySchedulerConfiguration.Any())
    {
        cluster.MapReduceConfiguration.CapacitySchedulerConfigurationCollection.AddRange(
            mapreduce.CapacitySchedulerConfiguration.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (mapreduce.AdditionalStorageContainers.ToList().Any())
    {
        cluster.AdditionalStorageAccounts.AddRange(
            from BlobContainerCredentialBackedResource tem in mapreduce.AdditionalStorageContainers
            select new WabStorageAccountConfiguration(tem.AccountDnsName, tem.Key, tem.BlobContainerName));
    }
    if (yarn.Configuration.Any())
    {
        cluster.YarnConfiguration.AddRange(yarn.Configuration.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (hive.HiveSiteXmlProperties.Any())
    {
        cluster.HiveConfiguration.ConfigurationCollection.AddRange(
            hive.HiveSiteXmlProperties.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (hive.AdditionalLibraries != null)
    {
        cluster.HiveConfiguration.AdditionalLibraries = new WabStorageAccountConfiguration(
            hive.AdditionalLibraries.AccountDnsName, hive.AdditionalLibraries.Key, hive.AdditionalLibraries.BlobContainerName);
    }
    // ShouldProvisionNew == false means the payload references an existing SQL Azure metastore.
    if (!hive.Metastore.ShouldProvisionNew)
    {
        var metaStore = (SqlAzureDatabaseCredentialBackedResource)hive.Metastore;
        cluster.HiveMetastore = new Metastore(
            metaStore.SqlServerName, metaStore.DatabaseName, metaStore.Credentials.Username, metaStore.Credentials.Password);
    }
    if (configActions != null)
    {
        // Only script custom actions are copied back; other custom-action kinds are silently skipped.
        foreach (var configAction in configActions.CustomActions)
        {
            ScriptCustomAction sca = configAction as ScriptCustomAction;
            if (sca != null)
            {
                cluster.ConfigActions.Add(new ScriptAction(
                    sca.Name, ConvertClusterRoleToClusterNodeType(sca), sca.Uri, sca.Parameters));
            }
        }
    }
    if (oozie.Configuration.Any())
    {
        cluster.OozieConfiguration.ConfigurationCollection.AddRange(
            oozie.Configuration.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (oozie.AdditionalSharedLibraries != null)
    {
        cluster.OozieConfiguration.AdditionalSharedLibraries = new WabStorageAccountConfiguration(
            oozie.AdditionalSharedLibraries.AccountDnsName, oozie.AdditionalSharedLibraries.Key, oozie.AdditionalSharedLibraries.BlobContainerName);
    }
    if (oozie.AdditionalActionExecutorLibraries != null)
    {
        cluster.OozieConfiguration.AdditionalActionExecutorLibraries = new WabStorageAccountConfiguration(
            oozie.AdditionalActionExecutorLibraries.AccountDnsName, oozie.AdditionalActionExecutorLibraries.Key, oozie.AdditionalActionExecutorLibraries.BlobContainerName);
    }
    if (!oozie.Metastore.ShouldProvisionNew)
    {
        var metaStore = (SqlAzureDatabaseCredentialBackedResource)oozie.Metastore;
        cluster.OozieMetastore = new Metastore(
            metaStore.SqlServerName, metaStore.DatabaseName, metaStore.Credentials.Username, metaStore.Credentials.Password);
    }
    if (hbase != null && hbase.HBaseConfXmlProperties.Any())
    {
        cluster.HBaseConfiguration.ConfigurationCollection.AddRange(
            hbase.HBaseConfXmlProperties.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (hbase != null && hbase.AdditionalLibraries != null)
    {
        cluster.HBaseConfiguration.AdditionalLibraries = new WabStorageAccountConfiguration(
            hbase.AdditionalLibraries.AccountDnsName, hbase.AdditionalLibraries.Key, hbase.AdditionalLibraries.BlobContainerName);
    }
    if (storm != null && storm.StormConfiguration.Any())
    {
        cluster.StormConfiguration.AddRange(
            storm.StormConfiguration.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
    if (spark != null && spark.SparkConfiguration.Any())
    {
        cluster.SparkConfiguration.AddRange(
            spark.SparkConfiguration.Select(prop => new KeyValuePair <string, string>(prop.Name, prop.Value)));
    }
}
/// <summary>
/// Copies the settings section of a <see cref="ClusterContainer"/> payload
/// (core / yarn / hive / hdfs / mapreduce / oozie) onto the SDK create-parameters object.
/// Callers must ensure <c>payloadObject.Settings</c> is non-null.
/// </summary>
/// <param name="payloadObject">Server-side cluster container payload.</param>
/// <param name="cluster">SDK create parameters to populate.</param>
private static void CopyConfiguration(ClusterContainer payloadObject, HDInsight.ClusterCreateParametersV2 cluster)
{
    if (payloadObject.Settings.Core != null && payloadObject.Settings.Core.Configuration != null)
    {
        cluster.CoreConfiguration.AddRange(
            payloadObject.Settings.Core.Configuration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
    }
    if (payloadObject.Settings.Yarn != null && payloadObject.Settings.Yarn.Configuration != null)
    {
        cluster.YarnConfiguration.AddRange(
            payloadObject.Settings.Yarn.Configuration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
    }
    if (payloadObject.Settings.Hive != null)
    {
        if (payloadObject.Settings.Hive.AdditionalLibraries != null)
        {
            cluster.HiveConfiguration.AdditionalLibraries = new WabStorageAccountConfiguration(
                payloadObject.Settings.Hive.AdditionalLibraries.AccountName,
                payloadObject.Settings.Hive.AdditionalLibraries.Key,
                payloadObject.Settings.Hive.AdditionalLibraries.BlobContainerName);
        }
        if (payloadObject.Settings.Hive.Configuration != null)
        {
            cluster.HiveConfiguration.ConfigurationCollection.AddRange(
                payloadObject.Settings.Hive.Configuration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
        }
    }
    if (payloadObject.Settings.Hdfs != null && payloadObject.Settings.Hdfs.Configuration != null)
    {
        cluster.HdfsConfiguration.AddRange(
            payloadObject.Settings.Hdfs.Configuration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
    }
    if (payloadObject.Settings.MapReduce != null && payloadObject.Settings.MapReduce.Configuration != null)
    {
        // Replaces any pre-existing MapReduce configuration object before copying.
        cluster.MapReduceConfiguration = new HDInsight.MapReduceConfiguration();
        // NOTE(review): this inner null check is redundant — the outer condition already
        // guarantees Configuration != null.
        if (payloadObject.Settings.MapReduce.Configuration != null)
        {
            cluster.MapReduceConfiguration.ConfigurationCollection.AddRange(
                payloadObject.Settings.MapReduce.Configuration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
        }
        if (payloadObject.Settings.MapReduce.CapacitySchedulerConfiguration != null)
        {
            cluster.MapReduceConfiguration.CapacitySchedulerConfigurationCollection.AddRange(
                payloadObject.Settings.MapReduce.CapacitySchedulerConfiguration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
        }
    }
    // NOTE(review): the Oozie library settings below are only copied when Oozie.Configuration
    // is non-null — confirm libraries without configuration cannot occur in practice.
    if (payloadObject.Settings.Oozie != null && payloadObject.Settings.Oozie.Configuration != null)
    {
        if (cluster.OozieConfiguration.ConfigurationCollection != null)
        {
            cluster.OozieConfiguration.ConfigurationCollection.AddRange(
                payloadObject.Settings.Oozie.Configuration.Select(config => new KeyValuePair <string, string>(config.Name, config.Value)));
        }
        if (payloadObject.Settings.Oozie.AdditionalSharedLibraries != null)
        {
            cluster.OozieConfiguration.AdditionalSharedLibraries = new WabStorageAccountConfiguration(
                payloadObject.Settings.Oozie.AdditionalSharedLibraries.AccountName,
                payloadObject.Settings.Oozie.AdditionalSharedLibraries.Key,
                payloadObject.Settings.Oozie.AdditionalSharedLibraries.BlobContainerName);
        }
        if (payloadObject.Settings.Oozie.AdditionalActionExecutorLibraries != null)
        {
            cluster.OozieConfiguration.AdditionalActionExecutorLibraries = new WabStorageAccountConfiguration(
                payloadObject.Settings.Oozie.AdditionalActionExecutorLibraries.AccountName,
                payloadObject.Settings.Oozie.AdditionalActionExecutorLibraries.Key,
                payloadObject.Settings.Oozie.AdditionalActionExecutorLibraries.BlobContainerName);
        }
    }
}
/// <summary>
/// Verifies that cluster creation rejects incomplete RDP credential combinations: each of
/// RdpUsername, RdpPassword and RdpAccessExpiry must be present when any other is, and the
/// expiry must lie in the future.
/// </summary>
public async Task CanCannotClusterCreateWithInvalidRdpCredentials()
{
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clusterDnsName = "rdpTestCluster";
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    // Case 1: RdpPassword and RdpAccessExpiry supplied but RdpUsername empty.
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2 { Name = clusterDnsName, DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key, DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name, DefaultStorageContainer = "EnableDisableRdpTest", ClusterSizeInNodes = 2, Location = "East US", UserName = "******", Password = "******", Version = "3.1", ClusterType = ClusterType.Hadoop, RdpUsername = "", RdpPassword = "******", RdpAccessExpiry = DateTime.Now.AddDays(6) };
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        // Reaching this line means validation failed to reject the request.
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpUsername cannot be null or empty in case either RdpPassword or RdpAccessExpiry is specified Parameter name: clusterCreateParameters");
    }
    // Case 2: RdpUsername and RdpAccessExpiry supplied but RdpPassword empty.
    clusterCreateParameters.RdpUsername = "******";
    clusterCreateParameters.RdpPassword = "";
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpPassword cannot be null or empty in case either RdpUsername or RdpAccessExpiry is specified Parameter name: clusterCreateParameters");
    }
    // Case 3: RdpUsername and RdpPassword supplied but RdpAccessExpiry missing.
    clusterCreateParameters.RdpPassword = "******";
    clusterCreateParameters.RdpAccessExpiry = null;
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpAccessExpiry cannot be null or empty in case either RdpUsername or RdpPassword is specified Parameter name: clusterCreateParameters");
    }
    // Case 4: expiry set to a time in the past.
    clusterCreateParameters.RdpAccessExpiry = DateTime.MinValue;
    try
    {
        await clustersPocoClient.CreateContainer(clusterCreateParameters);
        throw new Exception("CreateContainer should have thrown an ArgumentException");
    }
    catch (ArgumentException exp)
    {
        Assert.AreEqual(exp.Message, @"clusterCreateParameters.RdpAccessExpiry should be a time in future. Parameter name: clusterCreateParameters");
    }
}
/// <summary>
/// Serializes a create request whose script config action has a null node-type collection.
/// NOTE(review): there is no assert or try/catch here — the failure is presumably expected
/// via an [ExpectedException] attribute on this test method; confirm the attribute exists.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationCreateRequestWithInvalidConfigActionsV3()
{
    // Randomized request with one script action whose cluster-role collection is null (invalid).
    var testInvalidConfigAction = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version,
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        ClusterSizeInNodes = new Random().Next()
    };
    testInvalidConfigAction.ConfigActions.Add(new ScriptAction("test invalid script action", null, new Uri("http://www.test.com"), "test parameter"));
    new PayloadConverter().SerializeClusterCreateRequestV3(testInvalidConfigAction);
}
// Attempts to create an HBase cluster with all three legacy VM size names
// (ExtraLarge / Large / Medium) specified. A NotSupportedException from the
// poco client is an acceptable outcome for subscriptions without the
// required capability, so it is caught and tolerated.
public async Task ICanCreateACluster_WithOldVmSizes_All_Specified()
{
    var clientFactory = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>();
    var rdfeRestClient = clientFactory.Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var pocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, rdfeRestClient);
    try
    {
        var createParameters = new HDInsight.ClusterCreateParametersV2
        {
            Name = "ConfigActionTest",
            DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
            DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
            DefaultStorageContainer = "ConfigActionTest",
            ClusterSizeInNodes = 2,
            Location = "East US",
            UserName = "******",
            Password = "******",
            Version = "3.1",
            HeadNodeSize = "ExtraLarge",
            DataNodeSize = "Large",
            ZookeeperNodeSize = "Medium",
            ClusterType = ClusterType.HBase,
        };

        // Attach one valid head-node script action alongside the old sizes.
        var scriptAction = new ScriptAction("TestScriptAction", new ClusterNodeType[] { ClusterNodeType.HeadNode }, new Uri("http://www.microsoft.com"), null);
        createParameters.ConfigActions.Add(scriptAction);

        await pocoClient.CreateContainer(createParameters);
    }
    catch (NotSupportedException ex)
    {
        // Subscription lacks the capability; treat as a valid outcome.
        Assert.IsNotNull(ex);
    }
}
// Round-trips a create request with two script actions through the V3
// serializer and deserializer, then verifies the result equals the input
// (after normalizing the default zookeeper size on the expected side).
public void InternalValidation_PayloadConverter_SerializationCreateRequestWithConfigActionsV3()
{
    var original = new HDInsight.ClusterCreateParametersV2
    {
        UserName = Guid.NewGuid().ToString("N"),
        Password = Guid.NewGuid().ToString("N"),
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version,
        DefaultStorageAccountKey = Guid.NewGuid().ToString("N"),
        DefaultStorageAccountName = Guid.NewGuid().ToString("N"),
        DefaultStorageContainer = Guid.NewGuid().ToString("N"),
        Name = GetRandomClusterName(),
        Location = "East US",
        ClusterSizeInNodes = new Random().Next()
    };

    // One action targeting only head nodes, one targeting head and data nodes.
    var headNodesOnly = new ClusterNodeType[] { ClusterNodeType.HeadNode };
    var headAndDataNodes = new ClusterNodeType[] { ClusterNodeType.HeadNode, ClusterNodeType.DataNode };
    original.ConfigActions.Add(new ScriptAction("testconfigaction1", headNodesOnly, new Uri("http://www.test1.com"), "test parameter1"));
    original.ConfigActions.Add(new ScriptAction("testconfigaction2", headAndDataNodes, new Uri("http://www.test2.com"), "test parameter2"));

    // Serialize, then parse the payload back with the server-side deserializer.
    string payload = new PayloadConverter().SerializeClusterCreateRequestV3(original);
    var roundTripped = ServerSerializer.DeserializeClusterCreateRequestV3(payload);

    // The server applies a default zookeeper size; mirror it on the expected side.
    fixDefaultExpectedZookeeperSize(original);
    Assert.IsTrue(Equals(original, roundTripped));
}
// Verifies that an IaaS (Linux, 3.2) Hadoop cluster can be created with a
// Standard_D12 head node, and that the new cluster shows up in the
// container listing afterwards.
public async Task CanCreateIaasClusterWithD12Headnode()
{
    var restClient = ServiceLocator.Instance.Locate<IRdfeClustersResourceRestClientFactory>()
        .Create(this.DefaultHandler, this.HdInsightCertCred, this.Context, false, SchemaVersionUtils.GetSchemaVersion(Capabilities));
    var clustersPocoClient = new PaasClustersPocoClient(this.HdInsightCertCred, false, this.Context, Capabilities, restClient);
    var clusterCreateParameters = new HDInsight.ClusterCreateParametersV2
    {
        Name = "D12HeadnodeCreationTest",
        DefaultStorageAccountKey = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Key,
        DefaultStorageAccountName = IntegrationTestBase.TestCredentials.Environments[0].DefaultStorageAccount.Name,
        DefaultStorageContainer = "D12HeadnodeCreationTest",
        ClusterSizeInNodes = 2,
        Location = "East US",
        UserName = "******",
        Password = "******",
        OSType = OSType.Linux,
        Version = "3.2",
        ClusterType = ClusterProvisioning.Data.ClusterType.Hadoop,
        HeadNodeSize = "Standard_D12"
    };
    await clustersPocoClient.CreateContainer(clusterCreateParameters);

    // Fix: await the listing instead of blocking on .Result. Mixing await
    // with .Result inside the same async method risks deadlocks and wraps
    // failures in AggregateException instead of the original exception.
    var containersList = await clustersPocoClient.ListContainers();

    // Fix: MSTest Assert.AreEqual takes (expected, actual) — the original
    // passed them reversed, producing misleading failure messages.
    Assert.AreEqual(1, containersList.Count);
    Assert.IsNotNull(containersList.SingleOrDefault(cluster => cluster.Name.Equals("D12HeadnodeCreationTest")));
}
/// <summary>
/// Validates the supplied cluster create parameters and provisions the
/// cluster container through the RDFE clusters REST client.
/// </summary>
/// <param name="clusterCreateParameters">The cluster create parameters; must have a non-empty Name and Location and at least one node.</param>
/// <returns>A task that completes when the create request has been submitted.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="clusterCreateParameters"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when Name/Location are empty, ClusterSizeInNodes is less than 1, or ZookeeperNodeSize is set for a non-HBase/Storm cluster type.</exception>
/// <exception cref="NotSupportedException">Thrown when the subscription lacks the config-action or new-VM-size capability/schema version.</exception>
/// <exception cref="InvalidOperationException">Thrown when the requested location is not available to the subscription.</exception>
/// <exception cref="HttpLayerException">Thrown when the REST call fails with an unexpected status code.</exception>
public async Task CreateContainer(HDInsight.ClusterCreateParametersV2 clusterCreateParameters)
{
    // Argument validation happens before any network work so callers get
    // synchronous-style ArgumentExceptions for bad input.
    if (clusterCreateParameters == null)
    {
        throw new ArgumentNullException("clusterCreateParameters");
    }
    if (string.IsNullOrEmpty(clusterCreateParameters.Name))
    {
        throw new ArgumentException("ClusterCreateParameters.Name cannot be null or empty", "clusterCreateParameters");
    }
    if (string.IsNullOrEmpty(clusterCreateParameters.Location))
    {
        throw new ArgumentException("ClusterCreateParameters.Location cannot be null or empty", "clusterCreateParameters");
    }
    if (clusterCreateParameters.ClusterSizeInNodes < 1)
    {
        throw new ArgumentException("clusterCreateParameters.ClusterSizeInNodes must be > 0");
    }
    // Allow zookeeper size to be specified only for HBase and Storm clusters.
    if (clusterCreateParameters.ZookeeperNodeSize != null)
    {
        if (clusterCreateParameters.ClusterType != ClusterType.HBase &&
            clusterCreateParameters.ClusterType != ClusterType.Storm)
        {
            throw new ArgumentException(
                string.Format("clusterCreateParameters.ZookeeperNodeSize must be null for {0} clusters.",
                clusterCreateParameters.ClusterType));
        }
    }
    try
    {
        // Validate and resolve the Azure storage (ASV) accounts.
        AsvValidationHelper.ValidateAndResolveAsvAccountsAndPrep(clusterCreateParameters);

        // Validates config action component: requires both the capability
        // flag and a sufficiently new schema version on the subscription.
        if (clusterCreateParameters.ConfigActions != null && clusterCreateParameters.ConfigActions.Count > 0)
        {
            this.LogMessage("Validating parameters for config actions.", Severity.Informational, Verbosity.Detailed);

            if (!HasClusterConfigActionCapability(this.capabilities) ||
                !HasCorrectSchemaVersionForConfigAction(this.capabilities))
            {
                throw new NotSupportedException("Your subscription does not support config actions.");
            }

            this.LogMessage("Validating URIs for config actions.", Severity.Informational, Verbosity.Detailed);

            // Validates that the config actions' Uris are downloadable.
            UriEndpointValidator.ValidateAndResolveConfigActionEndpointUris(clusterCreateParameters);
        }

        // Validate if new vm sizes are used and if the schema is on.
        if (CreateHasNewVMSizesSpecified(clusterCreateParameters) &&
            !HasCorrectSchemaVersionForNewVMSizes(this.capabilities))
        {
            throw new NotSupportedException("Your subscription does not support new VM sizes.");
        }

        // Fetch the subscription's resource-provider properties; note these
        // are the freshly fetched capabilities, distinct from the cached
        // this.capabilities used above.
        var rdfeCapabilitiesClient =
            ServiceLocator.Instance.Locate<IRdfeServiceRestClientFactory>()
                .Create(this.credentials, this.Context, this.ignoreSslErrors);
        var capabilities = await rdfeCapabilitiesClient.GetResourceProviderProperties();

        // Validates the region for the cluster creation.
        var locationClient =
            ServiceLocator.Instance.Locate<ILocationFinderClientFactory>()
                .Create(this.credentials, this.Context, this.ignoreSslErrors);
        var availableLocations = locationClient.ListAvailableLocations(capabilities);
        if (!availableLocations.Contains(clusterCreateParameters.Location, StringComparer.OrdinalIgnoreCase))
        {
            throw new InvalidOperationException(string.Format(
                "Cannot create a cluster in '{0}'. Available Locations for your subscription are: {1}",
                clusterCreateParameters.Location,
                string.Join(",", availableLocations)));
        }

        // Ensure the subscription is registered and the regional cloud
        // service exists before issuing the create request.
        await this.RegisterSubscriptionIfExistsAsync();
        await this.CreateCloudServiceAsyncIfNotExists(clusterCreateParameters.Location);

        // Convert user-facing parameters to the wire format, then wrap them
        // in the RDFE resource envelope for the configured schema version.
        var wireCreateParameters = PayloadConverterClusters.CreateWireClusterCreateParametersFromUserType(clusterCreateParameters);
        var rdfeResourceInputFromWireInput =
            PayloadConverterClusters.CreateRdfeResourceInputFromWireInput(
                wireCreateParameters, SchemaVersionUtils.GetSchemaVersion(this.capabilities));

        await this.rdfeClustersRestClient.CreateCluster(
            this.credentials.SubscriptionId.ToString(),
            this.GetCloudServiceName(clusterCreateParameters.Location),
            this.credentials.DeploymentNamespace,
            clusterCreateParameters.Name,
            rdfeResourceInputFromWireInput,
            this.Context.CancellationToken);
    }
    catch (InvalidExpectedStatusCodeException iEx)
    {
        // Surface the REST failure as an HttpLayerException with the raw
        // response body. NOTE(review): .Result blocks inside an async method;
        // await-in-catch would fix this but requires C# 6 — confirm language
        // version before changing.
        string content = iEx.Response.Content != null ? iEx.Response.Content.ReadAsStringAsync().Result : string.Empty;
        throw new HttpLayerException(iEx.ReceivedStatusCode, content);
    }
}