        /// <summary>
        /// Generates a ClusterCreateParameters object for a 3.X cluster with Hadoop and Spark.
        /// </summary>
        /// <param name="inputs">Cluster creation parameter inputs.</param>
        /// <returns>The corresponding ClusterCreateParameters object.</returns>
        internal static ClusterCreateParameters Create3XClusterForMapReduceAndSparkTemplate(HDInsight.ClusterCreateParametersV2 inputs)
        {
            if (inputs == null)
            {
                throw new ArgumentNullException("inputs");
            }

            var cluster = Create3XClusterFromMapReduceTemplate(inputs);

            var yarn = cluster.Components.OfType<YarnComponent>().Single();
            var masterRole = yarn.ResourceManagerRole;
            var workerRole = yarn.NodeManagerRole;

            // Add the Spark component, reusing the YARN master and worker roles.
            SparkComponent spark = new SparkComponent
            {
                MasterRole = masterRole,
                WorkerRole = workerRole
            };

            ConfigSparkComponent(spark, inputs);
            cluster.Components.Add(spark);

            return cluster;
        }
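
        /// <summary>
        /// Copies component configuration settings from the cluster create payload onto the corresponding
        /// collections of the ClusterCreateParametersV2 object.
        /// </summary>
        /// <param name="payloadObject">The payload object whose component settings are copied.</param>
        /// <param name="cluster">The cluster create parameters to populate.</param>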
        private static void CopyConfigurationForCluster(
            Microsoft.WindowsAzure.Management.HDInsight.Contracts.May2014.ClusterCreateParameters payloadObject, HDInsight.ClusterCreateParametersV2 cluster)
        {
            var yarn       = payloadObject.Components.OfType<YarnComponent>().Single();
            var mapreduce  = yarn.Applications.OfType<MapReduceApplication>().Single();
            var hive       = payloadObject.Components.OfType<HiveComponent>().Single();
            var oozie      = payloadObject.Components.OfType<OozieComponent>().Single();
            var hdfs       = payloadObject.Components.OfType<HdfsComponent>().Single();
            var hadoopCore = payloadObject.Components.OfType<HadoopCoreComponent>().Single();

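            // Optional components are only present on cluster types that include them.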
            HBaseComponent hbase = null;
            if (payloadObject.Components.OfType<HBaseComponent>().Count() == 1)
            {
                hbase = payloadObject.Components.OfType<HBaseComponent>().Single();
            }

            StormComponent storm = null;
            if (payloadObject.Components.OfType<StormComponent>().Count() == 1)
            {
                storm = payloadObject.Components.OfType<StormComponent>().Single();
            }

            SparkComponent spark = null;
            if (payloadObject.Components.OfType<SparkComponent>().Count() == 1)
            {
                spark = payloadObject.Components.OfType<SparkComponent>().Single();
            }

            CustomActionComponent configActions = null;
            if (payloadObject.Components.OfType<CustomActionComponent>().Count() == 1)
            {
                configActions = payloadObject.Components.OfType<CustomActionComponent>().Single();
            }

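            // Copy Hadoop core, HDFS, MapReduce, and YARN site settings, plus any additional storage containers.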
            if (hadoopCore.CoreSiteXmlProperties.Any())
            {
                cluster.CoreConfiguration.AddRange(
                    hadoopCore.CoreSiteXmlProperties.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (hdfs.HdfsSiteXmlProperties.Any())
            {
                cluster.HdfsConfiguration.AddRange(hdfs.HdfsSiteXmlProperties.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (mapreduce.MapRedSiteXmlProperties.Any())
            {
                cluster.MapReduceConfiguration.ConfigurationCollection.AddRange(
                    mapreduce.MapRedSiteXmlProperties.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (mapreduce.CapacitySchedulerConfiguration.Any())
            {
                cluster.MapReduceConfiguration.CapacitySchedulerConfigurationCollection.AddRange(
                    mapreduce.CapacitySchedulerConfiguration.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (mapreduce.AdditionalStorageContainers.Any())
            {
                cluster.AdditionalStorageAccounts.AddRange(
                    from BlobContainerCredentialBackedResource tem in mapreduce.AdditionalStorageContainers
                    select new WabStorageAccountConfiguration(tem.AccountDnsName, tem.Key, tem.BlobContainerName));
            }

            if (yarn.Configuration.Any())
            {
                cluster.YarnConfiguration.AddRange(yarn.Configuration.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

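            // Copy Hive site settings, additional libraries, and any existing external metastore.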
            if (hive.HiveSiteXmlProperties.Any())
            {
                cluster.HiveConfiguration.ConfigurationCollection.AddRange(
                    hive.HiveSiteXmlProperties.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (hive.AdditionalLibraries != null)
            {
                cluster.HiveConfiguration.AdditionalLibraries = new WabStorageAccountConfiguration(
                    hive.AdditionalLibraries.AccountDnsName, hive.AdditionalLibraries.Key, hive.AdditionalLibraries.BlobContainerName);
            }

            if (!hive.Metastore.ShouldProvisionNew)
            {
                var metaStore = (SqlAzureDatabaseCredentialBackedResource)hive.Metastore;
                cluster.HiveMetastore = new Metastore(
                    metaStore.SqlServerName, metaStore.DatabaseName, metaStore.Credentials.Username, metaStore.Credentials.Password);
            }

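            // Convert custom script actions into V2 ScriptAction config actions.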
            if (configActions != null)
            {
                foreach (var configAction in configActions.CustomActions)
                {
                    ScriptCustomAction sca = configAction as ScriptCustomAction;

                    if (sca != null)
                    {
                        cluster.ConfigActions.Add(new ScriptAction(
                                                      sca.Name, ConvertClusterRoleToClusterNodeType(sca), sca.Uri, sca.Parameters));
                    }
                }
            }

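            // Copy Oozie site settings, shared and action executor libraries, and any existing external metastore.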
            if (oozie.Configuration.Any())
            {
                cluster.OozieConfiguration.ConfigurationCollection.AddRange(
                    oozie.Configuration.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (oozie.AdditionalSharedLibraries != null)
            {
                cluster.OozieConfiguration.AdditionalSharedLibraries =
                    new WabStorageAccountConfiguration(
                        oozie.AdditionalSharedLibraries.AccountDnsName,
                        oozie.AdditionalSharedLibraries.Key,
                        oozie.AdditionalSharedLibraries.BlobContainerName);
            }

            if (oozie.AdditionalActionExecutorLibraries != null)
            {
                cluster.OozieConfiguration.AdditionalActionExecutorLibraries =
                    new WabStorageAccountConfiguration(
                        oozie.AdditionalActionExecutorLibraries.AccountDnsName,
                        oozie.AdditionalActionExecutorLibraries.Key,
                        oozie.AdditionalActionExecutorLibraries.BlobContainerName);
            }

            if (!oozie.Metastore.ShouldProvisionNew)
            {
                var metaStore = (SqlAzureDatabaseCredentialBackedResource)oozie.Metastore;
                cluster.OozieMetastore = new Metastore(
                    metaStore.SqlServerName, metaStore.DatabaseName, metaStore.Credentials.Username, metaStore.Credentials.Password);
            }

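            // Copy optional HBase, Storm, and Spark settings when those components are present.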
            if (hbase != null && hbase.HBaseConfXmlProperties.Any())
            {
                cluster.HBaseConfiguration.ConfigurationCollection.AddRange(
                    hbase.HBaseConfXmlProperties.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (hbase != null && hbase.AdditionalLibraries != null)
            {
                cluster.HBaseConfiguration.AdditionalLibraries = new WabStorageAccountConfiguration(
                    hbase.AdditionalLibraries.AccountDnsName, hbase.AdditionalLibraries.Key, hbase.AdditionalLibraries.BlobContainerName);
            }

            if (storm != null && storm.StormConfiguration.Any())
            {
                cluster.StormConfiguration.AddRange(
                    storm.StormConfiguration.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }

            if (spark != null && spark.SparkConfiguration.Any())
            {
                cluster.SparkConfiguration.AddRange(
                    spark.SparkConfiguration.Select(prop => new KeyValuePair<string, string>(prop.Name, prop.Value)));
            }
        }
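
        /// <summary>
        /// Copies the Spark configuration from the cluster creation inputs onto the Spark component.
        /// </summary>
        /// <param name="spark">The Spark component to configure.</param>
        /// <param name="inputs">Cluster creation parameter inputs.</param>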
        private static void ConfigSparkComponent(SparkComponent spark, HDInsight.ClusterCreateParametersV2 inputs)
        {
            spark.SparkConfiguration.AddRange(
                inputs.SparkConfiguration.Select(prop => new Property { Name = prop.Key, Value = prop.Value }));
        }