/// <summary>
/// Retrieves the job with the given id from the cluster by invoking the
/// Get-AzureHDInsightJob cmdlet in the supplied runspace.
/// </summary>
/// <param name="runspace">The PowerShell runspace to execute in.</param>
/// <param name="jobId">The id of the job to look up.</param>
/// <param name="cluster">The cluster that owns the job; supplies the connection URL and HTTP credentials.</param>
/// <returns>The matching job, or null when no job with that id is returned.</returns>
internal static AzureHDInsightJob GetJobWithID(IRunspace runspace, string jobId, ClusterDetails cluster)
{
    var clusterCredential = GetPSCredential(cluster.HttpUserName, cluster.HttpPassword);
    IPipelineResult pipelineOutput = runspace.NewPipeline()
                                             .AddCommand(CmdletConstants.GetAzureHDInsightJob)
                                             .WithParameter(CmdletConstants.Cluster, cluster.ConnectionUrl)
                                             .WithParameter(CmdletConstants.Credential, clusterCredential)
                                             .WithParameter(CmdletConstants.Id, jobId)
                                             .Invoke();
    // FirstOrDefault: yields null rather than throwing when the job does not exist.
    return pipelineOutput.Results.ToEnumerable<AzureHDInsightJob>().FirstOrDefault();
}
/// <summary>
/// Initializes a new instance of the <see cref="AzureHDInsightCluster" /> class.
/// </summary>
/// <param name="cluster">
/// The underlying SDK data object representing the cluster.
/// </param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="cluster"/> is null.</exception>
public AzureHDInsightCluster(ClusterDetails cluster)
{
    // Fail fast instead of deferring a NullReferenceException to the first
    // property access; matches the null-guard style used elsewhere in this codebase.
    if (cluster == null)
    {
        throw new ArgumentNullException("cluster");
    }
    this.cluster = cluster;
}
/// <summary>
/// Simulator implementation of cluster creation: records the request, builds an
/// in-memory <see cref="ClusterDetails"/> from the parameters, registers it in the
/// simulated cluster list, and returns it as an already-completed task.
/// </summary>
/// <param name="clusterCreateParameters">The requested cluster configuration.</param>
/// <returns>A completed task holding the simulated cluster's details.</returns>
public Task<ClusterDetails> CreateClusterAsync(ClusterCreateParameters clusterCreateParameters)
{
    this.LogMessage("Creating cluster '{0}' in location {1}", clusterCreateParameters.Name, clusterCreateParameters.Location);
    // Remember the last request so tests can assert what was submitted.
    LastCreateRequest = clusterCreateParameters;
    var clusterDetails = new ClusterDetails();
    // NOTE(review): ClusterSizeInNodes is only assigned when EnsureHighAvailability
    // is true (requested size + 2 head/HA nodes); on the non-HA path it keeps the
    // ClusterDetails default. Confirm this asymmetry is intentional.
    if (clusterCreateParameters.EnsureHighAvailability)
    {
        clusterDetails.ClusterSizeInNodes = clusterCreateParameters.ClusterSizeInNodes + 2;
    }
    clusterDetails.Name = clusterCreateParameters.Name;
    clusterDetails.HttpPassword = clusterCreateParameters.Password;
    clusterDetails.HttpUserName = clusterCreateParameters.UserName;
    clusterDetails.Version = clusterCreateParameters.Version;
    clusterDetails.Location = clusterCreateParameters.Location;
    // The simulator reports the cluster as immediately Running (no provisioning delay).
    clusterDetails.State = ClusterState.Running;
    clusterDetails.AdditionalStorageAccounts = clusterCreateParameters.AdditionalStorageAccounts;
    clusterDetails.DefaultStorageAccount = new WabStorageAccountConfiguration(
        clusterCreateParameters.DefaultStorageAccountName,
        clusterCreateParameters.DefaultStorageAccountKey,
        clusterCreateParameters.DefaultStorageContainer);
    Clusters.Add(new SimulatorClusterContainer { Cluster = clusterDetails });
    return TaskEx2.FromResult(clusterDetails);
}
/// <summary>
/// Copies the connection URL and HTTP credentials from a gateway component onto the
/// cluster details. When the gateway is absent this is a no-op; when HTTP access is
/// disabled the credentials are blanked out.
/// </summary>
/// <param name="clusterDetails">The cluster details to populate. Must not be null.</param>
/// <param name="gateway">The gateway component, or null when the cluster has none.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="clusterDetails"/> is null.</exception>
private static void PopulateClusterUriAndHttpCredsFromGateway(ClusterDetails clusterDetails, GatewayComponent gateway)
{
    if (clusterDetails == null)
    {
        throw new ArgumentNullException("clusterDetails");
    }
    // No gateway component: leave the cluster details untouched.
    if (gateway == null)
    {
        return;
    }
    clusterDetails.ConnectionUrl = gateway.RestUri;
    if (!gateway.IsEnabled)
    {
        // HTTP access disabled: expose empty credentials rather than stale ones.
        clusterDetails.HttpUserName = string.Empty;
        clusterDetails.HttpPassword = string.Empty;
        return;
    }
    clusterDetails.HttpUserName = gateway.RestAuthCredential.Username;
    clusterDetails.HttpPassword = gateway.RestAuthCredential.Password;
}
/// <summary>
/// Submits the given job definition to the test cluster via the start-job command
/// and asserts that a job id and status directory were assigned.
/// </summary>
/// <param name="mapReduceJobDefinition">The job definition to submit.</param>
/// <param name="testCluster">The cluster providing the connection URL and HTTP credentials.</param>
/// <returns>The created job's details.</returns>
private static AzureHDInsightJob TestJobStart(AzureHDInsightJobDefinition mapReduceJobDefinition, ClusterDetails testCluster)
{
    var commandFactory = ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>();
    IStartAzureHDInsightJobCommand command = commandFactory.CreateStartJob();
    command.Cluster = testCluster.ConnectionUrl;
    command.Credential = GetPSCredential(testCluster.HttpUserName, testCluster.HttpPassword);
    command.JobDefinition = mapReduceJobDefinition;
    command.EndProcessing();
    var createdJob = command.Output.ElementAt(0);
    Assert.IsNotNull(createdJob.JobId, "Should get a non-null jobDetails id");
    Assert.IsNotNull(createdJob.StatusDirectory, "StatusDirectory should be set on jobDetails");
    return createdJob;
}
/// <summary>
/// Submits a job definition via the Start-AzureHDInsightJob cmdlet in the given
/// runspace and asserts that exactly one job with a non-null id was created.
/// </summary>
/// <param name="runspace">The PowerShell runspace to execute in.</param>
/// <param name="mapReduceJobDefinition">The job definition to submit.</param>
/// <param name="cluster">The cluster providing the connection URL and HTTP credentials.</param>
/// <returns>The created job's details.</returns>
internal static AzureHDInsightJob RunJobInPowershell(
    IRunspace runspace, AzureHDInsightJobDefinition mapReduceJobDefinition, ClusterDetails cluster)
{
    var clusterCredential = GetPSCredential(cluster.HttpUserName, cluster.HttpPassword);
    IPipelineResult pipelineResult = runspace.NewPipeline()
                                             .AddCommand(CmdletConstants.StartAzureHDInsightJob)
                                             .WithParameter(CmdletConstants.Cluster, cluster.ConnectionUrl)
                                             .WithParameter(CmdletConstants.Credential, clusterCredential)
                                             .WithParameter(CmdletConstants.JobDefinition, mapReduceJobDefinition)
                                             .Invoke();
    // Exactly one job object must come back from the cmdlet.
    Assert.AreEqual(1, pipelineResult.Results.Count);
    AzureHDInsightJob createdJob = pipelineResult.Results.ToEnumerable<AzureHDInsightJob>().First();
    Assert.IsNotNull(createdJob.JobId, "Should get a non-null jobDetails id");
    return createdJob;
}
/// <summary>
/// Builds a <see cref="ClusterDetails"/> from an IaaS cluster record returned by the
/// list-clusters call, pulling storage/size/credential data out of the embedded
/// deployment documents.
/// </summary>
/// <param name="clusterDetailsFromServer">The IaaS cluster record from the server.</param>
/// <returns>The populated cluster details.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="clusterDetailsFromServer"/> is null.</exception>
public static ClusterDetails CreateClusterDetailsFromGetClustersResult(IaasCluster clusterDetailsFromServer)
{
    if (clusterDetailsFromServer == null)
    {
        throw new ArgumentNullException("clusterDetailsFromServer");
    }
    ClusterDetails clusterDetails = new ClusterDetails();
    clusterDetails.CreatedDate = clusterDetailsFromServer.CreatedDate;
    clusterDetails.Location = clusterDetailsFromServer.Location;
    clusterDetails.Name = clusterDetailsFromServer.Id;
    clusterDetails.Version = clusterDetailsFromServer.HdiVersion;
    clusterDetails.StateString = clusterDetailsFromServer.State.ToString();
    // NOTE(review): TenantId.ToString() cannot return null, so the ?? fallback here
    // is dead code (and would NRE first if TenantId itself were a null reference) — confirm.
    clusterDetails.DeploymentId = clusterDetailsFromServer.TenantId.ToString() ?? string.Empty;
    if (!string.IsNullOrEmpty(clusterDetails.Version))
    {
        clusterDetails.VersionNumber = new PayloadConverter().ConvertStringToVersion(clusterDetails.Version);
        clusterDetails.VersionStatus = VersionFinderClient.GetVersionStatus(clusterDetails.Version);
    }
    else
    {
        // No version string from the server: fall back to 0.0.
        clusterDetails.VersionNumber = new Version(0, 0);
    }
    // Normalize Version to the canonical string form of the parsed number.
    clusterDetails.Version = clusterDetails.VersionNumber.ToString();
    // TODO: Determine this from the documents?
    clusterDetails.ClusterType = ClusterType.Hadoop;
    // This code will only run for IaasCluster which only supports Linux today.
    // We would need to put this information in one of the documents at some point.
    clusterDetails.OSType = OSType.Linux;
    if (clusterDetailsFromServer.Errors != null && clusterDetailsFromServer.Errors.Any())
    {
        // Populate error details (these occur if the deployment workflow errors out);
        // all errors are joined into one description with HTTP code 0.
        string errorDescription = string.Join(", ", clusterDetailsFromServer.Errors.Select(x => string.Format("{0} : {1}", x.ErrorCode, x.ErrorDescription)));
        clusterDetails.Error = new ClusterErrorStatus(0, errorDescription, string.Empty);
    }
    // Parse the embedded Azure CSM and Ambari deployment documents; node count,
    // password and storage configuration live inside them.
    AzureCsmDocumentManager azureCsmDocumentManager = new AzureCsmDocumentManager(clusterDetailsFromServer.DeploymentDocuments[IaasClusterDocumentTypes.EmbeddedAzureConfigurationDocument]);
    AmbariConfigurationDocumentManager ambariConfigurationDocumentManager = new AmbariConfigurationDocumentManager(clusterDetailsFromServer.DeploymentDocuments[IaasClusterDocumentTypes.EmbeddedAmbariConfigurationDocument]);
    // Populate user name, password, and server address information.
    // NOTE(review): the user name is the literal masked string below — verify this is intended.
    clusterDetails.HttpUserName = "******";
    clusterDetails.HttpPassword = ambariConfigurationDocumentManager.GetPassword();
    if (clusterDetailsFromServer.ConnectivityEndpoints != null)
    {
        // Use the first web endpoint as the connection URL; port is appended only when positive.
        foreach (var endpoint in clusterDetailsFromServer.ConnectivityEndpoints)
        {
            var webEndPoint = endpoint as WebConnectivityEndpoint;
            if (webEndPoint != null)
            {
                clusterDetails.ConnectionUrl = String.Format("https://{0}{1}", webEndPoint.Location, webEndPoint.Port > 0 ? String.Format(":{0}", webEndPoint.Port) : "");
                break;
            }
        }
    }
    clusterDetails.DefaultStorageAccount = ambariConfigurationDocumentManager.GetDefaultStorageAccount();
    // Populate additional storage accounts.
    clusterDetails.AdditionalStorageAccounts = ambariConfigurationDocumentManager.GetAdditionalStorageAccounts();
    // Populate data node count from the CSM document.
    clusterDetails.ClusterSizeInNodes = azureCsmDocumentManager.GetWorkerNodeCount();
    return(clusterDetails);
}
/// <summary>
/// Inspects the polled cluster state and decides whether polling should continue,
/// stop, or report unknown. Raises a provisioning status event for every poll and
/// logs the decision path.
/// </summary>
/// <param name="client">The management client doing the polling (also the event sender).</param>
/// <param name="cluster">The most recently fetched cluster, or null when the fetch returned nothing.</param>
/// <param name="states">The set of states that terminate polling.</param>
/// <returns>The poll decision: Null, Continue, Stop, or Unknown.</returns>
internal static PollResult PollSignal(this IHDInsightManagementPocoClient client, ClusterDetails cluster, params ClusterState[] states)
{
    if (cluster == null)
    {
        client.LogMessage("Polling for cluster returned null. Returning null to polling function for retry logic.", Severity.Informational, Verbosity.Diagnostic);
        return PollResult.Null;
    }
    // Notify listeners of the current state on every poll.
    client.RaiseClusterProvisioningEvent(client, new ClusterProvisioningStatusEventArgs(cluster, cluster.State));
    string waitedForStates = string.Join(",", states.Select(s => s.ToString()));
    client.LogMessage(
        string.Format(CultureInfo.CurrentCulture, "Current State {0} -> waiting for one state of {1}", cluster.State, waitedForStates),
        Severity.Informational,
        Verbosity.Diagnostic);
    PollResult outcome = PollResult.Continue;
    if (cluster.State == ClusterState.Error)
    {
        client.LogMessage("Stopping Poll because cluster state was in Error", Severity.Error, Verbosity.Normal);
        outcome = PollResult.Stop;
    }
    else if (cluster.Error != null)
    {
        client.LogMessage(
            string.Format(CultureInfo.CurrentCulture, "Stopping Poll because cluster returned an error message. The message was: {0}", cluster.Error),
            Severity.Error,
            Verbosity.Normal);
        outcome = PollResult.Stop;
    }
    else if (states.Contains(cluster.State))
    {
        // The cluster reached one of the requested terminal states.
        client.LogMessage(
            string.Format(CultureInfo.CurrentCulture, "Stopping Poll because cluster returned in a final state. The message was: {0}", cluster.State),
            Severity.Informational,
            Verbosity.Diagnostic);
        outcome = PollResult.Stop;
    }
    else if (cluster.State == ClusterState.Unknown)
    {
        outcome = PollResult.Unknown;
    }
    client.LogMessage(
        string.Format(CultureInfo.CurrentCulture, "Continue function determined a poll result of: {0}", outcome),
        Severity.Informational,
        Verbosity.Diagnostic);
    return outcome;
}
/// <summary>
/// Starts the given job definition on the supplied test cluster through the
/// start-job command, verifying the resulting job id and status directory.
/// </summary>
/// <param name="mapReduceJobDefinition">The job definition to submit.</param>
/// <param name="testCluster">The target cluster (connection URL + HTTP credentials).</param>
/// <returns>The created job's details.</returns>
private static AzureHDInsightJob TestJobStart(AzureHDInsightJobDefinition mapReduceJobDefinition, ClusterDetails testCluster)
{
    IStartAzureHDInsightJobCommand startCommand =
        ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>().CreateStartJob();
    startCommand.Cluster = testCluster.ConnectionUrl;
    startCommand.Credential = GetPSCredential(testCluster.HttpUserName, testCluster.HttpPassword);
    startCommand.JobDefinition = mapReduceJobDefinition;
    startCommand.EndProcessing();
    AzureHDInsightJob submittedJob = startCommand.Output.ElementAt(0);
    Assert.IsNotNull(submittedJob.JobId, "Should get a non-null jobDetails id");
    Assert.IsNotNull(submittedJob.StatusDirectory, "StatusDirectory should be set on jobDetails");
    return submittedJob;
}
/// <summary>
/// Looks up a single job by id via the Get-AzureHDInsightJob cmdlet executed in the
/// supplied runspace.
/// </summary>
/// <param name="runspace">The PowerShell runspace to execute in.</param>
/// <param name="jobId">The id of the job to fetch.</param>
/// <param name="cluster">The cluster that owns the job.</param>
/// <returns>The matching job, or null when none is returned.</returns>
internal static AzureHDInsightJob GetJobWithID(IRunspace runspace, string jobId, ClusterDetails cluster)
{
    IPipelineResult jobLookup =
        runspace.NewPipeline()
                .AddCommand(CmdletConstants.GetAzureHDInsightJob)
                .WithParameter(CmdletConstants.Cluster, cluster.ConnectionUrl)
                .WithParameter(CmdletConstants.Credential, GetPSCredential(cluster.HttpUserName, cluster.HttpPassword))
                .WithParameter(CmdletConstants.Id, jobId)
                .Invoke();
    IEnumerable<AzureHDInsightJob> jobs = jobLookup.Results.ToEnumerable<AzureHDInsightJob>();
    // Null when the cluster has no job with this id.
    return jobs.FirstOrDefault();
}
/// <summary>
/// Convenience overload: starts the job on the shared HTTP-access-enabled test cluster.
/// </summary>
/// <param name="mapReduceJobDefinition">The job definition to submit.</param>
/// <returns>The created job's details.</returns>
private static AzureHDInsightJob TestJobStart(AzureHDInsightJobDefinition mapReduceJobDefinition)
{
    var httpEnabledCluster = CmdletScenariosTestCaseBase.GetHttpAccessEnabledCluster();
    return TestJobStart(mapReduceJobDefinition, httpEnabledCluster);
}
/// <summary>
/// Verifies that attempting to stop a job id that does not exist on the cluster
/// is handled (TestJobStop returns null output rather than failing).
/// </summary>
public void CannotStopNonExistingJob()
{
    var httpEnabledCluster = CmdletScenariosTestCaseBase.GetHttpAccessEnabledCluster();
    // A fresh GUID cannot match any existing job id.
    string nonExistingJobId = Guid.NewGuid().ToString();
    TestJobStop(httpEnabledCluster, nonExistingJobId);
}
/// <summary>
/// Creates a cluster by POSTing the given details to the /clusters endpoint.
/// </summary>
/// <param name="cluster">The cluster to create. Must not be null.</param>
/// <returns>A task producing the cluster details returned by the service.</returns>
public Task<ClusterDetails> CreateAsync(ClusterDetails cluster)
{
    Ensure.NotNull(cluster, nameof(cluster));
    // The path is constant: use a plain literal instead of the original
    // interpolated string ($"/clusters") that contained no placeholders.
    return api.PostAsync<ClusterDetails>("/clusters", cluster);
}
/// <summary>
/// Ensures the shared HBase test cluster exists, caches it in <c>HBaseCluster</c>,
/// and returns it.
/// </summary>
/// <returns>The (possibly newly created) HBase cluster details.</returns>
public static ClusterDetails CreateHBaseClusterIfNotExists()
{
    var hbase = CreateClusterIfNotExists(ClusterType.HBase);
    HBaseCluster = hbase;
    return hbase;
}
/// <summary>
/// Round-trips two cluster containers (one with an error status, one without) through
/// ServerSerializer/PayloadConverter and verifies the deserialized clusters match the
/// originals field by field.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationListContainersResult()
{
    var storageAccount = new WabStorageAccountConfiguration(Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString());
    // Creates two random containers
    var container1 = new ClusterDetails(GetRandomClusterName(), "Running")
    {
        CreatedDate = DateTime.Now,
        ConnectionUrl = @"https://some/long/uri/",
        HttpUserName = "******",
        Location = "East US",
        ClusterSizeInNodes = 20,
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version
    };
    container1.DefaultStorageAccount = storageAccount;
    container1.AdditionalStorageAccounts = new List<WabStorageAccountConfiguration>()
    {
        new WabStorageAccountConfiguration(Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString()),
        new WabStorageAccountConfiguration(Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString())
    };
    // Second container carries an error status to exercise the error path of the serializer.
    var container2 = new ClusterDetails(GetRandomClusterName(), "ClusterStorageProvisioned")
    {
        CreatedDate = DateTime.Now,
        ConnectionUrl = @"https://some/long/uri/",
        HttpUserName = "******",
        Location = "West US",
        ClusterSizeInNodes = 10,
        Error = new ClusterErrorStatus(10, "error", "create"),
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version
    };
    var originalContainers = new Collection<ClusterDetails> { container1, container2 };
    // Roundtrip serialize\deserialize.
    // NOTE(review): new Guid() is Guid.Empty, not a random id — confirm that is intended
    // (Guid.NewGuid() would be the usual choice for a random subscription id).
    Guid subscriptionId = new Guid();
    var payload = ServerSerializer.SerializeListContainersResult(originalContainers, "namespace", true, false);
    var finalContainers = new PayloadConverter().DeserializeListContainersResult(payload, "namespace", subscriptionId);
    // Compares the lists
    Assert.AreEqual(originalContainers.Count, finalContainers.Count);
    foreach (var expectedCluster in originalContainers)
    {
        var deserializedCluster = finalContainers.FirstOrDefault(cluster => cluster.Name == expectedCluster.Name);
        Assert.IsNotNull(deserializedCluster);
        // NOTE(review): MSTest Assert.AreEqual expects (expected, actual); arguments here
        // are reversed, which only affects failure-message wording.
        Assert.AreEqual(deserializedCluster.SubscriptionId, subscriptionId);
        Assert.IsTrue(Equals(expectedCluster, deserializedCluster), "Failed to deserialize cluster pigJobCreateParameters {0}", expectedCluster.Name);
    }
}
/// <summary>
/// Round-trips a cluster container through the serializer with writeError=false and
/// writeExtendedError=true, then verifies the error status survives via the extended
/// error path.
/// </summary>
public void InternalValidation_PayloadConverter_SerializationListContainersResult_WithoutErrorWithExtendedError()
{
    var storageAccount = new WabStorageAccountConfiguration(Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString());
    var container1 = new ClusterDetails(GetRandomClusterName(), "ClusterStorageProvisioned")
    {
        CreatedDate = DateTime.Now,
        ConnectionUrl = @"https://some/long/uri/",
        HttpUserName = "******",
        Location = "West US",
        ClusterSizeInNodes = 10,
        Error = new ClusterErrorStatus(10, "error", "create"),
        Version = IntegrationTestBase.TestCredentials.WellKnownCluster.Version,
    };
    container1.DefaultStorageAccount = storageAccount;
    container1.AdditionalStorageAccounts = new List<WabStorageAccountConfiguration>()
    {
        new WabStorageAccountConfiguration(Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString()),
        new WabStorageAccountConfiguration(Guid.NewGuid().ToString(), Guid.NewGuid().ToString(), Guid.NewGuid().ToString())
    };
    var originalContainers = new Collection<ClusterDetails> { container1 };
    Guid subscriptionId = new Guid();
    // Serialize with writeError=false, writeExtendedError=true.
    var payload = ServerSerializer.SerializeListContainersResult(originalContainers, "namespace", false, true);
    var finalContainers = new PayloadConverter().DeserializeListContainersResult(payload, "namespace", subscriptionId);
    Assert.AreEqual(originalContainers.Count, finalContainers.Count);
    var deserializedCluster = finalContainers.FirstOrDefault(cluster => cluster.Name == container1.Name);
    Assert.IsNotNull(deserializedCluster);
    // Fixed: MSTest Assert.AreEqual takes (expected, actual); the original had the
    // arguments reversed, which produced misleading failure messages.
    Assert.AreEqual(subscriptionId, deserializedCluster.SubscriptionId);
    Assert.AreEqual("error", deserializedCluster.Error.Message);
    Assert.AreEqual(10, deserializedCluster.Error.HttpCode);
    Assert.AreEqual("create", deserializedCluster.Error.OperationType);
}
/// <summary>
/// Converts a <see cref="ClusterDetails"/> into the RDFE <c>Resource</c> wire shape:
/// basic fields become output items, error status is emitted per the write flags, and
/// credentials/version go into the intrinsic-settings XML node.
/// </summary>
/// <param name="result">The cluster to serialize.</param>
/// <param name="nameSpace">The resource provider namespace to stamp on the resource.</param>
/// <param name="writeError">When true, emit the error into OperationStatus.</param>
/// <param name="writeExtendedError">When true, also emit the error message as an ExtendedErrorMessage output item.</param>
/// <returns>The populated resource.</returns>
private static Resource ListClusterContainerResult_ToInternal(ClusterDetails result, string nameSpace, bool writeError, bool writeExtendedError)
{
    var resource = new Resource { Name = result.Name, SubState = result.StateString, ResourceProviderNamespace = nameSpace, Type = "containers" };
    // Serialization helpers below assume a non-null additional-accounts list.
    if (result.AdditionalStorageAccounts == null)
    {
        result.AdditionalStorageAccounts = new List<WabStorageAccountConfiguration>();
    }
    // (The original re-assigned resource.Type = "containers" here; it is already set
    // in the initializer above, so the redundant assignment was removed.)
    resource.OutputItems = new OutputItemList
    {
        new OutputItem { Key = CreatedDate, Value = result.CreatedDate.ToString(CultureInfo.InvariantCulture) },
        new OutputItem { Key = ConnectionUrl, Value = result.ConnectionUrl },
        new OutputItem { Key = ClusterUserName, Value = result.HttpUserName },
        new OutputItem { Key = Version, Value = result.Version },
        new OutputItem { Key = BlobContainers, Value = SerializeStorageAccounts(result) },
        new OutputItem { Key = NodesCount, Value = result.ClusterSizeInNodes.ToString(CultureInfo.InvariantCulture) }
    };
    if (result.Error != null)
    {
        // Either flag populates OperationStatus with the same content; the original
        // built it twice when both flags were set.
        if (writeError || writeExtendedError)
        {
            resource.OperationStatus = new ResourceOperationStatus { Type = result.Error.OperationType };
            resource.OperationStatus.Error = new ResourceErrorInfo { HttpCode = result.Error.HttpCode, Message = result.Error.Message };
        }
        if (writeExtendedError)
        {
            resource.OutputItems.Add(new OutputItem { Key = ExtendedErrorMessage, Value = result.Error.Message });
        }
    }
    // Credentials and version travel in the intrinsic-settings XML blob.
    var intrinsicSettings = new List<OutputItem>
    {
        new OutputItem { Key = RdpUserName, Value = result.RdpUserName },
        new OutputItem { Key = HttpUserName, Value = result.HttpUserName },
        new OutputItem { Key = HttpPassword, Value = result.HttpPassword },
        new OutputItem { Key = Version, Value = result.Version }
    };
    resource.IntrinsicSettings = new XmlNode[] { SerializeToXmlNode(intrinsicSettings) };
    return resource;
}
/// <summary>
/// Test helper: deep-compares an expected cluster against a deserialized one.
/// Scalar fields are collected as tuples and compared at the end via CompareTuples;
/// storage-account mismatches fail immediately through MSTest asserts.
/// </summary>
/// <param name="expectedCluster">The original cluster.</param>
/// <param name="deserializedCluster">The round-tripped cluster.</param>
/// <returns>True when all collected tuple pairs compare equal; storage-account
/// mismatches throw assertion failures instead of returning false.</returns>
private static bool Equals(ClusterDetails expectedCluster, ClusterDetails deserializedCluster)
{
    // Both null: equal; exactly one null: not equal.
    if (expectedCluster == null && deserializedCluster == null)
    {
        return true;
    }
    if (expectedCluster == null || deserializedCluster == null)
    {
        return false;
    }
    // Scalar fields are compared in bulk at the end; CreatedDate is compared with
    // milliseconds truncated because serialization drops sub-second precision.
    var comparisonTuples = new List<Tuple<object, object>>
    {
        new Tuple<object, object>(expectedCluster.Name, deserializedCluster.Name),
        new Tuple<object, object>(expectedCluster.State, deserializedCluster.State),
        new Tuple<object, object>(expectedCluster.StateString, deserializedCluster.StateString),
        new Tuple<object, object>(TruncateMiliseconds(expectedCluster.CreatedDate), TruncateMiliseconds(deserializedCluster.CreatedDate)),
        new Tuple<object, object>(expectedCluster.Location, deserializedCluster.Location),
        new Tuple<object, object>(expectedCluster.HttpUserName, deserializedCluster.HttpUserName),
        new Tuple<object, object>(expectedCluster.ConnectionUrl, deserializedCluster.ConnectionUrl),
        new Tuple<object, object>(expectedCluster.ClusterSizeInNodes, deserializedCluster.ClusterSizeInNodes),
    };
    // Error must be present/absent on both sides; when present on both, compare its fields too.
    if (expectedCluster.Error == null && deserializedCluster.Error != null) return false;
    if (expectedCluster.Error != null && deserializedCluster.Error == null) return false;
    if (expectedCluster.Error != null && deserializedCluster.Error != null)
    {
        comparisonTuples.Add(new Tuple<object, object>(expectedCluster.Error.HttpCode, deserializedCluster.Error.HttpCode));
        comparisonTuples.Add(new Tuple<object, object>(expectedCluster.Error.Message, deserializedCluster.Error.Message));
        comparisonTuples.Add(new Tuple<object, object>(expectedCluster.Error.OperationType, deserializedCluster.Error.OperationType));
    }
    // Storage accounts are asserted directly (fail fast) rather than tupled.
    if (expectedCluster.DefaultStorageAccount != null)
    {
        Assert.IsNotNull(deserializedCluster.DefaultStorageAccount, "DefaultStorageAccount");
        Assert.AreEqual(expectedCluster.DefaultStorageAccount.Key, deserializedCluster.DefaultStorageAccount.Key, "Key");
        Assert.AreEqual(expectedCluster.DefaultStorageAccount.Container, deserializedCluster.DefaultStorageAccount.Container, "Container");
    }
    // Every expected additional account must exist by name with matching key/container.
    foreach (var storageAccount in expectedCluster.AdditionalStorageAccounts)
    {
        var deserializedStorageAccount = deserializedCluster.AdditionalStorageAccounts.FirstOrDefault(acc => acc.Name == storageAccount.Name);
        Assert.IsNotNull(deserializedStorageAccount, storageAccount.Name);
        Assert.AreEqual(storageAccount.Key, deserializedStorageAccount.Key, "Key");
        Assert.AreEqual(storageAccount.Container, deserializedStorageAccount.Container, "Container");
    }
    return CompareTuples(comparisonTuples);
}
/// <summary>
/// Issues a stop-job command for the given job id against the test cluster.
/// When the command produces output, the cancelled job is validated and returned;
/// otherwise (e.g. the job did not exist) null is returned.
/// </summary>
/// <param name="testCluster">The cluster providing the connection URL and HTTP credentials.</param>
/// <param name="jobId">The id of the job to stop.</param>
/// <returns>The cancelled job's details, or null when the command produced no output.</returns>
private static AzureHDInsightJob TestJobStop(ClusterDetails testCluster, string jobId)
{
    var commandFactory = ServiceLocator.Instance.Locate<IAzureHDInsightCommandFactory>();
    IStopAzureHDInsightJobCommand stopCommand = commandFactory.CreateStopJob();
    stopCommand.Cluster = testCluster.ConnectionUrl;
    stopCommand.Credential = GetPSCredential(testCluster.HttpUserName, testCluster.HttpPassword);
    stopCommand.JobId = jobId;
    stopCommand.EndProcessing();
    if (stopCommand.Output.Count == 0)
    {
        // No output: nothing was cancelled (job id not found).
        return null;
    }
    AzureHDInsightJob cancelledJob = stopCommand.Output.ElementAt(0);
    Assert.IsNotNull(cancelledJob.JobId, "Should get a non-null jobDetails id");
    Assert.IsNotNull(cancelledJob.StatusDirectory, "StatusDirectory should be set on jobDetails");
    return cancelledJob;
}
/// <summary>
/// Builds a <see cref="ClusterDetails"/> from a PaaS (Windows) cluster record:
/// copies basic fields, derives the cluster type from the component list, pulls
/// connection info from the gateway, node/storage data from the Yarn or MapReduce
/// component, and RDP/virtual-network settings where present.
/// </summary>
/// <param name="clusterDetailsFromServer">The cluster record from the server.</param>
/// <returns>The populated cluster details.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="clusterDetailsFromServer"/> is null.</exception>
public static ClusterDetails CreateClusterDetailsFromGetClustersResult(Cluster clusterDetailsFromServer)
{
    if (clusterDetailsFromServer == null)
    {
        throw new ArgumentNullException("clusterDetailsFromServer");
    }
    ClusterDetails clusterDetails = new ClusterDetails();
    clusterDetails.CreatedDate = clusterDetailsFromServer.CreatedTime;
    clusterDetails.Location = clusterDetailsFromServer.Location;
    clusterDetails.Name = clusterDetailsFromServer.DnsName;
    clusterDetails.Version = clusterDetailsFromServer.Version;
    clusterDetails.StateString = clusterDetailsFromServer.State.ToString();
    clusterDetails.DeploymentId = clusterDetailsFromServer.DeploymentId ?? string.Empty;
    if (!string.IsNullOrEmpty(clusterDetails.Version))
    {
        clusterDetails.VersionNumber = new PayloadConverter().ConvertStringToVersion(clusterDetails.Version);
        clusterDetails.VersionStatus = VersionFinderClient.GetVersionStatus(clusterDetails.Version);
    }
    else
    {
        // No version string from the server: fall back to 0.0.
        clusterDetails.VersionNumber = new Version(0, 0);
    }
    // Cluster type is derived from the names of the component types present.
    string componentListCommaSeperated = clusterDetailsFromServer.Components != null ? string.Join(",", clusterDetailsFromServer.Components.Select(c => c.GetType().Name)) : string.Empty;
    clusterDetails.ClusterType = !string.IsNullOrEmpty(componentListCommaSeperated) ? GetClusterTypeFromComponentList(componentListCommaSeperated) : ClusterType.Unknown;
    // This code will only execute for PaaS clusters which only support Windows.
    clusterDetails.OSType = OSType.Windows;
    if (clusterDetailsFromServer.Error != null)
    {
        // Populate error details with the most recent one. These occur if the
        // deployment workflow errors out.
        clusterDetails.Error = new ClusterErrorStatus(
            (int)clusterDetailsFromServer.Error.StatusCode,
            clusterDetailsFromServer.Error.ErrorMessage ?? string.Empty,
            string.Empty);
    }
    // Populate Uri and HttpCreds from the gateway. This should not throw
    // even if the gateway component is null.
    PopulateClusterUriAndHttpCredsFromGateway(clusterDetails, clusterDetailsFromServer.Components.OfType<GatewayComponent>().SingleOrDefault());
    // Look for Yarn for 3X clusters.
    var yarn = clusterDetailsFromServer.Components.OfType<YarnComponent>().SingleOrDefault();
    // Look for MR for 2X clusters.
    var mr = clusterDetailsFromServer.Components.OfType<MapReduceComponent>().SingleOrDefault();
    if (yarn != null)
    {
        clusterDetails.ClusterSizeInNodes = yarn.NodeManagerRole.InstanceCount;
        var mapReduceApplication = yarn.Applications.OfType<MapReduceApplication>().SingleOrDefault();
        if (mapReduceApplication != null)
        {
            // ToWabStorageAccountConfiguration returns null if DefaultStorageAccountAndContainer is null.
            clusterDetails.DefaultStorageAccount = mapReduceApplication.DefaultStorageAccountAndContainer.ToWabStorageAccountConfiguration();
            if (mapReduceApplication.AdditionalStorageContainers != null)
            {
                clusterDetails.AdditionalStorageAccounts = mapReduceApplication.AdditionalStorageContainers.Select(s => s.ToWabStorageAccountConfiguration()).ToList();
            }
        }
    }
    else if (mr != null)
    {
        clusterDetails.ClusterSizeInNodes = mr.WorkerNodeRole.InstanceCount;
        clusterDetails.DefaultStorageAccount = mr.DefaultStorageAccountAndContainer.ToWabStorageAccountConfiguration();
        if (mr.AdditionalStorageAccounts != null)
        {
            clusterDetails.AdditionalStorageAccounts = mr.AdditionalStorageAccounts.Select(s => s.ToWabStorageAccountConfiguration()).ToList();
        }
    }
    // Populate RDP user name. All roles will have the same RDP properties so we pick the first one.
    if (clusterDetailsFromServer.ClusterRoleCollection != null && clusterDetailsFromServer.ClusterRoleCollection.Any() && clusterDetailsFromServer.ClusterRoleCollection.First().RemoteDesktopSettings.IsEnabled)
    {
        clusterDetails.RdpUserName = clusterDetailsFromServer.ClusterRoleCollection.First().RemoteDesktopSettings.AuthenticationCredential.Username;
    }
    // Populate virtual network info.
    VirtualNetworkConfiguration vnetConfigFromServer = clusterDetailsFromServer.VirtualNetworkConfiguration;
    if (vnetConfigFromServer != null && !string.IsNullOrEmpty(vnetConfigFromServer.VirtualNetworkSite))
    {
        clusterDetails.VirtualNetworkId = vnetConfigFromServer.VirtualNetworkSite;
        // Populate the subnet name.
        // NOTE(review): Subnets.First() throws on an empty collection, so the
        // "!= null" check cannot guard an empty Subnets list; and the inner
        // AddressAssignments.Any() repeats the outer condition — confirm intent.
        if (vnetConfigFromServer.AddressAssignments != null && vnetConfigFromServer.AddressAssignments.Any() && vnetConfigFromServer.AddressAssignments.First().Subnets != null && vnetConfigFromServer.AddressAssignments.First().Subnets.First() != null)
        {
            if (vnetConfigFromServer.AddressAssignments.Any())
            {
                clusterDetails.SubnetName = vnetConfigFromServer.AddressAssignments.First().Subnets.First().Name;
            }
        }
    }
    return(clusterDetails);
}
/// <summary>
/// Submits a job via the Start-AzureHDInsightJob cmdlet, optionally with -Debug to
/// capture and verify a diagnostic log message, and asserts exactly one job with a
/// non-null id was created.
/// </summary>
/// <param name="runspace">The PowerShell runspace to execute in.</param>
/// <param name="mapReduceJobDefinition">The job definition to submit.</param>
/// <param name="cluster">The cluster providing the connection URL and HTTP credentials.</param>
/// <param name="debug">When true, run with -Debug and verify <paramref name="expectedLogMessage"/> was logged.</param>
/// <param name="expectedLogMessage">Substring expected in the captured log output (only used when debug is true).</param>
/// <returns>The created job's details.</returns>
internal static AzureHDInsightJob RunJobInPowershell(
    IRunspace runspace, AzureHDInsightJobDefinition mapReduceJobDefinition, ClusterDetails cluster, bool debug, string expectedLogMessage)
{
    IPipelineResult result = null;
    if (debug)
    {
        // Install a buffering log writer so the cmdlet's debug output can be inspected.
        var logWriter = new PowershellLogWriter();
        BufferingLogWriterFactory.Instance = logWriter;
        result = runspace.NewPipeline()
            .AddCommand(CmdletConstants.StartAzureHDInsightJob)
            .WithParameter(CmdletConstants.Cluster, cluster.ConnectionUrl)
            .WithParameter(CmdletConstants.Credential, GetPSCredential(cluster.HttpUserName, cluster.HttpPassword))
            .WithParameter(CmdletConstants.JobDefinition, mapReduceJobDefinition)
            .WithParameter(CmdletConstants.Debug, null)
            .Invoke();
        // The expected diagnostic message must appear in the captured log.
        Assert.IsTrue(logWriter.Buffer.Any(message => message.Contains(expectedLogMessage)));
        // Restore the default log writer so later tests are unaffected.
        BufferingLogWriterFactory.Reset();
    }
    else
    {
        result = runspace.NewPipeline()
            .AddCommand(CmdletConstants.StartAzureHDInsightJob)
            .WithParameter(CmdletConstants.Cluster, cluster.ConnectionUrl)
            .WithParameter(CmdletConstants.Credential, GetPSCredential(cluster.HttpUserName, cluster.HttpPassword))
            .WithParameter(CmdletConstants.JobDefinition, mapReduceJobDefinition)
            .Invoke();
    }
    Assert.AreEqual(1, result.Results.Count);
    IEnumerable<AzureHDInsightJob> jobCreationCmdletResults = result.Results.ToEnumerable<AzureHDInsightJob>();
    AzureHDInsightJob jobCreationResults = jobCreationCmdletResults.First();
    Assert.IsNotNull(jobCreationResults.JobId, "Should get a non-null jobDetails id");
    return jobCreationResults;
}
/// <summary>
/// Builds a <see cref="ClusterDetails"/> from an RDFE resource listing entry:
/// basic fields come from the resource and its output items; a failed operation
/// status maps to the Error state with an attached <see cref="ClusterErrorStatus"/>.
/// </summary>
/// <param name="cloudServiceRegion">The region of the enclosing cloud service; used as the cluster location.</param>
/// <param name="resouceOutput">The RDFE resource entry. (Parameter name typo is preserved — it is part of the public signature.)</param>
/// <returns>The populated cluster details.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="resouceOutput"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when <paramref name="cloudServiceRegion"/> is null or empty.</exception>
public static ClusterDetails CreateClusterDetailsFromRdfeResourceOutput(string cloudServiceRegion, Resource resouceOutput)
{
    if (resouceOutput == null)
    {
        throw new ArgumentNullException("resouceOutput");
    }
    if (string.IsNullOrEmpty(cloudServiceRegion))
    {
        throw new ArgumentException("CloudService region cannot be null or empty.");
    }
    string version = SafeGetValueFromOutputItem(resouceOutput.OutputItems, "Version");
    string components = SafeGetValueFromOutputItem(resouceOutput.OutputItems, "ClusterComponents");
    ClusterType clusterType = !string.IsNullOrEmpty(components) ? GetClusterTypeFromComponentList(components) : ClusterType.Unknown;
    var clusterDetails = new ClusterDetails
    {
        Name = resouceOutput.Name,
        Version = version,
        StateString = resouceOutput.SubState,
        Location = cloudServiceRegion,
        ClusterType = clusterType,
    };
    if (!string.IsNullOrEmpty(version))
    {
        clusterDetails.VersionStatus = VersionFinderClient.GetVersionStatus(version);
        clusterDetails.VersionNumber = new PayloadConverter().ConvertStringToVersion(version);
    }
    else
    {
        // No version in the output items: fall back to 0.0.
        clusterDetails.VersionNumber = new Version(0, 0);
    }
    // If the operation status is populated with "Failed" then mark the state as Error.
    // NOTE(review): OperationStatus.Result is dereferenced without a null check —
    // a non-null status with a null Result would throw here; confirm Result is always set.
    if (resouceOutput.OperationStatus != null && resouceOutput.OperationStatus.Result.Equals("Failed", StringComparison.OrdinalIgnoreCase))
    {
        clusterDetails.State = HDInsight.ClusterState.Error;
        string errorType = resouceOutput.OperationStatus.Type ?? string.Empty;
        clusterDetails.StateString = HDInsight.ClusterState.Error.ToString();
        if (resouceOutput.OperationStatus.Error != null)
        {
            int httpCode = resouceOutput.OperationStatus.Error.HttpCode;
            string errorMessage = resouceOutput.OperationStatus.Error.Message ?? string.Empty;
            clusterDetails.Error = new ClusterErrorStatus(httpCode, errorMessage, errorType);
        }
        else
        {
            // Failed but without error details: record a generic error.
            clusterDetails.Error = new ClusterErrorStatus(0, "Unknown error occurred", errorType);
        }
    }
    else
    {
        // Map the substate string onto the ClusterState enum (case-insensitive),
        // defaulting to Unknown when it does not parse.
        HDInsight.ClusterState clusterState;
        if (!Enum.TryParse(resouceOutput.SubState, true, out clusterState))
        {
            clusterState = HDInsight.ClusterState.Unknown;
        }
        clusterDetails.State = clusterState;
    }
    return(clusterDetails);
}
/// <summary>
/// Builds a <see cref="ClusterDetails"/> from a PaaS (Windows) cluster record:
/// copies basic fields, derives the cluster type from the component list, pulls
/// connection info from the gateway, node/storage data from the Yarn or MapReduce
/// component, and RDP/virtual-network settings where present.
/// </summary>
/// <param name="clusterDetailsFromServer">The cluster record from the server.</param>
/// <returns>The populated cluster details.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="clusterDetailsFromServer"/> is null.</exception>
public static ClusterDetails CreateClusterDetailsFromGetClustersResult(Cluster clusterDetailsFromServer)
{
    if (clusterDetailsFromServer == null)
    {
        throw new ArgumentNullException("clusterDetailsFromServer");
    }
    ClusterDetails clusterDetails = new ClusterDetails();
    clusterDetails.CreatedDate = clusterDetailsFromServer.CreatedTime;
    clusterDetails.Location = clusterDetailsFromServer.Location;
    clusterDetails.Name = clusterDetailsFromServer.DnsName;
    clusterDetails.Version = clusterDetailsFromServer.Version;
    clusterDetails.StateString = clusterDetailsFromServer.State.ToString();
    clusterDetails.DeploymentId = clusterDetailsFromServer.DeploymentId ?? string.Empty;
    if (!string.IsNullOrEmpty(clusterDetails.Version))
    {
        clusterDetails.VersionNumber = new PayloadConverter().ConvertStringToVersion(clusterDetails.Version);
        clusterDetails.VersionStatus = VersionFinderClient.GetVersionStatus(clusterDetails.Version);
    }
    else
    {
        // No version string from the server: fall back to 0.0.
        clusterDetails.VersionNumber = new Version(0, 0);
    }
    // Cluster type is derived from the names of the component types present.
    string componentListCommaSeperated = clusterDetailsFromServer.Components != null ? string.Join(",", clusterDetailsFromServer.Components.Select(c => c.GetType().Name)) : string.Empty;
    clusterDetails.ClusterType = !string.IsNullOrEmpty(componentListCommaSeperated) ? GetClusterTypeFromComponentList(componentListCommaSeperated) : ClusterType.Unknown;
    // This code will only execute for PaaS clusters which only support Windows.
    clusterDetails.OSType = OSType.Windows;
    if (clusterDetailsFromServer.Error != null)
    {
        // Populate error details with the most recent one. These occur if the
        // deployment workflow errors out.
        clusterDetails.Error = new ClusterErrorStatus(
            (int)clusterDetailsFromServer.Error.StatusCode,
            clusterDetailsFromServer.Error.ErrorMessage ?? string.Empty,
            string.Empty);
    }
    // Populate Uri and HttpCreds from the gateway. This should not throw
    // even if the gateway component is null.
    PopulateClusterUriAndHttpCredsFromGateway(clusterDetails, clusterDetailsFromServer.Components.OfType<GatewayComponent>().SingleOrDefault());
    // Look for Yarn for 3X clusters.
    var yarn = clusterDetailsFromServer.Components.OfType<YarnComponent>().SingleOrDefault();
    // Look for MR for 2X clusters.
    var mr = clusterDetailsFromServer.Components.OfType<MapReduceComponent>().SingleOrDefault();
    if (yarn != null)
    {
        clusterDetails.ClusterSizeInNodes = yarn.NodeManagerRole.InstanceCount;
        var mapReduceApplication = yarn.Applications.OfType<MapReduceApplication>().SingleOrDefault();
        if (mapReduceApplication != null)
        {
            // ToWabStorageAccountConfiguration returns null if DefaultStorageAccountAndContainer is null.
            clusterDetails.DefaultStorageAccount = mapReduceApplication.DefaultStorageAccountAndContainer.ToWabStorageAccountConfiguration();
            if (mapReduceApplication.AdditionalStorageContainers != null)
            {
                clusterDetails.AdditionalStorageAccounts = mapReduceApplication.AdditionalStorageContainers.Select(s => s.ToWabStorageAccountConfiguration()).ToList();
            }
        }
    }
    else if (mr != null)
    {
        clusterDetails.ClusterSizeInNodes = mr.WorkerNodeRole.InstanceCount;
        clusterDetails.DefaultStorageAccount = mr.DefaultStorageAccountAndContainer.ToWabStorageAccountConfiguration();
        if (mr.AdditionalStorageAccounts != null)
        {
            clusterDetails.AdditionalStorageAccounts = mr.AdditionalStorageAccounts.Select(s => s.ToWabStorageAccountConfiguration()).ToList();
        }
    }
    // Populate RDP user name. All roles will have the same RDP properties so we pick the first one.
    if (clusterDetailsFromServer.ClusterRoleCollection != null && clusterDetailsFromServer.ClusterRoleCollection.Any() && clusterDetailsFromServer.ClusterRoleCollection.First().RemoteDesktopSettings.IsEnabled)
    {
        clusterDetails.RdpUserName = clusterDetailsFromServer.ClusterRoleCollection.First().RemoteDesktopSettings.AuthenticationCredential.Username;
    }
    // Populate virtual network info.
    VirtualNetworkConfiguration vnetConfigFromServer = clusterDetailsFromServer.VirtualNetworkConfiguration;
    if (vnetConfigFromServer != null && !string.IsNullOrEmpty(vnetConfigFromServer.VirtualNetworkSite))
    {
        clusterDetails.VirtualNetworkId = vnetConfigFromServer.VirtualNetworkSite;
        // Populate the subnet name.
        // NOTE(review): Subnets.First() throws on an empty collection, so the
        // "!= null" check cannot guard an empty Subnets list; and the inner
        // AddressAssignments.Any() repeats the outer condition — confirm intent.
        if (vnetConfigFromServer.AddressAssignments != null && vnetConfigFromServer.AddressAssignments.Any() && vnetConfigFromServer.AddressAssignments.First().Subnets != null && vnetConfigFromServer.AddressAssignments.First().Subnets.First() != null)
        {
            if (vnetConfigFromServer.AddressAssignments.Any())
            {
                clusterDetails.SubnetName = vnetConfigFromServer.AddressAssignments.First().Subnets.First().Name;
            }
        }
    }
    return clusterDetails;
}
// Verifies that job submission fails with UnauthorizedAccessException once the
// cluster's original HTTP credentials have been revoked and replaced.
// NOTE(review): the method name contains typos ("IncorectCredintials") but is part
// of the test's public identity, so it is intentionally left unchanged.
public void ICanNotSubmitAJobWithTheIncorectCredintials()
{
    IHDInsightCertificateCredential validCreds = IntegrationTestBase.GetValidCredentials();
    var hdInsightClient = ServiceLocator.Instance.Locate<IHDInsightClientFactory>()
                                        .Create(new HDInsightCertificateCredential(validCreds.SubscriptionId, validCreds.Certificate));
    var pocoFactory = ServiceLocator.Instance.Locate<IHDInsightManagementPocoClientFactory>();
    var managementClient = pocoFactory.Create(validCreds, GetAbstractionContext(), false);

    var createRequest = GetRandomCluster();
    hdInsightClient.CreateCluster(createRequest);
    try
    {
        ClusterDetails deployedCluster = managementClient.ListContainer(createRequest.Name).WaitForResult();

        // Connect to the gateway using the credentials the cluster was created with.
        var basicAuth = new BasicAuthCredential();
        basicAuth.Server = GatewayUriResolver.GetGatewayUri(deployedCluster.ConnectionUrl);
        basicAuth.UserName = createRequest.UserName;
        basicAuth.Password = createRequest.Password;
        var jobClient = JobSubmissionClientFactory.Connect(basicAuth);

        // Sanity-check that submission works before the credentials are rotated.
        var piJob = new MapReduceJobCreateParameters();
        piJob.ClassName = "pi";
        piJob.JobName = "pi estimation jobDetails";
        piJob.JarFile = "/example/hadoop-examples.jar";
        piJob.StatusFolder = "/piresults";
        piJob.Arguments.Add("16");
        piJob.Arguments.Add("10000");
        var submission = jobClient.CreateMapReduceJob(piJob);

        // Revoke HTTP access and wait for the operation to finish.
        var operationId = managementClient.DisableHttp(createRequest.Name, createRequest.Location).WaitForResult();
        while (!managementClient.IsComplete(deployedCluster.Name, deployedCluster.Location, operationId).WaitForResult())
        {
            Thread.Sleep(500);
        }

        // Re-enable HTTP access under a brand-new user; the old cached
        // credentials held by jobClient are now invalid.
        string newUser = "******";
        string newPassword = GetRandomValidPassword();
        operationId = managementClient.EnableHttp(createRequest.Name, createRequest.Location, newUser, newPassword).WaitForResult();
        while (!managementClient.IsComplete(deployedCluster.Name, deployedCluster.Location, operationId).WaitForResult())
        {
            Thread.Sleep(500);
        }

        // Submitting with the stale credentials must now be rejected.
        submission = jobClient.CreateMapReduceJob(piJob);
        Assert.Fail("This test expected an exception but did not receive one.");
    }
    catch (UnauthorizedAccessException ex)
    {
        // Expected outcome: the revoked credentials were refused.
        Help.DoNothing(ex);
    }
    finally
    {
        // delete the cluster
        hdInsightClient.DeleteCluster(createRequest.Name);
    }
}
/// <summary>
/// Builds a <see cref="ClusterDetails"/> object from an RDFE resource output payload.
/// </summary>
/// <param name="cloudServiceRegion">The region of the cloud service hosting the cluster; used as the cluster location.</param>
/// <param name="resouceOutput">The RDFE resource describing the cluster. (Name kept for compatibility despite the typo.)</param>
/// <returns>A populated <see cref="ClusterDetails"/> instance.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="resouceOutput"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when <paramref name="cloudServiceRegion"/> is null or empty.</exception>
public static ClusterDetails CreateClusterDetailsFromRdfeResourceOutput(string cloudServiceRegion, Resource resouceOutput)
{
    if (resouceOutput == null)
    {
        throw new ArgumentNullException("resouceOutput");
    }
    if (string.IsNullOrEmpty(cloudServiceRegion))
    {
        throw new ArgumentException("CloudService region cannot be null or empty.", "cloudServiceRegion");
    }

    string version = SafeGetValueFromOutputItem(resouceOutput.OutputItems, "Version");
    string components = SafeGetValueFromOutputItem(resouceOutput.OutputItems, "ClusterComponents");
    ClusterType clusterType = !string.IsNullOrEmpty(components)
        ? GetClusterTypeFromComponentList(components)
        : ClusterType.Unknown;

    var clusterDetails = new ClusterDetails
    {
        Name = resouceOutput.Name,
        Version = version,
        StateString = resouceOutput.SubState,
        Location = cloudServiceRegion,
        ClusterType = clusterType,
    };

    if (!string.IsNullOrEmpty(version))
    {
        clusterDetails.VersionStatus = VersionFinderClient.GetVersionStatus(version);
        clusterDetails.VersionNumber = new PayloadConverter().ConvertStringToVersion(version);
    }
    else
    {
        // No version reported; use 0.0 as a sentinel.
        clusterDetails.VersionNumber = new Version(0, 0);
    }

    //Operation status is populated with failed, then let us mark the state as error
    // BUG FIX: use null-safe string.Equals — OperationStatus.Result may be null,
    // and calling .Equals on it would throw NullReferenceException.
    if (resouceOutput.OperationStatus != null &&
        string.Equals(resouceOutput.OperationStatus.Result, "Failed", StringComparison.OrdinalIgnoreCase))
    {
        clusterDetails.State = HDInsight.ClusterState.Error;
        string errorType = resouceOutput.OperationStatus.Type ?? string.Empty;
        clusterDetails.StateString = HDInsight.ClusterState.Error.ToString();
        if (resouceOutput.OperationStatus.Error != null)
        {
            int httpCode = resouceOutput.OperationStatus.Error.HttpCode;
            string errorMessage = resouceOutput.OperationStatus.Error.Message ?? string.Empty;
            clusterDetails.Error = new ClusterErrorStatus(httpCode, errorMessage, errorType);
        }
        else
        {
            // Operation failed but no error detail was supplied.
            clusterDetails.Error = new ClusterErrorStatus(0, "Unknown error occurred", errorType);
        }
    }
    else
    {
        // Map the reported sub-state onto our enum; unparseable values become Unknown.
        HDInsight.ClusterState clusterState;
        if (!Enum.TryParse(resouceOutput.SubState, true, out clusterState))
        {
            clusterState = HDInsight.ClusterState.Unknown;
        }
        clusterDetails.State = clusterState;
    }
    return clusterDetails;
}
/// <summary>
/// This method invokes separate Gets on all resources to get the current information
/// To avoid collision with member static variables, it creates its own copies of resources for reporting
/// It is invoked based on timer raised events
/// </summary>
/// <param name="source">The timer that raised the event (unused).</param>
/// <param name="e">Elapsed event data; <c>SignalTime</c> is used as the report timestamp.</param>
static void ReportProgress(Object source, ElapsedEventArgs e)
{
    StorageAccount storageAccount = null;
    EventHubDescription eventHub = null;
    ClusterDetails stormCluster = null;
    ClusterDetails hbaseCluster = null;
    try
    {
        // Query all four resources concurrently so one slow call doesn't delay the poll.
        Parallel.Invoke(
            () => { storageAccount = AzureStorageHelper.GetAccount(); },
            () => { eventHub = EventHubHelper.GetEventHub(); },
            () => { stormCluster = HDInsightHelper.GetStormCluster(); },
            () => { hbaseCluster = HDInsightHelper.GetHBaseCluster(); });
    }
    catch { } //This is just a poller, okay to fail

    var sb = new StringBuilder();
    sb.AppendLine(Environment.NewLine + "-".PadRight(60, '-'));
    sb.AppendLine("Current Status at: ".PadRight(20) + e.SignalTime);
    sb.AppendLine("StorageAccount: ".PadRight(20) +
        (storageAccount == null ? "No Status" :
        (storageAccount.Name.PadRight(20) + " - " + storageAccount.Properties.Status.ToString())
        )
        );
    sb.AppendLine("EventHub: ".PadRight(20) +
        (eventHub == null ? "No Status" :
        (eventHub.Path.PadRight(20) + " - " + eventHub.Status.ToString())
        )
        );
    sb.AppendLine("HDInsight Storm: ".PadRight(20) +
        (stormCluster == null ? "No Status" :
        (stormCluster.Name.PadRight(20) + " - " + stormCluster.StateString)
        )
        );
    sb.AppendLine("HDInsight HBase: ".PadRight(20) +
        (hbaseCluster == null ? "No Status" :
        (hbaseCluster.Name.PadRight(20) + " - " + hbaseCluster.StateString)
        )
        );
    sb.AppendLine("-".PadRight(60, '-') + Environment.NewLine);

    // BUG FIX: previously the report text itself was passed as the composite format
    // string (LOG.DebugFormat(sb.ToString())); any '{' or '}' in a resource name or
    // status would make DebugFormat throw FormatException. Pass it as an argument.
    LOG.DebugFormat("{0}", sb.ToString());
}