private async void Button_JobC_Click(object sender, RoutedEventArgs e)
{
    // Checks whether job C may start: it must not run while any A or B job is active.
    // Results are reported to the output TextBlock; C is not actually launched yet.
    StringBuilder output = new StringBuilder(1024);
    output.AppendLine("JobC will not run if A and B jobs are active");

    // Load the Batch account configuration and build shared-key credentials from it.
    AccountSettings settings = SampleHelpers.LoadAccountSettings();
    BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
        settings.BatchServiceUrl,
        settings.BatchAccountName,
        settings.BatchAccountKey);
    output.AppendLine($"batchcred created to {settings.BatchAccountName} at {settings.BatchServiceUrl}");

    using (BatchClient batchClient = BatchClient.Open(credentials))
    {
        // ABCheck returns true only when no A or B job is currently active.
        bool canRunJobC = await ABCheck(batchClient);
        if (canRunJobC)
        {
            output.AppendLine("No A or B jobs are running.");
            output.AppendLine("We would kick off C at this point. Bypassing for now.");
        }
        else
        {
            output.AppendLine("An A or B job is still running. C job cannot execute.");
        }

        TextBlock_Out.Text = output.ToString();
    }
}
private void Button_ListJobs_Click(object sender, RoutedEventArgs e)
{
    // Dumps every job in the Batch account (id, state, pool, start/end times)
    // into the output TextBlock.
    StringBuilder output = new StringBuilder(1024);
    output.AppendLine("listing all jobs");

    // Build shared-key credentials from the sample account settings.
    AccountSettings settings = SampleHelpers.LoadAccountSettings();
    BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
        settings.BatchServiceUrl,
        settings.BatchAccountName,
        settings.BatchAccountKey);
    output.AppendLine($"batchcred created to {settings.BatchAccountName} at {settings.BatchServiceUrl}");

    using (BatchClient batchClient = BatchClient.Open(credentials))
    {
        // Enumerate all jobs directly; one summary line per job.
        foreach (CloudJob job in batchClient.JobOperations.ListJobs())
        {
            output.AppendLine($"{job.Id} {job.State} pool: {job.ExecutionInformation.PoolId} start:{job.ExecutionInformation.StartTime} end: {job.ExecutionInformation.EndTime}");
        }
    }

    TextBlock_Out.Text = output.ToString();
}
public JobSubmitter()
{
    // Bind settings.json (read from the current working directory) to the
    // strongly-typed Settings object used by the job manager.
    var configuration = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("settings.json")
        .Build();
    this.jobManagerSettings = configuration.Get<Settings>();

    // Batch account credentials come from the shared sample helpers.
    this.accountSettings = SampleHelpers.LoadAccountSettings();
}
private void Button_Connect_Click(object sender, RoutedEventArgs e)
{
    // Connectivity smoke test: dumps account and job settings, opens a BatchClient,
    // then enumerates pools and application packages into the output TextBlock.
    StringBuilder output = new StringBuilder(1024);
    try
    {
        // Account settings first...
        AccountSettings acct = SampleHelpers.LoadAccountSettings();
        output.AppendLine("--- accountSettings ---");
        output.AppendLine(acct.ToString());

        // ...then the job settings.
        JobSettings jobCfg = SampleHelpers.LoadJobSettings();
        output.AppendLine("--- jobSettings ---");
        output.AppendLine(jobCfg.ToString());

        // Shared-key credentials for the Batch service.
        BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
            acct.BatchServiceUrl,
            acct.BatchAccountName,
            acct.BatchAccountKey);
        output.AppendLine($"credentials created: {credentials.AccountName},{credentials.BaseUrl}");

        using (BatchClient batchClient = BatchClient.Open(credentials))
        {
            output.AppendLine($"batchclient opened successfully");

            // Every pool in the account.
            output.AppendLine("--- pools ---");
            foreach (var pool in batchClient.PoolOperations.ListPools())
            {
                output.AppendLine($"pool found: id:{pool.Id} vmsize:{pool.VirtualMachineSize} state:{pool.State.ToString()}");
            }

            // Every application package summary in the account.
            output.AppendLine("--- applications ---");
            foreach (var app in batchClient.ApplicationOperations.ListApplicationSummaries())
            {
                output.AppendLine($"application found: {app.Id} {app.Versions[0]}");
            }
        }
    }
    catch (AggregateException ae)
    {
        // Flatten nested aggregates so every inner exception is reported.
        output.AppendLine("aggregate exception caught");
        output.AppendLine(SampleHelpers.AggregateExceptionDump(ae.Flatten()));
        System.Diagnostics.Trace.Write(output.ToString(), "ERROR");
    }
    catch (Exception ex)
    {
        output.AppendLine("exception caught");
        output.AppendLine(ex.ToString());
        System.Diagnostics.Trace.Write(output.ToString(), "ERROR");
    }

    TextBlock_Out.Text = output.ToString();
}
/// <summary>
/// Constructs a reducer task object.
/// </summary>
public ReducerTask()
{
    // Bind settings.json (read from the current working directory) to the
    // strongly-typed Settings object used for the text search.
    var configuration = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("settings.json")
        .Build();
    this.textSearchSettings = configuration.Get<Settings>();

    this.accountSettings = SampleHelpers.LoadAccountSettings();

    // The Batch agent sets these environment variables on every compute node;
    // they identify the account and the job this task runs under.
    this.accountName = Environment.GetEnvironmentVariable("AZ_BATCH_ACCOUNT_NAME");
    this.jobId = Environment.GetEnvironmentVariable("AZ_BATCH_JOB_ID");
}
private static void Main()
{
    // Synchronous console entry point: all real work happens in MainAsync.
    // Blocking with Wait() keeps the process alive until the async work completes.
    try
    {
        AccountSettings settings = SampleHelpers.LoadAccountSettings();
        MainAsync(settings).Wait();
    }
    catch (AggregateException ex)
    {
        // Print every inner exception, then rethrow so the failure is not swallowed.
        SampleHelpers.PrintAggregateException(ex);
        throw;
    }

    Console.WriteLine("Press return to exit...");
    Console.ReadLine();
}
private async void Button_KillJob_Click(object sender, RoutedEventArgs e)
{
    // Deletes the job whose ID is typed into TextBox_JobID and reports the
    // outcome (success or exception details) in the output TextBlock.
    string jobid = TextBox_JobID.Text.Trim();
    StringBuilder sb = new StringBuilder(1024);
    sb.AppendLine("Killing job# " + jobid);

    // Build shared-key credentials from the sample account settings.
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);
    sb.AppendLine($"batchcred created to {accountSettings.BatchAccountName} at {accountSettings.BatchServiceUrl}");

    using (BatchClient client = BatchClient.Open(cred))
    {
        try
        {
            sb.AppendLine("Attempting to delete job# " + jobid);
            await client.JobOperations.DeleteJobAsync(jobid);
            sb.AppendLine("success at deleting job# " + jobid);
        }
        catch (Exception ex)
        {
            // FIX: use AppendLine (was Append) so the failure notice and the
            // exception dump do not run together on one unreadable line.
            sb.AppendLine("exception thrown. jobid probably wasn't found. jobid=" + jobid);
            sb.AppendLine(ex.ToString());
        }
    } // batch client

    TextBlock_Out.Text = sb.ToString();
}
/// <summary>
/// Demonstrates efficient list queries against the Batch service: creates a pool and a job,
/// bulk-adds many tasks, then queries them with progressively narrower ODATADetailLevel
/// filter/select/expand clauses to show the effect on query response size and time.
/// </summary>
private static async Task MainAsync(string[] args)
{
    // You may adjust these values to experiment with different compute resource scenarios.
    const string nodeSize = "standard_d1_v2";
    const int nodeCount = 1;
    const int taskSlotsPerNode = 4;

    // Adjust the task count to experiment with different list operation query durations
    const int taskCount = 5000;

    const string poolId = "EfficientListQueriesSamplePool";
    const string jobId = "EfficientListQueriesSampleJob";

    var accountSettings = SampleHelpers.LoadAccountSettings();

    // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
    // Microsoft.Azure.Batch.Samples.Common project within this solution.
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Create a CloudPool, or obtain an existing pool with the specified ID
        CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(
            batchClient,
            poolId,
            nodeSize,
            nodeCount,
            taskSlotsPerNode);

        // Create a CloudJob, or obtain an existing job with the specified ID
        CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

        // Configure the tasks we'll be querying. Each task simply echoes the node's
        // name and then exits. We create "large" tasks by setting an environment
        // variable for each that is 2048 bytes in size. This is done simply to
        // increase response time when querying the batch service to more clearly
        // demonstrate query durations.
        List<CloudTask> tasks = new List<CloudTask>();
        List<EnvironmentSetting> environmentSettings = new List<EnvironmentSetting>();
        environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
        for (int i = 1; i < taskCount + 1; i++)
        {
            // Zero-pad task IDs (task00001 ...) so they sort lexicographically.
            string taskId = "task" + i.ToString().PadLeft(5, '0');
            string taskCommandLine = "cmd /c echo %COMPUTERNAME%";
            CloudTask task = new CloudTask(taskId, taskCommandLine);
            task.EnvironmentSettings = environmentSettings;
            tasks.Add(task);
        }

        Console.WriteLine();
        Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

        Stopwatch stopwatch = Stopwatch.StartNew();

        // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
        // helps to ensure efficient underlying API calls to the Batch service.
        await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

        stopwatch.Stop();
        Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
        Console.ReadLine();
        Console.WriteLine();
        stopwatch.Reset();

        // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
        // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
        // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
        // transferred, lowering your query response times (potentially greatly).

        // Get a subset of the tasks based on different task states
        ODATADetailLevel detail = new ODATADetailLevel();
        detail.FilterClause = "state eq 'active'";
        detail.SelectClause = "id,state";
        await QueryTasksAsync(batchClient, job.Id, detail);

        detail.FilterClause = "state eq 'running'";
        await QueryTasksAsync(batchClient, job.Id, detail);

        detail.FilterClause = "state eq 'completed'";
        await QueryTasksAsync(batchClient, job.Id, detail);

        // Get all tasks, but limit the properties returned to task id and state only
        detail.FilterClause = null;
        detail.SelectClause = "id,state";
        await QueryTasksAsync(batchClient, job.Id, detail);

        // Get all tasks, include id and state, also include the inflated environment settings property
        detail.SelectClause = "id,state,environmentSettings";
        await QueryTasksAsync(batchClient, job.Id, detail);

        // Get all tasks, include all standard properties, and expand the statistics
        detail.ExpandClause = "stats";
        detail.SelectClause = null;
        await QueryTasksAsync(batchClient, job.Id, detail);

        Console.WriteLine();
        Console.WriteLine("Done!");
        Console.WriteLine();

        // Clean up the resources we've created in the Batch account
        Console.WriteLine("Delete job? [yes] no");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.JobOperations.DeleteJobAsync(job.Id);
        }

        Console.WriteLine("Delete pool? [yes] no");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
        }
    }
}
/// <summary>
/// Runs a multi-instance (MPI) task sample: creates a pool and job, submits a
/// multi-instance task that launches an MS-MPI program via mpiexec, waits for the
/// primary task to complete, then prints stdout/stderr for the primary task and
/// each subtask before offering to delete the job and pool.
/// </summary>
public static async Task MainAsync()
{
    const string poolId = "MultiInstanceSamplePool";
    const string jobId = "MultiInstanceSampleJob";
    const string taskId = "MultiInstanceSampleTask";
    const int numberOfNodes = 5;

    // Name of the application package uploaded to Azure (alongside MSMpiSetup).
    const string appPackageId = "Parallel";
    const string appPackageVersion = "1.0";

    TimeSpan timeout = TimeSpan.FromMinutes(15);

    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    // Pre-configured Batch account credentials used to connect to the account.
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Create the pool of compute nodes and the job to which the multi-instance task is added.
        await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
        await CreateJobAsync(batchClient, jobId, poolId);

        // Batch creates one primary task and several subtasks for a multi-instance task.
        CloudTask multiInstanceTask = new CloudTask(id: taskId,
            commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\ParallelMpiApp.exe");

        // SPMD coordination command: multiple separate processors run the same program simultaneously.
        multiInstanceTask.MultiInstanceSettings = new MultiInstanceSettings(@"cmd /c start cmd /c smpd.exe -d", numberOfNodes);

        // Submit the task: one primary and enough subtasks to match the node count
        // are created and scheduled for execution on the nodes.
        Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
        await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

        // Bound version of the task.
        CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

        // Monitor task state; wait until the task has completed.
        Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
        TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
        await taskStateMonitor.WhenAll(new List<CloudTask> { mainTask }, TaskState.Completed, timeout);

        // Refresh the task to pick up its latest state.
        await mainTask.RefreshAsync();

        string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
        string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

        Console.WriteLine();
        Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
        Console.WriteLine("---- stdout.txt ----");
        Console.WriteLine(stdOut);
        Console.WriteLine("---- stderr.txt ----");
        Console.WriteLine(stdErr);

        // Give the subtasks a few seconds so they have time to finish.
        TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
        Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
        System.Threading.Thread.Sleep(subtaskTimeout);

        Console.WriteLine();
        Console.WriteLine("---- Subtask information ----");

        // Collection of subtasks; print information about each one.
        IPagedEnumerable<SubtaskInformation> subtasks = mainTask.ListSubtasks();
        await subtasks.ForEachAsync(async (subtask) =>
        {
            Console.WriteLine("subtask: " + subtask.Id);
            Console.WriteLine("\texit code: " + subtask.ExitCode);

            if (subtask.State == SubtaskState.Completed)
            {
                // Obtain the output files from the node that ran the subtask.
                ComputeNode node = await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId, subtask.ComputeNodeInformation.ComputeNodeId);

                string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                stdOut = await stdOutFile.ReadAsStringAsync();
                stdErr = await stdErrFile.ReadAsStringAsync();

                Console.WriteLine($"\tnode: " + node.Id);
                Console.WriteLine("\tstdout.txt: " + stdOut);
                Console.WriteLine("\tstderr.txt: " + stdErr);
            }
            else
            {
                Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
            }
        });

        // Delete the resources we created (pool, job) so it does not have to be done manually.
        Console.WriteLine();
        Console.Write("Delete job? [yes] no: ");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.JobOperations.DeleteJobAsync(jobId);
        }

        Console.Write("Delete pool? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(poolId);
        }
    }
}
/// <summary>
/// Runs the MPIHelloWorld multi-instance task sample: creates a pool and job,
/// submits a multi-instance task that runs an MS-MPI program via mpiexec, waits
/// for the primary task to complete, prints stdout/stderr for the primary task
/// and each subtask, then offers to delete the job and pool.
/// </summary>
public static async Task MainAsync()
{
    const string poolId = "MultiInstanceSamplePool";
    const string jobId = "MultiInstanceSampleJob";
    const string taskId = "MultiInstanceSampleTask";
    const int numberOfNodes = 3;

    // The application package and version to deploy to the compute nodes.
    // It should contain your MPIHelloWorld sample MS-MPI program:
    // https://blogs.technet.microsoft.com/windowshpc/2015/02/02/how-to-compile-and-run-a-simple-ms-mpi-program/
    // And the MSMpiSetup.exe installer:
    // https://www.microsoft.com/download/details.aspx?id=52981
    // Then upload it as an application package:
    // https://azure.microsoft.com/documentation/articles/batch-application-packages/
    const string appPackageId = "MPIHelloWorld";
    const string appPackageVersion = "1.0";

    TimeSpan timeout = TimeSpan.FromMinutes(30);

    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Create the pool of compute nodes and the job to which we add the multi-instance task.
        await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
        await CreateJobAsync(batchClient, jobId, poolId);

        // Create the multi-instance task. The MultiInstanceSettings property (configured
        // below) tells Batch to create one primary and several subtasks, the total number
        // of which matches the number of instances you specify in the MultiInstanceSettings.
        // This main task's command line is the "application command," and is executed *only*
        // by the primary, and only after the primary and all subtasks have executed the
        // "coordination command" (the MultiInstanceSettings.CoordinationCommandLine).
        CloudTask multiInstanceTask = new CloudTask(id: taskId,
            commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\MPIHelloWorld.exe");

        // Configure the task's MultiInstanceSettings. Specify the number of nodes
        // to allocate to the multi-instance task, and the "coordination command".
        // The CoordinationCommandLine is run by the primary and subtasks, and is
        // used in this sample to start SMPD on the compute nodes.
        multiInstanceTask.MultiInstanceSettings = new MultiInstanceSettings(@"cmd /c start cmd /c smpd.exe -d", numberOfNodes);

        // Submit the task to the job. Batch will take care of creating one primary and
        // enough subtasks to match the total number of nodes allocated to the task,
        // and schedule them for execution on the nodes.
        Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
        await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

        // Get the "bound" version of the multi-instance task.
        CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

        // We use a TaskStateMonitor to monitor the state of our tasks. In this case,
        // we will wait for the task to reach the Completed state.
        Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
        TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
        await taskStateMonitor.WhenAll(new List<CloudTask> { mainTask }, TaskState.Completed, timeout);

        // Refresh the task to obtain up-to-date property values from Batch, such as
        // its current state and information about the node on which it executed.
        await mainTask.RefreshAsync();

        string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
        string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

        Console.WriteLine();
        Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
        Console.WriteLine("---- stdout.txt ----");
        Console.WriteLine(stdOut);
        Console.WriteLine("---- stderr.txt ----");
        Console.WriteLine(stdErr);

        // Need to delay a bit to allow the Batch service to mark the subtasks as Complete
        TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
        Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
        System.Threading.Thread.Sleep(subtaskTimeout);

        Console.WriteLine();
        Console.WriteLine("---- Subtask information ----");

        // Obtain the collection of subtasks for the multi-instance task, and print
        // some information about each.
        IPagedEnumerable<SubtaskInformation> subtasks = mainTask.ListSubtasks();
        await subtasks.ForEachAsync(async (subtask) =>
        {
            Console.WriteLine("subtask: " + subtask.Id);
            Console.WriteLine("\texit code: " + subtask.ExitCode);

            if (subtask.State == SubtaskState.Completed)
            {
                // Obtain the file from the node on which the subtask executed. For normal CloudTasks,
                // we could simply call CloudTask.GetNodeFile(Constants.StandardOutFileName), but the
                // subtasks are not "normal" tasks in Batch, and thus must be handled differently.
                ComputeNode node = await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId, subtask.ComputeNodeInformation.ComputeNodeId);

                string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                stdOut = await stdOutFile.ReadAsStringAsync();
                stdErr = await stdErrFile.ReadAsStringAsync();

                Console.WriteLine($"\tnode: " + node.Id);
                Console.WriteLine("\tstdout.txt: " + stdOut);
                Console.WriteLine("\tstderr.txt: " + stdErr);
            }
            else
            {
                Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
            }
        });

        // Clean up the resources we've created in the Batch account
        Console.WriteLine();
        Console.Write("Delete job? [yes] no: ");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.JobOperations.DeleteJobAsync(jobId);
        }

        Console.Write("Delete pool? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(poolId);
        }
    }
}
/// <summary>
/// Demonstrates job preparation and release tasks: a prep task writes a file to the
/// node's shared directory, each job task appends to it, and a release task deletes it.
/// After the tasks complete, the shared file contents and the prep/release exit codes
/// are printed, then the job and pool can be interactively deleted.
/// </summary>
private static async Task MainAsync(string[] args)
{
    const string poolId = "JobPrepReleaseSamplePool";
    const string jobId = "JobPrepReleaseSampleJob";

    // Location of the file that the job tasks will work with, a text file in the
    // node's "shared" directory.
    const string taskOutputFile = "%AZ_BATCH_NODE_SHARED_DIR%\\job_prep_and_release.txt";

    // The job prep task will write the node ID to the text file in the shared directory
    const string jobPrepCmdLine = "cmd /c echo %AZ_BATCH_NODE_ID% tasks: >" + taskOutputFile;

    // Each task then echoes its ID to the same text file
    const string taskCmdLine = "cmd /c echo %AZ_BATCH_TASK_ID% >> " + taskOutputFile;

    // The job release task will then delete the text file from the shared directory
    const string jobReleaseCmdLine = "cmd /c del " + taskOutputFile;

    var accountSettings = SampleHelpers.LoadAccountSettings();

    // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    // Initialize the BatchClient for access to your Batch account
    using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
    {
        // Create a CloudPool (or obtain an existing pool with the specified ID)
        CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(
            batchClient, poolId, "standard_d1_v2", 2, 1);

        // Create a CloudJob (or obtain an existing job with the specified ID)
        CloudJob job = await SampleHelpers.GetJobIfExistAsync(batchClient, jobId);
        if (job == null)
        {
            Console.WriteLine("Job {0} not found, creating...", jobId);
            CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = poolId });

            // Configure and assign the job preparation task
            unboundJob.JobPreparationTask = new JobPreparationTask { CommandLine = jobPrepCmdLine };

            // Configure and assign the job release task
            unboundJob.JobReleaseTask = new JobReleaseTask { CommandLine = jobReleaseCmdLine };

            await unboundJob.CommitAsync();

            // Get the bound version of the job with all of its properties populated
            job = await batchClient.JobOperations.GetJobAsync(jobId);
        }

        // Create the tasks that the job will execute
        List<CloudTask> tasks = new List<CloudTask>();
        for (int i = 1; i <= 8; i++)
        {
            string taskId = "task" + i.ToString().PadLeft(3, '0');
            string taskCommandLine = taskCmdLine;
            CloudTask task = new CloudTask(taskId, taskCommandLine);
            tasks.Add(task);
        }

        // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
        // submission helps to ensure efficient underlying API calls to the Batch service.
        Console.WriteLine("Submitting tasks and awaiting completion...");
        await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

        // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
        // for the nodes within the pool to be created and started if the pool had not yet been created.
        await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
            job.ListTasks(),
            TaskState.Completed,
            TimeSpan.FromMinutes(30));

        Console.WriteLine("All tasks completed.");
        Console.WriteLine();

        // Print the contents of the shared text file modified by the job preparation and other tasks.
        ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id, state");
        IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(pool.Id, nodeDetail);
        await nodes.ForEachAsync(async (node) =>
        {
            // Check to ensure that the node is Idle before attempting to pull the text file.
            // If the pool was just created, there is a chance that another node completed all
            // of the tasks prior to the other node(s) completing their startup procedure.
            if (node.State == ComputeNodeState.Idle)
            {
                NodeFile sharedTextFile = await node.GetNodeFileAsync("shared\\job_prep_and_release.txt");
                Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Path, node.Id);
                Console.WriteLine("-------------------------------------------");
                Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
            }
        });

        // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
        // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
        // thus you need not call Terminate if you typically delete your jobs upon task completion.
        await batchClient.JobOperations.TerminateJobAsync(job.Id);

        // Wait for the job to reach state "Completed." Note that this wait is not typically necessary in
        // production code, but is done here to enable the checking of the release tasks exit code below.
        await ArticleHelpers.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

        // Print the exit codes of the prep and release tasks by obtaining their execution info
        List<JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();
        foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
        {
            Console.WriteLine();
            Console.WriteLine("{0}: ", info.ComputeNodeId);

            // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
            if (info.JobPreparationTaskExecutionInformation != null)
            {
                Console.WriteLine(" Prep task exit code: {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
            }

            // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
            if (info.JobReleaseTaskExecutionInformation != null)
            {
                Console.WriteLine(" Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
            }
        }

        // Clean up the resources we've created in the Batch account
        Console.WriteLine();
        Console.WriteLine("Delete job? [yes] no");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            // Note that deleting the job will execute the job release task if the job was not previously terminated
            await batchClient.JobOperations.DeleteJobAsync(job.Id);
        }

        Console.WriteLine("Delete pool? [yes] no");
        // FIX: lowercase the response like the job prompt above does — previously the raw
        // ReadLine() was compared against "n"/"no", so answering "No" or "N" (capitalized)
        // failed the check and the pool was deleted despite the user declining.
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
        }
    }
}
private async void Button_JobStatus_Click(object sender, RoutedEventArgs e)
{
    // Reports the state and duration of the job named in TextBox_JobID,
    // followed by per-task state/duration lines, in the output TextBlock.
    StringBuilder output = new StringBuilder(1024);

    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();
    JobSettings jobSettings = SampleHelpers.LoadJobSettings();

    // Shared-key credentials for the Batch service.
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);
    output.AppendLine($"batchcred created to {accountSettings.BatchAccountName} at {accountSettings.BatchServiceUrl}");

    using (BatchClient client = BatchClient.Open(cred))
    {
        string jobId = TextBox_JobID.Text.Trim();
        CloudJob job = null;
        output.AppendLine($"GetJob({jobId})");
        try
        {
            job = await client.JobOperations.GetJobAsync(jobId);
        }
        catch (Exception ex)
        {
            job = null;
            output.AppendLine($"job not found. jobid=[{jobId}]");
            output.AppendLine("job not found exception: " + ex.ToString());
        }

        if (job != null)
        {
            // End - Start yields null until both timestamps are populated.
            TimeSpan? jobDuration = job.ExecutionInformation.EndTime - job.ExecutionInformation.StartTime;
            output.AppendLine(jobDuration == null
                ? $"job state:{job.State} "
                : $"job state:{job.State} duration: {jobDuration}");

            foreach (CloudTask task in job.ListTasks())
            {
                TimeSpan? taskDuration = task.ExecutionInformation.EndTime - task.ExecutionInformation.StartTime;
                output.AppendLine(taskDuration == null
                    ? $"task: {task.Id} {task.State} start: {task.ExecutionInformation.StartTime} end:{task.ExecutionInformation.EndTime}"
                    : $"task: {task.Id} {task.State} duration:{taskDuration} start: {task.ExecutionInformation.StartTime} end:{task.ExecutionInformation.EndTime}");
            }
        }
    }

    TextBlock_Out.Text = output.ToString();
}
/// <summary>
/// Entry point for the persisted-output sample: the user picks one of three
/// persistence techniques (file conventions, OutputFiles, or OutputFiles
/// implementing conventions), the chosen example runs to completion, and the
/// job, pool, and storage container can then be interactively deleted.
/// </summary>
public static void Main(string[] args)
{
    // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    StorageCredentials storageCred = new StorageCredentials(
        accountSettings.StorageAccountName,
        accountSettings.StorageAccountKey);

    CloudStorageAccount storageAccount = new CloudStorageAccount(storageCred, true);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Timestamped job ID so repeated runs do not collide.
        string jobId = "PersistOutput-" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
        const string poolId = "PersistOutputsSamplePool";
        const int nodeCount = 1;

        CloudBlobContainer container;

        Console.Write("Which persistence technology would you like to use? 1) File conventions, 2) OutputFiles, or 3) OutputFiles implementing conventions: ");
        string response = Console.ReadLine().ToLower();
        if (response == "1")
        {
            container = FileConventionsExample.Run(batchClient, storageAccount, poolId, nodeCount, jobId).Result;
            Console.WriteLine();
            Console.WriteLine("All tasks completed and outputs downloaded. You can view the task outputs in the Azure portal");
            Console.WriteLine("before deleting the job.");
        }
        else if (response == "2")
        {
            container = OutputFilesExample.Run(batchClient, storageAccount, poolId, nodeCount, jobId).Result;
            Console.WriteLine();
            Console.WriteLine("All tasks completed and outputs downloaded.");
        }
        else if (response == "3")
        {
            container = OutputFilesExample.RunWithConventions(batchClient, storageAccount, poolId, nodeCount, jobId).Result;
            Console.WriteLine();
            Console.WriteLine("All tasks completed and outputs downloaded. You can view the task outputs in the Azure portal");
            Console.WriteLine("before deleting the job.");
        }
        else
        {
            throw new ArgumentException($"Unexpected response: {response}");
        }

        // Clean up the resources we've created (job, pool, and blob storage container)
        Console.WriteLine();
        Console.Write("Delete job? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            batchClient.JobOperations.DeleteJob(jobId);
        }

        Console.Write("Delete pool? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            batchClient.PoolOperations.DeletePool(poolId);
        }

        Console.Write("Delete storage container? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            container.Delete();
        }

        Console.WriteLine();
        Console.WriteLine("Sample complete, hit ENTER to exit...");
        Console.ReadLine();
    }
}
public static void JobMain(string[] args)
{
    // Load the job configuration from settings.json next to the executable.
    Settings topNWordsConfiguration = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("settings.json")
        .Build()
        .Get<Settings>();
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
        new StorageCredentials(
            accountSettings.StorageAccountName,
            accountSettings.StorageAccountKey),
        accountSettings.StorageServiceUrl,
        useHttps: true);

    StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
        accountSettings.StorageAccountName,
        accountSettings.StorageAccountKey,
        cloudStorageAccount.BlobEndpoint.ToString());

    using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey)))
    {
        string stagingContainer = null;

        // NOTE: osFamily "6" is requested below (the old comment claimed OSFamily 5 == Windows 2016;
        // osFamily 6 is a newer Windows Server family). See the OS family/version table at:
        // http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
        CloudPool pool = client.PoolOperations.CreatePool(
            topNWordsConfiguration.PoolId,
            targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
            virtualMachineSize: "standard_d1_v2",
            cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "6"));
        Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);

        // Pack tasks across nodes evenly, up to 4 concurrent tasks per node.
        pool.TaskSchedulingPolicy = new TaskSchedulingPolicy(ComputeNodeFillType.Spread);
        pool.MaxTasksPerComputeNode = 4;

        // GetAwaiter().GetResult() surfaces the original exception rather than
        // an AggregateException the way .Wait() does.
        GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).GetAwaiter().GetResult();

        // Autoscale formula: sample $PendingTasks; until enough samples exist (>= 70%
        // over 90s) fall back to a fixed node count, otherwise track the pending-task average.
        var formula = @"startingNumberOfVMs = 2;
            maxNumberofVMs = 4;
            pendingTaskSamplePercent = $PendingTasks.GetSamplePercent(90 * TimeInterval_Second);
            pendingTaskSamples = pendingTaskSamplePercent < 70 ? startingNumberOfVMs : avg($PendingTasks.GetSample(180 * TimeInterval_Second));
            $TargetDedicatedNodes = min(maxNumberofVMs, pendingTaskSamples);
            $NodeDeallocationOption = taskcompletion;";

        // Give the service time to accumulate $PendingTasks samples before enabling autoscale.
        var noOfSeconds = 150;
        Thread.Sleep(noOfSeconds * 1000);
        client.PoolOperations.EnableAutoScale(
            poolId: topNWordsConfiguration.PoolId,
            autoscaleFormula: formula,
            autoscaleEvaluationInterval: TimeSpan.FromMinutes(5));

        try
        {
            Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);

            // Get an empty unbound Job, then commit it to create it in the service.
            CloudJob unboundJob = client.JobOperations.CreateJob();
            unboundJob.Id = topNWordsConfiguration.JobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = topNWordsConfiguration.PoolId };
            unboundJob.Commit();

            // Create file staging objects that represent the executable and its dependent assemblies to run as the task.
            // These files are copied to every node before the corresponding task is scheduled to run on that node.
            FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
            FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);
            FileToStage newtonJsoftDll = new FileToStage(NewtonJSoftDllName, stagingStorageAccount);
            FileToStage microsoftEFDll = new FileToStage(MicrosoftEntityFrameworkDllName, stagingStorageAccount);
            FileToStage microsoftEFCoreDll = new FileToStage(MicrosoftEntityFrameworkCoreDllName, stagingStorageAccount);
            FileToStage microsoftBCLDll = new FileToStage(MicrosoftBCLDllName, stagingStorageAccount);
            FileToStage systemTasksDll = new FileToStage(SystemTasksDllName, stagingStorageAccount);
            FileToStage topNWordsConfigFile = new FileToStage(TopnWordsConfig, stagingStorageAccount);
            FileToStage SystemValueTupleDll = new FileToStage(SystemValueTupleDllName, stagingStorageAccount);
            FileToStage DependencyInjectionAbstractionsDll = new FileToStage(DependecyInjectionAbstractionsDllName, stagingStorageAccount);
            FileToStage DependencyInjectionDll = new FileToStage(DependecyInjectionDllName, stagingStorageAccount);
            FileToStage LoggingAbstractionsDll = new FileToStage(LoggingAbstractionsDllName, stagingStorageAccount);
            FileToStage DiagnosticsDll = new FileToStage(DiagnosticssDllName, stagingStorageAccount);
            FileToStage CachingAbstractionDll = new FileToStage(CachingAbstractionsDllName, stagingStorageAccount);
            FileToStage MicrosoftSqlServerDll = new FileToStage(MicrosoftSqlServerDllName, stagingStorageAccount);
            FileToStage SystemComponentDll = new FileToStage(SystemComponentDllName, stagingStorageAccount);
            FileToStage SystemCollectionsDll = new FileToStage(SystemCollectionsDllName, stagingStorageAccount);
            FileToStage pDll = new FileToStage(pdllName, stagingStorageAccount);
            FileToStage oDll = new FileToStage(odllName, stagingStorageAccount);
            FileToStage lDll = new FileToStage(ldllName, stagingStorageAccount);
            FileToStage hashcodeDll = new FileToStage(hashcodeDllName, stagingStorageAccount);
            FileToStage clientSqlDll = new FileToStage(clientSqlClientDllName, stagingStorageAccount);
            FileToStage cachingMemoryDll = new FileToStage(CachingMemoryDllName, stagingStorageAccount);
            FileToStage configAbstractionDll = new FileToStage(configAbstractionDllName, stagingStorageAccount);
            FileToStage SNIDll = new FileToStage(SNIDllName, stagingStorageAccount);
            FileToStage relationDll = new FileToStage(relationddllName, stagingStorageAccount);

            // TODO(review): hard-coded absolute path — consider moving into settings.json.
            var textFile = "E:\\WeatherAPIPOC\\cities_id.txt";
            var text = File.ReadAllLines(textFile);
            var cityList = new List<string>(text);

            // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
            // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
            // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
            // by Storage which will lengthen the overall processing time.
            //
            // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
            // complexity.
            Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

            // Initialize a collection to hold the tasks that will be submitted in their entirety.
            List<CloudTask> tasksToRun = new List<CloudTask>(topNWordsConfiguration.NumberOfTasks);
            for (int i = 0; i < cityList.Count; i++)
            {
                string programLaunchTime = DateTime.Now.ToString("h:mm:sstt");
                // SNI.dll must live under x64\ next to the executable, hence the mkdir/move prologue.
                CloudTask task = new CloudTask(
                    id: $"task_no_{i + 1}",
                    commandline: $"cmd /c mkdir x64 & move SNI.dll x64 & {TopNWordsExeName} --Task {cityList[i]} %AZ_BATCH_NODE_ID% {programLaunchTime}");

                // This is the list of files to stage to a container -- for each job, one container is created and
                // files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                // the container).
                task.FilesToStage = new List<IFileStagingProvider>
                {
                    topNWordExe,
                    storageDll,
                    newtonJsoftDll,
                    microsoftEFDll,
                    microsoftEFCoreDll,
                    microsoftBCLDll,
                    systemTasksDll,
                    topNWordsConfigFile,
                    SystemValueTupleDll,
                    DependencyInjectionAbstractionsDll,
                    DependencyInjectionDll,
                    LoggingAbstractionsDll,
                    DiagnosticsDll,
                    CachingAbstractionDll,
                    MicrosoftSqlServerDll,
                    SystemComponentDll,
                    SystemCollectionsDll,
                    oDll,
                    pDll,
                    lDll,
                    relationDll,
                    hashcodeDll,
                    clientSqlDll,
                    cachingMemoryDll,
                    configAbstractionDll,
                    SNIDll
                };

                tasksToRun.Add(task);
            }

            // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
            // The container information is used later on to remove these files from Storage.
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag =
                new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

            // Loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
            // capture the name of the container holding the files so they can be deleted later on if that option
            // was configured in the settings.
            foreach (var fsBagItem in fsArtifactBag)
            {
                IFileStagingArtifact fsValue;
                if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                {
                    SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                    if (stagingArtifact != null)
                    {
                        stagingContainer = stagingArtifact.BlobContainerCreated;
                        Console.WriteLine(
                            "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                            stagingArtifact.BlobContainerCreated);
                    }
                }
            }

            // Get the job to monitor status.
            CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);
            Console.Write("Waiting for tasks to complete ... ");

            // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
            // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
            IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
            client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
            Console.WriteLine("tasks are done.");

            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine();
                Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
            }
        }
        finally
        {
            // Delete the pool that we created
            if (topNWordsConfiguration.ShouldDeletePool)
            {
                Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
            }

            // Delete the job that we created
            if (topNWordsConfiguration.ShouldDeleteJob)
            {
                Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
            }

            // Delete the containers we created
            if (topNWordsConfiguration.ShouldDeleteContainer)
            {
                DeleteContainers(accountSettings, stagingContainer);
            }
        }
    }
}
/// <summary>
/// Submits a new "A" job with a single task to the Batch service and reports progress
/// into the output TextBlock. async void is acceptable here only because this is a
/// top-level WPF event handler; all awaits are wrapped in try/catch so a service
/// failure cannot crash the process as an unobserved exception.
/// </summary>
private async void Button_JobA_Click(object sender, RoutedEventArgs e)
{
    const string DQUOTE = "\"";
    string joba_task_cmdline = $"cmd /c {DQUOTE}set AZ_BATCH & timeout /t 30 > NUL{DQUOTE}";

    StringBuilder sb = new StringBuilder(1024);
    sb.AppendLine("Submitting job");

    string jobid = NamingHelpers.GenJobName("A");
    sb.AppendLine($"jobid={jobid}");
    string taskid = NamingHelpers.GenTaskName("JOBA");
    sb.AppendLine($"taskid={taskid}");
    sb.AppendLine($"task command line={joba_task_cmdline}");

    // read account settings, dump
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();
    // read job settings, dump
    JobSettings jobSettings = SampleHelpers.LoadJobSettings();

    // connect to batch, dump status
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey
        );
    sb.AppendLine($"batchcred created to {accountSettings.BatchAccountName} at {accountSettings.BatchServiceUrl}");

    try
    {
        using (BatchClient client = BatchClient.Open(cred))
        {
            PoolInformation pool = new PoolInformation();
            pool.PoolId = jobSettings.PoolID;

            sb.AppendLine("creating job " + jobid);
            CloudJob ourJob = client.JobOperations.CreateJob(jobid, pool);
            // Auto-terminate the job once its tasks finish so it doesn't linger in Active state.
            ourJob.OnAllTasksComplete = Microsoft.Azure.Batch.Common.OnAllTasksComplete.TerminateJob;
            await ourJob.CommitAsync();
            sb.AppendLine("job created " + jobid);

            // Get the bound version of the job with all of its properties populated
            CloudJob committedJob = await client.JobOperations.GetJobAsync(jobid);
            sb.AppendLine("bound version of job retrieved " + jobid);

            sb.AppendLine("submitting task " + taskid);
            // Create the tasks that the job will execute
            CloudTask task = new CloudTask(taskid, joba_task_cmdline);
            await client.JobOperations.AddTaskAsync(jobid, task);
            sb.AppendLine("task submitted " + taskid);

            TextBox_JobID.Text = jobid;
            TextBox_Task.Text = taskid;
            sb.AppendLine("task submitted. use job status button to see job and task checks");
        }
    }
    catch (Exception ex)
    {
        // Surface the failure in the UI instead of letting an unhandled
        // exception escape the async void handler and kill the app.
        sb.AppendLine("ERROR submitting job: " + ex.Message);
    }

    TextBlock_Out.Text = sb.ToString();
}
/// <summary>
/// Demonstrates Batch task dependencies: id dependencies, exit-condition-driven
/// dependency actions (Block/Satisfy), then prompts to clean up the job and pool.
/// </summary>
private static async Task MainAsync(string[] args)
{
    // You may adjust these values to experiment with different compute resource scenarios.
    const string nodeSize = "standard_d1_v2";
    const string osFamily = "5";
    const int nodeCount = 1;

    const string poolId = "TaskDependenciesSamplePool";
    const string jobId = "TaskDependenciesSampleJob";

    // Amount of time to wait before timing out long-running tasks.
    TimeSpan timeLimit = TimeSpan.FromMinutes(30);

    // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
    // Microsoft.Azure.Batch.Samples.Common project within this solution.
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    try
    {
        using (BatchClient batchClient = BatchClient.Open(cred))
        {
            // Create the pool.
            Console.WriteLine("Creating pool [{0}]...", poolId);
            CloudPool unboundPool = batchClient.PoolOperations.CreatePool(
                poolId: poolId,
                cloudServiceConfiguration: new CloudServiceConfiguration(osFamily),
                virtualMachineSize: nodeSize,
                targetDedicatedComputeNodes: nodeCount);
            await unboundPool.CommitAsync();

            // Create the job and specify that it uses tasks dependencies.
            Console.WriteLine("Creating job [{0}]...", jobId);
            CloudJob unboundJob = batchClient.JobOperations.CreateJob(
                jobId,
                new PoolInformation { PoolId = poolId });

            // IMPORTANT: This is REQUIRED for using task dependencies.
            unboundJob.UsesTaskDependencies = true;
            await unboundJob.CommitAsync();

            // Create the collection of tasks that will be added to the job.
            List<CloudTask> tasks = new List<CloudTask>
            {
                // 'Rain' and 'Sun' don't depend on any other tasks
                new CloudTask("Rain", "cmd.exe /c echo Rain"),
                new CloudTask("Sun", "cmd.exe /c echo Sun"),

                // Task 'Flowers' depends on completion of both 'Rain' and 'Sun'
                // before it is run.
                new CloudTask("Flowers", "cmd.exe /c echo Flowers")
                {
                    DependsOn = TaskDependencies.OnIds("Rain", "Sun")
                },

                // Tasks 1, 2, and 3 don't depend on any other tasks. Because
                // we will be using them for a task range dependency, we must
                // specify string representations of integers as their ids.
                new CloudTask("1", "cmd.exe /c echo 1"),
                new CloudTask("2", "cmd.exe /c echo 2"),
                new CloudTask("3", "cmd.exe /c echo 3"),

                // Task A is the parent task.
                new CloudTask("A", "cmd.exe /c echo A")
                {
                    // Specify exit conditions for task A and their dependency actions.
                    ExitConditions = new ExitConditions
                    {
                        // If task A exits with a pre-processing error, block any downstream tasks (in this example, task B).
                        PreProcessingError = new ExitOptions
                        {
                            DependencyAction = DependencyAction.Block
                        },
                        // If task A exits with the specified error codes, block any downstream tasks (in this example, task B).
                        ExitCodes = new List<ExitCodeMapping>
                        {
                            new ExitCodeMapping(10, new ExitOptions() { DependencyAction = DependencyAction.Block }),
                            new ExitCodeMapping(20, new ExitOptions() { DependencyAction = DependencyAction.Block })
                        },
                        // If task A succeeds or fails with any other error, any downstream tasks become eligible to run
                        // (in this example, task B).
                        Default = new ExitOptions
                        {
                            DependencyAction = DependencyAction.Satisfy
                        }
                    }
                },

                // Task B depends on task A. Whether it becomes eligible to run depends on how task A exits.
                new CloudTask("B", "cmd.exe /c echo B")
                {
                    DependsOn = TaskDependencies.OnId("A")
                },
            };

            // Add the tasks to the job.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasks);

            // Pause execution while we wait for the tasks to complete, and notify
            // whether the tasks completed successfully.
            Console.WriteLine("Waiting for task completion...");
            Console.WriteLine();
            CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

            await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                job.ListTasks(),
                TaskState.Completed,
                timeLimit);

            Console.WriteLine("All tasks completed successfully.");
            Console.WriteLine();
        }
    }
    catch (Exception e)
    {
        Console.WriteLine();
        Console.WriteLine("An exception occurred.");
        Console.WriteLine(e.Message);
        Console.WriteLine(e.StackTrace);
    }
    finally
    {
        using (BatchClient batchClient = BatchClient.Open(cred))
        {
            // Clean up the resources we've created in the Batch account. Use the known
            // jobId directly instead of re-fetching the job: a GetJobAsync call here would
            // throw (and mask any earlier failure) if the job was never created.
            Console.Write("Delete job? [yes] no: ");
            string response = (Console.ReadLine() ?? string.Empty).ToLower();
            if (response != "n" && response != "no")
            {
                await batchClient.JobOperations.DeleteJobAsync(jobId);
            }

            Console.Write("Delete pool? [yes] no: ");
            response = (Console.ReadLine() ?? string.Empty).ToLower();
            if (response != "n" && response != "no")
            {
                await batchClient.PoolOperations.DeletePoolAsync(poolId);
            }
        }
    }
}
/// <summary>
/// Demonstrates running many tasks in parallel across a pool configured with
/// multiple tasks per node, then reports per-node task assignment and summary
/// statistics before prompting for cleanup.
/// </summary>
private static async Task MainAsync(string[] args)
{
    // You may adjust these values to experiment with different compute resource scenarios.
    const string nodeSize = "standard_d1_v2";
    const int nodeCount = 4;
    const int maxTasksPerNode = 4;
    const int taskCount = 32;

    // Ensure there are enough tasks to help avoid hitting some timeout conditions below
    int minimumTaskCount = nodeCount * maxTasksPerNode * 2;
    if (taskCount < minimumTaskCount)
    {
        Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
        Console.WriteLine();

        // Not enough tasks, exit the application
        return;
    }

    // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
    // values to simulate variable task duration
    const int minPings = 30;
    const int maxPings = 60;

    const string poolId = "ParallelTasksSamplePool";
    const string jobId = "ParallelTasksSampleJob";

    // Amount of time to wait before timing out (potentially) long-running tasks
    TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

    // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
    // Microsoft.Azure.Batch.Samples.Common project within this solution.
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Create a CloudPool, or obtain an existing pool with the specified ID
        CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(
            batchClient,
            poolId,
            nodeSize,
            nodeCount,
            maxTasksPerNode);

        // Create a CloudJob, or obtain an existing job with the specified ID
        CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

        // The job's tasks ping localhost a random number of times between minPings and maxPings.
        // Adjust the minPings/maxPings values above to experiment with different task durations.
        Random rand = new Random();
        List<CloudTask> tasks = new List<CloudTask>();
        for (int i = 1; i <= taskCount; i++)
        {
            string taskId = "task" + i.ToString().PadLeft(3, '0');
            string taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
            CloudTask task = new CloudTask(taskId, taskCommandLine);
            tasks.Add(task);
        }

        // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
        // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be
        // scheduled to execute on a compute node as soon any node has reached Idle state. Because the focus of this sample
        // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to
        // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for
        // parallelization.
        await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);
        await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

        // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
        // helps to ensure efficient underlying API calls to the Batch service.
        await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

        // Pause again to wait until *all* nodes are running tasks
        await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

        Stopwatch stopwatch = Stopwatch.StartNew();

        // Print out task assignment information.
        Console.WriteLine();
        await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);
        Console.WriteLine();

        // Pause execution while we wait for all of the tasks to complete
        Console.WriteLine("Waiting for task completion...");
        Console.WriteLine();

        try
        {
            await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                job.ListTasks(),
                TaskState.Completed,
                longTaskDurationLimit);
        }
        catch (TimeoutException e)
        {
            Console.WriteLine(e.ToString());
        }

        stopwatch.Stop();

        // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
        // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
        // amount of data transferred, lowering your query response times and increasing performance.
        ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
        IPagedEnumerable<CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);

        // Get a collection of the completed tasks sorted by the compute nodes on which they executed
        List<CloudTask> completedTasks = allTasks
            .Where(t => t.State == TaskState.Completed)
            .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
            .ToList();

        // Print the completed task information
        Console.WriteLine();
        Console.WriteLine("Completed tasks:");
        string lastNodeId = string.Empty;
        foreach (CloudTask task in completedTasks)
        {
            if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
            {
                Console.WriteLine();
                Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
            }

            lastNodeId = task.ComputeNodeInformation.ComputeNodeId;
            Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
        }

        // Get a collection of the uncompleted tasks which may exist if the TaskMonitor timeout was hit
        List<CloudTask> uncompletedTasks = allTasks
            .Where(t => t.State != TaskState.Completed)
            .OrderBy(t => t.Id)
            .ToList();

        // Print a list of uncompleted tasks, if any
        Console.WriteLine();
        Console.WriteLine("Uncompleted tasks:");
        Console.WriteLine();
        if (uncompletedTasks.Any())
        {
            foreach (CloudTask task in uncompletedTasks)
            {
                Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
            }
        }
        else
        {
            Console.WriteLine("\t<none>");
        }

        // Print some summary information
        Console.WriteLine();
        Console.WriteLine("   Nodes: " + nodeCount);
        Console.WriteLine("   Node size: " + nodeSize);
        Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
        Console.WriteLine("     Tasks: " + tasks.Count);
        Console.WriteLine("  Duration: " + stopwatch.Elapsed);
        Console.WriteLine();
        Console.WriteLine("Done!");
        Console.WriteLine();

        // Clean up the resources we've created in the Batch account.
        // BUG FIX: the pool prompt previously compared the raw ReadLine() result against
        // lowercase "n"/"no", so answering "N" or "No" still deleted the pool. Normalize
        // both prompts with ToLower() (and guard against a null ReadLine on EOF).
        Console.WriteLine("Delete job? [yes] no");
        string response = (Console.ReadLine() ?? string.Empty).ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.JobOperations.DeleteJobAsync(job.Id);
        }

        Console.WriteLine("Delete pool? [yes] no");
        response = (Console.ReadLine() ?? string.Empty).ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
        }
    }
}
/// <summary>
/// TopNWords driver: creates (if necessary) a pool and job, stages the task
/// executable and its dependencies to blob storage, submits one task per
/// configured count, waits for completion, dumps each task's stdout/stderr,
/// and finally deletes the pool/job/staging container per configuration.
/// </summary>
public static void JobMain(string[] args)
{
    // Load the job configuration from settings.json next to the executable.
    Settings topNWordsConfiguration = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("settings.json")
        .Build()
        .Get<Settings>();
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
        new StorageCredentials(
            accountSettings.StorageAccountName,
            accountSettings.StorageAccountKey),
        accountSettings.StorageServiceUrl,
        useHttps: true);

    StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
        accountSettings.StorageAccountName,
        accountSettings.StorageAccountKey,
        cloudStorageAccount.BlobEndpoint.ToString());

    using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey)))
    {
        string stagingContainer = null;

        // OSFamily 5 == Windows 2016. You can learn more about os families and versions at:
        // http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
        CloudPool pool = client.PoolOperations.CreatePool(
            topNWordsConfiguration.PoolId,
            targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
            virtualMachineSize: "standard_d1_v2",
            cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"));
        Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);

        // GetAwaiter().GetResult() surfaces the original exception rather than
        // an AggregateException the way .Wait() does.
        GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).GetAwaiter().GetResult();

        try
        {
            Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);

            // Get an empty unbound Job, then commit it to create it in the service.
            CloudJob unboundJob = client.JobOperations.CreateJob();
            unboundJob.Id = topNWordsConfiguration.JobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = topNWordsConfiguration.PoolId };
            unboundJob.Commit();

            // Create file staging objects that represent the executable and its dependent assembly to run as the task.
            // These files are copied to every node before the corresponding task is scheduled to run on that node.
            FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
            FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);

            // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
            // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
            // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
            // by Storage which will lengthen the overall processing time.
            //
            // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
            // complexity.
            string bookFileUri = UploadBookFileToCloudBlob(accountSettings, topNWordsConfiguration.FileName);
            Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

            // Initialize a collection to hold the tasks that will be submitted in their entirety.
            List<CloudTask> tasksToRun = new List<CloudTask>(topNWordsConfiguration.NumberOfTasks);
            for (int i = 1; i <= topNWordsConfiguration.NumberOfTasks; i++)
            {
                CloudTask task = new CloudTask(
                    "task_no_" + i,
                    string.Format(
                        "{0} --Task {1} {2} {3} {4}",
                        TopNWordsExeName,
                        bookFileUri,
                        topNWordsConfiguration.TopWordCount,
                        accountSettings.StorageAccountName,
                        accountSettings.StorageAccountKey));

                // This is the list of files to stage to a container -- for each job, one container is created and
                // files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                // the container).
                task.FilesToStage = new List<IFileStagingProvider>
                {
                    topNWordExe,
                    storageDll
                };

                tasksToRun.Add(task);
            }

            // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
            // The container information is used later on to remove these files from Storage.
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag =
                new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

            // Loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
            // capture the name of the container holding the files so they can be deleted later on if that option
            // was configured in the settings.
            foreach (var fsBagItem in fsArtifactBag)
            {
                IFileStagingArtifact fsValue;
                if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                {
                    SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                    if (stagingArtifact != null)
                    {
                        stagingContainer = stagingArtifact.BlobContainerCreated;
                        Console.WriteLine(
                            "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                            stagingArtifact.BlobContainerCreated);
                    }
                }
            }

            // Get the job to monitor status.
            CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);
            Console.Write("Waiting for tasks to complete ... ");

            // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
            // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
            IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
            client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
            Console.WriteLine("tasks are done.");

            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine();
                Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
            }
        }
        finally
        {
            // Delete the pool that we created
            if (topNWordsConfiguration.ShouldDeletePool)
            {
                Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
            }

            // Delete the job that we created
            if (topNWordsConfiguration.ShouldDeleteJob)
            {
                Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
            }

            // Delete the containers we created
            if (topNWordsConfiguration.ShouldDeleteContainer)
            {
                DeleteContainers(accountSettings, stagingContainer);
            }
        }
    }
}