/// <summary>
/// Runs the reducer task.
/// </summary>
public async Task RunAsync()
{
    // Set up the Batch service credentials used to authenticate with the Batch service.
    BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
        this.accountSettings.BatchServiceUrl,
        this.accountSettings.BatchAccountName,
        this.accountSettings.BatchAccountKey);

    using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
    {
        // Gather each mapper task's output and write it to standard out.
        for (int i = 0; i < this.textSearchSettings.NumberOfMapperTasks; i++)
        {
            string mapperTaskId = Helpers.GetMapperTaskId(i);

            // Download the standard out from each mapper task.
            NodeFile mapperFile = await batchClient.JobOperations.GetNodeFileAsync(
                this.jobId,
                mapperTaskId,
                Batch.Constants.StandardOutFileName);

            string taskFileString = await mapperFile.ReadAsStringAsync();
            Console.WriteLine(taskFileString);
            Console.WriteLine();
        }
    }
}
/// <summary>
/// Waits for all tasks under the specified job to complete and then prints each task's output to the console.
/// </summary>
/// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
/// <param name="jobId">The ID of the job.</param>
/// <returns>An asynchronous <see cref="Task"/> representing the operation.</returns>
private static async Task WaitForJobAndPrintOutputAsync(BatchClient batchClient, string jobId)
{
    Console.WriteLine("Waiting for all tasks to complete on job: {0} ...", jobId);

    // We use the task state monitor to monitor the state of our tasks -- in this case we will wait for them all to complete.
    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();

    List<CloudTask> ourTasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

    // Wait for all tasks to reach the completed state.
    // If the pool is being resized then enough time is needed for the nodes to reach the idle state in order
    // for tasks to run on them.
    bool timedOut = await taskStateMonitor.WhenAllAsync(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(10));

    if (timedOut)
    {
        throw new TimeoutException("Timed out waiting for tasks");
    }

    // Dump task output
    foreach (CloudTask t in ourTasks)
    {
        Console.WriteLine("Task {0}", t.Id);

        // Read the standard out of the task
        NodeFile standardOutFile = await t.GetNodeFileAsync(Constants.StandardOutFileName);
        string standardOutText = await standardOutFile.ReadAsStringAsync();
        Console.WriteLine("Standard out:");
        Console.WriteLine(standardOutText);
        Console.WriteLine();
    }
}
/// <summary>
/// Waits for the specified tasks to complete and then prints each task's output to the console.
/// </summary>
/// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
/// <param name="tasks">The tasks to wait for.</param>
/// <param name="timeout">The timeout. If the tasks have not completed when this time has elapsed, an exception is thrown.</param>
/// <returns>An asynchronous <see cref="Task"/> representing the operation.</returns>
public static async Task WaitForTasksAndPrintOutputAsync(BatchClient batchClient, IEnumerable<CloudTask> tasks, TimeSpan timeout)
{
    // We use the task state monitor to monitor the state of our tasks -- in this case we will wait for them all to complete.
    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();

    // Wait until the tasks are in completed state.
    List<CloudTask> ourTasks = tasks.ToList();
    await taskStateMonitor.WhenAll(ourTasks, TaskState.Completed, timeout).ConfigureAwait(continueOnCapturedContext: false);

    // Dump task output
    foreach (CloudTask t in ourTasks)
    {
        Console.WriteLine("Task {0}", t.Id);

        // Read the standard out of the task
        NodeFile standardOutFile = await t.GetNodeFileAsync(Constants.StandardOutFileName).ConfigureAwait(continueOnCapturedContext: false);
        string standardOutText = await standardOutFile.ReadAsStringAsync().ConfigureAwait(continueOnCapturedContext: false);
        Console.WriteLine("Standard out:");
        Console.WriteLine(standardOutText);

        // Read the standard error of the task
        NodeFile standardErrorFile = await t.GetNodeFileAsync(Constants.StandardErrorFileName).ConfigureAwait(continueOnCapturedContext: false);
        string standardErrorText = await standardErrorFile.ReadAsStringAsync().ConfigureAwait(continueOnCapturedContext: false);
        Console.WriteLine("Standard error:");
        Console.WriteLine(standardErrorText);
        Console.WriteLine();
    }
}
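A minimal usage sketch for the helper above, assuming a BatchClient named batchClient and an existing job whose ID is "myJob"; the task IDs, command lines, and the 5-minute timeout are illustrative values only, not taken from the sample.

// Hypothetical usage: submit two small tasks, then wait for them and dump stdout/stderr.
List<CloudTask> tasksToAdd = new List<CloudTask>
{
    new CloudTask("task1", "cmd /c echo hello"),
    new CloudTask("task2", "cmd /c echo world"),
};

await batchClient.JobOperations.AddTaskAsync("myJob", tasksToAdd);

// Re-list the tasks so we hold bound CloudTask instances, then wait and print their output.
List<CloudTask> boundTasks = await batchClient.JobOperations.ListTasks("myJob").ToListAsync();
await WaitForTasksAndPrintOutputAsync(batchClient, boundTasks, TimeSpan.FromMinutes(5));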
public async Task GetFilePropertiesFromNodeDoesNotThrowOutOfMemoryException()
{
    using BatchClient client = CreateBatchClientWithHandler();

    NodeFile file = await client.PoolOperations.GetNodeFileAsync("Foo", "Bar", "Baz");

    Assert.Equal(StreamUnitTests.StreamLengthInBytes, file.Properties.ContentLength);
}
private static async Task WaitForJobAndPrintOutputAsync(BatchClient batchClient, string jobId)
{
    Console.WriteLine("Waiting for all tasks to complete on job: {0} ...", jobId);

    // 1. Use a task state monitor to monitor the status of your tasks
    var taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
    List<CloudTask> myTasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

    // 2. Wait for all tasks to reach the completed state.
    bool timedOut = await taskStateMonitor.WhenAllAsync(myTasks, TaskState.Completed, TimeSpan.FromMinutes(15));

    if (timedOut)
    {
        throw new TimeoutException("Timed out waiting for tasks.");
    }

    // 3. Dump task output
    foreach (var task in myTasks)
    {
        Console.WriteLine("Task {0}", task.Id);

        // 4. Read the standard out of the task
        NodeFile standardOutFile = await task.GetNodeFileAsync(Constants.StandardOutFileName);
        var standardOutText = await standardOutFile.ReadAsStringAsync();
        Console.WriteLine("Standard out: ");
        Console.WriteLine(standardOutText);
        Console.WriteLine();
    }
}
public void Bug2338301_CheckStreamPositionAfterFileRead()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            JobOperations jobOperations = batchCli.JobOperations;

            string jobId = "Bug2338301Job-" + TestUtilities.GetMyName();

            try
            {
                const string taskId = "hiWorld";

                //
                // Create the job
                //
                CloudJob unboundJob = jobOperations.CreateJob(jobId, new PoolInformation() { PoolId = this.poolFixture.PoolId });
                unboundJob.Commit();

                CloudJob boundJob = jobOperations.GetJob(jobId);
                CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");

                boundJob.AddTask(myTask);

                this.testOutputHelper.WriteLine("Initial job commit()");

                //
                // Wait for task to go to completion
                //
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    TimeSpan.FromMinutes(3));

                CloudTask boundTask = boundJob.GetTask(taskId);

                // Get the task file
                const string fileToGet = "stdout.txt";
                NodeFile file = boundTask.GetNodeFile(fileToGet);

                // Download the file data
                string result = file.ReadAsString();
                Assert.True(result.Length > 0);
            }
            finally
            {
                jobOperations.DeleteJob(jobId);
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
private static void AssertFileListsMatch(List<NodeFile> listOne, List<NodeFile> listTwo)
{
    Assert.Equal(listOne.Count, listTwo.Count);
    Assert.NotEmpty(listOne);
    Assert.NotEmpty(listTwo);

    foreach (NodeFile file in listOne)
    {
        // Find the corresponding file in the other list and ensure they are the same
        NodeFile matchedFile = listTwo.FirstOrDefault(f => f.Path == file.Path);
        Assert.NotNull(matchedFile);

        // Ensure the files match
        Assert.Equal(file.IsDirectory, matchedFile.IsDirectory);

        if (file.Properties == null)
        {
            Assert.Null(file.Properties);
            Assert.Null(matchedFile.Properties);
        }
        else
        {
            Assert.Equal(file.Properties.ContentLength, matchedFile.Properties.ContentLength);
            Assert.Equal(file.Properties.ContentType, matchedFile.Properties.ContentType);
            Assert.Equal(file.Properties.CreationTime, matchedFile.Properties.CreationTime);
            Assert.Equal(file.Properties.LastModified, matchedFile.Properties.LastModified);
        }
    }
}
// Returns whether the encounter was found (and therefore removed).
public bool RemoveEncounter(EncounterNodeData encounter)
{
    GameMasterNode encounterNodeFile = encounter.NodeFile;
    var filePath = GetEncounterFilePath(encounter);

    string key = null;
    foreach (var pair in mEncounters)
    {
        if (pair.Value == filePath)
        {
            key = pair.Key;
            break;
        }
    }

    if (key != null)
    {
        mEncounters.Remove(key);
        mEncounterFiles.Remove(encounterNodeFile);
        (NodeFile.Json["encounters"] as JObject).Property(key).Remove();
        NodeFile.IsModified = true;
        NodeFile.SaveIfNecessary();
        return true;
    }
    else
    {
        return false;
    }
}
public void AddEncounter(EncounterNodeData encounter)
{
    GameMasterNode encounterNodeFile = encounter.NodeFile;
    var filePath = GetEncounterFilePath(encounter);

    mEncounters.Add(encounterNodeFile.Name, filePath);
    mEncounterFiles.Add(encounterNodeFile);

    NodeFile.Json["encounters"][encounterNodeFile.Name] = filePath;
    NodeFile.IsModified = true;
    NodeFile.SaveIfNecessary();
}
public void AddEncounter(EncounterNodeData encounter)
{
    // TODO: get relative path
    GameMasterNode encounterNodeFile = encounter.NodeFile;
    string filePath = encounterNodeFile.Path;
    string selfPath = NodeFile.Directory + '/';
    filePath = "file(" + filePath.Replace(selfPath, "") + ")";

    mEncounters.Add(encounterNodeFile.Name, filePath);
    mEncounterFiles.Add(encounterNodeFile);

    NodeFile.Json["encounters"][encounterNodeFile.Name] = filePath;
    NodeFile.IsModified = true;
    NodeFile.SaveIfNecessary();
}
private static async Task<string> GetFileAsync(CloudTask boundTask, string fileName, bool dumpFile = true)
{
    // Download the requested file from the task.
    NodeFile file = await boundTask.GetNodeFileAsync(fileName);
    string fileContent = await file.ReadAsStringAsync();

    if (dumpFile)
    {
        Console.WriteLine($"Task {boundTask.Id} {fileName}:");
        Console.WriteLine("----------------------------------------");
        Console.WriteLine(fileContent);
    }

    return fileContent;
}
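A hypothetical usage sketch for the helper above; it assumes a bound, completed CloudTask named boundTask and reuses the Batch.Constants file-name constants seen in the other samples here.

// Hypothetical usage: dump stdout to the console, fetch stderr quietly.
string stdOut = await GetFileAsync(boundTask, Batch.Constants.StandardOutFileName);
string stdErr = await GetFileAsync(boundTask, Batch.Constants.StandardErrorFileName, dumpFile: false);
Console.WriteLine("stderr was {0} characters long", stdErr.Length);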
public static void InternalBatchSession(Document document)
{
    if (internalFile == null)
    {
        internalFile = batchVm?.Nodes?.First();
        if (internalFile == null)
        {
            return;
        }
    }
    else
    {
        if (internalFile.NeedCloseFile)
        {
            var adoc = AcadHelper.Doc;
            try
            {
                adoc.CloseAndDiscard();
            }
            catch
            {
                // ignore
            }
        }

        internalFile = internalFile.NextFile;
        if (internalFile == null)
        {
            return;
        }

        internalFile.NeedCloseFile = false;
    }

    internalFile.BatchResult = null;
    internalFile.Color = null;

    var doc = AcadHelper.GetOpenedDocument(internalFile.Name);
    if (doc == null)
    {
        internalFile.NeedCloseFile = true;
        doc = Application.DocumentManager.Open(internalFile.Name);
        Application.DocumentManager.MdiActiveDocument = doc;
    }

    Execute(doc, nameof(Commands._InternalUse_SSBatchModal) + " ");
}
private void UpdateNodeFile(NodeFile nf)
{
    String result = String.Empty;
    String json = SerializeObject<NodeFile>(nf);
    var content = new StringContent(json, Encoding.UTF8, "application/json"); // Set the Content-Type header

    try
    {
        string url = ConfigurationManager.AppSettings.Get("HttpFileUrl");
        HttpClient _HttpClient = new HttpClient();
        HttpResponseMessage response = _HttpClient.PostAsync(url + "filenoderec", content).Result;
        String statusCode = response.StatusCode.ToString();
        result = response.Content.ReadAsStringAsync().Result;
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.StackTrace);
    }
}
void OnFileSaveClick(object sender, EventArgs e)
{
    NodeFile nfi = (NodeFile)(sender as ToolStripItem).Tag;

    SaveFileDialog fd = new SaveFileDialog();
    fd.FileName = nfi.Name;

    if (fd.ShowDialog() == DialogResult.OK)
    {
        try
        {
            nfi.SaveFile(fd.FileName);
            nfi.FileReady = false;
        }
        catch
        {
            MessageBox.Show("An error occurred saving MMS file " + nfi.FullName + " to file " + fd.FileName);
        }
    }
}
public void Update()
{
    var oldNodes = Nodes.ToList();
    Nodes = new List<NodeFile>();

    if (Model.Select?.SheetSet == null)
    {
        return;
    }

    var nodes = new List<NodeFile>();
    NodeFile parentFile = null;
    var sheets = Model.Select.SheetSet.Nodes.SelectMany(s => s.GetSheets()).ToList();

    foreach (var grouping in sheets.GroupBy(g => g.File))
    {
        var nodeFile = new NodeFile
        {
            Name = grouping.Key,
            Nodes = grouping.Select(s => new NodeLayout { Name = s.Layout }).ToList(),
            IsExist = File.Exists(grouping.Key)
        };

        nodes.Add(nodeFile);

        if (parentFile != null)
        {
            parentFile.NextFile = nodeFile;
        }

        parentFile = nodeFile;
    }

    SetToBatch(nodes.ToList<NodeBase>(), oldNodes.ToList<NodeBase>());
    Nodes = nodes;
}
public void TestGetNodeFileByTask()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            JobOperations jobOperations = batchCli.JobOperations;

            string jobId = Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-" + nameof(TestGetNodeFileByTask);

            try
            {
                //
                // Create the job
                //
                CloudJob job = jobOperations.CreateJob(jobId, new PoolInformation());
                job.PoolInformation = new PoolInformation()
                {
                    PoolId = this.poolFixture.PoolId
                };

                this.testOutputHelper.WriteLine("Initial job schedule commit()");
                job.Commit();

                //
                // Wait for the job
                //
                this.testOutputHelper.WriteLine("Waiting for job");
                CloudJob boundJob = jobOperations.GetJob(jobId);

                //
                // Add task to the job
                //
                const string taskId = "T1";
                const string taskMessage = "This is a test";

                this.testOutputHelper.WriteLine("Adding task: {0}", taskId);
                CloudTask task = new CloudTask(taskId, string.Format("cmd /c echo {0}", taskMessage));
                boundJob.AddTask(task);

                //
                // Wait for the task to complete
                //
                this.testOutputHelper.WriteLine("Waiting for the task to complete");
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                // Wait for the task to reach the completed state
                taskStateMonitor.WaitAll(
                    jobOperations.ListTasks(jobId),
                    TaskState.Completed,
                    TimeSpan.FromSeconds(30));

                // Download the data
                this.testOutputHelper.WriteLine("Downloading the stdout for the file");
                NodeFile file = jobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);

                string data = file.ReadAsString();
                this.testOutputHelper.WriteLine("Data: {0}", data);
                Assert.Contains(taskMessage, data);

                // Download the data again using the JobOperations read file content helper
                data = batchCli.JobOperations.CopyNodeFileContentToString(jobId, taskId, Constants.StandardOutFileName);
                this.testOutputHelper.WriteLine("Data: {0}", data);
                Assert.Contains(taskMessage, data);
            }
            finally
            {
                jobOperations.DeleteJob(jobId);
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void Bug1480491NodeFileFileProperties()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "Bug1480491Job-" + TestUtilities.GetMyName();

            try
            {
                const string taskId = "hiWorld";

                //
                // Create the job
                //
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                unboundJob.Commit();

                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");

                boundJob.AddTask(myTask);

                this.testOutputHelper.WriteLine("Initial job commit()");

                //
                // Wait for task to go to completion
                //
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    TimeSpan.FromMinutes(3));

                const int expectedFileSize = 13; // Magic number based on output generated by the task

                //
                // NodeFile by task
                //
                NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);

                this.testOutputHelper.WriteLine("File {0} has content length: {1}", Constants.StandardOutFileName, file.Properties.ContentLength);
                this.testOutputHelper.WriteLine("File {0} has content type: {1}", Constants.StandardOutFileName, file.Properties.ContentType);
                this.testOutputHelper.WriteLine("File {0} has creation time: {1}", Constants.StandardOutFileName, file.Properties.CreationTime);
                this.testOutputHelper.WriteLine("File {0} has last modified time: {1}", Constants.StandardOutFileName, file.Properties.LastModified);

                Assert.Equal(expectedFileSize, file.Properties.ContentLength);
                Assert.Equal("text/plain", file.Properties.ContentType);

                //
                // NodeFile by node
                //
                CloudTask boundTask = boundJob.GetTask(taskId);
                string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                this.testOutputHelper.WriteLine("Task ran on compute node: {0}", computeNodeId);

                List<NodeFile> files = computeNode.ListNodeFiles(recursive: true).ToList();
                foreach (NodeFile nodeFile in files)
                {
                    this.testOutputHelper.WriteLine("Found file: {0}", nodeFile.Path);
                }

                string filePathToGet = string.Format("workitems/{0}/{1}/{2}/{3}", jobId, "job-1", taskId, Constants.StandardOutFileName);
                file = computeNode.GetNodeFile(filePathToGet);

                this.testOutputHelper.WriteLine("File {0} has content length: {1}", filePathToGet, file.Properties.ContentLength);
                this.testOutputHelper.WriteLine("File {0} has content type: {1}", filePathToGet, file.Properties.ContentType);
                this.testOutputHelper.WriteLine("File {0} has creation time: {1}", filePathToGet, file.Properties.CreationTime);
                this.testOutputHelper.WriteLine("File {0} has last modified time: {1}", filePathToGet, file.Properties.LastModified);

                Assert.Equal(expectedFileSize, file.Properties.ContentLength);
                Assert.Equal("text/plain", file.Properties.ContentType);
            }
            finally
            {
                batchCli.JobOperations.DeleteJob(jobId);
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
private static async Task MainAsync()
{
    const int nodeCount = 1;

    string poolId = "TaskDependenciesSamplePool";
    string jobId = "TaskDependenciesJob";

    var settings = Config.LoadAccountSettings();

    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(settings.BatchServiceUrl, settings.BatchAccountName, settings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        var pool = await BatchUtils.CreatePoolIfNotExistAsync(batchClient, poolId, lowPriorityNodes: nodeCount);
        var job = await BatchUtils.CreateJobIfNotExistAsync(batchClient, pool.Id, jobId, usesTaskDependencies: true);

        string taskOutputFile = "$AZ_BATCH_NODE_SHARED_DIR/task_output.txt";

        // Create the collection of tasks that will be added to the job.
        List<CloudTask> tasks = new List<CloudTask>
        {
            // 'Rain' and 'Sun' don't depend on any other tasks
            new CloudTask("Rain", $"/bin/bash -c \"echo Rain >> {taskOutputFile}\""),
            new CloudTask("Sun", $"/bin/bash -c \"echo Sun >> {taskOutputFile}\""),

            // Task 'Flowers' depends on completion of both 'Rain' and 'Sun'
            // before it is run.
            new CloudTask("Flowers", $"/bin/bash -c \"echo Flowers >> {taskOutputFile}\"")
            {
                DependsOn = TaskDependencies.OnIds("Rain", "Sun")
            },

            // Tasks 1, 2, and 3 don't depend on any other tasks. Because
            // we will be using them for a task range dependency, we must
            // specify string representations of integers as their ids.
            new CloudTask("1", $"/bin/bash -c \"echo 1 >> {taskOutputFile}\""),
            new CloudTask("2", $"/bin/bash -c \"echo 2 >> {taskOutputFile}\""),
            new CloudTask("3", $"/bin/bash -c \"echo 3 >> {taskOutputFile}\""),

            // Task dependency on ID range
            new CloudTask("Final", $"/bin/bash -c \"echo Final >> {taskOutputFile}\"")
            {
                DependsOn = TaskDependencies.OnIdRange(1, 3)
            },

            // Task A is the parent task.
            new CloudTask("A", $"/bin/bash -c \"echo A >> {taskOutputFile}\"")
            {
                // Specify exit conditions for task A and their dependency actions.
                ExitConditions = new ExitConditions
                {
                    // If task A exits with a pre-processing error, block any downstream tasks (in this example, task B).
                    PreProcessingError = new ExitOptions
                    {
                        DependencyAction = DependencyAction.Block
                    },
                    // If task A exits with the specified error codes, block any downstream tasks (in this example, task B).
                    ExitCodes = new List<ExitCodeMapping>
                    {
                        new ExitCodeMapping(10, new ExitOptions() { DependencyAction = DependencyAction.Block }),
                        new ExitCodeMapping(20, new ExitOptions() { DependencyAction = DependencyAction.Block })
                    },
                    // If task A succeeds or fails with any other error, any downstream tasks become eligible to run
                    // (in this example, task B).
                    Default = new ExitOptions
                    {
                        DependencyAction = DependencyAction.Satisfy
                    },
                }
            },

            // Task B depends on task A. Whether it becomes eligible to run depends on how task A exits.
            new CloudTask("B", $"/bin/bash -c \"echo B >> {taskOutputFile}\"")
            {
                DependsOn = TaskDependencies.OnId("A")
            },
        };

        // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
        // submission helps to ensure efficient underlying API calls to the Batch service.
        Console.WriteLine("Submitting tasks and awaiting completion...");
        await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

        // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
        // for the nodes within the pool to be created and started if the pool had not yet been created.
        await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
            job.ListTasks(),
            TaskState.Completed,
            TimeSpan.FromMinutes(30));

        Console.WriteLine("All tasks completed.");
        Console.WriteLine();

        // Print the contents of the shared text file modified by the tasks.
        ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id, state");
        IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(poolId, nodeDetail);

        await nodes.ForEachAsync(async (node) =>
        {
            // Check to ensure that the node is Idle before attempting to pull the text file.
            // If the pool was just created, there is a chance that another node completed all
            // of the tasks prior to the other node(s) completing their startup procedure.
            if (node.State == ComputeNodeState.Idle)
            {
                NodeFile sharedTextFile = await node.GetNodeFileAsync("shared/task_output.txt");
                Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Path, node.Id);
                Console.WriteLine("-------------------------------------------");
                Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
            }
        });

        // Clean up the resources we've created in the Batch account
        Console.WriteLine();
        Console.WriteLine("Delete job? [yes] no");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            // Note that deleting the job will execute the job release task if the job was not previously terminated
            await batchClient.JobOperations.DeleteJobAsync(job.Id);
        }

        Console.WriteLine("Delete pool? [yes] no");
        response = Console.ReadLine();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(poolId);
        }
    }
}
public void TestNode_GetListDeleteFiles()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "TestNodeGetListDeleteFiles-" + TestUtilities.GetMyName();

            try
            {
                const string taskId = "hiWorld";
                const string directoryCreationTaskId1 = "dirTask1";
                const string directoryCreationTaskId2 = "dirTask2";
                const string directoryNameOne = "Foo";
                const string directoryNameTwo = "Bar";

                //
                // Create the job
                //
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                unboundJob.Commit();

                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");
                CloudTask directoryCreationTask1 = new CloudTask(directoryCreationTaskId1, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameOne));
                CloudTask directoryCreationTask2 = new CloudTask(directoryCreationTaskId2, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameTwo));

                boundJob.AddTask(myTask);
                boundJob.AddTask(directoryCreationTask1);
                boundJob.AddTask(directoryCreationTask2);

                this.testOutputHelper.WriteLine("Initial job commit()");

                //
                // Wait for task to go to completion
                //
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    TimeSpan.FromMinutes(3));

                CloudTask boundTask = boundJob.GetTask(taskId);

                // Since the compute node name comes back as "Node:<computeNodeId>" we need to split on : to get the actual compute node name
                string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                this.testOutputHelper.WriteLine("Task ran on compute node: {0}", computeNodeId);

                // Ensure that ListFiles done without a recursive option, or with recursive false, return the same values
                {
                    List<NodeFile> filesByComputeNodeRecursiveOmitted = batchCli.PoolOperations.ListNodeFiles(
                        this.poolFixture.PoolId,
                        computeNodeId).ToList();

                    List<NodeFile> filesByComputeNodeRecursiveFalse = batchCli.PoolOperations.ListNodeFiles(
                        this.poolFixture.PoolId,
                        computeNodeId,
                        recursive: false).ToList();

                    AssertFileListsMatch(filesByComputeNodeRecursiveOmitted, filesByComputeNodeRecursiveFalse);
                }

                {
                    List<NodeFile> filesByTaskRecursiveOmitted = batchCli.JobOperations.ListNodeFiles(
                        jobId,
                        taskId).ToList();

                    List<NodeFile> filesByTaskRecursiveFalse = batchCli.JobOperations.ListNodeFiles(
                        jobId,
                        taskId,
                        recursive: false).ToList();

                    AssertFileListsMatch(filesByTaskRecursiveOmitted, filesByTaskRecursiveFalse);
                }

                //
                // List all node files from operations -- recursive true
                //
                // TODO: Detail level?
                List<NodeFile> fileListFromComputeNodeOperations = batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).ToList();

                foreach (NodeFile f in fileListFromComputeNodeOperations)
                {
                    this.testOutputHelper.WriteLine("Found file: {0}", f.Path);
                }

                // Check to make sure the expected folder named "shared" exists
                Assert.Contains("shared", fileListFromComputeNodeOperations.Select(f => f.Path));

                //
                // List all node files from the compute node -- recursive true
                //
                List<NodeFile> fileListFromComputeNode = computeNode.ListNodeFiles(recursive: true).ToList();

                foreach (NodeFile f in fileListFromComputeNode)
                {
                    this.testOutputHelper.WriteLine("Found file: {0}", f.Path);
                }

                // Check to make sure the expected folder named "shared" exists
                Assert.Contains("shared", fileListFromComputeNode.Select(f => f.Path));

                //
                // Get file from operations
                //
                string filePathToGet = fileListFromComputeNode.First(f => !f.IsDirectory.Value && f.Properties.ContentLength > 0).Path;

                this.testOutputHelper.WriteLine("Getting file: {0}", filePathToGet);
                NodeFile computeNodeFileFromManager = batchCli.PoolOperations.GetNodeFile(this.poolFixture.PoolId, computeNodeId, filePathToGet);
                this.testOutputHelper.WriteLine("Successfully retrieved file: {0}", filePathToGet);
                this.testOutputHelper.WriteLine("---- File data: ----");
                var computeNodeFileContentFromManager = computeNodeFileFromManager.ReadAsString();
                this.testOutputHelper.WriteLine(computeNodeFileContentFromManager);
                Assert.NotEmpty(computeNodeFileContentFromManager);

                //
                // Get file directly from operations (bypassing the properties call)
                //
                var computeNodeFileContentDirect = batchCli.PoolOperations.CopyNodeFileContentToString(this.poolFixture.PoolId, computeNodeId, filePathToGet);
                this.testOutputHelper.WriteLine("---- File data: ----");
                this.testOutputHelper.WriteLine(computeNodeFileContentDirect);
                Assert.NotEmpty(computeNodeFileContentDirect);

                //
                // Get file from compute node
                //
                this.testOutputHelper.WriteLine("Getting file: {0}", filePathToGet);
                NodeFile fileFromComputeNode = computeNode.GetNodeFile(filePathToGet);
                this.testOutputHelper.WriteLine("Successfully retrieved file: {0}", filePathToGet);
                this.testOutputHelper.WriteLine("---- File data: ----");
                var computeNodeFileContentFromNode = fileFromComputeNode.ReadAsString();
                this.testOutputHelper.WriteLine(computeNodeFileContentFromNode);
                Assert.NotEmpty(computeNodeFileContentFromNode);

                //
                // Get file from compute node (bypassing the properties call)
                //
                computeNodeFileContentDirect = computeNode.CopyNodeFileContentToString(filePathToGet);
                this.testOutputHelper.WriteLine("---- File data: ----");
                this.testOutputHelper.WriteLine(computeNodeFileContentDirect);
                Assert.NotEmpty(computeNodeFileContentDirect);

                //
                // NodeFile delete
                //
                string filePath = Path.Combine(@"workitems", jobId, "job-1", taskId, Constants.StandardOutFileName);
                NodeFile nodeFile = batchCli.PoolOperations.GetNodeFile(this.poolFixture.PoolId, computeNodeId, filePath);

                nodeFile.Delete();

                // Ensure delete succeeded
                TestUtilities.AssertThrows<BatchException>(() => nodeFile.Refresh());

                // Delete directory
                NodeFile directory = batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).First(item => item.Path.Contains(directoryNameOne));
                Assert.True(directory.IsDirectory);
                TestUtilities.AssertThrows<BatchException>(() => directory.Delete(recursive: false));
                directory.Delete(recursive: true);

                Assert.Null(batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameOne)));

                //
                // PoolManager delete node file
                //
                filePath = Path.Combine(@"workitems", jobId, "job-1", taskId, Constants.StandardErrorFileName);
                NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName);

                batchCli.PoolOperations.DeleteNodeFile(this.poolFixture.PoolId, computeNodeId, filePath);

                // Ensure delete succeeded
                TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName));

                // Delete directory
                directory = batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).First(item => item.Path.Contains(directoryNameTwo));
                Assert.True(directory.IsDirectory);
                TestUtilities.AssertThrows<BatchException>(() => batchCli.PoolOperations.DeleteNodeFile(this.poolFixture.PoolId, computeNodeId, directory.Path, recursive: false));
                batchCli.PoolOperations.DeleteNodeFile(this.poolFixture.PoolId, computeNodeId, directory.Path, recursive: true);

                Assert.Null(batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameTwo)));
            }
            finally
            {
                batchCli.JobOperations.DeleteJob(jobId);
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void Bug230385SupportDeleteNodeFileByTask()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "Bug230285Job-" + TestUtilities.GetMyName();

            try
            {
                const string taskId = "hiWorld";
                const string directoryCreationTaskId1 = "dirTask1";
                const string directoryCreationTaskId2 = "dirTask2";
                const string directoryNameOne = "Foo";
                const string directoryNameTwo = "Bar";
                const string directory2PathOnNode = "wd/" + directoryNameTwo;

                //
                // Create the job
                //
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                unboundJob.Commit();

                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");
                CloudTask directoryCreationTask1 = new CloudTask(directoryCreationTaskId1, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameOne));
                CloudTask directoryCreationTask2 = new CloudTask(directoryCreationTaskId2, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameTwo));

                boundJob.AddTask(myTask);
                boundJob.AddTask(directoryCreationTask1);
                boundJob.AddTask(directoryCreationTask2);

                this.testOutputHelper.WriteLine("Initial job commit()");

                //
                // Wait for task to go to completion
                //
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    TimeSpan.FromMinutes(3));

                //
                // NodeFile delete
                //

                // Delete single file
                NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);
                file.Delete();

                // Ensure delete succeeded
                TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName));

                // Delete directory
                NodeFile directory = batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId1, recursive: true).First(item => item.Path.Contains(directoryNameOne));
                Assert.True(directory.IsDirectory);
                TestUtilities.AssertThrows<BatchException>(() => directory.Delete(recursive: false));
                directory.Delete(recursive: true);

                Assert.Null(batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId1, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameOne)));

                //
                // JobScheduleOperations delete task file
                //
                batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName);
                batchCli.JobOperations.DeleteNodeFile(jobId, taskId, Constants.StandardErrorFileName);

                // Ensure delete succeeded
                TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName));

                // Delete directory
                directory = batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId2, recursive: true).First(item => item.Path.Contains(directoryNameTwo));
                Assert.True(directory.IsDirectory);
                TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.DeleteNodeFile(jobId, directoryCreationTaskId2, directory2PathOnNode, recursive: false));
                batchCli.JobOperations.DeleteNodeFile(jobId, directoryCreationTaskId2, directory2PathOnNode, recursive: true);

                Assert.Null(batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId2, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameTwo)));
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Checks for a task's success or failure, and optionally dumps the output of the task. In the case that the task hit a scheduler or execution error,
/// dumps that information as well.
/// </summary>
/// <param name="boundTask">The task.</param>
/// <param name="dumpStandardOutOnTaskSuccess">True to log the standard output file of the task even if it succeeded. False to not log anything if the task succeeded.</param>
/// <returns>The standard output of the task, or null if it could not be gathered.</returns>
public static async Task<string> CheckForTaskSuccessAsync(CloudTask boundTask, bool dumpStandardOutOnTaskSuccess)
{
    if (boundTask.State == TaskState.Completed)
    {
        string result = null;

        // Check to see if the task has execution information metadata.
        if (boundTask.ExecutionInformation != null)
        {
            // Dump the task scheduling error if there was one.
            if (boundTask.ExecutionInformation.SchedulingError != null)
            {
                TaskSchedulingError schedulingError = boundTask.ExecutionInformation.SchedulingError;
                Console.WriteLine("Task {0} hit scheduling error.", boundTask.Id);
                Console.WriteLine("SchedulingError Code: {0}", schedulingError.Code);
                Console.WriteLine("SchedulingError Message: {0}", schedulingError.Message);
                Console.WriteLine("SchedulingError Category: {0}", schedulingError.Category);
                Console.WriteLine("SchedulingError Details:");

                foreach (NameValuePair detail in schedulingError.Details)
                {
                    Console.WriteLine("{0} : {1}", detail.Name, detail.Value);
                }

                throw new TextSearchException(String.Format("Task {0} failed with a scheduling error", boundTask.Id));
            }

            // Read the content of the output files if the task exited.
            if (boundTask.ExecutionInformation.ExitCode.HasValue)
            {
                Console.WriteLine("Task {0} exit code: {1}", boundTask.Id, boundTask.ExecutionInformation.ExitCode);

                if ((dumpStandardOutOnTaskSuccess && boundTask.ExecutionInformation.ExitCode.Value == 0) || boundTask.ExecutionInformation.ExitCode.Value != 0)
                {
                    // Dump the standard out file of the task.
                    NodeFile taskStandardOut = await boundTask.GetNodeFileAsync(Batch.Constants.StandardOutFileName);

                    Console.WriteLine("Task {0} StdOut:", boundTask.Id);
                    Console.WriteLine("----------------------------------------");
                    string stdOutString = await taskStandardOut.ReadAsStringAsync();
                    result = stdOutString;
                    Console.WriteLine(stdOutString);
                }

                // Check for a nonzero exit code and dump standard error if there was one.
                if (boundTask.ExecutionInformation.ExitCode.Value != 0)
                {
                    NodeFile taskErrorFile = await boundTask.GetNodeFileAsync(Batch.Constants.StandardErrorFileName);

                    Console.WriteLine("Task {0} StdErr:", boundTask.Id);
                    Console.WriteLine("----------------------------------------");
                    string stdErrString = await taskErrorFile.ReadAsStringAsync();
                    Console.WriteLine(stdErrString);

                    throw new TextSearchException(String.Format("Task {0} failed with a nonzero exit code", boundTask.Id));
                }
            }
        }

        return result;
    }
    else
    {
        throw new TextSearchException(String.Format("Task {0} is not completed yet. Current state: {1}", boundTask.Id, boundTask.State));
    }
}
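A hypothetical usage sketch for the helper above; it assumes a BatchClient named batchClient and a job ID in jobId, and treats the TextSearchException thrown by the helper as the failure signal, mirroring the samples in this collection.

// Hypothetical usage: verify every completed task in the job, dumping stdout only for failures.
List<CloudTask> completedTasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

foreach (CloudTask task in completedTasks)
{
    try
    {
        await CheckForTaskSuccessAsync(task, dumpStandardOutOnTaskSuccess: false);
    }
    catch (TextSearchException e)
    {
        Console.WriteLine("Task {0} did not succeed: {1}", task.Id, e.Message);
    }
}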
private void GetFile(List<FInfo> lf)
{
    String result = String.Empty;
    String json = SerializeObject<List<FInfo>>(lf);
    var content = new StringContent(json, Encoding.UTF8, "application/json"); // Set the Content-Type header

    this.lbInfo.Items.Add("正在获取更新文件信息......"); // "Retrieving update file information..."

    try
    {
        string url = ConfigurationManager.AppSettings.Get("HttpFileUrl");
        HttpClient _HttpClient = new HttpClient();
        HttpResponseMessage response = _HttpClient.PostAsync(url + "files/" + Mac + "/" + Ip + "/" + ComputerName, content).Result;
        String statusCode = response.StatusCode.ToString();
        result = response.Content.ReadAsStringAsync().Result;

        string[] files = result.Split(new char[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries);

        DateTimeFormatInfo dtFormat = new DateTimeFormatInfo();
        dtFormat.ShortDatePattern = "yyyy-MM-dd hh:mm:ss";

        string node = files[0];
        for (int i = 1; i < files.Length; i += 2)
        {
            string file = files[i];
            string fileTime = files[i + 1];

            this.lbInfo.Items.Add("[" + (i / 2 + 1) + "/" + files.Length / 2 + "]" + "正在更新文件:" + file); // "Updating file: ..."
            this.lbInfo.SelectedIndex = this.lbInfo.Items.Count - 1;
            this.lbInfo.Update();
            this.progressBar.Value = 100 * (i + 2) / files.Length;
            this.progressBar.Update();

            byte[] fb = TcpGetFile(node, Path, file);

            System.IO.FileInfo fif = new System.IO.FileInfo(Path + "\\" + file);
            fif.LastWriteTime = Convert.ToDateTime(files[i + 1], dtFormat);

            // Upload a record of the update
            string branchId = ConfigurationManager.AppSettings.Get("BranchId");
            if (String.IsNullOrEmpty(branchId))
            {
                branchId = "1";
            }

            NodeFile nf = new NodeFile();
            nf.BranchId = int.Parse(branchId);
            nf.Mac = Mac;
            nf.Ip = Ip;
            nf.CompName = ComputerName;
            nf.FileWp = Path + "\\" + file;
            nf.Version = FileVersionInfo.GetVersionInfo(nf.FileWp).FileVersion;
            nf.FileTime = files[i + 1];
            UpdateNodeFile(nf);
        }

        this.lbInfo.Items.Add("更新文件完成!"); // "File update complete!"

        if (stayFinish)
        {
            this.btnExit.Enabled = true;
        }
        else
        {
            this.Close();
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.StackTrace);
    }
}
public static async Task MainAsync()
{
    const string poolId = "MultiInstanceSamplePool";
    const string jobId = "MultiInstanceSampleJob";
    const string taskId = "MultiInstanceSampleTask";
    const int numberOfNodes = 3;

    // The application package and version to deploy to the compute nodes.
    // It should contain your MPIHelloWorld sample MS-MPI program:
    // https://blogs.technet.microsoft.com/windowshpc/2015/02/02/how-to-compile-and-run-a-simple-ms-mpi-program/
    // And the MSMpiSetup.exe installer:
    // https://www.microsoft.com/download/details.aspx?id=52981
    // Then upload it as an application package:
    // https://azure.microsoft.com/documentation/articles/batch-application-packages/
    const string appPackageId = "MPIHelloWorld";
    const string appPackageVersion = "1.0";

    TimeSpan timeout = TimeSpan.FromMinutes(30);

    // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                   AccountSettings.Default.BatchAccountName,
                                                                   AccountSettings.Default.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Create the pool of compute nodes and the job to which we add the multi-instance task.
        await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
        await CreateJobAsync(batchClient, jobId, poolId);

        // Create the multi-instance task. The MultiInstanceSettings property (configured
        // below) tells Batch to create one primary and several subtasks, the total number
        // of which matches the number of instances you specify in the MultiInstanceSettings.
        // This main task's command line is the "application command," and is executed *only*
        // by the primary, and only after the primary and all subtasks have executed the
        // "coordination command" (the MultiInstanceSettings.CoordinationCommandLine).
        CloudTask multiInstanceTask = new CloudTask(
            id: taskId,
            commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\MPIHelloWorld.exe");

        // Configure the task's MultiInstanceSettings. Specify the number of nodes
        // to allocate to the multi-instance task, and the "coordination command".
        // The CoordinationCommandLine is run by the primary and subtasks, and is
        // used in this sample to start SMPD on the compute nodes.
        multiInstanceTask.MultiInstanceSettings = new MultiInstanceSettings(@"cmd /c start cmd /c smpd.exe -d", numberOfNodes);

        // Submit the task to the job. Batch will take care of creating one primary and
        // enough subtasks to match the total number of nodes allocated to the task,
        // and schedule them for execution on the nodes.
        Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
        await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

        // Get the "bound" version of the multi-instance task.
        CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

        // We use a TaskStateMonitor to monitor the state of our tasks. In this case,
        // we will wait for the task to reach the Completed state.
        Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
        TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
        await taskStateMonitor.WhenAll(new List<CloudTask> { mainTask }, TaskState.Completed, timeout);

        // Refresh the task to obtain up-to-date property values from Batch, such as
        // its current state and information about the node on which it executed.
        await mainTask.RefreshAsync();

        string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
        string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

        Console.WriteLine();
        Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
        Console.WriteLine("---- stdout.txt ----");
        Console.WriteLine(stdOut);
        Console.WriteLine("---- stderr.txt ----");
        Console.WriteLine(stdErr);

        // Need to delay a bit to allow the Batch service to mark the subtasks as Complete
        TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
        Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
        System.Threading.Thread.Sleep(subtaskTimeout);

        Console.WriteLine();
        Console.WriteLine("---- Subtask information ----");

        // Obtain the collection of subtasks for the multi-instance task, and print
        // some information about each.
        IPagedEnumerable<SubtaskInformation> subtasks = mainTask.ListSubtasks();
        await subtasks.ForEachAsync(async (subtask) =>
        {
            Console.WriteLine("subtask: " + subtask.Id);
            Console.WriteLine("\texit code: " + subtask.ExitCode);

            if (subtask.State == SubtaskState.Completed)
            {
                // Obtain the file from the node on which the subtask executed. For normal CloudTasks,
                // we could simply call CloudTask.GetNodeFile(Constants.StandardOutFileName), but the
                // subtasks are not "normal" tasks in Batch, and thus must be handled differently.
                ComputeNode node = await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId,
                                                                                        subtask.ComputeNodeInformation.ComputeNodeId);

                string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                stdOut = await stdOutFile.ReadAsStringAsync();
                stdErr = await stdErrFile.ReadAsStringAsync();

                Console.WriteLine($"\tnode: " + node.Id);
                Console.WriteLine("\tstdout.txt: " + stdOut);
                Console.WriteLine("\tstderr.txt: " + stdErr);
            }
            else
            {
                Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
            }
        });

        // Clean up the resources we've created in the Batch account
        Console.WriteLine();
        Console.Write("Delete job? [yes] no: ");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.JobOperations.DeleteJobAsync(jobId);
        }

        Console.Write("Delete pool? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(poolId);
        }
    }
}
private static async Task MainAsync()
{
    string poolId = "JobPrepReleaseSamplePool";
    string jobId = "JobPrepReleaseSampleJob";

    var settings = Config.LoadAccountSettings();

    // Location of the file that the job tasks will work with, a text file in the
    // node's "shared" directory.
    string taskOutputFile = "$AZ_BATCH_NODE_SHARED_DIR/job_prep_and_release.txt";

    // The job prep task will write the node ID to the text file in the shared directory
    string jobPrepCmdLine = $@"/bin/bash -c ""echo $AZ_BATCH_NODE_ID tasks: > {taskOutputFile}""";

    // Each task then echoes its ID to the same text file
    string taskCmdLine = $@"/bin/bash -c ""echo $AZ_BATCH_TASK_ID >> {taskOutputFile}""";

    // The job release task will then delete the text file from the shared directory
    string jobReleaseCmdLine = $@"/bin/bash -c ""rm {taskOutputFile}""";

    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(settings.BatchServiceUrl, settings.BatchAccountName, settings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        var pool = await BatchUtils.CreatePoolIfNotExistAsync(batchClient, poolId);

        var prepTask = new JobPreparationTask { CommandLine = jobPrepCmdLine };
        var releaseTask = new JobReleaseTask { CommandLine = jobReleaseCmdLine };

        var job = await BatchUtils.CreateJobIfNotExistAsync(batchClient, pool.Id, jobId, prepTask: prepTask, releaseTask: releaseTask);

        // Create the tasks that the job will execute
        List<CloudTask> tasks = new List<CloudTask>();
        for (int i = 1; i <= 8; i++)
        {
            string taskId = "task" + i.ToString().PadLeft(3, '0');
            string taskCommandLine = taskCmdLine;
            CloudTask task = new CloudTask(taskId, taskCommandLine);
            tasks.Add(task);
        }

        // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
        // submission helps to ensure efficient underlying API calls to the Batch service.
        Console.WriteLine("Submitting tasks and awaiting completion...");
        await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

        // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
        // for the nodes within the pool to be created and started if the pool had not yet been created.
        await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
            job.ListTasks(),
            TaskState.Completed,
            TimeSpan.FromMinutes(30));

        Console.WriteLine("All tasks completed.");
        Console.WriteLine();

        // Print the contents of the shared text file modified by the job preparation and other tasks.
        ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id, state");
        IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(poolId, nodeDetail);

        await nodes.ForEachAsync(async (node) =>
        {
            // Check to ensure that the node is Idle before attempting to pull the text file.
            // If the pool was just created, there is a chance that another node completed all
            // of the tasks prior to the other node(s) completing their startup procedure.
            if (node.State == ComputeNodeState.Idle)
            {
                var files = await node.ListNodeFiles().ToListAsync();
                NodeFile sharedTextFile = await node.GetNodeFileAsync("shared/job_prep_and_release.txt");
                Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Path, node.Id);
                Console.WriteLine("-------------------------------------------");
                Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
            }
        });

        // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
        // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
        // thus you need not call Terminate if you typically delete your jobs upon task completion.
        await batchClient.JobOperations.TerminateJobAsync(job.Id);

        // Wait for the job to reach state "Completed." Note that this wait is not typically necessary in
        // production code, but is done here to enable the checking of the release tasks exit code below.
        await BatchUtils.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

        // Print the exit codes of the prep and release tasks by obtaining their execution info
        List<JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();

        foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
        {
            Console.WriteLine();
            Console.WriteLine("{0}: ", info.ComputeNodeId);

            // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
            if (info.JobPreparationTaskExecutionInformation != null)
            {
                Console.WriteLine(" Prep task exit code: {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
            }

            // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
            if (info.JobReleaseTaskExecutionInformation != null)
            {
                Console.WriteLine(" Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
            }
        }

        // Clean up the resources we've created in the Batch account
        Console.WriteLine();
        Console.WriteLine("Delete job? [yes] no");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            // Note that deleting the job will execute the job release task if the job was not previously terminated
            await batchClient.JobOperations.DeleteJobAsync(job.Id);
        }

        Console.WriteLine("Delete pool? [yes] no");
        response = Console.ReadLine();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(poolId);
        }
    }
}
/// <summary>
/// Downloads the contents of the specified file on the ComputeNode.
/// </summary>
/// <param name="filePath">The path to the file.</param>
/// <param name="destinationStream">The destination stream.</param>
/// <returns></returns>
private async Task DownloadFile(string filePath, Stream destinationStream)
{
    NodeFile file = await this.ComputeNode.GetNodeFileAsync(filePath);
    await file.CopyToStreamAsync(destinationStream);
}
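A hypothetical usage sketch for the download helper above, called from within the same class; the remote path (following the workitems/<jobId>/job-1/<taskId> layout used in the other samples here) and the local file name are illustrative values only.

// Hypothetical usage: stream a task's stdout from the node straight to a local file.
using (FileStream localFile = new FileStream(@"C:\temp\stdout.txt", FileMode.Create))
{
    await this.DownloadFile("workitems/myJob/job-1/myTask/stdout.txt", localFile);
}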
private void CheckExistFileExec(NodeFile nodeFile)
{
    nodeFile.IsExist = File.Exists(nodeFile.Name);
}
public static async Task MainAsync()
{
    const string poolId = "MultiInstanceSamplePool";
    const string jobId = "MultiInstanceSampleJob";
    const string taskId = "MultiInstanceSampleTask";
    const int numberOfNodes = 5;

    // Name of the application package uploaded to Azure along with MSMpiSetup
    const string appPackageId = "Parallel";
    const string appPackageVersion = "1.0";

    TimeSpan timeout = TimeSpan.FromMinutes(15);

    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    // Batch account credentials, configured so we can connect to the account
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        accountSettings.BatchServiceUrl,
        accountSettings.BatchAccountName,
        accountSettings.BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred))
    {
        // Create the pool of compute nodes and the job to which we add the multi-instance task.
        await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
        await CreateJobAsync(batchClient, jobId, poolId);

        // Batch creates one primary task and several subtasks
        CloudTask multiInstanceTask = new CloudTask(
            id: taskId,
            commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\ParallelMpiApp.exe");

        // SPMD coordination command = multiple separate processors run the same program at the same time
        multiInstanceTask.MultiInstanceSettings = new MultiInstanceSettings(@"cmd /c start cmd /c smpd.exe -d", numberOfNodes);

        // Submit the task; one primary and enough subtasks are created to match the node count,
        // and their execution is scheduled on the nodes
        Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
        await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

        // Bound version of the task
        CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

        // Monitor task state; wait until the task is completed
        Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
        TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
        await taskStateMonitor.WhenAll(new List<CloudTask> { mainTask }, TaskState.Completed, timeout);

        // Refresh the task
        await mainTask.RefreshAsync();

        string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
        string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

        Console.WriteLine();
        Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
        Console.WriteLine("---- stdout.txt ----");
        Console.WriteLine(stdOut);
        Console.WriteLine("---- stderr.txt ----");
        Console.WriteLine(stdErr);

        // Wait a few seconds so the subtasks have time to be marked complete
        TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
        Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
        System.Threading.Thread.Sleep(subtaskTimeout);

        Console.WriteLine();
        Console.WriteLine("---- Subtask information ----");

        // Collection of subtasks; print information about each one
        IPagedEnumerable<SubtaskInformation> subtasks = mainTask.ListSubtasks();
        await subtasks.ForEachAsync(async (subtask) =>
        {
            Console.WriteLine("subtask: " + subtask.Id);
            Console.WriteLine("\texit code: " + subtask.ExitCode);

            if (subtask.State == SubtaskState.Completed)
            {
                // Retrieve the output files from the node that ran the subtask
                ComputeNode node = await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId,
                                                                                        subtask.ComputeNodeInformation.ComputeNodeId);

                string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                stdOut = await stdOutFile.ReadAsStringAsync();
                stdErr = await stdErrFile.ReadAsStringAsync();

                Console.WriteLine($"\tnode: " + node.Id);
                Console.WriteLine("\tstdout.txt: " + stdOut);
                Console.WriteLine("\tstderr.txt: " + stdErr);
            }
            else
            {
                Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
            }
        });

        // Delete the resources we created (pool and job) so they don't have to be cleaned up manually
        Console.WriteLine();
        Console.Write("Delete job? [yes] no: ");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.JobOperations.DeleteJobAsync(jobId);
        }

        Console.Write("Delete pool? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(poolId);
        }
    }
}
private async System.Threading.Tasks.Task DownloadFileAsync(NodeFile nodeFile, string localDownloadTargetPath = null, bool isNodeFile = true)
{
    string file = nodeFile.Name;
    string fileName = null;

    try
    {
        bool? result;

        if (string.IsNullOrEmpty(localDownloadTargetPath))
        {
            // Configure save file dialog box
            Microsoft.Win32.SaveFileDialog saveFileDlg = new Microsoft.Win32.SaveFileDialog();
            saveFileDlg.FileName = Path.GetFileName(file); // Default file name
            saveFileDlg.Filter = "All files (*.*)|*.*";    // Filter files by extension

            // Show save file dialog box
            result = saveFileDlg.ShowDialog();
            if (result == true)
            {
                fileName = saveFileDlg.FileName;
            }
        }
        else
        {
            fileName = Path.Combine(localDownloadTargetPath, Path.GetFileName(file));
            result = true;
        }

        if (result == true)
        {
            // Save document
            if (nodeFile.IsDirectory.HasValue && nodeFile.IsDirectory.Value)
            {
                await this.SelectedComputeNode.DownloadFilesAsync(file, fileName);
            }
            else
            {
                using (FileStream destStream = new FileStream(fileName, FileMode.Create))
                {
                    if (isNodeFile)
                    {
                        await this.SelectedComputeNode.DownloadFileAsync(file, destStream);
                    }
                    else
                    {
                        await this.SelectedTask.GetTaskFileAsync(file, destStream);
                    }
                }

                // Open text files
                if (fileName.EndsWith(".txt", StringComparison.OrdinalIgnoreCase))
                {
                    Process.Start(fileName);
                }
            }
        }
    }
    catch (Exception e)
    {
        if (!string.IsNullOrEmpty(fileName))
        {
            if (File.Exists(fileName))
            {
                File.Delete(fileName); // Delete the file if we have hit an exception
            }
        }

        Messenger.Default.Send(new GenericDialogMessage(e.ToString()));
    }
}
private static async Task MainAsync(string[] args)
{
    const string poolId = "JobPrepReleaseSamplePool";
    const string jobId = "JobPrepReleaseSampleJob";

    // Location of the file that the job tasks will work with, a text file in the
    // node's "shared" directory.
    const string taskOutputFile = "%AZ_BATCH_NODE_SHARED_DIR%\\job_prep_and_release.txt";

    // The job prep task will write the node ID to the text file in the shared directory
    const string jobPrepCmdLine = "cmd /c echo %AZ_BATCH_NODE_ID% tasks: >" + taskOutputFile;

    // Each task then echoes its ID to the same text file
    const string taskCmdLine = "cmd /c echo %AZ_BATCH_TASK_ID% >> " + taskOutputFile;

    // The job release task will then delete the text file from the shared directory
    const string jobReleaseCmdLine = "cmd /c del " + taskOutputFile;

    // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
        AccountSettings.Default.BatchServiceUrl,
        AccountSettings.Default.BatchAccountName,
        AccountSettings.Default.BatchAccountKey);

    // Initialize the BatchClient for access to your Batch account
    using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
    {
        // Create a CloudPool (or obtain an existing pool with the specified ID)
        CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient, poolId, "small", 2, 1);

        // Create a CloudJob (or obtain an existing job with the specified ID)
        CloudJob job = await SampleHelpers.GetJobIfExistAsync(batchClient, jobId);
        if (job == null)
        {
            Console.WriteLine("Job {0} not found, creating...", jobId);
            CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = poolId });

            // Configure and assign the job preparation task
            unboundJob.JobPreparationTask = new JobPreparationTask { CommandLine = jobPrepCmdLine };

            // Configure and assign the job release task
            unboundJob.JobReleaseTask = new JobReleaseTask { CommandLine = jobReleaseCmdLine };

            await unboundJob.CommitAsync();

            // Get the bound version of the job with all of its properties populated
            job = await batchClient.JobOperations.GetJobAsync(jobId);
        }

        // Create the tasks that the job will execute
        List<CloudTask> tasks = new List<CloudTask>();
        for (int i = 1; i <= 8; i++)
        {
            string taskId = "task" + i.ToString().PadLeft(3, '0');
            string taskCommandLine = taskCmdLine;
            CloudTask task = new CloudTask(taskId, taskCommandLine);
            tasks.Add(task);
        }

        // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
        // submission helps to ensure efficient underlying API calls to the Batch service.
        Console.WriteLine("Submitting tasks and awaiting completion...");
        await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

        // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
        // for the nodes within the pool to be created and started if the pool had not yet been created.
        if (await batchClient.Utilities.CreateTaskStateMonitor().WhenAllAsync(job.ListTasks(),
                                                                              TaskState.Completed,
                                                                              TimeSpan.FromMinutes(30)))
        {
            Console.WriteLine("Operation timed out while waiting for submitted tasks to reach state {0}", TaskState.Completed);
            return;
        }
        else
        {
            Console.WriteLine("All tasks completed.");
            Console.WriteLine();
        }

        // Print the contents of the shared text file modified by the job preparation and other tasks.
        ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id, state");
        IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(pool.Id, nodeDetail);
        await nodes.ForEachAsync(async (node) =>
        {
            // Check to ensure that the node is Idle before attempting to pull the text file.
            // If the pool was just created, there is a chance that another node completed all
            // of the tasks prior to the other node(s) completing their startup procedure.
            if (node.State == ComputeNodeState.Idle)
            {
                NodeFile sharedTextFile = await node.GetNodeFileAsync("shared\\job_prep_and_release.txt");
                Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Name, node.Id);
                Console.WriteLine("-------------------------------------------");
                Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
            }
        });

        // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
        // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
        // thus you need not call Terminate if you typically delete your jobs upon task completion.
        await batchClient.JobOperations.TerminateJobAsync(job.Id);

        // Wait for the job to reach the "Completed" state. Note that this wait is not typically necessary
        // in production code, but is done here to enable the checking of the release tasks' exit codes below.
        await ArticleHelpers.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

        // Print the exit codes of the prep and release tasks by obtaining their execution info
        List<JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo =
            await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();

        foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
        {
            Console.WriteLine();
            Console.WriteLine("{0}: ", info.ComputeNodeId);

            // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
            if (info.JobPreparationTaskExecutionInformation != null)
            {
                Console.WriteLine("  Prep task exit code:    {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
            }

            // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
            if (info.JobReleaseTaskExecutionInformation != null)
            {
                Console.WriteLine("  Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
            }
        }

        // Clean up the resources we've created in the Batch account
        Console.WriteLine();
        Console.WriteLine("Delete job? [yes] no");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            // Note that deleting the job will execute the job release task if the job was not previously terminated
            await batchClient.JobOperations.DeleteJobAsync(job.Id);
        }

        Console.WriteLine("Delete pool? [yes] no");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no")
        {
            await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
        }
    }
}
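The ArticleHelpers and SampleHelpers methods called above live in the samples' shared projects and are not reproduced in this section. The sketches below indicate roughly how GetJobIfExistAsync and WaitForJobToReachStateAsync might be implemented; the error-handling details and the polling interval are assumptions, not the shipped helper code.

// Sketch: return the bound job if it exists, or null if the service reports JobNotFound.
public static async Task<CloudJob> GetJobIfExistAsync(BatchClient batchClient, string jobId)
{
    try
    {
        return await batchClient.JobOperations.GetJobAsync(jobId);
    }
    catch (BatchException batchException)
    {
        // Rethrow anything that is not a "job not found" error (assumed handling).
        if (batchException.RequestInformation?.BatchError?.Code != BatchErrorCodeStrings.JobNotFound)
        {
            throw;
        }
    }

    return null;
}

// Sketch: poll the job until it reaches the target state or the timeout elapses (10s interval assumed).
public static async Task WaitForJobToReachStateAsync(BatchClient batchClient, string jobId, JobState targetState, TimeSpan timeout)
{
    DateTime deadline = DateTime.UtcNow.Add(timeout);
    CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

    while (job.State != targetState)
    {
        if (DateTime.UtcNow > deadline)
        {
            throw new TimeoutException(string.Format("Timed out waiting for job {0} to reach state {1}", jobId, targetState));
        }

        await Task.Delay(TimeSpan.FromSeconds(10));
        await job.RefreshAsync();
    }
}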
/// <summary>
/// Downloads the contents of the specified file of the task.
/// </summary>
/// <param name="filePath">The path to the file.</param>
/// <param name="destinationStream">The destination stream.</param>
/// <returns>An asynchronous <see cref="Task"/> representing the operation.</returns>
private async System.Threading.Tasks.Task DownloadTaskFile(string filePath, Stream destinationStream)
{
    NodeFile file = await this.Task.GetNodeFileAsync(filePath);
    await file.CopyToStreamAsync(destinationStream);
}
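Because the caller supplies the destination stream, this method works equally well for saving to disk or for buffering in memory. A minimal usage sketch follows; the wrapper method name and the UTF-8 decoding of the task output are assumptions.

// Minimal usage sketch (same view model): read the task's stdout.txt into a string.
// "PrintTaskStdOutAsync" is an illustrative name; UTF-8 output encoding is assumed.
private async System.Threading.Tasks.Task PrintTaskStdOutAsync()
{
    using (MemoryStream buffer = new MemoryStream())
    {
        await this.DownloadTaskFile(Constants.StandardOutFileName, buffer);
        Console.WriteLine(System.Text.Encoding.UTF8.GetString(buffer.ToArray()));
    }
}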