public void TestJobScheduleVerbs()
{
    // Exercises the job-schedule lifecycle verbs (Disable/Enable/Terminate/Delete),
    // both through the bound CloudJobSchedule instance and through
    // JobScheduleOperations, asserting the expected state after each transition.
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobScheduleId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-TestEnableDisableDeleteJobSchedule";
            try
            {
                // DoNotRunAfter keeps the schedule bounded so it cannot linger past the test run.
                Schedule schedule = new Schedule() { DoNotRunAfter = DateTime.UtcNow.Add(TimeSpan.FromDays(1)) };
                JobSpecification jobSpecification = new JobSpecification(new PoolInformation() { PoolId = "DummyPool" });
                CloudJobSchedule unboundJobSchedule = batchCli.JobScheduleOperations.CreateJobSchedule(jobScheduleId, schedule, jobSpecification);
                unboundJobSchedule.Commit();

                CloudJobSchedule boundJobSchedule = batchCli.JobScheduleOperations.GetJobSchedule(jobScheduleId);

                // Disable the job schedule via instance
                boundJobSchedule.Disable();
                boundJobSchedule.Refresh();

                Assert.NotNull(boundJobSchedule.State);
                Assert.Equal(JobScheduleState.Disabled, boundJobSchedule.State);

                // Enable the job schedule via instance
                boundJobSchedule.Enable();
                boundJobSchedule.Refresh();

                Assert.NotNull(boundJobSchedule.State);
                Assert.Equal(JobScheduleState.Active, boundJobSchedule.State);

                // Disable the job schedule via operations
                batchCli.JobScheduleOperations.DisableJobSchedule(jobScheduleId);
                boundJobSchedule.Refresh();

                Assert.NotNull(boundJobSchedule.State);
                Assert.Equal(JobScheduleState.Disabled, boundJobSchedule.State);

                // Enable the job schedule via operations (the original comment said
                // "via instance" here, but this path goes through JobScheduleOperations)
                batchCli.JobScheduleOperations.EnableJobSchedule(jobScheduleId);
                boundJobSchedule.Refresh();

                Assert.NotNull(boundJobSchedule.State);
                Assert.Equal(JobScheduleState.Active, boundJobSchedule.State);

                // Terminate the job schedule; termination is asynchronous server-side,
                // so either Terminating or Completed is acceptable here.
                batchCli.JobScheduleOperations.TerminateJobSchedule(jobScheduleId);
                boundJobSchedule.Refresh();

                Assert.True(boundJobSchedule.State == JobScheduleState.Completed || boundJobSchedule.State == JobScheduleState.Terminating);

                // Delete the job schedule
                boundJobSchedule.Delete();

                // Wait for the deletion to take effect: Refresh must eventually fail
                // with JobScheduleNotFound once the service has removed the schedule.
                BatchException be = TestUtilities.AssertThrowsEventuallyAsync<BatchException>(() => boundJobSchedule.RefreshAsync(), TimeSpan.FromSeconds(30)).Result;
                Assert.NotNull(be.RequestInformation);
                Assert.NotNull(be.RequestInformation.BatchError);
                Assert.Equal("JobScheduleNotFound", be.RequestInformation.BatchError.Code);
            }
            finally
            {
                // clean up
                TestUtilities.DeleteJobScheduleIfExistsAsync(batchCli, jobScheduleId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void TestExitConditionsAreBeingRoundTrippedCorrectly()
{
    // Sets every ExitConditions field (exit codes, exit-code ranges, pre-processing
    // error, file-upload error, default) on an unbound task, then reads the task
    // back from the service and asserts each value survived the round trip.
    Action test = () =>
    {
        using (BatchClient client = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            // Create a job
            string jobId = Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-TestExitConditionsAreBeingRoundTrippedCorrectly";
            string taskId = "task-id-1";
            try
            {
                CloudJob boundJob = null;
                {
                    // need a bound job/task for the tests so set one up;
                    // PerformExitOptionsJobAction makes the service honor the task's JobAction settings
                    boundJob = CreateBoundJob(client, jobId, j => { j.OnTaskFailure = OnTaskFailure.PerformExitOptionsJobAction; });
                    CloudTask cloudTask = new CloudTask(taskId, "cmd /c exit 2");
                    cloudTask.ExitConditions = new ExitConditions
                    {
                        ExitCodes = new List<ExitCodeMapping> { new ExitCodeMapping(1, new ExitOptions { JobAction = JobAction.None }) },
                        ExitCodeRanges = new List<ExitCodeRangeMapping> { new ExitCodeRangeMapping(2, 4, new ExitOptions { JobAction = JobAction.Disable }) },
                        PreProcessingError = new ExitOptions { JobAction = JobAction.Terminate },
                        FileUploadError = new ExitOptions { JobAction = JobAction.Terminate },
                        Default = new ExitOptions { JobAction = JobAction.Terminate },
                    };
                    boundJob.AddTask(cloudTask);
                    boundJob.Refresh();

                    Assert.Equal(OnTaskFailure.PerformExitOptionsJobAction, boundJob.OnTaskFailure);

                    // Read the task back from the service and verify each field round-tripped
                    CloudTask boundTask = client.JobOperations.GetTask(jobId, taskId);

                    Assert.Equal(JobAction.None, boundTask.ExitConditions.ExitCodes.First().ExitOptions.JobAction);
                    Assert.Equal(1, boundTask.ExitConditions.ExitCodes.First().Code);

                    var exitCodeRangeMappings = boundTask.ExitConditions.ExitCodeRanges;
                    Assert.Equal(2, exitCodeRangeMappings.First().Start);
                    Assert.Equal(4, exitCodeRangeMappings.First().End);
                    Assert.Equal(JobAction.Disable, exitCodeRangeMappings.First().ExitOptions.JobAction);
                    Assert.Equal(JobAction.Terminate, boundTask.ExitConditions.PreProcessingError.JobAction);
                    Assert.Equal(JobAction.Terminate, boundTask.ExitConditions.FileUploadError.JobAction);
                    Assert.Equal(JobAction.Terminate, boundTask.ExitConditions.Default.JobAction);
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(client, jobId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void Bug1996130_JobTaskVerbsFailAfterDoubleRefresh()
{
    // Regression test for Bug 1996130: verbs (Delete, etc.) on a CloudTask/CloudJob
    // used to fail after the instance had been Refresh()ed twice, because the second
    // refresh lost internal state needed to route the request.
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jobId = "Bug1996130Job-" + TestUtilities.GetMyName();
            try
            {
                // get a job/task to test. use workflow
                CloudJob boundJob = null;
                {
                    // need a bound job/task for the tests so set one up
                    CloudJob tsh = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                    tsh.PoolInformation.PoolId = this.poolFixture.PoolId;
                    tsh.Commit();

                    boundJob = batchCli.JobOperations.GetJob(jobId);
                    boundJob.AddTask(new CloudTask("Bug1996130_task", "cmd /c hostname"));
                }

                // test task double refresh
                {
                    // get the task
                    CloudTask boundTask = batchCli.JobOperations.ListTasks(jobId).First();

                    // double refresh
                    // (this branch of the bug was actually fixed in the other double-refresh checkin by matthchr)
                    boundTask.Refresh();
                    boundTask.Refresh();

                    // do verbs: these must still work after the double refresh
                    boundTask.Refresh();
                    boundTask.Delete();

                    Thread.Sleep(5000); // give server time to do its deed

                    List<CloudTask> tasks = batchCli.JobOperations.ListTasks(jobId).ToList();

                    // confirm delete succeeded
                    Assert.Empty(tasks);
                }

                // test job double refresh and verbs
                {
                    boundJob = batchCli.JobOperations.GetJob(jobId);

                    // double refresh to taint the instance... lost path variable
                    boundJob.Refresh();
                    boundJob.Refresh();

                    // this used to fail/throw before the fix
                    boundJob.Refresh();

                    // yet another verb that must succeed after the double refresh
                    boundJob.Delete();

                    CloudJob job = batchCli.JobOperations.ListJobs().ToList().FirstOrDefault(j => j.Id == jobId);

                    // confirm job delete succeeded (job gone, or still draining in Deleting state)
                    Assert.True(job == null || (JobState.Deleting == job.State));
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void TestNode_GetListDeleteFiles()
{
    // End-to-end coverage of node-file APIs: listing (recursive and not, via pool
    // operations, job operations, and the ComputeNode instance), reading file
    // content (via NodeFile and the direct copy-to-string path), and deleting
    // files and directories (via NodeFile and via PoolOperations).
    void test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
        string jobId = "TestNodeGetListDeleteFiles-" + TestUtilities.GetMyName();
        try
        {
            const string taskId = "hiWorld";
            const string directoryCreationTaskId1 = "dirTask1";
            const string directoryCreationTaskId2 = "dirTask2";
            const string directoryNameOne = "Foo";
            const string directoryNameTwo = "Bar";

            //
            // Create the job with one trivial task and two directory-creating tasks
            //
            CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
            unboundJob.PoolInformation.PoolId = poolFixture.PoolId;
            unboundJob.Commit();

            CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
            CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");
            CloudTask directoryCreationTask1 = new CloudTask(directoryCreationTaskId1, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameOne));
            CloudTask directoryCreationTask2 = new CloudTask(directoryCreationTaskId2, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameTwo));

            boundJob.AddTask(myTask);
            boundJob.AddTask(directoryCreationTask1);
            boundJob.AddTask(directoryCreationTask2);

            testOutputHelper.WriteLine("Initial job commit()");

            //
            // Wait for tasks to go to completion
            //
            Utilities utilities = batchCli.Utilities;
            TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();
            taskStateMonitor.WaitAll(
                boundJob.ListTasks(),
                Microsoft.Azure.Batch.Common.TaskState.Completed,
                TimeSpan.FromMinutes(3));

            CloudTask boundTask = boundJob.GetTask(taskId);

            // Since the compute node name comes back as "Node:<computeNodeId>" we need to split on : to get the actual compute node name
            string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];
            ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(poolFixture.PoolId, computeNodeId);

            testOutputHelper.WriteLine("Task ran on compute node: {0}", computeNodeId);

            // Ensure that ListFiles done without a recursive option, or with recursive false, return the same values
            {
                List<NodeFile> filesByComputeNodeRecursiveOmitted = batchCli.PoolOperations.ListNodeFiles(
                    poolFixture.PoolId, computeNodeId).ToList();

                List<NodeFile> filesByComputeNodeRecursiveFalse = batchCli.PoolOperations.ListNodeFiles(
                    poolFixture.PoolId, computeNodeId, recursive: false).ToList();

                AssertFileListsMatch(filesByComputeNodeRecursiveOmitted, filesByComputeNodeRecursiveFalse);
            }

            {
                List<NodeFile> filesByTaskRecursiveOmitted = batchCli.JobOperations.ListNodeFiles(
                    jobId, taskId).ToList();

                List<NodeFile> filesByTaskRecursiveFalse = batchCli.JobOperations.ListNodeFiles(
                    jobId, taskId, recursive: false).ToList();

                AssertFileListsMatch(filesByTaskRecursiveOmitted, filesByTaskRecursiveFalse);
            }

            //
            // List all node files from operations -- recursive true
            //
            //TODO: Detail level?
            List<NodeFile> fileListFromComputeNodeOperations = batchCli.PoolOperations.ListNodeFiles(poolFixture.PoolId, computeNodeId, recursive: true).ToList();
            foreach (NodeFile f in fileListFromComputeNodeOperations)
            {
                testOutputHelper.WriteLine("Found file: {0}", f.Path);
            }

            // Check to make sure the expected folder named "shared" exists
            Assert.Contains("shared", fileListFromComputeNodeOperations.Select(f => f.Path));

            //
            // List all node files from the compute node -- recursive true
            //
            List<NodeFile> fileListFromComputeNode = computeNode.ListNodeFiles(recursive: true).ToList();

            // Fixed: this loop previously iterated fileListFromComputeNodeOperations
            // (copy/paste bug), so the per-node listing was never actually logged.
            foreach (NodeFile f in fileListFromComputeNode)
            {
                testOutputHelper.WriteLine("Found file: {0}", f.Path);
            }

            // Check to make sure the expected folder named "shared" exists
            Assert.Contains("shared", fileListFromComputeNode.Select(f => f.Path));

            //
            // Get file from operations
            //
            string filePathToGet = fileListFromComputeNode.First(f => !f.IsDirectory.Value && f.Properties.ContentLength > 0).Path;
            testOutputHelper.WriteLine("Getting file: {0}", filePathToGet);
            NodeFile computeNodeFileFromManager = batchCli.PoolOperations.GetNodeFile(poolFixture.PoolId, computeNodeId, filePathToGet);
            testOutputHelper.WriteLine("Successfully retrieved file: {0}", filePathToGet);
            testOutputHelper.WriteLine("---- File data: ----");
            var computeNodeFileContentFromManager = computeNodeFileFromManager.ReadAsString();
            testOutputHelper.WriteLine(computeNodeFileContentFromManager);
            Assert.NotEmpty(computeNodeFileContentFromManager);

            //
            // Get file directly from operations (bypassing the properties call)
            //
            var computeNodeFileContentDirect = batchCli.PoolOperations.CopyNodeFileContentToString(poolFixture.PoolId, computeNodeId, filePathToGet);
            testOutputHelper.WriteLine("---- File data: ----");
            testOutputHelper.WriteLine(computeNodeFileContentDirect);
            Assert.NotEmpty(computeNodeFileContentDirect);

            //
            // Get file from compute node
            //
            testOutputHelper.WriteLine("Getting file: {0}", filePathToGet);
            NodeFile fileFromComputeNode = computeNode.GetNodeFile(filePathToGet);
            testOutputHelper.WriteLine("Successfully retrieved file: {0}", filePathToGet);
            testOutputHelper.WriteLine("---- File data: ----");
            var computeNodeFileContentFromNode = fileFromComputeNode.ReadAsString();
            testOutputHelper.WriteLine(computeNodeFileContentFromNode);
            Assert.NotEmpty(computeNodeFileContentFromNode);

            //
            // Get file from compute node (bypassing the properties call)
            //
            computeNodeFileContentDirect = computeNode.CopyNodeFileContentToString(filePathToGet);
            testOutputHelper.WriteLine("---- File data: ----");
            testOutputHelper.WriteLine(computeNodeFileContentDirect);
            Assert.NotEmpty(computeNodeFileContentDirect);

            //
            // NodeFile delete
            //
            string filePath = Path.Combine(@"workitems", jobId, "job-1", taskId, Constants.StandardOutFileName);
            NodeFile nodeFile = batchCli.PoolOperations.GetNodeFile(poolFixture.PoolId, computeNodeId, filePath);
            nodeFile.Delete();

            // Ensure delete succeeded
            TestUtilities.AssertThrows<BatchException>(() => nodeFile.Refresh());

            // Delete directory: non-recursive delete of a non-empty directory must throw,
            // recursive delete must succeed and remove it from the listing
            NodeFile directory = batchCli.PoolOperations.ListNodeFiles(poolFixture.PoolId, computeNodeId, recursive: true).First(item => item.Path.Contains(directoryNameOne));
            Assert.True(directory.IsDirectory);
            TestUtilities.AssertThrows<BatchException>(() => directory.Delete(recursive: false));
            directory.Delete(recursive: true);
            Assert.Null(batchCli.PoolOperations.ListNodeFiles(poolFixture.PoolId, computeNodeId, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameOne)));

            //
            // PoolManager delete node file
            //
            filePath = Path.Combine(@"workitems", jobId, "job-1", taskId, Constants.StandardErrorFileName);

            // confirm the file exists before deleting it (result intentionally discarded;
            // the original bound it to an unused local)
            batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName);
            batchCli.PoolOperations.DeleteNodeFile(poolFixture.PoolId, computeNodeId, filePath);

            // Ensure delete succeeded
            TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName));

            // Delete directory via PoolOperations, same non-recursive/recursive contract as above
            directory = batchCli.PoolOperations.ListNodeFiles(poolFixture.PoolId, computeNodeId, recursive: true).First(item => item.Path.Contains(directoryNameTwo));
            Assert.True(directory.IsDirectory);
            TestUtilities.AssertThrows<BatchException>(() => batchCli.PoolOperations.DeleteNodeFile(poolFixture.PoolId, computeNodeId, directory.Path, recursive: false));
            batchCli.PoolOperations.DeleteNodeFile(poolFixture.PoolId, computeNodeId, directory.Path, recursive: true);
            Assert.Null(batchCli.PoolOperations.ListNodeFiles(poolFixture.PoolId, computeNodeId, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameTwo)));
        }
        finally
        {
            batchCli.JobOperations.DeleteJob(jobId);
        }
    }

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void TestOMJobSpecAndRelease()
{
    // Creates a job schedule carrying a JobPreparationTask and a JobReleaseTask,
    // then verifies the prep/release settings round-trip on the bound schedule,
    // on the job spawned by the schedule, and after an update commit.
    Action test = () =>
    {
        StagingStorageAccount stagingCreds = TestUtilities.GetStorageCredentialsFromEnvironment();
        using (BatchClient client = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jsId = "JobPrepAndRelease-" + /* "OM-static-c" */ "dynamic-" + CraftTimeString() + "-" + TestUtilities.GetMyName();
            try
            {
                // increase request timeout interceptor
                Protocol.RequestInterceptor increaseTimeoutInterceptor = new Protocol.RequestInterceptor((x) =>
                {
                    this.testOutputHelper.WriteLine("TestOMJobSpecAndRelease: setting request timeout. Request type: " + x.GetType().ToString() + ", ClientRequestID: " + x.Options.ClientRequestId);
                    var timeoutOptions = x.Options as Protocol.Models.ITimeoutOptions;
                    timeoutOptions.Timeout = 5 * 60;
                });

                // lets use a timer too
                CallTimerViaInterceptors timerInterceptor = new CallTimerViaInterceptors();

                // seeing client side timeouts... so increase the durations on every call
                client.CustomBehaviors.Add(increaseTimeoutInterceptor);
                // add a call timer spammer/logger
                client.CustomBehaviors.Add(timerInterceptor.ReqInterceptor);

                // get some resource files to play with
                IList<ResourceFile> resFiles = UploadFilesMakeResFiles(stagingCreds);

                // create job schedule with prep/release
                {
                    CloudJobSchedule unboundJobSchedule = client.JobScheduleOperations.CreateJobSchedule(jsId, null, null);
                    unboundJobSchedule.JobSpecification = new JobSpecification(new PoolInformation());
                    unboundJobSchedule.JobSpecification.PoolInformation.PoolId = this.poolFixture.PoolId;
                    unboundJobSchedule.Schedule = new Schedule() { RecurrenceInterval = TimeSpan.FromMinutes(3) };

                    // add the jobPrep task to the job schedule
                    {
                        JobPreparationTask prep = new JobPreparationTask(JobPrepCommandLine);
                        unboundJobSchedule.JobSpecification.JobPreparationTask = prep;

                        List<EnvironmentSetting> prepEnvSettings = new List<EnvironmentSetting>();
                        prepEnvSettings.Add(JobPrepEnvSettingOM);
                        prep.EnvironmentSettings = prepEnvSettings;

                        prep.Id = JobPrepId;
                        prep.RerunOnComputeNodeRebootAfterSuccess = JobPrepRerunOnComputeNodeRebootAfterSuccess;
                        prep.ResourceFiles = resFiles; // bug: incorrect type this should be IList<>
                        prep.RunElevated = JobPrepRunElevated;
                        prep.Constraints = JobPrepTaskConstraintsOM;
                        prep.WaitForSuccess = JobPrepWaitForSuccessCreate;
                    }

                    // add a jobRelease task to the job schedule
                    {
                        JobReleaseTask relTask = new JobReleaseTask(JobReleaseTaskCommandLine);
                        unboundJobSchedule.JobSpecification.JobReleaseTask = relTask;

                        List<EnvironmentSetting> relEnvSettings = new List<EnvironmentSetting>();
                        relEnvSettings.Add(JobRelEnvSettingOM);
                        relTask.EnvironmentSettings = relEnvSettings;

                        relTask.MaxWallClockTime = JobRelMaxWallClockTime;
                        relTask.Id = JobRelId;

                        // assign null first, then a fresh list, to exercise the setter paths
                        relTask.ResourceFiles = null;
                        relTask.ResourceFiles = new List<ResourceFile>();

                        // why not, merge them in. works the concurrent IList wrapper
                        foreach (ResourceFile curRF in resFiles)
                        {
                            relTask.ResourceFiles.Add(curRF);
                        }

                        relTask.RetentionTime = JobRelRetentionTime;
                        relTask.RunElevated = JobRelRunElevated;
                    }

                    // set JobCommonEnvSettings
                    {
                        List<EnvironmentSetting> jobCommonES = new List<EnvironmentSetting>();
                        jobCommonES.Add(JobCommonEnvSettingOM);
                        unboundJobSchedule.JobSpecification.CommonEnvironmentSettings = jobCommonES;
                    }

                    // add the job schedule to the service
                    unboundJobSchedule.Commit();
                }

                // now we have a jobschedule with jobprep/release... test the values on the jobschedule
                {
                    CloudJobSchedule boundJobSchedule = client.JobScheduleOperations.GetJobSchedule(jsId);

                    Assert.NotNull(boundJobSchedule);
                    Assert.NotNull(boundJobSchedule.JobSpecification);
                    Assert.NotNull(boundJobSchedule.JobSpecification.JobPreparationTask);
                    Assert.NotNull(boundJobSchedule.JobSpecification.JobReleaseTask);
                    Assert.NotNull(boundJobSchedule.JobSpecification.CommonEnvironmentSettings);

                    AssertGoodCommonEnvSettingsOM(boundJobSchedule.JobSpecification.CommonEnvironmentSettings);
                    AssertGoodJobPrepTaskOM(boundJobSchedule.JobSpecification.JobPreparationTask);
                    AssertGoodJobReleaseTaskOM(boundJobSchedule.JobSpecification.JobReleaseTask);
                    AssertGoodResourceFiles(resFiles, boundJobSchedule.JobSpecification.JobPreparationTask.ResourceFiles);
                    AssertGoodResourceFiles(resFiles, boundJobSchedule.JobSpecification.JobReleaseTask.ResourceFiles);

                    //todo: test mutability
                }

                CloudJobSchedule boundJobScheduleWithJob; // set in the on-job test below

                // test the values on the job spawned by the schedule
                {
                    boundJobScheduleWithJob = TestUtilities.WaitForJobOnJobSchedule(client.JobScheduleOperations, jsId);

                    CloudJob bndJob = client.JobOperations.GetJob(boundJobScheduleWithJob.ExecutionInformation.RecentJob.Id);

                    Assert.NotNull(bndJob);
                    Assert.NotNull(bndJob.CommonEnvironmentSettings);
                    Assert.NotNull(bndJob.JobPreparationTask);
                    Assert.NotNull(bndJob.JobReleaseTask);

                    AssertGoodCommonEnvSettingsOM(bndJob.CommonEnvironmentSettings as IList<EnvironmentSetting> /* we know it is our internal IList */);
                    AssertGoodJobPrepTaskOM(bndJob.JobPreparationTask);
                    AssertGoodJobReleaseTaskOM(bndJob.JobReleaseTask);
                    AssertGoodResourceFiles(resFiles, bndJob.JobPreparationTask.ResourceFiles);
                    AssertGoodResourceFiles(resFiles, bndJob.JobReleaseTask.ResourceFiles);

                    //TODO: test immutability
                }

                // used for the get-status test below
                CloudJobSchedule updatedJobSchedule;

                // test update on the WI jobprep/jobrelease
                {
                    // change props
                    boundJobScheduleWithJob.JobSpecification.JobPreparationTask.WaitForSuccess = JobPrepWaitForSuccessUpdate;

                    // commit changes
                    boundJobScheduleWithJob.Commit();

                    // get new values
                    updatedJobSchedule = client.JobScheduleOperations.GetJobSchedule(jsId);

                    // confirm values changed
                    Assert.Equal(JobPrepWaitForSuccessUpdate, updatedJobSchedule.JobSpecification.JobPreparationTask.WaitForSuccess);
                }

                TestGetPrepReleaseStatusCalls(client, updatedJobSchedule, this.poolFixture.PoolId, resFiles);
            }
            finally
            {
                // cleanup
                TestUtilities.DeleteJobScheduleIfExistsAsync(client, jsId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, LongTestTimeout);
}
public void TestBoundJobVerbs()
{
    // Exercises the bound-job lifecycle verbs (Disable/Enable/Terminate/Delete),
    // both through the CloudJob instance and through JobOperations, asserting the
    // expected state after each transition.
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            // Create a job
            string jobId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-TestBoundJobVerbs";
            try
            {
                CloudJob cloudJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                cloudJob.PoolInformation = new PoolInformation() { PoolId = this.poolFixture.PoolId };
                cloudJob.Commit();

                // Get the bound job
                CloudJob job = batchCli.JobOperations.GetJob(jobId);

                // Disable the job (via instance)
                job.Disable(DisableJobOption.Terminate);

                // Check the job state: disabling is asynchronous server-side
                CloudJob disabledJob = batchCli.JobOperations.GetJob(jobId);
                this.testOutputHelper.WriteLine("DisabledJob State: {0}", disabledJob.State);
                Assert.True(disabledJob.State == JobState.Disabled || disabledJob.State == JobState.Disabling);

                // Enable the job (via instance)
                job.Enable();

                // Check the job state.
                // Fixed: the original asserted Assert.Equal(JobState.Active, JobState.Active),
                // which compared the constant with itself and could never fail.
                CloudJob enabledJob = batchCli.JobOperations.GetJob(jobId);
                this.testOutputHelper.WriteLine("EnabledJob state: {0}", enabledJob.State);
                Assert.Equal(JobState.Active, enabledJob.State);

                // Disable the job (via operations)
                batchCli.JobOperations.DisableJob(jobId, DisableJobOption.Terminate);

                disabledJob = batchCli.JobOperations.GetJob(jobId);
                this.testOutputHelper.WriteLine("DisabledJob State: {0}", disabledJob.State);
                Assert.True(disabledJob.State == JobState.Disabled || disabledJob.State == JobState.Disabling);

                // Enable the job (via operations)
                batchCli.JobOperations.EnableJob(jobId);

                // Check the job state (same tautology fix as above)
                enabledJob = batchCli.JobOperations.GetJob(jobId);
                this.testOutputHelper.WriteLine("EnabledJob state: {0}", enabledJob.State);
                Assert.Equal(JobState.Active, enabledJob.State);

                // Terminate the job
                job.Terminate("need some reason");

                // Check the job state
                CloudJob terminatedJob = batchCli.JobOperations.GetJob(jobId);
                this.testOutputHelper.WriteLine("TerminatedJob state: {0}", terminatedJob.State);
                Assert.True(terminatedJob.State == JobState.Terminating || terminatedJob.State == JobState.Completed);

                if (terminatedJob.State == JobState.Terminating)
                {
                    Thread.Sleep(TimeSpan.FromSeconds(5)); // Sleep and wait for the job to finish terminating before we issue a delete
                }

                // Delete the job
                job.Delete();

                // Check that the job doesn't exist anymore
                try
                {
                    this.testOutputHelper.WriteLine("Expected Exception: testing that job does NOT exist.");

                    CloudJob deletedJob = batchCli.JobOperations.GetJob(jobId);
                    Assert.Equal(JobState.Deleting, deletedJob.State);
                }
                catch (Exception e)
                {
                    Assert.IsAssignableFrom<BatchException>(e);

                    BatchException be = e as BatchException;
                    Assert.NotNull(be.RequestInformation);
                    Assert.NotNull(be.RequestInformation.BatchError);
                    Assert.Equal(BatchErrorCodeStrings.JobNotFound, be.RequestInformation.BatchError.Code);

                    this.testOutputHelper.WriteLine("Job was deleted successfully");
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void Bug1770933_1770935_1771164_AddUserCRUDAndGetRDP()
{
    // Exercises compute-node user CRUD (add/update/delete through each API surface:
    // ComputeNodeUser instance, PoolOperations, and ComputeNode) and RDP file
    // retrieval by filename and by stream.
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            // names to create/delete
            List<string> names = new List<string>()
            {
                TestUtilities.GetMyName(),
                TestUtilities.GetMyName() + "1",
                TestUtilities.GetMyName() + "2",
                TestUtilities.GetMyName() + "3",
                TestUtilities.GetMyName() + "4"
            };

            // pick a compute node to victimize with user accounts
            IEnumerable<ComputeNode> ienmComputeNodes = batchCli.PoolOperations.ListComputeNodes(this.poolFixture.PoolId);
            List<ComputeNode> computeNodeList = new List<ComputeNode>(ienmComputeNodes);
            ComputeNode computeNode = computeNodeList[0];

            try
            {
                string rdpFileName = "Bug1770933.rdp";

                // test user public constructor and IPoolMgr verbs
                {
                    ComputeNodeUser newUser = batchCli.PoolOperations.CreateComputeNodeUser(this.poolFixture.PoolId, computeNode.Id);
                    newUser.Name = names[0];
                    newUser.IsAdmin = true;
                    newUser.ExpiryTime = DateTime.UtcNow + TimeSpan.FromHours(1.0);
                    newUser.Password = @"!!Admin!!";

                    // commit that creates/adds the user
                    newUser.Commit(ComputeNodeUserCommitSemantics.AddUser);

                    // now update the user's password
                    newUser.Password = @"!!!Admin!!!";

                    // commit that updates
                    newUser.Commit(ComputeNodeUserCommitSemantics.UpdateUser);

                    // clean up from prev run
                    if (File.Exists(rdpFileName))
                    {
                        File.Delete(rdpFileName);
                    }

                    // pull the rdp file
                    batchCli.PoolOperations.GetRDPFile(this.poolFixture.PoolId, computeNode.Id, rdpFileName);

                    // simple validation tests on the rdp file
                    TestFileExistsAndHasLength(rdpFileName);

                    // cleanup the rdp file
                    File.Delete(rdpFileName);

                    // "test" delete user from IPoolMgr
                    // TODO: when GET/LIST User is available we should close the loop and confirm the user is gone.
                    batchCli.PoolOperations.DeleteComputeNodeUser(this.poolFixture.PoolId, computeNode.Id, newUser.Name);
                }

                // test IPoolMgr CreateUser
                {
                    ComputeNodeUser pmcUser = batchCli.PoolOperations.CreateComputeNodeUser(this.poolFixture.PoolId, computeNode.Id);
                    pmcUser.Name = names[1];
                    pmcUser.IsAdmin = true;
                    pmcUser.ExpiryTime = DateTime.UtcNow + TimeSpan.FromHours(1.0);
                    pmcUser.Password = @"!!!Admin!!!";

                    // add the user
                    pmcUser.Commit(ComputeNodeUserCommitSemantics.AddUser);

                    // pull rdp file
                    batchCli.PoolOperations.GetRDPFile(this.poolFixture.PoolId, computeNode.Id, rdpFileName);

                    // simple validation on rdp file
                    TestFileExistsAndHasLength(rdpFileName);

                    // cleanup
                    File.Delete(rdpFileName);

                    // delete user
                    batchCli.PoolOperations.DeleteComputeNodeUser(this.poolFixture.PoolId, computeNode.Id, pmcUser.Name);
                }

                // test IComputeNode verbs
                {
                    ComputeNodeUser poolMgrUser = batchCli.PoolOperations.CreateComputeNodeUser(this.poolFixture.PoolId, computeNode.Id);
                    poolMgrUser.Name = names[2];
                    poolMgrUser.IsAdmin = true;
                    poolMgrUser.ExpiryTime = DateTime.UtcNow + TimeSpan.FromHours(1.0);
                    poolMgrUser.Password = @"!!!Admin!!!";
                    poolMgrUser.Commit(ComputeNodeUserCommitSemantics.AddUser);

                    // pull rdp file
                    computeNode.GetRDPFile(rdpFileName);

                    // simple validation on rdp file
                    TestFileExistsAndHasLength(rdpFileName);

                    // cleanup
                    File.Delete(rdpFileName);

                    // delete user
                    computeNode.DeleteComputeNodeUser(poolMgrUser.Name);
                }

                // test ComputeNodeUser.Delete
                {
                    ComputeNodeUser usrDelete = batchCli.PoolOperations.CreateComputeNodeUser(this.poolFixture.PoolId, computeNode.Id);
                    usrDelete.Name = names[3];
                    usrDelete.ExpiryTime = DateTime.UtcNow + TimeSpan.FromHours(1.0);
                    usrDelete.Password = @"!!!Admin!!!";
                    usrDelete.Commit(ComputeNodeUserCommitSemantics.AddUser);
                    usrDelete.Delete();
                }

                // test rdp-by-stream IPoolMgr and IComputeNode
                // the by-stream paths do not converge with the by-filename paths until IProtocol so we test them separately
                {
                    ComputeNodeUser byStreamUser = batchCli.PoolOperations.CreateComputeNodeUser(this.poolFixture.PoolId, computeNode.Id);
                    byStreamUser.Name = names[4];
                    byStreamUser.IsAdmin = true;
                    byStreamUser.ExpiryTime = DateTime.UtcNow + TimeSpan.FromHours(1.0);
                    byStreamUser.Password = @"!!!Admin!!!";
                    byStreamUser.Commit(ComputeNodeUserCommitSemantics.AddUser);

                    // IPoolMgr
                    using (Stream rdpStreamPoolMgr = File.Create(rdpFileName))
                    {
                        batchCli.PoolOperations.GetRDPFile(this.poolFixture.PoolId, computeNode.Id, rdpStreamPoolMgr);

                        rdpStreamPoolMgr.Flush();
                        rdpStreamPoolMgr.Close();

                        TestFileExistsAndHasLength(rdpFileName);
                        File.Delete(rdpFileName);
                    }

                    // IComputeNode
                    using (Stream rdpViaIComputeNode = File.Create(rdpFileName))
                    {
                        computeNode.GetRDPFile(rdpViaIComputeNode);

                        rdpViaIComputeNode.Flush();
                        rdpViaIComputeNode.Close();

                        TestFileExistsAndHasLength(rdpFileName);
                        File.Delete(rdpFileName);
                    }

                    // delete the user account
                    byStreamUser.Delete();
                }
            }
            finally
            {
                // clear any old accounts; every name should already be gone, so each
                // delete is expected to fail with NodeUserNotFound
                foreach (string curName in names)
                {
                    bool hitException = false;
                    try
                    {
                        ComputeNodeUser deleteThis = batchCli.PoolOperations.CreateComputeNodeUser(this.poolFixture.PoolId, computeNode.Id);
                        deleteThis.Name = curName;
                        deleteThis.Delete();
                    }
                    catch (BatchException ex)
                    {
                        Assert.Equal(BatchErrorCodeStrings.NodeUserNotFound, ex.RequestInformation.BatchError.Code);
                        hitException = true;
                    }

                    // Fixed: the user-name operand of this message had been replaced by
                    // scrubbing residue ("******"), which is not valid C#; restore the
                    // obvious concatenation of the current user name.
                    Assert.True(hitException, "Should have hit exception on user: " + curName + ", compute node: " + computeNode.Id + ".");
                }
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void TestOMJobPrepSchedulingError()
{
    // Creates a job whose JobPreparationTask references an un-downloadable resource
    // file, forcing a scheduling error, then polls until the prep task completes
    // and asserts the scheduling error is surfaced on its execution information.
    string jobId = "TestOMJobPrepSchedulingError-" + CraftTimeString() + "-" + TestUtilities.GetMyName();

    Action test = () =>
    {
        using (BatchClient client = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            try
            {
                // create job with prep that triggers prep scheduling error
                {
                    CloudJob unboundJob = client.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = this.poolFixture.PoolId });

                    // add the jobPrep task to the job
                    {
                        JobPreparationTask prep = new JobPreparationTask("cmd /c JobPrep Task");
                        unboundJob.JobPreparationTask = prep;

                        // a loopback URL that can never be downloaded forces the scheduling error
                        ResourceFile[] badResFiles = { new ResourceFile("https://127.0.0.1/foo/bar/baf", "bob.txt") };
                        prep.ResourceFiles = badResFiles;

                        prep.WaitForSuccess = true; // be explicit even though this is the default. need JP to block the task from running
                    }

                    // add the job to the service
                    unboundJob.Commit();
                }

                CloudJob boundJob = client.JobOperations.GetJob(jobId);

                // add a trivial task to force the JP
                client.JobOperations.AddTask(boundJob.Id, new CloudTask("ForceJobPrep", "cmd /c echo TestOMJobPrepSchedulingError"));

                // the victim compute node. pool should have size 1.
                List<ComputeNode> nodes = client.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).ToList();
                Assert.Equal(1, nodes.Count);

                // now we have a job that should be trying to run the JP
                // poll for the JP to have been run, and it must have a scheduling error
                bool prepNotCompleted = true;

                // gotta poll to find out when the jp has been run
                while (prepNotCompleted)
                {
                    List<JobPreparationAndReleaseTaskExecutionInformation> jpStatsList = client.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId).ToList();
                    JobPreparationAndReleaseTaskExecutionInformation jpStatus = jpStatsList.FirstOrDefault();

                    if (jpStatus == null)
                    {
                        // no status yet; back off before polling again
                        Thread.Sleep(2000);
                    }
                    else if (JobPreparationTaskState.Completed == jpStatus.JobPreparationTaskExecutionInformation.State)
                    {
                        prepNotCompleted = false; // we see a JP has completed

                        Assert.NotNull(jpStatus.JobPreparationTaskExecutionInformation.SchedulingError);

                        // spew the schederror
                        OutputSchedulingError(jpStatus.JobPreparationTaskExecutionInformation.SchedulingError);
                    }
                    else
                    {
                        // Fixed: this progress message was previously written even on the
                        // iteration where the prep was observed to be Completed.
                        this.testOutputHelper.WriteLine("Job Prep is running (waiting for blob dl to timeout)");
                    }
                }
            }
            finally
            {
                // cleanup
                client.JobOperations.DeleteJob(jobId);
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public async Task ComputeNodeUploadLogs()
{
    // Uploads Batch service logs from a compute node to a blob container (via a
    // write-only SAS) and polls until the uploaded blobs appear there.
    //
    // Fixed: the test body was previously an async lambda assigned to Action
    // (i.e. async void) passed to the synchronous RunTest — RunTest returned at
    // the first await and any exception in the body was unobservable. It is now a
    // Func<Task> awaited through RunTestAsync, matching this method's async
    // signature. The polling loop also gained a delay; it previously busy-spun
    // against the storage endpoint for up to 2 minutes.
    Func<Task> test = async () =>
    {
        using (BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync())
        {
            var node = batchCli.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).First();

            // Generate a storage container URL
            StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment();
            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(storageAccount.StorageAccount, storageAccount.StorageAccountKey),
                blobEndpoint: storageAccount.BlobUri,
                queueEndpoint: null,
                tableEndpoint: null,
                fileEndpoint: null);

            CloudBlobClient blobClient = cloudStorageAccount.CreateCloudBlobClient();
            const string containerName = "computenodelogscontainer";
            var container = blobClient.GetContainerReference(containerName);

            try
            {
                await container.CreateIfNotExistsAsync();

                var blobs = await BlobStorageExtensions.ListBlobs(container);

                // Ensure that there are no items in the container to begin with
                Assert.Empty(blobs);

                // write-only SAS so the Batch service can upload but not read
                var sas = container.GetSharedAccessSignature(new SharedAccessBlobPolicy()
                {
                    Permissions = SharedAccessBlobPermissions.Write,
                    SharedAccessExpiryTime = DateTime.UtcNow.AddDays(1)
                });
                var fullSas = container.Uri + sas;

                var startTime = DateTime.UtcNow.Subtract(TimeSpan.FromMinutes(5));

                var result = batchCli.PoolOperations.UploadComputeNodeBatchServiceLogs(
                    this.poolFixture.PoolId,
                    node.Id,
                    fullSas,
                    startTime);

                Assert.NotEqual(0, result.NumberOfFilesUploaded);
                Assert.NotEmpty(result.VirtualDirectoryName);

                // Allow up to 2m for files to get uploaded, polling with a short delay
                DateTime timeoutAt = DateTime.UtcNow.AddMinutes(2);
                while (DateTime.UtcNow < timeoutAt)
                {
                    blobs = await BlobStorageExtensions.ListBlobs(container);
                    if (blobs.Any())
                    {
                        break;
                    }

                    await Task.Delay(TimeSpan.FromSeconds(5));
                }

                Assert.NotEmpty(blobs);
            }
            finally
            {
                await container.DeleteIfExistsAsync();
            }
        }
    };

    await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 2329884: verifies that ComputeNode.RecentTasks is populated
/// after a task runs on a node, and that ComputeNode.Errors is surfaced correctly.
/// Node errors cannot be forced against a live service, so the second half mocks the
/// ComputeNodeGet protocol call via a RequestInterceptor that fabricates two errors.
/// </summary>
public void Bug2329884_ComputeNodeRecentTasksAndComputeNodeError()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jobId = "Bug2329884Job-" + TestUtilities.GetMyName();
            Protocol.RequestInterceptor interceptor = null;
            try
            {
                const string taskId = "hiWorld";

                //
                // Create the job
                //
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                unboundJob.Commit();
                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");
                boundJob.AddTask(myTask);
                this.testOutputHelper.WriteLine("Initial job commit()");

                //
                // Wait for task to go to completion
                //
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();
                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    new TimeSpan(0, 3 /*min*/, 0));

                CloudTask boundTask = boundJob.GetTask(taskId);

                //Since the compute node name comes back as "Node:<computeNodeId>" we need to split on : to get the actual compute node name
                string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                //
                // Check recent tasks
                //
                ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);
                this.testOutputHelper.WriteLine("Recent tasks:");
                foreach (TaskInformation recentTask in computeNode.RecentTasks)
                {
                    this.testOutputHelper.WriteLine("Compute node has recent task Job: {0}, Task: {1}, State: {2}, Subtask: {3}", recentTask.JobId, recentTask.TaskId, recentTask.TaskState, recentTask.SubtaskId);
                }

                // The node may have run tasks from other jobs; find ours by job+task id.
                TaskInformation myTaskInfo = computeNode.RecentTasks.First(taskInfo => taskInfo.JobId.Equals(jobId, StringComparison.InvariantCultureIgnoreCase) &&
                    taskInfo.TaskId.Equals(taskId, StringComparison.InvariantCultureIgnoreCase));
                Assert.Equal(TaskState.Completed, myTaskInfo.TaskState);
                Assert.NotNull(myTaskInfo.ExecutionInformation);
                Assert.Equal(0, myTaskInfo.ExecutionInformation.ExitCode);

                //
                // Check compute node Error
                //
                const string expectedErrorCode = "TestErrorCode";
                const string expectedErrorMessage = "Test error message";
                const string nvpValue = "Test";

                //We use mocking to return a fake compute node object here to test Compute Node Error because we cannot force one easily
                interceptor = new Protocol.RequestInterceptor((req =>
                {
                    if (req is ComputeNodeGetBatchRequest)
                    {
                        var typedRequest = req as ComputeNodeGetBatchRequest;
                        // Replace the real service call with one that returns a canned node
                        // carrying two identical errors.
                        typedRequest.ServiceRequestFunc = (token) =>
                        {
                            var response = new AzureOperationResponse<Protocol.Models.ComputeNode, Protocol.Models.ComputeNodeGetHeaders>();
                            List<Protocol.Models.ComputeNodeError> errors = new List<Protocol.Models.ComputeNodeError>();

                            //Generate first Compute Node Error
                            List<Protocol.Models.NameValuePair> nvps = new List<Protocol.Models.NameValuePair>();
                            nvps.Add(new Protocol.Models.NameValuePair() { Name = nvpValue, Value = nvpValue });
                            Protocol.Models.ComputeNodeError error1 = new Protocol.Models.ComputeNodeError();
                            error1.Code = expectedErrorCode;
                            error1.Message = expectedErrorMessage;
                            error1.ErrorDetails = nvps;
                            errors.Add(error1);

                            //Generate second Compute Node Error
                            nvps = new List<Protocol.Models.NameValuePair>();
                            nvps.Add(new Protocol.Models.NameValuePair() { Name = nvpValue, Value = nvpValue });
                            Protocol.Models.ComputeNodeError error2 = new Protocol.Models.ComputeNodeError();
                            error2.Code = expectedErrorCode;
                            error2.Message = expectedErrorMessage;
                            error2.ErrorDetails = nvps;
                            errors.Add(error2);

                            Protocol.Models.ComputeNode protoComputeNode = new Protocol.Models.ComputeNode();
                            protoComputeNode.Id = computeNodeId;
                            protoComputeNode.State = Protocol.Models.ComputeNodeState.Idle;
                            protoComputeNode.Errors = errors;

                            response.Body = protoComputeNode;
                            return (Task.FromResult(response));
                        };
                    }
                }));
                batchCli.PoolOperations.CustomBehaviors.Add(interceptor);

                // This GetComputeNode now hits the interceptor above, not the service.
                computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);
                Assert.Equal(computeNodeId, computeNode.Id);
                Assert.NotNull(computeNode.Errors);
                Assert.Equal(2, computeNode.Errors.Count());
                foreach (ComputeNodeError computeNodeError in computeNode.Errors)
                {
                    Assert.Equal(expectedErrorCode, computeNodeError.Code);
                    Assert.Equal(expectedErrorMessage, computeNodeError.Message);
                    Assert.NotNull(computeNodeError.ErrorDetails);
                    Assert.Single(computeNodeError.ErrorDetails);
                    Assert.Contains(nvpValue, computeNodeError.ErrorDetails.First().Name);
                }
            }
            finally
            {
                batchCli.JobOperations.DeleteJob(jobId);
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Runs a task that echoes a known message, then reads the task's stdout two ways —
/// through the NodeFile object model and through JobOperations.CopyNodeFileContentToString —
/// asserting both contain the echoed message.
/// </summary>
public void TestGetNodeFileByTask()
{
    void test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
        JobOperations jobOperations = batchCli.JobOperations;
        string jobId = Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-" + nameof(TestGetNodeFileByTask);
        try
        {
            // Submit a job bound to the shared test pool.
            CloudJob newJob = jobOperations.CreateJob(jobId, new PoolInformation());
            newJob.PoolInformation = new PoolInformation() { PoolId = poolFixture.PoolId };
            testOutputHelper.WriteLine("Initial job schedule commit()");
            newJob.Commit();

            testOutputHelper.WriteLine("Waiting for job");
            CloudJob boundJob = jobOperations.GetJob(jobId);

            // Queue a single task that echoes a known message to stdout.
            const string taskId = "T1";
            const string taskMessage = "This is a test";
            testOutputHelper.WriteLine("Adding task: {0}", taskId);
            boundJob.AddTask(new CloudTask(taskId, $"cmd /c echo {taskMessage}"));

            // Block until the task reaches the Completed state.
            testOutputHelper.WriteLine("Waiting for the task to complete");
            TaskStateMonitor monitor = batchCli.Utilities.CreateTaskStateMonitor();
            monitor.WaitAll(
                jobOperations.ListTasks(jobId),
                TaskState.Completed,
                TimeSpan.FromSeconds(30));

            // Fetch stdout via the NodeFile object model...
            testOutputHelper.WriteLine("Downloading the stdout for the file");
            NodeFile stdOutFile = jobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);
            string stdOutContent = stdOutFile.ReadAsString();
            testOutputHelper.WriteLine("Data: {0}", stdOutContent);
            Assert.Contains(taskMessage, stdOutContent);

            // ...and again via the JobOperations convenience helper; both paths must agree.
            stdOutContent = batchCli.JobOperations.CopyNodeFileContentToString(jobId, taskId, Constants.StandardOutFileName);
            testOutputHelper.WriteLine("Data: {0}", stdOutContent);
            Assert.Contains(taskMessage, stdOutContent);
        }
        finally
        {
            jobOperations.DeleteJob(jobId);
        }
    }
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 1480491: verifies that NodeFile.Properties (content length,
/// content type, creation time, last modified) are populated both when the file is fetched
/// by job/task id and when it is fetched by compute node path.
/// </summary>
public void Bug1480491NodeFileFileProperties()
{
    void test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
        string jobId = "Bug1480491Job-" + TestUtilities.GetMyName();
        try
        {
            const string taskId = "hiWorld";

            //
            // Create the job
            //
            CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
            unboundJob.PoolInformation.PoolId = poolFixture.PoolId;
            unboundJob.Commit();
            CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
            CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");
            boundJob.AddTask(myTask);
            testOutputHelper.WriteLine("Initial job commit()");

            //
            // Wait for task to go to completion
            //
            Utilities utilities = batchCli.Utilities;
            TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();
            taskStateMonitor.WaitAll(
                boundJob.ListTasks(),
                Microsoft.Azure.Batch.Common.TaskState.Completed,
                TimeSpan.FromMinutes(3));

            const int expectedFileSize = 13; //Magic number based on output generated by the task

            //
            // NodeFile by task
            //
            NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);
            testOutputHelper.WriteLine("File {0} has content length: {1}", Constants.StandardOutFileName, file.Properties.ContentLength);
            testOutputHelper.WriteLine("File {0} has content type: {1}", Constants.StandardOutFileName, file.Properties.ContentType);
            testOutputHelper.WriteLine("File {0} has creation time: {1}", Constants.StandardOutFileName, file.Properties.CreationTime);
            testOutputHelper.WriteLine("File {0} has last modified time: {1}", Constants.StandardOutFileName, file.Properties.LastModified);
            Assert.Equal(expectedFileSize, file.Properties.ContentLength);
            Assert.Equal("text/plain", file.Properties.ContentType);

            //
            // NodeFile by node
            //
            CloudTask boundTask = boundJob.GetTask(taskId);
            // AffinityId has the form "Node:<computeNodeId>"; split to get the node id.
            string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];
            ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(poolFixture.PoolId, computeNodeId);
            testOutputHelper.WriteLine("Task ran on compute node: {0}", computeNodeId);
            List<NodeFile> files = computeNode.ListNodeFiles(recursive: true).ToList();
            foreach (NodeFile nodeFile in files)
            {
                testOutputHelper.WriteLine("Found file: {0}", nodeFile.Path);
            }

            // NOTE(review): "job-1" is the server-side job directory name — presumably stable
            // for a freshly created job; verify if this test starts failing on path lookup.
            string filePathToGet = string.Format("workitems/{0}/{1}/{2}/{3}", jobId, "job-1", taskId, Constants.StandardOutFileName);
            file = computeNode.GetNodeFile(filePathToGet);
            testOutputHelper.WriteLine("File {0} has content length: {1}", filePathToGet, file.Properties.ContentLength);
            testOutputHelper.WriteLine("File {0} has content type: {1}", filePathToGet, file.Properties.ContentType);
            testOutputHelper.WriteLine("File {0} has creation time: {1}", filePathToGet, file.Properties.CreationTime);
            testOutputHelper.WriteLine("File {0} has last modified time: {1}", filePathToGet, file.Properties.LastModified);
            Assert.Equal(expectedFileSize, file.Properties.ContentLength);
            Assert.Equal("text/plain", file.Properties.ContentType);
        }
        finally
        {
            batchCli.JobOperations.DeleteJob(jobId);
        }
    }
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 1480489: verifies that NodeFile.IsDirectory is populated for
/// directory entries in a task's file listing. The task creates a directory, and every
/// task also gets a "wd" working directory, which is used as the sentinel.
/// </summary>
public void Bug1480489NodeFileMissingIsDirectory()
{
    void test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
        string jobId = "Bug1480489Job-" + TestUtilities.GetMyName();
        try
        {
            // Commit an unbound job targeted at the shared pool, then re-read it as bound.
            CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = poolFixture.PoolId });
            unboundJob.Commit();
            CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

            // The command creates a directory so the listing contains a directory entry.
            boundJob.AddTask(new CloudTask(id: "Bug1480489Task", commandline: @"md Bug1480489Directory"));

            // Block until the task finishes.
            TaskStateMonitor monitor = batchCli.Utilities.CreateTaskStateMonitor();
            monitor.WaitAll(
                boundJob.ListTasks(),
                Microsoft.Azure.Batch.Common.TaskState.Completed,
                TimeSpan.FromMinutes(3));

            List<CloudTask> completedTasks = new List<CloudTask>(boundJob.ListTasks(null));
            CloudTask myCompletedTask = completedTasks[0];
            string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
            string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();
            testOutputHelper.WriteLine($"TaskId: {myCompletedTask.Id}");
            testOutputHelper.WriteLine("StdOut: ");
            testOutputHelper.WriteLine(stdOut);
            testOutputHelper.WriteLine("StdErr: ");
            testOutputHelper.WriteLine(stdErr);
            testOutputHelper.WriteLine("Task Files:");

            // The per-task "wd" working directory proves IsDirectory is set for directories.
            bool foundAtLeastOneDir = false;
            foreach (NodeFile curFile in myCompletedTask.ListNodeFiles())
            {
                testOutputHelper.WriteLine($" Filepath: {curFile.Path}");
                testOutputHelper.WriteLine($" IsDirectory: {curFile.IsDirectory.ToString()}");
                if (curFile.Path.Equals("wd") && curFile.IsDirectory.HasValue && curFile.IsDirectory.Value)
                {
                    foundAtLeastOneDir = true;
                }
            }
            Assert.True(foundAtLeastOneDir);
        }
        finally
        {
            TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
        }
    }
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Verifies that non-pool job properties (Priority, Metadata) can be updated while the job
/// is active, and that PoolInformation can only be updated once the job is Disabled.
/// Uses an auto pool because PoolInformation is not updatable otherwise.
/// </summary>
public void TestJobUpdateWithAndWithoutPoolInfo()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            const string testName = "TestJobUpdateWithAndWithoutPoolInfo";

            // Create a job
            string jobId = testName + "_" + TestUtilities.GetMyName();
            CloudJob unboundJob = batchCli.JobOperations.CreateJob();
            unboundJob.Id = jobId;

            // Use an auto pool with the job, since PoolInformation can't be updated otherwise.
            PoolSpecification poolSpec = new PoolSpecification();
            poolSpec.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily, "*");
            poolSpec.TargetDedicatedComputeNodes = 0; // no compute is needed for this update-only test
            poolSpec.VirtualMachineSize = PoolFixture.VMSize;

            AutoPoolSpecification autoPoolSpec = new AutoPoolSpecification();
            string autoPoolPrefix = "UpdPIAuto_" + TestUtilities.GetMyName();
            autoPoolSpec.AutoPoolIdPrefix = autoPoolPrefix;
            const bool originalKeepAlive = false;
            autoPoolSpec.KeepAlive = originalKeepAlive;
            autoPoolSpec.PoolLifetimeOption = PoolLifetimeOption.Job;
            autoPoolSpec.PoolSpecification = poolSpec;

            PoolInformation poolInfo = new PoolInformation();
            poolInfo.AutoPoolSpecification = autoPoolSpec;
            unboundJob.PoolInformation = poolInfo;

            const int originalPriority = 0;
            unboundJob.Priority = originalPriority;

            List<MetadataItem> originalMetadata = new List<MetadataItem>();
            originalMetadata.Add(new MetadataItem("meta1", "value1"));
            originalMetadata.Add(new MetadataItem("meta2", "value2"));
            unboundJob.Metadata = originalMetadata;

            this.testOutputHelper.WriteLine("Creating job {0}", jobId);
            unboundJob.Commit();
            try
            {
                // Get bound job
                CloudJob createdJob = batchCli.JobOperations.GetJob(jobId);

                // Verify that we can update something besides PoolInformation without getting an error for not being in the Disabled state.
                Assert.NotEqual(JobState.Disabled, createdJob.State);

                int updatedPriority = originalPriority + 1;
                List<MetadataItem> updatedMetadata = new List<MetadataItem>();
                updatedMetadata.Add(new MetadataItem("updatedMeta1", "value1"));
                createdJob.Priority = updatedPriority;
                createdJob.Metadata = updatedMetadata;

                this.testOutputHelper.WriteLine("Updating job {0} without altering PoolInformation", jobId);
                createdJob.Commit();

                // Verify update occurred
                CloudJob updatedJob = batchCli.JobOperations.GetJob(jobId);
                Assert.Equal(updatedPriority, updatedJob.Priority);
                // BUGFIX: this previously compared Metadata.Count against Priority
                // (Assert.Equal(updatedJob.Metadata.Count, updatedJob.Priority)), which only
                // passed by coincidence since both happened to be 1. Compare the metadata
                // count against what was actually submitted.
                Assert.Equal(updatedMetadata.Count, updatedJob.Metadata.Count);
                // BUGFIX: expected/actual were swapped below (xUnit convention is
                // Assert.Equal(expected, actual)); swapped arguments produce misleading
                // failure messages.
                Assert.Equal(updatedMetadata[0].Name, updatedJob.Metadata[0].Name);
                Assert.Equal(updatedMetadata[0].Value, updatedJob.Metadata[0].Value);

                // Verify that updating the PoolInformation works.
                // PoolInformation can only be changed in the Disabled state.
                this.testOutputHelper.WriteLine("Disabling job {0}", jobId);
                updatedJob.Disable(DisableJobOption.Terminate);
                while (updatedJob.State != JobState.Disabled)
                {
                    Thread.Sleep(500);
                    updatedJob.Refresh();
                }
                Assert.Equal(JobState.Disabled, updatedJob.State);

                bool updatedKeepAlive = !originalKeepAlive;
                updatedJob.PoolInformation.AutoPoolSpecification.KeepAlive = updatedKeepAlive;
                int updatedAgainPriority = updatedPriority + 1;
                updatedJob.Priority = updatedAgainPriority;
                this.testOutputHelper.WriteLine("Updating job {0} properties, including PoolInformation", jobId);
                updatedJob.Commit();

                CloudJob updatedPoolInfoJob = batchCli.JobOperations.GetJob(jobId);
                Assert.Equal(updatedKeepAlive, updatedPoolInfoJob.PoolInformation.AutoPoolSpecification.KeepAlive);
                Assert.Equal(updatedAgainPriority, updatedPoolInfoJob.Priority);
            }
            finally
            {
                this.testOutputHelper.WriteLine("Deleting job {0}", jobId);
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();

                // Explicitly delete auto pool
                foreach (CloudPool pool in batchCli.PoolOperations.ListPools(new ODATADetailLevel(filterClause: string.Format("startswith(id,'{0}')", autoPoolPrefix))))
                {
                    this.testOutputHelper.WriteLine("Deleting pool {0}", pool.Id);
                    TestUtilities.DeletePoolIfExistsAsync(batchCli, pool.Id).Wait();
                }
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 2342986: verifies that each compute node in the pool exposes a
/// StartTask matching the pool's StartTask (command line, retry count, user identity scope,
/// wait-for-success, environment settings, resource files), and that the node-level
/// StartTask is read-only (every setter throws InvalidOperationException).
/// </summary>
public void Bug2342986_StartTaskMissingOnComputeNode()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            CloudPool pool = batchCli.PoolOperations.GetPool(this.poolFixture.PoolId);
            this.testOutputHelper.WriteLine("Getting pool");
            StartTask poolStartTask = pool.StartTask;
            Assert.NotNull(poolStartTask);
            Assert.NotNull(poolStartTask.EnvironmentSettings);

            IEnumerable<ComputeNode> computeNodes = pool.ListComputeNodes();
            Assert.True(computeNodes.Any());
            this.testOutputHelper.WriteLine("Checking every compute nodes start task in the pool matches the pools start task");
            foreach (ComputeNode computeNode in computeNodes)
            {
                this.testOutputHelper.WriteLine("Checking start task of compute node: {0}", computeNode.Id);

                //Check that the property is correctly set on each compute node
                Assert.NotNull(computeNode.StartTask);
                Assert.Equal(poolStartTask.CommandLine, computeNode.StartTask.CommandLine);
                Assert.Equal(poolStartTask.MaxTaskRetryCount, computeNode.StartTask.MaxTaskRetryCount);
                Assert.Equal(AutoUserScope.Pool, poolStartTask.UserIdentity.AutoUser.Scope);
                Assert.Equal(AutoUserScope.Pool, computeNode.StartTask.UserIdentity.AutoUser.Scope);
                Assert.Equal(poolStartTask.WaitForSuccess, computeNode.StartTask.WaitForSuccess);

                // Environment settings: match each pool-level setting by name on the node.
                if (poolStartTask.EnvironmentSettings != null)
                {
                    Assert.Equal(poolStartTask.EnvironmentSettings.Count, computeNode.StartTask.EnvironmentSettings.Count);
                    foreach (EnvironmentSetting environmentSetting in poolStartTask.EnvironmentSettings)
                    {
                        EnvironmentSetting matchingEnvSetting = computeNode.StartTask.EnvironmentSettings.FirstOrDefault(envSetting => envSetting.Name == environmentSetting.Name);
                        Assert.NotNull(matchingEnvSetting);
                        Assert.Equal(environmentSetting.Name, matchingEnvSetting.Name);
                        Assert.Equal(environmentSetting.Value, matchingEnvSetting.Value);
                    }
                }

                // Resource files: match each pool-level file by URL on the node.
                if (poolStartTask.ResourceFiles != null)
                {
                    Assert.Equal(poolStartTask.ResourceFiles.Count, computeNode.StartTask.ResourceFiles.Count);
                    foreach (ResourceFile resourceFile in poolStartTask.ResourceFiles)
                    {
                        ResourceFile matchingResourceFile = computeNode.StartTask.ResourceFiles.FirstOrDefault(item => item.HttpUrl == resourceFile.HttpUrl);
                        Assert.NotNull(matchingResourceFile);
                        Assert.Equal(resourceFile.HttpUrl, matchingResourceFile.HttpUrl);
                        Assert.Equal(resourceFile.FilePath, matchingResourceFile.FilePath);
                    }
                }

                //Try to set some properties of the compute node's start task and ensure it fails
                TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.CommandLine = "Test"; });
                TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.MaxTaskRetryCount = 5; });
                TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.UserIdentity = new UserIdentity("foo"); });
                TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.WaitForSuccess = true; });
                TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.EnvironmentSettings = new List<EnvironmentSetting>(); });
                if (computeNode.StartTask.EnvironmentSettings != null)
                {
                    TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.EnvironmentSettings.Add(new EnvironmentSetting("test", "test")); });
                }
                TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.ResourceFiles = new List<ResourceFile>(); });
                if (computeNode.StartTask.ResourceFiles != null)
                {
                    TestUtilities.AssertThrows<InvalidOperationException>(() => { computeNode.StartTask.ResourceFiles.Add(ResourceFile.FromUrl("test", "test")); });
                }
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Long-running regression test for Bug 1965363: creates a "quick job" backed by an auto
/// pool (one dedicated node, job-lifetime pool), runs a single echo task on it, and
/// confirms the task ran by inspecting its stdout.
/// </summary>
public void LongRunning_Bug1965363Wat7OSVersionFeaturesQuickJobWithAutoPool()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jobId = "Bug1965363Job-" + TestUtilities.GetMyName();
            try
            {
                // Auto pool tied to the job lifetime: provisioned on commit, torn down with the job.
                PoolInformation poolInfo = new PoolInformation()
                {
                    AutoPoolSpecification = new AutoPoolSpecification()
                    {
                        PoolLifetimeOption = PoolLifetimeOption.Job,
                        PoolSpecification = new PoolSpecification()
                        {
                            CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily),
                            VirtualMachineSize = PoolFixture.VMSize,
                            TargetDedicatedComputeNodes = 1
                        }
                    }
                };
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);
                this.testOutputHelper.WriteLine("Commiting quickjob");
                unboundJob.Commit();

                CloudTask task = new CloudTask("Bug1965363Wat7OSVersionFeaturesQuickJobWithAutoPoolTask", "cmd /c echo Bug1965363");
                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                boundJob.AddTask(task);

                this.testOutputHelper.WriteLine("Getting pool name: {0}", boundJob.ExecutionInformation.PoolId);
                CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);

                TaskStateMonitor tsm = batchCli.Utilities.CreateTaskStateMonitor();
                ODATAMonitorControl odControl = new ODATAMonitorControl();

                // we know that the autopool compute nodes will take a long time to become scheduleable so we slow down polling/spam
                odControl.DelayBetweenDataFetch = TimeSpan.FromSeconds(5);

                this.testOutputHelper.WriteLine("Invoking TaskStateMonitor");
                tsm.WaitAll(
                    boundJob.ListTasks(),
                    TaskState.Completed,
                    TimeSpan.FromMinutes(15),
                    odControl,
                    new[]
                    {
                        // spam/logging interceptor: dumps node states on every polling request
                        new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) =>
                        {
                            this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());
                            // print out the compute node states... we are actually waiting on the compute nodes
                            List<ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();
                            this.testOutputHelper.WriteLine(" #comnpute nodes: " + allComputeNodes.Count);
                            allComputeNodes.ForEach((icn) => { this.testOutputHelper.WriteLine(" computeNode.id: " + icn.Id + ", state: " + icn.State); });
                            this.testOutputHelper.WriteLine("");
                        })
                    });

                // confirm the task ran by inspecting the stdOut
                string stdOut = boundJob.ListTasks().ToList()[0].GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                Assert.Contains("Bug1965363", stdOut);
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, LongTestTimeout);
}
/// <summary>
/// Verifies the OutputFiles feature: runs a task whose output files (pattern ../*.txt) are
/// uploaded to a blob container via a write SAS on task completion, then checks exactly
/// four blobs (stdout, stderr, fileuploadout, fileuploaderr) appear under the blob prefix.
/// </summary>
/// NOTE(review): the async lambda is assigned to Action, which makes it async void —
/// exceptions thrown after the first await (including the asserts on blob results) are
/// unobservable by RunTest. TODO confirm whether a Task-returning RunTest overload exists.
public void RunTaskAndUploadFiles_FilesAreSuccessfullyUploaded()
{
    Action test = async() =>
    {
        string containerName = "runtaskanduploadfiles";
        StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment();
        CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
            new StorageCredentials(storageAccount.StorageAccount, storageAccount.StorageAccountKey),
            blobEndpoint: storageAccount.BlobUri,
            queueEndpoint: null,
            tableEndpoint: null,
            fileEndpoint: null);
        CloudBlobClient blobClient = cloudStorageAccount.CreateCloudBlobClient();

        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jobId = "RunTaskAndUploadFiles-" + TestUtilities.GetMyName();
            try
            {
                // Create container and writeable SAS
                var container = blobClient.GetContainerReference(containerName);
                await container.CreateIfNotExistsAsync();
                var sas = container.GetSharedAccessSignature(new SharedAccessBlobPolicy()
                {
                    Permissions = SharedAccessBlobPermissions.Write,
                    SharedAccessExpiryTime = DateTime.UtcNow.AddDays(1)
                });
                var fullSas = container.Uri + sas;

                CloudJob createJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = this.poolFixture.PoolId });
                createJob.Commit();

                const string blobPrefix = "foo/bar";
                const string taskId = "simpletask";
                // Upload happens service-side when the task completes (TaskCompletion condition).
                CloudTask unboundTask = new CloudTask(taskId, "echo test")
                {
                    OutputFiles = new List<OutputFile>
                    {
                        new OutputFile(
                            filePattern: @"../*.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(fullSas, blobPrefix)),
                            uploadOptions: new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion))
                    }
                };
                batchCli.JobOperations.AddTask(jobId, unboundTask);

                var tasks = batchCli.JobOperations.ListTasks(jobId);
                var monitor = batchCli.Utilities.CreateTaskStateMonitor();
                monitor.WaitAll(tasks, TaskState.Completed, TimeSpan.FromMinutes(1));

                // Ensure that the correct files got uploaded
                var blobs = await BlobStorageExtensions.ListBlobs(container, useFlatBlobListing: true);
                blobs = blobs.ToList();
                Assert.Equal(4, blobs.Count()); //There are 4 .txt files created, stdout, stderr, fileuploadout, and fileuploaderr
                foreach (var blob in blobs)
                {
                    var blockBlob = blob as CloudBlockBlob;
                    Assert.StartsWith(blobPrefix, blockBlob.Name);
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                var container = blobClient.GetContainerReference(containerName);
                await container.DeleteIfExistsAsync();
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Verifies that a Job Release task whose resource file cannot be downloaded (points at
/// 127.0.0.1) surfaces a SchedulingError in JobReleaseTaskExecutionInformation. The job's
/// prep task succeeds; terminating the job triggers the release task, which is then polled
/// until it reaches the Completed state with a scheduling error attached.
/// </summary>
public void TestOMJobReleaseSchedulingError()
{
    string jobId = "TestOMJobReleaseSchedulingError-" + CraftTimeString() + "-" + TestUtilities.GetMyName();
    Action test = () =>
    {
        using (BatchClient client = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            try
            {
                // create job schedule with prep that succeeds and release the triggers scheduling error
                {
                    PoolInformation poolInfo = new PoolInformation() { PoolId = this.poolFixture.PoolId };
                    CloudJob unboundJob = client.JobOperations.CreateJob(jobId, poolInfo);

                    // add the jobPrep task to the job
                    {
                        JobPreparationTask prep = new JobPreparationTask("cmd /c echo the quick job prep jumped over the...");
                        unboundJob.JobPreparationTask = prep;
                        prep.WaitForSuccess = false; // we don't really care but why not set this
                    }

                    // add a jobRelease task to the job
                    {
                        JobReleaseTask relTask = new JobReleaseTask("cmd /c echo Job Release Task");
                        unboundJob.JobReleaseTask = relTask;
                        // Unreachable URL forces the resource-file download to fail,
                        // which is what produces the scheduling error under test.
                        ResourceFile[] badResFiles = { new ResourceFile("https://127.0.0.1/foo/bar/baf", "bob.txt") };
                        relTask.ResourceFiles = badResFiles;
                        relTask.Id = "jobRelease";
                    }

                    // add the job to the service
                    unboundJob.Commit();
                }

                // add a trivial task to force the JP
                client.JobOperations.AddTask(jobId, new CloudTask("ForceJobPrep", "cmd /c echo TestOMJobReleaseSchedulingError"));

                // wait for the task to complete
                TaskStateMonitor tsm = client.Utilities.CreateTaskStateMonitor();
                tsm.WaitAll(
                    client.JobOperations.ListTasks(jobId),
                    TaskState.Completed,
                    TimeSpan.FromMinutes(10),
                    additionalBehaviors: new[]
                    {
                        // spam/logging interceptor: dumps node states on every polling request
                        new Protocol.RequestInterceptor((x) =>
                        {
                            this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());
                            // print out the compute node states... we are actually waiting on the compute nodes
                            List<ComputeNode> allComputeNodes = client.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).ToList();
                            this.testOutputHelper.WriteLine(" #compute nodes: " + allComputeNodes.Count);
                            allComputeNodes.ForEach((icn) => { this.testOutputHelper.WriteLine(" computeNode.id: " + icn.Id + ", state: " + icn.State); });
                            this.testOutputHelper.WriteLine("");
                        })
                    }
                );

                // ok terminate job to trigger job release
                client.JobOperations.TerminateJob(jobId, "BUG: Server will throw 500 if I don't provide reason");

                // the victim compute node. pool should have size 1.
                List<ComputeNode> computeNodes = client.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).ToList();
                Assert.Equal(1, computeNodes.Count);

                // now we have a job that should be trying to run the JP
                // poll for the JP to have been run, and it must have a scheduling error
                bool releaseNotCompleted = true;

                // gotta poll to find out when the jp has been run
                while (releaseNotCompleted)
                {
                    List<JobPreparationAndReleaseTaskExecutionInformation> jrStatusList = client.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId).ToList();
                    JobPreparationAndReleaseTaskExecutionInformation prepAndReleaseStatus = jrStatusList.FirstOrDefault();
                    if (prepAndReleaseStatus != null && null != prepAndReleaseStatus.JobReleaseTaskExecutionInformation)
                    {
                        if (JobReleaseTaskState.Completed == prepAndReleaseStatus.JobReleaseTaskExecutionInformation.State)
                        {
                            releaseNotCompleted = false; // we see a JP has been run

                            // now assert the scheduling error
                            Assert.NotNull(prepAndReleaseStatus);
                            Assert.NotNull(prepAndReleaseStatus.JobReleaseTaskExecutionInformation.SchedulingError);

                            // spew the schederror
                            OutputSchedulingError(prepAndReleaseStatus.JobReleaseTaskExecutionInformation.SchedulingError);
                        }
                    }
                    Thread.Sleep(2000);
                    this.testOutputHelper.WriteLine("Job Release tasks still running (waiting for blob dl to timeout).");
                }
            }
            finally
            {
                client.JobOperations.DeleteJob(jobId);
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, LongTestTimeout);
}
/// <summary>
/// Creates a job schedule backed by an auto pool (with a JobManagerTask and an 18-minute
/// recurrence), verifies its creation-time metadata round-trips, then appends a metadata
/// item via Commit and confirms both items are present on re-fetch.
/// </summary>
public void SampleCreateJobScheduleAutoPool()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jsId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-CreateWiAutoPoolTest";
            try
            {
                // Build up the unbound schedule: metadata, auto pool spec, job manager task.
                CloudJobSchedule newJobSchedule = batchCli.JobScheduleOperations.CreateJobSchedule(jsId, null, null);
                {
                    newJobSchedule.Metadata = MakeMetaData("onCreateName", "onCreateValue");

                    PoolInformation poolInformation = new PoolInformation();
                    AutoPoolSpecification iaps = new AutoPoolSpecification();
                    Schedule schedule = new Schedule() { RecurrenceInterval = TimeSpan.FromMinutes(18) };
                    poolInformation.AutoPoolSpecification = iaps;
                    iaps.AutoPoolIdPrefix = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName();
                    iaps.PoolLifetimeOption = Microsoft.Azure.Batch.Common.PoolLifetimeOption.Job;
                    iaps.KeepAlive = false;

                    PoolSpecification ps = new PoolSpecification();
                    iaps.PoolSpecification = ps;
                    ps.TargetDedicatedComputeNodes = 1;
                    ps.VirtualMachineSize = PoolFixture.VMSize;
                    ps.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily);
                    ps.Metadata = MakeMetaData("pusMDIName", "pusMDIValue");

                    // A freshly created schedule starts with no JobSpecification/JobManagerTask.
                    JobSpecification jobSpec = newJobSchedule.JobSpecification;
                    Assert.Null(jobSpec);
                    jobSpec = new JobSpecification(poolInformation);
                    JobManagerTask jobMgr = jobSpec.JobManagerTask;
                    Assert.Null(jobMgr);
                    jobMgr = new JobManagerTask(TestUtilities.GetMyName() + "-JobManagerTest", "hostname");
                    jobMgr.KillJobOnCompletion = false;

                    // set the JobManagerTask on the JobSpecification
                    jobSpec.JobManagerTask = jobMgr;

                    // set the JobSpecifcation on the Job Schedule
                    newJobSchedule.JobSpecification = jobSpec;
                    newJobSchedule.Schedule = schedule;
                    newJobSchedule.Commit();
                }

                CloudJobSchedule jobSchedule = batchCli.JobScheduleOperations.GetJobSchedule(jsId);
                {
                    TestUtilities.DisplayJobScheduleLong(this.testOutputHelper, jobSchedule);

                    List<MetadataItem> mdi = new List<MetadataItem>(jobSchedule.Metadata);

                    // check the values specified for AddJobSchedule are correct.
                    foreach (MetadataItem curIMDI in mdi)
                    {
                        Assert.Equal("onCreateName", curIMDI.Name);
                        Assert.Equal("onCreateValue", curIMDI.Value);
                    }

                    // add metadata items
                    mdi.Add(new MetadataItem("modifiedName", "modifiedValue"));
                    jobSchedule.Metadata = mdi;
                    jobSchedule.Commit();

                    // confirm metadata updated correctly
                    CloudJobSchedule jsUpdated = batchCli.JobScheduleOperations.GetJobSchedule(jsId);
                    {
                        List<MetadataItem> updatedMDI = new List<MetadataItem>(jsUpdated.Metadata);
                        Assert.Equal(2, updatedMDI.Count);
                        Assert.Equal("onCreateName", updatedMDI[0].Name);
                        Assert.Equal("onCreateValue", updatedMDI[0].Value);
                        Assert.Equal("modifiedName", updatedMDI[1].Name);
                        Assert.Equal("modifiedValue", updatedMDI[1].Value);
                    }

                    jobSchedule.Refresh();
                    TestUtilities.DisplayJobScheduleLong(this.testOutputHelper, jobSchedule);
                }
            }
            finally
            {
                // clean up
                TestUtilities.DeleteJobScheduleIfExistsAsync(batchCli, jsId).Wait();
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// End-to-end sample: stages a local file to a compute node via FileToStage,
/// runs a task that consumes it, waits for completion (logging node states via
/// a request interceptor), then verifies the staged file exists on the node and
/// that StagingStorageAccount argument validation throws as expected.
/// </summary>
public void TestSampleWithFilesAndPool()
{
    Action test = () =>
    {
        StagingStorageAccount storageCreds = TestUtilities.GetStorageCredentialsFromEnvironment();
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "SampleWithFilesJob-" + TestUtilities.GetMyName();
            try
            {
                CloudJob quickJob = batchCli.JobOperations.CreateJob();
                quickJob.Id = jobId;
                quickJob.PoolInformation = new PoolInformation() { PoolId = this.poolFixture.PoolId };
                quickJob.Commit();
                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & wc localwords.txt");

                // first we have local files that we want pushed to the compute node before the commandline is invoked
                FileToStage wordsDotText = new FileToStage(Resources.LocalWordsDotText, storageCreds); // use "default" mapping to base name of local file

                myTask.FilesToStage = new List<IFileStagingProvider>();
                myTask.FilesToStage.Add(wordsDotText);

                // add the task to the job
                var artifacts = boundJob.AddTask(myTask);
                var specificArtifact = artifacts[typeof(FileToStage)];
                SequentialFileStagingArtifact sfsa = specificArtifact as SequentialFileStagingArtifact;
                Assert.NotNull(sfsa);

                // add a million more tasks...

                // test to ensure the task is read only
                TestUtilities.AssertThrows<InvalidOperationException>(() => myTask.FilesToStage = new List<IFileStagingProvider>());

                // Open the new Job as bound.
                CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);

                // wait for the task to complete
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();
                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    TimeSpan.FromMinutes(10),
                    controlParams: null,
                    additionalBehaviors: new[]
                    {
                        // spam/logging interceptor
                        new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) =>
                        {
                            this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());
                            try
                            {
                                // print out the compute node states... we are actually waiting on the compute nodes
                                List<ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();
                                this.testOutputHelper.WriteLine(" #compute nodes: " + allComputeNodes.Count);
                                allComputeNodes.ForEach(
                                    (icn) =>
                                    {
                                        this.testOutputHelper.WriteLine(" computeNode.id: " + icn.Id + ", state: " + icn.State);
                                    });
                            }
                            catch (Exception ex)
                            {
                                // there is a race between the pool-life-job and the end of the job.. and the ListComputeNodes above
                                Assert.True(false, "SampleWithFilesAndPool probably can ignore this if its pool not found: " + ex.ToString());
                            }
                        })
                    });

                List<CloudTask> tasks = boundJob.ListTasks(null).ToList();
                CloudTask myCompletedTask = tasks[0];

                foreach (CloudTask curTask in tasks)
                {
                    this.testOutputHelper.WriteLine("Task Id: " + curTask.Id + ", state: " + curTask.State);
                }

                boundPool.Refresh();
                this.testOutputHelper.WriteLine("Pool Id: " + boundPool.Id + ", state: " + boundPool.State);

                string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                this.testOutputHelper.WriteLine("StdOut: ");
                this.testOutputHelper.WriteLine(stdOut);
                this.testOutputHelper.WriteLine("StdErr: ");
                this.testOutputHelper.WriteLine(stdErr);

                this.testOutputHelper.WriteLine("Task Files:");
                foreach (NodeFile curFile in myCompletedTask.ListNodeFiles(recursive: true))
                {
                    this.testOutputHelper.WriteLine(" FilePath: " + curFile.Path);
                }

                // confirm the files are there
                // (fixed assertion message typo: "mising" -> "missing")
                Assert.True(FoundFile("localwords.txt", myCompletedTask.ListNodeFiles(recursive: true)), "missing file: localwords.txt");

                // test validation of StagingStorageAccount
                TestUtilities.AssertThrows<ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: " ", storageAccountKey: "key", blobEndpoint: "blob"); });
                TestUtilities.AssertThrows<ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: " ", blobEndpoint: "blob"); });
                TestUtilities.AssertThrows<ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: "key", blobEndpoint: ""); });

                if (null != sfsa)
                {
                    // TODO: delete the container!
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Verifies that individual properties of a bound CloudJobSchedule
/// (Schedule, JobSpecification.Priority, JobManagerTask.CommandLine,
/// PoolInformation, Metadata) can each be changed and committed one at a
/// time, checking full schedule correctness after every Commit().
/// </summary>
public void TestBoundJobScheduleCommit()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobScheduleId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-TestBoundJobScheduleCommit";
            try
            {
                //
                // Create the job schedule
                //
                const int jobSchedulePriority = 5;
                const string jobManagerId = "TestBoundJobScheduleCommit";
                const string jobManagerCommandLine = "ping 127.0.0.1 -n 500";
                IList<MetadataItem> metadata = new List<MetadataItem> { new MetadataItem("key1", "test1"), new MetadataItem("key2", "test2") };

                CloudJobSchedule jobSchedule = batchCli.JobScheduleOperations.CreateJobSchedule(jobScheduleId, null, null);
                TimeSpan firstRecurrenceInterval = TimeSpan.FromMinutes(2);
                jobSchedule.Schedule = new Schedule() { RecurrenceInterval = firstRecurrenceInterval };
                PoolInformation poolInfo = new PoolInformation() { PoolId = this.poolFixture.PoolId };
                jobSchedule.JobSpecification = new JobSpecification(poolInfo)
                {
                    Priority = jobSchedulePriority,
                    JobManagerTask = new JobManagerTask(jobManagerId, jobManagerCommandLine)
                };
                jobSchedule.Metadata = metadata;

                this.testOutputHelper.WriteLine("Initial job schedule commit()");
                jobSchedule.Commit();

                //Get the bound job schedule
                CloudJobSchedule boundJobSchedule = batchCli.JobScheduleOperations.GetJobSchedule(jobScheduleId);

                //Ensure the job schedule is structured as expected
                AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, jobSchedulePriority, jobManagerId, jobManagerCommandLine, firstRecurrenceInterval, metadata);

                //Update the bound job schedule schedule
                TimeSpan recurrenceInterval = TimeSpan.FromMinutes(5);
                boundJobSchedule.Schedule = new Schedule() { RecurrenceInterval = recurrenceInterval };
                this.testOutputHelper.WriteLine("Updating JobSchedule Schedule");
                boundJobSchedule.Commit();

                //Ensure the job schedule is correct after commit
                AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, jobSchedulePriority, jobManagerId, jobManagerCommandLine, recurrenceInterval, metadata);

                //Update the bound job schedule priority
                const int newJobSchedulePriority = 1;
                boundJobSchedule.JobSpecification.Priority = newJobSchedulePriority;
                this.testOutputHelper.WriteLine("Updating JobSpecification.Priority");
                boundJobSchedule.Commit();

                //Ensure the job schedule is correct after commit
                AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, newJobSchedulePriority, jobManagerId, jobManagerCommandLine, recurrenceInterval, metadata);

                //Update the bound job schedule job manager commandline
                const string newJobManagerCommandLine = "ping 127.0.0.1 -n 150";
                boundJobSchedule.JobSpecification.JobManagerTask.CommandLine = newJobManagerCommandLine;
                this.testOutputHelper.WriteLine("Updating JobSpecification.JobManagerTask.CommandLine");
                boundJobSchedule.Commit();

                //Ensure the job schedule is correct after commit
                AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, newJobSchedulePriority, jobManagerId, newJobManagerCommandLine, recurrenceInterval, metadata);

                //Update the bound job schedule PoolInformation
                const string newPoolId = "TestPool";
                boundJobSchedule.JobSpecification.PoolInformation = new PoolInformation() { PoolId = newPoolId };
                this.testOutputHelper.WriteLine("Updating PoolInformation");
                boundJobSchedule.Commit();

                //Ensure the job schedule is correct after commit
                AssertJobScheduleCorrectness(
                    batchCli.JobScheduleOperations,
                    boundJobSchedule,
                    newPoolId,
                    newJobSchedulePriority,
                    jobManagerId,
                    newJobManagerCommandLine,
                    recurrenceInterval,
                    metadata);

                //Update the bound job schedule Metadata
                IList<MetadataItem> newMetadata = new List<MetadataItem> { new MetadataItem("Object", "Model") };
                boundJobSchedule.Metadata = newMetadata;
                this.testOutputHelper.WriteLine("Updating Metadata");
                boundJobSchedule.Commit();

                //Ensure the job schedule is correct after commit
                AssertJobScheduleCorrectness(
                    batchCli.JobScheduleOperations,
                    boundJobSchedule,
                    newPoolId,
                    newJobSchedulePriority,
                    jobManagerId,
                    newJobManagerCommandLine,
                    recurrenceInterval,
                    newMetadata);
            }
            finally
            {
                // Note: unconditional delete (not DeleteIfExists) — will throw if creation failed earlier.
                batchCli.JobScheduleOperations.DeleteJobSchedule(jobScheduleId);
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 1433069: verifies that a bound CloudJob's
/// Priority, OnAllTasksComplete, PoolInformation.PoolId (after disabling the
/// job), and Constraints can each be updated via Commit().
/// </summary>
public void Bug1433069TestBoundJobCommit()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jobId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-TestBoundJobCommit";
            try
            {
                //
                // Create the job
                //
                CloudJob cloudJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                cloudJob.PoolInformation = new PoolInformation() { PoolId = this.poolFixture.PoolId };

                this.testOutputHelper.WriteLine("Initial job schedule commit()");
                cloudJob.Commit();

                //Get the job
                CloudJob refreshableJob = batchCli.JobOperations.GetJob(jobId);

                //Update the bound job priority
                const int newJobPriority = 5;
                OnAllTasksComplete newOnAllTasksComplete = OnAllTasksComplete.NoAction;

                this.testOutputHelper.WriteLine("Job priority is: {0}", refreshableJob.Priority);
                refreshableJob.Priority = newJobPriority;
                refreshableJob.OnAllTasksComplete = newOnAllTasksComplete;
                refreshableJob.Commit();

                AssertJobCorrectness(batchCli.JobOperations, jobId, ref refreshableJob, this.poolFixture.PoolId, newJobPriority, null);

                //Update the bound job pool name
                //Must disable the job first before updating its pool
                refreshableJob.Disable(DisableJobOption.Terminate);

                //Wait for job to reach disabled state (could go to Disabling for a bit)
                //TODO: Use a utilities wait helper here
                DateTime jobDisabledStateWaitStartTime = DateTime.UtcNow;
                TimeSpan jobDisabledTimeout = TimeSpan.FromSeconds(120);

                // Poll every 10s until Disabled; fail the test after the 120s timeout elapses.
                while (refreshableJob.State != JobState.Disabled)
                {
                    this.testOutputHelper.WriteLine("Bug1433069TestBoundJobCommit: sleeping for (refreshableJob.State != JobState.Disabled)");
                    Thread.Sleep(TimeSpan.FromSeconds(10));
                    refreshableJob = batchCli.JobOperations.GetJob(jobId);

                    if (DateTime.UtcNow > jobDisabledStateWaitStartTime.Add(jobDisabledTimeout))
                    {
                        Assert.False(true, "Timed out waiting for job to go to disabled state");
                    }
                }

                const string newPoolId = "testPool";
                refreshableJob.PoolInformation.PoolId = newPoolId;
                refreshableJob.Commit();

                AssertJobCorrectness(batchCli.JobOperations, jobId, ref refreshableJob, newPoolId, newJobPriority, null);

                //Enable the job again
                refreshableJob.Enable();

                //Update the bound job constraints
                JobConstraints newJobConstraints = new JobConstraints(TimeSpan.FromSeconds(200), 19);
                refreshableJob.Constraints = newJobConstraints;
                refreshableJob.Commit();

                AssertJobCorrectness(batchCli.JobOperations, jobId, ref refreshableJob, newPoolId, newJobPriority, newJobConstraints);
            }
            finally
            {
                // Note: unconditional delete — will throw if job creation failed earlier.
                batchCli.JobOperations.DeleteJob(jobId);
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 1433008: the Schedule of a job schedule must be
/// settable both on a newly created (unbound) CloudJobSchedule and on a bound
/// one; DoNotRunUntil is checked after the initial commit and after an update.
/// </summary>
public void Bug1433008JobScheduleScheduleNewable()
{
    Action test = () =>
    {
        using (BatchClient client = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobScheduleId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-Bug1433008JobScheduleScheduleNewable";
            try
            {
                DateTime initialDoNotRunUntil = DateTime.UtcNow.AddYears(1);

                // Build an unbound job schedule whose jobs run on an auto pool.
                CloudJobSchedule unboundJobSchedule = client.JobScheduleOperations.CreateJobSchedule(jobScheduleId, null, null);

                PoolSpecification poolSpec = new PoolSpecification();
                poolSpec.TargetDedicatedComputeNodes = 1;
                poolSpec.VirtualMachineSize = PoolFixture.VMSize;
                poolSpec.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily);

                AutoPoolSpecification autoPoolSpec = new AutoPoolSpecification();
                autoPoolSpec.PoolSpecification = poolSpec;
                autoPoolSpec.AutoPoolIdPrefix = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName();
                autoPoolSpec.PoolLifetimeOption = Microsoft.Azure.Batch.Common.PoolLifetimeOption.Job;
                autoPoolSpec.KeepAlive = false;

                JobSpecification jobSpecification = new JobSpecification(new PoolInformation() { AutoPoolSpecification = autoPoolSpec });

                // Set the schedule on the unbound object, then commit.
                Schedule initialSchedule = new Schedule();
                initialSchedule.DoNotRunUntil = initialDoNotRunUntil;
                unboundJobSchedule.Schedule = initialSchedule;
                unboundJobSchedule.JobSpecification = jobSpecification;
                unboundJobSchedule.Commit();

                // Fetch the bound schedule and confirm the original DoNotRunUntil round-tripped.
                CloudJobSchedule boundJobSchedule = client.JobScheduleOperations.GetJobSchedule(jobScheduleId);
                TestUtilities.DisplayJobScheduleLong(this.testOutputHelper, boundJobSchedule);
                Assert.Equal(initialDoNotRunUntil, boundJobSchedule.Schedule.DoNotRunUntil);

                // Now mutate the schedule on the bound object, commit, refresh, and re-check.
                DateTime updatedDoNotRunUntil = DateTime.UtcNow.AddYears(2);
                boundJobSchedule.Schedule.DoNotRunUntil = updatedDoNotRunUntil;
                boundJobSchedule.Commit();
                boundJobSchedule.Refresh();

                TestUtilities.DisplayJobScheduleLong(this.testOutputHelper, boundJobSchedule);
                Assert.Equal(updatedDoNotRunUntil, boundJobSchedule.Schedule.DoNotRunUntil);
            }
            finally
            {
                // clean up
                TestUtilities.DeleteJobScheduleIfExistsAsync(client, jobScheduleId).Wait();
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 1665834: exercises TaskStateMonitor/WhenAll with an
/// ODATAMonitorControl whose DelayBetweenDataFetch is set below the minimum,
/// verifying the 500ms floor is enforced and that WhenAll completes once all
/// tasks reach Completed.
/// </summary>
public void Bug1665834TaskStateMonitor()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jobId = "Bug1665834Job-" + TestUtilities.GetMyName();
            try
            {
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                unboundJob.Commit();

                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                // add some noise tasks
                for (int j = 0; j < 5; j++)
                {
                    CloudTask unboundTaskQuick = new CloudTask((10 + j).ToString(), "cmd /c hostname");
                    boundJob.AddTask(unboundTaskQuick);
                }

                System.Threading.Thread.Sleep(5000);

                // wait for fast tasks to complete
                // (manual poll loop: re-lists tasks until none are non-Completed, logging pool/node state on each pass)
                {
                    bool repeat = true;
                    while (repeat)
                    {
                        CloudPool boundPool = batchCli.PoolOperations.GetPool(this.poolFixture.PoolId);
                        repeat = false;
                        foreach (CloudTask curTask in boundJob.ListTasks())
                        {
                            if (curTask.State != Microsoft.Azure.Batch.Common.TaskState.Completed)
                            {
                                repeat = true;
                                this.testOutputHelper.WriteLine("Manual Wait Task Id: " + curTask.Id + ", state = " + curTask.State);
                                this.testOutputHelper.WriteLine(" poolstate: " + boundPool.State + ", currentdedicated: " + boundPool.CurrentDedicatedComputeNodes);
                                this.testOutputHelper.WriteLine(" compute nodes:");
                                foreach (ComputeNode curComputeNode in boundPool.ListComputeNodes())
                                {
                                    this.testOutputHelper.WriteLine(" computeNode.Id: " + curComputeNode.Id + ", state: " + curComputeNode.State);
                                }
                            }
                        }
                    }
                }

                // add some longer running tasks
                this.testOutputHelper.WriteLine("Adding longer running tasks");
                for (int i = 0; i < 15; i++)
                {
                    // Long task ids exercise id handling; command pings ~4 seconds.
                    CloudTask unboundTask = new CloudTask(i.ToString() + "_a234567890a234567890a234567890a234567890a234567890a234567890", "cmd /c ping 127.0.0.1 -n 4");
                    boundJob.AddTask(unboundTask);
                }

                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor tsm = utilities.CreateTaskStateMonitor();
                IPagedEnumerable<CloudTask> taskList = boundJob.ListTasks();
                ODATAMonitorControl odmc = new ODATAMonitorControl();

                // try to set really low delay
                odmc.DelayBetweenDataFetch = new TimeSpan(0);

                // confirm the floor is enforced
                Assert.Equal(500, odmc.DelayBetweenDataFetch.Milliseconds);

                this.testOutputHelper.WriteLine("Calling TaskStateMonitor.WaitAll(). This will take a while.");
                TimeSpan timeToWait = TimeSpan.FromMinutes(5);
                Task whenAll = tsm.WhenAll(taskList, Microsoft.Azure.Batch.Common.TaskState.Completed, timeToWait, controlParams: odmc);

                //This could throw, if it does the test will fail, which is what we want
                whenAll.Wait();

                foreach (CloudTask curTask in boundJob.ListTasks())
                {
                    Assert.Equal(TaskState.Completed, curTask.State);
                }
            }
            finally
            {
                // cleanup
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };
    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Regression test for Bug 230385: node files belonging to a task can be
/// deleted both through the NodeFile object model (file.Delete / directory.Delete)
/// and through JobOperations.DeleteNodeFile, including the recursive-directory
/// cases (non-recursive delete of a non-empty directory must throw).
/// </summary>
public void Bug230385SupportDeleteNodeFileByTask()
{
    void test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());

        // Fixed: job id previously read "Bug230285Job-", inconsistent with the bug number in the method name.
        string jobId = "Bug230385Job-" + TestUtilities.GetMyName();
        try
        {
            const string taskId = "hiWorld";
            const string directoryCreationTaskId1 = "dirTask1";
            const string directoryCreationTaskId2 = "dirTask2";
            const string directoryNameOne = "Foo";
            const string directoryNameTwo = "Bar";
            const string directory2PathOnNode = "wd/" + directoryNameTwo;

            //
            // Create the job
            //
            CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
            unboundJob.PoolInformation.PoolId = poolFixture.PoolId;
            unboundJob.Commit();

            CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

            CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");
            CloudTask directoryCreationTask1 = new CloudTask(directoryCreationTaskId1, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameOne));
            CloudTask directoryCreationTask2 = new CloudTask(directoryCreationTaskId2, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameTwo));

            boundJob.AddTask(myTask);
            boundJob.AddTask(directoryCreationTask1);
            boundJob.AddTask(directoryCreationTask2);

            testOutputHelper.WriteLine("Initial job commit()");

            //
            // Wait for task to go to completion
            //
            Utilities utilities = batchCli.Utilities;
            TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();
            taskStateMonitor.WaitAll(
                boundJob.ListTasks(),
                Microsoft.Azure.Batch.Common.TaskState.Completed,
                TimeSpan.FromMinutes(3));

            //
            // NodeFile delete
            //

            //Delete single file
            NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);
            file.Delete();

            //Ensure delete succeeded
            TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName));

            //Delete directory
            NodeFile directory = batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId1, recursive: true).First(item => item.Path.Contains(directoryNameOne));
            Assert.True(directory.IsDirectory);
            // Non-recursive delete of a non-empty directory must fail.
            TestUtilities.AssertThrows<BatchException>(() => directory.Delete(recursive: false));
            directory.Delete(recursive: true);

            Assert.Null(batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId1, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameOne)));

            //
            // JobScheduleOperations delete task file
            //
            batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName);
            batchCli.JobOperations.DeleteNodeFile(jobId, taskId, Constants.StandardErrorFileName);

            //Ensure delete succeeded
            TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName));

            //Delete directory
            directory = batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId2, recursive: true).First(item => item.Path.Contains(directoryNameTwo));
            Assert.True(directory.IsDirectory);
            TestUtilities.AssertThrows<BatchException>(() => batchCli.JobOperations.DeleteNodeFile(jobId, directoryCreationTaskId2, directory2PathOnNode, recursive: false));
            batchCli.JobOperations.DeleteNodeFile(jobId, directoryCreationTaskId2, directory2PathOnNode, recursive: true);

            Assert.Null(batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId2, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameTwo)));
        }
        finally
        {
            TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
        }
    }

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}