public async Task TestRequestWhichDoesSupportSelect()
{
    // Verify the select clause set on the detail level is propagated to the protocol request options.
    using (BatchClient client = await BatchClient.OpenAsync(ClientUnitTestCommon.CreateDummySharedKeyCredential()))
    {
        ODATADetailLevel detailLevel = new ODATADetailLevel(selectClause: "foo");
        bool interceptorHit = false;

        BatchClientBehavior behavior = new Protocol.RequestInterceptor(request =>
        {
            PoolGetBatchRequest poolGetRequest = request as PoolGetBatchRequest;
            poolGetRequest.ServiceRequestFunc = t =>
            {
                // The detail level's select clause must have been copied onto the request options.
                Assert.Equal(detailLevel.SelectClause, poolGetRequest.Options.Select);
                interceptorHit = true; //Ensure the interceptor was hit

                return Task.FromResult(new AzureOperationResponse<CloudPool, PoolGetHeaders> { Body = new CloudPool() });
            };
        });

        const string dummyPoolId = "dummy";
        await client.PoolOperations.GetPoolAsync(dummyPoolId, detailLevel, new[] { behavior });

        Assert.True(interceptorHit);
    }
}
public void CannotModifyUsesTaskDependenciesOnAJobScheduleAfterItHasBeenCommitted()
{
    const bool usesTaskDependencies = true;

    using BatchClient client = ClientUnitTestCommon.CreateDummyClient();

    // Fake out the add-job-schedule call so no service round trip happens.
    Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
    {
        var request = (Protocol.BatchRequests.JobScheduleAddBatchRequest)baseRequest;
        request.ServiceRequestFunc = token => Task.FromResult(
            new AzureOperationHeaderResponse<Models.JobScheduleAddHeaders>
            {
                Response = new HttpResponseMessage(HttpStatusCode.Created)
            });
    });

    Microsoft.Azure.Batch.CloudJobSchedule cloudJobSchedule = client.JobScheduleOperations.CreateJobSchedule();
    Microsoft.Azure.Batch.JobSpecification jobSpec = new Microsoft.Azure.Batch.JobSpecification(poolInformation: null)
    {
        UsesTaskDependencies = usesTaskDependencies
    };
    cloudJobSchedule.JobSpecification = jobSpec;
    cloudJobSchedule.Commit(new List<BatchClientBehavior> { interceptor });

    // writing isn't allowed for a CloudJobSchedule.JobSpecification.UsesTaskDependencies that is in an invalid state.
    Assert.Throws<InvalidOperationException>(() => cloudJobSchedule.JobSpecification.UsesTaskDependencies = false);
}
public async Task TestBatchRequestCannotBeModifiedAfterExecutionStarted()
{
    using (BatchClient batchClient = ClientUnitTestCommon.CreateDummyClient())
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(req =>
        {
            PoolAddBatchRequest addPoolRequest = req as PoolAddBatchRequest;
            addPoolRequest.ServiceRequestFunc = token =>
            {
                // By the time the service request function runs, execution has started,
                // so every settable property of the batch request must reject writes.
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.CancellationToken = CancellationToken.None);
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.Options = null);
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.RetryPolicy = null);
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.ServiceRequestFunc = null);
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.Timeout = TimeSpan.FromSeconds(0));
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.ClientRequestIdProvider = null);
                Assert.Throws<InvalidOperationException>(() => addPoolRequest.Parameters = null);

                return Task.FromResult(new AzureOperationHeaderResponse<Protocol.Models.PoolAddHeaders>());
            };
        });

        CloudPool pool = batchClient.PoolOperations.CreatePool(
            "dummy",
            "small",
            default(CloudServiceConfiguration),
            targetDedicatedComputeNodes: 0);
        await pool.CommitAsync(additionalBehaviors: new[] { interceptor });
    }
}
public async Task TestDefaultBatchRequestTimeoutSet()
{
    TimeSpan observedTimeout = TimeSpan.MinValue;

    using (BatchClient client = ClientUnitTestCommon.CreateDummyClient())
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(req =>
        {
            // Capture the timeout the library applied to the outgoing request.
            observedTimeout = req.Timeout;

            var castRequest = (Protocol.BatchRequest<Protocol.Models.JobGetOptions, AzureOperationResponse<Protocol.Models.CloudJob, Protocol.Models.JobGetHeaders>>)req;
            castRequest.ServiceRequestFunc = token => Task.FromResult(
                new AzureOperationResponse<Protocol.Models.CloudJob, Protocol.Models.JobGetHeaders>
                {
                    Body = new Protocol.Models.CloudJob()
                });
        });

        await client.JobOperations.GetJobAsync("foo", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        Assert.Equal(Constants.DefaultSingleRestRequestClientTimeout, observedTimeout);
    }
}
/// <summary>
/// Builds an interceptor that never forwards the request to the service. Instead it blocks
/// until the request's cancellation token fires, then throws a
/// <see cref="BatchUnitTestCancellationException"/> carrying the number of requests observed
/// so far and how long cancellation took from interceptor creation.
/// </summary>
private static Protocol.RequestInterceptor CreateRequestInterceptorForCancellationMonitoring()
{
    DateTime startTime = DateTime.UtcNow;
    int observedRequestCount = 0;

    Protocol.RequestInterceptor requestInterceptor = new Protocol.RequestInterceptor(req =>
    {
        // RunContinuationsAsynchronously prevents SetResult from running the blocked
        // continuation inline on the thread that signals the cancellation token.
        TaskCompletionSource<TimeSpan> source =
            new TaskCompletionSource<TimeSpan>(TaskCreationOptions.RunContinuationsAsynchronously);

        req.CancellationToken.Register(() =>
        {
            DateTime endTime = DateTime.UtcNow;
            TimeSpan duration = endTime.Subtract(startTime);
            source.SetResult(duration);
        });

        Interlocked.Increment(ref observedRequestCount);

        // Deliberately block until cancellation is observed.
        TimeSpan cancellationDuration = source.Task.Result;

        //Force an exception -- so the real request is never called
        throw new BatchUnitTestCancellationException(observedRequestCount, cancellationDuration);
    });

    return requestInterceptor;
}
public void CanReadUsesTaskDependenciesFromABoundCloudJobScheduleTest()
{
    const string jobId = "id-123";
    const bool usesTaskDependencies = true;

    using (BatchClient client = ClientUnitTestCommon.CreateDummyClient())
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
            baseRequest =>
            {
                var request = (Protocol.BatchRequest<Models.JobScheduleGetOptions, AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>>)baseRequest;
                request.ServiceRequestFunc = token =>
                {
                    var response = new AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>
                    {
                        // Use the test constant (not a hard-coded literal) so the
                        // assertion below is tied to the value the fake service returned.
                        Body = new Protocol.Models.CloudJobSchedule(
                            jobId,
                            schedule: new Protocol.Models.Schedule(),
                            jobSpecification: new Protocol.Models.JobSpecification { UsesTaskDependencies = usesTaskDependencies })
                    };
                    return Task.FromResult(response);
                };
            });

        // The schedule returned by the (faked) Get call is bound.
        Microsoft.Azure.Batch.CloudJobSchedule boundJobSchedule = client.JobScheduleOperations.GetJobSchedule(jobId, additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        Assert.Equal(usesTaskDependencies, boundJobSchedule.JobSpecification.UsesTaskDependencies);
    }
}
public void CannotSetUsesTaskDependenciesFromABoundCloudJob()
{
    const string jobId = "id-123";
    const bool usesTaskDependencies = true;

    // Bound
    using (BatchClient client = ClientUnitTestCommon.CreateDummyClient())
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.JobGetOptions, AzureOperationResponse<Models.CloudJob, Models.JobGetHeaders>>)baseRequest;
            request.ServiceRequestFunc = token => Task.FromResult(
                new AzureOperationResponse<Models.CloudJob, Models.JobGetHeaders>
                {
                    Body = new Protocol.Models.CloudJob { UsesTaskDependencies = usesTaskDependencies }
                });
        });

        var boundJob = client.JobOperations.GetJob(jobId, additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // The value is readable on a bound job...
        Assert.Equal(usesTaskDependencies, boundJob.UsesTaskDependencies);
        // ...but writing it after binding is rejected.
        Assert.Throws<InvalidOperationException>(() => boundJob.UsesTaskDependencies = false);
    }
}
public void GetPoolResizeError()
{
    // Canned autoscale-evaluation error returned by the faked Get Pool call.
    var autoScaleRunError = new Models.AutoScaleRunError
    {
        Code = "InsufficientSampleData",
        Message = "Autoscale evaluation failed due to insufficient sample data",
        Values = new List<Models.NameValuePair>
        {
            new Models.NameValuePair
            {
                Name = "Message",
                Value = "Line 1, Col 24: Insufficient data from data set: $RunningTasks wanted 100%, received 0%"
            }
        }
    };
    var autoScaleError = new Models.AutoScaleRun { Error = autoScaleRunError };

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.PoolGetOptions, AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>>)baseRequest;

            // No async/await needed -- return the canned response directly.
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>
                {
                    Body = new Models.CloudPool
                    {
                        DisplayName = "batch-test",
                        AutoScaleFormula = "$RunningTasks.GetSample(10 * TimeInterval_Second, 0 * TimeInterval_Second, 100);",
                        AutoScaleRun = autoScaleError,
                        EnableAutoScale = true,
                    }
                };
                return Task.FromResult(response);
            };
        });

        var pool = client.PoolOperations.GetPool("batch-test", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // xUnit convention: expected value first, actual second.
        Assert.Equal("batch-test", pool.DisplayName);
        Assert.Equal(true, pool.AutoScaleEnabled);
        Assert.Equal("InsufficientSampleData", pool.AutoScaleRun.Error.Code);
        Assert.Equal("Autoscale evaluation failed due to insufficient sample data", pool.AutoScaleRun.Error.Message);
        Assert.Equal("Message", pool.AutoScaleRun.Error.Values.First().Name);
        Assert.Equal("Line 1, Col 24: Insufficient data from data set: $RunningTasks wanted 100%, received 0%", pool.AutoScaleRun.Error.Values.First().Value);
    }
}
public async Task CreateJobScheduleWithApplicationPackageReferences()
{
    const string applicationId = "blender.exe";
    const string version = "blender";
    const string jobId = "mock-job";

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        // Fake a Get Job Schedule response whose auto-pool spec carries one app package reference.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
            baseRequest =>
            {
                var request = (Protocol.BatchRequest<Models.JobScheduleGetOptions, AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>>)baseRequest;
                request.ServiceRequestFunc = token =>
                {
                    var response = new AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>
                    {
                        Body = new Models.CloudJobSchedule
                        {
                            JobSpecification = new Protocol.Models.JobSpecification
                            {
                                PoolInfo = new Models.PoolInformation
                                {
                                    AutoPoolSpecification = new Models.AutoPoolSpecification
                                    {
                                        Pool = new Models.PoolSpecification
                                        {
                                            ApplicationPackageReferences = new[]
                                            {
                                                new Protocol.Models.ApplicationPackageReference
                                                {
                                                    ApplicationId = applicationId,
                                                    Version = version,
                                                }
                                            },
                                            MaxTasksPerNode = 4
                                        }
                                    }
                                }
                            }
                        }
                    };
                    return Task.FromResult(response);
                };
            });

        Microsoft.Azure.Batch.CloudJobSchedule cloudJobSchedule = await client.JobScheduleOperations.GetJobScheduleAsync(jobId, additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        var reference = cloudJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First();
        // xUnit convention: expected value first, actual second.
        Assert.Equal(applicationId, reference.ApplicationId);
        Assert.Equal(version, reference.Version);
    }
}
public void SetUpdateJobConditionalHeader()
{
    void test()
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientFromEnvironmentAsync().Result)
        {
            string jobId = "JobConditionalHeaders-" + TestUtilities.GetMyName();
            try
            {
                PoolInformation poolInfo = new PoolInformation() { PoolId = "Fake" };
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);
                unboundJob.Commit();

                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                string capturedEtag1 = boundJob.ETag;
                this.testOutputHelper.WriteLine("Etag is: {0}", capturedEtag1);
                Assert.NotNull(capturedEtag1);

                boundJob.Constraints = new JobConstraints(TimeSpan.FromMinutes(60), 0);

                // Stamp every job-update call with the ETag captured above.
                BatchClientBehavior updateInterceptor = new Protocol.RequestInterceptor(req =>
                {
                    if (req.Options is Protocol.Models.JobUpdateOptions typedParams)
                    {
                        typedParams.IfMatch = capturedEtag1;
                    }
                });

                //Update bound job with if-match header, it should succeed
                boundJob.Commit(additionalBehaviors: new[] { updateInterceptor });

                boundJob = batchCli.JobOperations.GetJob(jobId);
                boundJob.Constraints = new JobConstraints(TimeSpan.FromMinutes(30), 1);

                //Update bound job with if-match header, it should fail
                Exception e = TestUtilities.AssertThrows<BatchException>(() => boundJob.Commit(additionalBehaviors: new[] { updateInterceptor }));
                TestUtilities.AssertIsBatchExceptionAndHasCorrectAzureErrorCode(e, BatchErrorCodeStrings.ConditionNotMet, this.testOutputHelper);
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    }

    SynchronizationContextHelper.RunTest(test, LongTestTimeout);
}
public async Task GetJobScheduleWithApplicationPackageReferences()
{
    const string applicationId = "app-1";
    const string version = "1.0";

    using (BatchClient client = ClientUnitTestCommon.CreateDummyClient())
    {
        // Fake a Get Job Schedule response whose auto-pool spec carries one app package reference.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.JobScheduleGetOptions, AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>>)baseRequest;
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>
                {
                    Body = new Models.CloudJobSchedule
                    {
                        JobSpecification = new Protocol.Models.JobSpecification
                        {
                            PoolInfo = new Models.PoolInformation
                            {
                                AutoPoolSpecification = new Protocol.Models.AutoPoolSpecification
                                {
                                    Pool = new Models.PoolSpecification
                                    {
                                        ApplicationPackageReferences = new List<Protocol.Models.ApplicationPackageReference>
                                        {
                                            new Protocol.Models.ApplicationPackageReference { ApplicationId = applicationId, Version = version }
                                        }
                                    }
                                }
                            }
                        }
                    }
                };
                return Task.FromResult(response);
            };
        });

        Microsoft.Azure.Batch.CloudJobSchedule jobSchedule = await client.JobScheduleOperations.GetJobScheduleAsync("test", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        Microsoft.Azure.Batch.ApplicationPackageReference apr = jobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First();
        // xUnit convention: expected value first, actual second.
        Assert.Equal(applicationId, apr.ApplicationId);
        Assert.Equal(version, apr.Version);
    }
}
public async Task Bug1360227_AddTasksBatchRequestFailure(bool useJobOperations)
{
    const string testName = "Bug1360227_AddTasksBatchRequestFailure";

    Random rand = new Random();
    object randLock = new object();

    // Randomly fail ~70% of TaskAddCollection calls with a simulated network error;
    // the remainder flow through to the original service request function.
    BatchClientBehavior customBehavior = new Protocol.RequestInterceptor(request =>
    {
        if (request is Protocol.BatchRequests.TaskAddCollectionBatchRequest typedRequest)
        {
            var originalServiceRequestFunction = typedRequest.ServiceRequestFunc;
            typedRequest.ServiceRequestFunc = token =>
            {
                lock (randLock)
                {
                    double d = rand.NextDouble();
                    if (d > 0.3)
                    {
                        throw new HttpRequestException("Simulating a network problem");
                    }
                    return originalServiceRequestFunction(token);
                }
            };
        }
    });

    await SynchronizationContextHelper.RunTestAsync(async () =>
    {
        using (BatchClient batchCli = await TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment(), addDefaultRetryPolicy: false))
        {
            batchCli.JobOperations.CustomBehaviors.Add(customBehavior);

            BatchClientParallelOptions parallelOptions = new BatchClientParallelOptions() { MaxDegreeOfParallelism = 2 };

            // With no retry policy, the injected failures must surface as a ParallelOperationsException
            // whose inner exception is the simulated HttpRequestException.
            var exception = await TestUtilities.AssertThrowsAsync<ParallelOperationsException>(
                async () => await this.AddTasksSimpleTestAsync(batchCli, testName, 397, parallelOptions, useJobOperations: useJobOperations).ConfigureAwait(false)
                ).ConfigureAwait(false);
            Assert.IsType<HttpRequestException>(exception.InnerException);
        }
    }, TestTimeout);
}
public void GetPoolStartTask()
{
    // Canned start task the faked Get Pool call will return.
    var startTask = new Protocol.Models.StartTask
    {
        CommandLine = "-start",
        EnvironmentSettings = new[] { new Models.EnvironmentSetting { Name = "windows", Value = "foo" } },
        MaxTaskRetryCount = 3,
        RunElevated = false,
        WaitForSuccess = false
    };

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.PoolGetOptions, AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>>)baseRequest;

            // No async/await needed -- return the canned response directly.
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>
                {
                    Body = new Models.CloudPool
                    {
                        DisplayName = "batch-test",
                        StartTask = startTask,
                    }
                };
                return Task.FromResult(response);
            };
        });

        var pool = client.PoolOperations.GetPool("batch-test", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // xUnit convention: expected value first, actual second.
        Assert.Equal("batch-test", pool.DisplayName);
        Assert.Equal("-start", pool.StartTask.CommandLine);
        Assert.Equal("windows", pool.StartTask.EnvironmentSettings.FirstOrDefault().Name);
        Assert.Equal("foo", pool.StartTask.EnvironmentSettings.FirstOrDefault().Value);
        Assert.Equal(3, pool.StartTask.MaxTaskRetryCount);
        Assert.Equal(false, pool.StartTask.RunElevated);
        Assert.Equal(false, pool.StartTask.WaitForSuccess);
    }
}
public void GetPoolWithApplicationReferencesTest()
{
    const string applicationId = "blender.exe";
    const string version = "blender";

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        // Fake a Get Pool response carrying one application package reference.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
            baseRequest =>
            {
                var request = (Protocol.BatchRequest<Models.PoolGetOptions, AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>>)baseRequest;
                request.ServiceRequestFunc = token =>
                {
                    var response = new AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>
                    {
                        Body = new Models.CloudPool
                        {
                            ApplicationPackageReferences = new[] { new Protocol.Models.ApplicationPackageReference { ApplicationId = applicationId, Version = version } },
                            CurrentDedicated = 4,
                            CloudServiceConfiguration = new Models.CloudServiceConfiguration() { CurrentOSVersion = "3" },
                            Id = "pool-id"
                        },
                    };
                    return Task.FromResult(response);
                };
            });

        Microsoft.Azure.Batch.CloudPool cloudPool = client.PoolOperations.GetPool("pool-id", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // xUnit convention: expected value first, actual second.
        Assert.Equal(version, cloudPool.ApplicationPackageReferences.First().Version);
        Assert.Equal(applicationId, cloudPool.ApplicationPackageReferences.First().ApplicationId);
    }
}
public void GetPoolStartTask()
{
    // Canned start task the faked Get Pool call will return.
    var startTask = new Protocol.Models.StartTask
    {
        CommandLine = "-start",
        EnvironmentSettings = new[] { new Models.EnvironmentSetting { Name = "windows", Value = "foo" } },
        MaxTaskRetryCount = 3,
        UserIdentity = new Protocol.Models.UserIdentity(
            autoUser: new Protocol.Models.AutoUserSpecification(elevationLevel: Protocol.Models.ElevationLevel.NonAdmin)),
        WaitForSuccess = false
    };

    using (BatchClient client = ClientUnitTestCommon.CreateDummyClient())
    {
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.PoolGetOptions, AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>>)baseRequest;
            request.ServiceRequestFunc = async token =>
            {
                var response = new AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>
                {
                    Body = new Models.CloudPool
                    {
                        DisplayName = "batch-test",
                        StartTask = startTask,
                    }
                };
                return await Task.FromResult(response);
            };
        });

        var pool = client.PoolOperations.GetPool("batch-test", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // The OM pool must surface every field of the protocol start task unchanged.
        Assert.Equal("batch-test", pool.DisplayName);
        Assert.Equal("-start", pool.StartTask.CommandLine);
        Assert.Equal("windows", pool.StartTask.EnvironmentSettings.FirstOrDefault().Name);
        Assert.Equal("foo", pool.StartTask.EnvironmentSettings.FirstOrDefault().Value);
        Assert.Equal(3, pool.StartTask.MaxTaskRetryCount);
        Assert.Equal(ElevationLevel.NonAdmin, pool.StartTask.UserIdentity.AutoUser.ElevationLevel);
        Assert.Equal(false, pool.StartTask.WaitForSuccess);
    }
}
public async Task CheckListApplicationSummariesIsReturningAValidList()
{
    const string applicationId = "blender.exe";
    const string displayName = "blender";
    IList<string> versions = new[] { "1.0", "1.5" };

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = await BatchClient.OpenAsync(credentials))
    {
        // Fake a List Applications response containing exactly one summary.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.ApplicationListOptions, AzureOperationResponse<IPage<Models.ApplicationSummary>, Models.ApplicationListHeaders>>)baseRequest;
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<IPage<Models.ApplicationSummary>, Models.ApplicationListHeaders>
                {
                    Body = new FakePage<Models.ApplicationSummary>(new[]
                    {
                        new Models.ApplicationSummary { Id = applicationId, DisplayName = displayName, Versions = versions },
                    })
                };
                return Task.FromResult(response);
            };
        });

        IPagedEnumerable<Microsoft.Azure.Batch.ApplicationSummary> applicationSummaries = client.ApplicationOperations.ListApplicationSummaries(additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // Assert.Single verifies the count of one AND yields the lone element (xUnit2013 idiom).
        Microsoft.Azure.Batch.ApplicationSummary applicationSummary = Assert.Single(applicationSummaries);

        Assert.Equal(applicationId, applicationSummary.Id);
        Assert.Equal(displayName, applicationSummary.DisplayName);
        Assert.Equal(versions.First(), applicationSummary.Versions.First());
        Assert.Equal(versions.Count, applicationSummary.Versions.ToList().Count);
    }
}
public async Task GetPoolWithApplicationPackageReferences()
{
    const string applicationId = "blender.exe";
    const string version = "blender";
    const string poolName = "test-pool";

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        // Fake a Get Pool response carrying one application package reference.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.PoolGetOptions, AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>>)baseRequest;

            // No async/await needed -- return the canned response directly.
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>
                {
                    Body = new Models.CloudPool
                    {
                        ApplicationPackageReferences = new[] { new Protocol.Models.ApplicationPackageReference { ApplicationId = applicationId, Version = version } },
                    }
                };
                return Task.FromResult(response);
            };
        });

        var pool = await client.PoolOperations.GetPoolAsync(poolName, additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        var appRefs = pool.ApplicationPackageReferences;
        Assert.Equal(applicationId, appRefs[0].ApplicationId);
        Assert.Equal(version, appRefs[0].Version);
    }
}
// Downloads the file represented by an NodeFile instance to the specified path.
private void DownloadNodeFileByInstance(NodeFile file, string destinationPath, Stream stream, DownloadNodeFileOptions.ByteRange byteRange, IEnumerable<BatchClientBehavior> additionalBehaviors = null)
{
    // TODO: Update this to use the new built in support in the C# SDK when we update the C# SDK to 6.x or later
    Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
    {
        if (byteRange == null)
        {
            return;
        }

        // Stamp the OcpRange header onto whichever file-get request type this is.
        string ocpRange = $"bytes={byteRange.Start}-{byteRange.End}";
        if (baseRequest is BatchRequests.FileGetFromTaskBatchRequest fromTaskRequest)
        {
            fromTaskRequest.Options.OcpRange = ocpRange;
        }
        else if (baseRequest is BatchRequests.FileGetFromComputeNodeBatchRequest fromNodeRequest)
        {
            fromNodeRequest.Options.OcpRange = ocpRange;
        }
    });

    additionalBehaviors = additionalBehaviors != null
        ? new List<BatchClientBehavior> { interceptor }.Union(additionalBehaviors)
        : new List<BatchClientBehavior>() { interceptor };

    if (byteRange != null)
    {
        WriteVerbose(string.Format(Resources.DownloadingNodeFileByteRange, byteRange.Start, byteRange.End));
    }

    if (stream != null)
    {
        // Don't dispose supplied Stream
        file.CopyToStream(stream, additionalBehaviors: additionalBehaviors);
    }
    else
    {
        WriteVerbose(string.Format(Resources.DownloadingNodeFile, file.Path, destinationPath));
        using (FileStream fs = new FileStream(destinationPath, FileMode.Create))
        {
            file.CopyToStream(fs, additionalBehaviors: additionalBehaviors);
        }
    }
}
public void Bug2302907_TestComputeNodeDoesInheritBehaviors()
{
    void test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment(), addDefaultRetryPolicy: false);

        // Attach a custom behavior at the pool-operations level...
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor();
        batchCli.PoolOperations.CustomBehaviors.Add(interceptor);

        ComputeNode firstNode = batchCli.PoolOperations.ListComputeNodes(poolFixture.PoolId).ToList().First();

        // ...and verify each listed compute node inherited it (plus the default behavior).
        Assert.Equal(2, firstNode.CustomBehaviors.Count);
        Assert.Contains(interceptor, firstNode.CustomBehaviors);
    }

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public void CheckIfGetApplicationPackageReferencesIsReadableButNotWritableOnABoundPool()
{
    const string applicationId = "blender.exe";
    const string version = "blender";

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        // Fake a Get Pool response carrying one application package reference.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
            baseRequest =>
            {
                var request = (Protocol.BatchRequest<Models.PoolGetOptions, AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>>)baseRequest;
                request.ServiceRequestFunc = token =>
                {
                    var response = new AzureOperationResponse<Models.CloudPool, Models.PoolGetHeaders>
                    {
                        Body = new Models.CloudPool
                        {
                            ApplicationPackageReferences = new[] { new Protocol.Models.ApplicationPackageReference { ApplicationId = applicationId, Version = version } }
                        }
                    };
                    return Task.FromResult(response);
                };
            });

        Microsoft.Azure.Batch.CloudPool cloudPool = client.PoolOperations.GetPool("pool-id", additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        // On a bound pool the references reject writes...
        Assert.Throws<InvalidOperationException>(() => cloudPool.ApplicationPackageReferences.First().ApplicationId = applicationId);
        Assert.Throws<InvalidOperationException>(() => cloudPool.ApplicationPackageReferences.First().Version = version);

        // ...but remain readable. xUnit convention: expected value first, actual second.
        Assert.Equal(version, cloudPool.ApplicationPackageReferences.First().Version);
        Assert.Equal(applicationId, cloudPool.ApplicationPackageReferences.First().ApplicationId);
    }
}
public void GetPoolsListTest()
{
    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        // Fake a List Pools response with two pools on a single page.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.PoolListOptions, AzureOperationResponse<IPage<Models.CloudPool>, Models.PoolListHeaders>>)baseRequest;

            // No async/await needed -- return the canned page directly.
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<IPage<Models.CloudPool>, Models.PoolListHeaders>
                {
                    Body = new FakePage<Models.CloudPool>(new List<Models.CloudPool>
                    {
                        new Models.CloudPool { DisplayName = "batch-test" },
                        new Models.CloudPool
                        {
                            DisplayName = "foobar",
                            CloudServiceConfiguration = new Models.CloudServiceConfiguration("3"),
                            AllocationState = Models.AllocationState.Steady
                        },
                    })
                };
                return Task.FromResult(response);
            };
        });

        IPagedEnumerable<Microsoft.Azure.Batch.CloudPool> asyncPools = client.PoolOperations.ListPools(additionalBehaviors: new List<BatchClientBehavior> { interceptor });
        var pools = new List<Microsoft.Azure.Batch.CloudPool>(asyncPools);

        Assert.Equal(2, pools.Count);
        Assert.Equal("batch-test", pools[0].DisplayName);
        Assert.Equal("foobar", pools[1].DisplayName);

        // enums are in the same namespace.
        Assert.Equal(AllocationState.Steady, pools[1].AllocationState);
    }
}
public async Task GetApplicationSummaryAsyncTest()
{
    const string applicationId = "blender.exe";
    const string displayName = "blender";
    IList<string> versions = new[] { "1.0", "1.5" };

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = await BatchClient.OpenAsync(credentials))
    {
        // Fake a Get Application response with the canned summary.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
            baseRequest =>
            {
                var request = (Protocol.BatchRequest<Models.ApplicationGetOptions, AzureOperationResponse<Models.ApplicationSummary, Models.ApplicationGetHeaders>>)baseRequest;
                request.ServiceRequestFunc = token =>
                {
                    var response = new AzureOperationResponse<Models.ApplicationSummary, Models.ApplicationGetHeaders>
                    {
                        Body = new Models.ApplicationSummary { Id = applicationId, DisplayName = displayName, Versions = versions }
                    };
                    return Task.FromResult(response);
                };
            });

        // Await the call instead of blocking on .Result -- blocking on async code risks
        // deadlock and wraps failures in AggregateException.
        Microsoft.Azure.Batch.ApplicationSummary applicationSummary =
            await client.ApplicationOperations.GetApplicationSummaryAsync(applicationId, additionalBehaviors: new List<BatchClientBehavior> { interceptor });

        Assert.Equal(applicationId, applicationSummary.Id);
        Assert.Equal(displayName, applicationSummary.DisplayName);
        Assert.Equal(versions.First(), applicationSummary.Versions.First());
    }
}
public void ListComputeNodes()
{
    var dateTime = DateTime.UtcNow;

    BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();
    using (BatchClient client = BatchClient.Open(credentials))
    {
        // Fake a List Compute Nodes response with a single running node.
        Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(baseRequest =>
        {
            var request = (Protocol.BatchRequest<Models.ComputeNodeListOptions, AzureOperationResponse<IPage<Models.ComputeNode>, Models.ComputeNodeListHeaders>>)baseRequest;

            // No async/await needed -- return the canned page directly.
            request.ServiceRequestFunc = token =>
            {
                var response = new AzureOperationResponse<IPage<Models.ComputeNode>, Models.ComputeNodeListHeaders>
                {
                    Body = new FakePage<Models.ComputeNode>(new[]
                    {
                        new Microsoft.Azure.Batch.Protocol.Models.ComputeNode
                        {
                            State = Models.ComputeNodeState.Running,
                            LastBootTime = dateTime,
                            Id = "computeNode1",
                        },
                    })
                };
                return Task.FromResult(response);
            };
        });

        List<Microsoft.Azure.Batch.ComputeNode> vms = client.PoolOperations.ListComputeNodes("foo", additionalBehaviors: new List<BatchClientBehavior> { interceptor }).ToList();

        // xUnit convention: expected value first, actual second.
        Assert.Equal(1, vms.Count);
        Assert.Equal("computeNode1", vms[0].Id);
        Assert.Equal(ComputeNodeState.Running, vms[0].State);
        Assert.Equal(dateTime, vms[0].LastBootTime);
    }
}
public async Task AddTasksRequestEntityTooLarge_ReduceChunkSize()
{
    const string testName = "AddTasksRequestEntityTooLarge_ReduceChunkSize";

    int countChunksOf100 = 0;
    int numTasks = 176;
    int degreesOfParallelism = 2;

    // Count every add-task-collection call that carries more than 50 tasks.
    BatchClientBehavior customBehavior = new Protocol.RequestInterceptor(request =>
    {
        if (request is Protocol.BatchRequests.TaskAddCollectionBatchRequest typedRequest
            && typedRequest.Parameters.Count > 50)
        {
            Interlocked.Increment(ref countChunksOf100);
        }
    });

    // If this test fails try increasing the size of the Task in case maximum size increase
    List<ResourceFile> resourceFiles = new List<ResourceFile>();
    for (int i = 0; i < 100; i++)
    {
        ResourceFile resourceFile = ResourceFile.FromUrl("https://mystorageaccount.blob.core.windows.net/files/resourceFile" + i, "resourceFile" + i);
        resourceFiles.Add(resourceFile);
    }

    await SynchronizationContextHelper.RunTestAsync(async () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment(), addDefaultRetryPolicy: false))
        {
            batchCli.JobOperations.CustomBehaviors.Add(customBehavior);

            BatchClientParallelOptions parallelOptions = new BatchClientParallelOptions() { MaxDegreeOfParallelism = degreesOfParallelism };
            await AddTasksSimpleTestAsync(batchCli, testName, numTasks, parallelOptions, resourceFiles: resourceFiles).ConfigureAwait(false);
        }
    }, TestTimeout);

    // At most one oversized chunk per parallel worker (bounded also by the task count).
    Assert.True(countChunksOf100 <= Math.Min(Math.Ceiling(numTasks / 100.0), degreesOfParallelism));
}
public async Task TestRequestWhichDoesntSupportFilter()
{
    // A point GET (single pool) cannot take an OData filter clause; passing a
    // DetailLevel with a filter must surface as an ArgumentException before any
    // request is issued.
    using BatchClient client = ClientUnitTestCommon.CreateDummyClient();

    BatchClientBehavior behavior = new Protocol.RequestInterceptor(request =>
    {
        // Short-circuit the wire call with a canned pool; the request should never
        // actually execute because validation fails first.
        PoolGetBatchRequest poolGetRequest = request as PoolGetBatchRequest;
        poolGetRequest.ServiceRequestFunc = t => Task.FromResult(
            new AzureOperationResponse<CloudPool, PoolGetHeaders>() { Body = new CloudPool() });
    });

    const string dummyPoolId = "dummy";
    DetailLevel detailLevel = new ODATADetailLevel(filterClause: "foo");

    ArgumentException e = await Assert.ThrowsAsync<ArgumentException>(
        async () => await client.PoolOperations.GetPoolAsync(dummyPoolId, detailLevel, new[] { behavior }));

    Assert.Contains("Type Microsoft.Azure.Batch.Protocol.BatchRequests.PoolGetBatchRequest does not support a filter clause.", e.Message);
    Assert.Equal("detailLevel", e.ParamName);
}
internal CallTimerViaInterceptors()
{
    // Wire both interceptors to this instance's timing handlers so callers can
    // attach them to a BatchClient and log request/response durations.
    ReqInterceptor = new Protocol.RequestInterceptor(RequestInterceptHandler);
    ResInterceptor = new Protocol.ResponseInterceptor(ResponseInterceptHandler);
}
/// <summary>
/// End-to-end test of JobPreparationTask / JobReleaseTask on a job schedule:
/// creates a schedule with prep/release tasks, verifies the round-tripped values on
/// the bound schedule and on the spawned job, updates a prep-task property, and
/// finally exercises the prep/release status APIs via TestGetPrepReleaseStatusCalls.
/// </summary>
public void TestOMJobSpecAndRelease()
{
    Action test = () =>
    {
        StagingStorageAccount stagingCreds = TestUtilities.GetStorageCredentialsFromEnvironment();
        using (BatchClient client = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
        {
            string jsId = "JobPrepAndRelease-" + /* "OM-static-c" */ "dynamic-" + CraftTimeString() + "-" + TestUtilities.GetMyName();
            try
            {
                // Interceptor that raises the per-request server timeout; we have seen
                // client-side timeouts on these long-running calls.
                Protocol.RequestInterceptor increaseTimeoutInterceptor = new Protocol.RequestInterceptor((x) =>
                {
                    this.testOutputHelper.WriteLine("TestOMJobSpecAndRelease: setting request timeout. Request type: " + x.GetType().ToString() + ", ClientRequestID: " + x.Options.ClientRequestId);
                    var timeoutOptions = x.Options as Protocol.Models.ITimeoutOptions;
                    timeoutOptions.Timeout = 5 * 60;
                });

                // Also time and log every call.
                CallTimerViaInterceptors timerInterceptor = new CallTimerViaInterceptors();

                client.CustomBehaviors.Add(increaseTimeoutInterceptor);
                client.CustomBehaviors.Add(timerInterceptor.ReqInterceptor);

                // Get some resource files to attach to the prep/release tasks.
                IList<ResourceFile> resFiles = UploadFilesMakeResFiles(stagingCreds);

                // Create job schedule with prep/release.
                {
                    CloudJobSchedule unboundJobSchedule = client.JobScheduleOperations.CreateJobSchedule(jsId, null, null);
                    unboundJobSchedule.JobSpecification = new JobSpecification(new PoolInformation());
                    unboundJobSchedule.JobSpecification.PoolInformation.PoolId = this.poolFixture.PoolId;
                    unboundJobSchedule.Schedule = new Schedule() { RecurrenceInterval = TimeSpan.FromMinutes(3) };

                    // Add the JobPreparationTask to the job schedule.
                    {
                        JobPreparationTask prep = new JobPreparationTask(JobPrepCommandLine);
                        unboundJobSchedule.JobSpecification.JobPreparationTask = prep;

                        List<EnvironmentSetting> prepEnvSettings = new List<EnvironmentSetting>();
                        prepEnvSettings.Add(JobPrepEnvSettingOM);
                        prep.EnvironmentSettings = prepEnvSettings;

                        prep.Id = JobPrepId;
                        prep.RerunOnComputeNodeRebootAfterSuccess = JobPrepRerunOnComputeNodeRebootAfterSuccess;
                        // NOTE(review): assigning the IList directly exercises the SDK's
                        // conversion into its internal concurrent IList wrapper.
                        prep.ResourceFiles = resFiles;
                        prep.UserIdentity = new UserIdentity(JobPrepUserSpec);
                        prep.Constraints = JobPrepTaskConstraintsOM;
                        prep.WaitForSuccess = JobPrepWaitForSuccessCreate;
                    }

                    // Add a JobReleaseTask to the job schedule.
                    {
                        JobReleaseTask relTask = new JobReleaseTask(JobReleaseTaskCommandLine);
                        unboundJobSchedule.JobSpecification.JobReleaseTask = relTask;

                        List<EnvironmentSetting> relEnvSettings = new List<EnvironmentSetting>();
                        relEnvSettings.Add(JobRelEnvSettingOM);
                        relTask.EnvironmentSettings = relEnvSettings;

                        relTask.MaxWallClockTime = JobRelMaxWallClockTime;
                        relTask.Id = JobRelId;

                        relTask.ResourceFiles = null;
                        relTask.ResourceFiles = new List<ResourceFile>();

                        // Merge the files in one-by-one to exercise the concurrent IList wrapper.
                        foreach (ResourceFile curRF in resFiles)
                        {
                            relTask.ResourceFiles.Add(curRF);
                        }

                        relTask.RetentionTime = JobRelRetentionTime;
                        relTask.UserIdentity = new UserIdentity(JobRelUserSpec);
                    }

                    // Set JobSpecification.CommonEnvironmentSettings.
                    {
                        List<EnvironmentSetting> jobCommonES = new List<EnvironmentSetting>();
                        jobCommonES.Add(JobCommonEnvSettingOM);
                        unboundJobSchedule.JobSpecification.CommonEnvironmentSettings = jobCommonES;
                    }

                    // Add the job schedule to the service.
                    unboundJobSchedule.Commit();
                }

                // Re-fetch the schedule and verify the prep/release values round-tripped.
                {
                    CloudJobSchedule boundJobSchedule = client.JobScheduleOperations.GetJobSchedule(jsId);

                    Assert.NotNull(boundJobSchedule);
                    Assert.NotNull(boundJobSchedule.JobSpecification);
                    Assert.NotNull(boundJobSchedule.JobSpecification.JobPreparationTask);
                    Assert.NotNull(boundJobSchedule.JobSpecification.JobReleaseTask);
                    Assert.NotNull(boundJobSchedule.JobSpecification.CommonEnvironmentSettings);

                    AssertGoodCommonEnvSettingsOM(boundJobSchedule.JobSpecification.CommonEnvironmentSettings);
                    AssertGoodJobPrepTaskOM(boundJobSchedule.JobSpecification.JobPreparationTask);
                    AssertGoodJobReleaseTaskOM(boundJobSchedule.JobSpecification.JobReleaseTask);
                    AssertGoodResourceFiles(resFiles, boundJobSchedule.JobSpecification.JobPreparationTask.ResourceFiles);
                    AssertGoodResourceFiles(resFiles, boundJobSchedule.JobSpecification.JobReleaseTask.ResourceFiles);

                    //todo: test mutability
                }

                CloudJobSchedule boundJobScheduleWithJob; // used for the on-job test below

                // Wait for the schedule to spawn a job, then verify the values on the job.
                {
                    boundJobScheduleWithJob = TestUtilities.WaitForJobOnJobSchedule(client.JobScheduleOperations, jsId);
                    CloudJob bndJob = client.JobOperations.GetJob(boundJobScheduleWithJob.ExecutionInformation.RecentJob.Id);

                    Assert.NotNull(bndJob);
                    Assert.NotNull(bndJob.CommonEnvironmentSettings);
                    Assert.NotNull(bndJob.JobPreparationTask);
                    Assert.NotNull(bndJob.JobReleaseTask);

                    AssertGoodCommonEnvSettingsOM(bndJob.CommonEnvironmentSettings as IList<EnvironmentSetting> /* we know it is our internal IList */);
                    AssertGoodJobPrepTaskOM(bndJob.JobPreparationTask);
                    AssertGoodJobReleaseTaskOM(bndJob.JobReleaseTask);
                    AssertGoodResourceFiles(resFiles, bndJob.JobPreparationTask.ResourceFiles);
                    AssertGoodResourceFiles(resFiles, bndJob.JobReleaseTask.ResourceFiles);

                    //TODO: test immutability
                }

                CloudJobSchedule updatedJobSchedule; // used for the get-status test below

                // Test update of a prep-task property on the committed schedule.
                {
                    // change props
                    boundJobScheduleWithJob.JobSpecification.JobPreparationTask.WaitForSuccess = JobPrepWaitForSuccessUpdate;

                    // commit changes
                    boundJobScheduleWithJob.Commit();

                    // get new values
                    updatedJobSchedule = client.JobScheduleOperations.GetJobSchedule(jsId);

                    // confirm values changed
                    Assert.Equal(JobPrepWaitForSuccessUpdate, updatedJobSchedule.JobSpecification.JobPreparationTask.WaitForSuccess);
                }

                TestGetPrepReleaseStatusCalls(client, updatedJobSchedule, this.poolFixture.PoolId, resFiles);
            }
            finally
            {
                // cleanup
                TestUtilities.DeleteJobScheduleIfExistsAsync(client, jsId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, LongTestTimeout);
}
/// <summary>
/// Calls the two get-status REST APIs (JobPreparation / JobRelease task execution
/// information) and asserts their values:
///
/// 1: add a single quick task (quick because we don't need it to run very long)
/// 2: this forces a victim compute node to run the JobPrep
/// 3: poll for this compute node, ignore others (sharedPool.size probably &gt; 1)
/// 4: check status of JobPrep and assert as many values as makes sense (this is not a retry test)
/// 5: JobPrep succeeds, task runs
/// 6: terminate the job to trigger JobRelease, then poll for its completion
/// 7: assert as many values as makes sense.
/// </summary>
/// <param name="batchCli">Client already configured with the shared-pool fixture.</param>
/// <param name="boundJobSchedule">Schedule whose most recent job is exercised.</param>
/// <param name="sharedPool">Id of the pool the job runs on.</param>
/// <param name="correctResFiles">Resource files expected to appear on the prep node.</param>
private void TestGetPrepReleaseStatusCalls(BatchClient batchCli, CloudJobSchedule boundJobSchedule, string sharedPool, IEnumerable<ResourceFile> correctResFiles)
{
    // need this often enough lets just pull it out
    string jobId = boundJobSchedule.ExecutionInformation.RecentJob.Id;
    PoolOperations poolOps = batchCli.PoolOperations;
    JobScheduleOperations jobScheduleOperations = batchCli.JobScheduleOperations;
    {
        DateTime beforeJobPrepRuns = DateTime.UtcNow; // used to test start time

        // Need a task to force JobPrep to run.
        CloudTask sillyTask = new CloudTask("forceJobPrep", "cmd /c hostname");

        // add the task
        batchCli.JobOperations.AddTask(jobId, sillyTask);

        // Poll until the task leaves the Active state (i.e. has been scheduled).
        bool keepLooking = true;
        while (keepLooking)
        {
            this.testOutputHelper.WriteLine("Waiting for task to be scheduled.");

            foreach (CloudTask curTask in batchCli.JobOperations.GetJob(jobId).ListTasks())
            {
                if (curTask.State != TaskState.Active)
                {
                    keepLooking = false;
                    break;
                }
            }

            Thread.Sleep(1000);
        }

        // Poll until at least one node reports JobPrep/JobRelease execution info.
        List<JobPreparationAndReleaseTaskExecutionInformation> jobPrepStatusList = new List<JobPreparationAndReleaseTaskExecutionInformation>();
        while (jobPrepStatusList.Count == 0)
        {
            jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId).ToList();
        }
        JobPreparationAndReleaseTaskExecutionInformation jptei = jobPrepStatusList.First();

        ComputeNode victimComputeNodeRunningPrepAndRelease = poolOps.GetComputeNode(sharedPool, jptei.ComputeNodeId);

        // job prep tests
        {
            Assert.NotNull(jptei);
            Assert.Equal(0, jptei.JobPreparationTaskExecutionInformation.RetryCount);
            // test that the start time is rational -- 10s of wiggle room
            Assert.True(beforeJobPrepRuns < jptei.JobPreparationTaskExecutionInformation.StartTime + TimeSpan.FromSeconds(10));
            Assert.Null(jptei.JobPreparationTaskExecutionInformation.FailureInformation);

            this.testOutputHelper.WriteLine("");
            this.testOutputHelper.WriteLine("listing files for compute node: " + victimComputeNodeRunningPrepAndRelease.Id);

            // Filter the node's file list down to this job schedule to reduce noise.
            List<NodeFile> filteredListJobPrep = new List<NodeFile>();
            foreach (NodeFile curTF in victimComputeNodeRunningPrepAndRelease.ListNodeFiles(recursive: true))
            {
                // filter on the jsId since we only run one job per job schedule in this test.
                if (curTF.Path.IndexOf(boundJobSchedule.Id, StringComparison.InvariantCultureIgnoreCase) >= 0)
                {
                    this.testOutputHelper.WriteLine(" name:" + curTF.Path + ", size: " + ((curTF.IsDirectory.HasValue && curTF.IsDirectory.Value) ? "<dir>" : curTF.Properties.ContentLength.ToString()));

                    filteredListJobPrep.Add(curTF);
                }
            }

            // Confirm each resource file made it onto the node.
            foreach (ResourceFile curCorrectRF in correctResFiles)
            {
                bool found = false;

                foreach (NodeFile curTF in filteredListJobPrep)
                {
                    // look for the resfile filepath in the taskfile name
                    found |= curTF.Path.IndexOf(curCorrectRF.FilePath, StringComparison.InvariantCultureIgnoreCase) >= 0;
                }

                Assert.True(found, "Looking for resourcefile: " + curCorrectRF.FilePath);
            }

            // Poll for JobPrep completion, filtering on the victim node.
            while (JobPreparationTaskState.Completed != jptei.JobPreparationTaskExecutionInformation.State)
            {
                this.testOutputHelper.WriteLine("waiting for jopPrep to complete");
                Thread.Sleep(2000);

                // refresh the state info
                ODATADetailLevel detailLevel = new ODATADetailLevel() { FilterClause = string.Format("nodeId eq '{0}'", victimComputeNodeRunningPrepAndRelease.Id) };
                jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId, detailLevel: detailLevel).ToList();

                jptei = jobPrepStatusList.First();
            }

            // need success
            Assert.Equal(0, jptei.JobPreparationTaskExecutionInformation.ExitCode);

            // Check stdout to confirm prep ran.
            // NOTE(review): the job directory name appears hardcoded as "job-1" here —
            // presumably the first job spawned by the schedule; confirm against service layout.
            string stdOutFileSpec = Path.Combine("workitems", boundJobSchedule.Id, "job-1", boundJobSchedule.JobSpecification.JobPreparationTask.Id, Constants.StandardOutFileName);
            string stdOut = victimComputeNodeRunningPrepAndRelease.GetNodeFile(stdOutFileSpec).ReadAsString();

            string stdErrFileSpec = Path.Combine("workitems", boundJobSchedule.Id, "job-1", boundJobSchedule.JobSpecification.JobPreparationTask.Id, Constants.StandardErrorFileName);
            string stdErr = string.Empty;

            try
            {
                stdErr = victimComputeNodeRunningPrepAndRelease.GetNodeFile(stdErrFileSpec).ReadAsString();
            }
            catch (Exception)
            {
                //Swallow any exceptions here since stderr may not exist
            }

            this.testOutputHelper.WriteLine(stdOut);
            this.testOutputHelper.WriteLine(stdErr);

            Assert.True(!string.IsNullOrWhiteSpace(stdOut));
            Assert.Contains("jobpreparation", stdOut.ToLower());
        }

        // jobPrep tests completed. let JobPrep complete and task run and wait for JobRelease
        TaskStateMonitor tsm = batchCli.Utilities.CreateTaskStateMonitor();

        // Spam/logging interceptor: each poll logs the current JobPrep state.
        Protocol.RequestInterceptor consoleSpammer = new Protocol.RequestInterceptor((x) =>
        {
            this.testOutputHelper.WriteLine("TestGetPrepReleaseStatusCalls: waiting for JobPrep and task to complete");

            ODATADetailLevel detailLevel = new ODATADetailLevel() { FilterClause = string.Format("nodeId eq '{0}'", victimComputeNodeRunningPrepAndRelease.Id) };
            jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId, detailLevel: detailLevel).ToList();
            JobPreparationAndReleaseTaskExecutionInformation jpteiInterceptor = jobPrepStatusList.First();

            this.testOutputHelper.WriteLine(" JobPrep.State: " + jpteiInterceptor.JobPreparationTaskExecutionInformation.State);

            this.testOutputHelper.WriteLine("");
        });

        // Wait for the task to complete so JobRelease can run.
        tsm.WaitAll(
            batchCli.JobOperations.GetJob(jobId).ListTasks(additionalBehaviors: new[] { consoleSpammer }),
            TaskState.Completed,
            TimeSpan.FromSeconds(120),
            additionalBehaviors: new[] { consoleSpammer });

        // trigger JobRelease
        batchCli.JobOperations.TerminateJob(jobId, terminateReason: "die! I want JobRelease to run!");

        // Now that the task has completed, we are racing with the JobRelease...
        // but it is sleeping so we can catch it.
        while (true)
        {
            ODATADetailLevel detailLevel = new ODATADetailLevel() { FilterClause = string.Format("nodeId eq '{0}'", victimComputeNodeRunningPrepAndRelease.Id) };
            jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId, detailLevel: detailLevel).ToList();
            JobPreparationAndReleaseTaskExecutionInformation jrtei = jobPrepStatusList.FirstOrDefault();

            if ((jrtei == null) || (null == jrtei.JobReleaseTaskExecutionInformation))
            {
                Thread.Sleep(2000);
            }
            else
            {
                Assert.NotNull(jrtei);
                if (jrtei.JobReleaseTaskExecutionInformation.State != JobReleaseTaskState.Completed)
                {
                    this.testOutputHelper.WriteLine("JobReleaseTask state is: " + jrtei.JobReleaseTaskExecutionInformation.State);

                    Thread.Sleep(5000);
                }
                else
                {
                    this.testOutputHelper.WriteLine("JobRelease commpleted!");

                    // we are done
                    break;
                }
            }
        }
    }
}
/// <summary>
/// Regression test for Bug 2329884: verifies ComputeNode.RecentTasks reflects a
/// completed task (live service), then uses a mocked ComputeNodeGet response to
/// verify ComputeNode.Errors is surfaced correctly (errors cannot easily be forced
/// on a real node).
/// </summary>
public void Bug2329884_ComputeNodeRecentTasksAndComputeNodeError()
{
    Action test = () =>
    {
        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "Bug2329884Job-" + TestUtilities.GetMyName();
            Protocol.RequestInterceptor interceptor = null;
            try
            {
                const string taskId = "hiWorld";

                //
                // Create the job
                //
                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                unboundJob.Commit();

                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);
                CloudTask myTask = new CloudTask(taskId, "cmd /c echo hello world");

                boundJob.AddTask(myTask);

                this.testOutputHelper.WriteLine("Initial job commit()");

                //
                // Wait for task to go to completion
                //
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    new TimeSpan(0, 3 /*min*/, 0));

                CloudTask boundTask = boundJob.GetTask(taskId);

                // Since the compute node name comes back as "Node:<computeNodeId>" we need
                // to split on ':' to get the actual compute node name.
                string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                //
                // Check recent tasks
                //
                ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);
                this.testOutputHelper.WriteLine("Recent tasks:");

                foreach (TaskInformation recentTask in computeNode.RecentTasks)
                {
                    this.testOutputHelper.WriteLine("Compute node has recent task Job: {0}, Task: {1}, State: {2}, Subtask: {3}", recentTask.JobId, recentTask.TaskId, recentTask.TaskState, recentTask.SubtaskId);
                }

                TaskInformation myTaskInfo = computeNode.RecentTasks.First(taskInfo => taskInfo.JobId.Equals(
                    jobId, StringComparison.InvariantCultureIgnoreCase) && taskInfo.TaskId.Equals(taskId, StringComparison.InvariantCultureIgnoreCase));

                Assert.Equal(TaskState.Completed, myTaskInfo.TaskState);
                Assert.NotNull(myTaskInfo.ExecutionInformation);
                Assert.Equal(0, myTaskInfo.ExecutionInformation.ExitCode);

                //
                // Check compute node Error
                //
                const string expectedErrorCode = "TestErrorCode";
                const string expectedErrorMessage = "Test error message";
                const string nvpValue = "Test";

                // We use mocking to return a fake compute node object here to test
                // ComputeNode.Errors because we cannot force a real error easily.
                interceptor = new Protocol.RequestInterceptor((req =>
                {
                    if (req is ComputeNodeGetBatchRequest)
                    {
                        var typedRequest = req as ComputeNodeGetBatchRequest;

                        typedRequest.ServiceRequestFunc = (token) =>
                        {
                            var response = new AzureOperationResponse<Protocol.Models.ComputeNode, Protocol.Models.ComputeNodeGetHeaders>();

                            List<Protocol.Models.ComputeNodeError> errors = new List<Protocol.Models.ComputeNodeError>();

                            //Generate first Compute Node Error
                            List<Protocol.Models.NameValuePair> nvps = new List<Protocol.Models.NameValuePair>();
                            nvps.Add(new Protocol.Models.NameValuePair() { Name = nvpValue, Value = nvpValue });

                            Protocol.Models.ComputeNodeError error1 = new Protocol.Models.ComputeNodeError();
                            error1.Code = expectedErrorCode;
                            error1.Message = expectedErrorMessage;
                            error1.ErrorDetails = nvps;

                            errors.Add(error1);

                            //Generate second Compute Node Error
                            nvps = new List<Protocol.Models.NameValuePair>();
                            nvps.Add(new Protocol.Models.NameValuePair() { Name = nvpValue, Value = nvpValue });

                            Protocol.Models.ComputeNodeError error2 = new Protocol.Models.ComputeNodeError();
                            error2.Code = expectedErrorCode;
                            error2.Message = expectedErrorMessage;
                            error2.ErrorDetails = nvps;

                            errors.Add(error2);

                            Protocol.Models.ComputeNode protoComputeNode = new Protocol.Models.ComputeNode();
                            protoComputeNode.Id = computeNodeId;
                            protoComputeNode.State = Protocol.Models.ComputeNodeState.Idle;
                            protoComputeNode.Errors = errors;

                            response.Body = protoComputeNode;

                            return (Task.FromResult(response));
                        };
                    }
                }));

                batchCli.PoolOperations.CustomBehaviors.Add(interceptor);

                // This GET now hits the mock above rather than the service.
                computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                Assert.Equal(computeNodeId, computeNode.Id);
                Assert.NotNull(computeNode.Errors);
                Assert.Equal(2, computeNode.Errors.Count());

                foreach (ComputeNodeError computeNodeError in computeNode.Errors)
                {
                    Assert.Equal(expectedErrorCode, computeNodeError.Code);
                    Assert.Equal(expectedErrorMessage, computeNodeError.Message);
                    Assert.NotNull(computeNodeError.ErrorDetails);
                    Assert.Equal(1, computeNodeError.ErrorDetails.Count());
                    Assert.Contains(nvpValue, computeNodeError.ErrorDetails.First().Name);
                }
            }
            finally
            {
                batchCli.JobOperations.DeleteJob(jobId);
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Drives a request through an interceptor that never completes until cancelled, and
/// asserts that whichever cancellation source is configured to fire first (a custom
/// CancellationToken vs. the per-request Timeout) actually cancels the call within
/// TimeTolerance seconds, and that the retry count matches expectations.
/// </summary>
/// <param name="clientRequestTimeoutViaCustomToken">Timeout applied via a caller-supplied CancellationToken, or null.</param>
/// <param name="clientRequestTimeoutViaTimeout">Timeout applied via the request's Timeout property, or null. At least one of the two must be non-null.</param>
/// <param name="retryPolicy">Optional retry policy to attach to the client.</param>
/// <param name="expectedMaxRetries">Expected retry count when the client-side timeout (not the custom token) fires first.</param>
private async Task BatchRequestCancellationViaInterceptorTestAsync(
    TimeSpan? clientRequestTimeoutViaCustomToken,
    TimeSpan? clientRequestTimeoutViaTimeout,
    IRetryPolicy retryPolicy = null,
    int? expectedMaxRetries = null)
{
    TimeSpan timeoutViaCancellationTokenValue = clientRequestTimeoutViaCustomToken ?? TimeSpan.Zero;
    TimeSpan? cancellationDuration = null;
    DateTime startTime = DateTime.UtcNow;
    bool expectedCustomTokenTimeoutToHitFirst = false;
    int observedRequestCount = 0;

    CancellationToken customToken = CancellationToken.None;
    using (CancellationTokenSource source = new CancellationTokenSource(timeoutViaCancellationTokenValue))
    {
        if (clientRequestTimeoutViaCustomToken.HasValue)
        {
            customToken = source.Token;
        }

        // Determine which timeout should hit first.
        if (clientRequestTimeoutViaCustomToken.HasValue && clientRequestTimeoutViaTimeout.HasValue)
        {
            expectedCustomTokenTimeoutToHitFirst = clientRequestTimeoutViaCustomToken < clientRequestTimeoutViaTimeout;
        }
        else if (clientRequestTimeoutViaCustomToken.HasValue)
        {
            expectedCustomTokenTimeoutToHitFirst = true;
        }
        else if (clientRequestTimeoutViaTimeout.HasValue)
        {
            expectedCustomTokenTimeoutToHitFirst = false;
        }
        else
        {
            Assert.True(false, "Both clientRequestTimeoutViaCustomToken and clientRequestTimeoutViaTimeout cannot be null");
        }

        using (BatchClient client = ClientUnitTestCommon.CreateDummyClient())
        {
            //Add a retry policy to the client if required
            if (retryPolicy != null)
            {
                client.CustomBehaviors.Add(new RetryPolicyProvider(retryPolicy));
            }

            //
            // Set the interceptor to catch the request before it really goes to the Batch
            // service and hook the cancellation token to find when it times out.
            //
            Protocol.RequestInterceptor requestInterceptor = new Protocol.RequestInterceptor(req =>
            {
                if (clientRequestTimeoutViaTimeout.HasValue)
                {
                    req.Timeout = clientRequestTimeoutViaTimeout.Value;
                }

                req.CancellationToken = customToken;

                var castRequest = (Protocol.BatchRequests.JobGetBatchRequest)req;
                castRequest.ServiceRequestFunc = async (token) =>
                {
                    TaskCompletionSource<TimeSpan> taskCompletionSource = new TaskCompletionSource<TimeSpan>();

                    observedRequestCount++;

                    // The custom token's clock starts once, at method entry; the
                    // client-side timeout restarts per request (per retry).
                    if (!expectedCustomTokenTimeoutToHitFirst)
                    {
                        startTime = DateTime.UtcNow;
                    }

                    // Register BEFORE awaiting: the callback completes the TCS with the
                    // elapsed time when the token (either source) is cancelled.
                    token.Register(() =>
                    {
                        DateTime endTime = DateTime.UtcNow;
                        TimeSpan duration = endTime.Subtract(startTime);
                        taskCompletionSource.SetResult(duration);
                    });

                    // Block until cancellation fires, record how long it took.
                    cancellationDuration = await taskCompletionSource.Task;

                    token.ThrowIfCancellationRequested(); //Force an exception

                    // Unreachable in practice; satisfies the delegate's return type.
                    return (new AzureOperationResponse<Protocol.Models.CloudJob, Protocol.Models.JobGetHeaders>() { Body = new Protocol.Models.CloudJob() });
                };
            });

            await Assert.ThrowsAsync<OperationCanceledException>(async () => await client.JobOperations.GetJobAsync("dummy", additionalBehaviors: new List<BatchClientBehavior> { requestInterceptor }));
        }

        this.testOutputHelper.WriteLine("There were {0} requests executed", observedRequestCount);
        this.testOutputHelper.WriteLine("Took {0} to cancel task", cancellationDuration);

        Assert.NotNull(cancellationDuration);

        if (expectedCustomTokenTimeoutToHitFirst)
        {
            this.testOutputHelper.WriteLine("Expected custom token timeout to hit first");
            Assert.True(Math.Abs(clientRequestTimeoutViaCustomToken.Value.TotalSeconds - cancellationDuration.Value.TotalSeconds) < TimeTolerance,
                string.Format("Expected timeout: {0}, Observed timeout: {1}", clientRequestTimeoutViaCustomToken, cancellationDuration));
        }
        else
        {
            this.testOutputHelper.WriteLine("Expected client side timeout to hit first");
            Assert.True(Math.Abs(clientRequestTimeoutViaTimeout.Value.TotalSeconds - cancellationDuration.Value.TotalSeconds) < TimeTolerance,
                string.Format("Expected timeout: {0}, Observed timeout: {1}", clientRequestTimeoutViaTimeout, cancellationDuration));
        }

        //Confirm the right number of retries were reached (if applicable)
        if (retryPolicy != null)
        {
            if (expectedCustomTokenTimeoutToHitFirst)
            {
                //The custom token terminates the retry so there should just be 1 request (0 retries)
                Assert.Equal(0, observedRequestCount - 1);
            }
            else
            {
                Assert.Equal(expectedMaxRetries, observedRequestCount - 1);
            }
        }
    }
}