            static async Task test()
            {
                using BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false);

                string jobId = "TestJobWithLowPriJobManager-" + TestUtilities.GetMyName();

                try
                {
                    PoolInformation poolInfo = new PoolInformation()
                    {
                        PoolId = "Fake"
                    };

                    CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);
                    unboundJob.JobManagerTask = new JobManagerTask("foo", "cmd /c echo hi")
                    {
                        AllowLowPriorityNode = true
                    };
                    await unboundJob.CommitAsync().ConfigureAwait(false);

                    await unboundJob.RefreshAsync().ConfigureAwait(false);

                    Assert.True(unboundJob.JobManagerTask.AllowLowPriorityNode);
                }
                finally
                {
                    await TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId);
                }
            }
        public async Task ApplicationPackageReferencesOnCloudTaskAreRoundtripped()
        {
            string jobId  = Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-APROnCloudTaskAreRoundtripped";
            string taskId = "task-id";

            const string applicationId      = "blender";
            const string applicationVersion = "beta";

            Func <Task> test = async() =>
            {
                using (BatchClient client = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false))
                {
                    var poolInfo = new PoolInformation
                    {
                        AutoPoolSpecification = new AutoPoolSpecification
                        {
                            PoolSpecification = new PoolSpecification
                            {
                                CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily),
                                VirtualMachineSize        = PoolFixture.VMSize,
                            },
                            PoolLifetimeOption = PoolLifetimeOption.Job
                        }
                    };

                    try
                    {
                        CloudJob job = client.JobOperations.CreateJob(jobId, poolInfo);
                        await job.CommitAsync().ConfigureAwait(false);

                        var boundJob = await client.JobOperations.GetJobAsync(jobId).ConfigureAwait(false);

                        CloudTask cloudTask = new CloudTask(taskId, "cmd /c ping 127.0.0.1")
                        {
                            ApplicationPackageReferences = new[]
                            {
                                new ApplicationPackageReference
                                {
                                    ApplicationId = applicationId,
                                    Version       = applicationVersion
                                }
                            }
                        };

                        await boundJob.AddTaskAsync(cloudTask).ConfigureAwait(false);

                        CloudTask boundCloudTask = await boundJob.GetTaskAsync(taskId).ConfigureAwait(false);

                        Assert.Equal(applicationId, boundCloudTask.ApplicationPackageReferences.Single().ApplicationId);
                        Assert.Equal(applicationVersion, boundCloudTask.ApplicationPackageReferences.Single().Version);
                    }
                    finally
                    {
                        TestUtilities.DeleteJobIfExistsAsync(client, jobId).Wait();
                    }
                }
            };

            await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
        }
Example #3
        /// <summary>
        /// Creates a new Azure Batch job, adds the supplied task to it, and configures the job to terminate once all tasks complete
        /// </summary>
        /// <param name="jobId">The ID of the job to create</param>
        /// <param name="cloudTask">The task to add to the newly created job</param>
        /// <param name="poolInformation">The pool (or auto pool specification) the job runs on</param>
        /// <returns>A <see cref="Task"/> representing the asynchronous operation</returns>
        public async Task CreateBatchJobAsync(string jobId, CloudTask cloudTask, PoolInformation poolInformation)
        {
            var job = batchClient.JobOperations.CreateJob(jobId, poolInformation);

            await job.CommitAsync();

            try
            {
                job = await batchClient.JobOperations.GetJobAsync(job.Id); // Retrieve the "bound" version of the job

                job.PoolInformation    = poolInformation;                  // Redoing this since the container registry password is not retrieved by GetJobAsync()
                job.OnAllTasksComplete = OnAllTasksComplete.TerminateJob;

                await job.AddTaskAsync(cloudTask);

                await job.CommitAsync();
            }
            catch (Exception ex)
            {
                var batchError = JsonConvert.SerializeObject((ex as BatchException)?.RequestInformation?.BatchError);
                logger.LogError(ex, $"Deleting {job.Id} because adding task to it failed. Batch error: {batchError}");
                await batchClient.JobOperations.DeleteJobAsync(job.Id);

                throw;
            }
        }
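        // A minimal usage sketch for CreateBatchJobAsync above. The job ID, pool ID, and command
        // line are illustrative placeholders, and the method is assumed to live on the same class
        // (it uses the batchClient and logger fields); this sketch is not part of the original sample.
        public async Task SubmitExampleJobAsync()
        {
            var poolInformation = new PoolInformation { PoolId = "example-pool" }; // hypothetical pool ID
            var cloudTask = new CloudTask("example-task", "/bin/sh -c 'echo hello'");

            // CreateBatchJobAsync sets OnAllTasksComplete.TerminateJob, so the job ends
            // automatically once this single task finishes.
            await CreateBatchJobAsync("example-job", cloudTask, poolInformation);
        }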
Example #4
        public async Task Job_CanAddJobWithJobManagerAndAllowLowPriorityTrue()
        {
            Func <Task> test = async() =>
            {
                using (BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false))
                {
                    string jobId = "TestJobWithLowPriJobManager-" + TestUtilities.GetMyName();
                    try
                    {
                        PoolInformation poolInfo = new PoolInformation()
                        {
                            PoolId = "Fake"
                        };

                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);
                        unboundJob.JobManagerTask = new JobManagerTask("foo", "cmd /c echo hi")
                        {
                            AllowLowPriorityNode = true
                        };
                        await unboundJob.CommitAsync().ConfigureAwait(false);

                        await unboundJob.RefreshAsync().ConfigureAwait(false);

                        Assert.True(unboundJob.JobManagerTask.AllowLowPriorityNode);
                    }
                    finally
                    {
                        await TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId);
                    }
                }
            };

            await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
        }
        public void ApplyPoolInformationResponseModels(string coinSymbol, List <PoolInformation> poolInformationResponseModels)
        {
            //apply to non-Network Devices, those are populated separately
            IEnumerable <DeviceViewModel> relevantDevices = Devices.Where(d => (d.Kind != DeviceKind.NET) && (d.Coin != null) && d.Coin.Symbol.Equals(coinSymbol));

            foreach (DeviceViewModel relevantDevice in relevantDevices)
            {
                PoolInformation poolInformation = poolInformationResponseModels.SingleOrDefault(p => p.Index == relevantDevice.PoolIndex);
                if (poolInformation == null)
                {
                    //device not mining, or crashed, or no pool details
                    relevantDevice.LastShareDifficulty = 0;
                    relevantDevice.LastShareTime       = null;
                    relevantDevice.Url              = String.Empty;
                    relevantDevice.BestShare        = 0;
                    relevantDevice.PoolStalePercent = 0;
                }
                else
                {
                    relevantDevice.LastShareDifficulty = poolInformation.LastShareDifficulty;
                    relevantDevice.LastShareTime       = poolInformation.LastShareTime;
                    relevantDevice.Url              = poolInformation.Url;
                    relevantDevice.BestShare        = poolInformation.BestShare;
                    relevantDevice.PoolStalePercent = poolInformation.PoolStalePercent;
                }
            }
        }
        public static CloudJob CreateLoadTestJob(this BatchClient client, LoadTestSupervisorOptions options)
        {
            if (client == null)
            {
                throw new ArgumentNullException(nameof(client));
            }
            if (options == null)
            {
                throw new ArgumentNullException(nameof(options));
            }
            var jobId    = $"loadtest-{options.Name}";
            var poolInfo = new PoolInformation();

            if (options.UseAutoPool)
            {
                var poolSpecification = CreatePoolSpecification(options, jobId);
                poolInfo.AutoPoolSpecification = new AutoPoolSpecification
                {
                    AutoPoolIdPrefix   = "loadtest",
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    PoolSpecification  = poolSpecification
                };
            }
            else
            {
                poolInfo.PoolId = options.PoolId;
            }

            var job = client.JobOperations.CreateJob(jobId, poolInfo);

            job.DisplayName = $"Run load test ({jobId})";
            job.Constraints = new JobConstraints(options.JobTimeout);
            return job;
        }
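        // A short usage sketch for the CreateLoadTestJob extension method above. It assumes a
        // populated LoadTestSupervisorOptions instance supplied by the caller; note the returned
        // CloudJob is unbound, so nothing reaches the Batch service until it is committed.
        public static void SubmitLoadTestJob(BatchClient client, LoadTestSupervisorOptions options)
        {
            CloudJob job = client.CreateLoadTestJob(options);

            job.Commit(); // or await job.CommitAsync() in async code
        }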
        private void SubmitBatchJob(AnalysisEntity analysisEntity, IEnumerable <AnalysisQueryEntity> queries, List <ResourceFile> resourceFiles)
        {
            PoolInformation poolInfo;

            if (!string.IsNullOrEmpty(analysisEntity.PoolId))
            {
                poolInfo = new PoolInformation
                {
                    PoolId = analysisEntity.PoolId,
                };
            }
            else
            {
                var maxTasksPerNode = _configuration.GetCoresForVirtualMachineSize(analysisEntity.VirtualMachineSize);

                if (analysisEntity.TargetDedicated == 1 && maxTasksPerNode == 1)
                {
                    // Need to always ensure a JM can run
                    maxTasksPerNode = 2;
                }

                poolInfo = new PoolInformation
                {
                    AutoPoolSpecification = new AutoPoolSpecification
                    {
                        PoolSpecification = new PoolSpecification
                        {
                            TargetDedicated             = analysisEntity.TargetDedicated,
                            MaxTasksPerComputeNode      = maxTasksPerNode,
                            VirtualMachineSize          = analysisEntity.VirtualMachineSize,
                            VirtualMachineConfiguration = _configuration.GetVirtualMachineConfiguration(),
                        },
                        PoolLifetimeOption = PoolLifetimeOption.Job,
                        KeepAlive          = false,
                    }
                };
            }

            var job = _batchClient.JobOperations.CreateJob(analysisEntity.JobId, poolInfo);

            job.DisplayName        = analysisEntity.DatabaseId;
            job.JobPreparationTask = GetJobPreparationTask(
                analysisEntity.DatabaseId,
                analysisEntity.DatabaseContainer,
                resourceFiles);
            job.JobManagerTask = GetJobManagerTask(
                analysisEntity.Id.ToString(),
                resourceFiles);
            job.Commit();

            var tasks = GetTasks(analysisEntity, queries);

            job.Refresh();
            job.AddTask(tasks);

//            job.Refresh();
//            job.OnAllTasksComplete = OnAllTasksComplete.TerminateJob;
        }
Example #8
        public static void Submit()
        {
            Log("Start submission process.");
            state = null;

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);

            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                #region job submission

                string          jobname = prefix + Environment.GetEnvironmentVariable("USERNAME") + "_" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
                PoolInformation pool    = new PoolInformation();
                pool.PoolId = Settings.poolname;

                CloudJob job = client.JobOperations.CreateJob(jobname, pool); // <-- create a workitem that runs on pool "trdemo"

                Log("Submitting...");
                job.Commit();
                jobName = jobname;
                Log(string.Format("Job {0} created.", jobname));

                job = client.JobOperations.GetJob(jobname);

                Log("Analyzing input blobs...");
                string inputcontainersas  = StorageHelper.GetContainerSAS(Settings.inputContainer);
                string outputcontainersas = StorageHelper.GetContainerSAS(Settings.outputContainer);
                foreach (string blob in StorageHelper.ListBlobs(Settings.inputContainer))
                {
                    string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                    string taskname = "task_" + System.IO.Path.GetFileNameWithoutExtension(filename);

                    // prepare the command line
                    string cli;
                    cli  = ". robocopy.exe ${env:WATASK_TVM_ROOT_DIR}\\shared\\ . *.*;";
                    cli += "ffmpeg.exe -i {0} -vf 'movie=microsoft.png [watermark]; [in][watermark] overlay=10:main_h-overlay_h-10 [out]' {0}.output.avi;".Replace("{0}", filename);
                    cli += "azcopy.exe . {0} *.output.avi /destsas:'{1}' /y".Replace("{0}", Settings.outputContainer).Replace("{1}", outputcontainersas);

                    cli = string.Format("powershell -command \"{0}\"", cli);

                    // prepare task object
                    CloudTask task = new CloudTask(taskname, cli);
                    task.ResourceFiles = new List <ResourceFile>();
                    task.ResourceFiles.Add(new ResourceFile(blob + inputcontainersas, filename));

                    job.AddTask(task); // <-- add Task
                }


                #endregion job submission

                ThreadPool.QueueUserWorkItem((x) => { Monitor(); });

                client.Utilities.CreateTaskStateMonitor().WaitAll(client.JobOperations.ListTasks(jobname), TaskState.Completed, new TimeSpan(0, 60, 0));
                client.JobOperations.GetJob(jobname).Terminate();
            }
        }
Example #9
        public void SetUpdateJobConditionalHeader()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientFromEnvironmentAsync().Result)
                {
                    string jobId = "JobConditionalHeaders-" + TestUtilities.GetMyName();
                    try
                    {
                        PoolInformation poolInfo = new PoolInformation()
                        {
                            PoolId = "Fake"
                        };

                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);
                        unboundJob.Commit();

                        CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                        string capturedEtag1 = boundJob.ETag;
                        this.testOutputHelper.WriteLine("Etag is: {0}", capturedEtag1);
                        Assert.NotNull(capturedEtag1);

                        boundJob.Constraints = new JobConstraints(TimeSpan.FromMinutes(60), 0);

                        BatchClientBehavior updateInterceptor = new Protocol.RequestInterceptor(
                            (req) =>
                        {
                            var typedParams = req.Options as Protocol.Models.JobUpdateOptions;
                            if (typedParams != null)
                            {
                                typedParams.IfMatch = capturedEtag1;
                            }
                        });

                        //Update bound job with if-match header, it should succeed
                        boundJob.Commit(additionalBehaviors: new[] { updateInterceptor });

                        boundJob = batchCli.JobOperations.GetJob(jobId);

                        boundJob.Constraints = new JobConstraints(TimeSpan.FromMinutes(30), 1);

                        //Update bound job with if-match header, it should fail
                        Exception e = TestUtilities.AssertThrows <BatchException>(() => boundJob.Commit(additionalBehaviors: new[] { updateInterceptor }));
                        TestUtilities.AssertIsBatchExceptionAndHasCorrectAzureErrorCode(e, BatchErrorCodeStrings.ConditionNotMet, this.testOutputHelper);
                    }
                    finally
                    {
                        TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, LongTestTimeout);
        }
Example #10
        public async Task CreateJobAsync(CreateJobOptions createJobOptions)
        {
            CloudJob unboundJob = this.Client.JobOperations.CreateJob();

            unboundJob.Id          = createJobOptions.JobId;
            unboundJob.Priority    = createJobOptions.Priority;
            unboundJob.Constraints = new JobConstraints(createJobOptions.MaxWallClockTime, createJobOptions.MaxRetryCount);

            PoolInformation poolInformation = new PoolInformation();

            if (createJobOptions.AutoPoolOptions.UseAutoPool.HasValue && createJobOptions.AutoPoolOptions.UseAutoPool.Value)
            {
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix   = createJobOptions.AutoPoolOptions.AutoPoolPrefix,
                    KeepAlive          = createJobOptions.AutoPoolOptions.KeepAlive,
                    PoolLifetimeOption = (PoolLifetimeOption)Enum.Parse(typeof(PoolLifetimeOption), createJobOptions.AutoPoolOptions.LifeTimeOption),
                    PoolSpecification  = new PoolSpecification()
                    {
                        CloudServiceConfiguration     = new CloudServiceConfiguration(createJobOptions.AutoPoolOptions.OSFamily),
                        VirtualMachineSize            = createJobOptions.AutoPoolOptions.VirutalMachineSize,
                        TargetDedicatedComputeNodes   = createJobOptions.AutoPoolOptions.TargetDedicated,
                        TargetLowPriorityComputeNodes = createJobOptions.AutoPoolOptions.TargetLowPriority
                    }
                };

                poolInformation.AutoPoolSpecification = autoPoolSpecification;
            }
            else
            {
                poolInformation.PoolId = createJobOptions.PoolId;
            }

            unboundJob.PoolInformation = poolInformation;

            if (createJobOptions.CreateJobManager.HasValue && createJobOptions.CreateJobManager.Value == true)
            {
                JobManagerTask jobManager = new JobManagerTask()
                {
                    CommandLine         = createJobOptions.JobManagerOptions.CommandLine,
                    KillJobOnCompletion = createJobOptions.JobManagerOptions.KillOnCompletion,
                    Id = createJobOptions.JobManagerOptions.JobManagerId
                };

                jobManager.Constraints = new TaskConstraints(
                    createJobOptions.JobManagerOptions.MaxTaskWallClockTime,
                    createJobOptions.JobManagerOptions.RetentionTime,
                    createJobOptions.JobManagerOptions.MaxTaskRetryCount);

                unboundJob.JobManagerTask = jobManager;
            }

            await unboundJob.CommitAsync();
        }
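        // A minimal sketch of calling CreateJobAsync above against an existing pool. CreateJobOptions
        // and its nested AutoPoolOptions type are assumed (hypothetically) to expose settable
        // properties and parameterless constructors; all IDs and values here are placeholders.
        public async Task CreateSimpleJobAsync()
        {
            var options = new CreateJobOptions
            {
                JobId    = "example-job",
                Priority = 0,
                PoolId   = "existing-pool-id",

                // CreateJobAsync dereferences AutoPoolOptions unconditionally, so an empty
                // (hypothetical) instance is supplied; with UseAutoPool unset, PoolId is used.
                AutoPoolOptions = new AutoPoolOptions()
            };

            await CreateJobAsync(options);
        }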
        public async Task CanCreateJobAndAutoPoolWithAppPackageReferences()
        {
            var          jobId         = Guid.NewGuid().ToString();
            const string applicationId = "blender";

            Func <Task> test = async() =>
            {
                using (BatchClient client = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false))
                {
                    var poolInfo = new PoolInformation
                    {
                        AutoPoolSpecification = new AutoPoolSpecification
                        {
                            PoolSpecification = new PoolSpecification
                            {
                                ApplicationPackageReferences = new[]
                                {
                                    new ApplicationPackageReference
                                    {
                                        ApplicationId = applicationId,
                                        Version       = PoolFixture.VMSize,
                                    }
                                },
                                CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily),
                                VirtualMachineSize        = PoolFixture.VMSize,
                            },
                            PoolLifetimeOption = PoolLifetimeOption.Job
                        }
                    };

                    CloudJob response = null;
                    try
                    {
                        CloudJob job = client.JobOperations.CreateJob(jobId, poolInfo);
                        await job.CommitAsync().ConfigureAwait(false);

                        response = await client.JobOperations.GetJobAsync(jobId).ConfigureAwait(false);

                        Assert.Equal(response.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First().ApplicationId, applicationId);
                    }
                    finally
                    {
                        if (response != null)
                        {
                            TestUtilities.DeleteJobIfExistsAsync(client, jobId).Wait();
                        }
                    }
                }
            };

            await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
        }
Example #12
        public async Task CanCreateJobAndAutoPoolWithAppPackageReferences()
        {
            string jobId = Guid.NewGuid().ToString();

            var poolInformation = new PoolInformation
            {
                AutoPoolSpecification = new AutoPoolSpecification
                {
                    PoolSpecification = new PoolSpecification
                    {
                        ApplicationPackageReferences = new List <ApplicationPackageReference>
                        {
                            new ApplicationPackageReference
                            {
                                ApplicationId = ApplicationId,
                                Version       = Version
                            }
                        },
                        CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily),
                        VirtualMachineSize        = PoolFixture.VMSize,
                    },
                    PoolLifetimeOption = PoolLifetimeOption.Job
                }
            };

            async Task test()
            {
                using BatchClient client = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false);

                try
                {
                    var job = client.JobOperations.CreateJob(jobId, poolInformation);

                    await job.CommitAsync().ConfigureAwait(false);

                    CloudJob jobResponse = await client.JobOperations.GetJobAsync(jobId).ConfigureAwait(false);

                    ApplicationPackageReference apr = jobResponse.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First();

                    Assert.Equal(ApplicationId, apr.ApplicationId);
                    Assert.Equal(Version, apr.Version);
                }
                finally
                {
                    TestUtilities.DeleteJobIfExistsAsync(client, jobId).Wait();
                }
            }

            await SynchronizationContextHelper.RunTestAsync(test, LongTestTimeout);
        }
        public async Task Job_GetTaskCounts_ReturnsCorrectCountNonZeroTaskSlots()
        {
            Func <Task> test = async() =>
            {
                using (BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false))
                {
                    string jobId = "NonZeroTaskSlots-" + TestUtilities.GetMyName();
                    try
                    {
                        PoolInformation poolInfo = new PoolInformation()
                        {
                            PoolId = "Fake"
                        };

                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);
                        await unboundJob.CommitAsync().ConfigureAwait(false);

                        await unboundJob.RefreshAsync().ConfigureAwait(false);

                        CloudTask t1 = new CloudTask("t1", "cmd /c dir");
                        t1.RequiredSlots = 2;
                        CloudTask t2 = new CloudTask("t2", "cmd /c ping 127.0.0.1 -n 4");
                        t2.RequiredSlots = 3;

                        await unboundJob.AddTaskAsync(new[] { t1, t2 }).ConfigureAwait(false);

                        await Task.Delay(TimeSpan.FromSeconds(5)).ConfigureAwait(false); // Give the service some time to get the counts

                        var counts = await unboundJob.GetTaskCountsAsync().ConfigureAwait(false);

                        var retTask1 = await unboundJob.GetTaskAsync(t1.Id);

                        Assert.Equal(t1.RequiredSlots, retTask1.RequiredSlots);
                        var retTask2 = await unboundJob.GetTaskAsync(t2.Id);

                        Assert.Equal(t2.RequiredSlots, retTask2.RequiredSlots);

                        Assert.Equal(2, counts.TaskCounts.Active);
                        // Task slots counts is currently broken
                        // Assert.Equal(5, counts.TaskSlotCounts.Active);
                    }
                    finally
                    {
                        await TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId);
                    }
                }
            };

            await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
        }
        /// <summary>
        /// Constructs an Azure Batch PoolInformation instance
        /// </summary>
        /// <param name="image">The image name for the current <see cref="TesTask"/></param>
        /// <param name="vmSize">The Azure VM sku</param>
        /// <param name="preemptible">True if preemptible machine should be used</param>
        /// <returns>A <see cref="PoolInformation"/> describing a job-lifetime auto pool for the task</returns>
        private async Task <PoolInformation> CreatePoolInformation(string image, string vmSize, bool preemptible)
        {
            var vmConfig = new VirtualMachineConfiguration(
                imageReference: new ImageReference("ubuntu-server-container", "microsoft-azure-batch", "16-04-lts", "latest"),
                nodeAgentSkuId: "batch.node.ubuntu 16.04");

            var containerRegistryInfo = await azureProxy.GetContainerRegistryInfoAsync(image);

            if (containerRegistryInfo != null)
            {
                var containerRegistry = new ContainerRegistry(
                    userName: containerRegistryInfo.Username,
                    registryServer: containerRegistryInfo.RegistryServer,
                    password: containerRegistryInfo.Password);

                // Download private images at node startup, since those cannot be downloaded in the main task that runs multiple containers.
                // Doing this also requires that the main task runs inside a container, hence downloading the "docker" image (contains docker client) as well.
                vmConfig.ContainerConfiguration = new ContainerConfiguration
                {
                    ContainerImageNames = new List <string> {
                        image, DockerInDockerImageName
                    },
                    ContainerRegistries = new List <ContainerRegistry> {
                        containerRegistry
                    }
                };
            }

            var poolSpecification = new PoolSpecification
            {
                VirtualMachineConfiguration = vmConfig,
                VirtualMachineSize          = vmSize,
                ResizeTimeout = TimeSpan.FromMinutes(30),
                TargetLowPriorityComputeNodes = preemptible ? 1 : 0,
                TargetDedicatedComputeNodes   = preemptible ? 0 : 1
            };

            var poolInformation = new PoolInformation
            {
                AutoPoolSpecification = new AutoPoolSpecification
                {
                    AutoPoolIdPrefix   = "TES",
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    PoolSpecification  = poolSpecification,
                    KeepAlive          = false
                }
            };

            return poolInformation;
        }
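        // A brief sketch of how the PoolInformation returned by CreatePoolInformation above might be
        // consumed. The BatchClient parameter, job/task IDs, and command line are illustrative
        // assumptions; the original class routes its Batch calls through an azureProxy abstraction.
        private async Task SubmitTaskWithAutoPoolAsync(BatchClient batchClient, string image, string vmSize, bool preemptible)
        {
            PoolInformation poolInformation = await CreatePoolInformation(image, vmSize, preemptible);

            // PoolLifetimeOption.Job means the auto pool is created when the job is committed
            // and deleted again when the job completes.
            CloudJob job = batchClient.JobOperations.CreateJob("tes-example-job", poolInformation);
            await job.CommitAsync();

            await batchClient.JobOperations.AddTaskAsync("tes-example-job", new CloudTask("task-1", "echo hello"));
        }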
        public async Task AutoPoolApplicationPackagesFlowThroughToPool()
        {
            var jobId = Guid.NewGuid().ToString();

            var client = new BatchServiceClient(new Uri(Url), new BatchSharedKeyCredential(AccountName, AccountKey));

            var poolInfo = new PoolInformation
            {
                AutoPoolSpecification = new AutoPoolSpecification(poolLifetimeOption: PoolLifetimeOption.Job, pool: new PoolSpecification
                {
                    TargetDedicated = 0,
                    ApplicationPackageReferences = new[]
                    {
                        new ApplicationPackageReference {
                            ApplicationId = AppPackageName, Version = Version
                        },
                    },
                    CloudServiceConfiguration = new CloudServiceConfiguration("4"),
                    VmSize = "small",
                })
            };

            try
            {
                AzureOperationHeaderResponse <JobAddHeaders> addResponse = await client.Job.AddWithHttpMessagesAsync(new JobAddParameter(jobId, poolInfo : poolInfo)).ConfigureAwait(false);

                Assert.Equal(HttpStatusCode.Created, addResponse.Response.StatusCode);

                var autoPoolId = WaitForAutoPool(client, jobId);

                var poolResponse = await client.Pool.GetWithHttpMessagesAsync(autoPoolId).ConfigureAwait(false);

                Assert.Equal(HttpStatusCode.OK, poolResponse.Response.StatusCode);
                Assert.NotNull(poolResponse.Body);
                Assert.Equal(1, poolResponse.Body.ApplicationPackageReferences.Count);
                Assert.Equal(AppPackageName, poolResponse.Body.ApplicationPackageReferences[0].ApplicationId);
                Assert.Equal(Version, poolResponse.Body.ApplicationPackageReferences[0].Version);
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsNoThrow(client, jobId, output);
            }
        }
        /// <summary>
        /// Constructs an Azure Batch PoolInformation instance
        /// </summary>
        /// <param name="image">The image name for the current <see cref="TesTask"/></param>
        /// <param name="vmSize">The Azure VM sku</param>
        /// <param name="preemptible">True if preemptible machine should be used</param>
        /// <returns>A <see cref="PoolInformation"/> describing a job-lifetime auto pool for the task</returns>
        private async Task <PoolInformation> CreatePoolInformation(string image, string vmSize, bool preemptible)
        {
            var vmConfig = new VirtualMachineConfiguration(
                imageReference: new ImageReference("ubuntu-server-container", "microsoft-azure-batch", "16-04-lts", "latest"),
                nodeAgentSkuId: "batch.node.ubuntu 16.04");

            var containerRegistry = await GetContainerRegistry(image);

            vmConfig.ContainerConfiguration = new ContainerConfiguration
            {
                ContainerImageNames = new List <string> {
                    image
                },
                ContainerRegistries = containerRegistry != null ? new List <ContainerRegistry> {
                    containerRegistry
                } : null
            };

            var poolSpecification = new PoolSpecification
            {
                VirtualMachineConfiguration = vmConfig,
                VirtualMachineSize          = vmSize,
                ResizeTimeout = TimeSpan.FromMinutes(30),
                TargetLowPriorityComputeNodes = preemptible ? 1 : 0,
                TargetDedicatedComputeNodes   = preemptible ? 0 : 1
            };

            var poolInformation = new PoolInformation
            {
                AutoPoolSpecification = new AutoPoolSpecification
                {
                    AutoPoolIdPrefix   = "TES",
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    PoolSpecification  = poolSpecification,
                    KeepAlive          = false
                }
            };

            return poolInformation;
        }
        private static async Task TestAutoPoolCreateAndUpdateWithCertificateReferencesAsync(
            BatchClient batchCli,
            IList <CertificateReference> certificateReferences)
        {
            string jobId = "TestAutoPoolCreateAndUpdateWithCertificateReferences-" + TestUtilities.GetMyName();

            try
            {
                PoolInformation poolInformation = new PoolInformation
                {
                    AutoPoolSpecification = new AutoPoolSpecification
                    {
                        PoolSpecification = new PoolSpecification
                        {
                            CertificateReferences       = certificateReferences,
                            CloudServiceConfiguration   = new CloudServiceConfiguration(PoolFixture.OSFamily),
                            TargetDedicatedComputeNodes = 0,
                            VirtualMachineSize          = PoolFixture.VMSize,
                        },
                        AutoPoolIdPrefix   = TestUtilities.GetMyName(),
                        PoolLifetimeOption = PoolLifetimeOption.Job
                    }
                };

                CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInformation);

                await unboundJob.CommitAsync().ConfigureAwait(false);

                CloudJob boundJob = await batchCli.JobOperations.GetJobAsync(jobId).ConfigureAwait(false);

                AssertCertificateReferenceCollectionsAreSame(
                    certificateReferences,
                    boundJob.PoolInformation.AutoPoolSpecification.PoolSpecification.CertificateReferences);
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
        private void SubmitBatchJob(
            string jobId,
            string databaseName,
            IReadOnlyList <DatabaseFragment> fragments,
            List <ResourceFile> resourceFiles)
        {
            var poolInfo = new PoolInformation
            {
                AutoPoolSpecification = new AutoPoolSpecification
                {
                    PoolSpecification = new PoolSpecification
                    {
                        TargetDedicated             = Math.Min(_configuration.GetImportVirtualMachineMaxCount(), fragments.Count), // We don't want to create too many VMs
                        MaxTasksPerComputeNode      = _configuration.GetImportVirtualMachineCores(),
                        VirtualMachineSize          = _configuration.GetImportVirtualMachineSize(),
                        VirtualMachineConfiguration = _configuration.GetVirtualMachineConfiguration(),
                        StartTask = GetStartTask(resourceFiles),
                    },
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    KeepAlive          = false,
                }
            };

            var job = _batchClient.JobOperations.CreateJob(jobId, poolInfo);

            job.DisplayName        = databaseName;
            job.JobPreparationTask = GetJobPreparationTask(resourceFiles);
            job.JobManagerTask     = GetJobManagerTask(databaseName, resourceFiles);
            job.Commit();

            var tasks = GetTasks(databaseName, fragments);

            job.Refresh();
            job.AddTask(tasks);
            job.Refresh();
            job.OnAllTasksComplete = OnAllTasksComplete.TerminateJob;
        }
Example #20
        public void TestOMJobReleaseSchedulingError()
        {
            string jobId = "TestOMJobReleaseSchedulingError-" + CraftTimeString() + "-" + TestUtilities.GetMyName();
            Action test  = () =>
            {
                using (BatchClient client = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
                {
                    try
                    {
                        // create job schedule with prep that succeeds and release the triggers scheduling error
                        {
                            PoolInformation poolInfo = new PoolInformation()
                            {
                                PoolId = this.poolFixture.PoolId
                            };
                            CloudJob unboundJob = client.JobOperations.CreateJob(jobId, poolInfo);

                            // add the jobPrep task to the job
                            {
                                JobPreparationTask prep = new JobPreparationTask("cmd /c echo the quick job prep jumped over the...");
                                unboundJob.JobPreparationTask = prep;

                                prep.WaitForSuccess = false; // we don't really care but why not set this
                            }

                            // add a jobRelease task to the job
                            {
                                JobReleaseTask relTask = new JobReleaseTask("cmd /c echo Job Release Task");
                                unboundJob.JobReleaseTask = relTask;

                                ResourceFile[] badResFiles = { ResourceFile.FromUrl("https://not.a.domain.invalid/file", "bob.txt") };

                                relTask.ResourceFiles = badResFiles;

                                relTask.Id = "jobRelease";
                            }

                            // add the job to the service
                            unboundJob.Commit();
                        }

                        // add a trivial task to force the JP
                        client.JobOperations.AddTask(jobId, new CloudTask("ForceJobPrep", "cmd /c echo TestOMJobReleaseSchedulingError"));

                        // wait for the task to complete

                        TaskStateMonitor tsm = client.Utilities.CreateTaskStateMonitor();

                        tsm.WaitAll(
                            client.JobOperations.ListTasks(jobId),
                            TaskState.Completed,
                            TimeSpan.FromMinutes(10),
                            additionalBehaviors:
                            new[]
                        {
                            // spam/logging interceptor
                            new Protocol.RequestInterceptor((x) =>
                            {
                                this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                                // print out the compute node states... we are actually waiting on the compute nodes
                                List <ComputeNode> allComputeNodes = client.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).ToList();

                                this.testOutputHelper.WriteLine("    #compute nodes: " + allComputeNodes.Count);

                                allComputeNodes.ForEach((icn) =>
                                {
                                    this.testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State);
                                });
                                this.testOutputHelper.WriteLine("");
                            })
                        }
                            );

                        // ok terminate job to trigger job release
                        client.JobOperations.TerminateJob(jobId, "BUG: Server will throw 500 if I don't provide reason");

                        // the victim compute node.  pool should have size 1.
                        List <ComputeNode> computeNodes = client.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).ToList();

                        Assert.Single(computeNodes);

                        // now we have a job that should be trying to run the JP
                        // poll for the JP to have been run, and it must have a scheduling error
                        bool releaseNotCompleted = true;

                        // gotta poll to find out when the jp has been run
                        while (releaseNotCompleted)
                        {
                            List <JobPreparationAndReleaseTaskExecutionInformation> jrStatusList =
                                client.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId).ToList();

                            JobPreparationAndReleaseTaskExecutionInformation prepAndReleaseStatus = jrStatusList.FirstOrDefault();

                            if (prepAndReleaseStatus != null && null != prepAndReleaseStatus.JobReleaseTaskExecutionInformation)
                            {
                                if (JobReleaseTaskState.Completed == prepAndReleaseStatus.JobReleaseTaskExecutionInformation.State)
                                {
                                    releaseNotCompleted = false; // we see a JP has been run

                                    // now assert the failure info
                                    Assert.NotNull(prepAndReleaseStatus);
                                    Assert.NotNull(prepAndReleaseStatus.JobReleaseTaskExecutionInformation.FailureInformation);
                                    Assert.Equal(TaskExecutionResult.Failure, prepAndReleaseStatus.JobReleaseTaskExecutionInformation.Result);

                                    // spew the failure info
                                    this.OutputFailureInfo(prepAndReleaseStatus.JobReleaseTaskExecutionInformation.FailureInformation);
                                }
                            }
                            Thread.Sleep(2000);
                            this.testOutputHelper.WriteLine("Job Release tasks still running (waiting for blob dl to timeout).");
                        }
                    }
                    finally
                    {
                        client.JobOperations.DeleteJob(jobId);
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, LongTestTimeout);
        }
Example #21
        public static void ParseTextForDeviceDetails(string text, List <PoolInformation> poolInformation)
        {
            List <string> deviceBlob = text.Split('|').ToList();

            deviceBlob.RemoveAt(0);

            foreach (string deviceText in deviceBlob)
            {
                if (deviceText == "\0")
                {
                    continue;
                }

                //bfgminer may have multiple entries for the same key, e.g. Hardware Errors
                //seen with customer data/hardware
                //remove dupes using Distinct()
                var deviceAttributes = deviceText.Split(',').ToList().Distinct();

                Dictionary <string, string> keyValuePairs = deviceAttributes
                                                            .Where(value => value.Contains('='))
                                                            .Select(value => value.Split('='))
                                                            .ToDictionary(pair => pair[0], pair => pair[1]);

                //seen Count == 0 with user API logs
                if (keyValuePairs.Count > 0)
                {
                    PoolInformation newPool = new PoolInformation();

                    newPool.Index = int.Parse(keyValuePairs["POOL"]);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("URL"))
                    {
                        newPool.Url = keyValuePairs["URL"];
                    }

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Status"))
                    {
                        newPool.Status = keyValuePairs["Status"];
                    }

                    newPool.Priority = TryToParseInt(keyValuePairs, "Priority", 0);
                    newPool.Quota    = TryToParseInt(keyValuePairs, "Quota", 0);

                    if (keyValuePairs.ContainsKey("Long Pool"))
                    {
                        newPool.LongPoll = !keyValuePairs["Long Pool"].Equals("n", StringComparison.OrdinalIgnoreCase);
                    }

                    newPool.GetWorks       = TryToParseInt(keyValuePairs, "Getworks", 0);
                    newPool.Accepted       = TryToParseInt(keyValuePairs, "Accepted", 0);
                    newPool.Rejected       = TryToParseInt(keyValuePairs, "Rejected", 0);
                    newPool.Works          = TryToParseInt(keyValuePairs, "Works", 0);
                    newPool.Discarded      = TryToParseInt(keyValuePairs, "Discarded", 0);
                    newPool.Stale          = TryToParseInt(keyValuePairs, "Stale", 0);
                    newPool.GetFailures    = TryToParseInt(keyValuePairs, "Get Failures", 0);
                    newPool.RemoteFailures = TryToParseInt(keyValuePairs, "Remote Failures", 0);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("User"))
                    {
                        newPool.User = keyValuePairs["User"];
                    }

                    newPool.LastShareTime = TryToParseInt(keyValuePairs, "Last Share Time", 0).UnixTimeToDateTime();
                    newPool.Diff1Shares   = TryToParseInt(keyValuePairs, "Diff1 Shares", 0);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Proxy"))
                    {
                        newPool.Proxy = keyValuePairs["Proxy"];
                    }

                    newPool.DifficultyAccepted  = TryToParseDouble(keyValuePairs, "Difficulty Accepted", 0.0);
                    newPool.DifficultyRejected  = TryToParseDouble(keyValuePairs, "Difficulty Rejected", 0.0);
                    newPool.DifficultyStale     = TryToParseDouble(keyValuePairs, "Difficulty Stale", 0.0);
                    newPool.LastShareDifficulty = TryToParseDouble(keyValuePairs, "Last Share Difficulty", 0.0);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Has Stratum"))
                    {
                        newPool.HasStratum = keyValuePairs["Has Stratum"].Equals("true", StringComparison.OrdinalIgnoreCase);
                    }

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Stratum Active"))
                    {
                        newPool.StratumActive = keyValuePairs["Stratum Active"].Equals("true", StringComparison.OrdinalIgnoreCase);
                    }

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Stratum URL"))
                    {
                        newPool.StratumUrl = keyValuePairs["Stratum URL"];
                    }

                    newPool.BestShare           = TryToParseDouble(keyValuePairs, "Best Share", 0);
                    newPool.PoolRejectedPercent = TryToParseDouble(keyValuePairs, "Pool Rejected%", 0.0);
                    newPool.PoolStalePercent    = TryToParseDouble(keyValuePairs, "Pool Stale%", 0.0);

                    poolInformation.Add(newPool);
                }
            }
        }
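        // A small usage sketch for ParseTextForDeviceDetails above. The response text is a
        // hand-written approximation of a cgminer/bfgminer "pools" API reply (real replies carry
        // many more key=value pairs per section); it is illustrative, not captured output.
        public static List <PoolInformation> ParseSamplePoolsResponse()
        {
            const string sampleText =
                "STATUS=S,When=1700000000,Code=7,Msg=1 Pool(s),Description=cgminer|" +
                "POOL=0,URL=stratum+tcp://pool.example.com:3333,Status=Alive,Priority=0,Quota=1," +
                "Getworks=10,Accepted=5,Rejected=0,Works=20,Discarded=1,Stale=0," +
                "Get Failures=0,Remote Failures=0,User=worker1,Last Share Time=1700000100," +
                "Diff1 Shares=5,Difficulty Accepted=5.0,Difficulty Rejected=0.0,Difficulty Stale=0.0," +
                "Last Share Difficulty=1.0,Best Share=12,Pool Rejected%=0.0,Pool Stale%=0.0|";

            var pools = new List <PoolInformation>();

            // The parser drops the leading STATUS section, then builds one PoolInformation
            // per '|'-delimited POOL section from its key=value pairs.
            ParseTextForDeviceDetails(sampleText, pools);
            return pools;
        }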
        /// <summary>
        /// Creates a job schedule with the specified options.
        /// </summary>
        /// <param name="options">The options describing the job schedule to create.</param>
        /// <returns>A <see cref="Task"/> representing the asynchronous operation</returns>
        public async Task CreateJobScheduleAsync(CreateJobScheduleOptions options)
        {
            CloudJobSchedule unboundJobSchedule = this.Client.JobScheduleOperations.CreateJobSchedule();
            unboundJobSchedule.Id = options.JobScheduleId;
                
            PoolInformation poolInformation = new PoolInformation();
            if (options.AutoPoolOptions.UseAutoPool.HasValue && options.AutoPoolOptions.UseAutoPool.Value)
            {
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix = options.AutoPoolOptions.AutoPoolPrefix,
                    KeepAlive = options.AutoPoolOptions.KeepAlive,
                    PoolLifetimeOption = (PoolLifetimeOption)Enum.Parse(typeof(PoolLifetimeOption), options.AutoPoolOptions.LifeTimeOption),
                    PoolSpecification = new PoolSpecification()
                    {
                        OSFamily = options.AutoPoolOptions.OSFamily,
                        VirtualMachineSize = options.AutoPoolOptions.VirutalMachineSize,
                        TargetDedicated = options.AutoPoolOptions.TargetDedicated
                    }
                };

                poolInformation.AutoPoolSpecification = autoPoolSpecification;
            }
            else
            {
                poolInformation.PoolId = options.PoolId;
            }
                
            unboundJobSchedule.JobSpecification = new JobSpecification()
            {
                Priority = options.Priority,
                PoolInformation = poolInformation
            };

            // TODO: These are read only
            unboundJobSchedule.JobSpecification.Constraints = new JobConstraints(options.MaxWallClockTime, options.MaxRetryCount);

            Schedule schedule = new Schedule()
            {
                DoNotRunAfter = options.DoNotRunAfter,
                DoNotRunUntil = options.DoNotRunUntil,
                RecurrenceInterval = options.RecurrenceInterval,
                StartWindow = options.StartWindow
            };
            unboundJobSchedule.Schedule = schedule;

            if (options.CreateJobManager.HasValue && options.CreateJobManager.Value == true)
            {
                JobManagerTask jobManager = new JobManagerTask()
                {
                    CommandLine = options.JobManagerOptions.CommandLine,
                    KillJobOnCompletion = options.JobManagerOptions.KillOnCompletion,
                    Id = options.JobManagerOptions.JobManagerId
                };

                jobManager.Constraints = new TaskConstraints(options.JobManagerOptions.MaxTaskWallClockTime, options.JobManagerOptions.RetentionTime, options.JobManagerOptions.MaxTaskRetryCount);

                unboundJobSchedule.JobSpecification.JobManagerTask = jobManager;
            }

            await unboundJobSchedule.CommitAsync();
        }
        public async Task CreateJobAsync(CreateJobOptions createJobOptions)
        {
            CloudJob unboundJob = this.Client.JobOperations.CreateJob();

            unboundJob.Id = createJobOptions.JobId;
            unboundJob.Priority = createJobOptions.Priority;
            unboundJob.Constraints = new JobConstraints(createJobOptions.MaxWallClockTime, createJobOptions.MaxRetryCount);

            PoolInformation poolInformation = new PoolInformation();
            if (createJobOptions.AutoPoolOptions.UseAutoPool.HasValue && createJobOptions.AutoPoolOptions.UseAutoPool.Value)
            {
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix = createJobOptions.AutoPoolOptions.AutoPoolPrefix,
                    KeepAlive = createJobOptions.AutoPoolOptions.KeepAlive,
                    PoolLifetimeOption = (PoolLifetimeOption)Enum.Parse(typeof(PoolLifetimeOption), createJobOptions.AutoPoolOptions.LifeTimeOption),
                    PoolSpecification = new PoolSpecification()
                    {
                        OSFamily = createJobOptions.AutoPoolOptions.OSFamily,
                        VirtualMachineSize = createJobOptions.AutoPoolOptions.VirutalMachineSize,
                        TargetDedicated = createJobOptions.AutoPoolOptions.TargetDedicated
                    }
                };

                poolInformation.AutoPoolSpecification = autoPoolSpecification;
            }
            else
            {
                poolInformation.PoolId = createJobOptions.PoolId;
            }

            unboundJob.PoolInformation = poolInformation;

            if (createJobOptions.CreateJobManager.HasValue && createJobOptions.CreateJobManager.Value == true)
            {
                JobManagerTask jobManager = new JobManagerTask()
                {
                    CommandLine = createJobOptions.JobManagerOptions.CommandLine,
                    KillJobOnCompletion = createJobOptions.JobManagerOptions.KillOnCompletion,
                    Id = createJobOptions.JobManagerOptions.JobManagerId
                };

                jobManager.Constraints = new TaskConstraints(
                    createJobOptions.JobManagerOptions.MaxTaskWallClockTime, 
                    createJobOptions.JobManagerOptions.RetentionTime, 
                    createJobOptions.JobManagerOptions.MaxTaskRetryCount);

                unboundJob.JobManagerTask = jobManager;
            }

            await unboundJob.CommitAsync();
        }
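        // A minimal sketch of the same flow written directly against the Batch SDK,
        // not part of the original sample. Identifiers are illustrative: this.Client
        // is the same BatchClient property used by CreateJobAsync above, and a pool
        // named "existing-pool" is assumed to already exist in the account.
        public async Task CreateJobOnExistingPoolSketchAsync()
        {
            CloudJob sketchJob = this.Client.JobOperations.CreateJob();
            sketchJob.Id = "sample-job";
            sketchJob.Constraints = new JobConstraints(TimeSpan.FromHours(1), 3);
            sketchJob.PoolInformation = new PoolInformation() { PoolId = "existing-pool" };

            await sketchJob.CommitAsync();
        }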
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.textSearchSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.accountSettings.StorageAccountName,
                    this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            //Upload resources if required.
            if (this.textSearchSettings.ShouldUploadResources)
            {
                Console.WriteLine("Splitting file: {0} into {1} subfiles",
                                  Constants.TextFilePath,
                                  this.textSearchSettings.NumberOfMapperTasks);

                //Split the text file into the correct number of files for consumption by the mapper tasks.
                FileSplitter  splitter        = new FileSplitter();
                List <string> mapperTaskFiles = await splitter.SplitAsync(
                    Constants.TextFilePath,
                    this.textSearchSettings.NumberOfMapperTasks);

                List <string> files = Constants.RequiredExecutableFiles.Union(mapperTaskFiles).ToList();

                await SampleHelpers.UploadResourcesAsync(
                    cloudStorageAccount,
                    this.textSearchSettings.BlobContainer,
                    files);
            }

            //Generate a SAS for the container.
            string containerSasUrl = SampleHelpers.ConstructContainerSas(
                cloudStorageAccount,
                this.textSearchSettings.BlobContainer);

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                //
                // Construct the job properties in local memory before committing them to the Batch Service.
                //

                //Allow enough compute nodes in the pool to run each mapper task, and 1 extra to run the job manager.
                int numberOfPoolComputeNodes = 1 + this.textSearchSettings.NumberOfMapperTasks;

                //Define the pool specification for the pool which the job will run on.
                PoolSpecification poolSpecification = new PoolSpecification()
                {
                    TargetDedicated    = numberOfPoolComputeNodes,
                    VirtualMachineSize = "small",
                    //You can learn more about os families and versions at:
                    //http://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix
                    OSFamily        = "4",
                    TargetOSVersion = "*"
                };

                //Use the auto pool feature of the Batch Service to create a pool when the job is created.
                //This creates a new pool for each job which is added.
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix   = "TextSearchPool",
                    KeepAlive          = false,
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    PoolSpecification  = poolSpecification
                };

                //Define the pool information for this job -- it will run on the pool defined by the auto pool specification above.
                PoolInformation poolInformation = new PoolInformation()
                {
                    AutoPoolSpecification = autoPoolSpecification
                };

                //Define the job manager for this job.  This job manager will run first and will submit the tasks for
                //the job.  The job manager is the executable which manages the lifetime of the job
                //and all tasks which should run for the job.  In this case, the job manager submits the mapper and reducer tasks.
                List <ResourceFile> jobManagerResourceFiles = SampleHelpers.GetResourceFiles(containerSasUrl, Constants.RequiredExecutableFiles);
                const string        jobManagerTaskId        = "JobManager";

                JobManagerTask jobManagerTask = new JobManagerTask()
                {
                    ResourceFiles = jobManagerResourceFiles,
                    CommandLine   = Constants.JobManagerExecutable,

                    //Determines if the job should terminate when the job manager process exits.
                    KillJobOnCompletion = true,
                    Id = jobManagerTaskId
                };

                //Create the unbound job in local memory.  An object which exists only in local memory (and not on the Batch Service) is "unbound".
                string jobId = Environment.GetEnvironmentVariable("USERNAME") + DateTime.UtcNow.ToString("yyyyMMdd-HHmmss");

                CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, poolInformation);
                unboundJob.JobManagerTask = jobManagerTask; //Assign the job manager task to this job

                try
                {
                    //Commit the unbound job to the Batch Service.
                    Console.WriteLine("Adding job: {0} to the Batch Service.", unboundJob.Id);
                    await unboundJob.CommitAsync(); //Issues a request to the Batch Service to add the job which was defined above.

                    //
                    // Wait for the job manager task to complete.
                    //

                    //An object which is backed by a corresponding Batch Service object is "bound."
                    CloudJob boundJob = await batchClient.JobOperations.GetJobAsync(jobId);

                    CloudTask boundJobManagerTask = await boundJob.GetTaskAsync(jobManagerTaskId);

                    TimeSpan maxJobCompletionTimeout = TimeSpan.FromMinutes(30);

                    // Monitor the current tasks to see when they are done.
                    // Occasionally a task may get killed and requeued during an upgrade or hardware failure, including the job manager
                    // task.  The job manager will be re-run in this case.  Robustness against this was not added into the sample for
                    // simplicity, but should be added into any production code.
                    Console.WriteLine("Waiting for job's tasks to complete");

                    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                    bool             timedOut         = await taskStateMonitor.WaitAllAsync(new List <CloudTask> {
                        boundJobManagerTask
                    }, TaskState.Completed, maxJobCompletionTimeout);

                    Console.WriteLine("Done waiting for job manager task.");

                    await boundJobManagerTask.RefreshAsync();

                    //Check to ensure the job manager task exited successfully.
                    await Helpers.CheckForTaskSuccessAsync(boundJobManagerTask, dumpStandardOutOnTaskSuccess : false);

                    if (timedOut)
                    {
                        throw new TimeoutException("Timed out waiting for job manager task to complete.");
                    }

                    //
                    // Download and write out the reducer tasks output
                    //

                    string reducerText = await SampleHelpers.DownloadBlobTextAsync(cloudStorageAccount, this.textSearchSettings.BlobContainer, Constants.ReducerTaskResultBlobName);

                    Console.WriteLine("Reducer reuslts:");
                    Console.WriteLine(reducerText);
                }
                finally
                {
                    //Delete the job.
                    //This will delete the auto pool associated with the job as long as the pool
                    //keep alive property is set to false.
                    if (this.textSearchSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job {0}", jobId);
                        batchClient.JobOperations.DeleteJob(jobId);
                    }

                    //Note that there were files uploaded to a container specified in the
                    //configuration file.  This container will not be deleted or cleaned up by this sample.
                }
            }
        }
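        // A small follow-on sketch (not part of the sample above): after the job
        // manager task completes, its stdout can be read directly from the compute
        // node, using the same GetNodeFile/ReadAsString calls that other examples on
        // this page use. Constants.StandardOutFileName is assumed to name the task's
        // standard output file.
        private static void PrintJobManagerOutput(CloudTask boundJobManagerTask)
        {
            NodeFile stdOutFile = boundJobManagerTask.GetNodeFile(Constants.StandardOutFileName);
            string stdOutText = stdOutFile.ReadAsString();

            Console.WriteLine("Job manager stdout:");
            Console.WriteLine(stdOutText);
        }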
        public void SampleCreateJobScheduleAutoPool()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jsId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-CreateWiAutoPoolTest";
                    try
                    {
                        CloudJobSchedule newJobSchedule = batchCli.JobScheduleOperations.CreateJobSchedule(jsId, null, null);
                        {
                            newJobSchedule.Metadata = MakeMetaData("onCreateName", "onCreateValue");

                            PoolInformation       poolInformation = new PoolInformation();
                            AutoPoolSpecification iaps            = new AutoPoolSpecification();
                            Schedule schedule = new Schedule()
                            {
                                RecurrenceInterval = TimeSpan.FromMinutes(18)
                            };
                            poolInformation.AutoPoolSpecification = iaps;

                            iaps.AutoPoolIdPrefix   = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName();
                            iaps.PoolLifetimeOption = Microsoft.Azure.Batch.Common.PoolLifetimeOption.Job;
                            iaps.KeepAlive          = false;

                            PoolSpecification ps = new PoolSpecification();

                            iaps.PoolSpecification = ps;

                            ps.TargetDedicated    = 1;
                            ps.VirtualMachineSize = PoolFixture.VMSize;

                            ps.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily);

                            ps.Metadata = MakeMetaData("pusMDIName", "pusMDIValue");

                            JobSpecification jobSpec = newJobSchedule.JobSpecification;
                            Assert.Null(jobSpec);

                            jobSpec = new JobSpecification(poolInformation);

                            JobManagerTask jobMgr = jobSpec.JobManagerTask;

                            Assert.Null(jobMgr);

                            jobMgr = new JobManagerTask(TestUtilities.GetMyName() + "-JobManagerTest", "hostname");

                            jobMgr.KillJobOnCompletion = false;

                            // set the JobManagerTask on the JobSpecification
                            jobSpec.JobManagerTask = jobMgr;

                            // set the JobSpecification on the Job Schedule
                            newJobSchedule.JobSpecification = jobSpec;

                            newJobSchedule.Schedule = schedule;

                            newJobSchedule.Commit();
                        }

                        CloudJobSchedule jobSchedule = batchCli.JobScheduleOperations.GetJobSchedule(jsId);
                        {
                            TestUtilities.DisplayJobScheduleLong(this.testOutputHelper, jobSchedule);

                            List <MetadataItem> mdi = new List <MetadataItem>(jobSchedule.Metadata);

                            // check the values specified for AddJobSchedule are correct.
                            foreach (MetadataItem curIMDI in mdi)
                            {
                                Assert.Equal("onCreateName", curIMDI.Name);
                                Assert.Equal("onCreateValue", curIMDI.Value);
                            }

                            // add metadata items
                            mdi.Add(new MetadataItem("modifiedName", "modifiedValue"));

                            jobSchedule.Metadata = mdi;

                            jobSchedule.Commit();

                            // confirm metadata updated correctly
                            CloudJobSchedule jsUpdated = batchCli.JobScheduleOperations.GetJobSchedule(jsId);
                            {
                                List <MetadataItem> updatedMDI = new List <MetadataItem>(jsUpdated.Metadata);

                                Assert.Equal(2, updatedMDI.Count);

                                Assert.Equal("onCreateName", updatedMDI[0].Name);
                                Assert.Equal("onCreateValue", updatedMDI[0].Value);

                                Assert.Equal("modifiedName", updatedMDI[1].Name);
                                Assert.Equal("modifiedValue", updatedMDI[1].Value);
                            }

                            jobSchedule.Refresh();

                            TestUtilities.DisplayJobScheduleLong(this.testOutputHelper, jobSchedule);
                        }
                    }
                    finally
                    {
                        // clean up
                        TestUtilities.DeleteJobScheduleIfExistsAsync(batchCli, jsId).Wait();
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
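        // MakeMetaData is a test helper that is not shown in this listing; a
        // plausible minimal implementation (an assumption, not the original code) is:
        private static List<MetadataItem> MakeMetaData(string name, string value)
        {
            return new List<MetadataItem> { new MetadataItem(name, value) };
        }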
Example #26
        static void Main(string[] args)
        {
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(endpoint, account, key);
            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                List<ResourceFile> resources = new List<ResourceFile>();
                foreach (string blob in StorageHelper.ListBlobs(resourceContainer))
                {
                    string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                    resources.Add(new ResourceFile(StorageHelper.GetBlobSASURL(blob), filename));
                }

                CloudPool pool = client.PoolOperations.CreatePool(poolname, "4", "medium", 10);
                pool.StartTask = new StartTask();
                pool.StartTask.ResourceFiles = resources;
                pool.StartTask.CommandLine = @"cmd /c copy *.* %WATASK_TVM_ROOT_DIR%\shared\";
                pool.StartTask.WaitForSuccess = true;
                //pool.Commit(); // <-- Create demo pool

                // Submit Job
                string jobname = "MVP_" + Environment.GetEnvironmentVariable("USERNAME") + "_" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
                PoolInformation poolinfo = new PoolInformation();
                poolinfo.PoolId = poolname;

                CloudJob job = client.JobOperations.CreateJob(jobname, poolinfo); // <-- create a job that runs on demo pool

                Console.WriteLine("Creating job..." + jobname);
                job.Commit();
                job = client.JobOperations.GetJob(jobname);

                string inputcontainersas = StorageHelper.GetContainerSAS(inputContainer);
                string outputcontainersas = StorageHelper.GetContainerSAS(outputContainer);
                List<CloudTask> tasks = new List<CloudTask>();
                Console.WriteLine("Analyzing blobs...");
                foreach (string blob in StorageHelper.ListBlobs(inputContainer)) // <-- Going through blobs
                {
                    string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                    string taskname = "task_" + System.IO.Path.GetFileNameWithoutExtension(filename);

                    // prepare the command line
                    string cli;
                    cli = ". robocopy.exe ${env:WATASK_TVM_ROOT_DIR}\\shared\\ . *.*;";
                    cli += "ffmpeg.exe -i {0} -vf 'movie=microsoft.png [watermark]; [in][watermark] overlay=10:main_h-overlay_h-10 [out]' {0}.output.avi;".Replace("{0}", filename);
                    cli += "azcopy.exe . {0} *.output.avi /destsas:'{1}' /y".Replace("{0}", outputContainer).Replace("{1}", outputcontainersas);

                    cli = string.Format("powershell -command \"{0}\"", cli);

                    // prepare task object
                    CloudTask task = new CloudTask(taskname, cli);
                    task.ResourceFiles = new List<ResourceFile>();
                    task.ResourceFiles.Add(new ResourceFile(blob + inputcontainersas, filename));

                    tasks.Add(task); // <-- prepare 1 task for 1 blob
                }

                Console.WriteLine("Submit tasks...");
                client.JobOperations.AddTask(jobname, tasks); // <-- Submit all 100 tasks with 1 API call

                Console.WriteLine("Waiting for tasks to finish...");
                client.Utilities.CreateTaskStateMonitor().WaitAll(client.JobOperations.ListTasks(jobname), TaskState.Completed, new TimeSpan(0, 60, 0));

                Console.WriteLine("Closing job...");
                client.JobOperations.TerminateJob(jobname);

                Console.WriteLine("All done. Press Enter to exit.");
                Console.ReadLine();
            }
        }
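        // The fields referenced above (endpoint, account, key, poolname and the three
        // container names) are declared elsewhere in the original program; placeholder
        // declarations (values are illustrative, not the original ones) would look like:
        static string endpoint = "https://<account>.<region>.batch.azure.com";
        static string account = "<batch-account-name>";
        static string key = "<batch-account-key>";
        static string poolname = "demopool";
        static string resourceContainer = "resources";
        static string inputContainer = "input";
        static string outputContainer = "output";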
Example #27
        public async Task CanCreateAndUpdateJobScheduleWithApplicationReferences()
        {
            var jobId = Guid.NewGuid().ToString();

            const string newVersion = "2.0";

            var poolInformation = new PoolInformation
            {
                AutoPoolSpecification = new AutoPoolSpecification
                {
                    PoolSpecification = new PoolSpecification
                    {
                        ApplicationPackageReferences = new List <ApplicationPackageReference>
                        {
                            new ApplicationPackageReference
                            {
                                ApplicationId = ApplicationId,
                                Version       = Version
                            }
                        },
                        CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily),
                        VirtualMachineSize        = PoolFixture.VMSize,
                    },
                    PoolLifetimeOption = PoolLifetimeOption.JobSchedule,
                    KeepAlive          = false,
                }
            };

            Schedule schedule = new Schedule {
                DoNotRunAfter = DateTime.UtcNow.AddMinutes(5), RecurrenceInterval = TimeSpan.FromMinutes(2)
            };
            JobSpecification jobSpecification = new JobSpecification(poolInformation);

            using BatchClient client = await TestUtilities.OpenBatchClientFromEnvironmentAsync().ConfigureAwait(false);

            CloudJobSchedule cloudJobSchedule = client.JobScheduleOperations.CreateJobSchedule(jobId, schedule, jobSpecification);

            async Task test()
            {
                CloudJobSchedule updatedBoundJobSchedule = null;

                try
                {
                    await cloudJobSchedule.CommitAsync().ConfigureAwait(false);

                    CloudJobSchedule boundJobSchedule = TestUtilities.WaitForJobOnJobSchedule(client.JobScheduleOperations, jobId);

                    ApplicationPackageReference apr = boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First();

                    Assert.Equal(ApplicationId, apr.ApplicationId);
                    Assert.Equal(Version, apr.Version);

                    boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences = new[]
                    {
                        new ApplicationPackageReference()
                        {
                            ApplicationId = ApplicationId,
                            Version       = newVersion
                        }
                    };

                    await boundJobSchedule.CommitAsync().ConfigureAwait(false);

                    await boundJobSchedule.RefreshAsync().ConfigureAwait(false);

                    updatedBoundJobSchedule = await client.JobScheduleOperations.GetJobScheduleAsync(jobId).ConfigureAwait(false);

                    ApplicationPackageReference updatedApr =
                        updatedBoundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences
                        .First();

                    Assert.Equal(ApplicationId, updatedApr.ApplicationId);
                    Assert.Equal(newVersion, updatedApr.Version);
                }
                finally
                {
                    TestUtilities.DeleteJobScheduleIfExistsAsync(client, jobId).Wait();
                }
            }

            await SynchronizationContextHelper.RunTestAsync(test, LongTestTimeout);
        }
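        // ApplicationId and Version are constants defined elsewhere in the test class;
        // plausible declarations (assumed here, not the original values) would be:
        private const string ApplicationId = "blender";
        private const string Version = "1.0";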
 public Task CreateBatchJobAsync(string jobId, CloudTask cloudTask, PoolInformation poolInformation) => azureProxy.CreateBatchJobAsync(jobId, cloudTask, poolInformation);
        public void LongRunning_Bug1965363Wat7OSVersionFeaturesQuickJobWithAutoPool()
        {
            void test()
            {
                using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
                string jobId = "Bug1965363Job-" + TestUtilities.GetMyName();

                try
                {
                    PoolInformation poolInfo = new PoolInformation()
                    {
                        AutoPoolSpecification = new AutoPoolSpecification()
                        {
                            PoolLifetimeOption = PoolLifetimeOption.Job,
                            PoolSpecification  = new PoolSpecification()
                            {
                                CloudServiceConfiguration   = new CloudServiceConfiguration(PoolFixture.OSFamily),
                                VirtualMachineSize          = PoolFixture.VMSize,
                                TargetDedicatedComputeNodes = 1
                            }
                        }
                    };

                    CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);

                    testOutputHelper.WriteLine("Commiting quickjob");
                    unboundJob.Commit();

                    CloudTask task     = new CloudTask("Bug1965363Wat7OSVersionFeaturesQuickJobWithAutoPoolTask", "cmd /c echo Bug1965363");
                    CloudJob  boundJob = batchCli.JobOperations.GetJob(jobId);
                    boundJob.AddTask(task);

                    testOutputHelper.WriteLine("Getting pool name: {0}", boundJob.ExecutionInformation.PoolId);

                    CloudPool           boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);
                    TaskStateMonitor    tsm       = batchCli.Utilities.CreateTaskStateMonitor();
                    ODATAMonitorControl odControl = new ODATAMonitorControl();

                    // we know that the autopool compute nodes will take a long time to become schedulable, so we slow down polling/spam
                    odControl.DelayBetweenDataFetch = TimeSpan.FromSeconds(5);

                    testOutputHelper.WriteLine("Invoking TaskStateMonitor");

                    tsm.WaitAll(
                        boundJob.ListTasks(),
                        TaskState.Completed,
                        TimeSpan.FromMinutes(15),
                        odControl,
                        new[] {
                        // spam/logging interceptor
                        new Protocol.RequestInterceptor((x) =>
                        {
                            testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                            // print out the compute node states... we are actually waiting on the compute nodes
                            List <ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();
                            testOutputHelper.WriteLine("    #comnpute nodes: " + allComputeNodes.Count);

                            allComputeNodes.ForEach((icn) => { testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State); });
                            testOutputHelper.WriteLine("");
                        })
                    });

                    // confirm the task ran by inspecting the stdOut
                    string stdOut = boundJob.ListTasks().ToList()[0].GetNodeFile(Constants.StandardOutFileName).ReadAsString();

                    Assert.Contains("Bug1965363", stdOut);
                }
                finally
                {
                    TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                }
            }

            SynchronizationContextHelper.RunTest(test, LongTestTimeout);
        }
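        // Version note (informational): older Batch .NET releases expose the dedicated
        // node count as PoolSpecification.TargetDedicated (used in several other
        // examples on this page), while newer releases use TargetDedicatedComputeNodes
        // as in the test above. A sketch of the newer form, with an illustrative VM size:
        private static PoolSpecification MakeSmallPoolSpecification()
        {
            return new PoolSpecification()
            {
                VirtualMachineSize = "standard_d1_v2",
                TargetDedicatedComputeNodes = 1,
                TargetLowPriorityComputeNodes = 0
            };
        }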
        public void TestJobUpdateWithAndWithoutPoolInfo()
        {
            void test()
            {
                using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
                const string testName = "TestJobUpdateWithAndWithoutPoolInfo";

                // Create a job
                string   jobId      = testName + "_" + TestUtilities.GetMyName();
                CloudJob unboundJob = batchCli.JobOperations.CreateJob();

                unboundJob.Id = jobId;

                // Use an auto pool with the job, since PoolInformation can't be updated otherwise.
                PoolSpecification poolSpec = new PoolSpecification();

                poolSpec.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily, "*");

                poolSpec.TargetDedicatedComputeNodes = 0;
                poolSpec.VirtualMachineSize          = PoolFixture.VMSize;
                AutoPoolSpecification autoPoolSpec = new AutoPoolSpecification();
                string autoPoolPrefix = "UpdPIAuto_" + TestUtilities.GetMyName();

                autoPoolSpec.AutoPoolIdPrefix = autoPoolPrefix;
                const bool originalKeepAlive = false;

                autoPoolSpec.KeepAlive          = originalKeepAlive;
                autoPoolSpec.PoolLifetimeOption = PoolLifetimeOption.Job;
                autoPoolSpec.PoolSpecification  = poolSpec;
                PoolInformation poolInfo = new PoolInformation();

                poolInfo.AutoPoolSpecification = autoPoolSpec;
                unboundJob.PoolInformation     = poolInfo;

                const int originalPriority = 0;

                unboundJob.Priority = originalPriority;
                List <MetadataItem> originalMetadata = new List <MetadataItem>
                {
                    new MetadataItem("meta1", "value1"),
                    new MetadataItem("meta2", "value2")
                };

                unboundJob.Metadata = originalMetadata;

                testOutputHelper.WriteLine("Creating job {0}", jobId);
                unboundJob.Commit();

                try
                {
                    // Get bound job
                    CloudJob createdJob = batchCli.JobOperations.GetJob(jobId);

                    // Verify that we can update something besides PoolInformation without getting an error for not being in the Disabled state.
                    Assert.NotEqual(JobState.Disabled, createdJob.State);

                    int updatedPriority = originalPriority + 1;
                    List <MetadataItem> updatedMetadata = new List <MetadataItem> {
                        new MetadataItem("updatedMeta1", "value1")
                    };
                    createdJob.Priority = updatedPriority;
                    createdJob.Metadata = updatedMetadata;

                    testOutputHelper.WriteLine("Updating job {0} without altering PoolInformation", jobId);

                    createdJob.Commit();

                    // Verify update occurred
                    CloudJob updatedJob = batchCli.JobOperations.GetJob(jobId);

                    Assert.Equal(updatedPriority, updatedJob.Priority);
                    Assert.Equal(updatedMetadata.Count, updatedJob.Metadata.Count);
                    Assert.Equal(updatedJob.Metadata[0].Name, updatedMetadata[0].Name);
                    Assert.Equal(updatedJob.Metadata[0].Value, updatedMetadata[0].Value);

                    // Verify that updating the PoolInformation works.
                    // PoolInformation can only be changed in the Disabled state.
                    testOutputHelper.WriteLine("Disabling job {0}", jobId);
                    updatedJob.Disable(DisableJobOption.Terminate);
                    while (updatedJob.State != JobState.Disabled)
                    {
                        Thread.Sleep(500);
                        updatedJob.Refresh();
                    }

                    Assert.Equal(JobState.Disabled, updatedJob.State);

                    bool updatedKeepAlive = !originalKeepAlive;
                    updatedJob.PoolInformation.AutoPoolSpecification.KeepAlive = updatedKeepAlive;
                    int updatedAgainPriority = updatedPriority + 1;
                    updatedJob.Priority = updatedAgainPriority;
                    testOutputHelper.WriteLine("Updating job {0} properties, including PoolInformation", jobId);
                    updatedJob.Commit();

                    CloudJob updatedPoolInfoJob = batchCli.JobOperations.GetJob(jobId);

                    Assert.Equal(updatedKeepAlive, updatedPoolInfoJob.PoolInformation.AutoPoolSpecification.KeepAlive);
                    Assert.Equal(updatedAgainPriority, updatedPoolInfoJob.Priority);
                }
                finally
                {
                    testOutputHelper.WriteLine("Deleting job {0}", jobId);
                    TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();

                    // Explicitly delete auto pool
                    foreach (CloudPool pool in batchCli.PoolOperations.ListPools(new ODATADetailLevel(filterClause: string.Format("startswith(id,'{0}')", autoPoolPrefix))))
                    {
                        testOutputHelper.WriteLine("Deleting pool {0}", pool.Id);
                        TestUtilities.DeletePoolIfExistsAsync(batchCli, pool.Id).Wait();
                    }
                }
            }

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
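        // A bounded variant of the disable-wait loop used in the test above (a sketch,
        // not the original test code): poll with a deadline instead of waiting
        // indefinitely for the job to reach the Disabled state.
        private static void WaitForJobDisabled(CloudJob job, TimeSpan timeout)
        {
            DateTime deadline = DateTime.UtcNow.Add(timeout);
            while (job.State != JobState.Disabled && DateTime.UtcNow < deadline)
            {
                Thread.Sleep(500);
                job.Refresh();
            }

            if (job.State != JobState.Disabled)
            {
                throw new TimeoutException("Job did not reach the Disabled state within " + timeout);
            }
        }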
        public static void ParseTextForDeviceDetails(string text, List <PoolInformation> poolInformation)
        {
            List <string> responseParts = ParseResponseText(text);

            if (responseParts.Count == 0)
            {
                return;
            }

            foreach (string responsePart in responseParts)
            {
                Dictionary <string, string> keyValuePairs = ParseResponsePart(responsePart);

                //check for key-value pairs; Count == 0 has been seen with user API logs
                if (keyValuePairs.Count > 0)
                {
                    PoolInformation newPool = new PoolInformation();

                    newPool.Index = int.Parse(keyValuePairs["POOL"]);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("URL"))
                    {
                        newPool.Url = keyValuePairs["URL"];
                    }

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Status"))
                    {
                        newPool.Status = keyValuePairs["Status"];
                    }

                    newPool.Priority = TryToParseInt(keyValuePairs, "Priority", 0);
                    newPool.Quota    = TryToParseInt(keyValuePairs, "Quota", 0);

                    if (keyValuePairs.ContainsKey("Long Pool"))
                    {
                        newPool.LongPoll = !keyValuePairs["Long Pool"].Equals("n", StringComparison.OrdinalIgnoreCase);
                    }

                    newPool.GetWorks       = TryToParseInt(keyValuePairs, "Getworks", 0);
                    newPool.Accepted       = TryToParseInt(keyValuePairs, "Accepted", 0);
                    newPool.Rejected       = TryToParseInt(keyValuePairs, "Rejected", 0);
                    newPool.Works          = TryToParseInt(keyValuePairs, "Works", 0);
                    newPool.Discarded      = TryToParseInt(keyValuePairs, "Discarded", 0);
                    newPool.Stale          = TryToParseInt(keyValuePairs, "Stale", 0);
                    newPool.GetFailures    = TryToParseInt(keyValuePairs, "Get Failures", 0);
                    newPool.RemoteFailures = TryToParseInt(keyValuePairs, "Remote Failures", 0);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("User"))
                    {
                        newPool.User = keyValuePairs["User"];
                    }

                    newPool.LastShareTime = TryToParseInt(keyValuePairs, "Last Share Time", 0).UnixTimeToDateTime();
                    newPool.Diff1Shares   = TryToParseInt(keyValuePairs, "Diff1 Shares", 0);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Proxy"))
                    {
                        newPool.Proxy = keyValuePairs["Proxy"];
                    }

                    newPool.DifficultyAccepted  = TryToParseDouble(keyValuePairs, "Difficulty Accepted", 0.0);
                    newPool.DifficultyRejected  = TryToParseDouble(keyValuePairs, "Difficulty Rejected", 0.0);
                    newPool.DifficultyStale     = TryToParseDouble(keyValuePairs, "Difficulty Stale", 0.0);
                    newPool.LastShareDifficulty = TryToParseDouble(keyValuePairs, "Last Share Difficulty", 0.0);

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Has Stratum"))
                    {
                        newPool.HasStratum = keyValuePairs["Has Stratum"].Equals("true", StringComparison.OrdinalIgnoreCase);
                    }

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Stratum Active"))
                    {
                        newPool.StratumActive = keyValuePairs["Stratum Active"].Equals("true", StringComparison.OrdinalIgnoreCase);
                    }

                    //user bug reports indicate this key may not exist
                    if (keyValuePairs.ContainsKey("Stratum URL"))
                    {
                        newPool.StratumUrl = keyValuePairs["Stratum URL"];
                    }

                    newPool.BestShare           = TryToParseDouble(keyValuePairs, "Best Share", 0);
                    newPool.PoolRejectedPercent = TryToParseDouble(keyValuePairs, "Pool Rejected%", 0.0);
                    newPool.PoolStalePercent    = TryToParseDouble(keyValuePairs, "Pool Stale%", 0.0);

                    poolInformation.Add(newPool);
                }
            }
        }
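        // A minimal usage sketch for the parser above (the caller is assumed, not part
        // of the original listing): feed it the raw text of a miner "pools" RPC
        // response and read back one PoolInformation entry per pool.
        public static List<PoolInformation> ParsePoolsResponse(string responseText)
        {
            List<PoolInformation> pools = new List<PoolInformation>();
            ParseTextForDeviceDetails(responseText, pools);
            return pools;
        }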
Example #32
        static async Task ManageTasks(string[] args)
        {
            int    experimentId = int.Parse(args[0], CultureInfo.InvariantCulture);
            string summaryName  = null;

            if (args.Length > 1)
            {
                summaryName = args[1];
            }
            //Console.WriteLine(String.Format("Params are:\n id: {0}\ncontainer: {8}\ndirectory:{9}\ncategory: {1}\nextensions: {10}\ndomain: {11}\nexec: {2}\nargs: {3}\ntimeout: {4}\nmemlimit: {5}\noutlimit: {6}\nerrlimit: {7}", experimentId, benchmarkCategory, executable, arguments, timeout, memoryLimit, outputLimit, errorLimit, benchmarkContainerUri, benchmarkDirectory, extensionsString, domainString));
#if DEBUG
            string jobId = "cz3_exp8535";
#else
            string jobId = Environment.GetEnvironmentVariable(JobIdEnvVariableName);
#endif
            Console.WriteLine("Retrieving credentials...");
            var secretStorage = new SecretStorage(Settings.Default.AADApplicationId, Settings.Default.AADApplicationCertThumbprint, Settings.Default.KeyVaultUrl);
            BatchConnectionString credentials = new BatchConnectionString(await secretStorage.GetSecret(Settings.Default.ConnectionStringSecretId));

            var batchCred = new BatchSharedKeyCredentials(credentials.BatchURL, credentials.BatchAccountName, credentials.BatchAccessKey);
            var storage   = new AzureExperimentStorage(credentials.WithoutBatchData().ToString());

            var expInfo = await storage.GetExperiment(experimentId);

            string benchmarkContainerUri = expInfo.BenchmarkContainerUri;  // args[1];
            string benchmarkDirectory    = expInfo.BenchmarkDirectory;     // args[2];
            string benchmarkCategory     = expInfo.Category;               // args[3];
            string extensionsString      = expInfo.BenchmarkFileExtension; //args[4];
            string domainString          = expInfo.DomainName;             // args[5];
            string executable            = expInfo.Executable;             // args[6];
            string arguments             = expInfo.Parameters;             // args[7];
            double timeout        = expInfo.BenchmarkTimeout;              // TimeSpan.FromSeconds(double.Parse(args[8]));
            double memoryLimit    = expInfo.MemoryLimitMB;                 // 0; // no limit
            int    maxRepetitions = expInfo.AdaptiveRunMaxRepetitions;
            double maxTime        = expInfo.AdaptiveRunMaxTimeInSeconds;

            long? outputLimit = 1 * (1024 * 1024); // 1 MB
            long? errorLimit  = 256 * 1024;        // 256 KB

            AzureBenchmarkStorage benchmarkStorage = CreateBenchmarkStorage(benchmarkContainerUri, storage);

            var queue = await storage.CreateResultsQueue(experimentId);

            DateTime before = DateTime.Now;
            Console.Write("Fetching existing results...");
            await FetchSavedResults(experimentId, storage);

            Domain           domain = ResolveDomain(domainString);
            HashSet <string> extensions;
            if (string.IsNullOrEmpty(extensionsString))
            {
                extensions = new HashSet <string>(domain.BenchmarkExtensions.Distinct());
            }
            else
            {
                extensions = new HashSet <string>(extensionsString.Split('|').Select(s => s.Trim().TrimStart('.')).Distinct());
            }

            using (BatchClient batchClient = BatchClient.Open(batchCred))
            {
                // Exclude benchmarks that finished correctly
                var    processedBlobs = new HashSet <string>();
                string prefix         = (benchmarkDirectory.Trim('/') + "/" + benchmarkCategory.Trim('/')).Trim('/');
                foreach (var r in goodResults.Select(g => prefix + "/" + g.BenchmarkFileName))
                {
                    processedBlobs.Add(r.Trim());
                }
                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                // Exclude those that are still in progress
                ODATADetailLevel detailLevel = new ODATADetailLevel();
                detailLevel.FilterClause = "(state eq 'active') or (state eq 'running') or (state eq 'preparing')";
                detailLevel.SelectClause = "id,displayName";

                CloudJob old_job = null;
                try { old_job = batchClient.JobOperations.GetJob(jobId); } catch { /* OK */ }

                if (old_job != null)
                {
                    before = DateTime.Now;
                    Console.Write("Listing existing tasks...");
                    var ts = batchClient.JobOperations.ListTasks(jobId, detailLevel);
                    foreach (CloudTask t in ts)
                    {
                        int id;

                        if (int.TryParse(t.Id, out id))
                        {
                            string n = t.DisplayName.Trim();
                            if (!processedBlobs.Contains(n))
                            {
                                processedBlobs.Add(n);
                            }
                        }
                    }
                    Console.WriteLine(" took {0}.", (DateTime.Now - before));

                    // Create new job if the old one is already sealed off
                    switch (old_job.State)
                    {
                    case Microsoft.Azure.Batch.Common.JobState.Completed:
                    case Microsoft.Azure.Batch.Common.JobState.Deleting:
                    case Microsoft.Azure.Batch.Common.JobState.Disabled:
                    case Microsoft.Azure.Batch.Common.JobState.Disabling:
                    case Microsoft.Azure.Batch.Common.JobState.Terminating:
                    {
                        before = DateTime.Now;
                        Console.Write("Creating fresh job...");
                        PoolInformation pool_info = old_job.PoolInformation;
                        string          new_jid;
                        int             cnt      = 1;
                        bool            have_jid = false;
                        do
                        {
                            new_jid = String.Format("{0}-{1}", jobId, cnt++);
                            try
                            {
                                CloudJob new_job = batchClient.JobOperations.CreateJob(new_jid, pool_info);
                                new_job.OnAllTasksComplete = Microsoft.Azure.Batch.Common.OnAllTasksComplete.NoAction;
                                new_job.OnTaskFailure      = old_job.OnTaskFailure;
                                new_job.Constraints        = old_job.Constraints;
                                new_job.DisplayName        = old_job.DisplayName;
                                new_job.Commit();
                                have_jid = true;
                            }
                            catch (Microsoft.Azure.Batch.Common.BatchException)
                            {
                                Console.Write(".");
                            }
                        }while (!have_jid);
                        jobId = new_jid;
                        Console.WriteLine(" took {0}.", (DateTime.Now - before));
                        break;
                    }
                    }
                }

                BlobContinuationToken continuationToken = null;
                BlobResultSegment     resultSegment     = null;

                before = DateTime.Now;
                Console.Write("Adding tasks...");
                List <Task> starterTasks       = new List <Task>();
                int         benchmarksTotal    = processedBlobs.Count();
                string      benchmarksPath     = CombineBlobPath(benchmarkDirectory, benchmarkCategory);
                string      outputQueueUri     = storage.GetOutputQueueSASUri(experimentId, TimeSpan.FromHours(48));
                string      outputContainerUri = storage.GetOutputContainerSASUri(TimeSpan.FromHours(48));
                do
                {
                    resultSegment = await benchmarkStorage.ListBlobsSegmentedAsync(benchmarksPath, continuationToken);

                    string[] blobNamesToProcess = resultSegment.Results.SelectMany(item =>
                    {
                        var blob = item as CloudBlockBlob;
                        if (blob == null || processedBlobs.Contains(blob.Name))
                        {
                            return new string[] { };
                        }

                        var nameParts      = blob.Name.Split('/');
                        var shortnameParts = nameParts[nameParts.Length - 1].Split('.');
                        if (shortnameParts.Length == 1 && !extensions.Contains(""))
                        {
                            return new string[] { };
                        }

                        var ext = shortnameParts[shortnameParts.Length - 1];
                        if (!extensions.Contains(ext))
                        {
                            return new string[] { };
                        }

                        return new string[] { blob.Name };
                    }).ToArray();
                    starterTasks.Add(StartTasksForSegment(timeout.ToString(), experimentId, executable, arguments, memoryLimit, domainString, outputQueueUri, outputContainerUri, outputLimit, errorLimit, jobId, batchClient, blobNamesToProcess, benchmarksPath, benchmarksTotal, benchmarkStorage, maxRepetitions, maxTime));

                    continuationToken = resultSegment.ContinuationToken;
                    benchmarksTotal  += blobNamesToProcess.Length;
                }while (continuationToken != null);

                await storage.SetBenchmarksTotal(experimentId, benchmarksTotal);

                Program.benchmarksTotal = benchmarksTotal;
                benchmarksToProcess     = benchmarksTotal - goodResults.Count;
                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                before = DateTime.Now;
                Console.Write("Waiting for tasks to start...");
                await Task.WhenAll(starterTasks.ToArray());

                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                CloudJob j = batchClient.JobOperations.GetJob(jobId);
                j.OnAllTasksComplete = Microsoft.Azure.Batch.Common.OnAllTasksComplete.TerminateJob;
                j.CommitChanges();

                before = DateTime.Now;
                Console.Write("Waiting for results...");
                var collectionTask = CollectResults(experimentId, storage);
                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                MonitorTasksUntilCompletion(experimentId, jobId, collectionTask, batchClient, domain);

                if (summaryName != null && expInfo.Creator == "Nightly")
                {
                    Trace.WriteLine(string.Format("Building summary for experiment {0} and summary name {1}...", experimentId, summaryName));
                    AzureSummaryManager manager = new AzureSummaryManager(credentials.WithoutBatchData().ToString(), MEFDomainResolver.Instance);
                    await AppendSummaryAndSendReport(summaryName, experimentId, domain, manager);
                }
                else
                {
                    Trace.WriteLine("No summary requested.");
                }

                try
                {
                    int? amc = storage.GetResultsQueueReference(experimentId).ApproximateMessageCount;

                    if (amc.HasValue && amc.Value == 0)
                    {
                        switch (batchClient.JobOperations.GetJob(jobId).State)
                        {
                        case Microsoft.Azure.Batch.Common.JobState.Completed:
                        case Microsoft.Azure.Batch.Common.JobState.Disabled:
                            Console.WriteLine("Deleting Batch job and results queue.");
                            await batchClient.JobOperations.DeleteJobAsync(jobId);

                            await storage.DeleteResultsQueue(experimentId);

                            break;
                        }
                    }
                }
                catch { /* OK */ }

                Console.WriteLine("Closing.");
            }
        }
Example #33
        /// <summary>
        /// Creates a job schedule with the specified options.
        /// </summary>
        /// <param name="options">The options describing the job schedule to create.</param>
        /// <returns>A task that completes when the job schedule has been committed.</returns>
        public async Task CreateJobScheduleAsync(CreateJobScheduleOptions options)
        {
            CloudJobSchedule unboundJobSchedule = this.Client.JobScheduleOperations.CreateJobSchedule();

            unboundJobSchedule.Id = options.JobScheduleId;

            PoolInformation poolInformation = new PoolInformation();

            if (options.AutoPoolOptions.UseAutoPool.HasValue && options.AutoPoolOptions.UseAutoPool.Value)
            {
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix   = options.AutoPoolOptions.AutoPoolPrefix,
                    KeepAlive          = options.AutoPoolOptions.KeepAlive,
                    PoolLifetimeOption = (PoolLifetimeOption)Enum.Parse(typeof(PoolLifetimeOption), options.AutoPoolOptions.LifeTimeOption),
                    PoolSpecification  = new PoolSpecification()
                    {
                        CloudServiceConfiguration = new CloudServiceConfiguration(options.AutoPoolOptions.OSFamily),
                        VirtualMachineSize        = options.AutoPoolOptions.VirutalMachineSize,
                        TargetDedicated           = options.AutoPoolOptions.TargetDedicated
                    }
                };

                poolInformation.AutoPoolSpecification = autoPoolSpecification;
            }
            else
            {
                poolInformation.PoolId = options.PoolId;
            }

            unboundJobSchedule.JobSpecification = new JobSpecification()
            {
                Priority        = options.Priority,
                PoolInformation = poolInformation
            };

            // TODO: These are read only
            unboundJobSchedule.JobSpecification.Constraints = new JobConstraints(options.MaxWallClockTime, options.MaxRetryCount);

            Schedule schedule = new Schedule()
            {
                DoNotRunAfter      = options.DoNotRunAfter,
                DoNotRunUntil      = options.DoNotRunUntil,
                RecurrenceInterval = options.RecurrenceInterval,
                StartWindow        = options.StartWindow
            };

            unboundJobSchedule.Schedule = schedule;

            if (options.CreateJobManager.HasValue && options.CreateJobManager.Value == true)
            {
                JobManagerTask jobManager = new JobManagerTask()
                {
                    CommandLine         = options.JobManagerOptions.CommandLine,
                    KillJobOnCompletion = options.JobManagerOptions.KillOnCompletion,
                    Id = options.JobManagerOptions.JobManagerId
                };

                jobManager.Constraints = new TaskConstraints(options.JobManagerOptions.MaxTaskWallClockTime, options.JobManagerOptions.RetentionTime, options.JobManagerOptions.MaxTaskRetryCount);

                unboundJobSchedule.JobSpecification.JobManagerTask = jobManager;
            }

            await unboundJobSchedule.CommitAsync();
        }
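        // The same shape expressed directly against the Batch SDK (a sketch with
        // illustrative identifiers; assumes an open BatchClient and an existing pool
        // named "existing-pool" in the account).
        public async Task CreateHourlyJobScheduleSketchAsync(BatchClient client)
        {
            CloudJobSchedule sketchSchedule = client.JobScheduleOperations.CreateJobSchedule(
                "sample-schedule",
                new Schedule() { RecurrenceInterval = TimeSpan.FromHours(1) },
                new JobSpecification(new PoolInformation() { PoolId = "existing-pool" }));

            await sketchSchedule.CommitAsync();
        }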
        public void TestBoundJobScheduleCommit()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobScheduleId = Microsoft.Azure.Batch.Constants.DefaultConveniencePrefix + TestUtilities.GetMyName() + "-TestBoundJobScheduleCommit";
                    try
                    {
                        //
                        // Create the job schedule
                        //
                        const int    jobSchedulePriority   = 5;
                        const string jobManagerId          = "TestBoundJobScheduleCommit";
                        const string jobManagerCommandLine = "ping 127.0.0.1 -n 500";

                        IList <MetadataItem> metadata = new List <MetadataItem> {
                            new MetadataItem("key1", "test1"), new MetadataItem("key2", "test2")
                        };
                        CloudJobSchedule jobSchedule             = batchCli.JobScheduleOperations.CreateJobSchedule(jobScheduleId, null, null);
                        TimeSpan         firstRecurrenceInterval = TimeSpan.FromMinutes(2);
                        jobSchedule.Schedule = new Schedule()
                        {
                            RecurrenceInterval = firstRecurrenceInterval
                        };
                        PoolInformation poolInfo = new PoolInformation()
                        {
                            PoolId = this.poolFixture.PoolId
                        };

                        jobSchedule.JobSpecification = new JobSpecification(poolInfo)
                        {
                            Priority       = jobSchedulePriority,
                            JobManagerTask = new JobManagerTask(jobManagerId, jobManagerCommandLine)
                        };

                        jobSchedule.Metadata = metadata;

                        this.testOutputHelper.WriteLine("Initial job schedule commit()");
                        jobSchedule.Commit();

                        //Get the bound job schedule
                        CloudJobSchedule boundJobSchedule = batchCli.JobScheduleOperations.GetJobSchedule(jobScheduleId);

                        //Ensure the job schedule is structured as expected
                        AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, jobSchedulePriority, jobManagerId, jobManagerCommandLine, firstRecurrenceInterval, metadata);

                        //Update the bound job schedule schedule
                        TimeSpan recurrenceInterval = TimeSpan.FromMinutes(5);
                        boundJobSchedule.Schedule = new Schedule()
                        {
                            RecurrenceInterval = recurrenceInterval
                        };

                        this.testOutputHelper.WriteLine("Updating JobSchedule Schedule");
                        boundJobSchedule.Commit();

                        //Ensure the job schedule is correct after commit
                        AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, jobSchedulePriority, jobManagerId, jobManagerCommandLine, recurrenceInterval, metadata);

                        //Update the bound job schedule priority
                        const int newJobSchedulePriority = 1;
                        boundJobSchedule.JobSpecification.Priority = newJobSchedulePriority;

                        this.testOutputHelper.WriteLine("Updating JobSpecification.Priority");
                        boundJobSchedule.Commit();

                        //Ensure the job schedule is correct after commit
                        AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, newJobSchedulePriority, jobManagerId, jobManagerCommandLine, recurrenceInterval, metadata);

                        //Update the bound job schedule job manager commandline
                        const string newJobManagerCommandLine = "ping 127.0.0.1 -n 150";
                        boundJobSchedule.JobSpecification.JobManagerTask.CommandLine = newJobManagerCommandLine;

                        this.testOutputHelper.WriteLine("Updating JobSpecification.JobManagerTask.CommandLine");
                        boundJobSchedule.Commit();

                        //Ensure the job schedule is correct after commit
                        AssertJobScheduleCorrectness(batchCli.JobScheduleOperations, boundJobSchedule, this.poolFixture.PoolId, newJobSchedulePriority, jobManagerId, newJobManagerCommandLine, recurrenceInterval, metadata);

                        //Update the bound job schedule PoolInformation
                        const string newPoolId = "TestPool";

                        boundJobSchedule.JobSpecification.PoolInformation = new PoolInformation()
                        {
                            PoolId = newPoolId
                        };

                        this.testOutputHelper.WriteLine("Updating PoolInformation");
                        boundJobSchedule.Commit();

                        //Ensure the job schedule is correct after commit
                        AssertJobScheduleCorrectness(
                            batchCli.JobScheduleOperations,
                            boundJobSchedule,
                            newPoolId,
                            newJobSchedulePriority,
                            jobManagerId,
                            newJobManagerCommandLine,
                            recurrenceInterval,
                            metadata);

                        //Update the bound job schedule Metadata
                        IList <MetadataItem> newMetadata = new List <MetadataItem> {
                            new MetadataItem("Object", "Model")
                        };
                        boundJobSchedule.Metadata = newMetadata;

                        this.testOutputHelper.WriteLine("Updating Metadata");
                        boundJobSchedule.Commit();

                        //Ensure the job schedule is correct after commit
                        AssertJobScheduleCorrectness(
                            batchCli.JobScheduleOperations,
                            boundJobSchedule,
                            newPoolId,
                            newJobSchedulePriority,
                            jobManagerId,
                            newJobManagerCommandLine,
                            recurrenceInterval,
                            newMetadata);
                    }
                    finally
                    {
                        batchCli.JobScheduleOperations.DeleteJobSchedule(jobScheduleId);
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
        /// <summary>
        /// Populates Azure Storage with the required files, and 
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());
            
            //Upload resources if required.
            if (this.configurationSettings.ShouldUploadResources)
            {
                Console.WriteLine("Splitting file: {0} into {1} subfiles", 
                    Constants.TextFilePath, 
                    this.configurationSettings.NumberOfMapperTasks);

                //Split the text file into the correct number of files for consumption by the mapper tasks.
                FileSplitter splitter = new FileSplitter();
                List<string> mapperTaskFiles = await splitter.SplitAsync(
                    Constants.TextFilePath, 
                    this.configurationSettings.NumberOfMapperTasks);
                
                await this.UploadResourcesAsync(mapperTaskFiles);
            }
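
            // FileSplitter and UploadResourcesAsync are helpers defined elsewhere in this sample. Presumably
            // SplitAsync divides the input text file roughly evenly (e.g. by line count -- an assumption) into
            // NumberOfMapperTasks smaller files, and UploadResourcesAsync pushes those files plus the task
            // executables to the blob container named in the configuration so tasks can download them later.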

            //Generate a SAS for the container.
            string containerSasUrl = Helpers.ConstructContainerSas(
                this.configurationSettings.StorageAccountName,
                this.configurationSettings.StorageAccountKey,
                this.configurationSettings.StorageServiceUrl,
                this.configurationSettings.BlobContainer);
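
            // For reference, a container SAS like the one Helpers.ConstructContainerSas returns could be built
            // with the same WindowsAzure.Storage types this method already uses. This is only a sketch; the
            // permissions and expiry below are assumptions, not necessarily what the helper actually grants:
            //
            //     var account = new CloudStorageAccount(
            //         new StorageCredentials(accountName, accountKey), storageServiceUrl, useHttps: true);
            //     CloudBlobContainer container = account.CreateCloudBlobClient().GetContainerReference(containerName);
            //     string sasToken = container.GetSharedAccessSignature(new SharedAccessBlobPolicy
            //     {
            //         Permissions = SharedAccessBlobPermissions.Read | SharedAccessBlobPermissions.Write,
            //         SharedAccessExpiryTime = DateTime.UtcNow.AddHours(2)
            //     });
            //     string containerSas = container.Uri + sasToken;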

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);
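
            // Shared key credentials keep the sample simple; the Batch .NET client can also authenticate with
            // Azure Active Directory tokens via BatchTokenCredentials if account keys are not available.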

            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                //
                // Construct the job properties in local memory before committing them to the Batch Service.
                //

                //Allow enough compute nodes in the pool to run each mapper task, and 1 extra to run the job manager.
                int numberOfPoolComputeNodes = 1 + this.configurationSettings.NumberOfMapperTasks;

                //Define the pool specification for the pool which the job will run on.
                PoolSpecification poolSpecification = new PoolSpecification()
                    {
                        TargetDedicated = numberOfPoolComputeNodes,
                        VirtualMachineSize = "small",
                        //You can learn more about os families and versions at: 
                        //http://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix
                        OSFamily = "4",
                        TargetOSVersion = "*"
                    };

                //Use the auto pool feature of the Batch Service to create a pool when the job is created.
                //This creates a new pool for each job which is added.
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                    {
                        AutoPoolIdPrefix = "TextSearchPool",
                        KeepAlive = false,
                        PoolLifetimeOption = PoolLifetimeOption.Job,
                        PoolSpecification = poolSpecification
                    };
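
                // With PoolLifetimeOption.Job and KeepAlive = false, the Batch service removes the auto pool on
                // its own once the job's lifetime ends (the job is terminated or deleted), so no separate pool
                // cleanup is performed in this sample.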

                //Define the pool information for this job -- it will run on the pool defined by the auto pool specification above.
                PoolInformation poolInformation = new PoolInformation()
                    {
                        AutoPoolSpecification = autoPoolSpecification
                    };
                
                //Define the job manager for this job.  This job manager will run first and will submit the tasks for 
                //the job.  The job manager is the executable which manages the lifetime of the job
                //and all tasks which should run for the job.  In this case, the job manager submits the mapper and reducer tasks.
                List<ResourceFile> jobManagerResourceFiles = Helpers.GetResourceFiles(containerSasUrl, Constants.RequiredExecutableFiles);
                const string jobManagerTaskId = "JobManager";

                JobManagerTask jobManagerTask = new JobManagerTask()
                    {
                        ResourceFiles = jobManagerResourceFiles,
                        CommandLine = Constants.JobManagerExecutable,

                        //Determines if the job should terminate when the job manager process exits.
                        KillJobOnCompletion = true,
                        Id = jobManagerTaskId
                    };

                //Create the unbound job in local memory.  An object which exists only in local memory (and not on the Batch Service) is "unbound".
                string jobId = Environment.GetEnvironmentVariable("USERNAME") + DateTime.UtcNow.ToString("yyyyMMdd-HHmmss");

                CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, poolInformation);
                unboundJob.JobManagerTask = jobManagerTask; //Assign the job manager task to this job

                try
                {
                    //Commit the unbound job to the Batch Service.
                    Console.WriteLine("Adding job: {0} to the Batch Service.", unboundJob.Id);
                    await unboundJob.CommitAsync(); //Issues a request to the Batch Service to add the job which was defined above.

                    //
                    // Wait for the job manager task to complete.
                    //
                    
                    //An object which is backed by a corresponding Batch Service object is "bound."
                    CloudJob boundJob = await batchClient.JobOperations.GetJobAsync(jobId);

                    CloudTask boundJobManagerTask = await boundJob.GetTaskAsync(jobManagerTaskId);

                    TimeSpan maxJobCompletionTimeout = TimeSpan.FromMinutes(30);
                    
                    // Monitor the current tasks to see when they are done.
                    // Occasionally a task may get killed and requeued during an upgrade or hardware failure, including the job manager
                    // task.  The job manager will be re-run in this case.  Robustness against this was not added into the sample for 
                    // simplicity, but should be added into any production code.
                    Console.WriteLine("Waiting for job's tasks to complete");

                    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
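                    // WaitAllAsync polls the supplied tasks until they all reach TaskState.Completed or the
                    // timeout elapses; in this version of the client library a timeout is reported through the
                    // returned bool rather than by throwing.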
                    bool timedOut = await taskStateMonitor.WaitAllAsync(new List<CloudTask> { boundJobManagerTask }, TaskState.Completed, maxJobCompletionTimeout);

                    Console.WriteLine("Done waiting for job manager task.");

                    //Surface a timeout explicitly before inspecting the task, since it may not have completed.
                    if (timedOut)
                    {
                        throw new TimeoutException("Timed out waiting for job manager task to complete.");
                    }

                    await boundJobManagerTask.RefreshAsync();

                    //Check to ensure the job manager task exited successfully.
                    await Helpers.CheckForTaskSuccessAsync(boundJobManagerTask, dumpStandardOutOnTaskSuccess: false);

                    //
                    // Download and write out the reducer tasks output
                    //
                    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                        new StorageCredentials(
                            this.configurationSettings.StorageAccountName,
                            this.configurationSettings.StorageAccountKey),
                        this.configurationSettings.StorageServiceUrl,
                        useHttps: true);

                    string reducerText = await Helpers.DownloadBlobTextAsync(cloudStorageAccount, this.configurationSettings.BlobContainer, Constants.ReducerTaskResultBlobName);
                    Console.WriteLine("Reducer reuslts:");
                    Console.WriteLine(reducerText);
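
                    // Helpers.DownloadBlobTextAsync presumably wraps the blob read; with the WindowsAzure.Storage
                    // types used above it could look roughly like this (a sketch, not necessarily the helper's
                    // actual implementation):
                    //
                    //     CloudBlobContainer container = cloudStorageAccount.CreateCloudBlobClient()
                    //         .GetContainerReference(this.configurationSettings.BlobContainer);
                    //     string text = await container.GetBlockBlobReference(Constants.ReducerTaskResultBlobName)
                    //         .DownloadTextAsync();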

                }
                finally
                {
                    //Delete the job.
                    //This will delete the auto pool associated with the job as long as the pool
                    //keep alive property is set to false.
                    if (this.configurationSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job {0}", jobId);
                        await batchClient.JobOperations.DeleteJobAsync(jobId);
                    }

                    //Note that there were files uploaded to a container specified in the 
                    //configuration file.  This container will not be deleted or cleaned up by this sample.
                }
            }
        }
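
        // Usage sketch (hypothetical wiring; the real entry point and the name of this class live outside
        // this snippet):
        //
        //     var submitter = new TextSearchJobSubmitter(settings);   // hypothetical type and constructor
        //     submitter.RunAsync().GetAwaiter().GetResult();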