Example #1
        public static void Main(string[] args)
        {
            // This will boost parallel submission speed for REST APIs. If your application requires many simultaneous service calls, set this number to something large, such as 100.
            // See: http://msdn.microsoft.com/en-us/library/system.net.servicepointmanager.defaultconnectionlimit%28v=vs.110%29.aspx for more info.
            System.Net.ServicePointManager.DefaultConnectionLimit = 20;

            // Get an instance of the BatchClient for a given Azure Batch account.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(BatchUrl, BatchAccount, BatchKey);
            using (BatchClient client = BatchClient.Open(cred))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                client.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                ListPools(client);
                ListJobs(client);

                CreatePoolIfNeeded(client, PoolId);
                AddJobTwoTasks(client, PoolId);

                ListPools(client);
                ListJobs(client);
                AddTasksWithFileStaging(client, PoolId);

                ListPools(client);
                ListJobs(client);

                SubmitLargeNumberOfTasks(client, PoolId);

                ListPools(client);
                ListJobs(client);
            }

            Console.WriteLine("Press return to exit...");
            Console.ReadLine();
        }
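
The sample above registers a linear retry policy. An exponential back-off is often a better fit when many clients share one Batch account; a minimal sketch, assuming RetryPolicyProvider also exposes an ExponentialRetryProvider factory analogous to the LinearRetryProvider used above (verify against your SDK version):

                // Hypothetical alternative to the linear policy above: back off roughly
                // 5s, 10s, 20s between attempts, for at most 3 retries per operation.
                client.CustomBehaviors.Add(RetryPolicyProvider.ExponentialRetryProvider(TimeSpan.FromSeconds(5), 3));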
Example #2
        /// <summary>
        /// Runs the reducer task.
        /// </summary>
        public async Task RunAsync()
        {
            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName, 
                this.configurationSettings.BatchAccountKey); 

            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                    // Gather each mapper task's output and write it to standard out.
                for (int i = 0; i < this.configurationSettings.NumberOfMapperTasks; i++)
                {
                    string mapperTaskId = Helpers.GetMapperTaskId(i);

                    //Download the standard out from each mapper task.
                    NodeFile mapperFile = await batchClient.JobOperations.GetNodeFileAsync(
                        this.jobId, 
                        mapperTaskId, 
                        Batch.Constants.StandardOutFileName);

                    string taskFileString = await mapperFile.ReadAsStringAsync();
                    Console.WriteLine(taskFileString);

                    Console.WriteLine();
                }
            }
        }
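
The reducer above assumes every mapper task has already completed and produced its stdout file. A minimal defensive sketch that could sit at the top of the loop, skipping mappers that have not finished successfully (standard CloudTask properties; adjust to your SDK version):

                    // Hypothetical guard: fetch the bound mapper task and skip it if it
                    // has not completed or exited with a non-zero code.
                    CloudTask mapperTask = await batchClient.JobOperations.GetTaskAsync(this.jobId, mapperTaskId);
                    if (mapperTask.State != TaskState.Completed || mapperTask.ExecutionInformation.ExitCode != 0)
                    {
                        Console.WriteLine("Skipping {0}: state = {1}", mapperTaskId, mapperTask.State);
                        continue;
                    }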
Example #3
        private static async Task MainAsync(AccountSettings accountSettings)
        {
            // Use the AccountSettings from the Common project to initialize a BatchClient.
            var credentials = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            using (var batchClient = await BatchClient.OpenAsync(credentials))
            {
                // Create a MetricMonitor.  Once started, this will periodically fetch job metrics
                // (specifically, the counts of tasks in different states) from Azure Batch.
                // The monitor will stop reporting once disposed, so often you would have the monitor
                // as a long-lived member variable, but for demo purposes we use a 'using' statement
                // to ensure disposal.
                using (var monitor = new MetricMonitor(batchClient))
                {
                    // For demo purposes, print the latest metrics every time the monitor updates them.
                    monitor.MetricsUpdated += (s, e) =>
                        {
                            Console.WriteLine();
                            Console.WriteLine(FormatMetrics(monitor.CurrentMetrics));
                        };
                    // Start monitoring.  The monitor will fetch metrics in the background.
                    monitor.Start();

                    // Give the monitor some jobs to report on.
                    var jobSubmitter = new JobSubmitter(batchClient);
                    await jobSubmitter.SubmitJobsAsync();  // Submit a series of jobs over a period of several minutes
                    await Task.Delay(TimeSpan.FromMinutes(2));  // Give the last submitted job time to get under way so we can see it progressing
                    await jobSubmitter.CleanUpJobsAsync();
                    await jobSubmitter.CleanUpPoolIfRequiredAsync();
                }
            }
        }
Example #4
        public static void Monitor()
        {
            if (!string.IsNullOrEmpty(jobName))
            {
                BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);
                using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
                {
                    do
                    {
                        int completed = 0;
                        int all = 0;

                        CloudJob job = client.JobOperations.GetJob(jobName);
                        if (state != job.State)
                        {
                            Log("job state is " + job.State);
                        }
                        state = job.State;

                        var tasks = client.JobOperations.ListTasks(jobName).ToList();
                        completed = tasks.Count(t => t.State == TaskState.Completed);
                        all = tasks.Count;

                        BatchServiceClient.completed = completed;
                        BatchServiceClient.all = all;
                        Thread.Sleep(1000);

                    } while (state != JobState.Completed);
                }
            }
        }
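
Monitor() above pulls every property of every task once a second. When only the state is needed, an OData select clause keeps the response small; a sketch of how the listing and counting lines could be written with a DetailLevel (same ListTasks call as above):

                        // Ask the service to return only id and state for each task.
                        ODATADetailLevel idAndState = new ODATADetailLevel(selectClause: "id,state");
                        var tasks = client.JobOperations.ListTasks(jobName, detailLevel: idAndState).ToList();
                        int completed = tasks.Count(t => t.State == TaskState.Completed);
                        int all = tasks.Count;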
Example #5
        public void CannotSetUsesTaskDependenciesFromABoundCloudJob()
        {
            const string jobId = "id-123";
            const bool   usesTaskDependencies = true;
            // Bound
            BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();

            using (BatchClient client = BatchClient.Open(credentials))
            {
                Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
                    baseRequest =>
                {
                    var request = (Protocol.BatchRequest<Models.JobGetOptions, AzureOperationResponse<Models.CloudJob, Models.JobGetHeaders>>)baseRequest;
                    request.ServiceRequestFunc = (token) =>
                    {
                        var response = new AzureOperationResponse<Models.CloudJob, Models.JobGetHeaders>
                        {
                            Body = new Protocol.Models.CloudJob {
                                UsesTaskDependencies = usesTaskDependencies
                            }
                        };

                        return Task.FromResult(response);
                    };
                });

                var cloudJob = client.JobOperations.GetJob(jobId, additionalBehaviors: new List<BatchClientBehavior> {
                    interceptor
                });
                Assert.Equal(usesTaskDependencies, cloudJob.UsesTaskDependencies);
                Assert.Throws<InvalidOperationException>(() => cloudJob.UsesTaskDependencies = false);
            }
        }
        public async Task CreateJobScheduleWithApplicationPackageReferences()
        {
            const string applicationId = "blender.exe";
            const string version       = "blender";
            const string jobId         = "mock-job";

            BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential();

            using (BatchClient client = BatchClient.Open(credentials))
            {
                Protocol.RequestInterceptor interceptor = new Protocol.RequestInterceptor(
                    baseRequest =>
                {
                    var request =
                        (Protocol.BatchRequest<Models.JobScheduleGetOptions, AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>>)baseRequest;

                    request.ServiceRequestFunc = (token) =>
                    {
                        var response = new AzureOperationResponse<Models.CloudJobSchedule, Models.JobScheduleGetHeaders>
                        {
                            Body = new Models.CloudJobSchedule
                            {
                                JobSpecification = new Protocol.Models.JobSpecification
                                {
                                    PoolInfo = new Models.PoolInformation
                                    {
                                        AutoPoolSpecification = new Models.AutoPoolSpecification
                                        {
                                            Pool = new Models.PoolSpecification
                                            {
                                                ApplicationPackageReferences = new[]
                                                {
                                                    new Protocol.Models.ApplicationPackageReference
                                                    {
                                                        ApplicationId = applicationId,
                                                        Version       = version,
                                                    }
                                                },
                                                MaxTasksPerNode = 4
                                            }
                                        }
                                    }
                                }
                            }
                        };
                        return Task.FromResult(response);
                    };
                });

                Microsoft.Azure.Batch.CloudJobSchedule cloudJobSchedule = await client.JobScheduleOperations.GetJobScheduleAsync(jobId, additionalBehaviors: new List<BatchClientBehavior> {
                    interceptor
                });

                Assert.Equal(cloudJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First().ApplicationId, applicationId);
                Assert.Equal(cloudJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.ApplicationPackageReferences.First().Version, version);
            }
        }
        public AzureBatchComputeScheduler(IApplicationEnvironment applicationEnvironment)
        {
            var configuration = new ConfigurationBuilder(applicationEnvironment.ApplicationBasePath)
                .AddJsonFile("config.json")
                .AddEnvironmentVariables()
                .Build();

            _credentials = new BatchSharedKeyCredentials(
                configuration.GetSection("AppSettings:BatchAccountUrl").Value,
                configuration.GetSection("AppSettings:BatchAccountName").Value,
                configuration.GetSection("AppSettings:BatchKey").Value);
        }
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, job: {1} has started...",
                this.accountName,
                this.jobId);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchAccountUrl, 
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            CloudStorageAccount storageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey), 
                    new Uri(this.configurationSettings.StorageBlobEndpoint),
                    null,
                    null,
                    null);
            
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                HashSet<string> blobContainerNames = new HashSet<string>();

                try
                {
                    // Submit some tasks
                    blobContainerNames = await this.SubmitTasks(batchClient);

                    // Wait for the tasks to finish
                    List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

                    // Don't wait for the job manager task since it won't finish until this method exits.
                    tasks.RemoveAll(t => t.Id.Equals(this.taskId, StringComparison.CurrentCultureIgnoreCase));
                    
                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, tasks, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Clean up the files for the tasks
                    SampleHelpers.DeleteContainersAsync(storageAccount, blobContainerNames).Wait();
                }
            }
        }
Example #9
File: Batch.cs Project: tzkwizard/Azure
        public static void RunBatch(string[] args)
        {
            //CreateStorage();
            //CreateFiles();
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials("https://testbatch.southcentralus.batch.azure.com", "testbatch", "O+w9Brj9NKnb2RXt13T0HaJLQ3v8HMf6RuojDkeAHVMTDQ/tdgjvim3pFiiH+ekqWiYppDDQ6M0rvAyqaIIUaw==");
            BatchClient client = BatchClient.Open(cred);
            //CreatePool(client);
            ListPools(client);
            //CreateJob(client);
            ListJobs(client);
            //DeleteTasks(client);
            //AddTasks(client);
            ListTasks(client);
            //DeleteJob(client);
            //DeletePool(client);
        }
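
Example #9 hard-codes the account URL, name, and key in source, which is fine for a throwaway test but risky anywhere else. A minimal sketch of reading the same values from environment variables instead (the variable names are illustrative, not part of any SDK):

            // Hypothetical environment variable names; set them to your own account values.
            string batchUrl = Environment.GetEnvironmentVariable("AZURE_BATCH_URL");
            string batchAccount = Environment.GetEnvironmentVariable("AZURE_BATCH_ACCOUNT");
            string batchKey = Environment.GetEnvironmentVariable("AZURE_BATCH_KEY");
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(batchUrl, batchAccount, batchKey);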
Example #10
        /// <summary>
        /// Populates Azure Storage with the required files, and 
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                string jobId = null;

                // Track the containers which are created as part of job submission so that we can clean them up later.
                HashSet<string> blobContainerNames = null;

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient);

                    // Submit the job
                    jobId = CreateJobId("SimpleJob");
                    blobContainerNames = await this.SubmitJobAsync(batchClient, jobId);

                    // Print out the status of the pools/jobs under this account
                    await ListJobsAsync(batchClient);
                    await ListPoolsAsync(batchClient);

                    // Wait for the job to complete
                    await this.WaitForJobAndPrintOutputAsync(batchClient, jobId);
                }
                finally
                {
                    // Delete the pool (if configured) and job
                    // TODO: In C# 6 we can await here instead of .Wait()
                    this.CleanupResourcesAsync(batchClient, jobId, blobContainerNames).Wait();
                }
            }
        }
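
The TODO in the finally block refers to C# 6, which allows await inside a finally clause. On a newer compiler the blocking .Wait() call can simply become an await, for example:

                finally
                {
                    // C# 6 and later allow await inside finally, avoiding the blocking .Wait().
                    await this.CleanupResourcesAsync(batchClient, jobId, blobContainerNames);
                }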
Example #11
        public static void Delete()
        {
            Log("Start Deleting jobs");

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);
            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                try
                {
                    foreach (var job in client.JobOperations.ListJobs(new ODATADetailLevel(filterClause: "startswith(id, '" + prefix + "')")))
                    {
                        Log("Delete " + job.Id);
                        client.JobOperations.DeleteJob(job.Id);
                    }
                }
                catch { }
                Log("All WI deleted.");
            }
        }
Example #12
        /// <summary>
        /// Submits a job to the Azure Batch service, and waits for it to complete
        /// </summary>
        private static async Task HelloWorldAsync(AccountSettings accountSettings, Settings helloWorldConfigurationSettings)
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(helloWorldConfigurationSettings.ToString());
            Console.WriteLine(accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                string jobId = GettingStartedCommon.CreateJobId("HelloWorldJob");

                try
                {
                    // Submit the job
                    await SubmitJobAsync(batchClient, helloWorldConfigurationSettings, jobId);

                    // Wait for the job to complete
                    await WaitForJobAndPrintOutputAsync(batchClient, jobId);
                }
                finally
                {
                    // Delete the job to ensure the tasks are cleaned up
                    if (!string.IsNullOrEmpty(jobId) && helloWorldConfigurationSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", jobId);
                        batchClient.JobOperations.DeleteJob(jobId);
                    }
                }
            }
        }
Example #13
        static void Main(string[] args)
        {
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(endpoint, account, key);
            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                List<ResourceFile> resources = new List<ResourceFile>();
                foreach (string blob in StorageHelper.ListBlobs(resourceContainer))
                {
                    string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                    resources.Add(new ResourceFile(StorageHelper.GetBlobSASURL(blob), filename));
                }

                CloudPool pool = client.PoolOperations.CreatePool(poolname, "4", "medium", 10);
                pool.StartTask = new StartTask();
                pool.StartTask.ResourceFiles = resources;
                pool.StartTask.CommandLine = @"cmd /c copy *.* %WATASK_TVM_ROOT_DIR%\shared\";
                pool.StartTask.WaitForSuccess = true;
                //pool.Commit(); // <-- Create demo pool

                // Submit Job
                string jobname = "MVP_" + Environment.GetEnvironmentVariable("USERNAME") + "_" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
                PoolInformation poolinfo = new PoolInformation();
                poolinfo.PoolId = poolname;

                CloudJob job = client.JobOperations.CreateJob(jobname, poolinfo); // <-- create a job that runs on demo pool

                Console.WriteLine("Creating job..." + jobname);
                job.Commit();
                job = client.JobOperations.GetJob(jobname);

                string inputcontainersas = StorageHelper.GetContainerSAS(inputContainer);
                string outputcontainersas = StorageHelper.GetContainerSAS(outputContainer);
                List<CloudTask> tasks = new List<CloudTask>();
                Console.WriteLine("Analyzing blobs...");
                foreach (string blob in StorageHelper.ListBlobs(inputContainer)) // <-- Going through blobs
                {
                    string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                    string taskname = "task_" + System.IO.Path.GetFileNameWithoutExtension(filename);

                    // prepare the command line
                    string cli;
                    cli = ". robocopy.exe ${env:WATASK_TVM_ROOT_DIR}\\shared\\ . *.*;";
                    cli += "ffmpeg.exe -i {0} -vf 'movie=microsoft.png [watermark]; [in][watermark] overlay=10:main_h-overlay_h-10 [out]' {0}.output.avi;".Replace("{0}", filename);
                    cli += "azcopy.exe . {0} *.output.avi /destsas:'{1}' /y".Replace("{0}", outputContainer).Replace("{1}", outputcontainersas);

                    cli = string.Format("powershell -command \"{0}\"", cli);

                    // prepare task object
                    CloudTask task = new CloudTask(taskname, cli);
                    task.ResourceFiles = new List<ResourceFile>();
                    task.ResourceFiles.Add(new ResourceFile(blob + inputcontainersas, filename));

                    tasks.Add(task); // <-- prepare 1 task for 1 blob
                }

                Console.WriteLine("Submit tasks...");
                client.JobOperations.AddTask(jobname, tasks); // <-- Submit all tasks with one API call

                Console.WriteLine("Waiting for tasks to finish...");
                client.Utilities.CreateTaskStateMonitor().WaitAll(client.JobOperations.ListTasks(jobname), TaskState.Completed, new TimeSpan(0, 60, 0));

                Console.WriteLine("Closing job...");
                client.JobOperations.TerminateJob(jobname);

                Console.WriteLine("All done. Press Enter to exit.");
                Console.ReadLine();
            }
        }
Example #14
        /// <summary>
        /// Populates Azure Storage with the required files, and 
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.jobManagerSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(this.accountSettings.StorageAccountName,
                    this.accountSettings.StorageAccountKey),
                    this.accountSettings.StorageServiceUrl,
                    useHttps: true);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                string jobId = null;

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient, cloudStorageAccount);

                    // Submit the job
                    jobId = GettingStartedCommon.CreateJobId("SimpleJob");
                    await this.SubmitJobAsync(batchClient, cloudStorageAccount, jobId);

                    // Print out the status of the pools/jobs under this account
                    await GettingStartedCommon.PrintJobsAsync(batchClient);
                    await GettingStartedCommon.PrintPoolsAsync(batchClient);

                    // Wait for the job manager to complete
                    CloudTask jobManagerTask = await batchClient.JobOperations.GetTaskAsync(jobId, JobManagerTaskId);
                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, new List<CloudTask>{ jobManagerTask }, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // TODO: In C# 6 we can await here instead of .Wait()

                    // Delete Azure Batch resources
                    List<string> jobIdsToDelete = new List<string>();
                    List<string> poolIdsToDelete = new List<string>();

                    if (this.jobManagerSettings.ShouldDeleteJob)
                    {
                        jobIdsToDelete.Add(jobId);
                    }

                    if (this.jobManagerSettings.ShouldDeletePool)
                    {
                        poolIdsToDelete.Add(this.jobManagerSettings.PoolId);
                    }

                    SampleHelpers.DeleteBatchResourcesAsync(batchClient, jobIdsToDelete, poolIdsToDelete).Wait();
                }
            }
        }
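
SampleHelpers.DeleteBatchResourcesAsync is referenced above but not shown. A plausible minimal implementation, assuming it just deletes each listed job and pool with the standard JobOperations and PoolOperations calls:

        private static async Task DeleteBatchResourcesAsync(BatchClient batchClient, List<string> jobIdsToDelete, List<string> poolIdsToDelete)
        {
            // Delete the jobs first so their tasks stop running on the pools being removed.
            foreach (string jobId in jobIdsToDelete)
            {
                Console.WriteLine("Deleting job: {0}", jobId);
                await batchClient.JobOperations.DeleteJobAsync(jobId);
            }

            foreach (string poolId in poolIdsToDelete)
            {
                Console.WriteLine("Deleting pool: {0}", poolId);
                await batchClient.PoolOperations.DeletePoolAsync(poolId);
            }
        }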
Example #15
        public static async Task MainAsync()
        {
            const string poolId = "MultiInstanceSamplePool";
            const string jobId  = "MultiInstanceSampleJob";
            const string taskId = "MultiInstanceSampleTask";

            const int numberOfNodes = 3;

            // The application package and version to deploy to the compute nodes.
            // It should contain your MPIHelloWorld sample MS-MPI program:
            // https://blogs.technet.microsoft.com/windowshpc/2015/02/02/how-to-compile-and-run-a-simple-ms-mpi-program/
            // And the MSMpiSetup.exe installer:
            // https://www.microsoft.com/download/details.aspx?id=52981
            // Then upload it as an application package:
            // https://azure.microsoft.com/documentation/articles/batch-application-packages/
            const string appPackageId = "MPIHelloWorld";
            const string appPackageVersion = "1.0";

            TimeSpan timeout = TimeSpan.FromMinutes(30);

            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create the pool of compute nodes and the job to which we add the multi-instance task.
                await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
                await CreateJobAsync(batchClient, jobId, poolId);

                // Create the multi-instance task. The MultiInstanceSettings property (configured
                // below) tells Batch to create one primary and several subtasks, the total number
                // of which matches the number of instances you specify in the MultiInstanceSettings.
                // This main task's command line is the "application command," and is executed *only*
                // by the primary, and only after the primary and all subtasks have executed the
                // "coordination command" (the MultiInstanceSettings.CoordinationCommandLine).
                CloudTask multiInstanceTask = new CloudTask(id: taskId,
                    commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\MPIHelloWorld.exe");

                // Configure the task's MultiInstanceSettings. Specify the number of nodes
                // to allocate to the multi-instance task, and the "coordination command".
                // The CoordinationCommandLine is run by the primary and subtasks, and is
                // used in this sample to start SMPD on the compute nodes.
                multiInstanceTask.MultiInstanceSettings =
                    new MultiInstanceSettings(numberOfNodes)
                    {
                        CoordinationCommandLine = @"cmd /c start cmd /c smpd.exe -d"
                    };

                // Submit the task to the job. Batch will take care of creating one primary and
                // enough subtasks to match the total number of nodes allocated to the task,
                // and schedule them for execution on the nodes.
                Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
                await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

                // Get the "bound" version of the multi-instance task.
                CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

                // We use a TaskStateMonitor to monitor the state of our tasks. In this case,
                // we will wait for the task to reach the Completed state.
                Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
                TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                await taskStateMonitor.WhenAll(new List<CloudTask> { mainTask }, TaskState.Completed, timeout);

                // Refresh the task to obtain up-to-date property values from Batch, such as
                // its current state and information about the node on which it executed.
                await mainTask.RefreshAsync();

                string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                Console.WriteLine();
                Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
                Console.WriteLine("---- stdout.txt ----");
                Console.WriteLine(stdOut);
                Console.WriteLine("---- stderr.txt ----");
                Console.WriteLine(stdErr);

                // Need to delay a bit to allow the Batch service to mark the subtasks as Complete
                TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
                Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
                System.Threading.Thread.Sleep(subtaskTimeout);

                Console.WriteLine();
                Console.WriteLine("---- Subtask information ----");

                // Obtain the collection of subtasks for the multi-instance task, and print
                // some information about each.
                IPagedEnumerable<SubtaskInformation> subtasks = mainTask.ListSubtasks();
                await subtasks.ForEachAsync(async (subtask) =>
                {
                    Console.WriteLine("subtask: " + subtask.Id);
                    Console.WriteLine("\texit code: " + subtask.ExitCode);

                    if (subtask.State == TaskState.Completed)
                    {
                        // Obtain the file from the node on which the subtask executed. For normal CloudTasks,
                        // we could simply call CloudTask.GetNodeFile(Constants.StandardOutFileName), but the
                        // subtasks are not "normal" tasks in Batch, and thus must be handled differently.
                        ComputeNode node =
                            await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId,
                                                                                 subtask.ComputeNodeInformation.ComputeNodeId);

                        string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                        string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                        NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                        NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                        stdOut = await stdOutFile.ReadAsStringAsync();
                        stdErr = await stdErrFile.ReadAsStringAsync();

                        Console.WriteLine($"\tnode: " + node.Id);
                        Console.WriteLine("\tstdout.txt: " + stdOut);
                        Console.WriteLine("\tstderr.txt: " + stdErr);
                    }
                    else
                    {
                        Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
                    }
                });

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(jobId);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(poolId);
                }
            }
        }
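
CreatePoolAsync and CreateJobAsync above are sample helpers that are not reproduced here. A minimal sketch of what a CreateJobAsync along these lines might do, using only standard JobOperations calls (pool creation involves the VM size, node count, and application package reference, and is omitted):

        private static async Task CreateJobAsync(BatchClient batchClient, string jobId, string poolId)
        {
            // Build an unbound job that targets the existing pool, then commit it to the service.
            CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation { PoolId = poolId });
            await unboundJob.CommitAsync();
        }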
        /// <summary>
        /// Runs the job manager task.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, job: {1} has started...",
                this.accountName,
                this.jobId);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials batchSharedKeyCredentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(batchSharedKeyCredentials))
            {
                //Construct a container SAS to provide the Batch Service access to the files required to
                //run the mapper and reducer tasks.
                string containerSas = Helpers.ConstructContainerSas(
                    this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey,
                    this.configurationSettings.StorageServiceUrl,
                    this.configurationSettings.BlobContainer);

                //
                // Submit mapper tasks.
                //
                Console.WriteLine("Submitting {0} mapper tasks.", this.configurationSettings.NumberOfMapperTasks);

                //The collection of tasks to add to the Batch Service.
                List<CloudTask> tasksToAdd = new List<CloudTask>();

                for (int i = 0; i < this.configurationSettings.NumberOfMapperTasks; i++)
                {
                    string taskId = Helpers.GetMapperTaskId(i);
                    string fileBlobName = Helpers.GetSplitFileName(i);
                    string fileBlobPath = Helpers.ConstructBlobSource(containerSas, fileBlobName);

                    string commandLine = string.Format("{0} -MapperTask {1}", Constants.TextSearchExe, fileBlobPath);
                    CloudTask unboundMapperTask = new CloudTask(taskId, commandLine);

                    //The set of files (exes, dlls and configuration files) required to run the mapper task.
                    IReadOnlyList<string> mapperTaskRequiredFiles = Constants.RequiredExecutableFiles;

                    List<ResourceFile> mapperTaskResourceFiles = Helpers.GetResourceFiles(containerSas, mapperTaskRequiredFiles);
                        
                    unboundMapperTask.ResourceFiles = mapperTaskResourceFiles; 

                    tasksToAdd.Add(unboundMapperTask);
                }

                //Submit the unbound task collection to the Batch Service.
                //Use the AddTask method which takes a collection of CloudTasks for the best performance.
                await batchClient.JobOperations.AddTaskAsync(this.jobId, tasksToAdd);

                //
                // Wait for the mapper tasks to complete.
                //
                Console.WriteLine("Waiting for the mapper tasks to complete...");

                //List all the mapper tasks using an id filter.
                DetailLevel mapperTaskIdFilter = new ODATADetailLevel()
                                                        {
                                                            FilterClause = string.Format("startswith(id, '{0}')", Constants.MapperTaskPrefix)
                                                        };

                IEnumerable<CloudTask> tasksToMonitor = batchClient.JobOperations.ListTasks(
                    this.jobId,
                    detailLevel: mapperTaskIdFilter);

                //Use the task state monitor to wait for the tasks to complete.
                TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                    
                bool timedOut = await taskStateMonitor.WaitAllAsync(tasksToMonitor, TaskState.Completed, TimeSpan.FromMinutes(5));

                //Get the list of mapper tasks in order to analyze their state and ensure they completed successfully.
                IPagedEnumerable<CloudTask> asyncEnumerable = batchClient.JobOperations.ListTasks(
                    this.jobId,
                    detailLevel: mapperTaskIdFilter);

                await asyncEnumerable.ForEachAsync(async cloudTask =>
                                             {
                                                 Console.WriteLine("Task {0} is in state: {1}", cloudTask.Id, cloudTask.State);

                                                 await Helpers.CheckForTaskSuccessAsync(cloudTask, dumpStandardOutOnTaskSuccess: false);

                                                 Console.WriteLine();
                                             });
                
                //If not all the tasks reached the desired state within the timeout then the job manager
                //cannot continue.
                if (timedOut)
                {
                    const string errorMessage = "Mapper tasks did not complete within expected timeout.";
                    Console.WriteLine(errorMessage);
                        
                    throw new TimeoutException(errorMessage);
                }
                    
                //
                // Create the reducer task.
                //
                string reducerTaskCommandLine = string.Format("{0} -ReducerTask", Constants.TextSearchExe);
                    
                Console.WriteLine("Adding the reducer task: {0}", Constants.ReducerTaskId);
                CloudTask unboundReducerTask = new CloudTask(Constants.ReducerTaskId, reducerTaskCommandLine);

                //The set of files (exes, dlls and configuration files) required to run the reducer task.
                List<ResourceFile> reducerTaskResourceFiles = Helpers.GetResourceFiles(containerSas, Constants.RequiredExecutableFiles);

                unboundReducerTask.ResourceFiles = reducerTaskResourceFiles;

                //Send the request to the Batch Service to add the reducer task.
                await batchClient.JobOperations.AddTaskAsync(this.jobId, unboundReducerTask);

                //
                //Wait for the reducer task to complete.
                //

                //Get the bound reducer task and monitor it for completion.
                CloudTask boundReducerTask = await batchClient.JobOperations.GetTaskAsync(this.jobId, Constants.ReducerTaskId);

                timedOut = await taskStateMonitor.WaitAllAsync(new List<CloudTask> {boundReducerTask}, TaskState.Completed, TimeSpan.FromMinutes(2));

                //Refresh the reducer task to get the most recent information about it from the Batch Service.
                await boundReducerTask.RefreshAsync();

                //Dump the reducer task's exit code and scheduling error for debugging purposes.
                await Helpers.CheckForTaskSuccessAsync(boundReducerTask, dumpStandardOutOnTaskSuccess: true);

                //Handle the possibility that the reducer task did not complete in the expected timeout.
                if (timedOut)
                {
                    const string errorMessage = "Reducer task did not complete within expected timeout.";

                    Console.WriteLine("Task {0} is in state: {1}", boundReducerTask.Id, boundReducerTask.State);

                    Console.WriteLine(errorMessage);
                    throw new TimeoutException(errorMessage);
                }
                    
                //The job manager has completed.
                Console.WriteLine("JobManager completed successfully.");
            }
        }
Example #17
        public static void Submit()
        {
            Log("Start submission process.");
            state = null;

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);
            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                #region job submission

                string jobname = prefix + Environment.GetEnvironmentVariable("USERNAME") + "_" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
                PoolInformation pool = new PoolInformation();
                pool.PoolId = Settings.poolname;

                CloudJob job = client.JobOperations.CreateJob(jobname, pool); // <-- create a job that runs on the configured pool

                Log("Submitting...");
                job.Commit();
                jobName = jobname;
                Log(string.Format("Job {0} created.", jobname));

                job = client.JobOperations.GetJob(jobname);

                Log("Analyzing input blobs...");
                string inputcontainersas = StorageHelper.GetContainerSAS(Settings.inputContainer);
                string outputcontainersas = StorageHelper.GetContainerSAS(Settings.outputContainer);
                foreach (string blob in StorageHelper.ListBlobs(Settings.inputContainer))
                {
                    string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                    string taskname = "task_" + System.IO.Path.GetFileNameWithoutExtension(filename);

                    // prepare the command line
                    string cli;
                    cli = ". robocopy.exe ${env:WATASK_TVM_ROOT_DIR}\\shared\\ . *.*;";
                    cli += "ffmpeg.exe -i {0} -vf 'movie=microsoft.png [watermark]; [in][watermark] overlay=10:main_h-overlay_h-10 [out]' {0}.output.avi;".Replace("{0}", filename);
                    cli += "azcopy.exe . {0} *.output.avi /destsas:'{1}' /y".Replace("{0}", Settings.outputContainer).Replace("{1}", outputcontainersas);

                    cli = string.Format("powershell -command \"{0}\"", cli);

                    // prepare task object
                    CloudTask task = new CloudTask(taskname, cli);
                    task.ResourceFiles = new List<ResourceFile>();
                    task.ResourceFiles.Add(new ResourceFile(blob + inputcontainersas, filename));

                    job.AddTask(task); // <-- add Task
                }

                #endregion job submission

                ThreadPool.QueueUserWorkItem((x) => { Monitor(); });

                client.Utilities.CreateTaskStateMonitor().WaitAll(client.JobOperations.ListTasks(jobname), TaskState.Completed, new TimeSpan(0, 60, 0));
                client.JobOperations.GetJob(jobname).Terminate();

            }
        }
Example #18
        public static void Main(string[] args)
        {
            const int taskCount = 5000;

            const string poolId = "poolEffQuery";
            const string jobId  = "jobEffQuery";

            // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);
            
            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CreatePool(batchClient, poolId).Wait();
                CloudPool pool = batchClient.PoolOperations.GetPool(poolId);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = ArticleHelpers.CreateJobAsync(batchClient, poolId, jobId).Result;

                // Configure the tasks we'll be querying. Each task simply echoes the node's
                // name and then exits. We create "large" tasks by setting an environment
                // variable for each that is 2048 bytes in size. This is done simply to
                // increase response time when querying the batch service to more clearly
                // demonstrate query durations.
                List<CloudTask> tasks = new List<CloudTask>();
                List<EnvironmentSetting> environmentSettings = new List<EnvironmentSetting>();
                environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
                for (int i = 1; i < taskCount + 1; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(5, '0');
                    string taskCommandLine = "cmd /c echo %COMPUTERNAME%";
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    task.EnvironmentSettings = environmentSettings;
                    tasks.Add(task);
                }

                Console.WriteLine();
                Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

                Stopwatch stopwatch = new Stopwatch();
                stopwatch.Start();

                // To reduce the chances of hitting Batch service throttling limits, we add the tasks in
                // one API call as opposed to a separate AddTask call for each. This is crucial if you
                // are adding many tasks to your jobs.
                batchClient.JobOperations.AddTask(job.Id, tasks);

                stopwatch.Stop();
                Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
                Console.ReadLine();
                Console.WriteLine();
                stopwatch.Reset();

                // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
                // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
                // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
                // transferred, lowering your query response times (potentially greatly).

                // Get a subset of the tasks based on different task states
                ODATADetailLevel detail = new ODATADetailLevel();
                detail.FilterClause = "state eq 'active'";
                detail.SelectClause = "id,state";
                QueryTasks(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'running'";
                QueryTasks(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'completed'";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, but limit the properties returned to task id and state only
                detail.FilterClause = null;
                detail.SelectClause = "id,state";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, include id and state, also include the inflated environment settings property
                detail.SelectClause = "id,state,environmentSettings";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, include all standard properties, and expand the statistics
                detail.ExpandClause = "stats";
                detail.SelectClause = null;
                QueryTasks(batchClient, job.Id, detail);

                Console.WriteLine();
                Console.WriteLine("Sample complete, hit ENTER to continue...");
                Console.ReadLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.JobOperations.DeleteJob(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.PoolOperations.DeletePool(pool.Id);
                }
            }
        }
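
QueryTasks is called repeatedly above but not shown. A minimal sketch of what such a helper might look like, timing a ListTasks call with the supplied detail level (the method name and output format are assumptions):

        private static void QueryTasks(BatchClient batchClient, string jobId, ODATADetailLevel detail)
        {
            Stopwatch stopwatch = Stopwatch.StartNew();

            // Force full enumeration so the elapsed time covers the complete result set.
            List<CloudTask> tasks = batchClient.JobOperations.ListTasks(jobId, detailLevel: detail).ToList();

            stopwatch.Stop();
            Console.WriteLine("{0} tasks returned in {1} (filter: '{2}', select: '{3}', expand: '{4}')",
                tasks.Count, stopwatch.Elapsed, detail.FilterClause, detail.SelectClause, detail.ExpandClause);
        }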
Example #19
        static void Main(string[] args)
        {
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(url, account, key);
            string now = DateTime.UtcNow.ToString("r");
            using (BatchClient client = BatchClient.Open(cred))
            {
                //client.PoolOperations.EnableAutoScale("demo", "2");
                string formula = string.Format(@"
            $TargetDedicated={1};
            lifespan=time()-time(""{0}"");
            span=TimeInterval_Minute * 60;
            startup=TimeInterval_Minute * 10;
            ratio=50;
            $TargetDedicated=(lifespan>startup?(max($RunningTasks.GetSample(span, ratio), $ActiveTasks.GetSample(span, ratio)) == 0 ? 0 : $TargetDedicated):{1});
            ", now, 4);
                try {
                    CloudPool p = client.PoolOperations.CreatePool("formulasample", "4", "small");
                    p.AutoScaleEnabled = true;
                    p.AutoScaleFormula = formula;
                    p.Commit();
                } catch(Exception ex)
                {
                    // Go through all exceptions and dump useful information
                    (ex as AggregateException).Handle((x) =>
                    {
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;

                            if (null != be.RequestInformation && null != be.RequestInformation.AzureError)
                            {
                                // Write the server side error information
                                Console.Error.WriteLine(be.RequestInformation.AzureError.Code);
                                Console.Error.WriteLine(be.RequestInformation.AzureError.Message.Value);

                                if (null != be.RequestInformation.AzureError.Values)
                                {
                                    foreach (var v in be.RequestInformation.AzureError.Values)
                                    {
                                        Console.Error.WriteLine(v.Key + " : " + v.Value);
                                    }
                                }
                            }
                        }

                        return false;
                    });
                }
                //var result = client.PoolOperations.EvaluateAutoScale("demo", formula);
                //if(result.AutoScaleRun.Error != null)
                //{
                //    Console.WriteLine(result.AutoScaleRun.Error.Code + " : " + result.AutoScaleRun.Error.Message);
                //    foreach(var e in result.AutoScaleRun.Error.Values)
                //    {
                //        Console.WriteLine(" " + e.Name + " : " + e.Value);
                //    }
                //}
                //Console.WriteLine(result.AutoScaleRun.Results);
                //Console.ReadLine();
            }
        }
Example #20
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize     = "small";
            const int nodeCount       = 4;
            const int maxTasksPerNode = 4;
            const int taskCount       = 32;

            // Ensure there are enough tasks to help avoid hitting some timeout conditions below
            const int minimumTaskCount = nodeCount * maxTasksPerNode * 2;
            if (taskCount < minimumTaskCount)
            {
                Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
                Console.WriteLine();

                // Not enough tasks, exit the application
                return;
            }
  
            // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
            // values to simulate variable task duration
            const int minPings = 30;
            const int maxPings = 60;

            const string poolId = "ParallelTasksSamplePool";
            const string jobId  = "ParallelTasksSampleJob";

            // Amount of time to wait before timing out (potentially) long-running tasks
            TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);
            
            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                      poolId,
                                                                      nodeSize,
                                                                      nodeCount,
                                                                      maxTasksPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);
                
                // The job's tasks ping localhost a random number of times between minPings and maxPings.
                // Adjust the minPings/maxPings values above to experiment with different task durations.
                Random rand = new Random();
                List<CloudTask> tasks = new List<CloudTask>();
                for (int i = 1; i <= taskCount; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(3, '0');
                    string taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
                // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be 
                // scheduled to execute on a compute node as soon as any node has reached the Idle state. Because the focus of this sample 
                // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to 
                // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for 
                // parallelization.
                await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Pause again to wait until *all* nodes are running tasks
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Print out task assignment information.
                Console.WriteLine();
                await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);
                Console.WriteLine();

                // Pause execution while we wait for all of the tasks to complete
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();

                if (await batchClient.Utilities.CreateTaskStateMonitor().WaitAllAsync(job.ListTasks(),
                                                                   TaskState.Completed,
                                                                   longTaskDurationLimit))
                {
                    Console.WriteLine("Operation timed out while waiting for submitted tasks to reach state {0}", TaskState.Completed); 
                }

                stopwatch.Stop();

                // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
                // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
                // amount of data transferred, lowering your query response times and increasing performance.
                ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
                IPagedEnumerable<CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);

                // Get a collection of the completed tasks sorted by the compute nodes on which they executed
                List<CloudTask> completedTasks = allTasks
                                                .Where(t => t.State == TaskState.Completed)
                                                .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
                                                .ToList();

                // Print the completed task information
                Console.WriteLine();
                Console.WriteLine("Completed tasks:");
                string lastNodeId = string.Empty;
                foreach (CloudTask task in completedTasks)
                {
                    if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
                    {
                        Console.WriteLine();
                        Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
                    }

                    lastNodeId = task.ComputeNodeInformation.ComputeNodeId;

                    Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                }

                // Get a collection of the uncompleted tasks which may exist if the TaskMonitor timeout was hit
                List<CloudTask> uncompletedTasks = allTasks
                                                   .Where(t => t.State != TaskState.Completed)
                                                   .OrderBy(t => t.Id)
                                                   .ToList();

                // Print a list of uncompleted tasks, if any
                Console.WriteLine();
                Console.WriteLine("Uncompleted tasks:");
                Console.WriteLine();
                if (uncompletedTasks.Any())
                {
                    foreach (CloudTask task in uncompletedTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                    }
                }
                else
                {
                    Console.WriteLine("\t<none>");
                }

                // Print some summary information
                Console.WriteLine();
                Console.WriteLine("             Nodes: " + nodeCount);
                Console.WriteLine("         Node size: " + nodeSize);
                Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
                Console.WriteLine("             Tasks: " + tasks.Count);
                Console.WriteLine("          Duration: " + stopwatch.Elapsed);
                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
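The sample above relies on ArticleHelpers.CreatePoolIfNotExistAsync from the samples' Common project, which is not shown here. Below is a minimal sketch of such a helper, assuming the usual Microsoft.Azure.Batch and Microsoft.Azure.Batch.Common using directives; the osFamily value and the PoolExists error-code check are assumptions, and the real helper may differ.

        private static async Task<CloudPool> CreatePoolIfNotExistAsync(BatchClient batchClient, string poolId, string nodeSize, int nodeCount, int maxTasksPerNode)
        {
            // Define an unbound pool with the requested node size, node count, and task slot count.
            CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                                   virtualMachineSize: nodeSize,
                                                                   cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"),
                                                                   targetDedicated: nodeCount);
            pool.MaxTasksPerComputeNode = maxTasksPerNode;

            try
            {
                await pool.CommitAsync();
            }
            catch (BatchException be)
            {
                if (be.RequestInformation != null &&
                    be.RequestInformation.AzureError != null &&
                    be.RequestInformation.AzureError.Code == BatchErrorCodeStrings.PoolExists)
                {
                    // The pool already exists; reuse it rather than failing.
                    Console.WriteLine("Pool {0} already exists, reusing it.", poolId);
                }
                else
                {
                    throw;
                }
            }

            // Return the bound pool from the Batch service.
            return await batchClient.PoolOperations.GetPoolAsync(poolId);
        }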
Example #21
        public static void JobMain(string[] args)
        {
            Console.WriteLine("Setting up Batch Process - ImageBlur. \nPress Enter to begin.");
            Console.WriteLine("-------------------------------------------------------------");
            Console.ReadLine();
            Settings imageBlurSettings = Settings.Default;
            AccountSettings accountSettings = AccountSettings.Default;

            /* Setting up credentials for Batch and Storage accounts
             * =====================================================
             */

            StorageCredentials storageCredentials = new StorageCredentials(
                accountSettings.StorageAccountName, 
                accountSettings.StorageAccountKey);
            CloudStorageAccount storageAccount = new CloudStorageAccount(storageCredentials, useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                storageAccount.BlobEndpoint.ToString());

            BatchSharedKeyCredentials batchCredentials = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl, 
                accountSettings.BatchAccountName, 
                accountSettings.BatchAccountKey);


            using (BatchClient client = BatchClient.Open(batchCredentials))
            {
                string stagingContainer = null;

                /* Setting up pool to run job and tasks in
                 * =======================================
                 */

                CreatePool(client, imageBlurSettings, accountSettings);

                try
                {

                    /* Setting up Job ------------------------
                     * =======================================
                     */

                    Console.WriteLine("Creating job {0}. \nPress Enter to continue.", imageBlurSettings.JobId);
                    Console.ReadLine();

                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = imageBlurSettings.JobId;
                    unboundJob.PoolInformation = new PoolInformation() { PoolId = imageBlurSettings.PoolId };
                    unboundJob.Commit();


                    /* Uploading Source Image(s) to run varying degrees of Blur on
                     * ===========================================================
                     * Here, the input data is uploaded separately to Storage and 
                     * its URI is passed to the task as an argument.
                     */

                    Console.WriteLine("Uploading source images. \nPress Enter to continue.");
                    Console.ReadLine();

                    string[] sourceImages = imageBlurSettings.SourceImageNames.Split(',');
                    List<String> sourceImageUris = new List<String>();
                    for( var i = 0; i < sourceImages.Length; i++)
                    {
                        Console.WriteLine("    Uploading {0}.", sourceImages[i]);
                        sourceImageUris.Add( UploadSourceImagesFileToCloudBlob(accountSettings, sourceImages[i]));
                        Console.WriteLine("    Source Image uploaded to: <{0}>.", sourceImageUris[i]);
                    }

                    Console.WriteLine();
                    Console.WriteLine("All Source Images uploaded. \nPress Enter to continue.");
                    Console.ReadLine();

                    /* Setting up tasks with dependencies ----------------
                     * ===================================================
                     */

                    Console.WriteLine("Setting up files to stage for tasks. \nPress Enter to continue.");
                    Console.ReadLine();

                    // Setting up Files to Stage - Files to upload into each task (executables and dependent assemblies)
                    FileToStage imageBlurExe = new FileToStage(ImageBlurExeName, stagingStorageAccount);
                    FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);
                    FileToStage imageProcessorDll = new FileToStage(ImageProcessorDllName, stagingStorageAccount);

                    // initialize collection to hold tasks that will be submitted in their entirety
                    List<CloudTask> tasksToRun = new List<CloudTask>(imageBlurSettings.NumberOfTasks);

                    for (int i = 0; i < imageBlurSettings.NumberOfTasks; i++)
                    {
                        // create individual tasks (cmd line passed in as argument)
                        CloudTask task = new CloudTask("task_" + i, String.Format("{0} --Task {1} {2} {3}",
                            ImageBlurExeName,
                            sourceImageUris[i],
                            accountSettings.StorageAccountName,
                            accountSettings.StorageAccountKey));

                        // list of files to stage to a container -- for each job, one container is created and
                        // files all resolve to Azure Blobs by their name
                        task.FilesToStage = new List<IFileStagingProvider> { imageBlurExe, storageDll, imageProcessorDll };

                        tasksToRun.Add(task);
                        Console.WriteLine("\t task {0} has been added", "task_" + i);
                    }
                    Console.WriteLine();

                    /* Commit tasks with dependencies ----------------
                     * ===============================================
                     */

                    Console.WriteLine("Running Tasks. \nPress Enter to continue.");
                    Console.WriteLine("-------------------------------------------------------------");
                    Console.ReadLine();

                    ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
                    client.JobOperations.AddTask(imageBlurSettings.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- \nyou will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(imageBlurSettings.JobId);

                    Console.WriteLine();
                    Console.Write("Waiting for tasks to complete ...   ");
                    IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");
                    Console.WriteLine();

                    Console.WriteLine("See below for Stdout / Stderr for each node.");
                    Console.WriteLine("============================================");

                    /* Display stdout/stderr for each task on completion 
                     * =================================================
                     */

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id + ":");
                        Console.WriteLine("    stdout:" + Environment.NewLine + t.GetNodeFile("stdout.txt").ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("    stderr:" + Environment.NewLine + t.GetNodeFile("stderr.txt").ReadAsString());
                    }

                    Console.WriteLine();
                    Console.WriteLine("Please find the resulting images in storage. \nPress Enter to continue.");
                    Console.WriteLine("=======================================================================");
                    Console.ReadLine();
                }
                finally
                {
                    /* If configured as such, Delete the resources that were used in this process
                     * ==========================================================================
                     */

                    //Delete the pool that we created
                    if (imageBlurSettings.DeletePool)
                    {

                        Console.WriteLine("Deleting Pool. \nPress Enter to continue.");
                        Console.ReadLine();

                        Console.WriteLine("Deleting pool: {0}", imageBlurSettings.PoolId);
                        client.PoolOperations.DeletePool(imageBlurSettings.PoolId);
                    }

                    //Delete the job that we created
                    if (imageBlurSettings.DeleteJob)
                    {

                        Console.WriteLine("Deleting Job. \nPress Enter to continue.");
                        Console.ReadLine();

                        Console.WriteLine("Deleting job: {0}", imageBlurSettings.JobId);
                        client.JobOperations.DeleteJob(imageBlurSettings.JobId);
                    }

                    //Delete the containers we created
                    if (imageBlurSettings.DeleteContainer)
                    {

                        Console.WriteLine("Deleting Container. \nPress Enter to continue.");
                        Console.ReadLine();

                        DeleteContainers(accountSettings, stagingContainer);
                    }
                    Console.WriteLine();
                    Console.WriteLine("Please check the Azure portal to make sure that all resources you want deleted are in fact deleted");
                    Console.WriteLine("==================================================================================================");
                    Console.WriteLine();
                    Console.WriteLine("Press Enter to exit the program");
                    Console.WriteLine("Exiting program...");
                }

            }

        }
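Example #21 relies on an UploadSourceImagesFileToCloudBlob helper that is not shown. A minimal sketch is below, assuming the classic Microsoft.WindowsAzure.Storage client and System.IO using directives; the container name and the 24-hour SAS lifetime are assumptions.

        private static string UploadSourceImagesFileToCloudBlob(AccountSettings accountSettings, string filePath)
        {
            CloudStorageAccount storageAccount = new CloudStorageAccount(
                new StorageCredentials(accountSettings.StorageAccountName, accountSettings.StorageAccountKey),
                useHttps: true);

            // Upload the local file into a (hypothetical) source-image container.
            CloudBlobContainer container = storageAccount.CreateCloudBlobClient().GetContainerReference("imageblur-source");
            container.CreateIfNotExists();

            CloudBlockBlob blob = container.GetBlockBlobReference(Path.GetFileName(filePath));
            using (FileStream stream = File.OpenRead(filePath))
            {
                blob.UploadFromStream(stream);
            }

            // Return a read-only SAS URL so the tasks can download the image without the account key.
            string sasToken = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy
            {
                Permissions = SharedAccessBlobPermissions.Read,
                SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddHours(24)
            });

            return blob.Uri + sasToken;
        }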
Example #22
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize     = "small";
            const int nodeCount       = 1;
            const int maxTasksPerNode = 4;

            // Adjust the task count to experiment with different list operation query durations
            const int taskCount = 5000;

            const string poolId = "EfficientListQueriesSamplePool";
            const string jobId  = "EfficientListQueriesSampleJob";

            // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);
            

            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                      poolId,
                                                                      nodeSize,
                                                                      nodeCount,
                                                                      maxTasksPerNode);
                
                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // Configure the tasks we'll be querying. Each task simply echoes the node's
                // name and then exits. We create "large" tasks by setting an environment
                // variable for each that is 2048 bytes in size. This is done simply to
                // increase response time when querying the batch service to more clearly
                // demonstrate query durations.
                List<CloudTask> tasks = new List<CloudTask>();
                List<EnvironmentSetting> environmentSettings = new List<EnvironmentSetting>();
                environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
                for (int i = 1; i < taskCount + 1; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(5, '0');
                    string taskCommandLine = "cmd /c echo %COMPUTERNAME%";
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    task.EnvironmentSettings = environmentSettings;
                    tasks.Add(task);
                }

                Console.WriteLine();
                Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                stopwatch.Stop();
                Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
                Console.ReadLine();
                Console.WriteLine();
                stopwatch.Reset();

                // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
                // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
                // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
                // transferred, lowering your query response times (potentially greatly).

                // Get a subset of the tasks based on different task states
                ODATADetailLevel detail = new ODATADetailLevel();
                detail.FilterClause = "state eq 'active'";
                detail.SelectClause = "id,state";
                await QueryTasksAsync(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'running'";
                await QueryTasksAsync(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'completed'";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, but limit the properties returned to task id and state only
                detail.FilterClause = null;
                detail.SelectClause = "id,state";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, include id and state, also include the inflated environment settings property
                detail.SelectClause = "id,state,environmentSettings";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, include all standard properties, and expand the statistics
                detail.ExpandClause = "stats";
                detail.SelectClause = null;
                await QueryTasksAsync(batchClient, job.Id, detail);

                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
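Example #22 calls two helpers that are not shown, GetBigString and QueryTasksAsync. Minimal sketches of both follow, assuming QueryTasksAsync simply times the list operation and prints a summary; the exact behavior of the original helpers may differ.

        // Build a string of the requested length; used to inflate each task's environment settings.
        private static string GetBigString(int size)
        {
            return new string('a', size);
        }

        // Hypothetical sketch: list the job's tasks with the supplied detail level and report the query duration.
        private static async Task QueryTasksAsync(BatchClient batchClient, string jobId, ODATADetailLevel detail)
        {
            Stopwatch stopwatch = Stopwatch.StartNew();

            List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId, detail).ToListAsync();

            stopwatch.Stop();
            Console.WriteLine("{0} tasks retrieved in {1} (filter: {2}; select: {3}; expand: {4})",
                              tasks.Count,
                              stopwatch.Elapsed,
                              detail.FilterClause ?? "<none>",
                              detail.SelectClause ?? "<none>",
                              detail.ExpandClause ?? "<none>");
        }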
Example #23
        public static void Terminate()
        {
            Log("Start Terminating Workitem");

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);
            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                try
                {
                    client.JobOperations.TerminateJob(jobName);
                }
                catch (Exception) { }
                Log(string.Format("Job {0} terminated.", jobName));
            }
        }
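TerminateJob only requests termination; if the caller needs to know when the job has actually finished, the job state can be polled afterwards. A minimal sketch follows, assuming the same Settings, jobName, and Log members used above and a System.Threading using directive.

        public static void WaitForJobCompletion(TimeSpan timeout)
        {
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);
            using (BatchClient client = BatchClient.Open(cred))
            {
                DateTime deadline = DateTime.UtcNow + timeout;
                while (DateTime.UtcNow < deadline)
                {
                    // Fetch only the id and state properties to keep the query cheap.
                    CloudJob job = client.JobOperations.GetJob(jobName, new ODATADetailLevel(selectClause: "id,state"));
                    if (job.State == JobState.Completed)
                    {
                        Log(string.Format("Job {0} reached the Completed state.", jobName));
                        return;
                    }

                    Thread.Sleep(TimeSpan.FromSeconds(10));
                }

                Log(string.Format("Timed out waiting for job {0} to complete.", jobName));
            }
        }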
Example #24
        /// <summary>
        /// Populates Azure Storage with the required files, and 
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            // Set up the storage account that holds the task input files so that its containers can be cleaned up later
            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey),
                    new Uri(this.configurationSettings.StorageBlobEndpoint),
                    null,
                    null,
                    null);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                string jobId = null;

                // Track the containers which are created as part of job submission so that we can clean them up later.
                HashSet<string> blobContainerNames = new HashSet<string>();

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient, cloudStorageAccount);

                    // Submit the job
                    jobId = GettingStartedCommon.CreateJobId("SimpleJob");
                    blobContainerNames = await this.SubmitJobAsync(batchClient, jobId);

                    // Print out the status of the pools/jobs under this account
                    await GettingStartedCommon.PrintJobsAsync(batchClient);
                    await GettingStartedCommon.PrintPoolsAsync(batchClient);

                    // Wait for the job to complete
                    List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();
                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, tasks, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Delete the pool (if configured) and job
                    // TODO: In C# 6 we can await here instead of .Wait()
                    
                    // Delete Azure Storage container data
                    SampleHelpers.DeleteContainersAsync(cloudStorageAccount, blobContainerNames).Wait();

                    // Delete Azure Batch resources
                    List<string> jobIdsToDelete = new List<string>();
                    List<string> poolIdsToDelete = new List<string>();

                    if (this.configurationSettings.ShouldDeleteJob)
                    {
                        jobIdsToDelete.Add(jobId);
                    }

                    if (this.configurationSettings.ShouldDeletePool)
                    {
                        poolIdsToDelete.Add(this.configurationSettings.PoolId);
                    }

                    SampleHelpers.DeleteBatchResourcesAsync(batchClient, jobIdsToDelete, poolIdsToDelete).Wait();
                }
            }
        }
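The finally block above delegates cleanup to SampleHelpers.DeleteBatchResourcesAsync, which is not shown. A minimal sketch of what such a helper might look like, using only the standard BatchClient delete operations:

        public static async Task DeleteBatchResourcesAsync(BatchClient batchClient, List<string> jobIdsToDelete, List<string> poolIdsToDelete)
        {
            foreach (string jobId in jobIdsToDelete)
            {
                Console.WriteLine("Deleting job: {0}", jobId);
                await batchClient.JobOperations.DeleteJobAsync(jobId);
            }

            foreach (string poolId in poolIdsToDelete)
            {
                Console.WriteLine("Deleting pool: {0}", poolId);
                await batchClient.PoolOperations.DeletePoolAsync(poolId);
            }
        }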
Example #25
        /// <summary>
        /// Provides an asynchronous version of the Main method, allowing for the awaiting of async method calls within.
        /// </summary>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        private static async Task MainAsync()
        {
            Console.WriteLine("Sample start: {0}", DateTime.Now);
            Console.WriteLine();
            Stopwatch timer = new Stopwatch();
            timer.Start();

            // Construct the Storage account connection string
            string storageConnectionString = String.Format("DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}",
                                                            StorageAccountName, StorageAccountKey);

            // Retrieve the storage account
            CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString);

            // Create the blob client, for use in obtaining references to blob storage containers
            CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient();
            
            // Use the blob client to create the containers in Azure Storage if they don't yet exist
            const string appContainerName    = "application";
            const string inputContainerName  = "input";
            const string outputContainerName = "output";
            await CreateContainerIfNotExistAsync(blobClient, appContainerName);
            await CreateContainerIfNotExistAsync(blobClient, inputContainerName);
            await CreateContainerIfNotExistAsync(blobClient, outputContainerName);

            // Paths to the executable and its dependencies that will be executed by the tasks
            List<string> applicationFilePaths = new List<string>
            {
                // The DotNetTutorial project includes a project reference to TaskApplication, allowing us to
                // determine the path of the task application binary dynamically
                typeof(TaskApplication.Program).Assembly.Location,
                "Microsoft.WindowsAzure.Storage.dll"
            };

            // The collection of data files that are to be processed by the tasks
            List<string> inputFilePaths = new List<string>
            {
                @"..\..\taskdata1.txt",
                @"..\..\taskdata2.txt",
                @"..\..\taskdata3.txt"
            };

            // Upload the application and its dependencies to Azure Storage. This is the application that will
            // process the data files, and will be executed by each of the tasks on the compute nodes.
            List<ResourceFile> applicationFiles = await UploadFilesToContainerAsync(blobClient, appContainerName, applicationFilePaths);

            // Upload the data files. This is the data that will be processed by each of the tasks that are
            // executed on the compute nodes within the pool.
            List<ResourceFile> inputFiles = await UploadFilesToContainerAsync(blobClient, inputContainerName, inputFilePaths);

            // Obtain a shared access signature that provides write access to the output container to which
            // the tasks will upload their output.
            string outputContainerSasUrl = GetContainerSasUrl(blobClient, outputContainerName, SharedAccessBlobPermissions.Write);

            // Create a BatchClient. We'll now be interacting with the Batch service in addition to Storage
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(BatchAccountUrl, BatchAccountName, BatchAccountKey);
            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create the pool that will contain the compute nodes that will execute the tasks.
                // The ResourceFile collection that we pass in is used for configuring the pool's StartTask
                // which is executed each time a node first joins the pool (or is rebooted or reimaged).
                await CreatePoolAsync(batchClient, PoolId, applicationFiles);

                // Create the job that will run the tasks.
                await CreateJobAsync(batchClient, JobId, PoolId);

                // Add the tasks to the job. We need to supply a container shared access signature for the
                // tasks so that they can upload their output to Azure Storage.
                await AddTasksAsync(batchClient, JobId, inputFiles, outputContainerSasUrl);

                // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete
                await MonitorTasks(batchClient, JobId, TimeSpan.FromMinutes(30));

                // Download the task output files from the output Storage container to a local directory
                await DownloadBlobsFromContainerAsync(blobClient, outputContainerName, Environment.GetEnvironmentVariable("TEMP"));

                // Clean up Storage resources
                await DeleteContainerAsync(blobClient, appContainerName);
                await DeleteContainerAsync(blobClient, inputContainerName);
                await DeleteContainerAsync(blobClient, outputContainerName);

                // Print out some timing info
                timer.Stop();
                Console.WriteLine();
                Console.WriteLine("Sample end: {0}", DateTime.Now);
                Console.WriteLine("Elapsed time: {0}", timer.Elapsed);

                // Clean up Batch resources (if the user so chooses)
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(JobId);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(PoolId);
                }
            }
        }
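Example #25 uses storage helpers (CreateContainerIfNotExistAsync and GetContainerSasUrl) that are defined elsewhere in the DotNetTutorial project. Minimal sketches follow, assuming the classic Microsoft.WindowsAzure.Storage client; the two-hour SAS lifetime is an assumption.

        private static async Task CreateContainerIfNotExistAsync(CloudBlobClient blobClient, string containerName)
        {
            CloudBlobContainer container = blobClient.GetContainerReference(containerName);
            if (await container.CreateIfNotExistsAsync())
            {
                Console.WriteLine("Container [{0}] created.", containerName);
            }
            else
            {
                Console.WriteLine("Container [{0}] exists, skipping creation.", containerName);
            }
        }

        private static string GetContainerSasUrl(CloudBlobClient blobClient, string containerName, SharedAccessBlobPermissions permissions)
        {
            // Build a container-level SAS granting the requested permissions for a limited time.
            SharedAccessBlobPolicy sasPolicy = new SharedAccessBlobPolicy
            {
                Permissions = permissions,
                SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddHours(2)
            };

            CloudBlobContainer container = blobClient.GetContainerReference(containerName);
            string sasToken = container.GetSharedAccessSignature(sasPolicy);

            return container.Uri + sasToken;
        }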
Example #26
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize = "small";
            const string osFamily = "4";
            const int nodeCount = 1;

            const string poolId = "TaskDependenciesSamplePool";
            const string jobId = "TaskDependenciesSampleJob";

            // Amount of time to wait before timing out long-running tasks.
            TimeSpan timeLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create the pool.
                Console.WriteLine("Creating pool [{0}]...", poolId);
                CloudPool unboundPool =
                    batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                          cloudServiceConfiguration: new CloudServiceConfiguration(osFamily),
                                                          virtualMachineSize: nodeSize,
                                                          targetDedicated: nodeCount);
                await unboundPool.CommitAsync();

                // Create the job and specify that it uses tasks dependencies.
                Console.WriteLine("Creating job [{0}]...", jobId);
                CloudJob unboundJob = batchClient.JobOperations.CreateJob( jobId,
                    new PoolInformation { PoolId = poolId });

                // IMPORTANT: This is REQUIRED for using task dependencies.
                unboundJob.UsesTaskDependencies = true;
                
                await unboundJob.CommitAsync();

                // Create the collection of tasks that will be added to the job.
                List<CloudTask> tasks = new List<CloudTask>
                {
                    // 'Rain' and 'Sun' don't depend on any other tasks
                    new CloudTask("Rain", "cmd.exe /c echo Rain"),
                    new CloudTask("Sun", "cmd.exe /c echo Sun"),
 
                    // Task 'Flowers' depends on completion of both 'Rain' and 'Sun'
                    // before it is run.
                    new CloudTask("Flowers", "cmd.exe /c echo Flowers")
                    {
                        DependsOn = TaskDependencies.OnIds("Rain", "Sun")
                    },
 
                    // Tasks 1, 2, and 3 don't depend on any other tasks. Because
                    // we will be using them for a task range dependency, we must
                    // specify string representations of integers as their ids.
                    new CloudTask("1", "cmd.exe /c echo 1"),
                    new CloudTask("2", "cmd.exe /c echo 2"),
                    new CloudTask("3", "cmd.exe /c echo 3"),
 
                    // Task 4 depends on a range of tasks, 1 through 3
                    new CloudTask("4", "cmd.exe /c echo 4")
                    {
                        // To use a range of tasks, their ids must be integer values.
                        // Note that we pass integers as parameters to TaskIdRange,
                        // but their ids (above) are string representations of the ids.
                        DependsOn = TaskDependencies.OnIdRange(1, 3)
                    },

                    // Task 5 depends on a range of tasks, 1 through 3, and 'Flowers'
                    new CloudTask("5", "cmd.exe /c echo 5")
                    {
                        DependsOn = new TaskDependencies(
                            new[] { "Flowers" },
                            new[] { new TaskIdRange(1, 3) })
                    },
                };

                // Add the tasks to the job.
                await batchClient.JobOperations.AddTaskAsync(jobId, tasks);

                // Pause execution while we wait for the tasks to complete, and notify
                // whether the tasks completed successfully.
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();
                CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        timeLimit);

                    Console.WriteLine("All tasks completed successfully.");
                    Console.WriteLine();
                }
                catch (TimeoutException e)
                {
                    Console.WriteLine(e);
                }

                // Clean up the resources we've created in the Batch account
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(poolId);
                }
            }
        }
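Reaching the Completed state does not by itself mean a task succeeded. The following minimal sketch (a hypothetical helper, not part of the original sample) lists the job's tasks with a narrow detail level and prints each exit code, which can be useful when checking whether dependent tasks were unblocked as expected.

        private static async Task PrintTaskExitCodesAsync(BatchClient batchClient, string jobId)
        {
            // Request only the id and executionInfo properties to keep the query small.
            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,executionInfo");
            List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId, detail).ToListAsync();

            foreach (CloudTask task in tasks)
            {
                // ExitCode is only populated for tasks that have actually run on a node.
                int? exitCode = task.ExecutionInformation == null ? (int?)null : task.ExecutionInformation.ExitCode;
                Console.WriteLine("Task {0} exit code: {1}", task.Id, exitCode);
            }
        }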
Example #27
        /// <summary>
        /// Populates Azure Storage with the required files, and 
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());
            
            //Upload resources if required.
            if (this.configurationSettings.ShouldUploadResources)
            {
                Console.WriteLine("Splitting file: {0} into {1} subfiles", 
                    Constants.TextFilePath, 
                    this.configurationSettings.NumberOfMapperTasks);

                //Split the text file into the correct number of files for consumption by the mapper tasks.
                FileSplitter splitter = new FileSplitter();
                List<string> mapperTaskFiles = await splitter.SplitAsync(
                    Constants.TextFilePath, 
                    this.configurationSettings.NumberOfMapperTasks);
                
                await this.UploadResourcesAsync(mapperTaskFiles);
            }

            //Generate a SAS for the container.
            string containerSasUrl = Helpers.ConstructContainerSas(
                this.configurationSettings.StorageAccountName,
                this.configurationSettings.StorageAccountKey,
                this.configurationSettings.StorageServiceUrl,
                this.configurationSettings.BlobContainer);

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                //
                // Construct the job properties in local memory before committing them to the Batch Service.
                //

                //Allow enough compute nodes in the pool to run each mapper task, and 1 extra to run the job manager.
                int numberOfPoolComputeNodes = 1 + this.configurationSettings.NumberOfMapperTasks;

                //Define the pool specification for the pool which the job will run on.
                PoolSpecification poolSpecification = new PoolSpecification()
                    {
                        TargetDedicated = numberOfPoolComputeNodes,
                        VirtualMachineSize = "small",
                        //You can learn more about os families and versions at: 
                        //http://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix
                        OSFamily = "4",
                        TargetOSVersion = "*"
                    };

                //Use the auto pool feature of the Batch Service to create a pool when the job is created.
                //This creates a new pool for each job which is added.
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                    {
                        AutoPoolIdPrefix= "TextSearchPool",
                        KeepAlive = false,
                        PoolLifetimeOption = PoolLifetimeOption.Job,
                        PoolSpecification = poolSpecification
                    };

                //Define the pool information for this job -- it will run on the pool defined by the auto pool specification above.
                PoolInformation poolInformation = new PoolInformation()
                    {
                        AutoPoolSpecification = autoPoolSpecification
                    };
                
                //Define the job manager for this job.  This job manager will run first and will submit the tasks for 
                //the job.  The job manager is the executable which manages the lifetime of the job
                //and all tasks which should run for the job.  In this case, the job manager submits the mapper and reducer tasks.
                List<ResourceFile> jobManagerResourceFiles = Helpers.GetResourceFiles(containerSasUrl, Constants.RequiredExecutableFiles);
                const string jobManagerTaskId = "JobManager";

                JobManagerTask jobManagerTask = new JobManagerTask()
                    {
                        ResourceFiles = jobManagerResourceFiles,
                        CommandLine = Constants.JobManagerExecutable,

                        //Determines if the job should terminate when the job manager process exits.
                        KillJobOnCompletion = true,
                        Id = jobManagerTaskId
                    };

                //Create the unbound job in local memory.  An object which exists only in local memory (and not on the Batch Service) is "unbound".
                string jobId = Environment.GetEnvironmentVariable("USERNAME") + DateTime.UtcNow.ToString("yyyyMMdd-HHmmss");

                CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, poolInformation);
                unboundJob.JobManagerTask = jobManagerTask; //Assign the job manager task to this job

                try
                {
                    //Commit the unbound job to the Batch Service.
                    Console.WriteLine("Adding job: {0} to the Batch Service.", unboundJob.Id);
                    await unboundJob.CommitAsync(); //Issues a request to the Batch Service to add the job which was defined above.

                    //
                    // Wait for the job manager task to complete.
                    //
                    
                    //An object which is backed by a corresponding Batch Service object is "bound."
                    CloudJob boundJob = await batchClient.JobOperations.GetJobAsync(jobId);

                    CloudTask boundJobManagerTask = await boundJob.GetTaskAsync(jobManagerTaskId);

                    TimeSpan maxJobCompletionTimeout = TimeSpan.FromMinutes(30);
                    
                    // Monitor the current tasks to see when they are done.
                    // Occasionally a task may get killed and requeued during an upgrade or hardware failure, including the job manager
                    // task.  The job manager will be re-run in this case.  Robustness against this was not added into the sample for 
                    // simplicity, but should be added into any production code.
                    Console.WriteLine("Waiting for job's tasks to complete");

                    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                    bool timedOut = await taskStateMonitor.WaitAllAsync(new List<CloudTask> { boundJobManagerTask }, TaskState.Completed, maxJobCompletionTimeout);

                    Console.WriteLine("Done waiting for job manager task.");

                    await boundJobManagerTask.RefreshAsync();

                    if (timedOut)
                    {
                        throw new TimeoutException("Timed out waiting for job manager task to complete.");
                    }

                    //Check to ensure the job manager task exited successfully.
                    await Helpers.CheckForTaskSuccessAsync(boundJobManagerTask, dumpStandardOutOnTaskSuccess: false);

                    //
                    // Download and write out the reducer tasks output
                    //
                    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                        new StorageCredentials(
                            this.configurationSettings.StorageAccountName,
                            this.configurationSettings.StorageAccountKey),
                        this.configurationSettings.StorageServiceUrl,
                        useHttps: true);

                    string reducerText = await Helpers.DownloadBlobTextAsync(cloudStorageAccount, this.configurationSettings.BlobContainer, Constants.ReducerTaskResultBlobName);
                    Console.WriteLine("Reducer reuslts:");
                    Console.WriteLine(reducerText);

                }
                finally
                {
                    //Delete the job.
                    //This will delete the auto pool associated with the job as long as the pool
                    //keep alive property is set to false.
                    if (this.configurationSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job {0}", jobId);
                        batchClient.JobOperations.DeleteJob(jobId);
                    }

                    //Note that there were files uploaded to a container specified in the 
                    //configuration file.  This container will not be deleted or cleaned up by this sample.
                }
            }
        }
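The job submitter validates the job manager task with Helpers.CheckForTaskSuccessAsync, which is not shown. A minimal sketch follows, assuming success means a zero exit code and that the standard output/error file names match those used elsewhere in these samples; the real helper may do more (for example, report scheduling errors).

        public static async Task CheckForTaskSuccessAsync(CloudTask boundTask, bool dumpStandardOutOnTaskSuccess)
        {
            if (boundTask.ExecutionInformation == null || boundTask.ExecutionInformation.ExitCode != 0)
            {
                // Surface stderr to make the failure easier to diagnose.
                NodeFile standardError = await boundTask.GetNodeFileAsync("stderr.txt");
                string standardErrorText = await standardError.ReadAsStringAsync();

                throw new InvalidOperationException(string.Format(
                    "Task {0} did not exit successfully. stderr:{1}{2}",
                    boundTask.Id,
                    Environment.NewLine,
                    standardErrorText));
            }

            if (dumpStandardOutOnTaskSuccess)
            {
                NodeFile standardOut = await boundTask.GetNodeFileAsync("stdout.txt");
                Console.WriteLine(await standardOut.ReadAsStringAsync());
            }
        }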
Example #28
        public static void ReCreatePool()
        {
            Log("Recreate pool");

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey);
            using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster
            {
                {
                    bool found = false;
                    foreach (var p in client.PoolOperations.ListPools(new ODATADetailLevel(filterClause: "id eq '" + Settings.poolname + "'")))
                    {
                        found = true;
                        break;
                    }

                    if (found)
                    {
                        Log("Deleting current pool...");
                        client.PoolOperations.DeletePool(Settings.poolname);
                        Log("Delete command sent.");

                        while (found)
                        {
                            found = false;
                            Thread.Sleep(1000);
                            Log("Waiting pool to be deleted.");
                            foreach (var p in client.PoolOperations.ListPools(new ODATADetailLevel(filterClause: "id eq '" + Settings.poolname + "'")))
                            {
                                found = true;
                                break;
                            }
                        }
                        Log("Pool deleted.");
                    }

                    #region resource file
                    List<ResourceFile> resources = new List<ResourceFile>();
                    foreach (string blob in StorageHelper.ListBlobs(Settings.resourceContainer))
                    {
                        string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath);
                        resources.Add(new ResourceFile(StorageHelper.GetBlobSASURL(blob), filename));
                    }
                    #endregion

                    CloudPool pool = client.PoolOperations.CreatePool(Settings.poolname, "4", "medium", 10);
                    pool.StartTask = new StartTask();
                    pool.StartTask.ResourceFiles = resources;
                    pool.StartTask.CommandLine = @"cmd /c copy *.* %WATASK_TVM_ROOT_DIR%\shared\";
                    pool.StartTask.WaitForSuccess = true;
                    Log("Creating the new pool...");
                    pool.Commit();
                    Log("Pool created.");
                }
            }
        }
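ReCreatePool depends on a StorageHelper that enumerates the resource blobs and produces read SAS URLs for them. A minimal sketch with the classic Microsoft.WindowsAzure.Storage client is below; the Settings.storageAccount and Settings.storageKey names and the 24-hour SAS lifetime are assumptions.

        public static class StorageHelper
        {
            private static CloudBlobClient GetBlobClient()
            {
                CloudStorageAccount account = new CloudStorageAccount(
                    new StorageCredentials(Settings.storageAccount, Settings.storageKey),
                    useHttps: true);
                return account.CreateCloudBlobClient();
            }

            // Return the absolute URI of every blob in the given container.
            public static IEnumerable<string> ListBlobs(string containerName)
            {
                CloudBlobContainer container = GetBlobClient().GetContainerReference(containerName);
                foreach (IListBlobItem item in container.ListBlobs(useFlatBlobListing: true))
                {
                    yield return item.Uri.AbsoluteUri;
                }
            }

            // Return a read-only SAS URL for the given blob URI.
            public static string GetBlobSASURL(string blobUri)
            {
                CloudBlockBlob blob = new CloudBlockBlob(new Uri(blobUri), GetBlobClient().Credentials);
                string sasToken = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy
                {
                    Permissions = SharedAccessBlobPermissions.Read,
                    SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddHours(24)
                });
                return blob.Uri + sasToken;
            }
        }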
Example #29
        public static void Main(string[] args)
        {
            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            StorageCredentials storageCred = new StorageCredentials(AccountSettings.Default.StorageAccountName,
                                                                    AccountSettings.Default.StorageAccountKey);

            string jobId = "PersistOutput-" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
            const string poolId = "PersistOutputsSamplePool";
            const int nodeCount = 1;
            const string appPackageId = "PersistOutputsTask";
            const string appPackageVersion = "1.0";

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create and configure an unbound pool.
                CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: poolId,
                    virtualMachineSize: "small",
                    targetDedicated: nodeCount,
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));

                // Specify the application and version to deploy to the compute nodes. You must
                // first build PersistOutputsTask, then upload it as an application package.
                // See https://azure.microsoft.com/documentation/articles/batch-application-packages/
                pool.ApplicationPackageReferences = new List<ApplicationPackageReference>
                {
                    new ApplicationPackageReference
                    {
                        ApplicationId = appPackageId,
                        Version = appPackageVersion
                    }
                };

                // Commit the pool to the Batch service
                pool.Commit();
                
                CloudJob job = batchClient.JobOperations.CreateJob(jobId, new PoolInformation { PoolId = poolId });

                CloudStorageAccount linkedStorageAccount = new CloudStorageAccount(storageCred, true);

                // Create the blob storage container for the outputs.
                job.PrepareOutputStorageAsync(linkedStorageAccount).Wait();

                // Create an environment variable on the compute nodes that the
                // task application can reference when persisting its outputs.
                string containerName = job.OutputStorageContainerName();
                CloudBlobContainer container = linkedStorageAccount.CreateCloudBlobClient().GetContainerReference(containerName);
                string containerSas = container.GetSharedAccessSignature(CreateFullAccessPolicy());
                string containerUrl = container.Uri.AbsoluteUri + containerSas;
                job.CommonEnvironmentSettings = new[] { new EnvironmentSetting("JOB_CONTAINER_URL", containerUrl) };

                // Commit the job to the Batch service
                job.Commit();
                Console.WriteLine($"Created job {jobId}");

                // Obtain the bound job from the Batch service
                job = batchClient.JobOperations.GetJob(jobId);


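                // The application package is unpacked on each node and its install location is
                // exposed through the AZ_BATCH_APP_PACKAGE_<ID>#<version> environment variable,
                // which the task command line below uses to locate PersistOutputsTask.exe.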
                IEnumerable<CloudTask> tasks = Enumerable.Range(1, 20).Select(i =>
                    new CloudTask(i.ToString().PadLeft(3, '0'), $"cmd /c %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\PersistOutputsTask.exe")
                );

                // Add the tasks to the job; the tasks are automatically
                // scheduled for execution on the nodes by the Batch service.
                job.AddTask(tasks);

                Console.WriteLine($"All tasks added to job {job.Id}");
                Console.WriteLine();

                foreach (CloudTask task in CompletedTasks(job))
                {
                    Console.Write($"Task {task.Id} completed, ");
                    foreach (OutputFileReference output in task.OutputStorage(linkedStorageAccount).ListOutputs(TaskOutputKind.TaskOutput))
                    {
                        Console.WriteLine($"output file: {output.FilePath}");
                        output.DownloadToFileAsync($"{jobId}-{output.FilePath}", System.IO.FileMode.Create).Wait();
                    }
                }

                Console.WriteLine();
                Console.WriteLine("All tasks completed and outputs downloaded. You can view the task outputs in the Azure portal");
                Console.WriteLine("before deleting the job.");

                // Clean up the resources we've created (job, pool, and blob storage container)
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.JobOperations.DeleteJob(job.Id);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.PoolOperations.DeletePool(poolId);
                }

                Console.Write("Delete storage container? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    container.Delete();
                }

                Console.WriteLine();
                Console.WriteLine("Sample complete, hit ENTER to exit...");
                Console.ReadLine();
            }
        }
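CreateFullAccessPolicy and CompletedTasks are helpers defined elsewhere in this sample and are not shown above. Minimal sketches under assumed semantics (a read/write/list SAS policy for the output container, and a simple polling loop over the job's tasks; System.Linq and System.Collections.Generic usings assumed) could look like this:

        // Sketch only: a SAS policy granting the tasks read/write/list access to the
        // output container for one day.
        private static SharedAccessBlobPolicy CreateFullAccessPolicy()
        {
            return new SharedAccessBlobPolicy
            {
                Permissions = SharedAccessBlobPermissions.Read
                            | SharedAccessBlobPermissions.Write
                            | SharedAccessBlobPermissions.List,
                SharedAccessExpiryTime = DateTime.UtcNow.AddDays(1)
            };
        }

        // Sketch only: naive polling that yields each task the first time it is observed
        // in the Completed state, until every task in the job has completed.
        private static IEnumerable<CloudTask> CompletedTasks(CloudJob job)
        {
            HashSet<string> remaining = new HashSet<string>(job.ListTasks().Select(t => t.Id));
            while (remaining.Count > 0)
            {
                foreach (CloudTask task in job.ListTasks())
                {
                    if (remaining.Contains(task.Id) && task.State == TaskState.Completed)
                    {
                        remaining.Remove(task.Id);
                        yield return task;
                    }
                }
                if (remaining.Count > 0)
                {
                    System.Threading.Thread.Sleep(TimeSpan.FromSeconds(15));
                }
            }
        }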
Example #30
        private static async Task MainAsync(string[] args)
        {
            const string poolId = "JobPrepReleaseSamplePool";
            const string jobId  = "JobPrepReleaseSampleJob";

            // Location of the file that the job tasks will work with, a text file in the
            // node's "shared" directory.
            const string taskOutputFile = "%AZ_BATCH_NODE_SHARED_DIR%\\job_prep_and_release.txt";

            // The job prep task will write the node ID to the text file in the shared directory
            const string jobPrepCmdLine = "cmd /c echo %AZ_BATCH_NODE_ID% tasks: >" + taskOutputFile;

            // Each task then echoes its ID to the same text file
            const string taskCmdLine = "cmd /c echo   %AZ_BATCH_TASK_ID% >> " + taskOutputFile;

            // The job release task will then delete the text file from the shared directory
            const string jobReleaseCmdLine = "cmd /c del " + taskOutputFile;

            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            // Initialize the BatchClient for access to your Batch account
            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool (or obtain an existing pool with the specified ID)
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                      poolId,
                                                                      "small",
                                                                      2,
                                                                      1);
                
                // Create a CloudJob (or obtain an existing job with the specified ID)
                CloudJob job = await SampleHelpers.GetJobIfExistAsync(batchClient, jobId);
                if (job == null)
                {
                    Console.WriteLine("Job {0} not found, creating...", jobId);

                    CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = poolId });

                    // Configure and assign the job preparation task
                    unboundJob.JobPreparationTask = new JobPreparationTask { CommandLine = jobPrepCmdLine };

                    // Configure and assign the job release task
                    unboundJob.JobReleaseTask = new JobReleaseTask { CommandLine = jobReleaseCmdLine };

                    await unboundJob.CommitAsync();

                    // Get the bound version of the job with all of its properties populated
                    job = await batchClient.JobOperations.GetJobAsync(jobId);
                }
                
                // Create the tasks that the job will execute
                List<CloudTask> tasks = new List<CloudTask>();
                for (int i = 1; i <= 8; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(3, '0');
                    string taskCommandLine = taskCmdLine;
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
                // submission helps to ensure efficient underlying API calls to the Batch service.
                Console.WriteLine("Submitting tasks and awaiting completion...");
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
                // for the nodes within the pool to be created and started if the pool had not yet been created.
                if (await batchClient.Utilities.CreateTaskStateMonitor().WhenAllAsync(job.ListTasks(),
                                                                   TaskState.Completed,
                                                                   TimeSpan.FromMinutes(30)))
                {
                    Console.WriteLine("Operation timed out while waiting for submitted tasks to reach state {0}", TaskState.Completed);

                    return;
                }
                else
                {
                    Console.WriteLine("All tasks completed.");
                    Console.WriteLine();
                }

                // Print the contents of the shared text file modified by the job preparation and other tasks.
                ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id, state");
                IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(pool.Id, nodeDetail);
                await nodes.ForEachAsync(async (node) =>
                {
                    // Check to ensure that the node is Idle before attempting to pull the text file.
                    // If the pool was just created, there is a chance that another node completed all
                    // of the tasks prior to the other node(s) completing their startup procedure.
                    if (node.State == ComputeNodeState.Idle)
                    {
                        NodeFile sharedTextFile = await node.GetNodeFileAsync("shared\\job_prep_and_release.txt");
                        Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Name, node.Id);
                        Console.WriteLine("-------------------------------------------");
                        Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
                    }
                });

                // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
                // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
                // thus you need not call Terminate if you typically delete your jobs upon task completion.
                await batchClient.JobOperations.TerminateJobAsync(job.Id);

                // Wait for the job to reach state "Completed." Note that this wait is not typically necessary in
                // production code, but is done here to enable the checking of the release tasks exit code below.
                await ArticleHelpers.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

                // Print the exit codes of the prep and release tasks by obtaining their execution info
                List<JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();
                foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
                {
                    Console.WriteLine();
                    Console.WriteLine("{0}: ", info.ComputeNodeId);

                    // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
                    if (info.JobPreparationTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Prep task exit code:    {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
                    }

                    // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
                    if (info.JobReleaseTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
                    }
                }

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    // Note that deleting the job will execute the job release task if the job was not previously terminated
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
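ArticleHelpers.CreatePoolIfNotExistAsync comes from the samples' common project and is not shown here. A minimal sketch, assuming the last two arguments are the dedicated node count and the maximum tasks per compute node, and using the same CloudServiceConfiguration-style pool creation as the other examples, could be:

        // Sketch only: create the pool if no pool with this ID exists, then return the bound pool.
        private static async Task<CloudPool> CreatePoolIfNotExistAsync(
            BatchClient batchClient, string poolId, string vmSize, int targetDedicated, int maxTasksPerNode)
        {
            ODATADetailLevel idOnly = new ODATADetailLevel(selectClause: "id");
            List<CloudPool> pools = await batchClient.PoolOperations.ListPools(idOnly).ToListAsync();
            if (!pools.Any(p => string.Equals(p.Id, poolId, StringComparison.OrdinalIgnoreCase)))
            {
                CloudPool unboundPool = batchClient.PoolOperations.CreatePool(
                    poolId: poolId,
                    virtualMachineSize: vmSize,
                    targetDedicated: targetDedicated,
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));
                unboundPool.MaxTasksPerComputeNode = maxTasksPerNode;
                await unboundPool.CommitAsync();
            }
            return await batchClient.PoolOperations.GetPoolAsync(poolId);
        }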
Example #31
        /// <summary>
        /// Runs the job manager task.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, job: {1} has started...",
                this.accountName,
                this.jobId);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials batchSharedKeyCredentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey),
                this.configurationSettings.StorageServiceUrl,
                useHttps: true);

            using (BatchClient batchClient = await BatchClient.OpenAsync(batchSharedKeyCredentials))
            {
                //Construct a container SAS to provide the Batch Service access to the files required to
                //run the mapper and reducer tasks.
                string containerSas = SampleHelpers.ConstructContainerSas(
                    cloudStorageAccount,
                    this.configurationSettings.BlobContainer);

                //
                // Submit mapper tasks.
                //
                await this.SubmitMapperTasksAsync(batchClient, containerSas);

                //
                // Wait for the mapper tasks to complete.
                //
                await this.WaitForMapperTasksToCompleteAsync(batchClient);
                    
                //
                // Create the reducer task.
                //
                await this.SubmitReducerTaskAsync(batchClient, containerSas);

                //
                // Wait for the reducer task to complete.
                //
                string textToUpload = await this.WaitForReducerTaskToCompleteAsync(batchClient);

                //
                // Upload the results of the reducer task to Azure storage for consumption later
                //

                await SampleHelpers.UploadBlobTextAsync(cloudStorageAccount, this.configurationSettings.BlobContainer, Constants.ReducerTaskResultBlobName, textToUpload);

                //The job manager has completed.
                Console.WriteLine("JobManager completed successfully.");
            }
        }
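SampleHelpers.ConstructContainerSas is also defined in the samples' common project. A minimal sketch, assuming a read/list SAS with a short expiry appended to the container URL, might be:

        // Sketch only: build a container URL with a read/list SAS that the Batch service
        // can use to download the mapper and reducer task resource files.
        private static string ConstructContainerSas(CloudStorageAccount account, string containerName)
        {
            CloudBlobContainer container = account.CreateCloudBlobClient().GetContainerReference(containerName);
            SharedAccessBlobPolicy policy = new SharedAccessBlobPolicy
            {
                Permissions = SharedAccessBlobPermissions.Read | SharedAccessBlobPermissions.List,
                SharedAccessExpiryTime = DateTime.UtcNow.AddHours(2)
            };
            return container.Uri.AbsoluteUri + container.GetSharedAccessSignature(policy);
        }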
Example #32
        static void Main(string[] args)
        {
            CreateStorage();
            CreateFiles();

            // Supply your own Batch account URL, name, and shared key here;
            // avoid hard-coding a real account key in source code.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                "https://mybatch00.westus.batch.azure.com",
                "mybatch00",
                "<your-batch-account-key>"
                );
            BatchClient client = BatchClient.Open(cred);

            CreatePool(client);
            ListPools(client);

            CreateJob(client);
            ListJobs(client);

            AddTasks(client);
            ListTasks(client);

            DeleteTasks(client);
            DeleteJob(client);
            DeletePool(client);
        }
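The CreatePool, CreateJob, AddTasks, and related helpers called above are defined elsewhere in this example. As an illustration only, a CreatePool helper in the style of the other samples, with a hypothetical pool ID and size, might look like this:

        // Sketch only: create a small Cloud Services pool; the pool ID and sizes are hypothetical.
        private static void CreatePool(BatchClient client)
        {
            CloudPool pool = client.PoolOperations.CreatePool(
                poolId: "mypool00",
                virtualMachineSize: "small",
                targetDedicated: 2,
                cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));
            pool.Commit();
        }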