        /// <summary>
        /// Creates a pool if it doesn't already exist.  If the pool already exists, this method resizes it to meet the expected
        /// targets specified in settings.
        /// </summary>
        /// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
        /// <param name="cloudStorageAccount">The CloudStorageAccount to upload start task required files to.</param>
        /// <returns>An asynchronous <see cref="Task"/> representing the operation.</returns>
        private async Task CreatePoolIfNotExistAsync(BatchClient batchClient, CloudStorageAccount cloudStorageAccount)
        {
            // You can learn more about os families and versions at:
            // https://azure.microsoft.com/en-us/documentation/articles/cloud-services-guestos-update-matrix/
            CloudPool pool = batchClient.PoolOperations.CreatePool(
                poolId: this.jobManagerSettings.PoolId,
                targetDedicated: this.jobManagerSettings.PoolTargetNodeCount,
                virtualMachineSize: this.jobManagerSettings.PoolNodeVirtualMachineSize,
                cloudServiceConfiguration: new CloudServiceConfiguration(this.jobManagerSettings.PoolOSFamily));
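            // CloudServiceConfiguration targets Cloud Services (PaaS) nodes; pools can also be created from a
            // VirtualMachineConfiguration with a marketplace ImageReference, as some of the later examples do.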

            // Create a new start task to facilitate pool-wide file management or installation.
            // In this case, we just add a single dummy data file to the StartTask.
            string        localSampleFilePath = GettingStartedCommon.GenerateTemporaryFile("StartTask.txt", "hello from Batch JobManager sample!");
            List <string> files = new List <string> {
                localSampleFilePath
            };

            List <ResourceFile> resourceFiles = await SampleHelpers.UploadResourcesAndCreateResourceFileReferencesAsync(
                cloudStorageAccount,
                this.jobManagerSettings.BlobContainer,
                files);

            pool.StartTask = new StartTask()
            {
                CommandLine   = "cmd /c dir",
                ResourceFiles = resourceFiles
            };

            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);
        }
Example No. 2
        /// <summary>
        /// Creates a job and adds a task to it. The task is a
        /// custom executable which has a resource file associated with it.
        /// </summary>
        /// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
        /// <param name="cloudStorageAccount">The storage account to upload the files to.</param>
        /// <param name="jobId">The ID of the job.</param>
        /// <returns>The set of container names containing the job's input files.</returns>
        private async Task <HashSet <string> > SubmitJobAsync(BatchClient batchClient, CloudStorageAccount cloudStorageAccount, string jobId)
        {
            // create an empty unbound Job
            CloudJob unboundJob = batchClient.JobOperations.CreateJob();

            unboundJob.Id = jobId;
            unboundJob.PoolInformation = new PoolInformation()
            {
                PoolId = this.poolsAndResourceFileSettings.PoolId
            };

            // Commit Job to create it in the service
            await unboundJob.CommitAsync();

            List <CloudTask> tasksToRun = new List <CloudTask>();

            // Create a task which requires some resource files
            CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            string localSampleFile = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "HelloWorld.txt");

            File.WriteAllText(localSampleFile, "hello from Batch PoolsAndResourceFiles sample!");

            StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                storageAccount: this.accountSettings.StorageAccountName,
                storageAccountKey: this.accountSettings.StorageAccountKey,
                blobEndpoint: cloudStorageAccount.BlobEndpoint.ToString());

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage helloWorldFile = new FileToStage(localSampleFile, fileStagingStorageAccount);
            FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

            // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
            // The Batch service does not automatically delete content from your storage account, so files added in this
            // way must be manually removed when they are no longer used.
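            // (The container created by this staging is reported back through the fileStagingArtifacts bag below,
            // so the caller can delete it once the job has finished.)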
            taskWithFiles.FilesToStage.Add(helloWorldFile);
            taskWithFiles.FilesToStage.Add(simpleTaskFile);

            tasksToRun.Add(taskWithFiles);

            var fileStagingArtifacts = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();

            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts : fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet <string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);

            return(blobContainerNames);
        }
Example No. 3
        /// <summary>
        /// Creates a CloudPool with a single compute node associated with the Batch account.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="poolId">The ID of the <see cref="CloudPool"/>.</param>
        private async static Task CreatePool(BatchClient batchClient, string poolId)
        {
            // Create and configure an unbound pool with the specified ID
            CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                                   osFamily: "3",
                                                                   virtualMachineSize: "small",
                                                                   targetDedicated: 1);

            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);
        }
Example No. 4
        // Creates a pool so that the sample jobs have somewhere to run, so that they can
        // make progress and you can see their progress being tracked by the MetricMonitor.
        private async Task CreatePoolAsync()
        {
            var pool = this.batchClient.PoolOperations.CreatePool(
                poolId: PoolId,
                targetDedicated: PoolNodeCount,
                virtualMachineSize: PoolNodeSize,
                cloudServiceConfiguration: new CloudServiceConfiguration(PoolOSFamily));

            pool.MaxTasksPerComputeNode = 2;

            var createPoolResult = await GettingStartedCommon.CreatePoolIfNotExistAsync(this.batchClient, pool);

            this.createdNewPool = (createPoolResult == CreatePoolResult.CreatedNew);
        }
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, job: {1} has started...",
                              this.accountName,
                              this.jobId);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchAccountUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            CloudStorageAccount storageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey),
                new Uri(this.configurationSettings.StorageBlobEndpoint),
                null,   // queue endpoint (not used by this sample)
                null,   // table endpoint (not used)
                null);  // file endpoint (not used)

            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                HashSet <string> blobContainerNames = new HashSet <string>();

                try
                {
                    // Submit some tasks
                    blobContainerNames = await this.SubmitTasks(batchClient);

                    // Wait for the tasks to finish
                    List <CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

                    // don't wait for the job manager task since it won't finish until this method exits
                    tasks.RemoveAll(t => t.Id.Equals(this.taskId, StringComparison.CurrentCultureIgnoreCase));

                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, tasks, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Clean up the files for the tasks
                    SampleHelpers.DeleteContainersAsync(storageAccount, blobContainerNames).Wait();
                }
            }
        }
Example No. 6
        /// <summary>
        /// Creates a CloudPool associated with the Batch account. If an existing pool with the specified ID is found,
        /// the pool is resized to match the specified node count.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="poolId">The ID of the <see cref="CloudPool"/>.</param>
        /// <param name="nodeSize">The size of the nodes within the pool.</param>
        /// <param name="nodeCount">The number of nodes to create within the pool.</param>
        /// <param name="maxTasksPerNode">The maximum number of tasks to run concurrently on each node.</param>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        private async static Task CreatePoolAsync(BatchClient batchClient, string poolId, string nodeSize, int nodeCount, int maxTasksPerNode)
        {
            // Create and configure an unbound pool with the specified ID
            CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                                   osFamily: "3",
                                                                   virtualMachineSize: nodeSize,
                                                                   targetDedicated: nodeCount);

            pool.MaxTasksPerComputeNode = maxTasksPerNode;

            // We want each node to be completely filled with tasks (i.e. up to maxTasksPerNode) before
            // tasks are assigned to the next node in the pool
            pool.TaskSchedulingPolicy = new TaskSchedulingPolicy(ComputeNodeFillType.Pack);
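            // (ComputeNodeFillType.Spread is the alternative: it distributes tasks evenly across all nodes
            // instead of filling one node before moving on to the next.)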

            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);
        }
        /// <summary>
        /// Submits a set of tasks to the job
        /// </summary>
        /// <param name="batchClient">The batch client to use.</param>
        /// <returns>The set of blob artifacts created by file staging.</returns>
        private async Task <HashSet <string> > SubmitTasks(BatchClient batchClient)
        {
            List <CloudTask> tasksToRun = new List <CloudTask>();

            // Create a task which requires some resource files
            CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            string localSampleFilePath = GettingStartedCommon.GenerateTemporaryFile("HelloWorld.txt", "hello from Batch JobManager sample!");

            StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                storageAccount: this.configurationSettings.StorageAccountName,
                storageAccountKey: this.configurationSettings.StorageAccountKey,
                blobEndpoint: this.configurationSettings.StorageBlobEndpoint);

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage helloWorldFile = new FileToStage(localSampleFilePath, fileStagingStorageAccount);
            FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

            // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
            // The Batch service does not automatically delete content from your storage account, so files added in this
            // way must be manually removed when they are no longer used.
            taskWithFiles.FilesToStage.Add(helloWorldFile);
            taskWithFiles.FilesToStage.Add(simpleTaskFile);

            tasksToRun.Add(taskWithFiles);

            var fileStagingArtifacts = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();

            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts : fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet <string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);

            return(blobContainerNames);
        }
Example No. 8
        private static async Task CreatePoolAsync(BatchClient batchClient, string poolId, int nodeCount)
        {
            Func <ImageReference, bool> imageScanner = imageRef =>
                                                       imageRef.Publisher == "MicrosoftWindowsServer" &&
                                                       imageRef.Offer == "WindowsServer" &&
                                                       imageRef.Sku.Contains("2012-R2-Datacenter");

            SampleHelpers.SkuAndImage skuAndImage = await SampleHelpers.GetNodeAgentSkuReferenceAsync(batchClient, imageScanner);

            // Create and configure an unbound pool.
            CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                                   virtualMachineSize: "standard_d1_v2",
                                                                   targetDedicatedComputeNodes: nodeCount,
                                                                   virtualMachineConfiguration: new VirtualMachineConfiguration(
                                                                       skuAndImage.Image,
                                                                       skuAndImage.Sku.Id));

            // Commit the pool to the Batch service
            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);
        }
Example No. 9
        private static async Task CreatePoolAsync(BatchClient batchClient, string poolId, int nodeCount)
        {
            Func <ImageReference, bool> imageScanner = imageRef =>
                                                       imageRef.Publisher.Equals("MicrosoftWindowsServer", StringComparison.InvariantCultureIgnoreCase) &&
                                                       imageRef.Offer.Equals("WindowsServer", StringComparison.InvariantCultureIgnoreCase) &&
                                                       imageRef.Sku.IndexOf("2012-R2-Datacenter", StringComparison.InvariantCultureIgnoreCase) > -1;

            ImageInformation imageInfo = await SampleHelpers.GetNodeAgentSkuReferenceAsync(batchClient, imageScanner);

            // Create and configure an unbound pool.
            CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                                   virtualMachineSize: "standard_d1_v2",
                                                                   targetDedicatedComputeNodes: nodeCount,
                                                                   virtualMachineConfiguration: new VirtualMachineConfiguration(
                                                                       imageInfo.ImageReference,
                                                                       imageInfo.NodeAgentSkuId));

            // Commit the pool to the Batch service
            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);
        }
Example No. 10
        /// <summary>
        /// Submits a job to the Azure Batch service, and waits for it to complete
        /// </summary>
        private static async Task HelloWorldAsync(AccountSettings accountSettings, Settings helloWorldConfigurationSettings)
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(helloWorldConfigurationSettings.ToString());
            Console.WriteLine(accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));
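                // An exponential back-off policy can be attached the same way; for example (values are illustrative):
                // batchClient.CustomBehaviors.Add(RetryPolicyProvider.ExponentialRetryProvider(TimeSpan.FromSeconds(5), 3));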

                string jobId = GettingStartedCommon.CreateJobId("HelloWorldJob");

                try
                {
                    // Submit the job
                    await SubmitJobAsync(batchClient, helloWorldConfigurationSettings, jobId);

                    // Wait for the job to complete
                    await WaitForJobAndPrintOutputAsync(batchClient, jobId);
                }
                finally
                {
                    // Delete the job to ensure the tasks are cleaned up
                    if (!string.IsNullOrEmpty(jobId) && helloWorldConfigurationSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", jobId);
                        batchClient.JobOperations.DeleteJob(jobId);
                    }
                }
            }
        }
Example No. 11
        public async static Task JobMain(string[] args)
        {
            //Load the configuration
            Settings        topNWordsConfiguration = Settings.Default;
            AccountSettings accountSettings        = AccountSettings.Default;

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "small",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));

                List <string> files = new List <string>
                {
                    Path.Combine(BatchStartTaskFolderName, BatchStartTaskTelemetryRunnerName),
                };

                files.AddRange(AIFilesToUpload);

                List <ResourceFile> resourceFiles = await SampleHelpers.UploadResourcesAndCreateResourceFileReferencesAsync(
                    cloudStorageAccount,
                    AIBlobConatinerName,
                    files);

                pool.StartTask = new StartTask()
                {
                    CommandLine   = string.Format("cmd /c {0}", BatchStartTaskTelemetryRunnerName),
                    ResourceFiles = resourceFiles
                };

                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                try
                {
                    await GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool);
                }
                catch (AggregateException ae)
                {
                    // Go through all exceptions and dump useful information
                    ae.Handle(x =>
                    {
                        Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;

                            Console.WriteLine(be.ToString());
                            Console.WriteLine();
                        }
                        else
                        {
                            Console.WriteLine(x);
                        }

                        // can't continue without a pool
                        return(false);
                    });
                }
                catch (BatchException be)
                {
                    Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                    Console.WriteLine(be.ToString());
                    Console.WriteLine();
                }

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    await unboundJob.CommitAsync();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // Upload application insights assemblies
                    List <FileToStage> aiStagedFiles = new List <FileToStage>();
                    foreach (string aiFile in AIFilesToUpload)
                    {
                        aiStagedFiles.Add(new FileToStage(aiFile, stagingStorageAccount));
                    }

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    string[] documents = Directory.GetFiles(topNWordsConfiguration.DocumentsRootPath);
                    await SampleHelpers.UploadResourcesAsync(cloudStorageAccount, BooksContainerName, documents);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(documents.Length);

                    for (int i = 0; i < documents.Length; i++)
                    {
                        // Point each task at the blob uploaded above; this assumes the uploaded blob name matches the local file name.
                        string documentBlobUrl = string.Format("https://{0}.blob.core.windows.net/{1}/{2}",
                                                               accountSettings.StorageAccountName,
                                                               BooksContainerName,
                                                               Path.GetFileName(documents[i]));

                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     documentBlobUrl,
                                                                                     topNWordsConfiguration.TopWordCount,
                                                                                     accountSettings.StorageAccountName,
                                                                                     accountSettings.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                        };

                        foreach (FileToStage stagedFile in aiStagedFiles)
                        {
                            task.FilesToStage.Add(stagedFile);
                        }

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
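                    // Select only the task IDs so that polling for task state returns as little data as possible.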
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);

                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
Example No. 12
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.poolsAndResourceFileSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            // Set up the storage account used to upload the task input files; the containers created there are deleted once the job is done
            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(this.accountSettings.StorageAccountName,
                                       this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                string jobId = null;

                // Track the containers which are created as part of job submission so that we can clean them up later.
                HashSet <string> blobContainerNames = new HashSet <string>();

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient, cloudStorageAccount);

                    // Submit the job
                    jobId = GettingStartedCommon.CreateJobId("SimpleJob");
                    blobContainerNames = await this.SubmitJobAsync(batchClient, cloudStorageAccount, jobId);

                    // Print out the status of the pools/jobs under this account
                    await GettingStartedCommon.PrintJobsAsync(batchClient);

                    await GettingStartedCommon.PrintPoolsAsync(batchClient);

                    // Wait for the job to complete
                    List <CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, tasks, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Delete the pool (if configured) and job
                    // TODO: In C# 6 we can await here instead of .Wait()

                    // Delete Azure Storage container data
                    SampleHelpers.DeleteContainersAsync(cloudStorageAccount, blobContainerNames).Wait();

                    // Delete Azure Batch resources
                    List <string> jobIdsToDelete  = new List <string>();
                    List <string> poolIdsToDelete = new List <string>();

                    if (this.poolsAndResourceFileSettings.ShouldDeleteJob)
                    {
                        jobIdsToDelete.Add(jobId);
                    }

                    if (this.poolsAndResourceFileSettings.ShouldDeletePool)
                    {
                        poolIdsToDelete.Add(this.poolsAndResourceFileSettings.PoolId);
                    }

                    SampleHelpers.DeleteBatchResourcesAsync(batchClient, jobIdsToDelete, poolIdsToDelete).Wait();
                }
            }
        }
Example No. 13
        public static async Task <CloudBlobContainer> Run(
            BatchClient batchClient,
            CloudStorageAccount linkedStorageAccount,
            string poolId,
            int nodeCount,
            string jobId)
        {
            const string appPackageId      = "PersistOutputsTask";
            const string appPackageVersion = "1.0";

            // Create and configure an unbound pool.
            CloudPool pool = batchClient.PoolOperations.CreatePool(
                poolId: poolId,
                virtualMachineSize: "standard_d1_v2",
                targetDedicatedComputeNodes: nodeCount,
                cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"));

            // Specify the application and version to deploy to the compute nodes. You must
            // first build PersistOutputsTask, then upload it as an application package.
            // See https://azure.microsoft.com/documentation/articles/batch-application-packages/
            pool.ApplicationPackageReferences = new List <ApplicationPackageReference>
            {
                new ApplicationPackageReference
                {
                    ApplicationId = appPackageId,
                    Version       = appPackageVersion
                }
            };

            // Commit the pool to the Batch service
            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);

            CloudJob job = batchClient.JobOperations.CreateJob(jobId, new PoolInformation {
                PoolId = poolId
            });

            // Create the blob storage container for the outputs.
            await job.PrepareOutputStorageAsync(linkedStorageAccount);

            // Create an environment variable on the compute nodes that the
            // task application can reference when persisting its outputs.
            string             containerName = job.OutputStorageContainerName();
            CloudBlobContainer container     = linkedStorageAccount.CreateCloudBlobClient().GetContainerReference(containerName);
            string             containerUrl  = job.GetOutputStorageContainerUrl(linkedStorageAccount);

            job.CommonEnvironmentSettings = new[] { new EnvironmentSetting("JOB_CONTAINER_URL", containerUrl) };

            // Commit the job to the Batch service
            await job.CommitAsync();

            Console.WriteLine($"Created job {jobId}");

            // Obtain the bound job from the Batch service
            await job.RefreshAsync();
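            // On each Windows node, %AZ_BATCH_APP_PACKAGE_<ID>#<version>% resolves to the directory where that
            // application package was extracted, which is how the command line below locates PersistOutputsTask.exe.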

            IEnumerable <CloudTask> tasks = Enumerable.Range(1, 20).Select(i =>
                                                                           new CloudTask(i.ToString().PadLeft(3, '0'), $"cmd /c %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\PersistOutputsTask.exe")
                                                                           );

            // Add the tasks to the job; the tasks are automatically
            // scheduled for execution on the nodes by the Batch service.
            await job.AddTaskAsync(tasks);

            Console.WriteLine($"All tasks added to job {job.Id}");
            Console.WriteLine();

            Console.WriteLine($"Downloading outputs to {Directory.GetCurrentDirectory()}");

            foreach (CloudTask task in job.CompletedTasks())
            {
                if (task.ExecutionInformation.Result != TaskExecutionResult.Success)
                {
                    Console.WriteLine($"Task {task.Id} failed");
                    Console.WriteLine(SampleHelpers.GetFailureInfoDetails(task.ExecutionInformation.FailureInformation));
                }
                else
                {
                    Console.WriteLine($"Task {task.Id} completed successfully");
                }
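
                // ListOutputs enumerates the blobs this task persisted under TaskOutputKind.TaskOutput
                // (using the Batch File Conventions storage layout) so each one can be downloaded locally.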

                foreach (OutputFileReference output in task.OutputStorage(linkedStorageAccount).ListOutputs(TaskOutputKind.TaskOutput))
                {
                    Console.WriteLine($"output file: {output.FilePath}");
                    await output.DownloadToFileAsync($"{jobId}-{output.FilePath}", System.IO.FileMode.Create);
                }
            }

            return(container);
        }
Example No. 14
        public static void JobMain(string[] args)
        {
            //Load the configuration
            Settings topNWordsConfiguration = new ConfigurationBuilder()
                                              .SetBasePath(Directory.GetCurrentDirectory())
                                              .AddJsonFile("settings.json")
                                              .Build()
                                              .Get <Settings>();
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 6 == Windows Server 2019. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "standard_d1_v2",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "6"));
                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                pool.TaskSchedulingPolicy   = new TaskSchedulingPolicy(ComputeNodeFillType.Spread);
                pool.MaxTasksPerComputeNode = 4;

                GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();
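                // The formula below starts the pool at 2 nodes; if fewer than 70% of the pending-task samples from the
                // last 90 seconds are available it keeps that starting size, otherwise it targets the average number of
                // pending tasks over the last 3 minutes, capped at 4 nodes. Nodes are removed only after their running
                // tasks complete ($NodeDeallocationOption = taskcompletion).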
                var formula     = @"startingNumberOfVMs = 2;
                    maxNumberofVMs = 4;
                    pendingTaskSamplePercent = $PendingTasks.GetSamplePercent(90 * TimeInterval_Second);
                    pendingTaskSamples = pendingTaskSamplePercent < 70 ? startingNumberOfVMs : avg($PendingTasks.GetSample(180 * TimeInterval_Second));
                    $TargetDedicatedNodes = min(maxNumberofVMs, pendingTaskSamples);
                    $NodeDeallocationOption = taskcompletion;";
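                // Give the initial fixed-size allocation time to settle before switching the pool to autoscale;
                // enabling autoscale while a resize is still in progress can fail.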
                var noOfSeconds = 150;
                Thread.Sleep(noOfSeconds * 1000);

                client.PoolOperations.EnableAutoScale(
                    poolId: topNWordsConfiguration.PoolId, autoscaleFormula: formula,
                    autoscaleEvaluationInterval: TimeSpan.FromMinutes(5));

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe         = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll          = new FileToStage(StorageClientDllName, stagingStorageAccount);
                    FileToStage newtonJsoftDll      = new FileToStage(NewtonJSoftDllName, stagingStorageAccount);
                    FileToStage microsoftEFDll      = new FileToStage(MicrosoftEntityFrameworkDllName, stagingStorageAccount);
                    FileToStage microsoftEFCoreDll  = new FileToStage(MicrosoftEntityFrameworkCoreDllName, stagingStorageAccount);
                    FileToStage microsoftBCLDll     = new FileToStage(MicrosoftBCLDllName, stagingStorageAccount);
                    FileToStage systemTasksDll      = new FileToStage(SystemTasksDllName, stagingStorageAccount);
                    FileToStage topNWordsConfigFile = new FileToStage(TopnWordsConfig, stagingStorageAccount);
                    FileToStage SystemValueTupleDll = new FileToStage(SystemValueTupleDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionAbstractionsDll = new FileToStage(DependecyInjectionAbstractionsDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionDll             = new FileToStage(DependecyInjectionDllName, stagingStorageAccount);
                    FileToStage LoggingAbstractionsDll             = new FileToStage(LoggingAbstractionsDllName, stagingStorageAccount);
                    FileToStage DiagnosticsDll        = new FileToStage(DiagnosticssDllName, stagingStorageAccount);
                    FileToStage CachingAbstractionDll = new FileToStage(CachingAbstractionsDllName, stagingStorageAccount);
                    FileToStage MicrosoftSqlServerDll = new FileToStage(MicrosoftSqlServerDllName, stagingStorageAccount);
                    FileToStage SystemComponentDll    = new FileToStage(SystemComponentDllName, stagingStorageAccount);
                    FileToStage SystemCollectionsDll  = new FileToStage(SystemCollectionsDllName, stagingStorageAccount);
                    FileToStage pDll                 = new FileToStage(pdllName, stagingStorageAccount);
                    FileToStage oDll                 = new FileToStage(odllName, stagingStorageAccount);
                    FileToStage lDll                 = new FileToStage(ldllName, stagingStorageAccount);
                    FileToStage hashcodeDll          = new FileToStage(hashcodeDllName, stagingStorageAccount);
                    FileToStage clientSqlDll         = new FileToStage(clientSqlClientDllName, stagingStorageAccount);
                    FileToStage cachingMemoryDll     = new FileToStage(CachingMemoryDllName, stagingStorageAccount);
                    FileToStage configAbstractionDll = new FileToStage(configAbstractionDllName, stagingStorageAccount);
                    FileToStage SNIDll               = new FileToStage(SNIDllName, stagingStorageAccount);


                    FileToStage relationDll = new FileToStage(relationddllName, stagingStorageAccount);



                    var textFile = "E:\\WeatherAPIPOC\\cities_id.txt";
                    var text     = File.ReadAllLines(textFile);
                    var cityList = new List <string>(text);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks);

                    for (int i = 0; i < cityList.Count; i++)
                    {
                        string    programLaunchTime = DateTime.Now.ToString("h:mm:sstt");
                        CloudTask task = new CloudTask(
                            id: $"task_no_{i + 1}",
                            commandline: $"cmd /c mkdir x64 & move SNI.dll x64 & {TopNWordsExeName} --Task {cityList[i]} %AZ_BATCH_NODE_ID% {programLaunchTime}");

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                            newtonJsoftDll,
                            microsoftEFDll,
                            microsoftEFCoreDll,
                            microsoftBCLDll,
                            systemTasksDll,
                            topNWordsConfigFile,
                            SystemValueTupleDll,
                            DependencyInjectionAbstractionsDll,
                            DependencyInjectionDll,
                            LoggingAbstractionsDll,
                            DiagnosticsDll,
                            CachingAbstractionDll,
                            MicrosoftSqlServerDll,
                            SystemComponentDll,
                            SystemCollectionsDll,
                            oDll,
                            pDll,
                            lDll,
                            relationDll,
                            hashcodeDll,
                            clientSqlDll,
                            cachingMemoryDll,
                            configAbstractionDll,
                            SNIDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
Example No. 15
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.jobManagerSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(this.accountSettings.StorageAccountName,
                                       this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.ExponentialRetryProvider(TimeSpan.FromSeconds(5), 3));

                string jobId = null;

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient, cloudStorageAccount);

                    // Submit the job
                    jobId = GettingStartedCommon.CreateJobId("SimpleJob");
                    await this.SubmitJobAsync(batchClient, cloudStorageAccount, jobId);

                    // Print out the status of the pools/jobs under this account
                    await GettingStartedCommon.PrintJobsAsync(batchClient);

                    await GettingStartedCommon.PrintPoolsAsync(batchClient);

                    // Wait for the job manager to complete
                    CloudTask jobManagerTask = await batchClient.JobOperations.GetTaskAsync(jobId, JobManagerTaskId);

                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, new List <CloudTask> {
                        jobManagerTask
                    }, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Delete Azure Batch resources
                    List <string> jobIdsToDelete  = new List <string>();
                    List <string> poolIdsToDelete = new List <string>();

                    if (this.jobManagerSettings.ShouldDeleteJob)
                    {
                        jobIdsToDelete.Add(jobId);
                    }

                    if (this.jobManagerSettings.ShouldDeletePool)
                    {
                        poolIdsToDelete.Add(this.jobManagerSettings.PoolId);
                    }

                    await SampleHelpers.DeleteBatchResourcesAsync(batchClient, jobIdsToDelete, poolIdsToDelete);
                }
            }
        }
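As noted in the comment where the retry policy is registered, the Batch .NET client also ships LinearRetry and NoRetry policies. The snippet below is an illustrative sketch rather than part of the sample: it swaps in a linear back-off via the SDK's RetryPolicyProvider, with the 10-second delta and 5-retry maximum chosen purely as example values.

        // Alternative (illustrative only): retry failed requests at a fixed 10-second interval, up to 5 times.
        // RetryPolicyProvider and LinearRetry come from Microsoft.Azure.Batch and Microsoft.Azure.Batch.Common.
        batchClient.CustomBehaviors.Add(
            new RetryPolicyProvider(
                new LinearRetry(TimeSpan.FromSeconds(10), 5)));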
Example No. 16
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize        = "small";
            const int    nodeCount       = 4;
            const int    maxTasksPerNode = 4;
            const int    taskCount       = 32;

            // Ensure there are enough tasks to help avoid hitting some timeout conditions below
            int minimumTaskCount = nodeCount * maxTasksPerNode * 2;
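            // With the constants above this is 4 nodes * 4 tasks per node * 2 = 32, so taskCount = 32 just meets the minimum.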

            if (taskCount < minimumTaskCount)
            {
                Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
                Console.WriteLine();

                // Not enough tasks, exit the application
                return;
            }

            // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
            // values to simulate variable task duration
            const int minPings = 30;
            const int maxPings = 60;

            const string poolId = "ParallelTasksSamplePool";
            const string jobId  = "ParallelTasksSampleJob";

            // Amount of time to wait before timing out (potentially) long-running tasks
            TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                                poolId,
                                                                                nodeSize,
                                                                                nodeCount,
                                                                                maxTasksPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // The job's tasks ping localhost a random number of times between minPings and maxPings.
                // Adjust the minPings/maxPings values above to experiment with different task durations.
                Random           rand  = new Random();
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= taskCount; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }
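
                // As an illustration, the first loop iteration produces a task with ID "task001" and a command line
                // such as "ping -n 42 localhost", where the ping count is a random value between minPings and maxPings.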

                // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
                // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be
                // scheduled to execute on a compute node as soon as any node has reached the Idle state. Because the focus of this sample
                // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to
                // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for
                // parallelization.
                await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);

                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Pause again to wait until *all* nodes are running tasks
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Print out task assignment information.
                Console.WriteLine();
                await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);

                Console.WriteLine();

                // Pause execution while we wait for all of the tasks to complete
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        longTaskDurationLimit);
                }
                catch (TimeoutException e)
                {
                    Console.WriteLine(e.ToString());
                }

                stopwatch.Stop();

                // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
                // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
                // amount of data transferred, lowering your query response times and increasing performance.
                ODATADetailLevel             detail   = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
                IPagedEnumerable <CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);
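                // The selectClause above trims each task to four properties. A filterClause (e.g. "state eq 'completed'")
                // could also push the completed/uncompleted split below to the service; this sample filters client-side
                // so that one listing can be reused for both reports.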

                // Get a collection of the completed tasks sorted by the compute nodes on which they executed
                List <CloudTask> completedTasks = allTasks
                                                  .Where(t => t.State == TaskState.Completed)
                                                  .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
                                                  .ToList();

                // Print the completed task information
                Console.WriteLine();
                Console.WriteLine("Completed tasks:");
                string lastNodeId = string.Empty;
                foreach (CloudTask task in completedTasks)
                {
                    if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
                    {
                        Console.WriteLine();
                        Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
                    }

                    lastNodeId = task.ComputeNodeInformation.ComputeNodeId;

                    Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                }

                // Get a collection of the uncompleted tasks, which may exist if the TaskStateMonitor timeout was hit
                List <CloudTask> uncompletedTasks = allTasks
                                                    .Where(t => t.State != TaskState.Completed)
                                                    .OrderBy(t => t.Id)
                                                    .ToList();

                // Print a list of uncompleted tasks, if any
                Console.WriteLine();
                Console.WriteLine("Uncompleted tasks:");
                Console.WriteLine();
                if (uncompletedTasks.Any())
                {
                    foreach (CloudTask task in uncompletedTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                    }
                }
                else
                {
                    Console.WriteLine("\t<none>");
                }

                // Print some summary information
                Console.WriteLine();
                Console.WriteLine("             Nodes: " + nodeCount);
                Console.WriteLine("         Node size: " + nodeSize);
                Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
                Console.WriteLine("             Tasks: " + tasks.Count);
                Console.WriteLine("          Duration: " + stopwatch.Elapsed);
                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
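The pool and node wait helpers from ArticleHelpers used above belong to the samples' common project and are not shown in this listing. Purely as a hypothetical sketch of the idea (the method name, polling interval, and error handling are assumptions, not the sample's actual code), a node-state wait can be written as a polling loop over the pool's compute nodes:

        // Sketch only: poll the pool's nodes until they all reach the target state or the timeout elapses.
        private static async Task WaitForNodesToReachStateSketchAsync(BatchClient batchClient, string poolId, ComputeNodeState targetState, TimeSpan timeout)
        {
            DateTime deadline = DateTime.UtcNow.Add(timeout);
            while (DateTime.UtcNow < deadline)
            {
                // Ask only for the properties we need to keep the listing cheap.
                List<ComputeNode> nodes = await batchClient.PoolOperations
                    .ListComputeNodes(poolId, new ODATADetailLevel(selectClause: "id,state"))
                    .ToListAsync();

                if (nodes.Count > 0 && nodes.All(n => n.State == targetState))
                {
                    return;
                }

                await Task.Delay(TimeSpan.FromSeconds(10));
            }

            throw new TimeoutException(string.Format("Not all nodes reached state {0} within {1}", targetState, timeout));
        }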
Example No. 17
        public static void JobMain(string[] args)
        {
            //Load the configuration
            Settings topNWordsConfiguration = new ConfigurationBuilder()
                                              .SetBasePath(Directory.GetCurrentDirectory())
                                              .AddJsonFile("settings.json")
                                              .Build()
                                              .Get <Settings>();
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OS Family 5 equates to Windows Server 2016. You can learn more about OS families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "standard_d1_v2",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"));
                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the input data is large enough that copying it to every node via FileStaging
                    // is undesirable and the number of tasks is small, because a large number of concurrent readers of the blob might be
                    // throttled by Storage, which would lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.
                    string bookFileUri = UploadBookFileToCloudBlob(accountSettings, topNWordsConfiguration.FileName);
                    Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks);

                    for (int i = 1; i <= topNWordsConfiguration.NumberOfTasks; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, string.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     bookFileUri,
                                                                                     topNWordsConfiguration.TopWordCount,
                                                                                     accountSettings.StorageAccountName,
                                                                                     accountSettings.StorageAccountKey));
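
                        // With hypothetical values this expands to something like
                        //   "TopNWords.exe --Task https://<account>.blob.core.windows.net/<container>/book.txt 10 <name> <key>";
                        // the executable name, URI, and credentials all come from the settings and the upload above.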

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //all files resolve to Azure blobs by their name (so two tasks that stage the same-named file will create just one blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait up to 20 minutes for all tasks to reach the completed state. The long timeout is necessary the first
                    // time a pool is created, to allow nodes to be added to the pool and initialized before they can run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
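                    // Note: WaitAll blocks the calling thread until the tasks reach the target state or the timeout
                    // elapses; the asynchronous TaskStateMonitor WhenAll variant (used in the parallel-tasks example
                    // above) avoids blocking and fits better in async code.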
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
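The DeleteContainers helper invoked in the finally block is not included in this excerpt. The sketch below shows one plausible shape for it, assuming it only needs to delete the single staging container by name using the same classic storage client the sample already uses; the signature matches the call site above, but the body is an assumption rather than the sample's actual code.

        // Sketch only: remove the blob container that file staging created for this run.
        private static void DeleteContainers(AccountSettings accountSettings, string containerName)
        {
            if (string.IsNullOrEmpty(containerName))
            {
                // Nothing was staged, so there is nothing to delete.
                return;
            }

            CloudStorageAccount storageAccount = new CloudStorageAccount(
                new StorageCredentials(accountSettings.StorageAccountName, accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient();
            Console.WriteLine("Deleting container: {0}", containerName);
            blobClient.GetContainerReference(containerName).DeleteIfExists();
        }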