Code Example #1
        public static SampleWithMetricsResult<IExampleSpace<T>> SampleExampleSpacesWithMetrics<T>(
            this IGenAdvanced<Test<T>> advanced,
            int? iterations = null,
            int? seed = null,
            int? size = null) =>
                SampleHelpers.RunExampleSpaceSample(advanced, iterations: iterations, seed: seed, size: size);
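A minimal usage sketch for this extension method; the gen variable, its Advanced property, and the argument values are illustrative assumptions, not taken from the source:

        // var gen = /* an IGen<Test<int>> built elsewhere (assumed) */;
        // SampleWithMetricsResult<IExampleSpace<int>> result =
        //     gen.Advanced.SampleExampleSpacesWithMetrics(iterations: 100, seed: 0, size: 50);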
Code Example #2
        public ActionResult Shutdown()
        {
            SampleHelpers.RunShutDownTasks(this);

            return RedirectToAction("Index");
        }
Code Example #3
File: OutputFilesExample.cs Project: carlosap/CSharp
        /// <summary>
        /// Runs a series of tasks, using the OutputFiles feature in conjunction with the file conventions library to upload the task output files to a container.
        /// Then downloads the files from the container to the local machine.
        /// </summary>
        public static async Task <CloudBlobContainer> RunWithConventions(
            BatchClient batchClient,
            CloudStorageAccount linkedStorageAccount,
            string poolId,
            int nodeCount,
            string jobId)
        {
            await CreatePoolAsync(batchClient, poolId, nodeCount);

            CloudJob job = batchClient.JobOperations.CreateJob(jobId, new PoolInformation {
                PoolId = poolId
            });

            // Get the container URL to use
            string             containerName = job.OutputStorageContainerName();
            CloudBlobContainer container     = linkedStorageAccount.CreateCloudBlobClient().GetContainerReference(containerName);
            await container.CreateIfNotExistsAsync();

            string containerUrl = job.GetOutputStorageContainerUrl(linkedStorageAccount);

            // Commit the job to the Batch service
            await job.CommitAsync();

            Console.WriteLine($"Created job {jobId}");

            // Obtain the bound job from the Batch service
            await job.RefreshAsync();

            // Create a series of simple tasks which dump the task environment to a file and then write random values to a text file
            IEnumerable <CloudTask> tasksToAdd = Enumerable.Range(1, 20).Select(i =>
            {
                var taskId = i.ToString().PadLeft(3, '0');
                var task   = new CloudTask(taskId, "cmd /v:ON /c \"echo off && set && (FOR /L %i IN (1,1,100000) DO (ECHO !RANDOM!)) > output.txt\"");

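                // The "..\std*.txt" pattern matches stdout.txt/stderr.txt in the task directory (one level above the
                // working directory) and uploads them as TaskLog whenever the task completes, while output.txt is
                // uploaded as TaskOutput only when the task exits successfully.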
                task.WithOutputFile(@"..\std*.txt", containerUrl, TaskOutputKind.TaskLog, OutputFileUploadCondition.TaskCompletion)
                .WithOutputFile(@"output.txt", containerUrl, TaskOutputKind.TaskOutput, OutputFileUploadCondition.TaskSuccess);

                return task;
            });

            // Add the tasks to the job; the tasks are automatically
            // scheduled for execution on the nodes by the Batch service.
            await job.AddTaskAsync(tasksToAdd);

            Console.WriteLine($"All tasks added to job {job.Id}");
            Console.WriteLine();

            Console.WriteLine($"Downloading outputs to {Directory.GetCurrentDirectory()}");

            foreach (CloudTask task in job.CompletedTasks())
            {
                if (task.ExecutionInformation.Result != TaskExecutionResult.Success)
                {
                    Console.WriteLine($"Task {task.Id} failed");
                    Console.WriteLine(SampleHelpers.GetFailureInfoDetails(task.ExecutionInformation.FailureInformation));
                }
                else
                {
                    Console.WriteLine($"Task {task.Id} completed successfully");
                }

                foreach (OutputFileReference output in task.OutputStorage(linkedStorageAccount).ListOutputs(TaskOutputKind.TaskOutput))
                {
                    Console.WriteLine($"output file: {output.FilePath}");
                    await output.DownloadToFileAsync($"{jobId}-{output.FilePath}", System.IO.FileMode.Create);
                }
            }

            return container;
        }
Code Example #4
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.jobManagerSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(this.accountSettings.StorageAccountName,
                                       this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.ExponentialRetryProvider(TimeSpan.FromSeconds(5), 3));

                string jobId = null;

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient, cloudStorageAccount);

                    // Submit the job
                    jobId = GettingStartedCommon.CreateJobId("SimpleJob");
                    await this.SubmitJobAsync(batchClient, cloudStorageAccount, jobId);

                    // Print out the status of the pools/jobs under this account
                    await GettingStartedCommon.PrintJobsAsync(batchClient);

                    await GettingStartedCommon.PrintPoolsAsync(batchClient);

                    // Wait for the job manager to complete
                    CloudTask jobManagerTask = await batchClient.JobOperations.GetTaskAsync(jobId, JobManagerTaskId);

                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, new List <CloudTask> {
                        jobManagerTask
                    }, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Delete Azure Batch resources
                    List <string> jobIdsToDelete  = new List <string>();
                    List <string> poolIdsToDelete = new List <string>();

                    if (this.jobManagerSettings.ShouldDeleteJob)
                    {
                        jobIdsToDelete.Add(jobId);
                    }

                    if (this.jobManagerSettings.ShouldDeletePool)
                    {
                        poolIdsToDelete.Add(this.jobManagerSettings.PoolId);
                    }

                    await SampleHelpers.DeleteBatchResourcesAsync(batchClient, jobIdsToDelete, poolIdsToDelete);
                }
            }
        }
Code Example #5
        [STAThread] // Added to support UX
        static void Main(string[] args)
        {
            CrmServiceClient service = null;

            try
            {
                service = SampleHelpers.Connect("Connect");
                if (service.IsReady)
                {
                    #region Sample Code
                    //////////////////////////////////////////////
                    #region Set up
                    SetUpSample(service);
                    #endregion Set up
                    #region Demonstrate

                    #region Create goal metric

                    // Create the metric, setting the Metric Type to 'Count' and enabling
                    // stretch tracking.
                    Metric metric = new Metric()
                    {
                        Name             = "Sample Count Metric",
                        IsAmount         = false,
                        IsStretchTracked = true
                    };
                    _metricId = service.Create(metric);
                    metric.Id = _metricId;

                    Console.Write("Created count metric, ");

                    #endregion

                    #region Create RollupFields

                    // Create RollupField which targets completed (received) phone calls.
                    RollupField actual = new RollupField()
                    {
                        SourceEntity           = PhoneCall.EntityLogicalName,
                        GoalAttribute          = "actualinteger",
                        SourceState            = 1,
                        SourceStatus           = 4,
                        EntityForDateAttribute = PhoneCall.EntityLogicalName,
                        DateAttribute          = "actualend",
                        MetricId = metric.ToEntityReference()
                    };
                    _actualId = service.Create(actual);

                    Console.Write("created completed phone call RollupField, ");

                    #endregion

                    #region Create the goal rollup queries

                    // Note: Formatting the FetchXml onto multiple lines in the following
                    // rollup queries causes the length property to be greater than 1,000
                    // chars and will cause an exception.

                    // The following query locates closed incoming phone calls.
                    GoalRollupQuery goalRollupQuery = new GoalRollupQuery()
                    {
                        Name            = "Example Goal Rollup Query",
                        QueryEntityType = PhoneCall.EntityLogicalName,
                        FetchXml        = @"<fetch version='1.0' output-format='xml-platform' mapping='logical' distinct='false'><entity name='phonecall'><attribute name='subject'/><attribute name='statecode'/><attribute name='prioritycode'/><attribute name='scheduledend'/><attribute name='createdby'/><attribute name='regardingobjectid'/><attribute name='activityid'/><order attribute='subject' descending='false'/><filter type='and'><condition attribute='directioncode' operator='eq' value='0'/><condition attribute='statecode' operator='eq' value='1' /></filter></entity></fetch>"
                    };
                    _rollupQueryIds.Add(service.Create(goalRollupQuery));
                    goalRollupQuery.Id = _rollupQueryIds[0];

                    // The following query locates closed outgoing phone calls.
                    GoalRollupQuery goalRollupQuery2 = new GoalRollupQuery()
                    {
                        Name            = "Example Goal Rollup Query",
                        QueryEntityType = PhoneCall.EntityLogicalName,
                        FetchXml        = @"<fetch version='1.0' output-format='xml-platform' mapping='logical' distinct='false'><entity name='phonecall'><attribute name='subject'/><attribute name='statecode'/><attribute name='prioritycode'/><attribute name='scheduledend'/><attribute name='createdby'/><attribute name='regardingobjectid'/><attribute name='activityid'/><order attribute='subject' descending='false'/><filter type='and'><condition attribute='directioncode' operator='eq' value='1'/><condition attribute='statecode' operator='eq' value='1' /></filter></entity></fetch>"
                    };
                    _rollupQueryIds.Add(service.Create(goalRollupQuery2));
                    goalRollupQuery2.Id = _rollupQueryIds[1];

                    Console.Write("created rollup queries for phone calls.\n");
                    Console.WriteLine();

                    #endregion

                    #region Create goals

                    // Determine current fiscal period and year.
                    // Note: This sample assumes quarterly fiscal periods.
                    DateTime date          = DateTime.Now;
                    int      quarterNumber = (date.Month - 1) / 3 + 1;
                    int      yearNumber    = date.Year;
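                    // For example, a date in November (Month == 11) yields (11 - 1) / 3 + 1 == 4,
                    // so the goals below would target fiscal quarter 4 of the current year.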

                    // Create three goals: one parent goal and two child goals.
                    Goal parentGoal = new Goal()
                    {
                        Title = "Parent Goal Example",
                        RollupOnlyFromChildGoals      = true,
                        ConsiderOnlyGoalOwnersRecords = true,
                        TargetInteger        = 8,
                        StretchTargetInteger = 10,
                        IsFiscalPeriodGoal   = true,
                        FiscalPeriod         = new OptionSetValue(quarterNumber),
                        FiscalYear           = new OptionSetValue(yearNumber),
                        MetricId             = metric.ToEntityReference(),
                        GoalOwnerId          = new EntityReference
                        {
                            Id          = _salesManagerId,
                            LogicalName = SystemUser.EntityLogicalName
                        },
                        OwnerId = new EntityReference
                        {
                            Id          = _salesManagerId,
                            LogicalName = SystemUser.EntityLogicalName
                        }
                    };
                    _parentGoalId = service.Create(parentGoal);
                    parentGoal.Id = _parentGoalId;

                    Console.WriteLine("Created parent goal");
                    Console.WriteLine("-------------------");
                    Console.WriteLine("Target: {0}", parentGoal.TargetInteger.Value);
                    Console.WriteLine("Stretch Target: {0}",
                                      parentGoal.StretchTargetInteger.Value);
                    Console.WriteLine("Goal owner: {0}", parentGoal.GoalOwnerId.Id);
                    Console.WriteLine("Goal Fiscal Period: {0}",
                                      parentGoal.FiscalPeriod.Value);
                    Console.WriteLine("Goal Fiscal Year: {0}",
                                      parentGoal.FiscalYear.Value);
                    Console.WriteLine("<End of Listing>");
                    Console.WriteLine();

                    Goal firstChildGoal = new Goal()
                    {
                        Title = "First Child Goal Example",
                        ConsiderOnlyGoalOwnersRecords = true,
                        TargetInteger        = 5,
                        StretchTargetInteger = 6,
                        IsFiscalPeriodGoal   = true,
                        FiscalPeriod         = new OptionSetValue(quarterNumber),
                        FiscalYear           = new OptionSetValue(yearNumber),
                        MetricId             = metric.ToEntityReference(),
                        ParentGoalId         = parentGoal.ToEntityReference(),
                        GoalOwnerId          = new EntityReference
                        {
                            Id          = _salesRepresentativeIds[0],
                            LogicalName = SystemUser.EntityLogicalName
                        },
                        OwnerId = new EntityReference
                        {
                            Id          = _salesManagerId,
                            LogicalName = SystemUser.EntityLogicalName
                        },
                        RollupQueryActualIntegerId = goalRollupQuery.ToEntityReference()
                    };
                    _firstChildGoalId = service.Create(firstChildGoal);

                    Console.WriteLine("First child goal");
                    Console.WriteLine("----------------");
                    Console.WriteLine("Target: {0}", firstChildGoal.TargetInteger.Value);
                    Console.WriteLine("Stretch Target: {0}",
                                      firstChildGoal.StretchTargetInteger.Value);
                    Console.WriteLine("Goal owner: {0}", firstChildGoal.GoalOwnerId.Id);
                    Console.WriteLine("Goal Fiscal Period: {0}",
                                      firstChildGoal.FiscalPeriod.Value);
                    Console.WriteLine("Goal Fiscal Year: {0}",
                                      firstChildGoal.FiscalYear.Value);
                    Console.WriteLine();

                    Goal secondChildGoal = new Goal()
                    {
                        Title = "Second Child Goal Example",
                        ConsiderOnlyGoalOwnersRecords = true,
                        TargetInteger        = 3,
                        StretchTargetInteger = 4,
                        IsFiscalPeriodGoal   = true,
                        FiscalPeriod         = new OptionSetValue(quarterNumber),
                        FiscalYear           = new OptionSetValue(yearNumber),
                        MetricId             = metric.ToEntityReference(),
                        ParentGoalId         = parentGoal.ToEntityReference(),
                        GoalOwnerId          = new EntityReference
                        {
                            Id          = _salesRepresentativeIds[1],
                            LogicalName = SystemUser.EntityLogicalName
                        },
                        OwnerId = new EntityReference
                        {
                            Id          = _salesManagerId,
                            LogicalName = SystemUser.EntityLogicalName
                        },
                        RollupQueryActualIntegerId = goalRollupQuery2.ToEntityReference()
                    };
                    _secondChildGoalId = service.Create(secondChildGoal);

                    Console.WriteLine("Second child goal");
                    Console.WriteLine("-----------------");
                    Console.WriteLine("Target: {0}",
                                      secondChildGoal.TargetInteger.Value);
                    Console.WriteLine("Stretch Target: {0}",
                                      secondChildGoal.StretchTargetInteger.Value);
                    Console.WriteLine("Goal owner: {0}", secondChildGoal.GoalOwnerId.Id);
                    Console.WriteLine("Goal Fiscal Period: {0}",
                                      secondChildGoal.FiscalPeriod.Value);
                    Console.WriteLine("Goal Fiscal Year: {0}",
                                      secondChildGoal.FiscalYear.Value);
                    Console.WriteLine();

                    #endregion

                    #region Calculate rollup and display result

                    // Calculate roll-up of goals.
                    RecalculateRequest recalculateRequest = new RecalculateRequest()
                    {
                        Target = new EntityReference(Goal.EntityLogicalName, _parentGoalId)
                    };
                    service.Execute(recalculateRequest);

                    Console.WriteLine("Calculated roll-up of goals.");

                    // Retrieve and report 3 different computed values for the goals
                    // - Percentage
                    // - ComputedTargetAsOfTodayPercentageAchieved
                    // - ComputedTargetAsOfTodayInteger
                    QueryExpression retrieveValues = new QueryExpression()
                    {
                        EntityName = Goal.EntityLogicalName,
                        ColumnSet  = new ColumnSet(
                            "title",
                            "percentage",
                            "computedtargetasoftodaypercentageachieved",
                            "computedtargetasoftodayinteger")
                    };
                    EntityCollection ec = service.RetrieveMultiple(retrieveValues);

                    // Compute and display the results
                    for (int i = 0; i < ec.Entities.Count; i++)
                    {
                        Goal temp = (Goal)ec.Entities[i];
                        Console.WriteLine("Roll-up details for goal: {0}", temp.Title);
                        Console.WriteLine("---------------");
                        Console.WriteLine("Percentage: {0}", temp.Percentage);
                        Console.WriteLine("ComputedTargetAsOfTodayPercentageAchieved: {0}",
                                          temp.ComputedTargetAsOfTodayPercentageAchieved);
                        Console.WriteLine("ComputedTargetAsOfTodayInteger: {0}",
                                          temp.ComputedTargetAsOfTodayInteger.Value);
                        Console.WriteLine("<End of Listing>");
                    }

                    #endregion


                    #region Clean up
                    CleanUpSample(service);
                    #endregion Clean up
                }
                #endregion Demonstrate
                #endregion Sample Code
                else
                {
                    const string UNABLE_TO_LOGIN_ERROR = "Unable to Login to Dynamics CRM";
                    if (service.LastCrmError.Equals(UNABLE_TO_LOGIN_ERROR))
                    {
                        Console.WriteLine("Check the connection string values in cds/App.config.");
                        throw new Exception(service.LastCrmError);
                    }
                    else
                    {
                        throw service.LastCrmException;
                    }
                }
            }

            catch (Exception ex)
            {
                SampleHelpers.HandleException(ex);
            }

            finally
            {
                if (service != null)
                {
                    service.Dispose();
                }

                Console.WriteLine("Press <Enter> to exit.");
                Console.ReadLine();
            }
        }
Code Example #6
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("-------------------------------------");
            Console.WriteLine(this.poolsAndResourceFileSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            // Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            // Set up the Storage account used for the task input file containers; these containers are deleted during cleanup once they are no longer needed.
            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(this.accountSettings.StorageAccountName,
                                       this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            // Get an instance of the BatchClient for a given Azure Batch account.
            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                // add a retry policy. The built-in policies are No Retry (default), Linear Retry, and Exponential Retry
                batchClient.CustomBehaviors.Add(RetryPolicyProvider.LinearRetryProvider(TimeSpan.FromSeconds(10), 3));

                string jobId = null;

                // Track the containers which are created as part of job submission so that we can clean them up later.
                HashSet <string> blobContainerNames = new HashSet <string>();

                try
                {
                    // Allocate a pool
                    await this.CreatePoolIfNotExistAsync(batchClient, cloudStorageAccount);

                    // Submit the job
                    jobId = GettingStartedCommon.CreateJobId("SimpleJob");
                    blobContainerNames = await this.SubmitJobAsync(batchClient, cloudStorageAccount, jobId);

                    // Print out the status of the pools/jobs under this account
                    await GettingStartedCommon.PrintJobsAsync(batchClient);

                    await GettingStartedCommon.PrintPoolsAsync(batchClient);

                    // Wait for the job to complete
                    List <CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

                    await GettingStartedCommon.WaitForTasksAndPrintOutputAsync(batchClient, tasks, TimeSpan.FromMinutes(10));
                }
                finally
                {
                    // Delete the pool (if configured) and job
                    // TODO: In C# 6 we can await here instead of .Wait()

                    // Delete Azure Storage container data
                    SampleHelpers.DeleteContainersAsync(cloudStorageAccount, blobContainerNames).Wait();

                    // Delete Azure Batch resources
                    List <string> jobIdsToDelete  = new List <string>();
                    List <string> poolIdsToDelete = new List <string>();

                    if (this.poolsAndResourceFileSettings.ShouldDeleteJob)
                    {
                        jobIdsToDelete.Add(jobId);
                    }

                    if (this.poolsAndResourceFileSettings.ShouldDeletePool)
                    {
                        poolIdsToDelete.Add(this.poolsAndResourceFileSettings.PoolId);
                    }

                    SampleHelpers.DeleteBatchResourcesAsync(batchClient, jobIdsToDelete, poolIdsToDelete).Wait();
                }
            }
        }
Code Example #7
        public static void Main(string[] args)
        {
            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            StorageCredentials storageCred = new StorageCredentials(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey);

            CloudStorageAccount storageAccount = new CloudStorageAccount(storageCred, true);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                string             jobId     = "PersistOutput-" + DateTime.Now.ToString("yyyyMMdd-HHmmss");
                const string       poolId    = "PersistOutputsSamplePool";
                const int          nodeCount = 1;
                CloudBlobContainer container;

                Console.Write("Which persistence technology would you like to use? 1) File conventions, 2) OutputFiles, or 3) OutputFiles implementing conventions: ");
                string response = Console.ReadLine().ToLower();
                if (response == "1")
                {
                    container = FileConventionsExample.Run(batchClient, storageAccount, poolId, nodeCount, jobId).Result;

                    Console.WriteLine();
                    Console.WriteLine("All tasks completed and outputs downloaded. You can view the task outputs in the Azure portal");
                    Console.WriteLine("before deleting the job.");
                }
                else if (response == "2")
                {
                    container = OutputFilesExample.Run(batchClient, storageAccount, poolId, nodeCount, jobId).Result;

                    Console.WriteLine();
                    Console.WriteLine("All tasks completed and outputs downloaded.");
                }
                else if (response == "3")
                {
                    container = OutputFilesExample.RunWithConventions(batchClient, storageAccount, poolId, nodeCount, jobId).Result;

                    Console.WriteLine();
                    Console.WriteLine("All tasks completed and outputs downloaded. You can view the task outputs in the Azure portal");
                    Console.WriteLine("before deleting the job.");
                }
                else
                {
                    throw new ArgumentException($"Unexpected response: {response}");
                }

                // Clean up the resources we've created (job, pool, and blob storage container)
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.JobOperations.DeleteJob(jobId);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.PoolOperations.DeletePool(poolId);
                }

                Console.Write("Delete storage container? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    container.Delete();
                }

                Console.WriteLine();
                Console.WriteLine("Sample complete, hit ENTER to exit...");
                Console.ReadLine();
            }
        }
Code Example #8
        private static async Task MainAsync(string[] args)
        {
            const string poolId = "JobPrepReleaseSamplePool";
            const string jobId  = "JobPrepReleaseSampleJob";

            // Location of the file that the job tasks will work with, a text file in the
            // node's "shared" directory.
            const string taskOutputFile = "%AZ_BATCH_NODE_SHARED_DIR%\\job_prep_and_release.txt";

            // The job prep task will write the node ID to the text file in the shared directory
            const string jobPrepCmdLine = "cmd /c echo %AZ_BATCH_NODE_ID% tasks: >" + taskOutputFile;

            // Each task then echoes its ID to the same text file
            const string taskCmdLine = "cmd /c echo   %AZ_BATCH_TASK_ID% >> " + taskOutputFile;

            // The job release task will then delete the text file from the shared directory
            const string jobReleaseCmdLine = "cmd /c del " + taskOutputFile;

            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            // Initialize the BatchClient for access to your Batch account
            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool (or obtain an existing pool with the specified ID)
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                                poolId,
                                                                                "small",
                                                                                2,
                                                                                1);

                // Create a CloudJob (or obtain an existing job with the specified ID)
                CloudJob job = await SampleHelpers.GetJobIfExistAsync(batchClient, jobId);

                if (job == null)
                {
                    Console.WriteLine("Job {0} not found, creating...", jobId);

                    CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation()
                    {
                        PoolId = poolId
                    });

                    // Configure and assign the job preparation task
                    unboundJob.JobPreparationTask = new JobPreparationTask {
                        CommandLine = jobPrepCmdLine
                    };

                    // Configure and assign the job release task
                    unboundJob.JobReleaseTask = new JobReleaseTask {
                        CommandLine = jobReleaseCmdLine
                    };

                    await unboundJob.CommitAsync();

                    // Get the bound version of the job with all of its properties populated
                    job = await batchClient.JobOperations.GetJobAsync(jobId);
                }

                // Create the tasks that the job will execute
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= 8; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = taskCmdLine;
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
                // submission helps to ensure efficient underlying API calls to the Batch service.
                Console.WriteLine("Submitting tasks and awaiting completion...");
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
                // for the nodes within the pool to be created and started if the pool had not yet been created.
                await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                    job.ListTasks(),
                    TaskState.Completed,
                    TimeSpan.FromMinutes(30));

                Console.WriteLine("All tasks completed.");
                Console.WriteLine();

                // Print the contents of the shared text file modified by the job preparation and other tasks.
                ODATADetailLevel nodeDetail          = new ODATADetailLevel(selectClause: "id, state");
                IPagedEnumerable <ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(pool.Id, nodeDetail);
                await nodes.ForEachAsync(async (node) =>
                {
                    // Check to ensure that the node is Idle before attempting to pull the text file.
                    // If the pool was just created, there is a chance that another node completed all
                    // of the tasks prior to the other node(s) completing their startup procedure.
                    if (node.State == ComputeNodeState.Idle)
                    {
                        NodeFile sharedTextFile = await node.GetNodeFileAsync("shared\\job_prep_and_release.txt");
                        Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Name, node.Id);
                        Console.WriteLine("-------------------------------------------");
                        Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
                    }
                });

                // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
                // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
                // thus you need not call Terminate if you typically delete your jobs upon task completion.
                await batchClient.JobOperations.TerminateJobAsync(job.Id);

                // Wait for the job to reach state "Completed." Note that this wait is not typically necessary in
                // production code, but is done here to enable the checking of the release tasks exit code below.
                await ArticleHelpers.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

                // Print the exit codes of the prep and release tasks by obtaining their execution info
                List <JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();

                foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
                {
                    Console.WriteLine();
                    Console.WriteLine("{0}: ", info.ComputeNodeId);

                    // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
                    if (info.JobPreparationTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Prep task exit code:    {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
                    }

                    // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
                    if (info.JobReleaseTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
                    }
                }

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    // Note that deleting the job will execute the job release task if the job was not previously terminated
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
Code Example #9
        public static void JobMain(string[] args)
        {
            //Load the configuration
            Settings topNWordsConfiguration = new ConfigurationBuilder()
                                              .SetBasePath(Directory.GetCurrentDirectory())
                                              .AddJsonFile("settings.json")
                                              .Build()
                                              .Get <Settings>();
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OS Family 6 == Windows Server 2019. You can learn more about OS families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "standard_d1_v2",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "6"));
                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                pool.TaskSchedulingPolicy   = new TaskSchedulingPolicy(ComputeNodeFillType.Spread);
                pool.MaxTasksPerComputeNode = 4;

                GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();
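                // The autoscale formula below reads the $PendingTasks metric: if fewer than 70 percent of the samples
                // from the last 90 seconds are available it falls back to startingNumberOfVMs, otherwise it takes the
                // 180-second average of pending tasks, then caps the target at maxNumberofVMs;
                // $NodeDeallocationOption = taskcompletion lets running tasks finish before nodes are removed.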
                var formula     = @"startingNumberOfVMs = 2;
                    maxNumberofVMs = 4;
                    pendingTaskSamplePercent = $PendingTasks.GetSamplePercent(90 * TimeInterval_Second);
                    pendingTaskSamples = pendingTaskSamplePercent < 70 ? startingNumberOfVMs : avg($PendingTasks.GetSample(180 * TimeInterval_Second));
                    $TargetDedicatedNodes = min(maxNumberofVMs, pendingTaskSamples);
                    $NodeDeallocationOption = taskcompletion;";
                var noOfSeconds = 150;
                Thread.Sleep(noOfSeconds * 1000);

                client.PoolOperations.EnableAutoScale(
                    poolId: topNWordsConfiguration.PoolId, autoscaleFormula: formula,
                    autoscaleEvaluationInterval: TimeSpan.FromMinutes(5));

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe         = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll          = new FileToStage(StorageClientDllName, stagingStorageAccount);
                    FileToStage newtonJsoftDll      = new FileToStage(NewtonJSoftDllName, stagingStorageAccount);
                    FileToStage microsoftEFDll      = new FileToStage(MicrosoftEntityFrameworkDllName, stagingStorageAccount);
                    FileToStage microsoftEFCoreDll  = new FileToStage(MicrosoftEntityFrameworkCoreDllName, stagingStorageAccount);
                    FileToStage microsoftBCLDll     = new FileToStage(MicrosoftBCLDllName, stagingStorageAccount);
                    FileToStage systemTasksDll      = new FileToStage(SystemTasksDllName, stagingStorageAccount);
                    FileToStage topNWordsConfigFile = new FileToStage(TopnWordsConfig, stagingStorageAccount);
                    FileToStage SystemValueTupleDll = new FileToStage(SystemValueTupleDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionAbstractionsDll = new FileToStage(DependecyInjectionAbstractionsDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionDll             = new FileToStage(DependecyInjectionDllName, stagingStorageAccount);
                    FileToStage LoggingAbstractionsDll             = new FileToStage(LoggingAbstractionsDllName, stagingStorageAccount);
                    FileToStage DiagnosticsDll        = new FileToStage(DiagnosticssDllName, stagingStorageAccount);
                    FileToStage CachingAbstractionDll = new FileToStage(CachingAbstractionsDllName, stagingStorageAccount);
                    FileToStage MicrosoftSqlServerDll = new FileToStage(MicrosoftSqlServerDllName, stagingStorageAccount);
                    FileToStage SystemComponentDll    = new FileToStage(SystemComponentDllName, stagingStorageAccount);
                    FileToStage SystemCollectionsDll  = new FileToStage(SystemCollectionsDllName, stagingStorageAccount);
                    FileToStage pDll                 = new FileToStage(pdllName, stagingStorageAccount);
                    FileToStage oDll                 = new FileToStage(odllName, stagingStorageAccount);
                    FileToStage lDll                 = new FileToStage(ldllName, stagingStorageAccount);
                    FileToStage hashcodeDll          = new FileToStage(hashcodeDllName, stagingStorageAccount);
                    FileToStage clientSqlDll         = new FileToStage(clientSqlClientDllName, stagingStorageAccount);
                    FileToStage cachingMemoryDll     = new FileToStage(CachingMemoryDllName, stagingStorageAccount);
                    FileToStage configAbstractionDll = new FileToStage(configAbstractionDllName, stagingStorageAccount);
                    FileToStage SNIDll               = new FileToStage(SNIDllName, stagingStorageAccount);


                    FileToStage relationDll = new FileToStage(relationddllName, stagingStorageAccount);



                    var textFile = "E:\\WeatherAPIPOC\\cities_id.txt";
                    var text     = File.ReadAllLines(textFile);
                    var cityList = new List <string>(text);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.
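                    // A hedged sketch of that alternative (the container name and command-line switch are hypothetical):
                    //   string inputSas = SampleHelpers.ConstructContainerSas(
                    //       cloudStorageAccount, "input-data", permissions: SharedAccessBlobPermissions.Read);
                    //   var blobInputTask = new CloudTask("task_with_blob_input", $"{TopNWordsExeName} --InputUri {inputSas}");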

                    Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks);

                    for (int i = 0; i < cityList.Count; i++)
                    {
                        string    programLaunchTime = DateTime.Now.ToString("h:mm:sstt");
                        CloudTask task = new CloudTask(
                            id: $"task_no_{i + 1}",
                            commandline: $"cmd /c mkdir x64 & move SNI.dll x64 & {TopNWordsExeName} --Task {cityList[i]} %AZ_BATCH_NODE_ID% {programLaunchTime}");

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                            newtonJsoftDll,
                            microsoftEFDll,
                            microsoftEFCoreDll,
                            microsoftBCLDll,
                            systemTasksDll,
                            topNWordsConfigFile,
                            SystemValueTupleDll,
                            DependencyInjectionAbstractionsDll,
                            DependencyInjectionDll,
                            LoggingAbstractionsDll,
                            DiagnosticsDll,
                            CachingAbstractionDll,
                            MicrosoftSqlServerDll,
                            SystemComponentDll,
                            SystemCollectionsDll,
                            oDll,
                            pDll,
                            lDll,
                            relationDll,
                            hashcodeDll,
                            clientSqlDll,
                            cachingMemoryDll,
                            configAbstractionDll,
                            SNIDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
Code Example #10
File: JobSubmitter.cs Project: carlosap/CSharp
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.textSearchSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.accountSettings.StorageAccountName,
                    this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            //Upload resources if required
            Console.WriteLine($"Creating container {this.textSearchSettings.OutputBlobContainer} if it doesn't exist...");
            var blobClient      = cloudStorageAccount.CreateCloudBlobClient();
            var outputContainer = blobClient.GetContainerReference(this.textSearchSettings.OutputBlobContainer);
            await outputContainer.CreateIfNotExistsAsync();

            if (this.textSearchSettings.ShouldUploadResources)
            {
                Console.WriteLine("Splitting file: {0} into {1} subfiles",
                                  Constants.TextFilePath,
                                  this.textSearchSettings.NumberOfMapperTasks);

                //Split the text file into the correct number of files for consumption by the mapper tasks.
                FileSplitter  splitter        = new FileSplitter();
                List <string> mapperTaskFiles = await splitter.SplitAsync(
                    Constants.TextFilePath,
                    this.textSearchSettings.NumberOfMapperTasks);

                List <string> files = Constants.RequiredExecutableFiles.Union(mapperTaskFiles).ToList();

                await SampleHelpers.UploadResourcesAsync(
                    cloudStorageAccount,
                    this.textSearchSettings.InputBlobContainer,
                    files);
            }

            //Generate a SAS for the container.
            string inputContainerSasUrl = SampleHelpers.ConstructContainerSas(
                cloudStorageAccount,
                this.textSearchSettings.InputBlobContainer,
                permissions: WindowsAzure.Storage.Blob.SharedAccessBlobPermissions.Read);

            string outputContainerSasUrl = SampleHelpers.ConstructContainerSas(
                cloudStorageAccount,
                this.textSearchSettings.OutputBlobContainer,
                permissions: WindowsAzure.Storage.Blob.SharedAccessBlobPermissions.Read |
                WindowsAzure.Storage.Blob.SharedAccessBlobPermissions.Write);

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(credentials))
            {
                //
                // Construct the job properties in local memory before committing them to the Batch Service.
                //

                //Allow enough compute nodes in the pool to run each mapper task
                int numberOfPoolComputeNodes = this.textSearchSettings.NumberOfMapperTasks;

                //Define the pool specification for the pool which the job will run on.
                PoolSpecification poolSpecification = new PoolSpecification()
                {
                    TargetDedicatedComputeNodes = numberOfPoolComputeNodes,
                    VirtualMachineSize          = "standard_d1_v2",
                    //You can learn more about os families and versions at:
                    //http://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix
                    CloudServiceConfiguration = new CloudServiceConfiguration(osFamily: "5")
                };

                //Use the auto pool feature of the Batch Service to create a pool when the job is created.
                //This creates a new pool for each job which is added.
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix   = "TextSearchPool",
                    KeepAlive          = false,
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    PoolSpecification  = poolSpecification
                };

                //Define the pool information for this job -- it will run on the pool defined by the auto pool specification above.
                PoolInformation poolInformation = new PoolInformation()
                {
                    AutoPoolSpecification = autoPoolSpecification
                };

                //Create the unbound job in local memory.  An object which exists only in local memory (and not on the Batch Service) is "unbound".
                string jobId = Environment.GetEnvironmentVariable("USERNAME") + DateTime.UtcNow.ToString("yyyyMMdd-HHmmss");

                CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, poolInformation);
                unboundJob.UsesTaskDependencies = true;

                try
                {
                    //Commit the unbound job to the Batch Service.
                    Console.WriteLine($"Adding job: {unboundJob.Id} to the Batch Service.");
                    await unboundJob.CommitAsync(); //Issues a request to the Batch Service to add the job which was defined above.

                    // Add tasks to the job
                    var mapperTasks = CreateMapperTasks(inputContainerSasUrl, outputContainerSasUrl);
                    var reducerTask = CreateReducerTask(inputContainerSasUrl, outputContainerSasUrl, mapperTasks);

                    var tasksToAdd = Enumerable.Concat(mapperTasks, new[] { reducerTask });

                    //Submit the unbound task collection to the Batch Service.
                    //Use the AddTask method which takes a collection of CloudTasks for the best performance.
                    Console.WriteLine("Submitting {0} mapper tasks", this.textSearchSettings.NumberOfMapperTasks);
                    Console.WriteLine("Submitting 1 reducer task");
                    await batchClient.JobOperations.AddTaskAsync(jobId, tasksToAdd);

                    //An object which is backed by a corresponding Batch Service object is "bound."
                    CloudJob boundJob = await batchClient.JobOperations.GetJobAsync(jobId);

                    // Update the job now that we've added tasks so that when all of the tasks which we have added
                    // are complete, the job will automatically move to the completed state.
                    boundJob.OnAllTasksComplete = OnAllTasksComplete.TerminateJob;
                    await boundJob.CommitAsync();
                    await boundJob.RefreshAsync();

                    //
                    // Wait for the tasks to complete.
                    //
                    List <CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

                    TimeSpan maxJobCompletionTimeout = TimeSpan.FromMinutes(30);

                    // Monitor the current tasks to see when they are done.
                    // Occasionally a task may get killed and requeued during an upgrade or hardware failure;
                    // robustness against this was not added to the sample for simplicity,
                    // but should be added to any production code.
                    Console.WriteLine("Waiting for job's tasks to complete");

                    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                    try
                    {
                        await taskStateMonitor.WhenAll(tasks, TaskState.Completed, maxJobCompletionTimeout);
                    }
                    finally
                    {
                        Console.WriteLine("Done waiting for all tasks to complete");

                        // Refresh the task list
                        tasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();

                        //Check that each task exited successfully.
                        foreach (var task in tasks)
                        {
                            await Helpers.CheckForTaskSuccessAsync(task, dumpStandardOutOnTaskSuccess : false);
                        }
                    }

                    //
                    // Download and write out the reducer tasks output
                    //
                    string reducerText = await SampleHelpers.DownloadBlobTextAsync(cloudStorageAccount, this.textSearchSettings.OutputBlobContainer, Constants.ReducerTaskResultBlobName);

                    Console.WriteLine("Reducer reuslts:");
                    Console.WriteLine(reducerText);
                }
                finally
                {
                    //Delete the job.
                    //This will delete the auto pool associated with the job as long as the pool
                    //keep alive property is set to false.
                    if (this.textSearchSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine($"Deleting job {jobId}");
                        await batchClient.JobOperations.DeleteJobAsync(jobId);
                    }

                    if (this.textSearchSettings.ShouldDeleteContainers)
                    {
                        Console.WriteLine("Deleting containers");
                        var inputContainer = blobClient.GetContainerReference(this.textSearchSettings.InputBlobContainer);
                        await inputContainer.DeleteIfExistsAsync();

                        await outputContainer.DeleteIfExistsAsync();
                    }
                }
            }
        }
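
SampleHelpers.ConstructContainerSas is not reproduced in this listing. A minimal sketch of such a helper, assuming the classic Microsoft.WindowsAzure.Storage client used above and a container that already exists, might look like this:

// Sketch only: builds a container SAS URL of the kind passed to the mapper and reducer tasks above.
public static string ConstructContainerSas(
    CloudStorageAccount account,
    string containerName,
    SharedAccessBlobPermissions permissions = SharedAccessBlobPermissions.Read)
{
    CloudBlobContainer container = account.CreateCloudBlobClient().GetContainerReference(containerName);

    // Start slightly in the past to tolerate clock skew, and give the job a generous window.
    SharedAccessBlobPolicy policy = new SharedAccessBlobPolicy
    {
        SharedAccessStartTime = DateTime.UtcNow.AddMinutes(-5),
        SharedAccessExpiryTime = DateTime.UtcNow.AddHours(24),
        Permissions = permissions
    };

    string sasToken = container.GetSharedAccessSignature(policy);
    return container.Uri + sasToken; // the token already begins with '?'
}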
Code example #11
File: MainPage.xaml.cs  Project: jhealy/batchcheck
        private async void Button_JobA_Click(object sender, RoutedEventArgs e)
        {
            const string DQUOTE = "\"";

            string joba_task_cmdline = $"cmd /c {DQUOTE}set AZ_BATCH & timeout /t 30 > NUL{DQUOTE}";

            StringBuilder sb = new StringBuilder(1024);

            sb.AppendLine("Submitting job");
            string jobid = NamingHelpers.GenJobName("A");

            sb.AppendLine($"jobid={jobid}");
            string taskid = NamingHelpers.GenTaskName("JOBA");

            sb.AppendLine($"taskid={taskid}");

            sb.AppendLine($"task command line={joba_task_cmdline}");

            // read account settings, dump
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            // read job settings, dump
            JobSettings jobSettings = SampleHelpers.LoadJobSettings();

            // connect to batch, dump status
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey
                );

            sb.AppendLine($"batchcred created to {accountSettings.BatchAccountName} at {accountSettings.BatchServiceUrl}");
            using (BatchClient client = BatchClient.Open(cred))
            {
                PoolInformation pool = new PoolInformation();
                pool.PoolId = jobSettings.PoolID;

                sb.AppendLine("creating job " + jobid);
                CloudJob ourJob = client.JobOperations.CreateJob(jobid, pool);
                ourJob.OnAllTasksComplete = Microsoft.Azure.Batch.Common.OnAllTasksComplete.TerminateJob;

                await ourJob.CommitAsync();

                sb.AppendLine("job created " + jobid);

                // Get the bound version of the job with all of its properties populated
                CloudJob committedJob = await client.JobOperations.GetJobAsync(jobid);

                sb.AppendLine("bound version of job retrieved " + jobid);

                sb.AppendLine("submitting task " + taskid);
                // Create the tasks that the job will execute
                CloudTask task = new CloudTask(taskid, joba_task_cmdline);
                await client.JobOperations.AddTaskAsync(jobid, task);

                sb.AppendLine("task submitted " + taskid);

                TextBox_JobID.Text = jobid;
                TextBox_Task.Text  = taskid;

                sb.AppendLine("task submitted.  use job status button to see job and task checks");

                TextBlock_Out.Text = sb.ToString();
            }
        }
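
NamingHelpers.GenJobName and GenTaskName are project-specific helpers from jhealy/batchcheck and are not part of this listing. A hypothetical implementation only needs to produce unique, Batch-legal ids (letters, digits, '-' and '_', at most 64 characters), for example:

// Hypothetical sketch of the naming helpers used above.
public static class NamingHelpers
{
    public static string GenJobName(string prefix) =>
        $"job{prefix}-{DateTime.UtcNow:yyyyMMdd-HHmmss}";

    public static string GenTaskName(string prefix) =>
        $"task{prefix}-{Guid.NewGuid():N}";
}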
Code example #12
File: MainPage.xaml.cs  Project: jhealy/batchcheck
        private async void Button_JobStatus_Click(object sender, RoutedEventArgs e)
        {
            StringBuilder sb = new StringBuilder(1024);
            // read account settings, dump
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            // read job settings, dump
            JobSettings jobSettings = SampleHelpers.LoadJobSettings();

            // connect to batch, dump status
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey
                );

            sb.AppendLine($"batchcred created to {accountSettings.BatchAccountName} at {accountSettings.BatchServiceUrl}");
            using (BatchClient client = BatchClient.Open(cred))
            {
                string jobid = TextBox_JobID.Text.Trim();

                CloudJob job = null;
                sb.AppendLine($"GetJob({jobid})");
                try
                {
                    job = await client.JobOperations.GetJobAsync(jobid);
                }
                catch (Exception ex)
                {
                    job = null;
                    sb.AppendLine($"job not found.  jobid=[{jobid}]");
                    sb.AppendLine("job not found exception: " + ex.ToString());
                }

                if (job != null)
                {
                    TimeSpan? jobdur = job.ExecutionInformation.EndTime - job.ExecutionInformation.StartTime;
                    if (jobdur == null)
                    {
                        sb.AppendLine($"job state:{job.State} ");
                    }
                    else
                    {
                        sb.AppendLine($"job state:{job.State} duration: {jobdur}");
                    }

                    foreach (CloudTask t in job.ListTasks())
                    {
                        TimeSpan? dur = t.ExecutionInformation.EndTime - t.ExecutionInformation.StartTime;
                        if (dur == null)
                        {
                            sb.AppendLine($"task: {t.Id} {t.State} start: {t.ExecutionInformation.StartTime} end:{t.ExecutionInformation.EndTime}");
                        }
                        else
                        {
                            sb.AppendLine($"task: {t.Id} {t.State} duration:{dur} start: {t.ExecutionInformation.StartTime} end:{t.ExecutionInformation.EndTime}");
                        }
                    }
                }
            }

            TextBlock_Out.Text = sb.ToString();
        }
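
SampleHelpers.LoadAccountSettings, called by both of the examples above, is not shown here. It can follow the same ConfigurationBuilder pattern the next example uses for its Settings type; the file name accountsettings.json is an assumption:

// Sketch only: binds a JSON settings file to the AccountSettings type used throughout these samples.
public static AccountSettings LoadAccountSettings()
{
    return new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("accountsettings.json")
        .Build()
        .Get<AccountSettings>();
}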
Code example #13
File: Job.cs  Project: yong2dayz/azure-batch-samples
        public static void JobMain(string[] args)
        {
            //Load the configuration
            Settings topNWordsConfiguration = new ConfigurationBuilder()
                                              .SetBasePath(Directory.GetCurrentDirectory())
                                              .AddJsonFile("settings.json")
                                              .Build()
                                              .Get <Settings>();
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 5 == Windows 2016. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "standard_d1_v2",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"));
                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the input data is large enough that copying it to every node via FileStaging
                    // is undesirable, and when the number of tasks is small, since a large number of readers of the blob might get
                    // throttled by Storage, which would lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.
                    string bookFileUri = UploadBookFileToCloudBlob(accountSettings, topNWordsConfiguration.FileName);
                    Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks);

                    for (int i = 1; i <= topNWordsConfiguration.NumberOfTasks; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, string.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     bookFileUri,
                                                                                     topNWordsConfiguration.TopWordCount,
                                                                                     accountSettings.StorageAccountName,
                                                                                     accountSettings.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
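
DeleteContainers is not included in this listing. Mirroring the DeleteIfExistsAsync cleanup used elsewhere in these samples, a sketch that removes the file staging container might be:

// Sketch only: deletes the staging container created by file staging, if one was recorded.
private static void DeleteContainers(AccountSettings accountSettings, string stagingContainer)
{
    CloudStorageAccount account = new CloudStorageAccount(
        new StorageCredentials(accountSettings.StorageAccountName, accountSettings.StorageAccountKey),
        accountSettings.StorageServiceUrl,
        useHttps: true);

    CloudBlobClient client = account.CreateCloudBlobClient();

    if (!string.IsNullOrEmpty(stagingContainer))
    {
        client.GetContainerReference(stagingContainer).DeleteIfExists();
    }
}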
Code example #14
        /// <summary>
        /// Runs the job manager task.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, job: {1} has started...",
                              this.accountName,
                              this.jobId);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials batchSharedKeyCredentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey),
                this.configurationSettings.StorageServiceUrl,
                useHttps: true);

            using (BatchClient batchClient = await BatchClient.OpenAsync(batchSharedKeyCredentials))
            {
                //Construct a container SAS to provide the Batch Service access to the files required to
                //run the mapper and reducer tasks.
                string containerSas = SampleHelpers.ConstructContainerSas(
                    cloudStorageAccount,
                    this.configurationSettings.BlobContainer);

                //
                // Submit mapper tasks.
                //
                await this.SubmitMapperTasksAsync(batchClient, containerSas);

                //
                // Wait for the mapper tasks to complete.
                //
                await this.WaitForMapperTasksToCompleteAsync(batchClient);

                //
                // Create the reducer task.
                //
                await this.SubmitReducerTaskAsync(batchClient, containerSas);

                //
                // Wait for the reducer task to complete.
                //
                string textToUpload = await this.WaitForReducerTaskToCompleteAsync(batchClient);

                //
                // Upload the results of the reducer task to Azure storage for consumption later
                //

                await SampleHelpers.UploadBlobTextAsync(cloudStorageAccount, this.configurationSettings.BlobContainer, Constants.ReducerTaskResultBlobName, textToUpload);

                //The job manager has completed.
                Console.WriteLine("JobManager completed successfully.");
            }
        }
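
The SampleHelpers blob text helpers used by the job manager are thin wrappers over the blob client. A sketch, assuming the container already exists, might be:

// Sketch only: upload/download helpers of the kind referenced above.
public static async Task UploadBlobTextAsync(CloudStorageAccount account, string containerName, string blobName, string text)
{
    CloudBlockBlob blob = account.CreateCloudBlobClient()
        .GetContainerReference(containerName)
        .GetBlockBlobReference(blobName);
    await blob.UploadTextAsync(text);
}

public static async Task<string> DownloadBlobTextAsync(CloudStorageAccount account, string containerName, string blobName)
{
    CloudBlockBlob blob = account.CreateCloudBlobClient()
        .GetContainerReference(containerName)
        .GetBlockBlobReference(blobName);
    return await blob.DownloadTextAsync();
}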
Code example #15
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize        = "standard_d1_v2";
            const int    nodeCount       = 4;
            const int    maxTasksPerNode = 4;
            const int    taskCount       = 32;

            // Ensure there are enough tasks to help avoid hitting some timeout conditions below
            int minimumTaskCount = nodeCount * maxTasksPerNode * 2;

            if (taskCount < minimumTaskCount)
            {
                Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
                Console.WriteLine();

                // Not enough tasks, exit the application
                return;
            }

            // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
            // values to simulate variable task duration
            const int minPings = 30;
            const int maxPings = 60;

            const string poolId = "ParallelTasksSamplePool";
            const string jobId  = "ParallelTasksSampleJob";

            // Amount of time to wait before timing out (potentially) long-running tasks
            TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(
                    batchClient,
                    poolId,
                    nodeSize,
                    nodeCount,
                    maxTasksPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // The job's tasks ping localhost a random number of times between minPings and maxPings.
                // Adjust the minPings/maxPings values above to experiment with different task durations.
                Random           rand  = new Random();
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= taskCount; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
                // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be
                // scheduled to execute on a compute node as soon as any node has reached the Idle state. Because the focus of this sample
                // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to
                // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for
                // parallelization.
                await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);

                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Pause again to wait until *all* nodes are running tasks
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Print out task assignment information.
                Console.WriteLine();
                await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);

                Console.WriteLine();

                // Pause execution while we wait for all of the tasks to complete
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        longTaskDurationLimit);
                }
                catch (TimeoutException e)
                {
                    Console.WriteLine(e.ToString());
                }

                stopwatch.Stop();

                // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
                // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
                // amount of data transferred, lowering your query response times and increasing performance.
                ODATADetailLevel             detail   = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
                IPagedEnumerable <CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);

                // Get a collection of the completed tasks sorted by the compute nodes on which they executed
                List <CloudTask> completedTasks = allTasks
                                                  .Where(t => t.State == TaskState.Completed)
                                                  .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
                                                  .ToList();

                // Print the completed task information
                Console.WriteLine();
                Console.WriteLine("Completed tasks:");
                string lastNodeId = string.Empty;
                foreach (CloudTask task in completedTasks)
                {
                    if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
                    {
                        Console.WriteLine();
                        Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
                    }

                    lastNodeId = task.ComputeNodeInformation.ComputeNodeId;

                    Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                }

                // Get a collection of the uncompleted tasks which may exist if the TaskMonitor timeout was hit
                List <CloudTask> uncompletedTasks = allTasks
                                                    .Where(t => t.State != TaskState.Completed)
                                                    .OrderBy(t => t.Id)
                                                    .ToList();

                // Print a list of uncompleted tasks, if any
                Console.WriteLine();
                Console.WriteLine("Uncompleted tasks:");
                Console.WriteLine();
                if (uncompletedTasks.Any())
                {
                    foreach (CloudTask task in uncompletedTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                    }
                }
                else
                {
                    Console.WriteLine("\t<none>");
                }

                // Print some summary information
                Console.WriteLine();
                Console.WriteLine("             Nodes: " + nodeCount);
                Console.WriteLine("         Node size: " + nodeSize);
                Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
                Console.WriteLine("             Tasks: " + tasks.Count);
                Console.WriteLine("          Duration: " + stopwatch.Elapsed);
                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
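
ArticleHelpers.WaitForPoolToReachStateAsync is not shown in this listing. A simple polling sketch over the pool's AllocationState, assuming a 10-second poll interval, might look like:

// Sketch only: polls the pool until it reaches the target allocation state or the timeout expires.
private static async Task WaitForPoolToReachStateAsync(
    BatchClient batchClient, string poolId, AllocationState targetState, TimeSpan timeout)
{
    DateTime deadline = DateTime.UtcNow.Add(timeout);

    while (DateTime.UtcNow < deadline)
    {
        CloudPool pool = await batchClient.PoolOperations.GetPoolAsync(poolId);
        if (pool.AllocationState == targetState)
        {
            return;
        }

        await Task.Delay(TimeSpan.FromSeconds(10));
    }

    throw new TimeoutException($"Pool {poolId} did not reach state {targetState} within {timeout}.");
}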
Code example #16
 private void Shutdown()
 {
     SampleHelpers.RunShutDownTasks(this);
 }
Code example #17
File: Program.cs  Project: carlosap/CSharp
        public static async Task MainAsync()
        {
            const string poolId = "MultiInstanceSamplePool";
            const string jobId  = "MultiInstanceSampleJob";
            const string taskId = "MultiInstanceSampleTask";

            const int numberOfNodes = 3;

            // The application package and version to deploy to the compute nodes.
            // It should contain your MPIHelloWorld sample MS-MPI program:
            // https://blogs.technet.microsoft.com/windowshpc/2015/02/02/how-to-compile-and-run-a-simple-ms-mpi-program/
            // And the MSMpiSetup.exe installer:
            // https://www.microsoft.com/download/details.aspx?id=52981
            // Then upload it as an application package:
            // https://azure.microsoft.com/documentation/articles/batch-application-packages/
            const string appPackageId      = "MPIHelloWorld";
            const string appPackageVersion = "1.0";

            TimeSpan timeout = TimeSpan.FromMinutes(30);

            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create the pool of compute nodes and the job to which we add the multi-instance task.
                await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
                await CreateJobAsync(batchClient, jobId, poolId);

                // Create the multi-instance task. The MultiInstanceSettings property (configured
                // below) tells Batch to create one primary and several subtasks, the total number
                // of which matches the number of instances you specify in the MultiInstanceSettings.
                // This main task's command line is the "application command," and is executed *only*
                // by the primary, and only after the primary and all subtasks have executed the
                // "coordination command" (the MultiInstanceSettings.CoordinationCommandLine).
                CloudTask multiInstanceTask = new CloudTask(id: taskId,
                                                            commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\MPIHelloWorld.exe");

                // Configure the task's MultiInstanceSettings. Specify the number of nodes
                // to allocate to the multi-instance task, and the "coordination command".
                // The CoordinationCommandLine is run by the primary and subtasks, and is
                // used in this sample to start SMPD on the compute nodes.
                multiInstanceTask.MultiInstanceSettings =
                    new MultiInstanceSettings(@"cmd /c start cmd /c smpd.exe -d", numberOfNodes);

                // Submit the task to the job. Batch will take care of creating one primary and
                // enough subtasks to match the total number of nodes allocated to the task,
                // and schedule them for execution on the nodes.
                Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
                await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

                // Get the "bound" version of the multi-instance task.
                CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

                // We use a TaskStateMonitor to monitor the state of our tasks. In this case,
                // we will wait for the task to reach the Completed state.
                Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
                TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                await taskStateMonitor.WhenAll(new List <CloudTask> {
                    mainTask
                }, TaskState.Completed, timeout);

                // Refresh the task to obtain up-to-date property values from Batch, such as
                // its current state and information about the node on which it executed.
                await mainTask.RefreshAsync();

                string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                Console.WriteLine();
                Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
                Console.WriteLine("---- stdout.txt ----");
                Console.WriteLine(stdOut);
                Console.WriteLine("---- stderr.txt ----");
                Console.WriteLine(stdErr);

                // Need to delay a bit to allow the Batch service to mark the subtasks as Complete
                TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
                Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
                await Task.Delay(subtaskTimeout);

                Console.WriteLine();
                Console.WriteLine("---- Subtask information ----");

                // Obtain the collection of subtasks for the multi-instance task, and print
                // some information about each.
                IPagedEnumerable <SubtaskInformation> subtasks = mainTask.ListSubtasks();
                await subtasks.ForEachAsync(async (subtask) =>
                {
                    Console.WriteLine("subtask: " + subtask.Id);
                    Console.WriteLine("\texit code: " + subtask.ExitCode);

                    if (subtask.State == SubtaskState.Completed)
                    {
                        // Obtain the file from the node on which the subtask executed. For normal CloudTasks,
                        // we could simply call CloudTask.GetNodeFile(Constants.StandardOutFileName), but the
                        // subtasks are not "normal" tasks in Batch, and thus must be handled differently.
                        ComputeNode node =
                            await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId,
                                                                                 subtask.ComputeNodeInformation.ComputeNodeId);

                        string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                        string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                        NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                        NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                        stdOut = await stdOutFile.ReadAsStringAsync();
                        stdErr = await stdErrFile.ReadAsStringAsync();

                        Console.WriteLine($"\tnode: " + node.Id);
                        Console.WriteLine("\tstdout.txt: " + stdOut);
                        Console.WriteLine("\tstderr.txt: " + stdErr);
                    }
                    else
                    {
                        Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
                    }
                });

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(jobId);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(poolId);
                }
            }
        }
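
The CreatePoolAsync call at the top of this example is not included in the listing. For a multi-instance (MPI) task the pool must have inter-node communication enabled, and the MS-MPI installer from the application package is typically run by a start task; a sketch along those lines (the installer may additionally need an elevated user identity):

// Sketch only: a pool suitable for the multi-instance task above.
private static async Task CreatePoolAsync(
    BatchClient batchClient, string poolId, int numberOfNodes, string appPackageId, string appPackageVersion)
{
    CloudPool pool = batchClient.PoolOperations.CreatePool(
        poolId: poolId,
        virtualMachineSize: "standard_d1_v2",
        targetDedicatedComputeNodes: numberOfNodes,
        cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"));

    // Required for multi-instance tasks.
    pool.InterComputeNodeCommunicationEnabled = true;

    pool.ApplicationPackageReferences = new List<ApplicationPackageReference>
    {
        new ApplicationPackageReference { ApplicationId = appPackageId, Version = appPackageVersion }
    };

    // Install MS-MPI on each node as it joins the pool.
    pool.StartTask = new StartTask
    {
        CommandLine = $"cmd /c %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\MSMpiSetup.exe -unattend -force",
        WaitForSuccess = true
    };

    await pool.CommitAsync();
}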
Code example #18
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize         = "standard_d1_v2";
            const int    nodeCount        = 1;
            const int    taskSlotsPerNode = 4;

            // Adjust the task count to experiment with different list operation query durations
            const int taskCount = 5000;

            const string poolId = "EfficientListQueriesSamplePool";
            const string jobId  = "EfficientListQueriesSampleJob";

            var accountSettings = SampleHelpers.LoadAccountSettings();

            // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(
                    batchClient,
                    poolId,
                    nodeSize,
                    nodeCount,
                    taskSlotsPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // Configure the tasks we'll be querying. Each task simply echoes the node's
                // name and then exits. We create "large" tasks by setting an environment
                // variable for each that is 2048 bytes in size. This is done simply to
                // increase response time when querying the batch service to more clearly
                // demonstrate query durations.
                List <CloudTask>          tasks = new List <CloudTask>();
                List <EnvironmentSetting> environmentSettings = new List <EnvironmentSetting>();
                environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
                for (int i = 1; i < taskCount + 1; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(5, '0');
                    string    taskCommandLine = "cmd /c echo %COMPUTERNAME%";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    task.EnvironmentSettings = environmentSettings;
                    tasks.Add(task);
                }

                Console.WriteLine();
                Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                stopwatch.Stop();
                Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
                Console.ReadLine();
                Console.WriteLine();
                stopwatch.Reset();

                // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
                // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
                // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
                // transferred, lowering your query response times (potentially greatly).

                // Get a subset of the tasks based on different task states
                ODATADetailLevel detail = new ODATADetailLevel();
                detail.FilterClause = "state eq 'active'";
                detail.SelectClause = "id,state";
                await QueryTasksAsync(batchClient, job.Id, detail);

                detail.FilterClause = "state eq 'running'";
                await QueryTasksAsync(batchClient, job.Id, detail);

                detail.FilterClause = "state eq 'completed'";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, but limit the properties returned to task id and state only
                detail.FilterClause = null;
                detail.SelectClause = "id,state";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, include id and state, also include the inflated environment settings property
                detail.SelectClause = "id,state,environmentSettings";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, include all standard properties, and expand the statistics
                detail.ExpandClause = "stats";
                detail.SelectClause = null;
                await QueryTasksAsync(batchClient, job.Id, detail);

                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
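
GetBigString and QueryTasksAsync are small helpers not shown above. Sketches consistent with how they are used might be:

// Sketch only: builds the oversized environment variable value used to inflate each task.
private static string GetBigString(int size)
{
    return new string('a', size);
}

// Sketch only: runs a task list query with the supplied detail level and reports how long it took.
private static async Task QueryTasksAsync(BatchClient batchClient, string jobId, ODATADetailLevel detail)
{
    Stopwatch stopwatch = Stopwatch.StartNew();
    List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId, detail).ToListAsync();
    stopwatch.Stop();

    Console.WriteLine("{0} tasks retrieved in {1} (filter: '{2}', select: '{3}', expand: '{4}')",
        tasks.Count, stopwatch.Elapsed, detail.FilterClause, detail.SelectClause, detail.ExpandClause);
}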
Code example #19
        public static async Task <CloudBlobContainer> Run(
            BatchClient batchClient,
            CloudStorageAccount linkedStorageAccount,
            string poolId,
            int nodeCount,
            string jobId)
        {
            const string appPackageId      = "PersistOutputsTask";
            const string appPackageVersion = "1.0";

            // Create and configure an unbound pool.
            CloudPool pool = batchClient.PoolOperations.CreatePool(
                poolId: poolId,
                virtualMachineSize: "standard_d1_v2",
                targetDedicatedComputeNodes: nodeCount,
                cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"));

            // Specify the application and version to deploy to the compute nodes. You must
            // first build PersistOutputsTask, then upload it as an application package.
            // See https://azure.microsoft.com/documentation/articles/batch-application-packages/
            pool.ApplicationPackageReferences = new List <ApplicationPackageReference>
            {
                new ApplicationPackageReference
                {
                    ApplicationId = appPackageId,
                    Version       = appPackageVersion
                }
            };

            // Commit the pool to the Batch service
            await GettingStartedCommon.CreatePoolIfNotExistAsync(batchClient, pool);

            CloudJob job = batchClient.JobOperations.CreateJob(jobId, new PoolInformation {
                PoolId = poolId
            });

            // Create the blob storage container for the outputs.
            await job.PrepareOutputStorageAsync(linkedStorageAccount);

            // Create an environment variable on the compute nodes that the
            // task application can reference when persisting its outputs.
            string             containerName = job.OutputStorageContainerName();
            CloudBlobContainer container     = linkedStorageAccount.CreateCloudBlobClient().GetContainerReference(containerName);
            string             containerUrl  = job.GetOutputStorageContainerUrl(linkedStorageAccount);

            job.CommonEnvironmentSettings = new[] { new EnvironmentSetting("JOB_CONTAINER_URL", containerUrl) };

            // Commit the job to the Batch service
            await job.CommitAsync();

            Console.WriteLine($"Created job {jobId}");

            // Obtain the bound job from the Batch service
            await job.RefreshAsync();

            IEnumerable <CloudTask> tasks = Enumerable.Range(1, 20).Select(i =>
                                                                           new CloudTask(i.ToString().PadLeft(3, '0'), $"cmd /c %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\PersistOutputsTask.exe")
                                                                           );

            // Add the tasks to the job; the tasks are automatically
            // scheduled for execution on the nodes by the Batch service.
            await job.AddTaskAsync(tasks);

            Console.WriteLine($"All tasks added to job {job.Id}");
            Console.WriteLine();

            Console.WriteLine($"Downloading outputs to {Directory.GetCurrentDirectory()}");

            foreach (CloudTask task in job.CompletedTasks())
            {
                if (task.ExecutionInformation.Result != TaskExecutionResult.Success)
                {
                    Console.WriteLine($"Task {task.Id} failed");
                    Console.WriteLine(SampleHelpers.GetFailureInfoDetails(task.ExecutionInformation.FailureInformation));
                }
                else
                {
                    Console.WriteLine($"Task {task.Id} completed successfully");
                }

                foreach (OutputFileReference output in task.OutputStorage(linkedStorageAccount).ListOutputs(TaskOutputKind.TaskOutput))
                {
                    Console.WriteLine($"output file: {output.FilePath}");
                    await output.DownloadToFileAsync($"{jobId}-{output.FilePath}", System.IO.FileMode.Create);
                }
            }

            return(container);
        }
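
The PersistOutputsTask executable deployed via the application package is not part of this listing. On the compute node it can use the Microsoft.Azure.Batch.Conventions.Files library together with the JOB_CONTAINER_URL environment variable set above, roughly as follows:

// Sketch only: task-side persistence of an output file using the file conventions library.
private static async Task PersistOutputAsync()
{
    string containerUrl = Environment.GetEnvironmentVariable("JOB_CONTAINER_URL");
    string taskId = Environment.GetEnvironmentVariable("AZ_BATCH_TASK_ID");

    var taskStorage = new TaskOutputStorage(new Uri(containerUrl), taskId);

    // Produce some output locally, then persist it under TaskOutputKind.TaskOutput so the submitter
    // can enumerate it via task.OutputStorage(...).ListOutputs(TaskOutputKind.TaskOutput).
    File.WriteAllText("output.txt", "Hello from " + taskId);
    await taskStorage.SaveAsync(TaskOutputKind.TaskOutput, "output.txt");
}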
Code example #20
File: Program.cs  Project: carlosap/CSharp
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize  = "standard_d1_v2";
            const string osFamily  = "5";
            const int    nodeCount = 1;

            const string poolId = "TaskDependenciesSamplePool";
            const string jobId  = "TaskDependenciesSampleJob";

            // Amount of time to wait before timing out long-running tasks.
            TimeSpan timeLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            try
            {
                using (BatchClient batchClient = BatchClient.Open(cred))
                {
                    // Create the pool.
                    Console.WriteLine("Creating pool [{0}]...", poolId);
                    CloudPool unboundPool =
                        batchClient.PoolOperations.CreatePool(
                            poolId: poolId,
                            cloudServiceConfiguration: new CloudServiceConfiguration(osFamily),
                            virtualMachineSize: nodeSize,
                            targetDedicatedComputeNodes: nodeCount);
                    await unboundPool.CommitAsync();

                    // Create the job and specify that it uses tasks dependencies.
                    Console.WriteLine("Creating job [{0}]...", jobId);
                    CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId,
                                                                              new PoolInformation {
                        PoolId = poolId
                    });

                    // IMPORTANT: This is REQUIRED for using task dependencies.
                    unboundJob.UsesTaskDependencies = true;

                    await unboundJob.CommitAsync();

                    // Create the collection of tasks that will be added to the job.
                    List <CloudTask> tasks = new List <CloudTask>
                    {
                        // 'Rain' and 'Sun' don't depend on any other tasks
                        new CloudTask("Rain", "cmd.exe /c echo Rain"),
                        new CloudTask("Sun", "cmd.exe /c echo Sun"),

                        // Task 'Flowers' depends on completion of both 'Rain' and 'Sun'
                        // before it is run.
                        new CloudTask("Flowers", "cmd.exe /c echo Flowers")
                        {
                            DependsOn = TaskDependencies.OnIds("Rain", "Sun")
                        },

                        // Tasks 1, 2, and 3 don't depend on any other tasks. Because
                        // we will be using them for a task range dependency, we must
                        // specify string representations of integers as their ids.
                        new CloudTask("1", "cmd.exe /c echo 1"),
                        new CloudTask("2", "cmd.exe /c echo 2"),
                        new CloudTask("3", "cmd.exe /c echo 3"),

                        // Task A is the parent task.
                        new CloudTask("A", "cmd.exe /c echo A")
                        {
                            // Specify exit conditions for task A and their dependency actions.
                            ExitConditions = new ExitConditions
                            {
                                // If task A exits with a pre-processing error, block any downstream tasks (in this example, task B).
                                PreProcessingError = new ExitOptions
                                {
                                    DependencyAction = DependencyAction.Block
                                },
                                // If task A exits with the specified error codes, block any downstream tasks (in this example, task B).
                                ExitCodes = new List <ExitCodeMapping>
                                {
                                    new ExitCodeMapping(10, new ExitOptions()
                                    {
                                        DependencyAction = DependencyAction.Block
                                    }),
                                    new ExitCodeMapping(20, new ExitOptions()
                                    {
                                        DependencyAction = DependencyAction.Block
                                    })
                                },
                                // If task A succeeds or fails with any other error, any downstream tasks become eligible to run
                                // (in this example, task B).
                                Default = new ExitOptions
                                {
                                    DependencyAction = DependencyAction.Satisfy
                                }
                            }
                        },
                        // Task B depends on task A. Whether it becomes eligible to run depends on how task A exits.
                        new CloudTask("B", "cmd.exe /c echo B")
                        {
                            DependsOn = TaskDependencies.OnId("A")
                        },
                    };

                    // Add the tasks to the job.
                    await batchClient.JobOperations.AddTaskAsync(jobId, tasks);

                    // Pause execution while we wait for all of the tasks to reach the
                    // Completed state.
                    Console.WriteLine("Waiting for task completion...");
                    Console.WriteLine();
                    CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        timeLimit);

                    Console.WriteLine("All tasks completed successfully.");
                    Console.WriteLine();
                }
            }
            catch (Exception e)
            {
                Console.WriteLine();
                Console.WriteLine("An exception occurred.");
                Console.WriteLine(e.Message);
                Console.WriteLine(e.StackTrace);
            }
            finally
            {
                using (BatchClient batchClient = BatchClient.Open(cred))
                {
                    CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

                    // Clean up the resources we've created in the Batch account
                    Console.Write("Delete job? [yes] no: ");
                    string response = Console.ReadLine().ToLower();
                    if (response != "n" && response != "no")
                    {
                        await batchClient.JobOperations.DeleteJobAsync(job.Id);
                    }

                    Console.Write("Delete pool? [yes] no: ");
                    response = Console.ReadLine().ToLower();
                    if (response != "n" && response != "no")
                    {
                        await batchClient.PoolOperations.DeletePoolAsync(poolId);
                    }
                }
            }
        }
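The tasks "1", "2", and "3" above are created specifically so they can participate in a task range dependency, but the snippet stops before adding the dependent task. A minimal sketch is below; the task id "Final" is illustrative, and TaskDependencies.OnIdRange is the Batch .NET SDK helper for depending on a numeric id range.

                    // Sketch (not part of the original sample): a task that becomes eligible to run
                    // only after tasks "1" through "3" have completed. It would be added to the
                    // 'tasks' list above before calling AddTaskAsync.
                    tasks.Add(new CloudTask("Final", "cmd.exe /c echo Final")
                    {
                        DependsOn = TaskDependencies.OnIdRange(1, 3)
                    });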
コード例 #21
0
        private void Vkctrl_VulkanInitialized(object sender, SharpVulkanWpf.VulkanEventArgs args)
        {
            var device = args.Device;
            var commandPoolCreateInfo = new VkCommandPoolCreateInfo()
            {
                queueFamilyIndex = args.GraphicsQueueIndex,
                flags            = VkCommandPoolCreateFlags.VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
            };

            VulkanAPI.vkCreateCommandPool(device, ref commandPoolCreateInfo, out m_commandPool);

            var allocateInfo = new VkCommandBufferAllocateInfo()
            {
                commandBufferCount = 2,
                commandPool        = m_commandPool,
            };

            VulkanAPI.vkAllocateCommandBuffers(device, ref allocateInfo, out m_commandBuffers);

            // Create the vertex/index buffers.
            CreateCubeModel(device, args.PhysicalDevice);

            // Prepare the uniform (constant) buffer.
            CreateUniformBuffer(device, args.PhysicalDevice);

            // Prepare the descriptors.
            PrepareDescriptor(device);

            // Build the vertex input state.
            m_vertexInputState = CreateVertexInputState();
            // Primitive topology info.
            m_inputAssemblyState = new VkPipelineInputAssemblyStateCreateInfo()
            {
                topology = VkPrimitiveTopology.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
            };
            // Build the shader stage info.
            m_shaderStages = new VkPipelineShaderStageCreateInfo[2]
            {
                SampleHelpers.CreateShader(device, "resource/simpleVS.spv", VkShaderStageFlagBits.VK_SHADER_STAGE_VERTEX_BIT),
                SampleHelpers.CreateShader(device, "resource/simpleFS.spv", VkShaderStageFlagBits.VK_SHADER_STAGE_FRAGMENT_BIT),
            };
            // Build the rasterization state.
            m_rasterizationState = new VkPipelineRasterizationStateCreateInfo();
            // Build the depth-stencil state.
            m_depthStencilState = new VkPipelineDepthStencilStateCreateInfo();
            m_depthStencilState.depthTestEnable  = true;
            m_depthStencilState.depthWriteEnable = true;
            m_depthStencilState.depthCompareOp   = VkCompareOp.VK_COMPARE_OP_LESS_OR_EQUAL;

            // Build the color blend state.
            m_colorBlendState = new VkPipelineColorBlendStateCreateInfo();
            var colorBlendAttachment = new VkPipelineColorBlendAttachmentState();

            m_colorBlendState.attachments = new[] { colorBlendAttachment };

            // Build the multisample state.
            m_multisampleState = new VkPipelineMultisampleStateCreateInfo();

            // Build the pipeline layout.
            m_pipelineLayout = m_resourceManager.CreatePipelineLayout(device, m_descriptorSetLayout);

            // Build the viewport state.
            m_viewportState = CreateViewportState();

            // Build the graphics pipeline.
            m_graphicsPipeline = CreateGraphicsPipeline(device, vkctrl.GetControlRenderPass());
        }
コード例 #22
0
        public async static Task JobMain(string[] args)
        {
            //Load the configuration
            Settings        topNWordsConfiguration = Settings.Default;
            AccountSettings accountSettings        = AccountSettings.Default;

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 4 == Windows Server 2012 R2. You can learn more about OS families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "small",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));

                List <string> files = new List <string>
                {
                    Path.Combine(BatchStartTaskFolderName, BatchStartTaskTelemetryRunnerName),
                };

                files.AddRange(AIFilesToUpload);

                var resourceHelperTask = SampleHelpers.UploadResourcesAndCreateResourceFileReferencesAsync(
                    cloudStorageAccount,
                    AIBlobConatinerName,
                    files);

                List <ResourceFile> resourceFiles = await resourceHelperTask;

                pool.StartTask = new StartTask()
                {
                    CommandLine   = string.Format("cmd /c {0}", BatchStartTaskTelemetryRunnerName),
                    ResourceFiles = resourceFiles
                };

                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                try
                {
                    await GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool);
                }
                catch (AggregateException ae)
                {
                    // Go through all exceptions and dump useful information
                    ae.Handle(x =>
                    {
                        Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;

                            Console.WriteLine(be.ToString());
                            Console.WriteLine();
                        }
                        else
                        {
                            Console.WriteLine(x);
                        }

                        // can't continue without a pool
                        return(false);
                    });
                }
                catch (BatchException be)
                {
                    Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                    Console.WriteLine(be.ToString());
                    Console.WriteLine();
                }

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    await unboundJob.CommitAsync();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // Upload application insights assemblies
                    List <FileToStage> aiStagedFiles = new List <FileToStage>();
                    foreach (string aiFile in AIFilesToUpload)
                    {
                        aiStagedFiles.Add(new FileToStage(aiFile, stagingStorageAccount));
                    }

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    string[] documents = Directory.GetFiles(topNWordsConfiguration.DocumentsRootPath);
                    await SampleHelpers.UploadResourcesAsync(cloudStorageAccount, BooksContainerName, documents);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(documents.Length);

                    for (int i = 0; i < documents.Length; i++)
                    {
                        // Point the task at the blob uploaded to the books container above, rather than at a
                        // hard-coded storage account (assumes blobs are named after their source files, as above).
                        string documentUrl = cloudStorageAccount.BlobEndpoint.ToString().TrimEnd('/')
                                             + "/" + BooksContainerName + "/" + Path.GetFileName(documents[i]);

                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     documentUrl,
                                                                                     topNWordsConfiguration.TopWordCount,
                                                                                     accountSettings.StorageAccountName,
                                                                                     accountSettings.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                        };

                        foreach (FileToStage stagedFile in aiStagedFiles)
                        {
                            task.FilesToStage.Add(stagedFile);
                        }

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);

                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
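DeleteContainers is called in the finally block above but its body is not part of this snippet. A minimal sketch follows, under the assumption that it only removes the staging container captured from the file staging artifacts; the original may also clean up the books and Application Insights containers.

        // Sketch (not part of the original sample): delete the blob container created by file staging.
        private static void DeleteContainers(AccountSettings accountSettings, string stagingContainer)
        {
            CloudStorageAccount storageAccount = new CloudStorageAccount(
                new StorageCredentials(accountSettings.StorageAccountName, accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient();

            if (!string.IsNullOrEmpty(stagingContainer))
            {
                Console.WriteLine("Deleting container: {0}", stagingContainer);
                blobClient.GetContainerReference(stagingContainer).DeleteIfExists();
            }
        }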
コード例 #23
0
        public static async Task MainAsync()
        {
            const string poolId = "MultiInstanceSamplePool";
            const string jobId  = "MultiInstanceSampleJob";
            const string taskId = "MultiInstanceSampleTask";

            const int numberOfNodes = 5;


            // Name of the application package uploaded to Azure together with MSMpiSetup.
            const string appPackageId      = "Parallel";
            const string appPackageVersion = "1.0";

            TimeSpan timeout = TimeSpan.FromMinutes(15);

            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            // Batch account credentials, configured so we can connect to the account.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create the pool of compute nodes and the job to which the multi-instance task will be added.
                await CreatePoolAsync(batchClient, poolId, numberOfNodes, appPackageId, appPackageVersion);
                await CreateJobAsync(batchClient, jobId, poolId);

                // Batch creates one primary task and several subtasks.
                CloudTask multiInstanceTask = new CloudTask(id: taskId,
                                                            commandline: $"cmd /c mpiexec.exe -c 1 -wdir %AZ_BATCH_TASK_SHARED_DIR% %AZ_BATCH_APP_PACKAGE_{appPackageId.ToUpper()}#{appPackageVersion}%\\ParallelMpiApp.exe");

                // SPMD command: multiple independent processes run the same program simultaneously.
                multiInstanceTask.MultiInstanceSettings =
                    new MultiInstanceSettings(@"cmd /c start cmd /c smpd.exe -d", numberOfNodes);

                // Submit the task; one primary and several subtasks are created to match the
                // number of nodes, and they are scheduled to run on those nodes.
                Console.WriteLine($"Adding task [{taskId}] to job [{jobId}]...");
                await batchClient.JobOperations.AddTaskAsync(jobId, multiInstanceTask);

                // Obtain the bound version of the task from the service.
                CloudTask mainTask = await batchClient.JobOperations.GetTaskAsync(jobId, taskId);

                // Monitor the task state and wait until the task has completed.
                Console.WriteLine($"Awaiting task completion, timeout in {timeout}...");
                TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                await taskStateMonitor.WhenAll(new List <CloudTask> {
                    mainTask
                }, TaskState.Completed, timeout);

                // Refresh the task.
                await mainTask.RefreshAsync();

                string stdOut = mainTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                string stdErr = mainTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                Console.WriteLine();
                Console.WriteLine($"Main task [{mainTask.Id}] is in state [{mainTask.State}] and ran on compute node [{mainTask.ComputeNodeInformation.ComputeNodeId}]:");
                Console.WriteLine("---- stdout.txt ----");
                Console.WriteLine(stdOut);
                Console.WriteLine("---- stderr.txt ----");
                Console.WriteLine(stdErr);

                // Give the subtasks a few seconds to finish.
                TimeSpan subtaskTimeout = TimeSpan.FromSeconds(10);
                Console.WriteLine($"Main task completed, waiting {subtaskTimeout} for subtasks to complete...");
                await Task.Delay(subtaskTimeout);

                Console.WriteLine();
                Console.WriteLine("---- Subtask information ----");

                // Enumerate the subtasks and print information about each one.
                IPagedEnumerable <SubtaskInformation> subtasks = mainTask.ListSubtasks();
                await subtasks.ForEachAsync(async (subtask) =>
                {
                    Console.WriteLine("subtask: " + subtask.Id);
                    Console.WriteLine("\texit code: " + subtask.ExitCode);

                    if (subtask.State == SubtaskState.Completed)
                    {
                        // Retrieve the output files from the node.
                        ComputeNode node =
                            await batchClient.PoolOperations.GetComputeNodeAsync(subtask.ComputeNodeInformation.PoolId,
                                                                                 subtask.ComputeNodeInformation.ComputeNodeId);

                        string outPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardOutFileName;
                        string errPath = subtask.ComputeNodeInformation.TaskRootDirectory + "\\" + Constants.StandardErrorFileName;

                        NodeFile stdOutFile = await node.GetNodeFileAsync(outPath.Trim('\\'));
                        NodeFile stdErrFile = await node.GetNodeFileAsync(errPath.Trim('\\'));

                        stdOut = await stdOutFile.ReadAsStringAsync();
                        stdErr = await stdErrFile.ReadAsStringAsync();

                        Console.WriteLine($"\tnode: " + node.Id);
                        Console.WriteLine("\tstdout.txt: " + stdOut);
                        Console.WriteLine("\tstderr.txt: " + stdErr);
                    }
                    else
                    {
                        Console.WriteLine($"\tSubtask {subtask.Id} is in state {subtask.State}");
                    }
                });

                // Clean up the resources we created (pool and job) so we don't have to do it manually.
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(jobId);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(poolId);
                }
            }
        }
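CreatePoolAsync and CreateJobAsync are invoked above but not included in this snippet. The sketch below is modeled on the pool and job creation code in the other Batch examples on this page; the VM size, OS family, and the choice to enable inter-node communication and limit each node to one task slot are assumptions (multi-instance MPI tasks need both), and the start task that would install MSMpiSetup from the application package is omitted.

        // Sketch (not part of the original sample): pool and job creation for the multi-instance task.
        private static async Task CreatePoolAsync(BatchClient batchClient, string poolId, int numberOfNodes,
                                                  string appPackageId, string appPackageVersion)
        {
            CloudPool pool = batchClient.PoolOperations.CreatePool(
                poolId: poolId,
                virtualMachineSize: "standard_d1_v2",                                    // assumed size
                cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "5"), // assumed OS family
                targetDedicatedComputeNodes: numberOfNodes);

            // MPI tasks need the nodes to communicate with each other, one task slot per node.
            pool.InterComputeNodeCommunicationEnabled = true;
            pool.MaxTasksPerComputeNode = 1;

            // Deploy the application package referenced by the mpiexec command line above.
            pool.ApplicationPackageReferences = new List<ApplicationPackageReference>
            {
                new ApplicationPackageReference { ApplicationId = appPackageId, Version = appPackageVersion }
            };

            await pool.CommitAsync();
        }

        private static async Task CreateJobAsync(BatchClient batchClient, string jobId, string poolId)
        {
            CloudJob job = batchClient.JobOperations.CreateJob(jobId, new PoolInformation { PoolId = poolId });
            await job.CommitAsync();
        }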
コード例 #24
0
ファイル: OutputFilesExample.cs プロジェクト: carlosap/CSharp
        /// <summary>
        /// Runs a series of tasks, using the OutputFiles feature to upload the tasks' output files to a container.
        /// Then downloads the files from the container to the local machine.
        /// </summary>
        public static async Task <CloudBlobContainer> Run(
            BatchClient batchClient,
            CloudStorageAccount storageAccount,
            string poolId,
            int nodeCount,
            string jobId)
        {
            const string containerName = "outputfilescontainer";

            await CreatePoolAsync(batchClient, poolId, nodeCount);

            CloudJob job = batchClient.JobOperations.CreateJob(jobId, new PoolInformation {
                PoolId = poolId
            });

            CloudBlobContainer container = storageAccount.CreateCloudBlobClient().GetContainerReference(containerName);
            await container.CreateIfNotExistsAsync();

            string containerSas = container.GetSharedAccessSignature(new SharedAccessBlobPolicy()
            {
                Permissions            = SharedAccessBlobPermissions.Write,
                SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddDays(1)
            });
            string containerUrl = container.Uri.AbsoluteUri + containerSas;

            // Commit the job to the Batch service
            await job.CommitAsync();

            Console.WriteLine($"Created job {jobId}");

            // Obtain the bound job from the Batch service
            await job.RefreshAsync();

            // Create a series of simple tasks which dump the task environment to a file and then write random values to a text file
            IEnumerable <CloudTask> tasksToAdd = Enumerable.Range(1, 20).Select(i =>
            {
                var taskId = i.ToString().PadLeft(3, '0');
                return(new CloudTask(taskId, "cmd /v:ON /c \"echo off && set && (FOR /L %i IN (1,1,100000) DO (ECHO !RANDOM!)) > output.txt\"")
                {
                    OutputFiles = new List <OutputFile>
                    {
                        new OutputFile(
                            filePattern: @"..\std*.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(
                                                                       containerUrl: containerUrl,
                                                                       path: taskId)),
                            uploadOptions: new OutputFileUploadOptions(
                                uploadCondition: OutputFileUploadCondition.TaskCompletion)),
                        new OutputFile(
                            filePattern: @"output.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(
                                                                       containerUrl: containerUrl,
                                                                       path: taskId + @"\output.txt")),
                            uploadOptions: new OutputFileUploadOptions(
                                uploadCondition: OutputFileUploadCondition.TaskCompletion)),
                    }
                });
            }
                                                                                );

            // Add the tasks to the job; the tasks are automatically
            // scheduled for execution on the nodes by the Batch service.
            await job.AddTaskAsync(tasksToAdd);

            Console.WriteLine($"All tasks added to job {job.Id}");
            Console.WriteLine();

            Console.WriteLine($"Downloading outputs to {Directory.GetCurrentDirectory()}");

            foreach (CloudTask task in job.CompletedTasks())
            {
                if (task.ExecutionInformation.Result != TaskExecutionResult.Success)
                {
                    Console.WriteLine($"Task {task.Id} failed");
                    Console.WriteLine(SampleHelpers.GetFailureInfoDetails(task.ExecutionInformation.FailureInformation));
                }
                else
                {
                    Console.WriteLine($"Task {task.Id} completed successfully");
                }

                CloudBlobDirectory directory = container.GetDirectoryReference(task.Id);
                Directory.CreateDirectory(task.Id);
                foreach (var blobInDirectory in directory.ListBlobs())
                {
                    CloudBlockBlob blockBlob = blobInDirectory as CloudBlockBlob;
                    Console.WriteLine($"  {blockBlob.Name}");
                    await blockBlob.DownloadToFileAsync(blockBlob.Name, FileMode.Create);
                }
            }

            return(container);
        }
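A hedged usage sketch for the method above; the credentials, storage connection string, and pool/job ids are placeholders rather than values from the sample.

        // Sketch (not part of the original sample): invoking Run from a console entry point.
        public static async Task MainAsync()
        {
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                "https://<account>.<region>.batch.azure.com",   // placeholder
                "<batch-account-name>",                         // placeholder
                "<batch-account-key>");                         // placeholder

            CloudStorageAccount storageAccount = CloudStorageAccount.Parse(
                "DefaultEndpointsProtocol=https;AccountName=<storage-account>;AccountKey=<storage-key>");

            using (BatchClient batchClient = BatchClient.Open(credentials))
            {
                CloudBlobContainer outputContainer = await Run(
                    batchClient,
                    storageAccount,
                    poolId: "OutputFilesSamplePool",
                    nodeCount: 2,
                    jobId: "OutputFilesSampleJob");

                Console.WriteLine($"Task outputs uploaded to container {outputContainer.Name}");
            }
        }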
コード例 #25
0
        public ActionResult Index()
        {
            var envVars = SampleHelpers.GetDatadogEnvironmentVariables();

            return(View(envVars.ToList()));
        }
コード例 #26
0
        /// <summary>
        /// Populates Azure Storage with the required files, and
        /// submits the job to the Azure Batch service.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("Running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.textSearchSettings.ToString());
            Console.WriteLine(this.accountSettings.ToString());

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    this.accountSettings.StorageAccountName,
                    this.accountSettings.StorageAccountKey),
                this.accountSettings.StorageServiceUrl,
                useHttps: true);

            //Upload resources if required.
            if (this.textSearchSettings.ShouldUploadResources)
            {
                Console.WriteLine("Splitting file: {0} into {1} subfiles",
                                  Constants.TextFilePath,
                                  this.textSearchSettings.NumberOfMapperTasks);

                //Split the text file into the correct number of files for consumption by the mapper tasks.
                FileSplitter  splitter        = new FileSplitter();
                List <string> mapperTaskFiles = await splitter.SplitAsync(
                    Constants.TextFilePath,
                    this.textSearchSettings.NumberOfMapperTasks);

                List <string> files = Constants.RequiredExecutableFiles.Union(mapperTaskFiles).ToList();

                await SampleHelpers.UploadResourcesAsync(
                    cloudStorageAccount,
                    this.textSearchSettings.BlobContainer,
                    files);
            }

            //Generate a SAS for the container.
            string containerSasUrl = SampleHelpers.ConstructContainerSas(
                cloudStorageAccount,
                this.textSearchSettings.BlobContainer);

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                this.accountSettings.BatchServiceUrl,
                this.accountSettings.BatchAccountName,
                this.accountSettings.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(credentials))
            {
                //
                // Construct the job properties in local memory before committing them to the Batch Service.
                //

                //Allow enough compute nodes in the pool to run each mapper task, and 1 extra to run the job manager.
                int numberOfPoolComputeNodes = 1 + this.textSearchSettings.NumberOfMapperTasks;

                //Define the pool specification for the pool which the job will run on.
                PoolSpecification poolSpecification = new PoolSpecification()
                {
                    TargetDedicated    = numberOfPoolComputeNodes,
                    VirtualMachineSize = "small",
                    //You can learn more about os families and versions at:
                    //http://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix
                    OSFamily        = "4",
                    TargetOSVersion = "*"
                };

                //Use the auto pool feature of the Batch Service to create a pool when the job is created.
                //This creates a new pool for each job which is added.
                AutoPoolSpecification autoPoolSpecification = new AutoPoolSpecification()
                {
                    AutoPoolIdPrefix   = "TextSearchPool",
                    KeepAlive          = false,
                    PoolLifetimeOption = PoolLifetimeOption.Job,
                    PoolSpecification  = poolSpecification
                };

                //Define the pool information for this job -- it will run on the pool defined by the auto pool specification above.
                PoolInformation poolInformation = new PoolInformation()
                {
                    AutoPoolSpecification = autoPoolSpecification
                };

                //Define the job manager for this job.  This job manager will run first and will submit the tasks for
                //the job.  The job manager is the executable which manages the lifetime of the job
                //and all tasks which should run for the job.  In this case, the job manager submits the mapper and reducer tasks.
                List <ResourceFile> jobManagerResourceFiles = SampleHelpers.GetResourceFiles(containerSasUrl, Constants.RequiredExecutableFiles);
                const string        jobManagerTaskId        = "JobManager";

                JobManagerTask jobManagerTask = new JobManagerTask()
                {
                    ResourceFiles = jobManagerResourceFiles,
                    CommandLine   = Constants.JobManagerExecutable,

                    //Determines if the job should terminate when the job manager process exits.
                    KillJobOnCompletion = true,
                    Id = jobManagerTaskId
                };

                //Create the unbound job in local memory.  An object which exists only in local memory (and not on the Batch Service) is "unbound".
                string jobId = Environment.GetEnvironmentVariable("USERNAME") + DateTime.UtcNow.ToString("yyyyMMdd-HHmmss");

                CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, poolInformation);
                unboundJob.JobManagerTask = jobManagerTask; //Assign the job manager task to this job

                try
                {
                    //Commit the unbound job to the Batch Service.
                    Console.WriteLine("Adding job: {0} to the Batch Service.", unboundJob.Id);
                    await unboundJob.CommitAsync(); //Issues a request to the Batch Service to add the job which was defined above.

                    //
                    // Wait for the job manager task to complete.
                    //

                    //An object which is backed by a corresponding Batch Service object is "bound."
                    CloudJob boundJob = await batchClient.JobOperations.GetJobAsync(jobId);

                    CloudTask boundJobManagerTask = await boundJob.GetTaskAsync(jobManagerTaskId);

                    TimeSpan maxJobCompletionTimeout = TimeSpan.FromMinutes(30);

                    // Monitor the current tasks to see when they are done.
                    // Occasionally a task may get killed and requeued during an upgrade or hardware failure, including the job manager
                    // task.  The job manager will be re-run in this case.  Robustness against this was not added into the sample for
                    // simplicity, but should be added into any production code.
                    Console.WriteLine("Waiting for job's tasks to complete");

                    TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                    bool             timedOut         = await taskStateMonitor.WaitAllAsync(new List <CloudTask> {
                        boundJobManagerTask
                    }, TaskState.Completed, maxJobCompletionTimeout);

                    Console.WriteLine("Done waiting for job manager task.");

                    await boundJobManagerTask.RefreshAsync();

                    //Check to ensure the job manager task exited successfully.
                    await Helpers.CheckForTaskSuccessAsync(boundJobManagerTask, dumpStandardOutOnTaskSuccess : false);

                    if (timedOut)
                    {
                        throw new TimeoutException(string.Format("Timed out waiting for job manager task to complete."));
                    }

                    //
                    // Download and write out the reducer tasks output
                    //

                    string reducerText = await SampleHelpers.DownloadBlobTextAsync(cloudStorageAccount, this.textSearchSettings.BlobContainer, Constants.ReducerTaskResultBlobName);

                    Console.WriteLine("Reducer reuslts:");
                    Console.WriteLine(reducerText);
                }
                finally
                {
                    //Delete the job.
                    //This will delete the auto pool associated with the job as long as the pool
                    //keep alive property is set to false.
                    if (this.textSearchSettings.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job {0}", jobId);
                        batchClient.JobOperations.DeleteJob(jobId);
                    }

                    //Note that there were files uploaded to a container specified in the
                    //configuration file.  This container will not be deleted or cleaned up by this sample.
                }
            }
        }
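SampleHelpers.ConstructContainerSas is not shown in this snippet. The sketch below is a guess at what such a helper does, modeled on the SAS construction in the OutputFiles example earlier on this page; the read-only permission and one-day expiry are assumptions.

        // Sketch (not part of the original sample): build a container URL with a read SAS
        // so that tasks can download the resource files staged above.
        public static string ConstructContainerSas(CloudStorageAccount storageAccount, string containerName)
        {
            CloudBlobContainer container = storageAccount.CreateCloudBlobClient().GetContainerReference(containerName);

            string sas = container.GetSharedAccessSignature(new SharedAccessBlobPolicy()
            {
                Permissions            = SharedAccessBlobPermissions.Read,     // assumed permission
                SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddDays(1)      // assumed expiry
            });

            return container.Uri.AbsoluteUri + sas;
        }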
コード例 #27
0
        private void PrepareDescriptor(VkDevice device)
        {
            // For now, prepare a descriptor pool just large enough to hold one uniform buffer and one sampler.
            VkDescriptorPoolSize descriptorPoolSize = new VkDescriptorPoolSize()
            {
                descriptorCount = 1,
                type            = VkDescriptorType.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            };
            VkDescriptorPoolSize descriptorPoolSizeForSampler = new VkDescriptorPoolSize()
            {
                descriptorCount = 1,
                type            = VkDescriptorType.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            };
            var descriptorPoolCreateInfo = new VkDescriptorPoolCreateInfo()
            {
                poolSizes = new[] { descriptorPoolSize, descriptorPoolSizeForSampler },
                maxSets   = 1,
                flags     = VkDescriptorPoolCreateFlags.VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
            };

            VulkanAPI.vkCreateDescriptorPool(device, ref descriptorPoolCreateInfo, out m_descriptorPool);

            // Create the descriptor set layout:
            //  - one uniform (constant) buffer
            //  - one texture sampler
            var descLayoutBindingForUniform = new VkDescriptorSetLayoutBinding();

            descLayoutBindingForUniform.binding         = 0;
            descLayoutBindingForUniform.descriptorCount = 1;
            descLayoutBindingForUniform.descriptorType  = VkDescriptorType.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
            descLayoutBindingForUniform.stageFlags      = VkShaderStageFlagBits.VK_SHADER_STAGE_VERTEX_BIT;
            var descLayoutBindingForSampler = new VkDescriptorSetLayoutBinding();

            descLayoutBindingForSampler.binding         = 1;
            descLayoutBindingForSampler.descriptorCount = 1;
            descLayoutBindingForSampler.descriptorType  = VkDescriptorType.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            descLayoutBindingForSampler.stageFlags      = VkShaderStageFlagBits.VK_SHADER_STAGE_FRAGMENT_BIT;
            var descriptorSetLayoutCreateInfo = new VkDescriptorSetLayoutCreateInfo();

            descriptorSetLayoutCreateInfo.bindings = new[] {
                descLayoutBindingForUniform,
                descLayoutBindingForSampler
            };
            VulkanAPI.vkCreateDescriptorSetLayout(device, ref descriptorSetLayoutCreateInfo, out m_descriptorSetLayout);


            // Allocate and update the descriptor set.
            VkDescriptorSet[] descriptorSets;
            var descriptorSetAllocateInfo = new VkDescriptorSetAllocateInfo(m_descriptorPool, new[] { m_descriptorSetLayout });

            VulkanAPI.vkAllocateDescriptorSets(device, ref descriptorSetAllocateInfo, out descriptorSets);
            m_descriptorSet = descriptorSets[0];

            var descUniform = new VkDescriptorBufferInfo()
            {
                buffer = m_uniformBuffer,
                range  = Marshal.SizeOf <Transform>(),
            };
            var descSampler = new VkDescriptorImageInfo()
            {
                imageView = m_imageView,
                sampler   = m_imageSampler,
            };
            var descForUniform   = SampleHelpers.CreateDescriptorFromUniformBuffer(0, descUniform, m_descriptorSet);
            var descForSampler   = SampleHelpers.CreateDescriptorFromImageSampler(1, descSampler, m_descriptorSet);
            var descriptorWrites = new[] { descForUniform, descForSampler };

            VulkanAPI.vkUpdateDescriptorSets(device, descriptorWrites, null);
        }