static void AddCloudTask(BatchClient client, string code)
        {
            var poolId = "applicationpool" + code;
            var jobId  = "cloudjob" + code;

            CloudJob job = client.JobOperations.GetJob(jobId);

            ResourceFile programFile = new ResourceFile("http:////MyApp.exe", "MyApp.exe");

            ResourceFile appConfigurationData = new ResourceFile("http:////MyApp.exe.config", "MyApp.exe.config");

            string taskName = "applicationtask" + code;

            CloudTask           task      = new CloudTask(taskName, "MyApp.exe " + code);
            List <ResourceFile> taskFiles = new List <ResourceFile>();

            taskFiles.Add(appConfigurationData);
            taskFiles.Add(programFile);
            task.ResourceFiles = taskFiles;
            job.AddTask(task);
            job.Commit();
            job.Refresh();


            client.Utilities.CreateTaskStateMonitor().WaitAll(job.ListTasks(), TaskState.Completed, new TimeSpan(0, 30, 0));
            Console.WriteLine("Process completed successfully for code :" + code);
            foreach (CloudTask taskInProgress in job.ListTasks())
            {
                Console.WriteLine("Process " + taskInProgress.Id + " Output:\n" + taskInProgress.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
            }
        }
Example #2
        public void Bug2338301_CheckStreamPositionAfterFileRead()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    JobOperations jobOperations = batchCli.JobOperations;
                    {
                        string jobId = "Bug2338301Job-" + TestUtilities.GetMyName();

                        try
                        {
                            const string taskId = "hiWorld";

                            //
                            // Create the job
                            //
                            CloudJob unboundJob = jobOperations.CreateJob(jobId, new PoolInformation()
                            {
                                PoolId = this.poolFixture.PoolId
                            });
                            unboundJob.Commit();

                            CloudJob  boundJob = jobOperations.GetJob(jobId);
                            CloudTask myTask   = new CloudTask(taskId, "cmd /c echo hello world");

                            boundJob.AddTask(myTask);

                            this.testOutputHelper.WriteLine("Initial job commit()");

                            //
                            // Wait for task to go to completion
                            //
                            Utilities        utilities        = batchCli.Utilities;
                            TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();
                            taskStateMonitor.WaitAll(
                                boundJob.ListTasks(),
                                Microsoft.Azure.Batch.Common.TaskState.Completed,
                                TimeSpan.FromMinutes(3));

                            CloudTask boundTask = boundJob.GetTask(taskId);

                            //Get the task file
                            const string fileToGet = "stdout.txt";
                            NodeFile     file      = boundTask.GetNodeFile(fileToGet);

                            //Download the file data
                            string result = file.ReadAsString();
                            Assert.True(result.Length > 0);
                        }
                        finally
                        {
                            jobOperations.DeleteJob(jobId);
                        }
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
Example #3
        static void AddTasks(BatchClient client, CloudJob cloudJob, string jobId, IEnumerable <IComputeTask> computeTasks)
        {
            foreach (var computeTask in computeTasks)
            {
                var definition = computeTask.Definition;
                var executable = new ResourceFile($"{definition.StorageUri}/{definition.ExecutableName}", definition.ExecutableName);
                var resources  = definition.Resources.Select(resource => new ResourceFile($"{definition.StorageUri}/{resource}", resource));
                var inputs     = computeTask.Inputs.Select(input => new ResourceFile($"{definition.StorageUri}/{input}", input));

                var resourceFiles = new List <ResourceFile> {
                    executable
                };
                resourceFiles.AddRange(resources);
                resourceFiles.AddRange(inputs);

                var task = client.JobOperations.ListTasks(jobId).SingleOrDefault(t => t.Id == computeTask.Id);

                if (task == null)
                {
                    task = new CloudTask(computeTask.Id, computeTask.CommandLine)
                    {
                        ResourceFiles = resourceFiles
                    };

                    cloudJob.AddTask(task);
                    cloudJob.Commit();
                    cloudJob.Refresh();
                }
            }

            client.Utilities.CreateTaskStateMonitor().WaitAll(cloudJob.ListTasks(), TaskState.Completed, new TimeSpan(0, 30, 0));
        }
Example #4
        /// <summary>
        /// Monitors the specified job's tasks and returns each as they complete. When all
        /// of the tasks in the job have completed, the method returns.
        /// </summary>
        /// <param name="job">The <see cref="CloudJob"/> containing the tasks to monitor.</param>
        /// <returns>One or more completed <see cref="CloudTask"/>.</returns>
        public static IEnumerable <CloudTask> CompletedTasks(this CloudJob job)
        {
            HashSet <string> yieldedTasks = new HashSet <string>();

            ODATADetailLevel detailLevel = new ODATADetailLevel();

            detailLevel.SelectClause = "id,state,url,executionInfo";

            while (true)
            {
                List <CloudTask> tasks = job.ListTasks(detailLevel).ToList();

                IEnumerable <CloudTask> newlyCompleted = tasks.Where(t => t.State == TaskState.Completed)
                                                         .Where(t => !yieldedTasks.Contains(t.Id));

                foreach (CloudTask task in newlyCompleted)
                {
                    yield return task;

                    yieldedTasks.Add(task.Id);
                }

                if (yieldedTasks.Count == tasks.Count)
                {
                    yield break;
                }
            }
        }
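The extension method above can be consumed directly from a foreach loop. A minimal usage sketch is shown below; the caller name is hypothetical, and it assumes an open BatchClient and a job whose tasks have already been added.

        // Hypothetical caller of the CompletedTasks() extension shown above.
        static void PrintResultsAsTheyComplete(BatchClient client, string jobId)
        {
            CloudJob job = client.JobOperations.GetJob(jobId);

            foreach (CloudTask completedTask in job.CompletedTasks())
            {
                // executionInfo is part of the select clause above, so ExitCode is populated.
                Console.WriteLine("Task {0} finished with exit code {1}",
                    completedTask.Id,
                    completedTask.ExecutionInformation.ExitCode);
            }
        }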
Example #5
        // Calls the Batch service to get metrics for a single job.  The first time the
        // MetricMonitor sees a job, it creates a TaskStateCache to hold task state information,
        // and queries the states of *all* tasks in the job. Subsequent times, it queries
        // only for tasks whose states have changed since the previous query -- this significantly
        // reduces download volumes for large jobs. In either case, it then updates the
        // cached task states and aggregates them into a TaskStateCounts object.
        private async Task CollectTaskMetricsAsync(MetricEvent.Builder metricsBuilder, CloudJob job)
        {
            TaskStateCache taskStateCache;

            bool firstTime = !this.jobStateCache.ContainsKey(job.Id);

            if (firstTime)
            {
                taskStateCache = new TaskStateCache();
                this.jobStateCache.Add(job.Id, taskStateCache);
            }
            else
            {
                taskStateCache = this.jobStateCache[job.Id];
            }

            // The first time the monitor is called for a job, it must enumerate all of the job's tasks once to establish their states.
            // This is a relatively slow query.
            // Subsequent calls query only for task state changes since the last query was issued, widened by an allowance for
            // clock skew (approximately 30 seconds for Azure). So if the monitoring period is 1 minute, the query should look for
            // changes in the last minute and 30 seconds.

            // TODO: it would be better to record the time at which the last query was issued and use that,
            // rather than subtracting the monitor interval from the current time
            DateTime since       = DateTime.UtcNow - (this.monitorInterval + MaximumClockSkew);
            var      tasksToList = firstTime ? DetailLevels.IdAndState.AllEntities : DetailLevels.IdAndState.OnlyChangedAfter(since);

            var listTasksTimer = Stopwatch.StartNew();
            var tasks          = await job.ListTasks(tasksToList).ToListAsync(this.runCancel.Token);

            listTasksTimer.Stop();

            var listTasksLatency = listTasksTimer.Elapsed;

            foreach (var task in tasks)
            {
                taskStateCache.UpdateTaskState(task.Id, task.State.Value);
            }

            var taskStateCounts = taskStateCache.GetTaskStateCounts();

            metricsBuilder.JobStats.Add(job.Id, new JobMetrics(listTasksLatency, taskStateCounts));
        }
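DetailLevels.IdAndState is a helper from the monitoring sample, not a Batch SDK type. A minimal sketch of what it might look like, built on ODATADetailLevel, follows; the filter clause syntax is an assumption and should be checked against the Batch OData filter documentation.

        // Sketch of the assumed DetailLevels helper used by CollectTaskMetricsAsync above.
        internal static class DetailLevels
        {
            internal static class IdAndState
            {
                // Project only the fields the task state cache needs, for every task in the job.
                internal static readonly ODATADetailLevel AllEntities =
                    new ODATADetailLevel(selectClause: "id,state");

                // Same projection, restricted to tasks whose state changed after the given time.
                // Assumption: the OData filter literal format shown here is illustrative.
                internal static ODATADetailLevel OnlyChangedAfter(DateTime time)
                {
                    return new ODATADetailLevel(
                        selectClause: "id,state",
                        filterClause: string.Format("stateTransitionTime gt DateTime'{0:o}'", time));
                }
            }
        }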
Example #6
        static void DeleteCloudTask(BatchClient client, string code)
        {
            var jobId = "cloudjob" + code;
            IPagedEnumerable <CloudJob> jobs = client.JobOperations.ListJobs();

            foreach (CloudJob checkjob in jobs)
            {
                if (checkjob.Id.Equals(jobId))
                {
                    CloudJob job = client.JobOperations.GetJob(jobId);
                    foreach (CloudTask task in job.ListTasks())
                    {
                        task.Delete();
                    }
                }
            }

            Console.WriteLine("Cloud tasks deleted for code : " + code);
        }
Example #7
        /// <summary>
        /// Create a job and add two simple tasks to it. Wait for completion using the Task state monitor
        /// </summary>
        private static void AddJobTwoTasks(BatchClient client, string sharedPoolId)
        {
            string jobId = CreateJobId("HelloWorldTwoTaskJob");

            Console.WriteLine("Creating job: " + jobId);
            CloudJob boundJob = CreateBoundJob(client.JobOperations, sharedPoolId, jobId);

            // add 2 quick tasks. Each task within a job must have a unique ID
            List <CloudTask> tasksToRun = new List <CloudTask>(2);

            tasksToRun.Add(new CloudTask("task1", "hostname"));
            tasksToRun.Add(new CloudTask("task2", "cmd /c dir /s"));

            client.JobOperations.AddTask(boundJob.Id, tasksToRun);

            Console.WriteLine("Waiting for all tasks to complete on Job: {0} ...", boundJob.Id);

            //We use the task state monitor to monitor the state of our tasks -- in this case we will wait for them all to complete.
            TaskStateMonitor taskStateMonitor = client.Utilities.CreateTaskStateMonitor();

            // blocking wait on the list of tasks until all tasks reach completed state or the timeout is reached.
            // If the pool is being resized then enough time is needed for the VMs to reach the idle state in order
            // for tasks to run on them.
            IPagedEnumerable <CloudTask> ourTasks = boundJob.ListTasks();
            bool timedOut = taskStateMonitor.WaitAll(ourTasks, TaskState.Completed, new TimeSpan(0, 10, 0));

            if (timedOut)
            {
                throw new TimeoutException("Timed out waiting for tasks");
            }

            // dump task output
            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:\n" + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine("\nstderr:\n" + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
            }

            //Delete the job to ensure the tasks are cleaned up
            Console.WriteLine("Deleting job: {0}", boundJob.Id);
            client.JobOperations.DeleteJob(boundJob.Id);
        }
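CreateJobId and CreateBoundJob are helpers defined elsewhere in the sample, not SDK methods. A plausible sketch, inferred from how they are used above and built only from the CreateJob/Commit/GetJob calls shown in these examples, is:

        // Assumed helper: make the job id unique across runs.
        private static string CreateJobId(string prefix)
        {
            return prefix + "-" + DateTime.UtcNow.ToString("yyyyMMdd-HHmmss");
        }

        // Assumed helper: create and commit a job on an existing pool, then return the bound job.
        private static CloudJob CreateBoundJob(JobOperations jobOperations, string poolId, string jobId)
        {
            CloudJob unboundJob = jobOperations.CreateJob(jobId, new PoolInformation { PoolId = poolId });
            unboundJob.Commit();
            return jobOperations.GetJob(jobId);
        }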
Example #8
        private static void MonitorProgess(Settings unzipperSettings, BatchClient client)
        {
            //Get the job to monitor status.
            CloudJob job = client.JobOperations.GetJob(unzipperSettings.JobId);

            Console.Write("Waiting for tasks to complete ...   ");
            // Wait 120 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
            // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
            IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));

            client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(120));
            Console.WriteLine("tasks are done.");

            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Microsoft.Azure.Batch.Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine();
                Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Microsoft.Azure.Batch.Constants.StandardErrorFileName).ReadAsString());
            }
        }
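As the comment notes, most of the long wait on a freshly created pool is spent on node allocation rather than on the tasks themselves. If you want visibility into that phase, one option is to poll the pool's compute nodes before waiting on task state. A rough, hypothetical helper (not part of the sample) might look like this:

        // Hypothetical helper: poll until every node in the pool has reached a usable state.
        private static void WaitForNodes(BatchClient client, string poolId)
        {
            CloudPool pool = client.PoolOperations.GetPool(poolId);

            // ListComputeNodes re-queries the service on each enumeration, so no Refresh() is needed.
            while (pool.ListComputeNodes(new ODATADetailLevel(selectClause: "id,state"))
                       .ToList()
                       .Any(n => n.State != ComputeNodeState.Idle && n.State != ComputeNodeState.Running))
            {
                Console.WriteLine("Waiting for compute nodes to become available ...");
                Thread.Sleep(TimeSpan.FromSeconds(30));
            }
        }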
Example #9
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize  = "standard_d1_v2";
            const string osFamily  = "5";
            const int    nodeCount = 1;

            const string poolId = "TaskDependenciesSamplePool";
            const string jobId  = "TaskDependenciesSampleJob";

            // Amount of time to wait before timing out long-running tasks.
            TimeSpan timeLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            try
            {
                using (BatchClient batchClient = BatchClient.Open(cred))
                {
                    // Create the pool.
                    Console.WriteLine("Creating pool [{0}]...", poolId);
                    CloudPool unboundPool =
                        batchClient.PoolOperations.CreatePool(
                            poolId: poolId,
                            cloudServiceConfiguration: new CloudServiceConfiguration(osFamily),
                            virtualMachineSize: nodeSize,
                            targetDedicatedComputeNodes: nodeCount);
                    await unboundPool.CommitAsync();

                    // Create the job and specify that it uses tasks dependencies.
                    Console.WriteLine("Creating job [{0}]...", jobId);
                    CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId,
                                                                              new PoolInformation {
                        PoolId = poolId
                    });

                    // IMPORTANT: This is REQUIRED for using task dependencies.
                    unboundJob.UsesTaskDependencies = true;

                    await unboundJob.CommitAsync();

                    // Create the collection of tasks that will be added to the job.
                    List <CloudTask> tasks = new List <CloudTask>
                    {
                        // 'Rain' and 'Sun' don't depend on any other tasks
                        new CloudTask("Rain", "cmd.exe /c echo Rain"),
                        new CloudTask("Sun", "cmd.exe /c echo Sun"),

                        // Task 'Flowers' depends on completion of both 'Rain' and 'Sun'
                        // before it is run.
                        new CloudTask("Flowers", "cmd.exe /c echo Flowers")
                        {
                            DependsOn = TaskDependencies.OnIds("Rain", "Sun")
                        },

                        // Tasks 1, 2, and 3 don't depend on any other tasks. Because
                        // we will be using them for a task range dependency, we must
                        // specify string representations of integers as their ids
                        // (see the range-dependency sketch after this method).
                        new CloudTask("1", "cmd.exe /c echo 1"),
                        new CloudTask("2", "cmd.exe /c echo 2"),
                        new CloudTask("3", "cmd.exe /c echo 3"),

                        // Task A is the parent task.
                        new CloudTask("A", "cmd.exe /c echo A")
                        {
                            // Specify exit conditions for task A and their dependency actions.
                            ExitConditions = new ExitConditions
                            {
                                // If task A exits with a pre-processing error, block any downstream tasks (in this example, task B).
                                PreProcessingError = new ExitOptions
                                {
                                    DependencyAction = DependencyAction.Block
                                },
                                // If task A exits with the specified error codes, block any downstream tasks (in this example, task B).
                                ExitCodes = new List <ExitCodeMapping>
                                {
                                    new ExitCodeMapping(10, new ExitOptions()
                                    {
                                        DependencyAction = DependencyAction.Block
                                    }),
                                    new ExitCodeMapping(20, new ExitOptions()
                                    {
                                        DependencyAction = DependencyAction.Block
                                    })
                                },
                                // If task A succeeds or fails with any other error, any downstream tasks become eligible to run
                                // (in this example, task B).
                                Default = new ExitOptions
                                {
                                    DependencyAction = DependencyAction.Satisfy
                                }
                            }
                        },
                        // Task B depends on task A. Whether it becomes eligible to run depends on how task A exits.
                        new CloudTask("B", "cmd.exe /c echo B")
                        {
                            DependsOn = TaskDependencies.OnId("A")
                        },
                    };

                    // Add the tasks to the job.
                    await batchClient.JobOperations.AddTaskAsync(jobId, tasks);

                    // Pause execution while we wait for the tasks to complete, and notify
                    // whether the tasks completed successfully.
                    Console.WriteLine("Waiting for task completion...");
                    Console.WriteLine();
                    CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        timeLimit);

                    Console.WriteLine("All tasks completed successfully.");
                    Console.WriteLine();
                }
            }
            catch (Exception e)
            {
                Console.WriteLine();
                Console.WriteLine("An exception occurred.");
                Console.WriteLine(e.Message);
                Console.WriteLine(e.StackTrace);
            }
            finally
            {
                using (BatchClient batchClient = BatchClient.Open(cred))
                {
                    CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

                    // Clean up the resources we've created in the Batch account
                    Console.Write("Delete job? [yes] no: ");
                    string response = Console.ReadLine().ToLower();
                    if (response != "n" && response != "no")
                    {
                        await batchClient.JobOperations.DeleteJobAsync(job.Id);
                    }

                    Console.Write("Delete pool? [yes] no: ");
                    response = Console.ReadLine().ToLower();
                    if (response != "n" && response != "no")
                    {
                        await batchClient.PoolOperations.DeletePoolAsync(poolId);
                    }
                }
            }
        }
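The comment on tasks "1", "2", and "3" above mentions a task range dependency, but the dependent task itself is not declared in this sample. An entry that could be added to the tasks collection to express that dependency would look like the following (the id "4" is illustrative):

                        // Hypothetical addition to the 'tasks' collection: depends on the whole
                        // range of integer-named tasks "1" through "3". OnIdRange is why those
                        // tasks were given string representations of integers as ids.
                        new CloudTask("4", "cmd.exe /c echo 4")
                        {
                            DependsOn = TaskDependencies.OnIdRange(1, 3)
                        },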
Example #10
        public void LongRunning_Bug1965363Wat7OSVersionFeaturesQuickJobWithAutoPool()
        {
            void test()
            {
                using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
                string jobId = "Bug1965363Job-" + TestUtilities.GetMyName();

                try
                {
                    PoolInformation poolInfo = new PoolInformation()
                    {
                        AutoPoolSpecification = new AutoPoolSpecification()
                        {
                            PoolLifetimeOption = PoolLifetimeOption.Job,
                            PoolSpecification  = new PoolSpecification()
                            {
                                CloudServiceConfiguration   = new CloudServiceConfiguration(PoolFixture.OSFamily),
                                VirtualMachineSize          = PoolFixture.VMSize,
                                TargetDedicatedComputeNodes = 1
                            }
                        }
                    };

                    CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, poolInfo);

                    testOutputHelper.WriteLine("Commiting quickjob");
                    unboundJob.Commit();

                    CloudTask task     = new CloudTask("Bug1965363Wat7OSVersionFeaturesQuickJobWithAutoPoolTask", "cmd /c echo Bug1965363");
                    CloudJob  boundJob = batchCli.JobOperations.GetJob(jobId);
                    boundJob.AddTask(task);

                    testOutputHelper.WriteLine("Getting pool name: {0}", boundJob.ExecutionInformation.PoolId);

                    CloudPool           boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);
                    TaskStateMonitor    tsm       = batchCli.Utilities.CreateTaskStateMonitor();
                    ODATAMonitorControl odControl = new ODATAMonitorControl();

                    // we know that the autopool compute nodes will take a long time to become schedulable so we slow down polling/spam
                    odControl.DelayBetweenDataFetch = TimeSpan.FromSeconds(5);

                    testOutputHelper.WriteLine("Invoking TaskStateMonitor");

                    tsm.WaitAll(
                        boundJob.ListTasks(),
                        TaskState.Completed,
                        TimeSpan.FromMinutes(15),
                        odControl,
                        new[] {
                        // spam/logging interceptor
                        new Protocol.RequestInterceptor((x) =>
                        {
                            testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                            // print out the compute node states... we are actually waiting on the compute nodes
                            List <ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();
                            testOutputHelper.WriteLine("    #comnpute nodes: " + allComputeNodes.Count);

                            allComputeNodes.ForEach((icn) => { testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State); });
                            testOutputHelper.WriteLine("");
                        })
                    });

                    // confirm the task ran by inspecting the stdOut
                    string stdOut = boundJob.ListTasks().ToList()[0].GetNodeFile(Constants.StandardOutFileName).ReadAsString();

                    Assert.Contains("Bug1965363", stdOut);
                }
                finally
                {
                    TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                }
            }

            SynchronizationContextHelper.RunTest(test, LongTestTimeout);
        }
Example #11
        public void Bug1665834TaskStateMonitor()
        {
            void test()
            {
                using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());
                string jobId = "Bug1665834Job-" + TestUtilities.GetMyName();

                try
                {
                    CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                    unboundJob.PoolInformation.PoolId = poolFixture.PoolId;
                    unboundJob.Commit();

                    CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                    // add some noise tasks
                    for (int j = 0; j < 5; j++)
                    {
                        CloudTask unboundTaskQuick = new CloudTask((10 + j).ToString(), "cmd /c hostname");

                        boundJob.AddTask(unboundTaskQuick);
                    }

                    Thread.Sleep(5000);

                    // wait for fast tasks to complete
                    {
                        bool repeat = true;

                        while (repeat)
                        {
                            CloudPool boundPool = batchCli.PoolOperations.GetPool(poolFixture.PoolId);

                            repeat = false;

                            foreach (CloudTask curTask in boundJob.ListTasks())
                            {
                                if (curTask.State != TaskState.Completed)
                                {
                                    repeat = true;

                                    testOutputHelper.WriteLine("Manual Wait Task Id: " + curTask.Id + ", state = " + curTask.State);
                                    testOutputHelper.WriteLine("   poolstate: " + boundPool.State + ", currentdedicated: " + boundPool.CurrentDedicatedComputeNodes);
                                    testOutputHelper.WriteLine("      compute nodes:");

                                    foreach (ComputeNode curComputeNode in boundPool.ListComputeNodes())
                                    {
                                        testOutputHelper.WriteLine("           computeNode.Id: " + curComputeNode.Id + ", state: " + curComputeNode.State);
                                    }
                                }
                            }
                        }
                    }

                    // add some longer running tasks

                    testOutputHelper.WriteLine("Adding longer running tasks");

                    for (int i = 0; i < 15; i++)
                    {
                        CloudTask unboundTask = new CloudTask(i.ToString() + "_a234567890a234567890a234567890a234567890a234567890a234567890", "cmd /c ping 127.0.0.1 -n 4");

                        boundJob.AddTask(unboundTask);
                    }

                    Utilities        utilities = batchCli.Utilities;
                    TaskStateMonitor tsm       = utilities.CreateTaskStateMonitor();

                    IPagedEnumerable <CloudTask> taskList = boundJob.ListTasks();

                    // try to set really low delay
                    ODATAMonitorControl odmc = new ODATAMonitorControl {
                        DelayBetweenDataFetch = new TimeSpan(0)
                    };

                    // confirm the floor is enforced
                    Assert.Equal(500, odmc.DelayBetweenDataFetch.Milliseconds);

                    testOutputHelper.WriteLine("Calling TaskStateMonitor.WaitAll().  This will take a while.");

                    TimeSpan timeToWait = TimeSpan.FromMinutes(5);
                    Task     whenAll    = tsm.WhenAll(taskList, TaskState.Completed, timeToWait, controlParams: odmc);

                    //This could throw, if it does the test will fail, which is what we want
                    whenAll.Wait();

                    foreach (CloudTask curTask in boundJob.ListTasks())
                    {
                        Assert.Equal(TaskState.Completed, curTask.State);
                    }
                }
                finally
                {
                    // cleanup
                    TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                }
            }

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
Example #12
        /// <summary>
        /// Submit tasks which have dependent files.
        /// The files are automatically uploaded to Azure Storage using the FileStaging feature of the Azure Batch client library.
        /// </summary>
        private static void AddTasksWithFileStaging(BatchClient client, string sharedPoolId)
        {
            string jobId = CreateJobId("HelloWorldFileStagingJob");

            Console.WriteLine("Creating job: " + jobId);
            CloudJob boundJob = CreateBoundJob(client.JobOperations, sharedPoolId, jobId);

            CloudTask taskToAdd1 = new CloudTask("task_with_file1", "cmd /c type *.txt");
            CloudTask taskToAdd2 = new CloudTask("task_with_file2", "cmd /c dir /s");

            //Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            //when the tasks are submitted to the Azure Batch service.
            taskToAdd1.FilesToStage = new List <IFileStagingProvider>();
            taskToAdd2.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            Process cur  = Process.GetCurrentProcess();
            string  path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), cur.Id + ".txt");

            File.WriteAllText(path, "hello from " + cur.Id);

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the VM before the task starts execution on the node
            FileToStage file = new FileToStage(path, new StagingStorageAccount(StorageAccount, StorageKey, StorageBlobEndpoint));

            taskToAdd1.FilesToStage.Add(file);
            taskToAdd2.FilesToStage.Add(file); // filetostage object can be reused

            // create a list of the tasks to add.
            List <CloudTask> tasksToRun = new List <CloudTask> {
                taskToAdd1, taskToAdd2
            };
            bool errors = false;

            try
            {
                client.JobOperations.AddTask(boundJob.Id, tasksToRun);
            }
            catch (AggregateException ae)
            {
                errors = true;
                // Go through all exceptions and dump useful information
                ae.Handle(x =>
                {
                    Console.Error.WriteLine("Adding tasks for job {0} failed", boundJob.Id);
                    if (x is BatchException)
                    {
                        BatchException be = x as BatchException;
                        if (null != be.RequestInformation && null != be.RequestInformation.AzureError)
                        {
                            // Write the server side error information
                            Console.Error.WriteLine("    AzureError.Code: " + be.RequestInformation.AzureError.Code);
                            Console.Error.WriteLine("    AzureError.Message.Value: " + be.RequestInformation.AzureError.Message.Value);
                            if (null != be.RequestInformation.AzureError.Values)
                            {
                                Console.Error.WriteLine("    AzureError.Values");
                                foreach (var v in be.RequestInformation.AzureError.Values)
                                {
                                    Console.Error.WriteLine("        {0} : {1}", v.Key, v.Value);
                                }
                            }
                            Console.Error.WriteLine();
                        }
                    }
                    else
                    {
                        Console.WriteLine(x);
                    }
                    // Indicate that the error has been handled
                    return true;
                });
            }

            // if there is no exception, wait for job response
            if (!errors)
            {
                Console.WriteLine("Waiting for all tasks to complete on job: {0}...", boundJob.Id);

                IPagedEnumerable <CloudTask> ourTasks = boundJob.ListTasks();
                client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(30));

                foreach (CloudTask task in ourTasks)
                {
                    Console.WriteLine("Task " + task.Id);
                    Console.WriteLine("stdout:\n" + task.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                    Console.WriteLine("\nstderr:\n" + task.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
                }
            }

            //Delete the job to ensure the tasks are cleaned up
            Console.WriteLine("Deleting job: {0}", boundJob.Id);
            client.JobOperations.DeleteJob(boundJob.Id);
        }
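StorageAccount, StorageKey, and StorageBlobEndpoint are fields of the sample class and are not shown above. They would typically come from configuration; one assumed shape, using environment variables, is:

        // Assumed configuration fields backing the StagingStorageAccount used above.
        private static readonly string StorageAccount      = Environment.GetEnvironmentVariable("STORAGE_ACCOUNT_NAME");
        private static readonly string StorageKey          = Environment.GetEnvironmentVariable("STORAGE_ACCOUNT_KEY");
        private static readonly string StorageBlobEndpoint =
            string.Format("https://{0}.blob.core.windows.net", StorageAccount);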
Example #13
        static void AddTasks(BatchClient client, CloudJob cloudJob, string jobId, IEnumerable<IComputeTask> computeTasks)
        {
            foreach (var computeTask in computeTasks)
            {
                var definition = computeTask.Definition;
                var executable = new ResourceFile($"{definition.StorageUri}/{definition.ExecutableName}", definition.ExecutableName);
                var resources = definition.Resources.Select(resource => new ResourceFile($"{definition.StorageUri}/{resource}", resource));
                var inputs = computeTask.Inputs.Select(input => new ResourceFile($"{definition.StorageUri}/{input}", input));

                var resourceFiles = new List<ResourceFile> { executable };
                resourceFiles.AddRange(resources);
                resourceFiles.AddRange(inputs);

                var task = client.JobOperations.ListTasks(jobId).SingleOrDefault(t => t.Id == computeTask.Id);

                if (task == null)
                {
                    task = new CloudTask(computeTask.Id, computeTask.CommandLine)
                    {
                        ResourceFiles = resourceFiles
                    };

                    cloudJob.AddTask(task);
                    cloudJob.Commit();
                    cloudJob.Refresh();
                }
            }

            client.Utilities.CreateTaskStateMonitor().WaitAll(cloudJob.ListTasks(), TaskState.Completed, new TimeSpan(0, 30, 0));
        }
Example #14
        /// <summary>
        /// Monitors the specified job's tasks and returns each as they complete. When all
        /// of the tasks in the job have completed, the method returns.
        /// </summary>
        /// <param name="job">The <see cref="CloudJob"/> containing the tasks to monitor.</param>
        /// <returns>One or more completed <see cref="CloudTask"/>.</returns>
        private static IEnumerable<CloudTask> CompletedTasks(CloudJob job)
        {
            HashSet<string> yieldedTasks = new HashSet<string>();

            ODATADetailLevel detailLevel = new ODATADetailLevel();
            detailLevel.SelectClause = "id,state,url";

            while (true)
            {
                List<CloudTask> tasks = job.ListTasks(detailLevel).ToList();

                IEnumerable<CloudTask> newlyCompleted = tasks.Where(t => t.State == Microsoft.Azure.Batch.Common.TaskState.Completed)
                                          .Where(t => !yieldedTasks.Contains(t.Id));

                foreach (CloudTask task in newlyCompleted)
                {
                    yield return task;
                    yieldedTasks.Add(task.Id);
                }

                if (yieldedTasks.Count == tasks.Count)
                {
                    yield break;
                }
            }
        }
Example #15
        private async void Button_JobStatus_Click(object sender, RoutedEventArgs e)
        {
            StringBuilder sb = new StringBuilder(1024);
            // read account settings, dump
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            // read job settings, dump
            JobSettings jobSettings = SampleHelpers.LoadJobSettings();

            // connect to batch, dump status
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey
                );

            sb.AppendLine($"batchcred created to {accountSettings.BatchAccountName} at {accountSettings.BatchServiceUrl}");
            using (BatchClient client = BatchClient.Open(cred))
            {
                string jobid = TextBox_JobID.Text.Trim();

                CloudJob job = null;
                sb.AppendLine($"GetJob({jobid})");
                try
                {
                    job = await client.JobOperations.GetJobAsync(jobid);
                }
                catch (Exception ex)
                {
                    job = null;
                    sb.AppendLine($"job not found.  jobid=[{jobid}]");
                    sb.AppendLine("job not found exception: " + ex.ToString());
                }

                if (job != null)
                {
                    TimeSpan? jobdur = job.ExecutionInformation.EndTime - job.ExecutionInformation.StartTime;
                    if (jobdur == null)
                    {
                        sb.AppendLine($"job state:{job.State} ");
                    }
                    else
                    {
                        sb.AppendLine($"job state:{job.State} duration: {jobdur}");
                    }

                    foreach (CloudTask t in job.ListTasks())
                    {
                        TimeSpan? dur = t.ExecutionInformation.EndTime - t.ExecutionInformation.StartTime;
                        if (dur == null)
                        {
                            sb.AppendLine($"task: {t.Id} {t.State} start: {t.ExecutionInformation.StartTime} end:{t.ExecutionInformation.EndTime}");
                        }
                        else
                        {
                            sb.AppendLine($"task: {t.Id} {t.State} duration:{dur} start: {t.ExecutionInformation.StartTime} end:{t.ExecutionInformation.EndTime}");
                        }
                    }
                }
            }

            TextBlock_Out.Text = sb.ToString();
        }
Example #16
        public void TestNode_GetListDeleteFiles()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "TestNodeGetListDeleteFiles-" + TestUtilities.GetMyName();

                    try
                    {
                        const string taskId = "hiWorld";

                        const string directoryCreationTaskId1 = "dirTask1";
                        const string directoryCreationTaskId2 = "dirTask2";

                        const string directoryNameOne = "Foo";
                        const string directoryNameTwo = "Bar";

                        //
                        // Create the job
                        //
                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                        unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                        unboundJob.Commit();

                        CloudJob  boundJob = batchCli.JobOperations.GetJob(jobId);
                        CloudTask myTask   = new CloudTask(taskId, "cmd /c echo hello world");
                        CloudTask directoryCreationTask1 = new CloudTask(directoryCreationTaskId1, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameOne));
                        CloudTask directoryCreationTask2 = new CloudTask(directoryCreationTaskId2, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameTwo));

                        boundJob.AddTask(myTask);
                        boundJob.AddTask(directoryCreationTask1);
                        boundJob.AddTask(directoryCreationTask2);

                        this.testOutputHelper.WriteLine("Initial job commit()");

                        //
                        // Wait for task to go to completion
                        //
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            TimeSpan.FromMinutes(3));

                        CloudTask boundTask = boundJob.GetTask(taskId);
                        //Since the compute node name comes back as "Node:<computeNodeId>" we need to split on : to get the actual compute node name
                        string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                        ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                        this.testOutputHelper.WriteLine("Task ran on compute node: {0}", computeNodeId);

                        //Ensure that ListFiles done without a recursive option, or with recursive false return the same values
                        {
                            List <NodeFile> filesByComputeNodeRecursiveOmitted = batchCli.PoolOperations.ListNodeFiles(
                                this.poolFixture.PoolId,
                                computeNodeId).ToList();

                            List <NodeFile> filesByComputeNodeRecursiveFalse = batchCli.PoolOperations.ListNodeFiles(
                                this.poolFixture.PoolId,
                                computeNodeId,
                                recursive: false).ToList();

                            AssertFileListsMatch(filesByComputeNodeRecursiveOmitted, filesByComputeNodeRecursiveFalse);
                        }

                        {
                            List <NodeFile> filesByTaskRecursiveOmitted = batchCli.JobOperations.ListNodeFiles(
                                jobId,
                                taskId).ToList();

                            List <NodeFile> filesByTaskRecursiveFalse = batchCli.JobOperations.ListNodeFiles(
                                jobId,
                                taskId,
                                recursive: false).ToList();

                            AssertFileListsMatch(filesByTaskRecursiveOmitted, filesByTaskRecursiveFalse);
                        }

                        //
                        // List all node files from operations -- recursive true
                        //
                        //TODO: Detail level?
                        List <NodeFile> fileListFromComputeNodeOperations = batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).ToList();

                        foreach (NodeFile f in fileListFromComputeNodeOperations)
                        {
                            this.testOutputHelper.WriteLine("Found file: {0}", f.Path);
                        }
                        //Check to make sure the expected folder named "Shared" exists
                        Assert.Contains("shared", fileListFromComputeNodeOperations.Select(f => f.Path));

                        //
                        // List all node files from the compute node -- recursive true
                        //
                        List <NodeFile> fileListFromComputeNode = computeNode.ListNodeFiles(recursive: true).ToList();
                        foreach (NodeFile f in fileListFromComputeNodeOperations)
                        {
                            this.testOutputHelper.WriteLine("Found file: {0}", f.Path);
                        }
                        //Check to make sure the expected folder named "Shared" exists
                        Assert.Contains("shared", fileListFromComputeNode.Select(f => f.Path));

                        //
                        // Get file from operations
                        //
                        string filePathToGet = fileListFromComputeNode.First(f => !f.IsDirectory.Value && f.Properties.ContentLength > 0).Path;
                        this.testOutputHelper.WriteLine("Getting file: {0}", filePathToGet);
                        NodeFile computeNodeFileFromManager = batchCli.PoolOperations.GetNodeFile(this.poolFixture.PoolId, computeNodeId, filePathToGet);
                        this.testOutputHelper.WriteLine("Successfully retrieved file: {0}", filePathToGet);
                        this.testOutputHelper.WriteLine("---- File data: ----");
                        var computeNodeFileContentFromManager = computeNodeFileFromManager.ReadAsString();
                        this.testOutputHelper.WriteLine(computeNodeFileContentFromManager);
                        Assert.NotEmpty(computeNodeFileContentFromManager);

                        //
                        // Get file directly from operations (bypassing the properties call)
                        //
                        var computeNodeFileContentDirect = batchCli.PoolOperations.CopyNodeFileContentToString(this.poolFixture.PoolId, computeNodeId, filePathToGet);
                        this.testOutputHelper.WriteLine("---- File data: ----");
                        this.testOutputHelper.WriteLine(computeNodeFileContentDirect);
                        Assert.NotEmpty(computeNodeFileContentDirect);

                        //
                        // Get file from compute node
                        //
                        this.testOutputHelper.WriteLine("Getting file: {0}", filePathToGet);
                        NodeFile fileFromComputeNode = computeNode.GetNodeFile(filePathToGet);
                        this.testOutputHelper.WriteLine("Successfully retrieved file: {0}", filePathToGet);
                        this.testOutputHelper.WriteLine("---- File data: ----");
                        var computeNodeFileContentFromNode = fileFromComputeNode.ReadAsString();
                        this.testOutputHelper.WriteLine(computeNodeFileContentFromNode);
                        Assert.NotEmpty(computeNodeFileContentFromNode);

                        //
                        // Get file from compute node (bypassing the properties call)
                        //
                        computeNodeFileContentDirect = computeNode.CopyNodeFileContentToString(filePathToGet);
                        this.testOutputHelper.WriteLine("---- File data: ----");
                        this.testOutputHelper.WriteLine(computeNodeFileContentDirect);
                        Assert.NotEmpty(computeNodeFileContentDirect);

                        //
                        // NodeFile delete
                        //
                        string   filePath = Path.Combine(@"workitems", jobId, "job-1", taskId, Constants.StandardOutFileName);
                        NodeFile nodeFile = batchCli.PoolOperations.GetNodeFile(this.poolFixture.PoolId, computeNodeId, filePath);

                        nodeFile.Delete();

                        //Ensure delete succeeded

                        TestUtilities.AssertThrows <BatchException>(() => nodeFile.Refresh());

                        //Delete directory

                        NodeFile directory = batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).First(item => item.Path.Contains(directoryNameOne));
                        Assert.True(directory.IsDirectory);
                        TestUtilities.AssertThrows <BatchException>(() => directory.Delete(recursive: false));
                        directory.Delete(recursive: true);

                        Assert.Null(batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameOne)));

                        //
                        // PoolManager delete node file
                        //
                        filePath = Path.Combine(@"workitems", jobId, "job-1", taskId, Constants.StandardErrorFileName);

                        NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName);
                        batchCli.PoolOperations.DeleteNodeFile(this.poolFixture.PoolId, computeNodeId, filePath);

                        //Ensure delete succeeded
                        TestUtilities.AssertThrows <BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName));

                        //Delete directory
                        directory = batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).First(item => item.Path.Contains(directoryNameTwo));
                        Assert.True(directory.IsDirectory);
                        TestUtilities.AssertThrows <BatchException>(() => batchCli.PoolOperations.DeleteNodeFile(this.poolFixture.PoolId, computeNodeId, directory.Path, recursive: false));
                        batchCli.PoolOperations.DeleteNodeFile(this.poolFixture.PoolId, computeNodeId, directory.Path, recursive: true);

                        Assert.Null(batchCli.PoolOperations.ListNodeFiles(this.poolFixture.PoolId, computeNodeId, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameTwo)));
                    }
                    finally
                    {
                        batchCli.JobOperations.DeleteJob(jobId);
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
Example #17
        public void Bug2329884_ComputeNodeRecentTasksAndComputeNodeError()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "Bug2329884Job-" + TestUtilities.GetMyName();
                    Protocol.RequestInterceptor interceptor = null;

                    try
                    {
                        const string taskId = "hiWorld";

                        //
                        // Create the job
                        //
                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                        unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;

                        unboundJob.Commit();

                        CloudJob  boundJob = batchCli.JobOperations.GetJob(jobId);
                        CloudTask myTask   = new CloudTask(taskId, "cmd /c echo hello world");

                        boundJob.AddTask(myTask);

                        this.testOutputHelper.WriteLine("Initial job commit()");

                        //
                        // Wait for task to go to completion
                        //
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            new TimeSpan(0, 3 /*min*/, 0));

                        CloudTask boundTask = boundJob.GetTask(taskId);

                        //Since the compute node name comes back as "Node:<computeNodeId>" we need to split on : to get the actual compute node name
                        string computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                        //
                        // Check recent tasks
                        //
                        ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                        this.testOutputHelper.WriteLine("Recent tasks:");

                        foreach (TaskInformation recentTask in computeNode.RecentTasks)
                        {
                            this.testOutputHelper.WriteLine("Compute node has recent task Job: {0}, Task: {1}, State: {2}, Subtask: {3}",
                                                            recentTask.JobId,
                                                            recentTask.TaskId,
                                                            recentTask.TaskState,
                                                            recentTask.SubtaskId);
                        }

                        TaskInformation myTaskInfo = computeNode.RecentTasks.First(taskInfo => taskInfo.JobId.Equals(
                                                                                       jobId, StringComparison.InvariantCultureIgnoreCase) &&
                                                                                   taskInfo.TaskId.Equals(taskId, StringComparison.InvariantCultureIgnoreCase));

                        Assert.Equal(TaskState.Completed, myTaskInfo.TaskState);
                        Assert.NotNull(myTaskInfo.ExecutionInformation);
                        Assert.Equal(0, myTaskInfo.ExecutionInformation.ExitCode);

                        //
                        // Check compute node Error
                        //
                        const string expectedErrorCode    = "TestErrorCode";
                        const string expectedErrorMessage = "Test error message";
                        const string nvpValue             = "Test";

                        //We use mocking to return a fake compute node object here to test Compute Node Error because we cannot force one easily
                        interceptor = new Protocol.RequestInterceptor((req =>
                        {
                            if (req is ComputeNodeGetBatchRequest)
                            {
                                var typedRequest = req as ComputeNodeGetBatchRequest;

                                typedRequest.ServiceRequestFunc = (token) =>
                                {
                                    var response = new AzureOperationResponse <Protocol.Models.ComputeNode, Protocol.Models.ComputeNodeGetHeaders>();

                                    List <Protocol.Models.ComputeNodeError> errors =
                                        new List <Protocol.Models.ComputeNodeError>();

                                    //Generate first Compute Node Error
                                    List <Protocol.Models.NameValuePair> nvps =
                                        new List <Protocol.Models.NameValuePair>();
                                    nvps.Add(new Protocol.Models.NameValuePair()
                                    {
                                        Name = nvpValue, Value = nvpValue
                                    });

                                    Protocol.Models.ComputeNodeError error1 = new Protocol.Models.ComputeNodeError();
                                    error1.Code = expectedErrorCode;
                                    error1.Message = expectedErrorMessage;
                                    error1.ErrorDetails = nvps;

                                    errors.Add(error1);

                                    //Generate second Compute Node Error
                                    nvps = new List <Protocol.Models.NameValuePair>();
                                    nvps.Add(new Protocol.Models.NameValuePair()
                                    {
                                        Name = nvpValue, Value = nvpValue
                                    });

                                    Protocol.Models.ComputeNodeError error2 = new Protocol.Models.ComputeNodeError();
                                    error2.Code = expectedErrorCode;
                                    error2.Message = expectedErrorMessage;
                                    error2.ErrorDetails = nvps;

                                    errors.Add(error2);

                                    Protocol.Models.ComputeNode protoComputeNode = new Protocol.Models.ComputeNode();
                                    protoComputeNode.Id = computeNodeId;
                                    protoComputeNode.State = Protocol.Models.ComputeNodeState.Idle;
                                    protoComputeNode.Errors = errors;

                                    response.Body = protoComputeNode;

                                    return(Task.FromResult(response));
                                };
                            }
                        }));

                        batchCli.PoolOperations.CustomBehaviors.Add(interceptor);

                        computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                        Assert.Equal(computeNodeId, computeNode.Id);
                        Assert.NotNull(computeNode.Errors);
                        Assert.Equal(2, computeNode.Errors.Count());

                        foreach (ComputeNodeError computeNodeError in computeNode.Errors)
                        {
                            Assert.Equal(expectedErrorCode, computeNodeError.Code);
                            Assert.Equal(expectedErrorMessage, computeNodeError.Message);
                            Assert.NotNull(computeNodeError.ErrorDetails);
                            Assert.Equal(1, computeNodeError.ErrorDetails.Count());
                            Assert.Contains(nvpValue, computeNodeError.ErrorDetails.First().Name);
                        }
                    }
                    finally
                    {
                        batchCli.JobOperations.DeleteJob(jobId);
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
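
The test above installs the fake-ComputeNode interceptor globally through PoolOperations.CustomBehaviors. As a hedged alternative sketch (not part of the original test), the same interceptor can instead be scoped to a single call via the additionalBehaviors parameter that Batch client operations accept, leaving later calls unaffected; the variables below are assumed to be the ones defined in the test.

                        // Hedged sketch: scope the interceptor to one call instead of adding it to
                        // batchCli.PoolOperations.CustomBehaviors. Assumes 'interceptor', 'batchCli',
                        // 'computeNodeId' and 'this.poolFixture.PoolId' from the test above.
                        ComputeNode mockedNode = batchCli.PoolOperations.GetComputeNode(
                            this.poolFixture.PoolId,
                            computeNodeId,
                            additionalBehaviors: new[] { interceptor });

                        // Calls made without the behavior go back to the real service.
                        ComputeNode realNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);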
Example #18
        public void TestSampleWithFilesAndPool()
        {
            Action test = () =>
            {
                StagingStorageAccount storageCreds = TestUtilities.GetStorageCredentialsFromEnvironment();

                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "SampleWithFilesJob-" + TestUtilities.GetMyName();


                    try
                    {
                        CloudJob quickJob = batchCli.JobOperations.CreateJob();
                        quickJob.Id = jobId;
                        quickJob.PoolInformation = new PoolInformation()
                        {
                            PoolId = this.poolFixture.PoolId
                        };
                        quickJob.Commit();
                        CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                        CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & wc localwords.txt");

                        // first we have local files that we want pushed to the compute node before the commandline is invoked
                        FileToStage wordsDotText = new FileToStage(Resources.LocalWordsDotText, storageCreds);                // use "default" mapping to base name of local file

                        myTask.FilesToStage = new List <IFileStagingProvider>();

                        myTask.FilesToStage.Add(wordsDotText);

                        // add the task to the job
                        var artifacts        = boundJob.AddTask(myTask);
                        var specificArtifact = artifacts[typeof(FileToStage)];
                        SequentialFileStagingArtifact sfsa = specificArtifact as SequentialFileStagingArtifact;

                        Assert.NotNull(sfsa);

                        // add a million more tasks...

                        // test to ensure the task is read only
                        TestUtilities.AssertThrows <InvalidOperationException>(() => myTask.FilesToStage = new List <IFileStagingProvider>());

                        // Open the new Job as bound.
                        CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);

                        // wait for the task to complete
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            TimeSpan.FromMinutes(10),
                            controlParams: null,
                            additionalBehaviors:
                            new[]
                        {
                            // spam/logging interceptor
                            new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) =>
                            {
                                this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                                try
                                {
                                    // print out the compute node states... we are actually waiting on the compute nodes
                                    List <ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();

                                    this.testOutputHelper.WriteLine("    #compute nodes: " + allComputeNodes.Count);

                                    allComputeNodes.ForEach(
                                        (icn) =>
                                    {
                                        this.testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State);
                                    });
                                }
                                catch (Exception ex)
                                {
                                    // there is a race between the pool-lifetime job ending and the ListComputeNodes call above
                                    Assert.True(false, "SampleWithFilesAndPool can probably ignore this if the pool was not found: " + ex.ToString());
                                }
                            })
                        });

                        List <CloudTask> tasks           = boundJob.ListTasks(null).ToList();
                        CloudTask        myCompletedTask = tasks[0];

                        foreach (CloudTask curTask in tasks)
                        {
                            this.testOutputHelper.WriteLine("Task Id: " + curTask.Id + ", state: " + curTask.State);
                        }

                        boundPool.Refresh();

                        this.testOutputHelper.WriteLine("Pool Id: " + boundPool.Id + ", state: " + boundPool.State);

                        string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                        string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                        this.testOutputHelper.WriteLine("StdOut: ");
                        this.testOutputHelper.WriteLine(stdOut);

                        this.testOutputHelper.WriteLine("StdErr: ");
                        this.testOutputHelper.WriteLine(stdErr);

                        this.testOutputHelper.WriteLine("Task Files:");

                        foreach (NodeFile curFile in myCompletedTask.ListNodeFiles(recursive: true))
                        {
                            this.testOutputHelper.WriteLine("    Filename: " + curFile.Name);
                        }

                        // confirm the files are there
                        Assert.True(FoundFile("localwords.txt", myCompletedTask.ListNodeFiles(recursive: true)), "missing file: localwords.txt");

                        // test validation of StagingStorageAccount

                        TestUtilities.AssertThrows <ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: " ", storageAccountKey: "key", blobEndpoint: "blob"); });
                        TestUtilities.AssertThrows <ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: " ", blobEndpoint: "blob"); });
                        TestUtilities.AssertThrows <ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: "key", blobEndpoint: ""); });

                        if (null != sfsa)
                        {
                            // TODO: delete the container! (see the cleanup sketch after this test)
                        }
                    }
                    finally
                    {
                        TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
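
The "TODO: delete the container" above is left unimplemented in the test. A minimal cleanup sketch follows, assuming the classic WindowsAzure.Storage client and that the storage account name and key used to build storageCreds are available; the container name comes from the SequentialFileStagingArtifact (sfsa.BlobContainerCreated) returned by AddTask.

        // Hedged sketch, not part of the original test: delete the blob container that
        // file staging created for the job. Assumes the WindowsAzure.Storage package.
        private static void DeleteStagingContainer(string storageAccountName, string storageAccountKey, string containerName)
        {
            var account = new Microsoft.WindowsAzure.Storage.CloudStorageAccount(
                new Microsoft.WindowsAzure.Storage.Auth.StorageCredentials(storageAccountName, storageAccountKey),
                useHttps: true);

            // DeleteIfExists is a no-op when the container has already been removed.
            account.CreateCloudBlobClient()
                   .GetContainerReference(containerName)
                   .DeleteIfExists();
        }

Calling DeleteStagingContainer with sfsa.BlobContainerCreated just before the finally block would remove the blobs uploaded by FileToStage.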
Example #19
        public static void JobMain(string[] args)
        {
            //Load the configuration
            TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig();

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                configuration.StorageAccountName,
                configuration.StorageAccountKey,
                configuration.StorageAccountBlobEndpoint);

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(configuration.BatchServiceUrl, configuration.BatchAccountName, configuration.BatchAccountKey)))
            {
                string stagingContainer = null;

                //Create a pool (if user hasn't provided one)
                if (configuration.ShouldCreatePool)
                {
                    //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at:
                    //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                    CloudPool pool = client.PoolOperations.CreatePool(configuration.PoolId, targetDedicated: configuration.PoolSize, osFamily: "4", virtualMachineSize: "small");
                    Console.WriteLine("Adding pool {0}", configuration.PoolId);

                    try
                    {
                        pool.Commit();
                    }
                    catch (AggregateException ae)
                    {
                        // Go through all exceptions and dump useful information
                        ae.Handle(x =>
                        {
                            Console.Error.WriteLine("Creating pool ID {0} failed", configuration.PoolId);
                            if (x is BatchException)
                            {
                                BatchException be = x as BatchException;

                                Console.WriteLine(be.ToString());
                                Console.WriteLine();
                            }
                            else
                            {
                                Console.WriteLine(x);
                            }

                            // can't continue without a pool
                            return(false);
                        });
                    }
                }

                try
                {
                    Console.WriteLine("Creating job: " + configuration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = configuration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = configuration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.
                    string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName);
                    Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(configuration.NumberOfTasks);

                    for (int i = 1; i <= configuration.NumberOfTasks; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     bookFileUri,
                                                                                     configuration.NumberOfTopWords,
                                                                                     configuration.StorageAccountName,
                                                                                     configuration.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(configuration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(configuration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (configuration.ShouldCreatePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", configuration.PoolId);
                        client.PoolOperations.DeletePool(configuration.PoolId);
                    }

                    //Delete the job that we created
                    if (configuration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", configuration.JobId);
                        client.JobOperations.DeleteJob(configuration.JobId);
                    }

                    //Delete the containers we created
                    if (configuration.ShouldDeleteContainer)
                    {
                        DeleteContainers(configuration, stagingContainer);
                    }
                }
            }
        }
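
UploadBookFileToCloudBlob and DeleteContainers are helpers of the TopNWords sample that are not shown in this listing. Below is a hedged sketch of what the upload side could look like, assuming the classic WindowsAzure.Storage client; the "books" container name is an assumption, not the sample's actual value.

        // Hedged sketch of an UploadBookFileToCloudBlob-style helper -- not the sample's actual
        // implementation. Uploads the local book file and returns a read-only SAS URI that each
        // task can download from.
        private static string UploadBookFileToCloudBlob(TopNWordsConfiguration configuration, string fileName)
        {
            var account = new Microsoft.WindowsAzure.Storage.CloudStorageAccount(
                new Microsoft.WindowsAzure.Storage.Auth.StorageCredentials(
                    configuration.StorageAccountName,
                    configuration.StorageAccountKey),
                useHttps: true);

            var container = account.CreateCloudBlobClient().GetContainerReference("books"); // assumed container name
            container.CreateIfNotExists();

            var blob = container.GetBlockBlobReference(System.IO.Path.GetFileName(fileName));
            blob.UploadFromFile(fileName);

            // Grant read access long enough for all tasks to fetch the file.
            string sas = blob.GetSharedAccessSignature(new Microsoft.WindowsAzure.Storage.Blob.SharedAccessBlobPolicy
            {
                Permissions = Microsoft.WindowsAzure.Storage.Blob.SharedAccessBlobPermissions.Read,
                SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddHours(24)
            });

            return blob.Uri + sas;
        }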
Example #20
        public static void JobMain(string[] args)
        {
            //Load the configuration
            Settings topNWordsConfiguration = new ConfigurationBuilder()
                                              .SetBasePath(Directory.GetCurrentDirectory())
                                              .AddJsonFile("settings.json")
                                              .Build()
                                              .Get <Settings>();
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 6 == Windows Server 2019. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "standard_d1_v2",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "6"));
                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                pool.TaskSchedulingPolicy   = new TaskSchedulingPolicy(ComputeNodeFillType.Spread);
                pool.MaxTasksPerComputeNode = 4;

                GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();
                var formula     = @"startingNumberOfVMs = 2;
                    maxNumberofVMs = 4;
                    pendingTaskSamplePercent = $PendingTasks.GetSamplePercent(90 * TimeInterval_Second);
                    pendingTaskSamples = pendingTaskSamplePercent < 70 ? startingNumberOfVMs : avg($PendingTasks.GetSample(180 * TimeInterval_Second));
                    $TargetDedicatedNodes = min(maxNumberofVMs, pendingTaskSamples);
                    $NodeDeallocationOption = taskcompletion;";
                // Wait 150 seconds, giving the pool time to start provisioning before autoscale is enabled.
                var noOfSeconds = 150;
                Thread.Sleep(noOfSeconds * 1000);

                client.PoolOperations.EnableAutoScale(
                    poolId: topNWordsConfiguration.PoolId, autoscaleFormula: formula,
                    autoscaleEvaluationInterval: TimeSpan.FromMinutes(5));

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe         = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll          = new FileToStage(StorageClientDllName, stagingStorageAccount);
                    FileToStage newtonJsoftDll      = new FileToStage(NewtonJSoftDllName, stagingStorageAccount);
                    FileToStage microsoftEFDll      = new FileToStage(MicrosoftEntityFrameworkDllName, stagingStorageAccount);
                    FileToStage microsoftEFCoreDll  = new FileToStage(MicrosoftEntityFrameworkCoreDllName, stagingStorageAccount);
                    FileToStage microsoftBCLDll     = new FileToStage(MicrosoftBCLDllName, stagingStorageAccount);
                    FileToStage systemTasksDll      = new FileToStage(SystemTasksDllName, stagingStorageAccount);
                    FileToStage topNWordsConfigFile = new FileToStage(TopnWordsConfig, stagingStorageAccount);
                    FileToStage SystemValueTupleDll = new FileToStage(SystemValueTupleDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionAbstractionsDll = new FileToStage(DependecyInjectionAbstractionsDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionDll             = new FileToStage(DependecyInjectionDllName, stagingStorageAccount);
                    FileToStage LoggingAbstractionsDll             = new FileToStage(LoggingAbstractionsDllName, stagingStorageAccount);
                    FileToStage DiagnosticsDll        = new FileToStage(DiagnosticssDllName, stagingStorageAccount);
                    FileToStage CachingAbstractionDll = new FileToStage(CachingAbstractionsDllName, stagingStorageAccount);
                    FileToStage MicrosoftSqlServerDll = new FileToStage(MicrosoftSqlServerDllName, stagingStorageAccount);
                    FileToStage SystemComponentDll    = new FileToStage(SystemComponentDllName, stagingStorageAccount);
                    FileToStage SystemCollectionsDll  = new FileToStage(SystemCollectionsDllName, stagingStorageAccount);
                    FileToStage pDll                 = new FileToStage(pdllName, stagingStorageAccount);
                    FileToStage oDll                 = new FileToStage(odllName, stagingStorageAccount);
                    FileToStage lDll                 = new FileToStage(ldllName, stagingStorageAccount);
                    FileToStage hashcodeDll          = new FileToStage(hashcodeDllName, stagingStorageAccount);
                    FileToStage clientSqlDll         = new FileToStage(clientSqlClientDllName, stagingStorageAccount);
                    FileToStage cachingMemoryDll     = new FileToStage(CachingMemoryDllName, stagingStorageAccount);
                    FileToStage configAbstractionDll = new FileToStage(configAbstractionDllName, stagingStorageAccount);
                    FileToStage SNIDll               = new FileToStage(SNIDllName, stagingStorageAccount);
                    FileToStage relationDll          = new FileToStage(relationddllName, stagingStorageAccount);

                    var textFile = "E:\\WeatherAPIPOC\\cities_id.txt";
                    var text     = File.ReadAllLines(textFile);
                    var cityList = new List <string>(text);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks);

                    for (int i = 0; i < cityList.Count; i++)
                    {
                        string    programLaunchTime = DateTime.Now.ToString("h:mm:sstt");
                        CloudTask task = new CloudTask(
                            id: $"task_no_{i + 1}",
                            commandline: $"cmd /c mkdir x64 & move SNI.dll x64 & {TopNWordsExeName} --Task {cityList[i]} %AZ_BATCH_NODE_ID% {programLaunchTime}");

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                            newtonJsoftDll,
                            microsoftEFDll,
                            microsoftEFCoreDll,
                            microsoftBCLDll,
                            systemTasksDll,
                            topNWordsConfigFile,
                            SystemValueTupleDll,
                            DependencyInjectionAbstractionsDll,
                            DependencyInjectionDll,
                            LoggingAbstractionsDll,
                            DiagnosticsDll,
                            CachingAbstractionDll,
                            MicrosoftSqlServerDll,
                            SystemComponentDll,
                            SystemCollectionsDll,
                            oDll,
                            pDll,
                            lDll,
                            relationDll,
                            hashcodeDll,
                            clientSqlDll,
                            cachingMemoryDll,
                            configAbstractionDll,
                            SNIDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
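
The JobMain above enables autoscale and then relies on the formula without checking it. A hedged sketch follows showing how, once autoscale is enabled on the pool, a (possibly revised) formula can be dry-run with PoolOperations.EvaluateAutoScale before it is applied; the variable names reuse those defined in JobMain.

                // Hedged sketch: dry-run an autoscale formula against the pool (autoscale must
                // already be enabled, as it is above) so formula errors surface early.
                // Assumes 'client', 'formula' and 'topNWordsConfiguration.PoolId' from JobMain.
                AutoScaleRun evaluation = client.PoolOperations.EvaluateAutoScale(topNWordsConfiguration.PoolId, formula);

                if (evaluation.Error != null)
                {
                    Console.WriteLine("Autoscale formula error: {0} - {1}", evaluation.Error.Code, evaluation.Error.Message);
                }
                else
                {
                    // Results is a string of "variable=value" pairs produced by the evaluation.
                    Console.WriteLine("Autoscale evaluation results: {0}", evaluation.Results);
                }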
        // Calls the Batch service to get metrics for a single job.  The first time the
        // MetricMonitor sees a job, it creates a TaskStateCache to hold task state information,
        // and queries the states of *all* tasks in the job. Subsequent times, it queries
        // only for tasks whose states have changed since the previous query -- this significantly
        // reduces download volumes for large jobs. In either case, it then updates the
        // cached task states and aggregates them into a TaskStateCounts object.
        private async Task CollectTaskMetricsAsync(MetricEvent.Builder metricsBuilder, CloudJob job)
        {
            TaskStateCache taskStateCache;

            bool firstTime = !this.jobStateCache.ContainsKey(job.Id);
            if (firstTime)
            {
                taskStateCache = new TaskStateCache();
                this.jobStateCache.Add(job.Id, taskStateCache);
            }
            else
            {
                taskStateCache = this.jobStateCache[job.Id];
            }

            // The first time the monitor API is called for a job, it has to issue a query that enumerates all the tasks once to get their states.
            // This is a relatively slow query.
            // Subsequent calls only look for task state changes since the last query, widened by an allowance for clock skew
            // (approximately 30 seconds for Azure). So if the monitoring interval is 1 minute, the query should look for
            // changes in the last minute and 30 seconds.

            // TODO: it would be better to record the time at which the last query was issued and use that,
            // rather than subtracting the monitor interval from the current time
            DateTime since = DateTime.UtcNow - (this.monitorInterval + MaximumClockSkew);
            var tasksToList = firstTime ? DetailLevels.IdAndState.AllEntities : DetailLevels.IdAndState.OnlyChangedAfter(since);

            var listTasksTimer = Stopwatch.StartNew();
            var tasks = await job.ListTasks(tasksToList).ToListAsync(this.runCancel.Token);
            listTasksTimer.Stop();

            var listTasksLatency = listTasksTimer.Elapsed;

            foreach (var task in tasks)
            {
                taskStateCache.UpdateTaskState(task.Id, task.State.Value);
            }

            var taskStateCounts = taskStateCache.GetTaskStateCounts();

            metricsBuilder.JobStats.Add(job.Id, new JobMetrics(listTasksLatency, taskStateCounts));
        }
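
DetailLevels.IdAndState.OnlyChangedAfter is a helper of the BatchMetrics sample and is not shown in this listing. A minimal sketch of the kind of ODATADetailLevel it could build follows; the exact select and filter clauses are assumptions, not the sample's verbatim code.

        // Hedged sketch of a "changed since" detail level -- an assumed shape for what
        // DetailLevels.IdAndState.OnlyChangedAfter might return, not the sample's exact code.
        private static ODATADetailLevel TasksChangedAfter(DateTime sinceUtc)
        {
            return new ODATADetailLevel(
                selectClause: "id,state,stateTransitionTime",
                filterClause: string.Format("stateTransitionTime gt DateTime'{0:o}'", sinceUtc));
        }

With such a filter, only tasks whose stateTransitionTime is later than sinceUtc are downloaded; everything else is served from the TaskStateCache.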
Example #22
        public async static Task JobMain(string[] args)
        {
            //Load the configuration
            Settings        topNWordsConfiguration = Settings.Default;
            AccountSettings accountSettings        = AccountSettings.Default;

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "small",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));

                List <string> files = new List <string>
                {
                    Path.Combine(BatchStartTaskFolderName, BatchStartTaskTelemetryRunnerName),
                };

                files.AddRange(AIFilesToUpload);

                var resourceHelperTask = SampleHelpers.UploadResourcesAndCreateResourceFileReferencesAsync(
                    cloudStorageAccount,
                    AIBlobConatinerName,
                    files);

                List <ResourceFile> resourceFiles = resourceHelperTask.Result;

                pool.StartTask = new StartTask()
                {
                    CommandLine   = string.Format("cmd /c {0}", BatchStartTaskTelemetryRunnerName),
                    ResourceFiles = resourceFiles
                };

                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                try
                {
                    await GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool);
                }
                catch (AggregateException ae)
                {
                    // Go through all exceptions and dump useful information
                    ae.Handle(x =>
                    {
                        Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;

                            Console.WriteLine(be.ToString());
                            Console.WriteLine();
                        }
                        else
                        {
                            Console.WriteLine(x);
                        }

                        // can't continue without a pool
                        return(false);
                    });
                }
                catch (BatchException be)
                {
                    Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                    Console.WriteLine(be.ToString());
                    Console.WriteLine();
                }

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    await unboundJob.CommitAsync();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // Upload application insights assemblies
                    List <FileToStage> aiStagedFiles = new List <FileToStage>();
                    foreach (string aiFile in AIFilesToUpload)
                    {
                        aiStagedFiles.Add(new FileToStage(aiFile, stagingStorageAccount));
                    }

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    string[] documents = Directory.GetFiles(topNWordsConfiguration.DocumentsRootPath);
                    await SampleHelpers.UploadResourcesAsync(cloudStorageAccount, BooksContainerName, documents);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(documents.Length);

                    for (int i = 0; i < documents.Length; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     "https://onbehalfoutput.blob.core.windows.net/" + documents[i],
                                                                                     topNWordsConfiguration.TopWordCount,
                                                                                     accountSettings.StorageAccountName,
                                                                                     accountSettings.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                        };

                        foreach (FileToStage stagedFile in aiStagedFiles)
                        {
                            task.FilesToStage.Add(stagedFile);
                        }

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);

                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
Example #23
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize  = "small";
            const string osFamily  = "4";
            const int    nodeCount = 1;

            const string poolId = "TaskDependenciesSamplePool";
            const string jobId  = "TaskDependenciesSampleJob";

            // Amount of time to wait before timing out long-running tasks.
            TimeSpan timeLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create the pool.
                Console.WriteLine("Creating pool [{0}]...", poolId);
                CloudPool unboundPool =
                    batchClient.PoolOperations.CreatePool(poolId: poolId,
                                                          cloudServiceConfiguration: new CloudServiceConfiguration(osFamily),
                                                          virtualMachineSize: nodeSize,
                                                          targetDedicated: nodeCount);
                await unboundPool.CommitAsync();

                // Create the job and specify that it uses tasks dependencies.
                Console.WriteLine("Creating job [{0}]...", jobId);
                CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId,
                                                                          new PoolInformation {
                    PoolId = poolId
                });

                // IMPORTANT: This is REQUIRED for using task dependencies.
                unboundJob.UsesTaskDependencies = true;

                await unboundJob.CommitAsync();

                // Create the collection of tasks that will be added to the job.
                List <CloudTask> tasks = new List <CloudTask>
                {
                    // 'Rain' and 'Sun' don't depend on any other tasks
                    new CloudTask("Rain", "cmd.exe /c echo Rain"),
                    new CloudTask("Sun", "cmd.exe /c echo Sun"),

                    // Task 'Flowers' depends on completion of both 'Rain' and 'Sun'
                    // before it is run.
                    new CloudTask("Flowers", "cmd.exe /c echo Flowers")
                    {
                        DependsOn = TaskDependencies.OnIds("Rain", "Sun")
                    },

                    // Tasks 1, 2, and 3 don't depend on any other tasks. Because
                    // we will be using them for a task range dependency, we must
                    // specify string representations of integers as their ids.
                    new CloudTask("1", "cmd.exe /c echo 1"),
                    new CloudTask("2", "cmd.exe /c echo 2"),
                    new CloudTask("3", "cmd.exe /c echo 3"),

                    // Task 4 depends on a range of tasks, 1 through 3
                    new CloudTask("4", "cmd.exe /c echo 4")
                    {
                        // To use a range of tasks, their ids must be integer values.
                        // Note that we pass integers as parameters to TaskIdRange,
                        // but their ids (above) are string representations of the ids.
                        DependsOn = TaskDependencies.OnIdRange(1, 3)
                    },

                    // Task 5 depends on a range of tasks, 1 through 3, and 'Flowers'
                    new CloudTask("5", "cmd.exe /c echo 5")
                    {
                        DependsOn = new TaskDependencies(
                            new[] { "Flowers" },
                            new[] { new TaskIdRange(1, 3) })
                    },
                };

                // Add the tasks to the job.
                await batchClient.JobOperations.AddTaskAsync(jobId, tasks);

                // Pause execution while we wait for the tasks to complete, and notify
                // whether the tasks completed successfully.
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();
                CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId);

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        timeLimit);

                    Console.WriteLine("All tasks completed successfully.");
                    Console.WriteLine();
                }
                catch (TimeoutException e)
                {
                    Console.WriteLine(e);
                }

                // Clean up the resources we've created in the Batch account
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(poolId);
                }
            }
        }
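
By default, a dependent task only becomes eligible to run when every task it depends on completes with exit code 0. As a hedged follow-on sketch (requires a Batch SDK/API version that supports ExitConditions; not part of the sample above), a parent task can be allowed to satisfy its dependents even for a specific non-zero exit code:

                // Hedged sketch: a variant of the 'Rain' task that satisfies its dependents even
                // when it exits with code 1, while any other non-zero exit code still blocks them.
                CloudTask rainTask = new CloudTask("Rain", "cmd.exe /c echo Rain & exit /b 1")
                {
                    ExitConditions = new ExitConditions()
                    {
                        ExitCodes = new List<ExitCodeMapping>
                        {
                            new ExitCodeMapping(1, new ExitOptions { DependencyAction = DependencyAction.Satisfy })
                        },
                        Default = new ExitOptions { DependencyAction = DependencyAction.Block }
                    }
                };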
Example #24
        public void Bug230385SupportDeleteNodeFileByTask()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "Bug230285Job-" + TestUtilities.GetMyName();

                    try
                    {
                        const string taskId = "hiWorld";
                        const string directoryCreationTaskId1 = "dirTask1";
                        const string directoryCreationTaskId2 = "dirTask2";

                        const string directoryNameOne = "Foo";
                        const string directoryNameTwo = "Bar";

                        const string directory2PathOnNode = "wd/" + directoryNameTwo;

                        //
                        // Create the job
                        //
                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                        unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                        unboundJob.Commit();

                        CloudJob  boundJob = batchCli.JobOperations.GetJob(jobId);
                        CloudTask myTask   = new CloudTask(taskId, "cmd /c echo hello world");
                        CloudTask directoryCreationTask1 = new CloudTask(directoryCreationTaskId1, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameOne));
                        CloudTask directoryCreationTask2 = new CloudTask(directoryCreationTaskId2, string.Format("cmd /c mkdir {0} && echo test > {0}/testfile.txt", directoryNameTwo));

                        boundJob.AddTask(myTask);
                        boundJob.AddTask(directoryCreationTask1);
                        boundJob.AddTask(directoryCreationTask2);

                        this.testOutputHelper.WriteLine("Initial job commit()");

                        //
                        // Wait for task to go to completion
                        //
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            TimeSpan.FromMinutes(3));

                        //
                        // NodeFile delete
                        //

                        //Delete single file
                        NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);
                        file.Delete();

                        //Ensure delete succeeded
                        TestUtilities.AssertThrows <BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName));

                        //Delete directory

                        NodeFile directory = batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId1, recursive: true).First(item => item.Path.Contains(directoryNameOne));
                        Assert.True(directory.IsDirectory);
                        TestUtilities.AssertThrows <BatchException>(() => directory.Delete(recursive: false));
                        directory.Delete(recursive: true);

                        Assert.Null(batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId1, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameOne)));

                        //
                        // JobScheduleOperations delete task file
                        //
                        batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName);
                        batchCli.JobOperations.DeleteNodeFile(jobId, taskId, Constants.StandardErrorFileName);

                        //Ensure delete succeeded
                        TestUtilities.AssertThrows <BatchException>(() => batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardErrorFileName));

                        //Delete directory
                        directory = batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId2, recursive: true).First(item => item.Path.Contains(directoryNameTwo));
                        Assert.True(directory.IsDirectory);
                        TestUtilities.AssertThrows <BatchException>(() => batchCli.JobOperations.DeleteNodeFile(jobId, directoryCreationTaskId2, directory2PathOnNode, recursive: false));
                        batchCli.JobOperations.DeleteNodeFile(jobId, directoryCreationTaskId2, directory2PathOnNode, recursive: true);

                        Assert.Null(batchCli.JobOperations.ListNodeFiles(jobId, directoryCreationTaskId2, recursive: true).FirstOrDefault(item => item.Path.Contains(directoryNameTwo)));
                    }
                    finally
                    {
                        TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
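
TestUtilities.DeleteJobIfExistsAsync, used in the finally block above and in the other tests in this listing, is not shown here. A minimal sketch, assuming a Batch SDK version in which BatchException.RequestInformation exposes a BatchError, and treating only the JobNotFound error code as benign:

        public static async Task DeleteJobIfExistsAsync(BatchClient batchCli, string jobId)
        {
            try
            {
                await batchCli.JobOperations.DeleteJobAsync(jobId);
            }
            catch (BatchException e)
            {
                // Ignore "job not found"; surface anything else to the caller.
                if (e.RequestInformation == null ||
                    e.RequestInformation.BatchError == null ||
                    e.RequestInformation.BatchError.Code != BatchErrorCodeStrings.JobNotFound)
                {
                    throw;
                }
            }
        }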
Example #25
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize        = "small";
            const int    nodeCount       = 4;
            const int    maxTasksPerNode = 4;
            const int    taskCount       = 32;

            // Ensure there are enough tasks to help avoid hitting some timeout conditions below
            int minimumTaskCount = nodeCount * maxTasksPerNode * 2;

            if (taskCount < minimumTaskCount)
            {
                Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
                Console.WriteLine();

                // Not enough tasks, exit the application
                return;
            }

            // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
            // values to simulate variable task duration
            const int minPings = 30;
            const int maxPings = 60;

            const string poolId = "ParallelTasksSamplePool";
            const string jobId  = "ParallelTasksSampleJob";

            // Amount of time to wait before timing out (potentially) long-running tasks
            TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                                poolId,
                                                                                nodeSize,
                                                                                nodeCount,
                                                                                maxTasksPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // The job's tasks ping localhost a random number of times between minPings and maxPings.
                // Adjust the minPings/maxPings values above to experiment with different task durations.
                Random           rand  = new Random();
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= taskCount; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
                // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be
                // scheduled to execute on a compute node as soon as any node has reached the Idle state. Because the focus of this sample
                // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to
                // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for
                // parallelization.
                await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);

                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Pause again to wait until *all* nodes are running tasks
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Print out task assignment information.
                Console.WriteLine();
                await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);

                Console.WriteLine();

                // Pause execution while we wait for all of the tasks to complete
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        longTaskDurationLimit);
                }
                catch (TimeoutException e)
                {
                    Console.WriteLine(e.ToString());
                }

                stopwatch.Stop();

                // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
                // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
                // amount of data transferred, lowering your query response times, and increasing performance.
                ODATADetailLevel             detail   = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
                IPagedEnumerable <CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);

                // Get a collection of the completed tasks sorted by the compute nodes on which they executed
                List <CloudTask> completedTasks = allTasks
                                                  .Where(t => t.State == TaskState.Completed)
                                                  .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
                                                  .ToList();

                // Print the completed task information
                Console.WriteLine();
                Console.WriteLine("Completed tasks:");
                string lastNodeId = string.Empty;
                foreach (CloudTask task in completedTasks)
                {
                    if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
                    {
                        Console.WriteLine();
                        Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
                    }

                    lastNodeId = task.ComputeNodeInformation.ComputeNodeId;

                    Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                }

                // Get a collection of the uncompleted tasks which may exist if the TaskMonitor timeout was hit
                List <CloudTask> uncompletedTasks = allTasks
                                                    .Where(t => t.State != TaskState.Completed)
                                                    .OrderBy(t => t.Id)
                                                    .ToList();

                // Print a list of uncompleted tasks, if any
                Console.WriteLine();
                Console.WriteLine("Uncompleted tasks:");
                Console.WriteLine();
                if (uncompletedTasks.Any())
                {
                    foreach (CloudTask task in uncompletedTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                    }
                }
                else
                {
                    Console.WriteLine("\t<none>");
                }

                // Print some summary information
                Console.WriteLine();
                Console.WriteLine("             Nodes: " + nodeCount);
                Console.WriteLine("         Node size: " + nodeSize);
                Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
                Console.WriteLine("             Tasks: " + tasks.Count);
                Console.WriteLine("          Duration: " + stopwatch.Elapsed);
                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
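
ArticleHelpers.WaitForPoolToReachStateAsync and WaitForNodesToReachStateAsync are sample helpers whose implementations are not included in this listing. A minimal sketch of the pool-state wait, assuming it simply polls PoolOperations.GetPoolAsync until the pool reports the requested AllocationState or the timeout elapses:

        private static async Task WaitForPoolToReachStateAsync(BatchClient client, string poolId, AllocationState targetState, TimeSpan timeout)
        {
            DateTime deadline = DateTime.UtcNow.Add(timeout);

            while (DateTime.UtcNow < deadline)
            {
                CloudPool pool = await client.PoolOperations.GetPoolAsync(poolId);

                if (pool.AllocationState == targetState)
                {
                    return;
                }

                // Poll at a modest interval to avoid excessive calls to the Batch service.
                await Task.Delay(TimeSpan.FromSeconds(10));
            }

            throw new TimeoutException(string.Format("Pool {0} did not reach allocation state {1} within {2}.", poolId, targetState, timeout));
        }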
Example #26
        public void Bug1480489NodeFileMissingIsDirectory()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "Bug1480489Job-" + TestUtilities.GetMyName();

                    try
                    {
                        // here we show how to use an unbound Job + Commit() to run a single task
                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation()
                        {
                            PoolId = this.poolFixture.PoolId
                        });
                        unboundJob.Commit();

                        // Open the new Job as bound.
                        CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                        CloudTask myTask = new CloudTask(id: "Bug1480489Task", commandline: @"md Bug1480489Directory");

                        // add the task to the job
                        boundJob.AddTask(myTask);

                        // wait for the task to complete
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            TimeSpan.FromMinutes(3));

                        CloudTask myCompletedTask = new List <CloudTask>(boundJob.ListTasks(null))[0];

                        string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                        string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                        this.testOutputHelper.WriteLine("TaskId: " + myCompletedTask.Id);
                        this.testOutputHelper.WriteLine("StdOut: ");
                        this.testOutputHelper.WriteLine(stdOut);

                        this.testOutputHelper.WriteLine("StdErr: ");
                        this.testOutputHelper.WriteLine(stdErr);

                        this.testOutputHelper.WriteLine("Task Files:");

                        bool foundAtLeastOneDir = false;

                        foreach (NodeFile curFile in myCompletedTask.ListNodeFiles())
                        {
                            this.testOutputHelper.WriteLine("    Filepath: " + curFile.Path);
                            this.testOutputHelper.WriteLine("       IsDirectory: " + curFile.IsDirectory.ToString());

                            // the wd directory is created for each task, so use it as a sentinel
                            if (curFile.Path.Equals("wd") && curFile.IsDirectory.HasValue && curFile.IsDirectory.Value)
                            {
                                foundAtLeastOneDir = true;
                            }
                        }

                        Assert.True(foundAtLeastOneDir);
                    }
                    finally
                    {
                        TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
        public static void HelloWorld(
            BatchClient batchCli,
            ITestOutputHelper testOutputHelper,
            CloudPool sharedPool,
            out string jobId,
            out string taskId,
            bool deleteJob = true,
            bool isLinux   = false)
        {
            jobId = "HelloWorldJob-" + GetMyName() + "-" + GetTimeStamp();

            try
            {
                // here we show how to use an unbound Job + Commit() to run a simple "Hello World" task
                // get an empty unbound Job
                CloudJob quickJob = batchCli.JobOperations.CreateJob();
                quickJob.Id = jobId;
                quickJob.PoolInformation = new PoolInformation()
                {
                    PoolId = sharedPool.Id
                };

                // Commit Job
                quickJob.Commit();

                // get an empty unbound Task
                taskId = "dwsHelloWorldTask";

                const string winPaasHWTaskCmdLine   = "cmd /c echo Hello World";
                const string linuxIaasHWTaskCmdLine = "echo Hello World";

                string winnerTaskCmdLine = isLinux ? linuxIaasHWTaskCmdLine : winPaasHWTaskCmdLine;

                CloudTask hwTask = new CloudTask(id: taskId, commandline: winnerTaskCmdLine);

                // Open the new Job as bound.
                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                // add Task to Job
                boundJob.AddTask(hwTask);

                // wait for the task to complete

                Utilities        utilities        = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    TaskState.Completed,
                    TimeSpan.FromMinutes(3));

                CloudTask myCompletedTask = new List <CloudTask>(boundJob.ListTasks(null))[0];

                string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                // confirm that stdout includes correct value
                Assert.Contains("Hello World", stdOut);

                testOutputHelper.WriteLine("StdOut: ");
                testOutputHelper.WriteLine(stdOut);

                testOutputHelper.WriteLine("StdErr: ");
                testOutputHelper.WriteLine(stdErr);
            }
            finally
            {
                // delete the job to free the Pool compute nodes.
                if (deleteJob)
                {
                    TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                }
            }
        }
Example #28
        public void Bug1480491NodeFileFileProperties()
        {
            Action test = () =>
            {
                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "Bug1480491Job-" + TestUtilities.GetMyName();

                    try
                    {
                        const string taskId = "hiWorld";

                        //
                        // Create the job
                        //
                        CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation());
                        unboundJob.PoolInformation.PoolId = this.poolFixture.PoolId;
                        unboundJob.Commit();

                        CloudJob  boundJob = batchCli.JobOperations.GetJob(jobId);
                        CloudTask myTask   = new CloudTask(taskId, "cmd /c echo hello world");

                        boundJob.AddTask(myTask);

                        this.testOutputHelper.WriteLine("Initial job commit()");

                        //
                        // Wait for task to go to completion
                        //
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            TimeSpan.FromMinutes(3));

                        const int expectedFileSize = 13; //Magic number based on output generated by the task

                        //
                        // NodeFile by task
                        //
                        NodeFile file = batchCli.JobOperations.GetNodeFile(jobId, taskId, Constants.StandardOutFileName);

                        this.testOutputHelper.WriteLine("File {0} has content length: {1}", Constants.StandardOutFileName, file.Properties.ContentLength);
                        this.testOutputHelper.WriteLine("File {0} has content type: {1}", Constants.StandardOutFileName, file.Properties.ContentType);

                        this.testOutputHelper.WriteLine("File {0} has creation time: {1}", Constants.StandardOutFileName, file.Properties.CreationTime);
                        this.testOutputHelper.WriteLine("File {0} has last modified time: {1}", Constants.StandardOutFileName, file.Properties.LastModified);

                        Assert.Equal(expectedFileSize, file.Properties.ContentLength);
                        Assert.Equal("text/plain", file.Properties.ContentType);

                        //
                        // NodeFile by node
                        //
                        CloudTask boundTask     = boundJob.GetTask(taskId);
                        string    computeNodeId = boundTask.ComputeNodeInformation.AffinityId.Split(':')[1];

                        ComputeNode computeNode = batchCli.PoolOperations.GetComputeNode(this.poolFixture.PoolId, computeNodeId);

                        this.testOutputHelper.WriteLine("Task ran on compute node: {0}", computeNodeId);

                        List <NodeFile> files = computeNode.ListNodeFiles(recursive: true).ToList();
                        foreach (NodeFile nodeFile in files)
                        {
                            this.testOutputHelper.WriteLine("Found file: {0}", nodeFile.Path);
                        }

                        string filePathToGet = string.Format("workitems/{0}/{1}/{2}/{3}", jobId, "job-1", taskId, Constants.StandardOutFileName);
                        file = computeNode.GetNodeFile(filePathToGet);

                        this.testOutputHelper.WriteLine("File {0} has content length: {1}", filePathToGet, file.Properties.ContentLength);
                        this.testOutputHelper.WriteLine("File {0} has content type: {1}", filePathToGet, file.Properties.ContentType);

                        this.testOutputHelper.WriteLine("File {0} has creation time: {1}", filePathToGet, file.Properties.CreationTime);
                        this.testOutputHelper.WriteLine("File {0} has last modified time: {1}", filePathToGet, file.Properties.LastModified);

                        Assert.Equal(expectedFileSize, file.Properties.ContentLength);
                        Assert.Equal("text/plain", file.Properties.ContentType);
                    }
                    finally
                    {
                        batchCli.JobOperations.DeleteJob(jobId);
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
        private static async Task MainAsync(string[] args)
        {
            const string poolId = "JobPrepReleaseSamplePool";
            const string jobId  = "JobPrepReleaseSampleJob";

            // Location of the file that the job tasks will work with, a text file in the
            // node's "shared" directory.
            const string taskOutputFile = "%AZ_BATCH_NODE_SHARED_DIR%\\job_prep_and_release.txt";

            // The job prep task will write the node ID to the text file in the shared directory
            const string jobPrepCmdLine = "cmd /c echo %AZ_BATCH_NODE_ID% tasks: >" + taskOutputFile;

            // Each task then echoes its ID to the same text file
            const string taskCmdLine = "cmd /c echo   %AZ_BATCH_TASK_ID% >> " + taskOutputFile;

            // The job release task will then delete the text file from the shared directory
            const string jobReleaseCmdLine = "cmd /c del " + taskOutputFile;

            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            // Initialize the BatchClient for access to your Batch account
            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool (or obtain an existing pool with the specified ID)
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                                poolId,
                                                                                "small",
                                                                                2,
                                                                                1);

                // Create a CloudJob (or obtain an existing job with the specified ID)
                CloudJob job = await SampleHelpers.GetJobIfExistAsync(batchClient, jobId);

                if (job == null)
                {
                    Console.WriteLine("Job {0} not found, creating...", jobId);

                    CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation()
                    {
                        PoolId = poolId
                    });

                    // Configure and assign the job preparation task
                    unboundJob.JobPreparationTask = new JobPreparationTask {
                        CommandLine = jobPrepCmdLine
                    };

                    // Configure and assign the job release task
                    unboundJob.JobReleaseTask = new JobReleaseTask {
                        CommandLine = jobReleaseCmdLine
                    };

                    await unboundJob.CommitAsync();

                    // Get the bound version of the job with all of its properties populated
                    job = await batchClient.JobOperations.GetJobAsync(jobId);
                }

                // Create the tasks that the job will execute
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= 8; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = taskCmdLine;
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
                // submission helps to ensure efficient underlying API calls to the Batch service.
                Console.WriteLine("Submitting tasks and awaiting completion...");
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
                // for the nodes within the pool to be created and started if the pool had not yet been created.
                await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                    job.ListTasks(),
                    TaskState.Completed,
                    TimeSpan.FromMinutes(30));

                Console.WriteLine("All tasks completed.");
                Console.WriteLine();

                // Print the contents of the shared text file modified by the job preparation and other tasks.
                ODATADetailLevel nodeDetail          = new ODATADetailLevel(selectClause: "id, state");
                IPagedEnumerable <ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(pool.Id, nodeDetail);
                await nodes.ForEachAsync(async (node) =>
                {
                    // Check to ensure that the node is Idle before attempting to pull the text file.
                    // If the pool was just created, there is a chance that another node completed all
                    // of the tasks prior to the other node(s) completing their startup procedure.
                    if (node.State == ComputeNodeState.Idle)
                    {
                        NodeFile sharedTextFile = await node.GetNodeFileAsync("shared\\job_prep_and_release.txt");
                        Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Name, node.Id);
                        Console.WriteLine("-------------------------------------------");
                        Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
                    }
                });

                // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
                // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
                // thus you need not call Terminate if you typically delete your jobs upon task completion.
                await batchClient.JobOperations.TerminateJobAsync(job.Id);

                // Wait for the job to reach state "Completed." Note that this wait is not typically necessary in
                // production code, but is done here to enable the checking of the release tasks exit code below.
                await ArticleHelpers.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

                // Print the exit codes of the prep and release tasks by obtaining their execution info
                List <JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();

                foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
                {
                    Console.WriteLine();
                    Console.WriteLine("{0}: ", info.ComputeNodeId);

                    // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
                    if (info.JobPreparationTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Prep task exit code:    {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
                    }

                    // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
                    if (info.JobReleaseTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
                    }
                }

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    // Note that deleting the job will execute the job release task if the job was not previously terminated
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
        public void CanAddTaskWithFilesToStage()
        {
            StagingStorageAccount storageCreds = TestUtilities.GetStorageCredentialsFromEnvironment();

            using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()))
            {
                string jobId = "TestTaskWithFilesToStage-" + TestUtilities.GetMyName();

                try
                {
                    CloudJob job = batchCli.JobOperations.CreateJob(jobId, new PoolInformation()
                    {
                        PoolId = this.poolFixture.PoolId
                    });
                    job.Commit();
                    CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                    CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & wc localwords.txt");

                    myTask.FilesToStage = new List <IFileStagingProvider>
                    {
                        new FileToStage(Resources.LocalWordsDotText, storageCreds)
                    };

                    // add the task to the job
                    var artifacts        = boundJob.AddTask(myTask);
                    var specificArtifact = artifacts[typeof(FileToStage)];
                    SequentialFileStagingArtifact sfsa = specificArtifact as SequentialFileStagingArtifact;

                    Assert.NotNull(sfsa);

                    // Open the new Job as bound.
                    CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);

                    // wait for the task to complete
                    TaskStateMonitor taskStateMonitor = batchCli.Utilities.CreateTaskStateMonitor();

                    taskStateMonitor.WaitAll(
                        boundJob.ListTasks(),
                        Microsoft.Azure.Batch.Common.TaskState.Completed,
                        TimeSpan.FromMinutes(10),
                        controlParams: null,
                        additionalBehaviors: new[]
                    {
                        // spam/logging interceptor
                        new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) =>
                        {
                            this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                            try
                            {
                                // print out the compute node states... we are actually waiting on the compute nodes
                                List <ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();

                                this.testOutputHelper.WriteLine("    #compute nodes: " + allComputeNodes.Count);

                                allComputeNodes.ForEach(
                                    (icn) =>
                                {
                                    this.testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State);
                                });
                            }
                            catch (Exception ex)
                            {
                                // there is a race between the pool-lifetime job ending and the ListComputeNodes call above
                                Assert.True(false, "SampleWithFilesAndPool can probably ignore this if its pool is not found: " + ex.ToString());
                            }
                        })
                    });

                    List <CloudTask> tasks           = boundJob.ListTasks().ToList();
                    CloudTask        myCompletedTask = tasks.Single();

                    foreach (CloudTask curTask in tasks)
                    {
                        this.testOutputHelper.WriteLine("Task Id: " + curTask.Id + ", state: " + curTask.State);
                    }

                    boundPool.Refresh();

                    this.testOutputHelper.WriteLine("Pool Id: " + boundPool.Id + ", state: " + boundPool.State);

                    string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                    string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                    this.testOutputHelper.WriteLine("StdOut: ");
                    this.testOutputHelper.WriteLine(stdOut);

                    this.testOutputHelper.WriteLine("StdErr: ");
                    this.testOutputHelper.WriteLine(stdErr);

                    this.testOutputHelper.WriteLine("Task Files:");

                    foreach (NodeFile curFile in myCompletedTask.ListNodeFiles(recursive: true))
                    {
                        this.testOutputHelper.WriteLine("    File path: " + curFile.Path);
                    }

                    var files = myCompletedTask.ListNodeFiles(recursive: true).ToList();

                    // confirm the files are there
                    Assert.True(files.Any(file => file.Path.Contains("localWords.txt")), "missing file: localWords.txt");
                }
                finally
                {
                    TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                }
            }
        }
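
TestUtilities.GetStorageCredentialsFromEnvironment, called at the top of this test, is also not shown. A minimal sketch, assuming a StagingStorageAccount built from an account name, key, and blob endpoint; the environment variable names below are illustrative placeholders, not the ones the real test utilities use:

        public static StagingStorageAccount GetStorageCredentialsFromEnvironment()
        {
            // Hypothetical variable names; adjust to match your own test configuration.
            string storageAccountName = Environment.GetEnvironmentVariable("STORAGE_ACCOUNT_NAME");
            string storageAccountKey  = Environment.GetEnvironmentVariable("STORAGE_ACCOUNT_KEY");
            string blobEndpoint       = string.Format("https://{0}.blob.core.windows.net", storageAccountName);

            return new StagingStorageAccount(storageAccountName, storageAccountKey, blobEndpoint);
        }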
Example #31
        private static async Task MainAsync()
        {
            try
            {
                //Timeout Code
                Timer timeoutTimer = new Timer(timerCallBack, null, timeoutTimeSpan, Timeout.InfiniteTimeSpan);

                Log("Connecting to Batch");
                Log(BatchAccountName);
                Log(BatchAccountKey);
                Log(BatchAccountUrl);
                //Connecting to Batch
                var credentials = new BatchSharedKeyCredentials(BatchAccountUrl, BatchAccountName, BatchAccountKey);
                batchClient = BatchClient.Open(credentials);

                Log("Batch Connected");
                Log("Getting Job");

                //Getting Job
                Job = await batchClient.JobOperations.GetJobAsync(JobId);

                //Job = batchClient.JobOperations.ListJobs().First();

                Log("Getting Tasks");

                //Wait till tasks are added
                while (true)
                {
                    if (Job.ListTasks().Count() < 2)
                    {
                        Thread.Sleep(TaskWaitPeriod);
                        continue;
                    }
                    break;
                }

                //Getting all the Tasks in the Job
                List <CloudTask> tasks = new List <CloudTask>();

                while (true)
                {
                    tasks = Job.ListTasks().ToList();
                    tasks = tasks.Where(t => !t.Id.Equals(JobManagerName)).ToList();
                    if (tasks.Count >= TotalTaskAdded)
                    {
                        Log("Total Task Added: " + TotalTaskAdded);
                        break;
                    }
                    else
                    {
                        Log("Total Task Added not matching: " + TotalTaskAdded + " going to sleep.");
                        Thread.Sleep(TaskCompletionWaitPeriod);
                    }
                }

                Log("Number of tasks: " + tasks.Count);

                //Periodically check on all tasks
                while (true)
                {
                    Log("Loop Started");

                    try
                    {
                        bool flag = false;
                        foreach (var task in tasks)
                        {
                            await task.RefreshAsync();

                            Log("Entering task: " + task.Id);
                            if (task.Id.Equals(JobManagerName))
                            {
                                continue;
                            }
                            if (task.State == TaskState.Completed)
                            {
                                Log("[" + task.Id + "] task completed");
                                //if (task.ExecutionInformation.SchedulingError != null)
                                //{
                                //    Log("[" + task.Id + "] task scheduling error : Reactivating");
                                //    //Scheduling error : Restart task
                                //    //await task.ReactivateAsync();
                                //    //flag = true;
                                //}
                                //else
                                if (task.ExecutionInformation.ExitCode != 0)
                                {
                                    Log("[" + task.Id + "] task error, terminating all tasks");
                                    //Task returned with an error: stop all other tasks
                                    await TerminateAllTaks(tasks.ToList());

                                    flag = false;
                                    break;
                                }
                            }
                            else
                            {
                                Log(task.Id + " state: " + task.State);
                                flag = true;
                            }
                        }

                        if (!flag || timeoutFlag)
                        {
                            break;
                        }
                    }
                    catch (Exception e)
                    {
                        Log("");
                        Log(e.ToString());
                        Log(e.StackTrace);
                        Log(e.Message);
                        if (null != e.InnerException)
                        {
                            Log(e.InnerException.ToString());
                            Log(e.InnerException.StackTrace);
                        }
                    }
                    Log("Sleeping for " + TaskCompletionWaitPeriod + " milliseconds");
                    Thread.Sleep(TaskCompletionWaitPeriod);
                }

                Log("Loop Ended");

                try
                {
                    //Upload all the files
                    foreach (var task in tasks)
                    {
                        try
                        {
                            await task.RefreshAsync();

                            Log("Entering task: " + task.Id);
                            if (task.Id.Equals(JobManagerName))
                            {
                                continue;
                            }
                            var files = task.ListNodeFiles(recursive: true).ToList();
                            if (null == files)
                            {
                                continue;
                            }
                                //Upload these files to the central container and add an Application Insights event
                            foreach (var file in files)
                            {
                                if (file.IsDirectory ?? false)
                                {
                                    continue;
                                }
                                if (file.Path.ToLower().Contains("wd") && CheckIfSupportedFileType(file.Path))
                                {
                                    //upload this file to container
                                    var fileName     = task.Id + "$" + file.Path.Split('\\').Last();
                                    var fileFullPath = Path.Combine(Directory.GetCurrentDirectory(), fileName);
                                    Log(string.Format("Uploading output file for [{0}]. File Path: [{1}].", task.Id, fileFullPath));
                                    using (FileStream fileStream = new FileStream(fileFullPath, FileMode.Create))
                                    {
                                        await file.CopyToStreamAsync(fileStream);

                                        fileStream.Close();
                                    }
                                    UploadFileToContainer(fileFullPath, OutputContainerSasUrl);
                                }
                            }
                        }
                        catch (Exception e)
                        {
                            Log(e.ToString());
                            Log(e.StackTrace);
                        }
                    }
                }
                catch (Exception e)
                {
                    Log(e.StackTrace);
                }
            }
            catch (Exception)
            {
                // Rethrow without resetting the stack trace.
                throw;
            }
        }
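
CheckIfSupportedFileType and UploadFileToContainer are referenced above but not defined in this listing. A minimal sketch of both, assuming an extension whitelist for the type check and the Microsoft.WindowsAzure.Storage blob client for the SAS-based upload (the extension list and blob handling are assumptions, not the original helpers):

        private static bool CheckIfSupportedFileType(string filePath)
        {
            // Illustrative whitelist; the real set of supported extensions is not shown above.
            string[] supportedExtensions = { ".txt", ".csv", ".log", ".json", ".xml" };

            return supportedExtensions.Contains(Path.GetExtension(filePath), StringComparer.OrdinalIgnoreCase);
        }

        private static void UploadFileToContainer(string filePath, string containerSasUrl)
        {
            // The SAS URL already carries the permissions required to write to the container.
            CloudBlobContainer container = new CloudBlobContainer(new Uri(containerSasUrl));
            CloudBlockBlob     blob      = container.GetBlockBlobReference(Path.GetFileName(filePath));

            blob.UploadFromFile(filePath);

            Log(string.Format("Uploaded {0} to the output container.", Path.GetFileName(filePath)));
        }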