Example #1
        /// <summary>
        /// Creates a job and adds a task to it. The task is a
        /// custom executable which has a resource file associated with it.
        /// </summary>
        /// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
        /// <param name="cloudStorageAccount">The storage account to upload the files to.</param>
        /// <param name="jobId">The ID of the job.</param>
        /// <returns>The set of container names containing the job's input files.</returns>
        private async Task <HashSet <string> > SubmitJobAsync(BatchClient batchClient, CloudStorageAccount cloudStorageAccount, string jobId)
        {
            // create an empty unbound Job
            CloudJob unboundJob = batchClient.JobOperations.CreateJob();

            unboundJob.Id = jobId;
            unboundJob.PoolInformation = new PoolInformation()
            {
                PoolId = this.poolsAndResourceFileSettings.PoolId
            };

            // Commit Job to create it in the service
            await unboundJob.CommitAsync();

            List <CloudTask> tasksToRun = new List <CloudTask>();

            // Create a task which requires some resource files
            CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            string localSampleFile = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "HelloWorld.txt");

            File.WriteAllText(localSampleFile, "hello from Batch PoolsAndResourceFiles sample!");

            StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                storageAccount: this.accountSettings.StorageAccountName,
                storageAccountKey: this.accountSettings.StorageAccountKey,
                blobEndpoint: cloudStorageAccount.BlobEndpoint.ToString());

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage helloWorldFile = new FileToStage(localSampleFile, fileStagingStorageAccount);
            FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

            // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
            // The Batch service does not automatically delete content from your storage account, so files added in this
            // way must be manually removed when they are no longer used.
            taskWithFiles.FilesToStage.Add(helloWorldFile);
            taskWithFiles.FilesToStage.Add(simpleTaskFile);

            tasksToRun.Add(taskWithFiles);

            var fileStagingArtifacts = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();

            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts : fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet <string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);

            return(blobContainerNames);
        }
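The comments in this example point out that the Batch service never deletes staged blobs on its own. A minimal cleanup sketch for the container names returned by SubmitJobAsync, assuming the same Microsoft.WindowsAzure.Storage types used above (the helper name DeleteStagingContainersAsync is illustrative):

        // Deletes the blob containers created by file staging once the job's tasks have
        // finished and the staged files are no longer needed.
        private static async Task DeleteStagingContainersAsync(
            CloudStorageAccount cloudStorageAccount,
            IEnumerable<string> blobContainerNames)
        {
            CloudBlobClient blobClient = cloudStorageAccount.CreateCloudBlobClient();

            foreach (string containerName in blobContainerNames)
            {
                CloudBlobContainer container = blobClient.GetContainerReference(containerName);

                // DeleteIfExistsAsync removes the container together with all staged blobs in it.
                await container.DeleteIfExistsAsync();
            }
        }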
Example #2
        private static IList <ResourceFile> UploadFilesMakeResFiles(StagingStorageAccount stagingCreds)
        {
            // use a dummy task to stage some files and generate resource files
            CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & type localwords.txt");

            // first we have local files that we want pushed to the compute node before the commandline is invoked
            FileToStage wordsDotText = new FileToStage(Resources.LocalWordsDotText, stagingCreds);                // use "default" mapping to base name of local file

            // add in the files to stage
            myTask.FilesToStage = new List <IFileStagingProvider>();
            myTask.FilesToStage.Add(wordsDotText);

            // trigger file staging
            myTask.StageFiles();

            // return the resolved resource files
            return(myTask.ResourceFiles);
        }
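The resource files resolved by this helper can be attached to any number of real tasks without staging the blobs again; only the dummy task above triggers the upload. A short usage sketch (stagingCreds as in the helper; the task IDs and command line are illustrative):

        IList<ResourceFile> sharedResourceFiles = UploadFilesMakeResFiles(stagingCreds);

        List<CloudTask> tasks = new List<CloudTask>();
        for (int i = 0; i < 3; i++)
        {
            CloudTask task = new CloudTask("CountWordsTask-" + i, @"cmd /c type localwords.txt");

            // Reuse the already-staged blobs; nothing is uploaded again.
            task.ResourceFiles = sharedResourceFiles;
            tasks.Add(task);
        }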
        /// <summary>
        /// Submits a set of tasks to the job
        /// </summary>
        /// <param name="batchClient">The batch client to use.</param>
        /// <returns>The set of blob container names created by file staging.</returns>
        private async Task <HashSet <string> > SubmitTasks(BatchClient batchClient)
        {
            List <CloudTask> tasksToRun = new List <CloudTask>();

            // Create a task which requires some resource files
            CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            string localSampleFilePath = GettingStartedCommon.GenerateTemporaryFile("HelloWorld.txt", "hello from Batch JobManager sample!");

            StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                storageAccount: this.configurationSettings.StorageAccountName,
                storageAccountKey: this.configurationSettings.StorageAccountKey,
                blobEndpoint: this.configurationSettings.StorageBlobEndpoint);

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage helloWorldFile = new FileToStage(localSampleFilePath, fileStagingStorageAccount);
            FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

            // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
            // The Batch service does not automatically delete content from your storage account, so files added in this
            // way must be manually removed when they are no longer used.
            taskWithFiles.FilesToStage.Add(helloWorldFile);
            taskWithFiles.FilesToStage.Add(simpleTaskFile);

            tasksToRun.Add(taskWithFiles);

            var fileStagingArtifacts = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();

            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts : fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet <string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);

            return(blobContainerNames);
        }
Example #4
        private static List <CloudTask> CreateTasks(Settings unzipperSettings, StagingStorageAccount stagingStorageAccount)
        {
            // create file staging objects that represent the executable and its dependent assembly to run as the task.
            // These files are copied to every node before the corresponding task is scheduled to run on that node.
            FileToStage unzipperExe = new FileToStage(UnzipperExeName, stagingStorageAccount);
            FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);


            //get list of zipped files
            var zipFiles = GetZipFiles(unzipperSettings).ToList();

            Console.WriteLine("found {0} zipped files", zipFiles.Count);


            // initialize a collection to hold the tasks that will be submitted in their entirety. This will be one task per file.
            List <CloudTask> tasksToRun = new List <CloudTask>(zipFiles.Count);
            int i = 0;

            foreach (var zipFile in zipFiles)
            {
                CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3}",
                                                                             UnzipperExeName,
                                                                             zipFile.Uri,
                                                                             unzipperSettings.StorageAccountName,
                                                                             unzipperSettings.StorageAccountKey));

                //This is the list of files to stage to a container -- for each job, one container is created and
                //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                //the container).
                task.FilesToStage = new List <IFileStagingProvider>
                {
                    unzipperExe,
                    storageDll
                };

                tasksToRun.Add(task);
                i++;
            }

            return(tasksToRun);
        }
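A possible call site for CreateTasks, following the AddTask pattern used elsewhere on this page (batchClient, jobId, and a StorageBlobEndpoint setting are assumed to exist in the surrounding program):

        StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
            storageAccount: unzipperSettings.StorageAccountName,
            storageAccountKey: unzipperSettings.StorageAccountKey,
            blobEndpoint: unzipperSettings.StorageBlobEndpoint); // assumed setting name

        List<CloudTask> tasksToRun = CreateTasks(unzipperSettings, stagingStorageAccount);

        // Collect staging artifacts so the blob container created for the staged files
        // can be located and cleaned up later.
        var fileStagingArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
        batchClient.JobOperations.AddTask(jobId, tasksToRun, fileStagingArtifacts: fileStagingArtifacts);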
Example #5
        public void TestSampleWithFilesAndPool()
        {
            Action test = () =>
            {
                StagingStorageAccount storageCreds = TestUtilities.GetStorageCredentialsFromEnvironment();

                using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
                {
                    string jobId = "SampleWithFilesJob-" + TestUtilities.GetMyName();


                    try
                    {
                        CloudJob quickJob = batchCli.JobOperations.CreateJob();
                        quickJob.Id = jobId;
                        quickJob.PoolInformation = new PoolInformation()
                        {
                            PoolId = this.poolFixture.PoolId
                        };
                        quickJob.Commit();
                        CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                        CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & wc localwords.txt");

                        // first we have local files that we want pushed to the compute node before the commandline is invoked
                        FileToStage wordsDotText = new FileToStage(Resources.LocalWordsDotText, storageCreds);                // use "default" mapping to base name of local file

                        myTask.FilesToStage = new List <IFileStagingProvider>();

                        myTask.FilesToStage.Add(wordsDotText);

                        // add the task to the job
                        var artifacts        = boundJob.AddTask(myTask);
                        var specificArtifact = artifacts[typeof(FileToStage)];
                        SequentialFileStagingArtifact sfsa = specificArtifact as SequentialFileStagingArtifact;

                        Assert.NotNull(sfsa);

                        // add a million more tasks...

                        // test to ensure the task is read only
                        TestUtilities.AssertThrows <InvalidOperationException>(() => myTask.FilesToStage = new List <IFileStagingProvider>());

                        // Get the pool that the bound job is running on.
                        CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);

                        // wait for the task to complete
                        Utilities        utilities        = batchCli.Utilities;
                        TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                        taskStateMonitor.WaitAll(
                            boundJob.ListTasks(),
                            Microsoft.Azure.Batch.Common.TaskState.Completed,
                            TimeSpan.FromMinutes(10),
                            controlParams: null,
                            additionalBehaviors:
                            new[]
                        {
                            // spam/logging interceptor
                            new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) =>
                            {
                                this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                                try
                                {
                                    // print out the compute node states... we are actually waiting on the compute nodes
                                    List <ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();

                                    this.testOutputHelper.WriteLine("    #compute nodes: " + allComputeNodes.Count);

                                    allComputeNodes.ForEach(
                                        (icn) =>
                                    {
                                        this.testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State);
                                    });
                                }
                                catch (Exception ex)
                                {
                                    // there is a race between the pool-lifetime job and the end of the job... and the ListComputeNodes above
                                    Assert.True(false, "SampleWithFilesAndPool can probably ignore this if its pool was not found: " + ex.ToString());
                                }
                            })
                        });

                        List <CloudTask> tasks           = boundJob.ListTasks(null).ToList();
                        CloudTask        myCompletedTask = tasks[0];

                        foreach (CloudTask curTask in tasks)
                        {
                            this.testOutputHelper.WriteLine("Task Id: " + curTask.Id + ", state: " + curTask.State);
                        }

                        boundPool.Refresh();

                        this.testOutputHelper.WriteLine("Pool Id: " + boundPool.Id + ", state: " + boundPool.State);

                        string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                        string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                        this.testOutputHelper.WriteLine("StdOut: ");
                        this.testOutputHelper.WriteLine(stdOut);

                        this.testOutputHelper.WriteLine("StdErr: ");
                        this.testOutputHelper.WriteLine(stdErr);

                        this.testOutputHelper.WriteLine("Task Files:");

                        foreach (NodeFile curFile in myCompletedTask.ListNodeFiles(recursive: true))
                        {
                            this.testOutputHelper.WriteLine("    Filename: " + curFile.Name);
                        }

                        // confirm the files are there
                        Assert.True(FoundFile("localwords.txt", myCompletedTask.ListNodeFiles(recursive: true)), "missing file: localwords.txt");

                        // test validation of StagingStorageAccount

                        TestUtilities.AssertThrows <ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: " ", storageAccountKey: "key", blobEndpoint: "blob"); });
                        TestUtilities.AssertThrows <ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: " ", blobEndpoint: "blob"); });
                        TestUtilities.AssertThrows <ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: "key", blobEndpoint: ""); });

                        if (null != sfsa)
                        {
                            // TODO: delete the container!
                        }
                    }
                    finally
                    {
                        TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
                    }
                }
            };

            SynchronizationContextHelper.RunTest(test, TestTimeout);
        }
Example #6
        public static void JobMain(string[] args)
        {
            //Load the configuration
            TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig();

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                configuration.StorageAccountName,
                configuration.StorageAccountKey,
                configuration.StorageAccountBlobEndpoint);

            IBatchClient client           = BatchClient.Connect(configuration.BatchServiceUrl, new BatchCredentials(configuration.BatchAccountName, configuration.BatchAccountKey));
            string       stagingContainer = null;

            //Create a pool (if user hasn't provided one)
            if (configuration.ShouldCreatePool)
            {
                using (IPoolManager pm = client.OpenPoolManager())
                {
                    //OSFamily 4 == Windows Server 2012 R2
                    //You can learn more about os families and versions at:
                    //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                    ICloudPool pool = pm.CreatePool(configuration.PoolName, targetDedicated: configuration.PoolSize, osFamily: "4", vmSize: "small");
                    Console.WriteLine("Adding pool {0}", configuration.PoolName);
                    pool.Commit();
                }
            }

            try
            {
                using (IWorkItemManager wm = client.OpenWorkItemManager())
                {
                    IToolbox toolbox = client.OpenToolbox();

                    //Use the TaskSubmissionHelper to help us create a WorkItem and add tasks to it.
                    ITaskSubmissionHelper taskSubmissionHelper = toolbox.CreateTaskSubmissionHelper(wm, configuration.PoolName);
                    taskSubmissionHelper.WorkItemName = configuration.WorkItemName;

                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName);
                    Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName);

                    for (int i = 1; i <= configuration.NumberOfTasks; i++)
                    {
                        ICloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                      TopNWordsExeName,
                                                                                      bookFileUri,
                                                                                      configuration.NumberOfTopWords,
                                                                                      configuration.StorageAccountName,
                                                                                      configuration.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each TaskSubmissionHelper one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the TaskSubmissionHelper's container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll
                        };

                        taskSubmissionHelper.AddTask(task);
                    }

                    //Commit all the tasks to the Batch Service.
                    IJobCommitUnboundArtifacts artifacts = taskSubmissionHelper.Commit() as IJobCommitUnboundArtifacts;

                    foreach (var fileStagingArtifact in artifacts.FileStagingArtifacts)
                    {
                        SequentialFileStagingArtifact stagingArtifact = fileStagingArtifact.Value as SequentialFileStagingArtifact;
                        if (stagingArtifact != null)
                        {
                            stagingContainer = stagingArtifact.BlobContainerCreated;
                            Console.WriteLine("Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                              stagingArtifact.BlobContainerCreated);
                        }
                    }

                    //Get the job to monitor status.
                    ICloudJob job = wm.GetJob(artifacts.WorkItemName, artifacts.JobName);

                    Console.Write("Waiting for tasks to complete ...");
                    // Wait up to 20 minutes for all tasks to reach the completed state
                    client.OpenToolbox().CreateTaskStateMonitor().WaitAll(job.ListTasks(), TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("Done.");

                    foreach (ICloudTask task in job.ListTasks())
                    {
                        Console.WriteLine("Task " + task.Name + " says:\n" + task.GetTaskFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine(task.GetTaskFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }
            }
            finally
            {
                //Delete the pool that we created
                if (configuration.ShouldCreatePool)
                {
                    using (IPoolManager pm = client.OpenPoolManager())
                    {
                        Console.WriteLine("Deleting pool: {0}", configuration.PoolName);
                        pm.DeletePool(configuration.PoolName);
                    }
                }

                //Delete the workitem that we created
                if (configuration.ShouldDeleteWorkItem)
                {
                    using (IWorkItemManager wm = client.OpenWorkItemManager())
                    {
                        Console.WriteLine("Deleting work item: {0}", configuration.WorkItemName);
                        wm.DeleteWorkItem(configuration.WorkItemName);
                    }
                }

                //Delete the containers we created
                if (configuration.ShouldDeleteContainer)
                {
                    DeleteContainers(configuration, stagingContainer);
                }
            }
        }
        /// <summary>
        /// Submits a set of tasks to the job
        /// </summary>
        /// <param name="batchClient">The batch client to use.</param>
        /// <returns>The set of blob container names created by file staging.</returns>
        private async Task<HashSet<string>> SubmitTasks(BatchClient batchClient)
        {
            List<CloudTask> tasksToRun = new List<CloudTask>();

            // Create a task which requires some resource files
            CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);
            
            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles.FilesToStage = new List<IFileStagingProvider>();

            // generate a local file in temp directory
            string localSampleFilePath = GettingStartedCommon.GenerateTemporaryFile("HelloWorld.txt", "hello from Batch JobManager sample!");
            
            StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                storageAccount: this.configurationSettings.StorageAccountName,
                storageAccountKey: this.configurationSettings.StorageAccountKey,
                blobEndpoint: this.configurationSettings.StorageBlobEndpoint);

            // add the files as a task dependency so they will be uploaded to storage before the task 
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage helloWorldFile = new FileToStage(localSampleFilePath, fileStagingStorageAccount);
            FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

            // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
            // The Batch service does not automatically delete content from your storage account, so files added in this 
            // way must be manually removed when they are no longer used.
            taskWithFiles.FilesToStage.Add(helloWorldFile);
            taskWithFiles.FilesToStage.Add(simpleTaskFile);

            tasksToRun.Add(taskWithFiles);

            var fileStagingArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();

            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize 
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts: fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet<string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);

            return blobContainerNames;
        }
Example #8
        private static async Task MainAsync()
        {
            const string poolId = "FileHandlingPool";
            const string jobId  = "FileHandlingJobDemo";

            var settings = Config.LoadAccountSettings();

            SetupStorage(settings.StorageAccountName, settings.StorageAccountKey);

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(settings.BatchServiceUrl, settings.BatchAccountName, settings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                var pool = await BatchUtils.CreatePoolIfNotExistAsync(batchClient, poolId);

                var job = await BatchUtils.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                //set up auto storage file
                ResourceFile autoStorageFile = ResourceFile.FromAutoStorageContainer(AutoStorageContainerName, AutoStorageFileName);
                Console.WriteLine("\n[INFO] Autostorage resource File reference: ");
                Console.WriteLine("AutoStorageContainer: " + autoStorageFile.AutoStorageContainerName);
                Console.WriteLine("FilePath: " + autoStorageFile.FilePath);

                //upload file to external storage and add it as a resource file
                string storageConnectionString =
                    $"DefaultEndpointsProtocol=https;AccountName={settings.StorageAccountName};AccountKey={settings.StorageAccountKey}";

                CloudStorageAccount storageAccount    = CloudStorageAccount.Parse(storageConnectionString);
                CloudBlobClient     blobClient        = storageAccount.CreateCloudBlobClient();
                CloudBlobContainer  externalContainer = blobClient.GetContainerReference(ExternalStorageContainerName);
                await externalContainer.CreateIfNotExistsAsync();

                var externalFile = await UploadFileToContainer(blobClient, ExternalStorageContainerName, "resource_files/resource_file.txt", "resource_file.txt");

                Console.WriteLine("\n[INFO] External storage resource File reference:");
                Console.WriteLine("SAS Url: " + externalFile.HttpUrl);
                Console.WriteLine("FilePath: " + externalFile.FilePath);


                // using staging files API
                var filesToStage = new List <IFileStagingProvider>();
                StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                    storageAccount: settings.StorageAccountName,
                    storageAccountKey: settings.StorageAccountKey,
                    blobEndpoint: storageAccount.BlobEndpoint.ToString());

                FileToStage stagedFile = new FileToStage("resource_files/staged_file.txt", fileStagingStorageAccount);
                Console.WriteLine("\n[INFO] Staged File added:");
                Console.WriteLine("Local File: " + stagedFile.LocalFileToStage);
                Console.WriteLine("Node File: " + stagedFile.NodeFileName);

                filesToStage.Add(stagedFile);

                // set up output files
                // Generate a SAS for the output container
                CloudBlobContainer outputContainer = blobClient.GetContainerReference(OutputContainerName);
                await outputContainer.CreateIfNotExistsAsync();

                string containerSas = outputContainer.GetSharedAccessSignature(new SharedAccessBlobPolicy()
                {
                    Permissions            = SharedAccessBlobPermissions.Write,
                    SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddDays(1)
                });
                string containerUrl = outputContainer.Uri.AbsoluteUri + containerSas;
                Console.WriteLine("\n[INFO] Output container: " + containerUrl);

                Console.WriteLine("\nPress return to continue...");
                Console.ReadLine();

                // Create tasks
                List <CloudTask> tasks = new List <CloudTask>();

                for (var i = 1; i <= 10; i++)
                {
                    var taskId      = i.ToString().PadLeft(3, '0');
                    var commandLine = $@"/bin/bash -c ""echo 'Hello from {taskId}' && printf 'root dir:\n' > output.txt && ls -la >> output.txt && printf '\ninput dir:\n' >> output.txt && ls -la input >> output.txt""";
                    var task        = new CloudTask(taskId, commandLine);

                    // add resource files to task (one autostorage, one in external storage)
                    task.ResourceFiles = new[] { autoStorageFile, externalFile };

                    // add staged files
                    task.FilesToStage = filesToStage;

                    // add output files
                    var outputFiles = new List <OutputFile>
                    {
                        new OutputFile(
                            filePattern: @"../std*.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(
                                                                       containerUrl: containerUrl,
                                                                       path: taskId)),
                            uploadOptions: new OutputFileUploadOptions(
                                uploadCondition: OutputFileUploadCondition.TaskCompletion)),
                        new OutputFile(
                            filePattern: @"output.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(
                                                                       containerUrl: containerUrl,
                                                                       path: taskId + @"\output.txt")),
                            uploadOptions: new OutputFileUploadOptions(
                                uploadCondition: OutputFileUploadCondition.TaskCompletion)),
                    };

                    task.OutputFiles = outputFiles;

                    tasks.Add(task);
                }

                Console.WriteLine("Submitting tasks and awaiting completion...");
                // Add all tasks to the job.
                batchClient.JobOperations.AddTask(job.Id, tasks);

                await BatchUtils.WaitForTasksAndPrintOutputAsync(batchClient, job.ListTasks(), TimeSpan.FromMinutes(30));

                // Clean up Batch resources (if the user so chooses)
                Console.WriteLine();
                Console.Write("Delete job? [yes] no: ");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.JobOperations.DeleteJob(jobId);
                }

                Console.Write("Delete pool? [yes] no: ");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.PoolOperations.DeletePool(poolId);
                }
            }
        }
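To check what the OutputFile specifications actually uploaded, one option is to enumerate the blobs in the output container using the storage client opened earlier in this example. A sketch, assuming the Microsoft.WindowsAzure.Storage types used above:

                // List everything the tasks wrote to the output container (flat listing,
                // so blob names include the per-task ID prefix used as the destination path).
                BlobContinuationToken continuationToken = null;
                do
                {
                    BlobResultSegment segment = await outputContainer.ListBlobsSegmentedAsync(
                        null, true, BlobListingDetails.None, null, continuationToken, null, null);

                    foreach (IListBlobItem item in segment.Results)
                    {
                        if (item is CloudBlockBlob blob)
                        {
                            Console.WriteLine("{0} ({1} bytes)", blob.Name, blob.Properties.Length);
                        }
                    }

                    continuationToken = segment.ContinuationToken;
                } while (continuationToken != null);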
Example #9
        /// <summary>
        /// Submit tasks which have dependent files.
        /// The files are automatically uploaded to Azure Storage using the FileStaging feature of the Azure.Batch client library.
        /// </summary>
        private static void AddTasksWithFileStaging(BatchClient client, string sharedPoolId)
        {
            string jobId = CreateJobId("HelloWorldFileStagingJob");

            Console.WriteLine("Creating job: " + jobId);
            CloudJob boundJob = CreateBoundJob(client.JobOperations, sharedPoolId, jobId);

            CloudTask taskToAdd1 = new CloudTask("task_with_file1", "cmd /c type *.txt");
            CloudTask taskToAdd2 = new CloudTask("task_with_file2", "cmd /c dir /s");

            //Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            //when the tasks are submitted to the Azure Batch service.
            taskToAdd1.FilesToStage = new List<IFileStagingProvider>();
            taskToAdd2.FilesToStage = new List<IFileStagingProvider>();

            // generate a local file in temp directory
            Process cur = Process.GetCurrentProcess();
            string path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), cur.Id + ".txt");
            File.WriteAllText(path, "hello from " + cur.Id);

            // add the files as a task dependency so they will be uploaded to storage before the task 
            // is submitted and downloaded to the VM before the task starts execution on the node
            FileToStage file = new FileToStage(path, new StagingStorageAccount(StorageAccount, StorageKey, StorageBlobEndpoint));
            taskToAdd1.FilesToStage.Add(file);
            taskToAdd2.FilesToStage.Add(file); // filetostage object can be reused

            // create a list of the tasks to add.
            List<CloudTask> tasksToRun = new List<CloudTask> {taskToAdd1, taskToAdd2};
            bool errors = false;

            try
            {
                client.JobOperations.AddTask(boundJob.Id, tasksToRun);
            }
            catch (AggregateException ae)
            {
                errors = true;
                // Go through all exceptions and dump useful information
                ae.Handle(x =>
                {
                    Console.Error.WriteLine("Adding tasks for job {0} failed", boundJob.Id);
                    if (x is BatchException)
                    {
                        BatchException be = x as BatchException;
                        if (null != be.RequestInformation && null != be.RequestInformation.AzureError)
                        {
                            // Write the server side error information
                            Console.Error.WriteLine("    AzureError.Code: " + be.RequestInformation.AzureError.Code);
                            Console.Error.WriteLine("    AzureError.Message.Value: " + be.RequestInformation.AzureError.Message.Value);
                            if (null != be.RequestInformation.AzureError.Values)
                            {
                                Console.Error.WriteLine("    AzureError.Values");
                                foreach (var v in be.RequestInformation.AzureError.Values)
                                {
                                    Console.Error.WriteLine("        {0} : {1}", v.Key, v.Value);
                                }
                            }
                            Console.Error.WriteLine();
                        }
                    }
                    else
                    {
                        Console.WriteLine(x);
                    }
                    // Indicate that the error has been handled
                    return true;
                });
            }

            // if there is no exception, wait for job response
            if (!errors)
            {
                Console.WriteLine("Waiting for all tasks to complete on job: {0}...", boundJob.Id);

                IPagedEnumerable<CloudTask> ourTasks = boundJob.ListTasks();
                client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(30));

                foreach (CloudTask task in ourTasks)
                {
                    Console.WriteLine("Task " + task.Id);
                    Console.WriteLine("stdout:\n" + task.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                    Console.WriteLine("\nstderr:\n" + task.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
                }
            }

            //Delete the job to ensure the tasks are cleaned up
            Console.WriteLine("Deleting job: {0}", boundJob.Id);
            client.JobOperations.DeleteJob(boundJob.Id);
        }
Example #10
        public static void JobMain(string[] args)
        {
            //Load the configuration
            TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig();

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                configuration.StorageAccountName,
                configuration.StorageAccountKey,
                configuration.StorageAccountBlobEndpoint);

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(configuration.BatchServiceUrl, configuration.BatchAccountName, configuration.BatchAccountKey)))
            {
                string stagingContainer = null;

                //Create a pool (if user hasn't provided one)
                if (configuration.ShouldCreatePool)
                {
                    //OSFamily 4 == Windows Server 2012 R2. You can learn more about OS families and versions at:
                    //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                    CloudPool pool = client.PoolOperations.CreatePool(configuration.PoolId, targetDedicated: configuration.PoolSize, osFamily: "4", virtualMachineSize: "small");
                    Console.WriteLine("Adding pool {0}", configuration.PoolId);

                    try
                    {
                        pool.Commit();
                    }
                    catch (AggregateException ae)
                    {
                        // Go through all exceptions and dump useful information
                        ae.Handle(x =>
                        {
                            Console.Error.WriteLine("Creating pool ID {0} failed", configuration.PoolId);
                            if (x is BatchException)
                            {
                                BatchException be = x as BatchException;

                                Console.WriteLine(be.ToString());
                                Console.WriteLine();
                            }
                            else
                            {
                                Console.WriteLine(x);
                            }

                            // can't continue without a pool
                            return(false);
                        });
                    }
                }

                try
                {
                    Console.WriteLine("Creating job: " + configuration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = configuration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = configuration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging
                    // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled
                    // by Storage which will lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.
                    string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName);
                    Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(configuration.NumberOfTasks);

                    for (int i = 1; i <= configuration.NumberOfTasks; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     bookFileUri,
                                                                                     configuration.NumberOfTopWords,
                                                                                     configuration.StorageAccountName,
                                                                                     configuration.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(configuration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(configuration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (configuration.ShouldCreatePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", configuration.PoolId);
                        client.PoolOperations.DeletePool(configuration.PoolId);
                    }

                    //Delete the job that we created
                    if (configuration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", configuration.JobId);
                        client.JobOperations.DeleteJob(configuration.JobId);
                    }

                    //Delete the containers we created
                    if (configuration.ShouldDeleteContainer)
                    {
                        DeleteContainers(configuration, stagingContainer);
                    }
                }
            }
        }
Example #11
        /// <summary>
        /// Creates a job and adds a task to it. The task is a 
        /// custom executable which has a resource file associated with it.
        /// </summary>
        /// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
        /// <param name="cloudStorageAccount">The storage account to upload the files to.</param>
        /// <param name="jobId">The ID of the job.</param>
        /// <returns>The set of container names containing the job's input files.</returns>
        private async Task<HashSet<string>> SubmitJobAsync(BatchClient batchClient, CloudStorageAccount cloudStorageAccount, string jobId)
        {
            // create an empty unbound Job
            CloudJob unboundJob = batchClient.JobOperations.CreateJob();
            unboundJob.Id = jobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = this.poolsAndResourceFileSettings.PoolId };

            // Commit Job to create it in the service
            await unboundJob.CommitAsync();

            List<CloudTask> tasksToRun = new List<CloudTask>();

            // Create a task which requires some resource files
            CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles.FilesToStage = new List<IFileStagingProvider>();
            
            // generate a local file in temp directory
            string localSampleFile = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "HelloWorld.txt");
            File.WriteAllText(localSampleFile, "hello from Batch PoolsAndResourceFiles sample!");

            StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
                storageAccount: this.accountSettings.StorageAccountName,
                storageAccountKey: this.accountSettings.StorageAccountKey,
                blobEndpoint: cloudStorageAccount.BlobEndpoint.ToString());

            // add the files as a task dependency so they will be uploaded to storage before the task 
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage helloWorldFile = new FileToStage(localSampleFile, fileStagingStorageAccount);
            FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

            // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
            // The Batch service does not automatically delete content from your storage account, so files added in this 
            // way must be manually removed when they are no longer used.
            taskWithFiles.FilesToStage.Add(helloWorldFile);
            taskWithFiles.FilesToStage.Add(simpleTaskFile);

            tasksToRun.Add(taskWithFiles);

            var fileStagingArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            
            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize 
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts: fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet<string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);
            return blobContainerNames;
        }
Example #12
        /// <summary>
        /// Submit a work item with tasks which have dependent files.
        /// The files are automatically uploaded to Azure Storage using the FileStaging feature of the Azure.Batch client library.
        /// </summary>
        /// <param name="client"></param>
        private static void AddWorkWithFileStaging(IBatchClient client)
        {
            using (IWorkItemManager wm = client.OpenWorkItemManager())
            {
                IToolbox toolbox = client.OpenToolbox();
                ITaskSubmissionHelper taskSubmissionHelper = toolbox.CreateTaskSubmissionHelper(wm, Program.PoolName);

                taskSubmissionHelper.WorkItemName = Environment.GetEnvironmentVariable("USERNAME") + DateTime.Now.ToString("yyyyMMdd-HHmmss");

                Console.WriteLine("Creating work item: {0}", taskSubmissionHelper.WorkItemName);

                ICloudTask taskToAdd1 = new CloudTask("task_with_file1", "cmd /c type *.txt");
                ICloudTask taskToAdd2 = new CloudTask("task_with_file2", "cmd /c dir /s");

                //Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
                //when the tasks are submitted to the Azure Batch service.
                taskToAdd1.FilesToStage = new List <IFileStagingProvider>();
                taskToAdd2.FilesToStage = new List <IFileStagingProvider>();

                // generate a local file in temp directory
                Process cur  = Process.GetCurrentProcess();
                string  path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), cur.Id.ToString() + ".txt");
                System.IO.File.WriteAllText(path, "hello from " + cur.Id.ToString());

                // add the file as a task dependency so it'll be uploaded to storage before the task
                // is submitted and downloaded onto the VM before the task starts execution
                FileToStage file = new FileToStage(path, new StagingStorageAccount(Program.StorageAccount, Program.StorageKey, Program.StorageBlobEndpoint));
                taskToAdd1.FilesToStage.Add(file);
                taskToAdd2.FilesToStage.Add(file); // filetostage object can be reused

                taskSubmissionHelper.AddTask(taskToAdd1);
                taskSubmissionHelper.AddTask(taskToAdd2);

                IJobCommitUnboundArtifacts artifacts = null;
                bool errors = false;

                try
                {
                    //Stage the files to Azure Storage and add the tasks to Azure Batch.
                    artifacts = taskSubmissionHelper.Commit() as IJobCommitUnboundArtifacts;
                }
                catch (AggregateException ae)
                {
                    errors = true;
                    // Go through all exceptions and dump useful information
                    ae.Handle((x) =>
                    {
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;
                            if (null != be.RequestInformation && null != be.RequestInformation.AzureError)
                            {
                                // Write the server side error information
                                Console.Error.WriteLine(be.RequestInformation.AzureError.Code);
                                Console.Error.WriteLine(be.RequestInformation.AzureError.Message.Value);
                                if (null != be.RequestInformation.AzureError.Values)
                                {
                                    foreach (var v in be.RequestInformation.AzureError.Values)
                                    {
                                        Console.Error.WriteLine(v.Key + " : " + v.Value);
                                    }
                                }
                            }
                        }
                        else
                        {
                            Console.WriteLine(x);
                        }
                        // Indicate that the error has been handled
                        return(true);
                    });
                }

                // if there is no exception, wait for job response
                if (!errors)
                {
                    List <ICloudTask> tasksToMonitorForCompletion = wm.ListTasks(artifacts.WorkItemName, artifacts.JobName).ToList();

                    Console.WriteLine("Waiting for all tasks to complete on work item: {0}, Job: {1} ...", artifacts.WorkItemName, artifacts.JobName);
                    client.OpenToolbox().CreateTaskStateMonitor().WaitAll(tasksToMonitorForCompletion, TaskState.Completed, TimeSpan.FromMinutes(30));

                    foreach (ICloudTask task in wm.ListTasks(artifacts.WorkItemName, artifacts.JobName))
                    {
                        Console.WriteLine("Task " + task.Name + " says:\n" + task.GetTaskFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine(task.GetTaskFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }

                Console.WriteLine("Deleting work item: {0}", artifacts.WorkItemName);
                wm.DeleteWorkItem(artifacts.WorkItemName); //Don't forget to delete the work item before you exit
            }
        }
Example #13
0
        /// <summary>
        /// Creates a job and adds 4 tasks to it.  2 tasks are basic with only a command line.  The other 2 tasks use 
        /// the file staging feature in order to add a resource file which each task consumes.
        /// </summary>
        /// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
        /// <param name="jobId">The ID of the job.</param>
        /// <returns>The set of container names containing the jobs input files.</returns>
        private async Task<HashSet<string>> SubmitJobAsync(BatchClient batchClient, string jobId)
        {
            // create an empty unbound Job
            CloudJob unboundJob = batchClient.JobOperations.CreateJob();
            unboundJob.Id = jobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = this.configurationSettings.PoolId };

            // Commit Job to create it in the service
            await unboundJob.CommitAsync();

            // create 2 quick tasks. Each task within a job must have a unique ID
            List<CloudTask> tasksToRun = new List<CloudTask>();
            tasksToRun.Add(new CloudTask("task1", "hostname"));
            tasksToRun.Add(new CloudTask("task2", "cmd /c dir /s"));

            // Also create 2 tasks which require some resource files
            CloudTask taskWithFiles1 = new CloudTask("task_with_file1", "cmd /c type 2>nul *.txt");
            CloudTask taskWithFiles2 = new CloudTask("task_with_file2", "cmd /c dir /s");

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles1.FilesToStage = new List<IFileStagingProvider>();
            taskWithFiles2.FilesToStage = new List<IFileStagingProvider>();

            // generate a local file in temp directory
            string path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "HelloWorld.txt");
            File.WriteAllText(path, "hello from Batch SimpleJobSubmission sample!");

            // add the files as a task dependency so they will be uploaded to storage before the task 
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage fileToStage = new FileToStage(path,
                new StagingStorageAccount(
                    storageAccount: this.configurationSettings.StorageAccountName,
                    storageAccountKey: this.configurationSettings.StorageAccountKey,
                    blobEndpoint: this.configurationSettings.StorageBlobEndpoint));


            // When these tasks are added via JobOperations.AddTaskAsync below, the fileToStage is uploaded to storage once,
            // and a SAS is generated and supplied to each of the tasks.  The Batch service does not automatically delete
            // content from your storage account, so files added in this way must be manually removed when they are no longer
            // used.
            taskWithFiles1.FilesToStage.Add(fileToStage);
            taskWithFiles2.FilesToStage.Add(fileToStage);

            tasksToRun.Add(taskWithFiles1);
            tasksToRun.Add(taskWithFiles2);

            var fileStagingArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            
            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize 
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts: fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet<string> blobContainerNames = ExtractBlobContainerNames(fileStagingArtifacts);
            return blobContainerNames;
        }
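The ExtractBlobContainerNames helper used above is not included in this snippet. A minimal sketch, assuming it simply inspects the staging artifacts the same way the later examples do, might look like this:

        // Hypothetical sketch -- not the sample's actual helper. It walks the artifact bag filled in by
        // AddTaskAsync and collects the names of the blob containers that file staging created.
        private static HashSet<string> ExtractBlobContainerNames(
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fileStagingArtifacts)
        {
            HashSet<string> containerNames = new HashSet<string>();

            foreach (ConcurrentDictionary<Type, IFileStagingArtifact> artifactDictionary in fileStagingArtifacts)
            {
                IFileStagingArtifact artifact;
                if (artifactDictionary.TryGetValue(typeof(FileToStage), out artifact))
                {
                    // FileToStage stages files through SequentialFileStagingArtifact, which records the container it created.
                    SequentialFileStagingArtifact sequentialArtifact = artifact as SequentialFileStagingArtifact;
                    if (sequentialArtifact != null && !string.IsNullOrEmpty(sequentialArtifact.BlobContainerCreated))
                    {
                        containerNames.Add(sequentialArtifact.BlobContainerCreated);
                    }
                }
            }

            return containerNames;
        }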
Example #14
0
        public async static Task JobMain(string[] args)
        {
            //Load the configuration
            Settings        topNWordsConfiguration = Settings.Default;
            AccountSettings accountSettings        = AccountSettings.Default;

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "small",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));

                List <string> files = new List <string>
                {
                    Path.Combine(BatchStartTaskFolderName, BatchStartTaskTelemetryRunnerName),
                };

                files.AddRange(AIFilesToUpload);

                // await the helper directly instead of blocking on Task.Result inside an async method
                List <ResourceFile> resourceFiles = await SampleHelpers.UploadResourcesAndCreateResourceFileReferencesAsync(
                    cloudStorageAccount,
                    AIBlobConatinerName,
                    files);

                pool.StartTask = new StartTask()
                {
                    CommandLine   = string.Format("cmd /c {0}", BatchStartTaskTelemetryRunnerName),
                    ResourceFiles = resourceFiles
                };

                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                try
                {
                    await GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool);
                }
                catch (AggregateException ae)
                {
                    // Go through all exceptions and dump useful information
                    ae.Handle(x =>
                    {
                        Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;

                            Console.WriteLine(be.ToString());
                            Console.WriteLine();
                        }
                        else
                        {
                            Console.WriteLine(x);
                        }

                        // can't continue without a pool
                        return(false);
                    });
                }
                catch (BatchException be)
                {
                    Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                    Console.WriteLine(be.ToString());
                    Console.WriteLine();
                }

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    await unboundJob.CommitAsync();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll  = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // Upload application insights assemblies
                    List <FileToStage> aiStagedFiles = new List <FileToStage>();
                    foreach (string aiFile in AIFilesToUpload)
                    {
                        aiStagedFiles.Add(new FileToStage(aiFile, stagingStorageAccount));
                    }

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the input data is large (so copying it to every node via FileStaging
                    // is undesirable) and the number of tasks is small, since a large number of concurrent readers of the blob
                    // might get throttled by Storage, which would lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    string[] documents = Directory.GetFiles(topNWordsConfiguration.DocumentsRootPath);
                    await SampleHelpers.UploadResourcesAsync(cloudStorageAccount, BooksContainerName, documents);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(documents.Length);

                    for (int i = 0; i < documents.Length; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                                                                                     TopNWordsExeName,
                                                                                     "https://onbehalfoutput.blob.core.windows.net/" + documents[i],
                                                                                     topNWordsConfiguration.TopWordCount,
                                                                                     accountSettings.StorageAccountName,
                                                                                     accountSettings.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                        };

                        foreach (FileToStage stagedFile in aiStagedFiles)
                        {
                            task.FilesToStage.Add(stagedFile);
                        }

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);

                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
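GettingStartedCommon.CreatePoolIfNotExistAsync is a shared sample helper that is not shown here. A minimal sketch, assuming it just commits the pool and swallows the "pool already exists" error so the sample can be re-run (BatchErrorCodeStrings lives in Microsoft.Azure.Batch.Common):

        // Hypothetical sketch of the shared helper -- the real GettingStartedCommon implementation may differ.
        // batchClient is kept only to match the call sites above; this sketch needs just the pool object.
        public static async Task CreatePoolIfNotExistAsync(BatchClient batchClient, CloudPool pool)
        {
            try
            {
                await pool.CommitAsync();
                Console.WriteLine("Created pool {0}", pool.Id);
            }
            catch (BatchException be)
            {
                // Ignore the "pool already exists" error so repeated runs can reuse the same pool.
                if (be.RequestInformation != null &&
                    be.RequestInformation.BatchError != null &&
                    be.RequestInformation.BatchError.Code == BatchErrorCodeStrings.PoolExists)
                {
                    Console.WriteLine("Pool {0} already exists", pool.Id);
                }
                else
                {
                    throw;
                }
            }
        }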
Example #15
0
        public static void JobMain(string[] args)
        {
            //Load the configuration
            TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig();
            
            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                configuration.StorageAccountName, 
                configuration.StorageAccountKey, 
                configuration.StorageAccountBlobEndpoint);

            IBatchClient client = BatchClient.Connect(configuration.BatchServiceUrl, new BatchCredentials(configuration.BatchAccountName, configuration.BatchAccountKey));
            string stagingContainer = null;

            //Create a pool (if user hasn't provided one)
            if (configuration.ShouldCreatePool)
            {
                using (IPoolManager pm = client.OpenPoolManager())
                {
                    //OSFamily 4 == OS 2012 R2
                    //You can learn more about os families and versions at:
                    //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                    ICloudPool pool = pm.CreatePool(configuration.PoolName, targetDedicated: configuration.PoolSize, osFamily: "4", vmSize: "small");
                    Console.WriteLine("Adding pool {0}", configuration.PoolName);
                    pool.Commit();
                }
            }
            
            try
            {
                using (IWorkItemManager wm = client.OpenWorkItemManager())
                {
                    IToolbox toolbox = client.OpenToolbox();

                    //Use the TaskSubmissionHelper to help us create a WorkItem and add tasks to it.
                    ITaskSubmissionHelper taskSubmissionHelper = toolbox.CreateTaskSubmissionHelper(wm, configuration.PoolName);
                    taskSubmissionHelper.WorkItemName = configuration.WorkItemName;

                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName);
                    Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName);
                    
                    for (int i = 1; i <= configuration.NumberOfTasks; i++)
                    {
                        ICloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}", 
                            TopNWordsExeName, 
                            bookFileUri, 
                            configuration.NumberOfTopWords,
                            configuration.StorageAccountName, 
                            configuration.StorageAccountKey));

                        //This is the list of files to stage to a container -- for each TaskSubmissionHelper one container is created and 
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the TaskSubmissionHelper's container).
                        task.FilesToStage = new List<IFileStagingProvider>
                                            {
                                                topNWordExe, 
                                                storageDll
                                            };

                        taskSubmissionHelper.AddTask(task);
                    }

                    //Commit all the tasks to the Batch Service.
                    IJobCommitUnboundArtifacts artifacts = taskSubmissionHelper.Commit() as IJobCommitUnboundArtifacts;
                    
                    foreach (var fileStagingArtifact in artifacts.FileStagingArtifacts)
                    {
                        SequentialFileStagingArtifact stagingArtifact = fileStagingArtifact.Value as SequentialFileStagingArtifact;
                        if (stagingArtifact != null)
                        {
                            stagingContainer = stagingArtifact.BlobContainerCreated;
                            Console.WriteLine("Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.", 
                                stagingArtifact.BlobContainerCreated);
                        }
                    }

                    //Get the job to monitor status.
                    ICloudJob job = wm.GetJob(artifacts.WorkItemName, artifacts.JobName);

                    Console.Write("Waiting for tasks to complete ...");
                    // Wait up to 20 minutes for all tasks to reach the completed state
                    client.OpenToolbox().CreateTaskStateMonitor().WaitAll(job.ListTasks(), TaskState.Completed,  TimeSpan.FromMinutes(20));
                    Console.WriteLine("Done.");

                    foreach (ICloudTask task in job.ListTasks())
                    {
                        Console.WriteLine("Task " + task.Name + " says:\n" + task.GetTaskFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine(task.GetTaskFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }
            }
            finally
            {
                //Delete the pool that we created
                if (configuration.ShouldCreatePool)
                {
                    using (IPoolManager pm = client.OpenPoolManager())
                    {
                        Console.WriteLine("Deleting pool: {0}", configuration.PoolName);
                        pm.DeletePool(configuration.PoolName);
                    }
                }

                //Delete the workitem that we created
                if (configuration.ShouldDeleteWorkItem)
                {
                    using (IWorkItemManager wm = client.OpenWorkItemManager())
                    {
                        Console.WriteLine("Deleting work item: {0}", configuration.WorkItemName);
                        wm.DeleteWorkItem(configuration.WorkItemName);
                    }
                }

                //Delete the containers we created
                if(configuration.ShouldDeleteContainer)
                {
                    DeleteContainers(configuration, stagingContainer);
                }
            }
        }
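DeleteContainers is called from the finally block above but is not part of this snippet. A minimal sketch, assuming the classic Microsoft.WindowsAzure.Storage client and that only the file-staging container needs to be removed:

        // Hypothetical sketch of the cleanup helper referenced above.
        private static void DeleteContainers(TopNWordsConfiguration configuration, string fileStagingContainer)
        {
            StorageCredentials credentials = new StorageCredentials(
                configuration.StorageAccountName,
                configuration.StorageAccountKey);
            CloudBlobClient blobClient = new CloudStorageAccount(credentials, useHttps: true).CreateCloudBlobClient();

            // Delete the container that file staging created for the task binaries, if one was created.
            if (!string.IsNullOrEmpty(fileStagingContainer))
            {
                blobClient.GetContainerReference(fileStagingContainer).DeleteIfExists();
            }
        }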
Example #16
0
        public static void JobMain(string[] args)
        {
            //Load the configuration
            Settings topNWordsConfiguration = new ConfigurationBuilder()
                                              .SetBasePath(Directory.GetCurrentDirectory())
                                              .AddJsonFile("settings.json")
                                              .Build()
                                              .Get <Settings>();
            AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

            CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
                new StorageCredentials(
                    accountSettings.StorageAccountName,
                    accountSettings.StorageAccountKey),
                accountSettings.StorageServiceUrl,
                useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                cloudStorageAccount.BlobEndpoint.ToString());

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
            {
                string stagingContainer = null;

                //OSFamily 6 == Windows Server 2019. You can learn more about os families and versions at:
                //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                CloudPool pool = client.PoolOperations.CreatePool(
                    topNWordsConfiguration.PoolId,
                    targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
                    virtualMachineSize: "standard_d1_v2",
                    cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "6"));
                Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
                pool.TaskSchedulingPolicy   = new TaskSchedulingPolicy(ComputeNodeFillType.Spread);
                pool.MaxTasksPerComputeNode = 4;

                GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();
                var formula     = @"startingNumberOfVMs = 2;
                    maxNumberofVMs = 4;
                    pendingTaskSamplePercent = $PendingTasks.GetSamplePercent(90 * TimeInterval_Second);
                    pendingTaskSamples = pendingTaskSamplePercent < 70 ? startingNumberOfVMs : avg($PendingTasks.GetSample(180 * TimeInterval_Second));
                    $TargetDedicatedNodes = min(maxNumberofVMs, pendingTaskSamples);
                    $NodeDeallocationOption = taskcompletion;";
                var noOfSeconds = 150;
                // Give the initial resize time to finish; autoscale cannot be enabled while a resize operation is in progress.
                Thread.Sleep(noOfSeconds * 1000);

                client.PoolOperations.EnableAutoScale(
                    poolId: topNWordsConfiguration.PoolId, autoscaleFormula: formula,
                    autoscaleEvaluationInterval: TimeSpan.FromMinutes(5));

                try
                {
                    Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = topNWordsConfiguration.JobId;
                    unboundJob.PoolInformation = new PoolInformation()
                    {
                        PoolId = topNWordsConfiguration.PoolId
                    };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe         = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll          = new FileToStage(StorageClientDllName, stagingStorageAccount);
                    FileToStage newtonJsoftDll      = new FileToStage(NewtonJSoftDllName, stagingStorageAccount);
                    FileToStage microsoftEFDll      = new FileToStage(MicrosoftEntityFrameworkDllName, stagingStorageAccount);
                    FileToStage microsoftEFCoreDll  = new FileToStage(MicrosoftEntityFrameworkCoreDllName, stagingStorageAccount);
                    FileToStage microsoftBCLDll     = new FileToStage(MicrosoftBCLDllName, stagingStorageAccount);
                    FileToStage systemTasksDll      = new FileToStage(SystemTasksDllName, stagingStorageAccount);
                    FileToStage topNWordsConfigFile = new FileToStage(TopnWordsConfig, stagingStorageAccount);
                    FileToStage SystemValueTupleDll = new FileToStage(SystemValueTupleDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionAbstractionsDll = new FileToStage(DependecyInjectionAbstractionsDllName, stagingStorageAccount);
                    FileToStage DependencyInjectionDll             = new FileToStage(DependecyInjectionDllName, stagingStorageAccount);
                    FileToStage LoggingAbstractionsDll             = new FileToStage(LoggingAbstractionsDllName, stagingStorageAccount);
                    FileToStage DiagnosticsDll        = new FileToStage(DiagnosticssDllName, stagingStorageAccount);
                    FileToStage CachingAbstractionDll = new FileToStage(CachingAbstractionsDllName, stagingStorageAccount);
                    FileToStage MicrosoftSqlServerDll = new FileToStage(MicrosoftSqlServerDllName, stagingStorageAccount);
                    FileToStage SystemComponentDll    = new FileToStage(SystemComponentDllName, stagingStorageAccount);
                    FileToStage SystemCollectionsDll  = new FileToStage(SystemCollectionsDllName, stagingStorageAccount);
                    FileToStage pDll                 = new FileToStage(pdllName, stagingStorageAccount);
                    FileToStage oDll                 = new FileToStage(odllName, stagingStorageAccount);
                    FileToStage lDll                 = new FileToStage(ldllName, stagingStorageAccount);
                    FileToStage hashcodeDll          = new FileToStage(hashcodeDllName, stagingStorageAccount);
                    FileToStage clientSqlDll         = new FileToStage(clientSqlClientDllName, stagingStorageAccount);
                    FileToStage cachingMemoryDll     = new FileToStage(CachingMemoryDllName, stagingStorageAccount);
                    FileToStage configAbstractionDll = new FileToStage(configAbstractionDllName, stagingStorageAccount);
                    FileToStage SNIDll               = new FileToStage(SNIDllName, stagingStorageAccount);
                    FileToStage relationDll          = new FileToStage(relationddllName, stagingStorageAccount);

                    var textFile = "E:\\WeatherAPIPOC\\cities_id.txt";
                    var text     = File.ReadAllLines(textFile);
                    var cityList = new List <string>(text);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the input data is large (so copying it to every node via FileStaging
                    // is undesirable) and the number of tasks is small, since a large number of concurrent readers of the blob
                    // might get throttled by Storage, which would lengthen the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.

                    Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks);

                    for (int i = 0; i < cityList.Count; i++)
                    {
                        string    programLaunchTime = DateTime.Now.ToString("h:mm:sstt");
                        CloudTask task = new CloudTask(
                            id: $"task_no_{i + 1}",
                            commandline: $"cmd /c mkdir x64 & move SNI.dll x64 & {TopNWordsExeName} --Task {cityList[i]} %AZ_BATCH_NODE_ID% {programLaunchTime}");

                        //This is the list of files to stage to a container -- for each job, one container is created and
                        //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                        //the container).
                        task.FilesToStage = new List <IFileStagingProvider>
                        {
                            topNWordExe,
                            storageDll,
                            newtonJsoftDll,
                            microsoftEFDll,
                            microsoftEFCoreDll,
                            microsoftBCLDll,
                            systemTasksDll,
                            topNWordsConfigFile,
                            SystemValueTupleDll,
                            DependencyInjectionAbstractionsDll,
                            DependencyInjectionDll,
                            LoggingAbstractionsDll,
                            DiagnosticsDll,
                            CachingAbstractionDll,
                            MicrosoftSqlServerDll,
                            SystemComponentDll,
                            SystemCollectionsDll,
                            oDll,
                            pDll,
                            lDll,
                            relationDll,
                            hashcodeDll,
                            clientSqlDll,
                            cachingMemoryDll,
                            configAbstractionDll,
                            SNIDll
                        };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();
                    client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
                    // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
                    IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (topNWordsConfiguration.ShouldDeletePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                        client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
                    }

                    //Delete the job that we created
                    if (topNWordsConfiguration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                        client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
                    }

                    //Delete the containers we created
                    if (topNWordsConfiguration.ShouldDeleteContainer)
                    {
                        DeleteContainers(accountSettings, stagingContainer);
                    }
                }
            }
        }
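The formula above is applied blindly after a fixed sleep. Once EnableAutoScale has been called for a pool, the service can also evaluate a formula on demand, which is useful for checking formula syntax before relying on it; a minimal sketch:

        // Hypothetical sketch: evaluate an autoscale formula against a pool that already has autoscale enabled.
        private static void CheckAutoScaleFormula(BatchClient client, string poolId, string formula)
        {
            // EvaluateAutoScale asks the service to run the formula once without changing the pool.
            AutoScaleRun run = client.PoolOperations.EvaluateAutoScale(poolId, formula);

            // Results holds the evaluated variable assignments, e.g. $TargetDedicatedNodes=2.
            Console.WriteLine("Autoscale evaluation results: {0}", run.Results);
            if (run.Error != null)
            {
                Console.WriteLine("Autoscale evaluation error: {0} - {1}", run.Error.Code, run.Error.Message);
            }
        }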
Example #17
0
        public static void JobMain(string[] args)
        {
            Console.WriteLine("Setting up Batch Process - ImageBlur. \nPress Enter to begin.");
            Console.WriteLine("-------------------------------------------------------------");
            Console.ReadLine();
            Settings imageBlurSettings = Settings.Default;
            AccountSettings accountSettings = AccountSettings.Default;

            /* Setting up credentials for Batch and Storage accounts
             * =====================================================
             */

            StorageCredentials storageCredentials = new StorageCredentials(
                accountSettings.StorageAccountName, 
                accountSettings.StorageAccountKey);
            CloudStorageAccount storageAccount = new CloudStorageAccount(storageCredentials, useHttps: true);

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey,
                storageAccount.BlobEndpoint.ToString());

            BatchSharedKeyCredentials batchCredentials = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl, 
                accountSettings.BatchAccountName, 
                accountSettings.BatchAccountKey);


            using (BatchClient client = BatchClient.Open(batchCredentials))
            {
                string stagingContainer = null;

                /* Setting up pool to run job and tasks in
                 * =======================================
                 */

                CreatePool(client, imageBlurSettings, accountSettings);

                try
                {

                    /* Setting up Job ------------------------
                     * =======================================
                     */

                    Console.WriteLine("Creating job {0}. \nPress Enter to continue.", imageBlurSettings.JobId);
                    Console.ReadLine();

                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = imageBlurSettings.JobId;
                    unboundJob.PoolInformation = new PoolInformation() { PoolId = imageBlurSettings.PoolId };
                    unboundJob.Commit();


                    /* Uploading Source Image(s) to run varying degrees of Blur on
                     * ===========================================================
                     * Here, the input data is uploaded separately to Storage and 
                     * its URI is passed to the task as an argument.
                     */

                    Console.WriteLine("Uploading source images. \nPress Enter to continue.");
                    Console.ReadLine();

                    string[] sourceImages = imageBlurSettings.SourceImageNames.Split(',');
                    List<String> sourceImageUris = new List<String>();
                    for( var i = 0; i < sourceImages.Length; i++)
                    {
                        Console.WriteLine("    Uploading {0}.", sourceImages[i]);
                        sourceImageUris.Add( UploadSourceImagesFileToCloudBlob(accountSettings, sourceImages[i]));
                        Console.WriteLine("    Source Image uploaded to: <{0}>.", sourceImageUris[i]);
                    }

                    Console.WriteLine();
                    Console.WriteLine("All Source Images uploaded. \nPress Enter to continue.");
                    Console.ReadLine();

                    /* Setting up tasks with dependencies ----------------
                     * ===================================================
                     */

                    Console.WriteLine("Setting up files to stage for tasks. \nPress Enter to continue.");
                    Console.ReadLine();

                    // Setting up Files to Stage - Files to upload into each task (executables and dependent assemblies)
                    FileToStage imageBlurExe = new FileToStage(ImageBlurExeName, stagingStorageAccount);
                    FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);
                    FileToStage imageProcessorDll = new FileToStage(ImageProcessorDllName, stagingStorageAccount);

                    // initialize collection to hold tasks that will be submitted in their entirety
                    List<CloudTask> tasksToRun = new List<CloudTask>(imageBlurSettings.NumberOfTasks);

                    for (int i = 0; i < imageBlurSettings.NumberOfTasks; i++)
                    {
                        // create individual tasks (cmd line passed in as argument)
                        CloudTask task = new CloudTask("task_" + i, String.Format("{0} --Task {1} {2} {3}",
                            ImageBlurExeName,
                            sourceImageUris[i],
                            accountSettings.StorageAccountName,
                            accountSettings.StorageAccountKey));

                        // list of files to stage to a container -- for each job, one container is created and
                        // files all resolve to Azure Blobs by their name
                        task.FilesToStage = new List<IFileStagingProvider> { imageBlurExe, storageDll, imageProcessorDll };

                        tasksToRun.Add(task);
                        Console.WriteLine("\t task {0} has been added", "task_" + i);
                    }
                    Console.WriteLine();

                    /* Commit tasks with dependencies ----------------
                     * ===============================================
                     */

                    Console.WriteLine("Running Tasks. \nPress Enter to continue.");
                    Console.WriteLine("-------------------------------------------------------------");
                    Console.ReadLine();

                    ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
                    client.JobOperations.AddTask(imageBlurSettings.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- \nyou will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(imageBlurSettings.JobId);

                    Console.WriteLine();
                    Console.Write("Waiting for tasks to complete ...   ");
                    IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");
                    Console.WriteLine();

                    Console.WriteLine("See below for Stdout / Stderr for each node.");
                    Console.WriteLine("============================================");

                    /* Display stdout/stderr for each task on completion 
                     * =================================================
                     */

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id + ":");
                        Console.WriteLine("    stdout:" + Environment.NewLine + t.GetNodeFile("stdout.txt").ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("    stderr:" + Environment.NewLine + t.GetNodeFile("stderr.txt").ReadAsString());
                    }

                    Console.WriteLine();
                    Console.WriteLine("Please find the resulting images in storage. \nPress Enter to continue.");
                    Console.WriteLine("=======================================================================");
                    Console.ReadLine();
                }
                finally
                {
                    /* If configured as such, Delete the resources that were used in this process
                     * ==========================================================================
                     */

                    //Delete the pool that we created
                    if (imageBlurSettings.DeletePool)
                    {

                        Console.WriteLine("Deleting Pool. \nPress Enter to continue.");
                        Console.ReadLine();

                        Console.WriteLine("Deleting pool: {0}", imageBlurSettings.PoolId);
                        client.PoolOperations.DeletePool(imageBlurSettings.PoolId);
                    }

                    //Delete the job that we created
                    if (imageBlurSettings.DeleteJob)
                    {

                        Console.WriteLine("Deleting Job. \nPress Enter to continue.");
                        Console.ReadLine();

                        Console.WriteLine("Deleting job: {0}", imageBlurSettings.JobId);
                        client.JobOperations.DeleteJob(imageBlurSettings.JobId);
                    }

                    //Delete the containers we created
                    if (imageBlurSettings.DeleteContainer)
                    {

                        Console.WriteLine("Deleting Container. \nPress Enter to continue.");
                        Console.ReadLine();

                        DeleteContainers(accountSettings, stagingContainer);
                    }
                    Console.WriteLine();
                    Console.WriteLine("Please check the Azure portal to make sure that all resources you want deleted are in fact deleted");
                    Console.WriteLine("==================================================================================================");
                    Console.WriteLine();
                    Console.WriteLine("Press Enter to exit the program");
                    Console.WriteLine("Exiting program...");
                }

            }

        }
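UploadSourceImagesFileToCloudBlob is referenced above but not included in this snippet. A minimal sketch, assuming it uploads the local image with the classic storage client and returns a read-only SAS URI for the task to download (the "imageblur-input" container name is illustrative):

        // Hypothetical sketch -- uploads a local file and returns its blob URI with a read-only SAS token appended.
        private static string UploadSourceImagesFileToCloudBlob(AccountSettings accountSettings, string filePath)
        {
            StorageCredentials credentials = new StorageCredentials(
                accountSettings.StorageAccountName,
                accountSettings.StorageAccountKey);
            CloudBlobClient blobClient = new CloudStorageAccount(credentials, useHttps: true).CreateCloudBlobClient();

            CloudBlobContainer container = blobClient.GetContainerReference("imageblur-input");
            container.CreateIfNotExists();

            CloudBlockBlob blob = container.GetBlockBlobReference(Path.GetFileName(filePath));
            using (FileStream stream = File.OpenRead(filePath))
            {
                blob.UploadFromStream(stream);
            }

            // Grant read access for a limited time so the task can download the image without the account key.
            string sasToken = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy
            {
                Permissions = SharedAccessBlobPermissions.Read,
                SharedAccessExpiryTime = DateTime.UtcNow.AddHours(4)
            });

            return blob.Uri + sasToken;
        }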
Example #18
0
        /// <summary>
        /// Submit tasks which have dependent files.
        /// The files are automatically uploaded to Azure Storage using the FileStaging feature of the Azure.Batch client library.
        /// </summary>
        private static void AddTasksWithFileStaging(BatchClient client, string sharedPoolId)
        {
            string jobId = CreateJobId("HelloWorldFileStagingJob");

            Console.WriteLine("Creating job: " + jobId);
            CloudJob boundJob = CreateBoundJob(client.JobOperations, sharedPoolId, jobId);

            CloudTask taskToAdd1 = new CloudTask("task_with_file1", "cmd /c type *.txt");
            CloudTask taskToAdd2 = new CloudTask("task_with_file2", "cmd /c dir /s");

            //Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            //when the tasks are submitted to the Azure Batch service.
            taskToAdd1.FilesToStage = new List <IFileStagingProvider>();
            taskToAdd2.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            Process cur  = Process.GetCurrentProcess();
            string  path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), cur.Id + ".txt");

            File.WriteAllText(path, "hello from " + cur.Id);

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the VM before the task starts execution on the node
            FileToStage file = new FileToStage(path, new StagingStorageAccount(StorageAccount, StorageKey, StorageBlobEndpoint));

            taskToAdd1.FilesToStage.Add(file);
            taskToAdd2.FilesToStage.Add(file); // filetostage object can be reused

            // create a list of the tasks to add.
            List <CloudTask> tasksToRun = new List <CloudTask> {
                taskToAdd1, taskToAdd2
            };
            bool errors = false;

            try
            {
                client.JobOperations.AddTask(boundJob.Id, tasksToRun);
            }
            catch (AggregateException ae)
            {
                errors = true;
                // Go through all exceptions and dump useful information
                ae.Handle(x =>
                {
                    Console.Error.WriteLine("Adding tasks for job {0} failed", boundJob.Id);
                    if (x is BatchException)
                    {
                        BatchException be = x as BatchException;
                        if (null != be.RequestInformation && null != be.RequestInformation.AzureError)
                        {
                            // Write the server side error information
                            Console.Error.WriteLine("    AzureError.Code: " + be.RequestInformation.AzureError.Code);
                            Console.Error.WriteLine("    AzureError.Message.Value: " + be.RequestInformation.AzureError.Message.Value);
                            if (null != be.RequestInformation.AzureError.Values)
                            {
                                Console.Error.WriteLine("    AzureError.Values");
                                foreach (var v in be.RequestInformation.AzureError.Values)
                                {
                                    Console.Error.WriteLine("        {0} : {1}", v.Key, v.Value);
                                }
                            }
                            Console.Error.WriteLine();
                        }
                    }
                    else
                    {
                        Console.WriteLine(x);
                    }
                    // Indicate that the error has been handled
                    return(true);
                });
            }

            // if there were no errors, wait for the tasks to complete
            if (!errors)
            {
                Console.WriteLine("Waiting for all tasks to complete on job: {0}...", boundJob.Id);

                IPagedEnumerable <CloudTask> ourTasks = boundJob.ListTasks();
                client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(30));

                foreach (CloudTask task in ourTasks)
                {
                    Console.WriteLine("Task " + task.Id);
                    Console.WriteLine("stdout:\n" + task.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                    Console.WriteLine("\nstderr:\n" + task.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
                }
            }

            //Delete the job to ensure the tasks are cleaned up
            Console.WriteLine("Deleting job: {0}", boundJob.Id);
            client.JobOperations.DeleteJob(boundJob.Id);
        }
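CreateJobId and CreateBoundJob are helpers that are not shown in this snippet. Minimal sketches, assuming CreateJobId only makes the ID unique per run and CreateBoundJob commits an unbound job and then re-fetches it from the service:

        // Hypothetical sketch: make the job ID unique so repeated runs do not collide with an existing job.
        private static string CreateJobId(string prefix)
        {
            return prefix + "-" + Guid.NewGuid().ToString("N");
        }

        // Hypothetical sketch: create and commit an unbound job on the shared pool, then return the bound job.
        private static CloudJob CreateBoundJob(JobOperations jobOperations, string poolId, string jobId)
        {
            CloudJob unboundJob = jobOperations.CreateJob();
            unboundJob.Id = jobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = poolId };
            unboundJob.Commit();

            // Fetching the job after Commit yields a bound object whose properties reflect the service state.
            return jobOperations.GetJob(jobId);
        }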
Example #19
0
        /// <summary>
        /// Creates a job and adds 4 tasks to it.  2 tasks are basic with only a command line.  The other 2 tasks use
        /// the file staging feature in order to add a resource file which each task consumes.
        /// </summary>
        /// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
        /// <param name="jobId">The ID of the job.</param>
        /// <returns>The set of container names containing the jobs input files.</returns>
        private async Task <HashSet <string> > SubmitJobAsync(BatchClient batchClient, string jobId)
        {
            // create an empty unbound Job
            CloudJob unboundJob = batchClient.JobOperations.CreateJob();

            unboundJob.Id = jobId;
            unboundJob.PoolInformation = new PoolInformation()
            {
                PoolId = this.configurationSettings.PoolId
            };

            // Commit Job to create it in the service
            await unboundJob.CommitAsync();

            // create 2 quick tasks. Each task within a job must have a unique ID
            List <CloudTask> tasksToRun = new List <CloudTask>();

            tasksToRun.Add(new CloudTask("task1", "hostname"));
            tasksToRun.Add(new CloudTask("task2", "cmd /c dir /s"));

            // Also create 2 tasks which require some resource files
            CloudTask taskWithFiles1 = new CloudTask("task_with_file1", "cmd /c type 2>nul *.txt");
            CloudTask taskWithFiles2 = new CloudTask("task_with_file2", "cmd /c dir /s");

            // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
            // when the tasks are submitted to the Azure Batch service.
            taskWithFiles1.FilesToStage = new List <IFileStagingProvider>();
            taskWithFiles2.FilesToStage = new List <IFileStagingProvider>();

            // generate a local file in temp directory
            string path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "HelloWorld.txt");

            File.WriteAllText(path, "hello from Batch SimpleJobSubmission sample!");

            // add the files as a task dependency so they will be uploaded to storage before the task
            // is submitted and downloaded to the node before the task starts execution.
            FileToStage fileToStage = new FileToStage(path,
                                                      new StagingStorageAccount(
                                                          storageAccount: this.configurationSettings.StorageAccountName,
                                                          storageAccountKey: this.configurationSettings.StorageAccountKey,
                                                          blobEndpoint: this.configurationSettings.StorageBlobEndpoint));


            // When these tasks are added via JobOperations.AddTaskAsync below, the fileToStage is uploaded to storage once,
            // and a SAS is generated and supplied to each of the tasks.  The Batch service does not automatically delete
            // content from your storage account, so files added in this way must be manually removed when they are no longer
            // used.
            taskWithFiles1.FilesToStage.Add(fileToStage);
            taskWithFiles2.FilesToStage.Add(fileToStage);

            tasksToRun.Add(taskWithFiles1);
            tasksToRun.Add(taskWithFiles2);

            var fileStagingArtifacts = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >();

            // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
            // tasks at once in a single request.  If the list of tasks is N where N > 100, this will correctly parallelize
            // the requests and return when all N tasks have been added.
            await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts : fileStagingArtifacts);

            // Extract the names of the blob containers from the file staging artifacts
            HashSet <string> blobContainerNames = ExtractBlobContainerNames(fileStagingArtifacts);

            return blobContainerNames;
        }
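
        // The ExtractBlobContainerNames helper referenced above is not shown in this example. What follows is a
        // minimal sketch, assuming the staging types from the Microsoft.Azure.Batch.FileStaging namespace; it
        // mirrors the artifact-bag inspection performed in Example #20 further down.
        private static HashSet<string> ExtractBlobContainerNames(
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fileStagingArtifacts)
        {
            HashSet<string> containerNames = new HashSet<string>();

            foreach (ConcurrentDictionary<Type, IFileStagingArtifact> artifactDictionary in fileStagingArtifacts)
            {
                IFileStagingArtifact artifact;
                if (artifactDictionary.TryGetValue(typeof(FileToStage), out artifact))
                {
                    // FileToStage uses SequentialFileStagingArtifact, which records the name of the container it created.
                    SequentialFileStagingArtifact stagingArtifact = artifact as SequentialFileStagingArtifact;
                    if (stagingArtifact != null)
                    {
                        containerNames.Add(stagingArtifact.BlobContainerCreated);
                    }
                }
            }

            return containerNames;
        }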
Example #20
        public static void JobMain(string[] args)
        {
            //Load the configuration
            TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig();

            StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
                configuration.StorageAccountName,
                configuration.StorageAccountKey,
                configuration.StorageAccountBlobEndpoint);

            using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(configuration.BatchServiceUrl, configuration.BatchAccountName, configuration.BatchAccountKey)))
            {
                string stagingContainer = null;

                //Create a pool (if user hasn't provided one)
                if (configuration.ShouldCreatePool)
                {
                    //OS family 4 corresponds to Windows Server 2012 R2. You can learn more about OS families and versions at:
                    //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
                    CloudPool pool = client.PoolOperations.CreatePool(configuration.PoolId, targetDedicated: configuration.PoolSize, osFamily: "4", virtualMachineSize: "small");
                    Console.WriteLine("Adding pool {0}", configuration.PoolId);

                    try
                    {
                        pool.Commit();
                    }
                    catch (AggregateException ae)
                    {
                        // Go through all exceptions and dump useful information
                        ae.Handle(x =>
                        {
                            Console.Error.WriteLine("Creating pool ID {0} failed", configuration.PoolId);
                            if (x is BatchException)
                            {
                                BatchException be = x as BatchException;

                                Console.WriteLine(be.ToString());
                                Console.WriteLine();
                            }
                            else
                            {
                                Console.WriteLine(x);
                            }

                            // can't continue without a pool
                            return false;
                        });
                    }
                }

                try
                {
                    Console.WriteLine("Creating job: " + configuration.JobId);
                    // get an empty unbound Job
                    CloudJob unboundJob = client.JobOperations.CreateJob();
                    unboundJob.Id = configuration.JobId;
                    unboundJob.PoolInformation = new PoolInformation() { PoolId = configuration.PoolId };

                    // Commit Job to create it in the service
                    unboundJob.Commit();

                    // create file staging objects that represent the executable and its dependent assembly to run as the task.
                    // These files are copied to every node before the corresponding task is scheduled to run on that node.
                    FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
                    FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);

                    // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
                    // This approach is appropriate when the input data is large enough that copying it to every node via FileStaging
                    // is undesirable, and the number of tasks is small, since many concurrent readers of the same blob can be throttled
                    // by Storage, which lengthens the overall processing time.
                    //
                    // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus
                    // complexity.
                    string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName);
                    Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName);

                    // initialize a collection to hold the tasks that will be submitted in their entirety
                    List<CloudTask> tasksToRun = new List<CloudTask>(configuration.NumberOfTasks);

                    for (int i = 1; i <= configuration.NumberOfTasks; i++)
                    {
                        CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}",
                            TopNWordsExeName,
                            bookFileUri,
                            configuration.NumberOfTopWords,
                            configuration.StorageAccountName,
                            configuration.StorageAccountKey));

                        //This is the list of files to stage to a container -- one container is created per job, and
                        //files resolve to Azure blobs by name (so two tasks staging the same file name create just one blob in
                        //the container).
                        task.FilesToStage = new List<IFileStagingProvider>
                                            {
                                                topNWordExe, 
                                                storageDll
                                            };

                        tasksToRun.Add(task);
                    }

                    // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
                    // The container information is used later on to remove these files from Storage.
                    ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
                    client.JobOperations.AddTask(configuration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

                    // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
                    // capture the name of the container holding the files so they can be deleted later on if that option
                    // was configured in the settings.
                    foreach (var fsBagItem in fsArtifactBag)
                    {
                        IFileStagingArtifact fsValue;
                        if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                        {
                            SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                            if (stagingArtifact != null)
                            {
                                stagingContainer = stagingArtifact.BlobContainerCreated;
                                Console.WriteLine(
                                    "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                                    stagingArtifact.BlobContainerCreated);
                            }
                        }
                    }

                    //Get the job to monitor status.
                    CloudJob job = client.JobOperations.GetJob(configuration.JobId);

                    Console.Write("Waiting for tasks to complete ...   ");
                    // Wait up to 20 minutes for all tasks to reach the completed state. The long timeout is needed the first
                    // time a pool is created, to allow nodes to be added to the pool and initialized before they can run tasks.
                    IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
                    client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
                    Console.WriteLine("tasks are done.");

                    foreach (CloudTask t in ourTasks)
                    {
                        Console.WriteLine("Task " + t.Id);
                        Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine();
                        Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }
                finally
                {
                    //Delete the pool that we created
                    if (configuration.ShouldCreatePool)
                    {
                        Console.WriteLine("Deleting pool: {0}", configuration.PoolId);
                        client.PoolOperations.DeletePool(configuration.PoolId);
                    }

                    //Delete the job that we created
                    if (configuration.ShouldDeleteJob)
                    {
                        Console.WriteLine("Deleting job: {0}", configuration.JobId);
                        client.JobOperations.DeleteJob(configuration.JobId);
                    }

                    //Delete the containers we created
                    if (configuration.ShouldDeleteContainer)
                    {
                        DeleteContainers(configuration, stagingContainer);
                    }
                }
            }
        }
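
        // Neither UploadBookFileToCloudBlob nor DeleteContainers is shown in this example. The sketches below are
        // one possible implementation, assuming the classic Microsoft.WindowsAzure.Storage client library
        // (Microsoft.WindowsAzure.Storage, .Auth, and .Blob namespaces); the container name "topnwords-book" and
        // the exact helper shapes are illustrative assumptions, not part of the original sample.
        private static string UploadBookFileToCloudBlob(TopNWordsConfiguration configuration, string fileName)
        {
            CloudStorageAccount account = new CloudStorageAccount(
                new StorageCredentials(configuration.StorageAccountName, configuration.StorageAccountKey),
                useHttps: true);
            CloudBlobClient blobClient = account.CreateCloudBlobClient();

            // Hypothetical container name; the real sample may use a different one.
            CloudBlobContainer container = blobClient.GetContainerReference("topnwords-book");
            container.CreateIfNotExists();

            CloudBlockBlob blob = container.GetBlockBlobReference(fileName);
            using (FileStream stream = File.OpenRead(fileName))
            {
                blob.UploadFromStream(stream);
            }

            // The task command line above already receives the storage account name and key, so the plain blob URI
            // is enough for the task executable to locate and read the book file.
            return blob.Uri.ToString();
        }

        private static void DeleteContainers(TopNWordsConfiguration configuration, string stagingContainer)
        {
            CloudStorageAccount account = new CloudStorageAccount(
                new StorageCredentials(configuration.StorageAccountName, configuration.StorageAccountKey),
                useHttps: true);
            CloudBlobClient blobClient = account.CreateCloudBlobClient();

            // Remove the container created by file staging, if one was recorded.
            if (!string.IsNullOrEmpty(stagingContainer))
            {
                blobClient.GetContainerReference(stagingContainer).DeleteIfExists();
            }

            // Remove the container holding the uploaded book file as well (assumed name, see above).
            blobClient.GetContainerReference("topnwords-book").DeleteIfExists();
        }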
Example #21
        /// <summary>
        /// Submit a work item with tasks that have dependent files.
        /// The files are automatically uploaded to Azure Storage using the FileStaging feature of the Azure.Batch client library.
        /// </summary>
        /// <param name="client"></param>
        private static void AddWorkWithFileStaging(IBatchClient client)
        {
            using (IWorkItemManager wm = client.OpenWorkItemManager())
            {

                IToolbox toolbox = client.OpenToolbox();
                ITaskSubmissionHelper taskSubmissionHelper = toolbox.CreateTaskSubmissionHelper(wm, PoolName);

                taskSubmissionHelper.WorkItemName = Environment.GetEnvironmentVariable("USERNAME") + DateTime.Now.ToString("yyyyMMdd-HHmmss");

                Console.WriteLine("Creating work item: {0}", taskSubmissionHelper.WorkItemName);

                ICloudTask taskToAdd1 = new CloudTask("task_with_file1", "cmd /c type *.txt");
                ICloudTask taskToAdd2 = new CloudTask("task_with_file2", "cmd /c dir /s");

                //Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
                //when the tasks are submitted to the Azure Batch service.
                taskToAdd1.FilesToStage = new List<IFileStagingProvider>();
                taskToAdd2.FilesToStage = new List<IFileStagingProvider>();

                // generate a local file in temp directory
                Process cur = Process.GetCurrentProcess();
                string path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), cur.Id + ".txt");
                File.WriteAllText(path, "hello from " + cur.Id);

                // add the file as a task dependency so it will be uploaded to storage before the task
                // is submitted and downloaded to the VM before the task starts execution
                FileToStage file = new FileToStage(path, new StagingStorageAccount(StorageAccount, StorageKey, StorageBlobEndpoint));
                taskToAdd1.FilesToStage.Add(file);
                taskToAdd2.FilesToStage.Add(file); // the same FileToStage object can be reused across tasks

                taskSubmissionHelper.AddTask(taskToAdd1);
                taskSubmissionHelper.AddTask(taskToAdd2);

                IJobCommitUnboundArtifacts artifacts = null;
                bool errors = false;
                
                try
                {
                    //Stage the files to Azure Storage and add the tasks to Azure Batch.
                    artifacts = taskSubmissionHelper.Commit() as IJobCommitUnboundArtifacts;
                }
                catch (AggregateException ae)
                {
                    errors = true;
                    // Go through all exceptions and dump useful information
                    ae.Handle(x =>
                    {
                        if (x is BatchException)
                        {
                            BatchException be = x as BatchException;
                            if (null != be.RequestInformation && null != be.RequestInformation.AzureError)
                            {
                                // Write the server side error information
                                Console.Error.WriteLine(be.RequestInformation.AzureError.Code);
                                Console.Error.WriteLine(be.RequestInformation.AzureError.Message.Value);
                                if (null != be.RequestInformation.AzureError.Values)
                                {
                                    foreach (var v in be.RequestInformation.AzureError.Values)
                                    {
                                        Console.Error.WriteLine(v.Key + " : " + v.Value);
                                    }
                                }
                            }
                        }
                        else
                        {
                            Console.WriteLine(x);
                        }
                        // Indicate that the error has been handled
                        return true;
                    });
                }

                // if there were no errors, wait for the tasks to complete
                if (!errors)
                {
                    List<ICloudTask> tasksToMonitorForCompletion = wm.ListTasks(artifacts.WorkItemName, artifacts.JobName).ToList();

                    Console.WriteLine("Waiting for all tasks to complete on work item: {0}, Job: {1} ...", artifacts.WorkItemName, artifacts.JobName);
                    client.OpenToolbox().CreateTaskStateMonitor().WaitAll(tasksToMonitorForCompletion, TaskState.Completed, TimeSpan.FromMinutes(30));

                    foreach (ICloudTask task in wm.ListTasks(artifacts.WorkItemName, artifacts.JobName))
                    {
                        Console.WriteLine("Task " + task.Name + " says:\n" + task.GetTaskFile(Constants.StandardOutFileName).ReadAsString());
                        Console.WriteLine(task.GetTaskFile(Constants.StandardErrorFileName).ReadAsString());
                    }
                }

                Console.WriteLine("Deleting work item: {0}", artifacts.WorkItemName);
                wm.DeleteWorkItem(artifacts.WorkItemName); //Don't forget to delete the work item before you exit
            }
        }
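
        // The IBatchClient / IWorkItemManager / ITaskSubmissionHelper types above come from an early preview of the
        // Batch .NET library, in which jobs were grouped under "work items". Under the current Microsoft.Azure.Batch
        // client library the same file-staging pattern is expressed with CloudJob and JobOperations, as in the
        // earlier examples. A minimal sketch, assuming an existing pool ID, a StagingStorageAccount built from the
        // same storage settings, and an illustrative staged file name:
        private static void AddTasksWithFileStaging(BatchClient batchClient, string jobId, string poolId, StagingStorageAccount stagingAccount)
        {
            // create and commit the job that replaces the old work item
            CloudJob job = batchClient.JobOperations.CreateJob();
            job.Id = jobId;
            job.PoolInformation = new PoolInformation() { PoolId = poolId };
            job.Commit();

            // both tasks share a single staged file; it is uploaded to storage once and a SAS is handed to each task
            string path = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "staged.txt"); // hypothetical file name
            File.WriteAllText(path, "hello from file staging");
            FileToStage file = new FileToStage(path, stagingAccount);

            CloudTask task1 = new CloudTask("task_with_file1", "cmd /c type *.txt");
            CloudTask task2 = new CloudTask("task_with_file2", "cmd /c dir /s");
            task1.FilesToStage = new List<IFileStagingProvider> { file };
            task2.FilesToStage = new List<IFileStagingProvider> { file };

            // AddTask stages the files and records the created container names in the artifact bag
            var artifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            batchClient.JobOperations.AddTask(jobId, new List<CloudTask> { task1, task2 }, fileStagingArtifacts: artifacts);
        }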