public async Task CanBulkAddTasksWithFilesToStage(bool useJobOperations)
{
    const string testName = "TestBulkAddTaskWithFilesToStage";
    const int taskCount = 499;
    List<string> localFilesToStage = new List<string>();
    localFilesToStage.Add("TestResources\\Data.txt");

    ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> artifacts =
        new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
    List<int> legArtifactsCountList = new List<int>();

    using (CancellationTokenSource cts = new CancellationTokenSource())
    {
        // Spawn a thread to monitor the files to stage as we go -- we should observe the
        // artifact count grow as each batch ("leg") of tasks is staged.
        Task t = Task.Factory.StartNew(() =>
        {
            while (!cts.Token.IsCancellationRequested)
            {
                legArtifactsCountList.Add(artifacts.Count);
                Thread.Sleep(TimeSpan.FromSeconds(1));
            }
        });

        StagingStorageAccount storageCredentials = TestUtilities.GetStorageCredentialsFromEnvironment();
        using (BatchClient batchCli = TestUtilities.OpenBatchClientFromEnvironment())
        {
            await this.AddTasksSimpleTestAsync(
                batchCli,
                testName,
                taskCount,
                parallelOptions: new BatchClientParallelOptions() { MaxDegreeOfParallelism = 2 },
                resultHandlerFunc: null,
                storageCredentials: storageCredentials,
                localFilesToStage: localFilesToStage,
                fileStagingArtifacts: artifacts,
                useJobOperations: useJobOperations).ConfigureAwait(false);

            cts.Cancel();

            await t.ConfigureAwait(false); // wait for the spawned thread to exit

            this.testOutputHelper.WriteLine("File staging leg count: [");
            foreach (int fileStagingArtifactsCount in legArtifactsCountList)
            {
                this.testOutputHelper.WriteLine(fileStagingArtifactsCount + ", ");
            }
            this.testOutputHelper.WriteLine("]");

            // Tasks are added in batches of up to 100, and each batch produces one file staging artifact.
            const int expectedFinalFileStagingArtifactsCount = taskCount / 100 + 1;
            const int expectedInitialFileStagingArtifactsCount = 0;

            Assert.Equal(expectedInitialFileStagingArtifactsCount, legArtifactsCountList.First());
            Assert.Equal(expectedFinalFileStagingArtifactsCount, legArtifactsCountList.Last());
        }
    }
}
private static async Task<IEnumerable<string>> SubmitMoveTasksAsync(
    JobSettings settings,
    BatchClient batchClient,
    string blobContainerName,
    IEnumerable<string> blobNames,
    string jobId)
{
    if (string.IsNullOrEmpty(jobId))
    {
        throw new ArgumentNullException(nameof(jobId));
    }

    // Create the mover tasks, ensuring that the needed executable is staged
    var moverExe = $"{typeof(BlobMover.EntryPoint).Assembly.GetName().Name}.exe";
    var fileArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();

    var stagingStorageAccount = new StagingStorageAccount(
        settings.BatchBlobStorageName,
        settings.BatchBlobStorageKey,
        settings.BatchBlobStorageUrl);

    var moverFilesToStage = new List<IFileStagingProvider>
    {
        new FileToStage(moverExe, stagingStorageAccount)
    };

    var moverCloudTasks = blobNames.Select(blobName =>
        new CloudTask($"Mover-{blobName}", $"{moverExe} {blobContainerName} {blobName}")
        {
            FilesToStage = moverFilesToStage
        });

    await batchClient.JobOperations.AddTaskAsync(jobId, moverCloudTasks, fileStagingArtifacts: fileArtifacts).ConfigureAwait(false);

    // Each staging artifact records the blob container it created; return the distinct set.
    return fileArtifacts
        .SelectMany(dict => dict)
        .Select(kvp => kvp.Value)
        .OfType<SequentialFileStagingArtifact>()
        .Select(artifact => artifact.BlobContainerCreated)
        .Distinct();
}
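The container names returned by SubmitMoveTasksAsync are typically fed into a cleanup step once the job's tasks complete. A minimal sketch of such a cleanup helper, assuming the Azure.Storage.Blobs client and the same JobSettings properties used above; the helper name and its call site are hypothetical, not part of the sample:

// Hypothetical cleanup helper: deletes the staging containers returned by
// SubmitMoveTasksAsync. The settings property names mirror the sample above;
// everything else here is an assumption.
private static async Task DeleteStagingContainersAsync(JobSettings settings, IEnumerable<string> containerNames)
{
    var credential = new StorageSharedKeyCredential(settings.BatchBlobStorageName, settings.BatchBlobStorageKey);
    var serviceClient = new BlobServiceClient(new Uri(settings.BatchBlobStorageUrl), credential);

    foreach (string containerName in containerNames.Where(name => !string.IsNullOrEmpty(name)))
    {
        // The Batch service never deletes staged content; storage is billed until it is removed.
        await serviceClient.DeleteBlobContainerAsync(containerName).ConfigureAwait(false);
    }
}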
/// <summary>
/// Creates a job and adds a task to it. The task is a
/// custom executable which has a resource file associated with it.
/// </summary>
/// <param name="batchClient">The BatchClient to use when interacting with the Batch service.</param>
/// <param name="cloudStorageAccount">The storage account to upload the files to.</param>
/// <param name="jobId">The ID of the job.</param>
/// <returns>The set of container names containing the job's input files.</returns>
private async Task<HashSet<string>> SubmitJobAsync(BatchClient batchClient, CloudStorageAccount cloudStorageAccount, string jobId)
{
    // create an empty unbound Job
    CloudJob unboundJob = batchClient.JobOperations.CreateJob();
    unboundJob.Id = jobId;
    unboundJob.PoolInformation = new PoolInformation()
    {
        PoolId = this.poolsAndResourceFileSettings.PoolId
    };

    // Commit Job to create it in the service
    await unboundJob.CommitAsync();

    List<CloudTask> tasksToRun = new List<CloudTask>();

    // Create a task which requires some resource files
    CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

    // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
    // when the tasks are submitted to the Azure Batch service.
    taskWithFiles.FilesToStage = new List<IFileStagingProvider>();

    // generate a local file in the temp directory
    string localSampleFile = Path.Combine(Environment.GetEnvironmentVariable("TEMP"), "HelloWorld.txt");
    File.WriteAllText(localSampleFile, "hello from Batch PoolsAndResourceFiles sample!");

    StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
        storageAccount: this.accountSettings.StorageAccountName,
        storageAccountKey: this.accountSettings.StorageAccountKey,
        blobEndpoint: cloudStorageAccount.BlobEndpoint.ToString());

    // add the files as a task dependency so they will be uploaded to storage before the task
    // is submitted and downloaded to the node before the task starts execution.
    FileToStage helloWorldFile = new FileToStage(localSampleFile, fileStagingStorageAccount);
    FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

    // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
    // The Batch service does not automatically delete content from your storage account, so files added in this
    // way must be manually removed when they are no longer used.
    taskWithFiles.FilesToStage.Add(helloWorldFile);
    taskWithFiles.FilesToStage.Add(simpleTaskFile);

    tasksToRun.Add(taskWithFiles);

    var fileStagingArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();

    // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
    // tasks at once in a single request. If the list of tasks is N where N > 100, this will correctly parallelize
    // the requests and return when all N tasks have been added.
    await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts: fileStagingArtifacts);

    // Extract the names of the blob containers from the file staging artifacts
    HashSet<string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);
    return blobContainerNames;
}
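GettingStartedCommon.ExtractBlobContainerNames is referenced here (and in the JobManager sample below) but not shown in this section. A minimal sketch of what it plausibly does, assuming it follows the same SequentialFileStagingArtifact pattern as the AddTask samples later in this section; the real helper in the samples repository may differ:

// Hedged sketch of the ExtractBlobContainerNames helper referenced above.
public static HashSet<string> ExtractBlobContainerNames(
    ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fileStagingArtifacts)
{
    HashSet<string> containerNames = new HashSet<string>();

    foreach (var artifactDictionary in fileStagingArtifacts)
    {
        // FileToStage produces a SequentialFileStagingArtifact that records the container it created.
        if (artifactDictionary.TryGetValue(typeof(FileToStage), out IFileStagingArtifact artifact) &&
            artifact is SequentialFileStagingArtifact sequentialArtifact &&
            !string.IsNullOrEmpty(sequentialArtifact.BlobContainerCreated))
        {
            containerNames.Add(sequentialArtifact.BlobContainerCreated);
        }
    }

    return containerNames;
}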
public async Task ComputeNodeUploadLogs()
{
    Func<Task> test = async () =>
    {
        using (BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync())
        {
            const string containerName = "computenodelogscontainer";

            // Generate a storage container URL
            StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment();
            BlobServiceClient blobClient = BlobUtilities.GetBlobServiceClient(storageAccount);
            BlobContainerClient containerClient = BlobUtilities.GetBlobContainerClient(containerName, blobClient, storageAccount);

            try
            {
                containerClient.CreateIfNotExists();
                string sasUri = BlobUtilities.GetWriteableSasUri(containerClient, storageAccount);

                var blobs = containerClient.GetAllBlobs();

                // Ensure that there are no items in the container to begin with
                Assert.Empty(blobs);

                var startTime = DateTime.UtcNow.Subtract(TimeSpan.FromMinutes(5));

                var node = batchCli.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).First();
                var result = batchCli.PoolOperations.UploadComputeNodeBatchServiceLogs(
                    this.poolFixture.PoolId,
                    node.Id,
                    sasUri,
                    startTime);

                Assert.NotEqual(0, result.NumberOfFilesUploaded);
                Assert.NotEmpty(result.VirtualDirectoryName);

                // Allow up to 2 minutes for the files to get uploaded, polling periodically
                // rather than spinning in a tight loop.
                DateTime timeoutAt = DateTime.UtcNow.AddMinutes(2);
                while (DateTime.UtcNow < timeoutAt)
                {
                    blobs = containerClient.GetAllBlobs();
                    if (blobs.Any())
                    {
                        break;
                    }

                    await Task.Delay(TimeSpan.FromSeconds(5));
                }

                Assert.NotEmpty(blobs);
            }
            finally
            {
                await containerClient.DeleteIfExistsAsync();
            }
        }
    };

    await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
}
public static BlobServiceClient GetBlobServiceClient(StagingStorageAccount storageAccount = null)
{
    storageAccount ??= GetStorageAccount();

    StorageSharedKeyCredential credential = new StorageSharedKeyCredential(storageAccount.StorageAccount, storageAccount.StorageAccountKey);
    BlobServiceClient serviceClient = new BlobServiceClient(storageAccount.BlobUri, credential, null);
    return serviceClient;
}
public static StagingStorageAccount GetStorageCredentialsFromEnvironment()
{
    string storageAccountKey = TestCommon.Configuration.StorageAccountKey;
    string storageAccountName = TestCommon.Configuration.StorageAccountName;
    string storageAccountBlobEndpoint = TestCommon.Configuration.StorageAccountBlobEndpoint;

    StagingStorageAccount storageStagingCredentials = new StagingStorageAccount(storageAccountName, storageAccountKey, storageAccountBlobEndpoint);
    return storageStagingCredentials;
}
public static string GetWriteableSasUri(BlobContainerClient containerClient, StagingStorageAccount storageAccount)
{
    var sasBuilder = new BlobSasBuilder
    {
        ExpiresOn = DateTime.UtcNow.AddDays(1),
        BlobContainerName = containerClient.Name,
    };
    sasBuilder.SetPermissions(BlobSasPermissions.Write);

    StorageSharedKeyCredential credentials = GetSharedKeyCredential(storageAccount);
    BlobUriBuilder builder = new BlobUriBuilder(containerClient.Uri)
    {
        Sas = sasBuilder.ToSasQueryParameters(credentials)
    };

    string fullSas = builder.ToString();
    return fullSas;
}
private static IList<ResourceFile> UploadFilesMakeResFiles(StagingStorageAccount stagingCreds)
{
    // use a dummy task to stage some files and generate resource files
    CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & type localwords.txt");

    // first we have local files that we want pushed to the compute node before the commandline is invoked
    FileToStage wordsDotText = new FileToStage(Resources.LocalWordsDotText, stagingCreds); // use "default" mapping to base name of local file

    // add in the files to stage
    myTask.FilesToStage = new List<IFileStagingProvider>();
    myTask.FilesToStage.Add(wordsDotText);

    // trigger file staging
    myTask.StageFiles();

    // return the resolved resource files
    return myTask.ResourceFiles;
}
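For context, the resource files resolved by UploadFilesMakeResFiles can be reused on other tasks (or on job preparation/release tasks, as in the job schedule test later in this section) without staging the files again. A brief usage sketch; the task ID and command line here are illustrative only:

// Usage sketch (assumed context): resolve resource files once, then reuse them
// on another task that does not itself trigger file staging.
StagingStorageAccount stagingCreds = TestUtilities.GetStorageCredentialsFromEnvironment();
IList<ResourceFile> resourceFiles = UploadFilesMakeResFiles(stagingCreds);

CloudTask reuseTask = new CloudTask(id: "ReuseWordsTask", commandline: @"cmd /c type localwords.txt");
reuseTask.ResourceFiles = resourceFiles; // no second upload occurs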
/// <summary>
/// Submits a set of tasks to the job.
/// </summary>
/// <param name="batchClient">The batch client to use.</param>
/// <returns>The set of blob artifacts created by file staging.</returns>
private async Task<HashSet<string>> SubmitTasks(BatchClient batchClient)
{
    List<CloudTask> tasksToRun = new List<CloudTask>();

    // Create a task which requires some resource files
    CloudTask taskWithFiles = new CloudTask("task_with_file1", SimpleTaskExe);

    // Set up a collection of files to be staged -- these files will be uploaded to Azure Storage
    // when the tasks are submitted to the Azure Batch service.
    taskWithFiles.FilesToStage = new List<IFileStagingProvider>();

    // generate a local file in the temp directory
    string localSampleFilePath = GettingStartedCommon.GenerateTemporaryFile("HelloWorld.txt", "hello from Batch JobManager sample!");

    StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount(
        storageAccount: this.configurationSettings.StorageAccountName,
        storageAccountKey: this.configurationSettings.StorageAccountKey,
        blobEndpoint: this.configurationSettings.StorageBlobEndpoint);

    // add the files as a task dependency so they will be uploaded to storage before the task
    // is submitted and downloaded to the node before the task starts execution.
    FileToStage helloWorldFile = new FileToStage(localSampleFilePath, fileStagingStorageAccount);
    FileToStage simpleTaskFile = new FileToStage(SimpleTaskExe, fileStagingStorageAccount);

    // When this task is added via JobOperations.AddTaskAsync below, the FilesToStage are uploaded to storage once.
    // The Batch service does not automatically delete content from your storage account, so files added in this
    // way must be manually removed when they are no longer used.
    taskWithFiles.FilesToStage.Add(helloWorldFile);
    taskWithFiles.FilesToStage.Add(simpleTaskFile);

    tasksToRun.Add(taskWithFiles);

    var fileStagingArtifacts = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();

    // Use the AddTask method which takes an enumerable of tasks for best performance, as it submits up to 100
    // tasks at once in a single request. If the list of tasks is N where N > 100, this will correctly parallelize
    // the requests and return when all N tasks have been added.
    await batchClient.JobOperations.AddTaskAsync(jobId, tasksToRun, fileStagingArtifacts: fileStagingArtifacts);

    // Extract the names of the blob containers from the file staging artifacts
    HashSet<string> blobContainerNames = GettingStartedCommon.ExtractBlobContainerNames(fileStagingArtifacts);
    return blobContainerNames;
}
private static List<CloudTask> CreateTasks(Settings unzipperSettings, StagingStorageAccount stagingStorageAccount)
{
    // create file staging objects that represent the executable and its dependent assembly to run as the task.
    // These files are copied to every node before the corresponding task is scheduled to run on that node.
    FileToStage unzipperExe = new FileToStage(UnzipperExeName, stagingStorageAccount);
    FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);

    // get the list of zipped files
    var zipFiles = GetZipFiles(unzipperSettings).ToList();
    Console.WriteLine("found {0} zipped files", zipFiles.Count);

    // initialize a collection to hold the tasks that will be submitted in their entirety. This will be one task per file.
    List<CloudTask> tasksToRun = new List<CloudTask>(zipFiles.Count);
    int i = 0;
    foreach (var zipFile in zipFiles)
    {
        CloudTask task = new CloudTask(
            "task_no_" + i,
            String.Format("{0} --Task {1} {2} {3}", UnzipperExeName, zipFile.Uri, unzipperSettings.StorageAccountName, unzipperSettings.StorageAccountKey));

        // This is the list of files to stage to a container -- for each job, one container is created and
        // files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
        // the container).
        task.FilesToStage = new List<IFileStagingProvider>
        {
            unzipperExe,
            storageDll
        };

        tasksToRun.Add(task);
        i++;
    }

    return tasksToRun;
}
public static void JobMain(string[] args)
{
    // Load the configuration
    Settings unzipperSettings = Settings.Default;

    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
        new StorageCredentials(
            unzipperSettings.StorageAccountName,
            unzipperSettings.StorageAccountKey),
        unzipperSettings.StorageServiceUrl,
        useHttps: true);

    StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
        unzipperSettings.StorageAccountName,
        unzipperSettings.StorageAccountKey,
        cloudStorageAccount.BlobEndpoint.ToString());

    using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(unzipperSettings.BatchServiceUrl, unzipperSettings.BatchAccountName, unzipperSettings.BatchAccountKey)))
    {
        string stagingContainer = null;

        // create pool
        CloudPool pool = CreatePool(unzipperSettings, client);

        try
        {
            CreateJob(unzipperSettings, client);

            List<CloudTask> tasksToRun = CreateTasks(unzipperSettings, stagingStorageAccount);

            AddTasksToJob(unzipperSettings, client, stagingContainer, tasksToRun);

            MonitorProgess(unzipperSettings, client);
        }
        finally
        {
            Cleanup(unzipperSettings, client, stagingContainer);
        }
    }
}
public static async Task JobMain(string[] args)
{
    // Load the configuration
    Settings topNWordsConfiguration = Settings.Default;
    AccountSettings accountSettings = AccountSettings.Default;

    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
        new StorageCredentials(
            accountSettings.StorageAccountName,
            accountSettings.StorageAccountKey),
        accountSettings.StorageServiceUrl,
        useHttps: true);

    StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
        accountSettings.StorageAccountName,
        accountSettings.StorageAccountKey,
        cloudStorageAccount.BlobEndpoint.ToString());

    using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
    {
        string stagingContainer = null;

        // OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at:
        // http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
        CloudPool pool = client.PoolOperations.CreatePool(
            topNWordsConfiguration.PoolId,
            targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
            virtualMachineSize: "small",
            cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4"));

        List<string> files = new List<string>
        {
            Path.Combine(BatchStartTaskFolderName, BatchStartTaskTelemetryRunnerName),
        };
        files.AddRange(AIFilesToUpload);

        List<ResourceFile> resourceFiles = await SampleHelpers.UploadResourcesAndCreateResourceFileReferencesAsync(
            cloudStorageAccount,
            AIBlobConatinerName,
            files);

        pool.StartTask = new StartTask()
        {
            CommandLine = string.Format("cmd /c {0}", BatchStartTaskTelemetryRunnerName),
            ResourceFiles = resourceFiles
        };

        Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
        try
        {
            await GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool);
        }
        catch (AggregateException ae)
        {
            // Go through all exceptions and dump useful information
            ae.Handle(x =>
            {
                Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
                if (x is BatchException be)
                {
                    Console.WriteLine(be.ToString());
                    Console.WriteLine();
                }
                else
                {
                    Console.WriteLine(x);
                }

                // can't continue without a pool
                return false;
            });
        }
        catch (BatchException be)
        {
            Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId);
            Console.WriteLine(be.ToString());
            Console.WriteLine();
        }

        try
        {
            Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);

            // get an empty unbound Job
            CloudJob unboundJob = client.JobOperations.CreateJob();
            unboundJob.Id = topNWordsConfiguration.JobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = topNWordsConfiguration.PoolId };

            // Commit Job to create it in the service
            await unboundJob.CommitAsync();

            // create file staging objects that represent the executable and its dependent assembly to run as the task.
            // These files are copied to every node before the corresponding task is scheduled to run on that node.
            FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
            FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);

            // Upload Application Insights assemblies
            List<FileToStage> aiStagedFiles = new List<FileToStage>();
            foreach (string aiFile in AIFilesToUpload)
            {
                aiStagedFiles.Add(new FileToStage(aiFile, stagingStorageAccount));
            }

            // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
            // This approach is appropriate when the amount of input data is large (such that copying it to every node via
            // FileStaging is not desired) and the number of tasks is small, since a large number of readers of the blob might
            // get throttled by Storage, which would lengthen the overall processing time.
            //
            // You'll need to observe the behavior and use published techniques for finding the right balance of performance
            // versus complexity.
            string[] documents = Directory.GetFiles(topNWordsConfiguration.DocumentsRootPath);
            await SampleHelpers.UploadResourcesAsync(cloudStorageAccount, BooksContainerName, documents);

            // initialize a collection to hold the tasks that will be submitted in their entirety
            List<CloudTask> tasksToRun = new List<CloudTask>(documents.Length);
            for (int i = 0; i < documents.Length; i++)
            {
                CloudTask task = new CloudTask(
                    "task_no_" + i,
                    String.Format("{0} --Task {1} {2} {3} {4}",
                        TopNWordsExeName,
                        "https://onbehalfoutput.blob.core.windows.net/" + documents[i],
                        topNWordsConfiguration.TopWordCount,
                        accountSettings.StorageAccountName,
                        accountSettings.StorageAccountKey));

                // This is the list of files to stage to a container -- for each job, one container is created and
                // files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                // the container).
                task.FilesToStage = new List<IFileStagingProvider>
                {
                    topNWordExe,
                    storageDll,
                };
                foreach (FileToStage stagedFile in aiStagedFiles)
                {
                    task.FilesToStage.Add(stagedFile);
                }

                tasksToRun.Add(task);
            }

            // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
            // The container information is used later on to remove these files from Storage.
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

            // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
            // capture the name of the container holding the files so they can be deleted later on if that option
            // was configured in the settings.
            foreach (var fsBagItem in fsArtifactBag)
            {
                IFileStagingArtifact fsValue;
                if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                {
                    SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                    if (stagingArtifact != null)
                    {
                        stagingContainer = stagingArtifact.BlobContainerCreated;
                        Console.WriteLine(
                            "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                            stagingArtifact.BlobContainerCreated);
                    }
                }
            }

            // Get the job to monitor status.
            CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

            Console.Write("Waiting for tasks to complete ... ");

            // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
            // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
            IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
            client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
            Console.WriteLine("tasks are done.");

            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine();
                Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
            }
        }
        finally
        {
            // Delete the pool that we created
            if (topNWordsConfiguration.ShouldDeletePool)
            {
                Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
            }

            // Delete the job that we created
            if (topNWordsConfiguration.ShouldDeleteJob)
            {
                Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
            }

            // Delete the containers we created
            if (topNWordsConfiguration.ShouldDeleteContainer)
            {
                DeleteContainers(accountSettings, stagingContainer);
            }
        }
    }
}
public static void JobMain(string[] args)
{
    // Load the configuration
    Settings topNWordsConfiguration = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("settings.json")
        .Build()
        .Get<Settings>();
    AccountSettings accountSettings = SampleHelpers.LoadAccountSettings();

    CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
        new StorageCredentials(
            accountSettings.StorageAccountName,
            accountSettings.StorageAccountKey),
        accountSettings.StorageServiceUrl,
        useHttps: true);

    StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
        accountSettings.StorageAccountName,
        accountSettings.StorageAccountKey,
        cloudStorageAccount.BlobEndpoint.ToString());

    using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey)))
    {
        string stagingContainer = null;

        // OSFamily 6 == Windows Server 2019. You can learn more about os families and versions at:
        // http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
        CloudPool pool = client.PoolOperations.CreatePool(
            topNWordsConfiguration.PoolId,
            targetDedicatedComputeNodes: topNWordsConfiguration.PoolNodeCount,
            virtualMachineSize: "standard_d1_v2",
            cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "6"));

        Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId);
        pool.TaskSchedulingPolicy = new TaskSchedulingPolicy(ComputeNodeFillType.Spread);
        pool.MaxTasksPerComputeNode = 4;

        GettingStartedCommon.CreatePoolIfNotExistAsync(client, pool).Wait();

        var formula = @"startingNumberOfVMs = 2;
            maxNumberofVMs = 4;
            pendingTaskSamplePercent = $PendingTasks.GetSamplePercent(90 * TimeInterval_Second);
            pendingTaskSamples = pendingTaskSamplePercent < 70 ? startingNumberOfVMs : avg($PendingTasks.GetSample(180 * TimeInterval_Second));
            $TargetDedicatedNodes = min(maxNumberofVMs, pendingTaskSamples);
            $NodeDeallocationOption = taskcompletion;";

        // Give the pool time to accumulate pending-task samples before enabling autoscale.
        var noOfSeconds = 150;
        Thread.Sleep(noOfSeconds * 1000);
        client.PoolOperations.EnableAutoScale(
            poolId: topNWordsConfiguration.PoolId,
            autoscaleFormula: formula,
            autoscaleEvaluationInterval: TimeSpan.FromMinutes(5));

        try
        {
            Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId);

            // get an empty unbound Job
            CloudJob unboundJob = client.JobOperations.CreateJob();
            unboundJob.Id = topNWordsConfiguration.JobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = topNWordsConfiguration.PoolId };

            // Commit Job to create it in the service
            unboundJob.Commit();

            // create file staging objects that represent the executable and its dependent assemblies to run as the task.
            // These files are copied to every node before the corresponding task is scheduled to run on that node.
            FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
            FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);
            FileToStage newtonJsoftDll = new FileToStage(NewtonJSoftDllName, stagingStorageAccount);
            FileToStage microsoftEFDll = new FileToStage(MicrosoftEntityFrameworkDllName, stagingStorageAccount);
            FileToStage microsoftEFCoreDll = new FileToStage(MicrosoftEntityFrameworkCoreDllName, stagingStorageAccount);
            FileToStage microsoftBCLDll = new FileToStage(MicrosoftBCLDllName, stagingStorageAccount);
            FileToStage systemTasksDll = new FileToStage(SystemTasksDllName, stagingStorageAccount);
            FileToStage topNWordsConfigFile = new FileToStage(TopnWordsConfig, stagingStorageAccount);
            FileToStage SystemValueTupleDll = new FileToStage(SystemValueTupleDllName, stagingStorageAccount);
            FileToStage DependencyInjectionAbstractionsDll = new FileToStage(DependecyInjectionAbstractionsDllName, stagingStorageAccount);
            FileToStage DependencyInjectionDll = new FileToStage(DependecyInjectionDllName, stagingStorageAccount);
            FileToStage LoggingAbstractionsDll = new FileToStage(LoggingAbstractionsDllName, stagingStorageAccount);
            FileToStage DiagnosticsDll = new FileToStage(DiagnosticssDllName, stagingStorageAccount);
            FileToStage CachingAbstractionDll = new FileToStage(CachingAbstractionsDllName, stagingStorageAccount);
            FileToStage MicrosoftSqlServerDll = new FileToStage(MicrosoftSqlServerDllName, stagingStorageAccount);
            FileToStage SystemComponentDll = new FileToStage(SystemComponentDllName, stagingStorageAccount);
            FileToStage SystemCollectionsDll = new FileToStage(SystemCollectionsDllName, stagingStorageAccount);
            FileToStage pDll = new FileToStage(pdllName, stagingStorageAccount);
            FileToStage oDll = new FileToStage(odllName, stagingStorageAccount);
            FileToStage lDll = new FileToStage(ldllName, stagingStorageAccount);
            FileToStage hashcodeDll = new FileToStage(hashcodeDllName, stagingStorageAccount);
            FileToStage clientSqlDll = new FileToStage(clientSqlClientDllName, stagingStorageAccount);
            FileToStage cachingMemoryDll = new FileToStage(CachingMemoryDllName, stagingStorageAccount);
            FileToStage configAbstractionDll = new FileToStage(configAbstractionDllName, stagingStorageAccount);
            FileToStage SNIDll = new FileToStage(SNIDllName, stagingStorageAccount);
            FileToStage relationDll = new FileToStage(relationddllName, stagingStorageAccount);

            var textFile = "E:\\WeatherAPIPOC\\cities_id.txt";
            var text = File.ReadAllLines(textFile);
            var cityList = new List<string>(text);

            // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
            // This approach is appropriate when the amount of input data is large (such that copying it to every node via
            // FileStaging is not desired) and the number of tasks is small, since a large number of readers of the blob might
            // get throttled by Storage, which would lengthen the overall processing time.
            //
            // You'll need to observe the behavior and use published techniques for finding the right balance of performance
            // versus complexity.
            Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.FileName);

            // initialize a collection to hold the tasks that will be submitted in their entirety
            List<CloudTask> tasksToRun = new List<CloudTask>(topNWordsConfiguration.NumberOfTasks);
            for (int i = 0; i < cityList.Count; i++)
            {
                string programLaunchTime = DateTime.Now.ToString("h:mm:sstt");
                CloudTask task = new CloudTask(
                    id: $"task_no_{i + 1}",
                    commandline: $"cmd /c mkdir x64 & move SNI.dll x64 & {TopNWordsExeName} --Task {cityList[i]} %AZ_BATCH_NODE_ID% {programLaunchTime}");

                // This is the list of files to stage to a container -- for each job, one container is created and
                // files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                // the container).
                task.FilesToStage = new List<IFileStagingProvider>
                {
                    topNWordExe,
                    storageDll,
                    newtonJsoftDll,
                    microsoftEFDll,
                    microsoftEFCoreDll,
                    microsoftBCLDll,
                    systemTasksDll,
                    topNWordsConfigFile,
                    SystemValueTupleDll,
                    DependencyInjectionAbstractionsDll,
                    DependencyInjectionDll,
                    LoggingAbstractionsDll,
                    DiagnosticsDll,
                    CachingAbstractionDll,
                    MicrosoftSqlServerDll,
                    SystemComponentDll,
                    SystemCollectionsDll,
                    oDll,
                    pDll,
                    lDll,
                    relationDll,
                    hashcodeDll,
                    clientSqlDll,
                    cachingMemoryDll,
                    configAbstractionDll,
                    SNIDll
                };

                tasksToRun.Add(task);
            }

            // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
            // The container information is used later on to remove these files from Storage.
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

            // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
            // capture the name of the container holding the files so they can be deleted later on if that option
            // was configured in the settings.
            foreach (var fsBagItem in fsArtifactBag)
            {
                IFileStagingArtifact fsValue;
                if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                {
                    SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                    if (stagingArtifact != null)
                    {
                        stagingContainer = stagingArtifact.BlobContainerCreated;
                        Console.WriteLine(
                            "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                            stagingArtifact.BlobContainerCreated);
                    }
                }
            }

            // Get the job to monitor status.
            CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId);

            Console.Write("Waiting for tasks to complete ... ");

            // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
            // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
            IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
            client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
            Console.WriteLine("tasks are done.");

            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine();
                Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Batch.Constants.StandardErrorFileName).ReadAsString());
            }
        }
        finally
        {
            // Delete the pool that we created
            if (topNWordsConfiguration.ShouldDeletePool)
            {
                Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId);
                client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId);
            }

            // Delete the job that we created
            if (topNWordsConfiguration.ShouldDeleteJob)
            {
                Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId);
                client.JobOperations.DeleteJob(topNWordsConfiguration.JobId);
            }

            // Delete the containers we created
            if (topNWordsConfiguration.ShouldDeleteContainer)
            {
                DeleteContainers(accountSettings, stagingContainer);
            }
        }
    }
}
public void TestSampleWithFilesAndPool()
{
    Action test = () =>
    {
        StagingStorageAccount storageCreds = TestUtilities.GetStorageCredentialsFromEnvironment();

        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "SampleWithFilesJob-" + TestUtilities.GetMyName();

            try
            {
                CloudJob quickJob = batchCli.JobOperations.CreateJob();
                quickJob.Id = jobId;
                quickJob.PoolInformation = new PoolInformation() { PoolId = this.poolFixture.PoolId };
                quickJob.Commit();
                CloudJob boundJob = batchCli.JobOperations.GetJob(jobId);

                CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & wc localwords.txt");

                // first we have local files that we want pushed to the compute node before the commandline is invoked
                FileToStage wordsDotText = new FileToStage(Resources.LocalWordsDotText, storageCreds); // use "default" mapping to base name of local file

                myTask.FilesToStage = new List<IFileStagingProvider>();
                myTask.FilesToStage.Add(wordsDotText);

                // add the task to the job
                var artifacts = boundJob.AddTask(myTask);
                var specificArtifact = artifacts[typeof(FileToStage)];
                SequentialFileStagingArtifact sfsa = specificArtifact as SequentialFileStagingArtifact;

                Assert.NotNull(sfsa);

                // add a million more tasks...

                // test to ensure the task is read only
                TestUtilities.AssertThrows<InvalidOperationException>(() => myTask.FilesToStage = new List<IFileStagingProvider>());

                // Open the new Job as bound.
                CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId);

                // wait for the task to complete
                Utilities utilities = batchCli.Utilities;
                TaskStateMonitor taskStateMonitor = utilities.CreateTaskStateMonitor();

                taskStateMonitor.WaitAll(
                    boundJob.ListTasks(),
                    Microsoft.Azure.Batch.Common.TaskState.Completed,
                    TimeSpan.FromMinutes(10),
                    controlParams: null,
                    additionalBehaviors: new[]
                    {
                        // spam/logging interceptor
                        new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) =>
                        {
                            this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString());

                            try
                            {
                                // print out the compute node states... we are actually waiting on the compute nodes
                                List<ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList();
                                this.testOutputHelper.WriteLine("    #compute nodes: " + allComputeNodes.Count);

                                allComputeNodes.ForEach(
                                    (icn) =>
                                    {
                                        this.testOutputHelper.WriteLine("  computeNode.id: " + icn.Id + ", state: " + icn.State);
                                    });
                            }
                            catch (Exception ex)
                            {
                                // there is a race between the pool-life-job and the end of the job.. and the ListComputeNodes above
                                Assert.True(false, "SampleWithFilesAndPool probably can ignore this if its pool is not found: " + ex.ToString());
                            }
                        })
                    });

                List<CloudTask> tasks = boundJob.ListTasks(null).ToList();
                CloudTask myCompletedTask = tasks[0];

                foreach (CloudTask curTask in tasks)
                {
                    this.testOutputHelper.WriteLine("Task Id: " + curTask.Id + ", state: " + curTask.State);
                }

                boundPool.Refresh();

                this.testOutputHelper.WriteLine("Pool Id: " + boundPool.Id + ", state: " + boundPool.State);

                string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString();
                string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString();

                this.testOutputHelper.WriteLine("StdOut: ");
                this.testOutputHelper.WriteLine(stdOut);

                this.testOutputHelper.WriteLine("StdErr: ");
                this.testOutputHelper.WriteLine(stdErr);

                this.testOutputHelper.WriteLine("Task Files:");

                foreach (NodeFile curFile in myCompletedTask.ListNodeFiles(recursive: true))
                {
                    this.testOutputHelper.WriteLine("    Filename: " + curFile.Name);
                }

                // confirm the files are there
                Assert.True(FoundFile("localwords.txt", myCompletedTask.ListNodeFiles(recursive: true)), "missing file: localwords.txt");

                // test validation of StagingStorageAccount
                TestUtilities.AssertThrows<ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: " ", storageAccountKey: "key", blobEndpoint: "blob"); });
                TestUtilities.AssertThrows<ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: " ", blobEndpoint: "blob"); });
                TestUtilities.AssertThrows<ArgumentOutOfRangeException>(() => { new StagingStorageAccount(storageAccount: "account", storageAccountKey: "key", blobEndpoint: ""); });

                if (null != sfsa)
                {
                    // TODO: delete the container!
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
public async Task Bug1360227_AddTasksBatchRetry(bool useJobOperations)
{
    const string testName = "Bug1360227_AddTasksBatchRetry";

    Random rand = new Random();
    object randLock = new object();

    int numberOfTasksWhichHitClientError = 0;
    int numberOfTasksWhichWereForcedToRetry = 0;

    Func<AddTaskResult, CancellationToken, AddTaskResultStatus> resultHandlerFunc = (result, token) =>
    {
        this.testOutputHelper.WriteLine("Task: {0} got status code: {1}", result.TaskId, result.Status);
        AddTaskResultStatus resultAction;

        if (result.Status == AddTaskStatus.ClientError)
        {
            ++numberOfTasksWhichHitClientError;
            return AddTaskResultStatus.Success; // have to count a client error as success
        }

        lock (randLock)
        {
            double d = rand.NextDouble();
            if (d > 0.8)
            {
                this.testOutputHelper.WriteLine("Forcing retry for task: {0}", result.TaskId);
                resultAction = AddTaskResultStatus.Retry;
                ++numberOfTasksWhichWereForcedToRetry;
            }
            else
            {
                resultAction = AddTaskResultStatus.Success;
            }
        }

        return resultAction;
    };

    await SynchronizationContextHelper.RunTestAsync(async () =>
    {
        StagingStorageAccount storageCredentials = TestUtilities.GetStorageCredentialsFromEnvironment();

        using (BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync())
        {
            BatchClientParallelOptions parallelOptions = new BatchClientParallelOptions()
            {
                MaxDegreeOfParallelism = 2
            };

            await this.AddTasksSimpleTestAsync(
                batchCli,
                testName,
                1281,
                parallelOptions,
                resultHandlerFunc,
                storageCredentials,
                new List<string> { "TestResources\\Data.txt" },
                useJobOperations: useJobOperations).ConfigureAwait(false);
        }
    }, LongTestTimeout);

    // Ensure that we forced some tasks to retry
    this.testOutputHelper.WriteLine("Forced a total of {0} tasks to retry", numberOfTasksWhichWereForcedToRetry);

    Assert.True(numberOfTasksWhichWereForcedToRetry > 0);
    Assert.Equal(numberOfTasksWhichWereForcedToRetry, numberOfTasksWhichHitClientError);
}
public static void JobMain(string[] args)
{
    // Load the configuration
    TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig();

    StagingStorageAccount stagingStorageAccount = new StagingStorageAccount(
        configuration.StorageAccountName,
        configuration.StorageAccountKey,
        configuration.StorageAccountBlobEndpoint);

    using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(configuration.BatchServiceUrl, configuration.BatchAccountName, configuration.BatchAccountKey)))
    {
        string stagingContainer = null;

        // Create a pool (if the user hasn't provided one)
        if (configuration.ShouldCreatePool)
        {
            // OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at:
            // http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx
            CloudPool pool = client.PoolOperations.CreatePool(
                configuration.PoolId,
                targetDedicated: configuration.PoolSize,
                osFamily: "4",
                virtualMachineSize: "small");
            Console.WriteLine("Adding pool {0}", configuration.PoolId);

            try
            {
                pool.Commit();
            }
            catch (AggregateException ae)
            {
                // Go through all exceptions and dump useful information
                ae.Handle(x =>
                {
                    Console.Error.WriteLine("Creating pool ID {0} failed", configuration.PoolId);
                    if (x is BatchException)
                    {
                        BatchException be = x as BatchException;
                        Console.WriteLine(be.ToString());
                        Console.WriteLine();
                    }
                    else
                    {
                        Console.WriteLine(x);
                    }

                    // can't continue without a pool
                    return false;
                });
            }
        }

        try
        {
            Console.WriteLine("Creating job: " + configuration.JobId);

            // get an empty unbound Job
            CloudJob unboundJob = client.JobOperations.CreateJob();
            unboundJob.Id = configuration.JobId;
            unboundJob.PoolInformation = new PoolInformation() { PoolId = configuration.PoolId };

            // Commit Job to create it in the service
            unboundJob.Commit();

            // create file staging objects that represent the executable and its dependent assembly to run as the task.
            // These files are copied to every node before the corresponding task is scheduled to run on that node.
            FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount);
            FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount);

            // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument.
            // This approach is appropriate when the amount of input data is large (such that copying it to every node via
            // FileStaging is not desired) and the number of tasks is small, since a large number of readers of the blob might
            // get throttled by Storage, which would lengthen the overall processing time.
            //
            // You'll need to observe the behavior and use published techniques for finding the right balance of performance
            // versus complexity.
            string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName);
            Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName);

            // initialize a collection to hold the tasks that will be submitted in their entirety
            List<CloudTask> tasksToRun = new List<CloudTask>(configuration.NumberOfTasks);

            for (int i = 1; i <= configuration.NumberOfTasks; i++)
            {
                CloudTask task = new CloudTask(
                    "task_no_" + i,
                    String.Format("{0} --Task {1} {2} {3} {4}",
                        TopNWordsExeName,
                        bookFileUri,
                        configuration.NumberOfTopWords,
                        configuration.StorageAccountName,
                        configuration.StorageAccountKey));

                // This is the list of files to stage to a container -- for each job, one container is created and
                // files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in
                // the container).
                task.FilesToStage = new List<IFileStagingProvider>
                {
                    topNWordExe,
                    storageDll
                };

                tasksToRun.Add(task);
            }

            // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged.
            // The container information is used later on to remove these files from Storage.
            ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>();
            client.JobOperations.AddTask(configuration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag);

            // loop through the bag of artifacts, looking for the one that matches our staged files. Once there,
            // capture the name of the container holding the files so they can be deleted later on if that option
            // was configured in the settings.
            foreach (var fsBagItem in fsArtifactBag)
            {
                IFileStagingArtifact fsValue;
                if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue))
                {
                    SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact;
                    if (stagingArtifact != null)
                    {
                        stagingContainer = stagingArtifact.BlobContainerCreated;
                        Console.WriteLine(
                            "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.",
                            stagingArtifact.BlobContainerCreated);
                    }
                }
            }

            // Get the job to monitor status.
            CloudJob job = client.JobOperations.GetJob(configuration.JobId);

            Console.Write("Waiting for tasks to complete ... ");

            // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first
            // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks.
            IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id"));
            client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20));
            Console.WriteLine("tasks are done.");

            foreach (CloudTask t in ourTasks)
            {
                Console.WriteLine("Task " + t.Id);
                Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                Console.WriteLine();
                Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString());
            }
        }
        finally
        {
            // Delete the pool that we created
            if (configuration.ShouldCreatePool)
            {
                Console.WriteLine("Deleting pool: {0}", configuration.PoolId);
                client.PoolOperations.DeletePool(configuration.PoolId);
            }

            // Delete the job that we created
            if (configuration.ShouldDeleteJob)
            {
                Console.WriteLine("Deleting job: {0}", configuration.JobId);
                client.JobOperations.DeleteJob(configuration.JobId);
            }

            // Delete the containers we created
            if (configuration.ShouldDeleteContainer)
            {
                DeleteContainers(configuration, stagingContainer);
            }
        }
    }
}
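DeleteContainers is invoked in the cleanup path above but not shown in this section. A minimal sketch, assuming the classic CloudBlobClient API this sample already uses and that only the staging container needs removal; the real helper may also delete the book-file container:

// Hedged sketch of the DeleteContainers helper referenced above; the signature
// and body are assumptions consistent with this sample's configuration object.
private static void DeleteContainers(TopNWordsConfiguration configuration, string stagingContainer)
{
    CloudStorageAccount account = new CloudStorageAccount(
        new StorageCredentials(configuration.StorageAccountName, configuration.StorageAccountKey),
        useHttps: true);
    CloudBlobClient blobClient = account.CreateCloudBlobClient();

    // stagingContainer is null when AddTask staged no files (or staging failed early).
    if (!string.IsNullOrEmpty(stagingContainer))
    {
        Console.WriteLine("Deleting staging container: {0}", stagingContainer);
        blobClient.GetContainerReference(stagingContainer).DeleteIfExists();
    }
}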
public void RunTaskAndUploadFiles_FilesAreSuccessfullyUploaded()
{
    Action test = () =>
    {
        string containerName = "runtaskanduploadfiles";
        StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment();

        CloudStorageAccount cloudStorageAccount = new CloudStorageAccount(
            new StorageCredentials(storageAccount.StorageAccount, storageAccount.StorageAccountKey),
            blobEndpoint: storageAccount.BlobUri,
            queueEndpoint: null,
            tableEndpoint: null,
            fileEndpoint: null);

        CloudBlobClient blobClient = cloudStorageAccount.CreateCloudBlobClient();

        using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result)
        {
            string jobId = "RunTaskAndUploadFiles-" + TestUtilities.GetMyName();

            try
            {
                // Create container and writeable SAS
                var container = blobClient.GetContainerReference(containerName);
                container.CreateIfNotExists();
                var sas = container.GetSharedAccessSignature(new SharedAccessBlobPolicy()
                {
                    Permissions = SharedAccessBlobPermissions.Write,
                    SharedAccessExpiryTime = DateTime.UtcNow.AddDays(1)
                });
                var fullSas = container.Uri + sas;

                CloudJob createJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = this.poolFixture.PoolId });
                createJob.Commit();

                const string blobPrefix = "foo/bar";
                const string taskId = "simpletask";

                CloudTask unboundTask = new CloudTask(taskId, "echo test")
                {
                    OutputFiles = new List<OutputFile>
                    {
                        new OutputFile(
                            filePattern: @"../*.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(fullSas, blobPrefix)),
                            uploadOptions: new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion))
                    }
                };

                batchCli.JobOperations.AddTask(jobId, unboundTask);

                var tasks = batchCli.JobOperations.ListTasks(jobId);
                var monitor = batchCli.Utilities.CreateTaskStateMonitor();
                monitor.WaitAll(tasks, TaskState.Completed, TimeSpan.FromMinutes(1));

                // Ensure that the correct files got uploaded
                var blobs = container.ListBlobs(useFlatBlobListing: true).ToList();
                Assert.Equal(4, blobs.Count); // there are 4 .txt files created: stdout, stderr, fileuploadout, and fileuploaderr

                foreach (var blob in blobs)
                {
                    var blockBlob = blob as CloudBlockBlob;
                    Assert.StartsWith(blobPrefix, blockBlob.Name);
                }
            }
            finally
            {
                TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();

                var container = blobClient.GetContainerReference(containerName);
                container.DeleteIfExists();
            }
        }
    };

    SynchronizationContextHelper.RunTest(test, TestTimeout);
}
/// <summary>
/// Performs a simple AddTask test, adding the specified task count using the specified parallelOptions and resultHandlerFunc.
/// </summary>
/// <returns>An asynchronous task which completes when the test finishes.</returns>
private async System.Threading.Tasks.Task AddTasksSimpleTestAsync(
    BatchClient batchCli,
    string testName,
    int taskCount,
    BatchClientParallelOptions parallelOptions,
    Func<AddTaskResult, CancellationToken, AddTaskResultStatus> resultHandlerFunc,
    StagingStorageAccount storageCredentials,
    IEnumerable<string> localFilesToStage,
    ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fileStagingArtifacts = null,
    TimeSpan? timeout = null,
    bool useJobOperations = true)
{
    JobOperations jobOperations = batchCli.JobOperations;

    string jobId = "Bulk-" + TestUtilities.GetMyName() + "-" + testName + "-" + useJobOperations;

    try
    {
        CloudJob unboundJob = jobOperations.CreateJob();

        this.testOutputHelper.WriteLine("Initial job commit for job: {0}", jobId);
        unboundJob.PoolInformation = new PoolInformation() { PoolId = "DummyPool" };
        unboundJob.Id = jobId;
        await unboundJob.CommitAsync().ConfigureAwait(false);

        CloudJob boundJob = await jobOperations.GetJobAsync(jobId).ConfigureAwait(false);

        //
        // Add a simple set of tasks
        //
        IEnumerable<string> taskNames = GenerateTaskIds(taskCount);
        List<CloudTask> tasksToAdd = new List<CloudTask>();
        List<CloudTask> tasksToValidateWith = new List<CloudTask>();
        IList<IFileStagingProvider> lastFilesToStageList = null;
        foreach (string taskName in taskNames)
        {
            CloudTask myTask = new CloudTask(taskName, "cmd /c echo hello world");
            CloudTask duplicateReadableTask = new CloudTask(taskName, "cmd /c echo hello world");

            if (localFilesToStage != null && storageCredentials != null)
            {
                myTask.FilesToStage = new List<IFileStagingProvider>();
                lastFilesToStageList = myTask.FilesToStage;

                duplicateReadableTask.FilesToStage = new List<IFileStagingProvider>();
                foreach (string fileToStage in localFilesToStage)
                {
                    duplicateReadableTask.FilesToStage.Add(new FileToStage(fileToStage, storageCredentials));
                    myTask.FilesToStage.Add(new FileToStage(fileToStage, storageCredentials));
                }
            }

            tasksToAdd.Add(myTask);
            tasksToValidateWith.Add(duplicateReadableTask);
        }

        List<BatchClientBehavior> behaviors = new List<BatchClientBehavior>();
        if (resultHandlerFunc != null)
        {
            behaviors.Add(new AddTaskCollectionResultHandler(resultHandlerFunc));
        }

        // Add the tasks
        Stopwatch stopwatch = new Stopwatch();
        this.testOutputHelper.WriteLine("Starting task add");
        stopwatch.Start();

        if (useJobOperations)
        {
            await jobOperations.AddTaskAsync(
                jobId,
                tasksToAdd,
                parallelOptions: parallelOptions,
                fileStagingArtifacts: fileStagingArtifacts,
                timeout: timeout,
                additionalBehaviors: behaviors).ConfigureAwait(continueOnCapturedContext: false);
        }
        else
        {
            await boundJob.AddTaskAsync(
                tasksToAdd,
                parallelOptions: parallelOptions,
                fileStagingArtifacts: fileStagingArtifacts,
                timeout: timeout,
                additionalBehaviors: behaviors).ConfigureAwait(continueOnCapturedContext: false);
        }

        stopwatch.Stop();
        this.testOutputHelper.WriteLine("Task add finished, took: {0}", stopwatch.Elapsed);

        if (lastFilesToStageList != null)
        {
            Assert.Throws<InvalidOperationException>(() => lastFilesToStageList.Add(new FileToStage("test", null)));
        }

        // Ensure the task lists match
        List<CloudTask> tasksFromService = await jobOperations.ListTasks(jobId).ToListAsync().ConfigureAwait(false);
        EnsureTasksListsMatch(tasksToValidateWith, tasksFromService);
    }
    catch (Exception e)
    {
        this.testOutputHelper.WriteLine("Exception: {0}", e.ToString());
        throw;
    }
    finally
    {
        TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait();
    }
}
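GenerateTaskIds and EnsureTasksListsMatch are test helpers defined elsewhere in the suite. For reference, the assumed shape of GenerateTaskIds; the real helper's naming scheme may differ:

// Assumed sketch of the GenerateTaskIds helper used above: produce taskCount
// unique, deterministic task IDs.
private static IEnumerable<string> GenerateTaskIds(int taskCount)
{
    return Enumerable.Range(0, taskCount).Select(i => "task" + i);
}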
public void TestOMJobSpecAndRelease() { Action test = () => { StagingStorageAccount stagingCreds = TestUtilities.GetStorageCredentialsFromEnvironment(); using (BatchClient client = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment())) { string jsId = "JobPrepAndRelease-" + /* "OM-static-c" */ "dynamic-" + CraftTimeString() + "-" + TestUtilities.GetMyName(); try { // increase request timeout interceptor Protocol.RequestInterceptor increaseTimeoutInterceptor = new Protocol.RequestInterceptor((x) => { this.testOutputHelper.WriteLine("TestOMJobSpecAndRelease: setting request timeout. Request type: " + x.GetType().ToString() + ", ClientRequestID: " + x.Options.ClientRequestId); var timeoutOptions = x.Options as Protocol.Models.ITimeoutOptions; timeoutOptions.Timeout = 5 * 60; }); // let's use a timer too CallTimerViaInterceptors timerInterceptor = new CallTimerViaInterceptors(); // seeing client-side timeouts... so increase the durations on every call client.CustomBehaviors.Add(increaseTimeoutInterceptor); // add a call timer spammer/logger client.CustomBehaviors.Add(timerInterceptor.ReqInterceptor); // get some resource files to play with IList <ResourceFile> resFiles = UploadFilesMakeResFiles(stagingCreds); // create job schedule with prep/release { CloudJobSchedule unboundJobSchedule = client.JobScheduleOperations.CreateJobSchedule(jsId, null, null); unboundJobSchedule.JobSpecification = new JobSpecification(new PoolInformation()); unboundJobSchedule.JobSpecification.PoolInformation.PoolId = this.poolFixture.PoolId; unboundJobSchedule.Schedule = new Schedule() { RecurrenceInterval = TimeSpan.FromMinutes(3) }; // add the jobPrep task to the job schedule { JobPreparationTask prep = new JobPreparationTask(JobPrepCommandLine); unboundJobSchedule.JobSpecification.JobPreparationTask = prep; List <EnvironmentSetting> prepEnvSettings = new List <EnvironmentSetting>(); prepEnvSettings.Add(JobPrepEnvSettingOM); prep.EnvironmentSettings = prepEnvSettings; prep.Id = JobPrepId; prep.RerunOnComputeNodeRebootAfterSuccess = JobPrepRerunOnComputeNodeRebootAfterSuccess; prep.ResourceFiles = resFiles; // bug: incorrect type; this should be IList<> /* * prep.ResourceFiles = new List<ResourceFile>(); // this is actually read into our concurrent IList thing * * // why not, merge them in. exercise the concurrent IList thing * foreach (ResourceFile curRF in resFiles) * { * prep.ResourceFiles.Add(curRF); * } */ prep.UserIdentity = new UserIdentity(JobPrepUserSpec); prep.Constraints = JobPrepTaskConstraintsOM; prep.WaitForSuccess = JobPrepWaitForSuccessCreate; } // add a jobRelease task to the job schedule { JobReleaseTask relTask = new JobReleaseTask(JobReleaseTaskCommandLine); unboundJobSchedule.JobSpecification.JobReleaseTask = relTask; List <EnvironmentSetting> relEnvSettings = new List <EnvironmentSetting>(); relEnvSettings.Add(JobRelEnvSettingOM); relTask.EnvironmentSettings = relEnvSettings; relTask.MaxWallClockTime = JobRelMaxWallClockTime; relTask.Id = JobRelId; relTask.ResourceFiles = null; relTask.ResourceFiles = new List <ResourceFile>(); // why not, merge them in; work the concurrent IList thing foreach (ResourceFile curRF in resFiles) { relTask.ResourceFiles.Add(curRF); } relTask.RetentionTime = JobRelRetentionTime; relTask.UserIdentity = new UserIdentity(JobRelUserSpec); } // set JobCommonEnvSettings { List <EnvironmentSetting> jobCommonES = new List <EnvironmentSetting>(); jobCommonES.Add(JobCommonEnvSettingOM); unboundJobSchedule.JobSpecification.CommonEnvironmentSettings = jobCommonES; } // add the job schedule to the service unboundJobSchedule.Commit(); } // now we have a job schedule with job prep/release... test the values on the job schedule { CloudJobSchedule boundJobSchedule = client.JobScheduleOperations.GetJobSchedule(jsId); Assert.NotNull(boundJobSchedule); Assert.NotNull(boundJobSchedule.JobSpecification); Assert.NotNull(boundJobSchedule.JobSpecification.JobPreparationTask); Assert.NotNull(boundJobSchedule.JobSpecification.JobReleaseTask); Assert.NotNull(boundJobSchedule.JobSpecification.CommonEnvironmentSettings); AssertGoodCommonEnvSettingsOM(boundJobSchedule.JobSpecification.CommonEnvironmentSettings); AssertGoodJobPrepTaskOM(boundJobSchedule.JobSpecification.JobPreparationTask); AssertGoodJobReleaseTaskOM(boundJobSchedule.JobSpecification.JobReleaseTask); AssertGoodResourceFiles(resFiles, boundJobSchedule.JobSpecification.JobPreparationTask.ResourceFiles); AssertGoodResourceFiles(resFiles, boundJobSchedule.JobSpecification.JobReleaseTask.ResourceFiles); //TODO: test mutability } CloudJobSchedule boundJobScheduleWithJob; // set in the on-job test below // test the values on the job { boundJobScheduleWithJob = TestUtilities.WaitForJobOnJobSchedule(client.JobScheduleOperations, jsId); CloudJob bndJob = client.JobOperations.GetJob(boundJobScheduleWithJob.ExecutionInformation.RecentJob.Id); Assert.NotNull(bndJob); Assert.NotNull(bndJob.CommonEnvironmentSettings); Assert.NotNull(bndJob.JobPreparationTask); Assert.NotNull(bndJob.JobReleaseTask); AssertGoodCommonEnvSettingsOM(bndJob.CommonEnvironmentSettings as IList <EnvironmentSetting> /* we know it is our internal IList */); AssertGoodJobPrepTaskOM(bndJob.JobPreparationTask); AssertGoodJobReleaseTaskOM(bndJob.JobReleaseTask); AssertGoodResourceFiles(resFiles, bndJob.JobPreparationTask.ResourceFiles); AssertGoodResourceFiles(resFiles, bndJob.JobReleaseTask.ResourceFiles); //TODO: test immutability } // used for the get-status test CloudJobSchedule updatedJobSchedule; // test update on the job schedule's jobprep/jobrelease { // change props boundJobScheduleWithJob.JobSpecification.JobPreparationTask.WaitForSuccess = JobPrepWaitForSuccessUpdate; // commit changes boundJobScheduleWithJob.Commit(); // get new values updatedJobSchedule = client.JobScheduleOperations.GetJobSchedule(jsId); // confirm values changed Assert.Equal(JobPrepWaitForSuccessUpdate, updatedJobSchedule.JobSpecification.JobPreparationTask.WaitForSuccess); } TestGetPrepReleaseStatusCalls(client, updatedJobSchedule, this.poolFixture.PoolId, resFiles); } finally { // cleanup TestUtilities.DeleteJobScheduleIfExistsAsync(client, jsId).Wait(); } } }; SynchronizationContextHelper.RunTest(test, LongTestTimeout); }
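The timeout-raising interceptor in the test above can be factored into a small reusable helper. A minimal sketch, assuming only the Protocol.RequestInterceptor and ITimeoutOptions contracts already visible in the test (Timeout is expressed in seconds):

// Sketch: returns an interceptor that raises the server-side timeout on every
// request whose options implement ITimeoutOptions; other requests pass through.
static Protocol.RequestInterceptor CreateTimeoutInterceptor(TimeSpan timeout)
{
    return new Protocol.RequestInterceptor(request =>
    {
        if (request.Options is Protocol.Models.ITimeoutOptions timeoutOptions)
        {
            timeoutOptions.Timeout = (int)timeout.TotalSeconds;
        }
    });
}

Unlike the inline version, the pattern match guards against request types that carry no timeout options, instead of risking a NullReferenceException on the as-cast.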
public static void JobMain(string[] args) { //Load the configuration TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig(); StagingStorageAccount stagingStorageAccount = new StagingStorageAccount( configuration.StorageAccountName, configuration.StorageAccountKey, configuration.StorageAccountBlobEndpoint); IBatchClient client = BatchClient.Connect(configuration.BatchServiceUrl, new BatchCredentials(configuration.BatchAccountName, configuration.BatchAccountKey)); string stagingContainer = null; //Create a pool (if user hasn't provided one) if (configuration.ShouldCreatePool) { using (IPoolManager pm = client.OpenPoolManager()) { //OSFamily 4 == OS 2012 R2 //You can learn more about os families and versions at: //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx ICloudPool pool = pm.CreatePool(configuration.PoolName, targetDedicated: configuration.PoolSize, osFamily: "4", vmSize: "small"); Console.WriteLine("Adding pool {0}", configuration.PoolName); pool.Commit(); } } try { using (IWorkItemManager wm = client.OpenWorkItemManager()) { IToolbox toolbox = client.OpenToolbox(); //Use the TaskSubmissionHelper to help us create a WorkItem and add tasks to it. ITaskSubmissionHelper taskSubmissionHelper = toolbox.CreateTaskSubmissionHelper(wm, configuration.PoolName); taskSubmissionHelper.WorkItemName = configuration.WorkItemName; FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount); FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount); string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName); Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName); for (int i = 1; i <= configuration.NumberOfTasks; i++) { ICloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}", TopNWordsExeName, bookFileUri, configuration.NumberOfTopWords, configuration.StorageAccountName, configuration.StorageAccountKey)); //This is the list of files to stage to a container -- for each TaskSubmissionHelper one container is created and //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in //the TaskSubmissionHelper's container). task.FilesToStage = new List <IFileStagingProvider> { topNWordExe, storageDll }; taskSubmissionHelper.AddTask(task); } //Commit all the tasks to the Batch Service. IJobCommitUnboundArtifacts artifacts = taskSubmissionHelper.Commit() as IJobCommitUnboundArtifacts; foreach (var fileStagingArtifact in artifacts.FileStagingArtifacts) { SequentialFileStagingArtifact stagingArtifact = fileStagingArtifact.Value as SequentialFileStagingArtifact; if (stagingArtifact != null) { stagingContainer = stagingArtifact.BlobContainerCreated; Console.WriteLine("Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.", stagingArtifact.BlobContainerCreated); } } //Get the job to monitor status. 
ICloudJob job = wm.GetJob(artifacts.WorkItemName, artifacts.JobName); Console.Write("Waiting for tasks to complete ..."); // Wait up to 20 minutes for all tasks to reach the completed state client.OpenToolbox().CreateTaskStateMonitor().WaitAll(job.ListTasks(), TaskState.Completed, TimeSpan.FromMinutes(20)); Console.WriteLine("Done."); foreach (ICloudTask task in job.ListTasks()) { Console.WriteLine("Task " + task.Name + " says:\n" + task.GetTaskFile(Constants.StandardOutFileName).ReadAsString()); Console.WriteLine(task.GetTaskFile(Constants.StandardErrorFileName).ReadAsString()); } } } finally { //Delete the pool that we created if (configuration.ShouldCreatePool) { using (IPoolManager pm = client.OpenPoolManager()) { Console.WriteLine("Deleting pool: {0}", configuration.PoolName); pm.DeletePool(configuration.PoolName); } } //Delete the workitem that we created if (configuration.ShouldDeleteWorkItem) { using (IWorkItemManager wm = client.OpenWorkItemManager()) { Console.WriteLine("Deleting work item: {0}", configuration.WorkItemName); wm.DeleteWorkItem(configuration.WorkItemName); } } //Delete the containers we created if (configuration.ShouldDeleteContainer) { DeleteContainers(configuration, stagingContainer); } } }
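UploadBookFileToCloudBlob is called in both TopNWords variants but never shown. A minimal sketch, assuming the classic WindowsAzure.Storage client and a hypothetical "books" container name; the real helper may differ:

// Sketch (Microsoft.WindowsAzure.Storage / .Blob namespaces): uploads the book
// and returns a read-only SAS URL the tasks can download from.
private static string UploadBookFileToCloudBlob(TopNWordsConfiguration configuration, string fileName)
{
    CloudStorageAccount account = CloudStorageAccount.Parse(string.Format(
        "DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}",
        configuration.StorageAccountName, configuration.StorageAccountKey));
    CloudBlobContainer container = account.CreateCloudBlobClient().GetContainerReference("books"); // hypothetical name
    container.CreateIfNotExists();

    CloudBlockBlob blob = container.GetBlockBlobReference(fileName);
    blob.UploadFromFile(fileName);

    string sas = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy
    {
        Permissions = SharedAccessBlobPermissions.Read,
        SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddDays(1)
    });
    return blob.Uri + sas;
}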
public void ComputeNodeUploadLogs() { Action test = () => { using (BatchClient batchCli = TestUtilities.OpenBatchClientFromEnvironmentAsync().Result) { var node = batchCli.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).First(); // Generate a storage container URL StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment(); CloudStorageAccount cloudStorageAccount = new CloudStorageAccount( new StorageCredentials(storageAccount.StorageAccount, storageAccount.StorageAccountKey), blobEndpoint: storageAccount.BlobUri, queueEndpoint: null, tableEndpoint: null, fileEndpoint: null); CloudBlobClient blobClient = cloudStorageAccount.CreateCloudBlobClient(); const string containerName = "computenodelogscontainer"; var container = blobClient.GetContainerReference(containerName); try { container.CreateIfNotExists(); // Ensure that there are no items in the container to begin with var blobs = container.ListBlobs(); Assert.Empty(blobs); var sas = container.GetSharedAccessSignature(new SharedAccessBlobPolicy() { Permissions = SharedAccessBlobPermissions.Write, SharedAccessExpiryTime = DateTime.UtcNow.AddDays(1) }); var fullSas = container.Uri + sas; var startTime = DateTime.UtcNow.Subtract(TimeSpan.FromMinutes(5)); var result = batchCli.PoolOperations.UploadComputeNodeBatchServiceLogs( this.poolFixture.PoolId, node.Id, fullSas, startTime); Assert.NotEqual(0, result.NumberOfFilesUploaded); Assert.NotEmpty(result.VirtualDirectoryName); // Allow up to 2 minutes for the files to be uploaded DateTime timeoutAt = DateTime.UtcNow.AddMinutes(2); while (DateTime.UtcNow < timeoutAt) { blobs = container.ListBlobs(); if (blobs.Any()) { break; } Thread.Sleep(TimeSpan.FromSeconds(5)); // poll politely instead of spinning against the storage endpoint } Assert.NotEmpty(blobs); } finally { container.DeleteIfExists(); } } }; SynchronizationContextHelper.RunTest(test, TestTimeout); }
private static async Task MainAsync() { const string poolId = "FileHandlingPool"; const string jobId = "FileHandlingJobDemo"; var settings = Config.LoadAccountSettings(); SetupStorage(settings.StorageAccountName, settings.StorageAccountKey); BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(settings.BatchServiceUrl, settings.BatchAccountName, settings.BatchAccountKey); using (BatchClient batchClient = BatchClient.Open(cred)) { var pool = await BatchUtils.CreatePoolIfNotExistAsync(batchClient, poolId); var job = await BatchUtils.CreateJobIfNotExistAsync(batchClient, poolId, jobId); //set up auto storage file ResourceFile autoStorageFile = ResourceFile.FromAutoStorageContainer(AutoStorageContainerName, AutoStorageFileName); Console.WriteLine("\n[INFO] Autostorage resource File reference: "); Console.WriteLine("AutoStorageContainer: " + autoStorageFile.AutoStorageContainerName); Console.WriteLine("FilePath: " + autoStorageFile.FilePath); //upload file to external storage and add it as a resource file string storageConnectionString = $"DefaultEndpointsProtocol=https;AccountName={settings.StorageAccountName};AccountKey={settings.StorageAccountKey}"; CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString); CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient(); CloudBlobContainer externalContainer = blobClient.GetContainerReference(ExternalStorageContainerName); await externalContainer.CreateIfNotExistsAsync(); var externalFile = await UploadFileToContainer(blobClient, ExternalStorageContainerName, "resource_files/resource_file.txt", "resource_file.txt"); Console.WriteLine("\n[INFO] External storage resource File reference:"); Console.WriteLine("SAS Url: " + externalFile.HttpUrl); Console.WriteLine("FilePath: " + externalFile.FilePath); // using staging files API var filesToStage = new List <IFileStagingProvider>(); StagingStorageAccount fileStagingStorageAccount = new StagingStorageAccount( storageAccount: settings.StorageAccountName, storageAccountKey: settings.StorageAccountKey, blobEndpoint: storageAccount.BlobEndpoint.ToString()); FileToStage stagedFile = new FileToStage("resource_files/staged_file.txt", fileStagingStorageAccount); Console.WriteLine("\n[INFO] Staged File added:"); Console.WriteLine("Local File: " + stagedFile.LocalFileToStage); Console.WriteLine("Node File: " + stagedFile.NodeFileName); filesToStage.Add(stagedFile); // setup output files // Generate SAS for outputcontainer CloudBlobContainer outputContainer = blobClient.GetContainerReference(OutputContainerName); await outputContainer.CreateIfNotExistsAsync(); string containerSas = outputContainer.GetSharedAccessSignature(new SharedAccessBlobPolicy() { Permissions = SharedAccessBlobPermissions.Write, SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddDays(1) }); string containerUrl = outputContainer.Uri.AbsoluteUri + containerSas; Console.WriteLine("\n[INFO] Output container: " + containerUrl); Console.WriteLine("\nPress return to continue..."); Console.ReadLine(); // Create tasks List <CloudTask> tasks = new List <CloudTask>(); for (var i = 1; i <= 10; i++) { var taskId = i.ToString().PadLeft(3, '0'); var commandLine = $@"/bin/bash -c ""echo 'Hello from {taskId}' && printf 'root dir:\n' > output.txt && ls -la >> output.txt && printf '\ninput dir:\n' >> output.txt && ls -la input >> output.txt"""; var task = new CloudTask(taskId, commandLine); // add resource files to task (one autostorage, one in external storage) task.ResourceFiles = new[] { autoStorageFile, 
externalFile }; // add staged files task.FilesToStage = filesToStage; // add output files var outputFiles = new List <OutputFile> { new OutputFile( filePattern: @"../std*.txt", destination: new OutputFileDestination(new OutputFileBlobContainerDestination( containerUrl: containerUrl, path: taskId)), uploadOptions: new OutputFileUploadOptions( uploadCondition: OutputFileUploadCondition.TaskCompletion)), new OutputFile( filePattern: @"output.txt", destination: new OutputFileDestination(new OutputFileBlobContainerDestination( containerUrl: containerUrl, path: taskId + "/output.txt" /* blob paths use forward slashes; these tasks run on a Linux pool */)), uploadOptions: new OutputFileUploadOptions( uploadCondition: OutputFileUploadCondition.TaskCompletion)), }; task.OutputFiles = outputFiles; tasks.Add(task); } Console.WriteLine("Submitting tasks and awaiting completion..."); // Add all tasks to the job. batchClient.JobOperations.AddTask(job.Id, tasks); await BatchUtils.WaitForTasksAndPrintOutputAsync(batchClient, job.ListTasks(), TimeSpan.FromMinutes(30)); // Clean up Batch resources (if the user so chooses) Console.WriteLine(); Console.Write("Delete job? [yes] no: "); string response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { batchClient.JobOperations.DeleteJob(jobId); } Console.Write("Delete pool? [yes] no: "); response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { batchClient.PoolOperations.DeletePool(poolId); } } }
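UploadFileToContainer is used above to build the external-storage ResourceFile but is not shown. A minimal sketch, assuming the classic WindowsAzure.Storage client and the ResourceFile.FromUrl factory this sample already uses:

// Sketch: upload a local file, then wrap its read-only SAS URL in a ResourceFile.
private static async Task<ResourceFile> UploadFileToContainer(CloudBlobClient blobClient, string containerName, string localFilePath, string blobName)
{
    CloudBlobContainer container = blobClient.GetContainerReference(containerName);
    CloudBlockBlob blob = container.GetBlockBlobReference(blobName);
    await blob.UploadFromFileAsync(localFilePath);

    // Read-only SAS so the compute node can download the blob without account keys.
    string sas = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy
    {
        Permissions = SharedAccessBlobPermissions.Read,
        SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddDays(1)
    });

    return ResourceFile.FromUrl(blob.Uri + sas, filePath: blobName);
}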
static void Main(string[] args) { Console.WriteLine("What to do?\r\n(c)reatepool\r\n(s)cheduletasks\r\n(d)elete pool"); var actionToDo = Console.ReadLine(); #region Reading configuration Data var accountRegion = ConfigurationManager.AppSettings["BatchRegion"]; var accountName = ConfigurationManager.AppSettings["BatchAccountName"]; var accountKey = ConfigurationManager.AppSettings["BatchAccountKey"]; var accountBaseUrl = string.Format("https://{0}.{1}.batch.azure.com", accountName, accountRegion); var storageAccountName = ConfigurationManager.AppSettings["BatchDemoStorageAccount"]; var storageAccountKey = ConfigurationManager.AppSettings["BatchDemoStorageAccountKey"]; var storageAccount = CloudStorageAccount.Parse(string.Format( "DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}", storageAccountName, storageAccountKey)); var blobClient = storageAccount.CreateCloudBlobClient(); var blobOcrSourceContainer = blobClient.GetContainerReference("ocr-source"); var blobTesseractContainer = blobClient.GetContainerReference("tesseract"); var stagingStorageCred = new StagingStorageAccount( storageAccountName, storageAccountKey, string.Format("https://{0}.blob.core.windows.net", storageAccountName)); #endregion Console.WriteLine("Creating batch client to access Azure Batch Service..."); var credentials = new BatchSharedKeyCredentials(accountBaseUrl, accountName, accountKey); var batchClient = BatchClient.Open(credentials); Console.WriteLine("Batch client created successfully!"); #region Setup Compute Pool or delete compute pool if (actionToDo == "c" || actionToDo == "d") { Console.WriteLine(); Console.WriteLine("Creating pool if needed..."); var poolExists = false; try { var existingPool = batchClient.PoolOperations.GetPool(PoolName); poolExists = true; } catch (Exception) { poolExists = false; } if ((actionToDo == "c") && !poolExists) { #region Get Resource Files and files to process from BLOB storage Console.WriteLine(); var binaryResourceFiles = new List <ResourceFile>(); Console.WriteLine("Get list of 'resource files' required for execution from BLOB storage..."); foreach (var resFile in blobTesseractContainer.ListBlobs(useFlatBlobListing: true)) { var sharedAccessSig = CreateSharedAccessSignature(blobTesseractContainer, resFile); var fullUriString = resFile.Uri.ToString(); var relativeUriString = fullUriString.Replace(blobTesseractContainer.Uri + "/", ""); Console.WriteLine("- {0} ", relativeUriString); binaryResourceFiles.Add( new ResourceFile ( fullUriString + sharedAccessSig, relativeUriString.Replace("/", @"\") ) ); } Console.WriteLine(); #endregion Console.WriteLine("Creating the pool..."); var newPool = batchClient.PoolOperations.CreatePool ( PoolName, "3", "small", 5 ); newPool.StartTask = new StartTask { ResourceFiles = binaryResourceFiles, CommandLine = "cmd /c CopyFiles.cmd", WaitForSuccess = true }; newPool.CommitAsync().Wait(); Console.WriteLine("Pool {0} created!", PoolName); } else if ((actionToDo == "d") && poolExists) { Console.WriteLine("Deleting the pool..."); batchClient.PoolOperations.DeletePoolAsync(PoolName).Wait(); Console.WriteLine("Pool {0} deleted!", PoolName); } else { Console.WriteLine("Action {0} not executed since pool does {1}!", actionToDo == "c" ? "'Create Pool'" : "'Delete Pool'", (poolExists) ? 
"exist, already" : "not exist, anyway"); } } #endregion #region Scheduling and running jobs if (actionToDo == "s") { #region Get the Task Files Console.WriteLine(); var filesToProcess = new List <ResourceFile>(); Console.WriteLine("Get list of 'files' to be processed in tasks..."); foreach (var fileToProc in blobOcrSourceContainer.ListBlobs(useFlatBlobListing: true)) { var sharedAccessSig = CreateSharedAccessSignature(blobOcrSourceContainer, fileToProc); var fullUriString = fileToProc.Uri.ToString(); var relativeUriString = fullUriString.Replace(blobOcrSourceContainer.Uri + "/", ""); Console.WriteLine("- {0}", relativeUriString); filesToProcess.Add( new ResourceFile( fullUriString + sharedAccessSig, relativeUriString.Replace("/", @"\") ) ); } #endregion Console.WriteLine(); Console.WriteLine("Creating a job with its tasks..."); var jobName = string.Format("ocr-{0}", DateTime.UtcNow.Ticks); Console.WriteLine("- Creating a new job {0}...", jobName); var ocrJob = batchClient.JobOperations.CreateJob(); ocrJob.Id = jobName; ocrJob.PoolInformation = new PoolInformation { PoolId = PoolName }; ocrJob.Commit(); Console.WriteLine("- Adding tasks to the job of the work item."); var taskNr = 0; var job = batchClient.JobOperations.GetJob(jobName); foreach (var ocrFile in filesToProcess) { var taskName = string.Format("task_no_{0}", taskNr++); Console.WriteLine(" - {0} for file {1}", taskName, ocrFile.FilePath); var taskCmd = string.Format( "cmd /c %WATASK_TVM_ROOT_DIR%\\shared\\BatchTesseractWrapper.exe \"{0}\" \"{1}\"", ocrFile.BlobSource, Path.GetFileNameWithoutExtension(ocrFile.FilePath)); var cloudTask = new CloudTask(taskName, taskCmd); job.AddTask(cloudTask); } Console.WriteLine("- All tasks created, committing job!"); job.Commit(); Console.WriteLine(); Console.WriteLine("Waiting for job to be completed..."); job.Refresh(); var stateMonitor = batchClient.Utilities.CreateTaskStateMonitor(); stateMonitor.WaitAll(job.ListTasks(), TaskState.Completed, new TimeSpan(0, 30, 0)); Console.WriteLine("All tasks completed!"); var tasksFinalResult = job.ListTasks(); foreach (var t in tasksFinalResult) { Console.WriteLine("- Task {0}: {1}, exit code {2}", t.Id, t.State, t.ExecutionInformation.ExitCode); } } Console.WriteLine(); Console.WriteLine("Press ENTER to quit!"); Console.ReadLine(); #endregion }
public static void JobMain(string[] args) { //Load the configuration TopNWordsConfiguration configuration = TopNWordsConfiguration.LoadConfigurationFromAppConfig(); StagingStorageAccount stagingStorageAccount = new StagingStorageAccount( configuration.StorageAccountName, configuration.StorageAccountKey, configuration.StorageAccountBlobEndpoint); using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(configuration.BatchServiceUrl, configuration.BatchAccountName, configuration.BatchAccountKey))) { string stagingContainer = null; //Create a pool (if user hasn't provided one) if (configuration.ShouldCreatePool) { //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at: //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx CloudPool pool = client.PoolOperations.CreatePool(configuration.PoolId, targetDedicated: configuration.PoolSize, osFamily: "4", virtualMachineSize: "small"); Console.WriteLine("Adding pool {0}", configuration.PoolId); try { pool.Commit(); } catch (AggregateException ae) { // Go through all exceptions and dump useful information ae.Handle(x => { Console.Error.WriteLine("Creating pool ID {0} failed", configuration.PoolId); if (x is BatchException) { BatchException be = x as BatchException; Console.WriteLine(be.ToString()); Console.WriteLine(); } else { Console.WriteLine(x); } // can't continue without a pool return(false); }); } } try { Console.WriteLine("Creating job: " + configuration.JobId); // get an empty unbound Job CloudJob unboundJob = client.JobOperations.CreateJob(); unboundJob.Id = configuration.JobId; unboundJob.PoolInformation = new PoolInformation() { PoolId = configuration.PoolId }; // Commit Job to create it in the service unboundJob.Commit(); // create file staging objects that represent the executable and its dependent assembly to run as the task. // These files are copied to every node before the corresponding task is scheduled to run on that node. FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount); FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount); // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument. // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled // by Storage which will lengthen the overall processing time. // // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus // complexity. string bookFileUri = UploadBookFileToCloudBlob(configuration, configuration.BookFileName); Console.WriteLine("{0} uploaded to cloud", configuration.BookFileName); // initialize a collection to hold the tasks that will be submitted in their entirety List <CloudTask> tasksToRun = new List <CloudTask>(configuration.NumberOfTasks); for (int i = 1; i <= configuration.NumberOfTasks; i++) { CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}", TopNWordsExeName, bookFileUri, configuration.NumberOfTopWords, configuration.StorageAccountName, configuration.StorageAccountKey)); //This is the list of files to stage to a container -- for each job, one container is created and //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in //the container). 
task.FilesToStage = new List <IFileStagingProvider> { topNWordExe, storageDll }; tasksToRun.Add(task); } // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged. // The container information is used later on to remove these files from Storage. ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >(); client.JobOperations.AddTask(configuration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag); // loop through the bag of artifacts, looking for the one that matches our staged files. Once there, // capture the name of the container holding the files so they can be deleted later on if that option // was configured in the settings. foreach (var fsBagItem in fsArtifactBag) { IFileStagingArtifact fsValue; if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue)) { SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact; if (stagingArtifact != null) { stagingContainer = stagingArtifact.BlobContainerCreated; Console.WriteLine( "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.", stagingArtifact.BlobContainerCreated); } } } //Get the job to monitor status. CloudJob job = client.JobOperations.GetJob(configuration.JobId); Console.Write("Waiting for tasks to complete ... "); // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks. IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id")); client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20)); Console.WriteLine("tasks are done."); foreach (CloudTask t in ourTasks) { Console.WriteLine("Task " + t.Id); Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString()); Console.WriteLine(); Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString()); } } finally { //Delete the pool that we created if (configuration.ShouldCreatePool) { Console.WriteLine("Deleting pool: {0}", configuration.PoolId); client.PoolOperations.DeletePool(configuration.PoolId); } //Delete the job that we created if (configuration.ShouldDeleteJob) { Console.WriteLine("Deleting job: {0}", configuration.JobId); client.JobOperations.DeleteJob(configuration.JobId); } //Delete the containers we created if (configuration.ShouldDeleteContainer) { DeleteContainers(configuration, stagingContainer); } } } }
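DeleteContainers appears in the cleanup path of several of these samples but is not shown. A minimal sketch, assuming the classic WindowsAzure.Storage client; only the staging container reported through SequentialFileStagingArtifact.BlobContainerCreated is known here, so anything else the real helper removes is out of scope:

// Sketch: remove the auto-created file staging container once the job is done.
private static void DeleteContainers(TopNWordsConfiguration configuration, string stagingContainer)
{
    CloudStorageAccount account = CloudStorageAccount.Parse(string.Format(
        "DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}",
        configuration.StorageAccountName, configuration.StorageAccountKey));
    CloudBlobClient blobClient = account.CreateCloudBlobClient();

    if (!string.IsNullOrEmpty(stagingContainer))
    {
        blobClient.GetContainerReference(stagingContainer).DeleteIfExists();
        Console.WriteLine("Deleted staging container: {0}", stagingContainer);
    }
}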
public async Task RunTaskAndUploadFiles_FilesAreSuccessfullyUploaded() { async Task test() { using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment()); string jobId = "RunTaskAndUploadFiles-" + TestUtilities.GetMyName(); string containerName = "runtaskanduploadfiles"; StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment(); BlobServiceClient blobClient = BlobUtilities.GetBlobServiceClient(storageAccount); BlobContainerClient containerClient = BlobUtilities.GetBlobContainerClient(containerName, blobClient, storageAccount); try { // Create container and writeable SAS containerClient.CreateIfNotExists(); string sasUri = BlobUtilities.GetWriteableSasUri(containerClient, storageAccount); CloudJob createJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation { PoolId = poolFixture.PoolId }); createJob.Commit(); const string blobPrefix = "foo/bar"; const string taskId = "simpletask"; OutputFileBlobContainerDestination containerDestination = new OutputFileBlobContainerDestination(sasUri, blobPrefix); containerDestination.UploadHeaders = new List <HttpHeader> { new HttpHeader("x-ms-blob-content-type", "test-type") }; OutputFileDestination destination = new OutputFileDestination(containerDestination); OutputFileUploadOptions uploadOptions = new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion); CloudTask unboundTask = new CloudTask(taskId, "echo test") { OutputFiles = new List <OutputFile> { new OutputFile(@"../*.txt", destination, uploadOptions) } }; batchCli.JobOperations.AddTask(jobId, unboundTask); IPagedEnumerable <CloudTask> tasks = batchCli.JobOperations.ListTasks(jobId); TaskStateMonitor monitor = batchCli.Utilities.CreateTaskStateMonitor(); monitor.WaitAll(tasks, TaskState.Completed, TimeSpan.FromMinutes(1)); // Ensure that the correct files got uploaded List <BlobItem> blobs = containerClient.GetAllBlobs(); Assert.Equal(4, blobs.Count()); //There are 4 .txt files created, stdout, stderr, fileuploadout, and fileuploaderr foreach (BlobItem blob in blobs) { Assert.StartsWith(blobPrefix, blob.Name); Assert.Equal("test-type", blob.Properties.ContentType); // Ensure test Upload header was applied to blob. } } finally { await TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).ConfigureAwait(false); containerClient.DeleteIfExists(); } } await SynchronizationContextHelper.RunTestAsync(test, TestTimeout); }
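GetAllBlobs is not part of the Azure.Storage.Blobs v12 surface, so it is presumably a small test extension. A minimal sketch of what it likely does:

// Sketch: materialize every page of the container listing into one list
// (Pageable<BlobItem> implements IEnumerable<BlobItem>, so LINQ suffices).
public static List<BlobItem> GetAllBlobs(this BlobContainerClient containerClient)
{
    return containerClient.GetBlobs().ToList();
}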
public static void JobMain(string[] args) { Console.WriteLine("Setting up Batch Process - ImageBlur. \nPress Enter to begin."); Console.WriteLine("-------------------------------------------------------------"); Console.ReadLine(); Settings imageBlurSettings = Settings.Default; AccountSettings accountSettings = AccountSettings.Default; /* Setting up credentials for Batch and Storage accounts * ===================================================== */ StorageCredentials storageCredentials = new StorageCredentials( accountSettings.StorageAccountName, accountSettings.StorageAccountKey); CloudStorageAccount storageAccount = new CloudStorageAccount(storageCredentials, useHttps: true); StagingStorageAccount stagingStorageAccount = new StagingStorageAccount( accountSettings.StorageAccountName, accountSettings.StorageAccountKey, storageAccount.BlobEndpoint.ToString()); BatchSharedKeyCredentials batchCredentials = new BatchSharedKeyCredentials( accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey); using (BatchClient client = BatchClient.Open(batchCredentials)) { string stagingContainer = null; /* Setting up pool to run job and tasks in * ======================================= */ CreatePool(client, imageBlurSettings, accountSettings); try { /* Setting up Job ------------------------ * ======================================= */ Console.WriteLine("Creating job {0}. \nPress Enter to continue.", imageBlurSettings.JobId); Console.ReadLine(); CloudJob unboundJob = client.JobOperations.CreateJob(); unboundJob.Id = imageBlurSettings.JobId; unboundJob.PoolInformation = new PoolInformation() { PoolId = imageBlurSettings.PoolId }; unboundJob.Commit(); /* Uploading Source Image(s) to run varying degrees of Blur on * =========================================================== * Here, the input data is uploaded separately to Storage and * its URI is passed to the task as an argument. */ Console.WriteLine("Uploading source images. \nPress Enter to continue."); Console.ReadLine(); string[] sourceImages = imageBlurSettings.SourceImageNames.Split(','); List<String> sourceImageUris = new List<String>(); for( var i = 0; i < sourceImages.Length; i++) { Console.WriteLine(" Uploading {0}.", sourceImages[i]); sourceImageUris.Add( UploadSourceImagesFileToCloudBlob(accountSettings, sourceImages[i])); Console.WriteLine(" Source Image uploaded to: <{0}>.", sourceImageUris[i]); } Console.WriteLine(); Console.WriteLine("All Source Images uploaded. \nPress Enter to continue."); Console.ReadLine(); /* Setting up tasks with dependencies ---------------- * =================================================== */ Console.WriteLine("Setting up files to stage for tasks. 
\nPress Enter to continue."); Console.ReadLine(); // Setting up Files to Stage - Files to upload into each task (executables and dependent assemblies) FileToStage imageBlurExe = new FileToStage(ImageBlurExeName, stagingStorageAccount); FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount); FileToStage imageProcessorDll = new FileToStage(ImageProcessorDllName, stagingStorageAccount); // initialize collection to hold tasks that will be submitted in their entirety List<CloudTask> tasksToRun = new List<CloudTask>(imageBlurSettings.NumberOfTasks); for (int i = 0; i < imageBlurSettings.NumberOfTasks; i++) { // create individual tasks (cmd line passed in as argument) CloudTask task = new CloudTask("task_" + i, String.Format("{0} --Task {1} {2} {3}", ImageBlurExeName, sourceImageUris[i], accountSettings.StorageAccountName, accountSettings.StorageAccountKey)); // list of files to stage to a container -- for each job, one container is created and // files all resolve to Azure Blobs by their name task.FilesToStage = new List<IFileStagingProvider> { imageBlurExe, storageDll, imageProcessorDll }; tasksToRun.Add(task); Console.WriteLine("\t task {0} has been added", "task_" + i); } Console.WriteLine(); /* Commit tasks with dependencies ---------------- * =============================================== */ Console.WriteLine("Running Tasks. \nPress Enter to continue."); Console.WriteLine("-------------------------------------------------------------"); Console.ReadLine(); ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>> fsArtifactBag = new ConcurrentBag<ConcurrentDictionary<Type, IFileStagingArtifact>>(); client.JobOperations.AddTask(imageBlurSettings.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag); foreach (var fsBagItem in fsArtifactBag) { IFileStagingArtifact fsValue; if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue)) { SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact; if (stagingArtifact != null) { stagingContainer = stagingArtifact.BlobContainerCreated; Console.WriteLine( "Uploaded files to container: {0} -- \nyou will be charged for their storage unless you delete them.", stagingArtifact.BlobContainerCreated); } } } //Get the job to monitor status. CloudJob job = client.JobOperations.GetJob(imageBlurSettings.JobId); Console.WriteLine(); Console.Write("Waiting for tasks to complete ... "); IPagedEnumerable<CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id")); client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20)); Console.WriteLine("tasks are done."); Console.WriteLine(); Console.WriteLine("See below for Stdout / Stderr for each node."); Console.WriteLine("============================================"); /* Display stdout/stderr for each task on completion * ================================================= */ foreach (CloudTask t in ourTasks) { Console.WriteLine("Task " + t.Id + ":"); Console.WriteLine(" stdout:" + Environment.NewLine + t.GetNodeFile("stdout.txt").ReadAsString()); Console.WriteLine(); Console.WriteLine(" stderr:" + Environment.NewLine + t.GetNodeFile("stderr.txt").ReadAsString()); } Console.WriteLine(); Console.WriteLine("Please find the resulting images in storage. 
\nPress Enter to continue."); Console.WriteLine("======================================================================="); Console.ReadLine(); } finally { /* If configured as such, delete the resources that were used in this process * ========================================================================== */ //Delete the pool that we created if (imageBlurSettings.DeletePool) { Console.WriteLine("Deleting Pool. \nPress Enter to continue."); Console.ReadLine(); Console.WriteLine("Deleting pool: {0}", imageBlurSettings.PoolId); client.PoolOperations.DeletePool(imageBlurSettings.PoolId); } //Delete the job that we created if (imageBlurSettings.DeleteJob) { Console.WriteLine("Deleting Job. \nPress Enter to continue."); Console.ReadLine(); Console.WriteLine("Deleting job: {0}", imageBlurSettings.JobId); client.JobOperations.DeleteJob(imageBlurSettings.JobId); } //Delete the containers we created if (imageBlurSettings.DeleteContainer) { Console.WriteLine("Deleting Container. \nPress Enter to continue."); Console.ReadLine(); DeleteContainers(accountSettings, stagingContainer); } Console.WriteLine(); Console.WriteLine("Please check the Azure portal to make sure that all resources you want deleted are in fact deleted"); Console.WriteLine("=================================================================================================="); Console.WriteLine(); Console.WriteLine("Press Enter to exit the program"); Console.ReadLine(); Console.WriteLine("Exiting program..."); } } }
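CreatePool is invoked at the top of this sample but not shown. A minimal sketch, assuming the same pool-creation pattern as the TopNWords sample above; the PoolSize setting is a guess at the real property name:

// Sketch: create the pool if needed, treating "it already exists" as success.
private static void CreatePool(BatchClient client, Settings imageBlurSettings, AccountSettings accountSettings)
{
    CloudPool pool = client.PoolOperations.CreatePool(
        imageBlurSettings.PoolId,
        osFamily: "4", // OS Family 4 == Windows Server 2012 R2
        virtualMachineSize: "small",
        targetDedicated: imageBlurSettings.PoolSize); // hypothetical setting

    try
    {
        pool.Commit();
    }
    catch (BatchException be) when (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists)
    {
        Console.WriteLine("Pool {0} already exists; reusing it.", imageBlurSettings.PoolId);
    }
}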
public static BlobContainerClient GetBlobContainerClient(string containerName, StagingStorageAccount storageAccount = null) { return(GetBlobContainerClient(containerName, GetBlobServiceClient(storageAccount), storageAccount)); }
public void CanAddTaskWithFilesToStage() { StagingStorageAccount storageCreds = TestUtilities.GetStorageCredentialsFromEnvironment(); using (BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment())) { string jobId = "TestTaskWithFilesToStage-" + TestUtilities.GetMyName(); try { CloudJob job = batchCli.JobOperations.CreateJob(jobId, new PoolInformation() { PoolId = this.poolFixture.PoolId }); job.Commit(); CloudJob boundJob = batchCli.JobOperations.GetJob(jobId); CloudTask myTask = new CloudTask(id: "CountWordsTask", commandline: @"cmd /c dir /s .. & dir & wc localwords.txt"); myTask.FilesToStage = new List <IFileStagingProvider> { new FileToStage(Resources.LocalWordsDotText, storageCreds) }; // add the task to the job var artifacts = boundJob.AddTask(myTask); var specificArtifact = artifacts[typeof(FileToStage)]; SequentialFileStagingArtifact sfsa = specificArtifact as SequentialFileStagingArtifact; Assert.NotNull(sfsa); // Get the pool the bound job is scheduled on. CloudPool boundPool = batchCli.PoolOperations.GetPool(boundJob.ExecutionInformation.PoolId); // wait for the task to complete TaskStateMonitor taskStateMonitor = batchCli.Utilities.CreateTaskStateMonitor(); taskStateMonitor.WaitAll( boundJob.ListTasks(), Microsoft.Azure.Batch.Common.TaskState.Completed, TimeSpan.FromMinutes(10), controlParams: null, additionalBehaviors: new[] { // spam/logging interceptor new Microsoft.Azure.Batch.Protocol.RequestInterceptor((x) => { this.testOutputHelper.WriteLine("Issuing request type: " + x.GetType().ToString()); try { // print out the compute node states... we are actually waiting on the compute nodes List <ComputeNode> allComputeNodes = boundPool.ListComputeNodes().ToList(); this.testOutputHelper.WriteLine(" #compute nodes: " + allComputeNodes.Count); allComputeNodes.ForEach( (icn) => { this.testOutputHelper.WriteLine(" computeNode.id: " + icn.Id + ", state: " + icn.State); }); } catch (Exception ex) { // there is a race between the pool-lifetime job ending and the ListComputeNodes call above Assert.True(false, "SampleWithFilesAndPool can probably ignore this if the pool was not found: " + ex.ToString()); } }) }); List <CloudTask> tasks = boundJob.ListTasks().ToList(); CloudTask myCompletedTask = tasks.Single(); foreach (CloudTask curTask in tasks) { this.testOutputHelper.WriteLine("Task Id: " + curTask.Id + ", state: " + curTask.State); } boundPool.Refresh(); this.testOutputHelper.WriteLine("Pool Id: " + boundPool.Id + ", state: " + boundPool.State); string stdOut = myCompletedTask.GetNodeFile(Constants.StandardOutFileName).ReadAsString(); string stdErr = myCompletedTask.GetNodeFile(Constants.StandardErrorFileName).ReadAsString(); this.testOutputHelper.WriteLine("StdOut: "); this.testOutputHelper.WriteLine(stdOut); this.testOutputHelper.WriteLine("StdErr: "); this.testOutputHelper.WriteLine(stdErr); this.testOutputHelper.WriteLine("Task Files:"); foreach (NodeFile curFile in myCompletedTask.ListNodeFiles(recursive: true)) { this.testOutputHelper.WriteLine(" File path: " + curFile.Path); } var files = myCompletedTask.ListNodeFiles(recursive: true).ToList(); // confirm the staged file is there (match case-insensitively; the command line stages it as localwords.txt) Assert.True(files.Any(file => file.Path.IndexOf("localwords.txt", StringComparison.OrdinalIgnoreCase) >= 0), "missing file: localwords.txt"); } finally { TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait(); } } }
public static BlobContainerClient GetBlobContainerClient(string containerName, BlobServiceClient serviceClient, StagingStorageAccount storageAccount = null) { storageAccount ??= GetStorageAccount(); serviceClient ??= GetBlobServiceClient(storageAccount); BlobContainerClient containerClient = serviceClient.GetBlobContainerClient(containerName); return(containerClient); }
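The overloads above rely on GetBlobServiceClient and GetStorageAccount, which are not shown. A minimal sketch of the former, assuming Azure.Storage.Blobs v12 and the StagingStorageAccount.BlobUri property used elsewhere in these tests:

// Sketch: build a v12 service client from the staging account's endpoint and key,
// using the GetSharedKeyCredential helper defined below.
public static BlobServiceClient GetBlobServiceClient(StagingStorageAccount storageAccount = null)
{
    storageAccount ??= GetStorageAccount();
    return new BlobServiceClient(storageAccount.BlobUri, GetSharedKeyCredential(storageAccount));
}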
public static StorageSharedKeyCredential GetSharedKeyCredential(StagingStorageAccount storageAccount = null) { storageAccount ??= GetStorageAccount(); return(new StorageSharedKeyCredential(storageAccount.StorageAccount, storageAccount.StorageAccountKey)); }
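GetWriteableSasUri, used by the RunTaskAndUploadFiles test earlier, can be built on top of GetSharedKeyCredential. A minimal sketch, assuming the Azure.Storage.Sas types from the v12 SDK:

// Sketch: a container-level SAS with write permission, valid for one day.
public static string GetWriteableSasUri(BlobContainerClient containerClient, StagingStorageAccount storageAccount)
{
    var sasBuilder = new BlobSasBuilder
    {
        BlobContainerName = containerClient.Name,
        Resource = "c", // container-scoped SAS
        ExpiresOn = DateTimeOffset.UtcNow.AddDays(1)
    };
    sasBuilder.SetPermissions(BlobContainerSasPermissions.Write);

    var uriBuilder = new BlobUriBuilder(containerClient.Uri)
    {
        Sas = sasBuilder.ToSasQueryParameters(GetSharedKeyCredential(storageAccount))
    };
    return uriBuilder.ToUri().ToString();
}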