// BATCH CLIENT SETUP - METHOD IMPLEMENTATIONS // CreatePoolIfNoneExist(): Creates the Batch pool. // batchClient: A BatchClient object. // PoolId: ID of the CloudPool object to create. // resourceFiles: A collection of ResourceFile objects representing blobs in a Storage account container. private static void CreatePoolIfNoneExist(BatchClient batchClient, string poolId, IList <ResourceFile> resourceFiles) { CloudPool pool = null; try { Console.WriteLine("Creating pool [{0}]...", poolId); // Create an unbound pool. No pool is actually created in the Batch service until we call // CloudPool.Commit(). This CloudPool instance is therefore considered "unbound," and we can // modify its properties. pool = batchClient.PoolOperations.CreatePool( poolId: poolId, targetDedicatedComputeNodes: 5, // 5 compute nodes virtualMachineSize: "small", // single-core, 1.75 GB memory, 225 GB disk cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4")); // Windows Server 2012 R2 pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError != null && be.RequestInformation.BatchError.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", poolId); } else { throw; // Any other exception is unexpected } } }
private CloudPool CreatePoolIfNotExists(String poolID, String vmSize, int computeNodes = 2, ImageReference imageReference = null) { messageContainer.AddInformationMessage("Configuring pool..."); CloudPool cPool = null; try { cPool = batchClient.PoolOperations.GetPool(poolID); } catch { /* pool does not exist yet; it is created below */ } if (cPool == null) { //imageReference = (imageReference == null ? CreateImageReference("MicrosoftWindowsServer", "WindowsServer", "2016-Datacenter", "latest") : imageReference); imageReference = (imageReference == null ? CreateImageReference() : imageReference); VirtualMachineConfiguration vmConfig = new VirtualMachineConfiguration(imageReference, SKUReference(imageReference.Offer)); messageContainer.AddInformationMessage("Creating pool..."); cPool = batchClient.PoolOperations.CreatePool(poolID, vmSize, vmConfig, computeNodes); cPool.ApplicationPackageReferences = new List <ApplicationPackageReference> { new ApplicationPackageReference { ApplicationId = "radiance", Version = "1.0" } }; cPool.Commit(); messageContainer.AddInformationMessage("Pool created..."); } messageContainer.AddInformationMessage("Pool configured..."); return(cPool); }
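The snippet above calls CreateImageReference() and SKUReference(), which are helpers from its own project and are not shown here. A minimal sketch of what they might look like, assuming the pool targets a stock Marketplace image (the default image values and the offer-to-SKU mapping below are illustrative, not taken from the original code):

// Hypothetical helpers assumed by CreatePoolIfNotExists above.
private ImageReference CreateImageReference(
    string publisher = "MicrosoftWindowsServer",
    string offer = "WindowsServer",
    string sku = "2016-Datacenter",
    string version = "latest")
{
    return new ImageReference(publisher: publisher, offer: offer, sku: sku, version: version);
}

// Maps an image offer to the Batch node agent SKU ID expected by VirtualMachineConfiguration.
// The mapping shown is an example; check ListNodeAgentSkus/ListSupportedImages for the real values.
private string SKUReference(string offer)
{
    return offer == "UbuntuServer" ? "batch.node.ubuntu 16.04" : "batch.node.windows amd64";
}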
protected CloudPool CreatePool() { CloudPool currentPool = this.FindPoolIfExists(); if (currentPool == null) { // gotta create a new pool CloudServiceConfiguration passConfiguration = new CloudServiceConfiguration(OSFamily); currentPool = this.client.PoolOperations.CreatePool( this.PoolId, VMSize, passConfiguration, targetDedicatedComputeNodes: 1); var password = TestUtilities.GenerateRandomPassword(); currentPool.UserAccounts = new List <UserAccount>() { new UserAccount(AdminUserAccountName, password, ElevationLevel.Admin), new UserAccount(NonAdminUserAccountName, password, ElevationLevel.NonAdmin), }; StartTask st = new StartTask("cmd /c hostname"); // used for tests of StartTask(info) st.EnvironmentSettings = new List <EnvironmentSetting> { new EnvironmentSetting("key", "value") }; currentPool.StartTask = st; currentPool.Commit(); } return(WaitForPoolAllocation(this.client, this.PoolId)); }
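This helper (and several of the variants below) ends with WaitForPoolAllocation(this.client, this.PoolId), which is defined elsewhere in the test fixture. A minimal sketch of the idea, assuming the goal is simply to block until the pool has finished allocating nodes (the names and polling interval are illustrative):

// Hypothetical sketch: poll the pool until its allocation state is Steady, then return the bound pool.
private static CloudPool WaitForPoolAllocation(BatchClient client, string poolId)
{
    CloudPool pool = client.PoolOperations.GetPool(poolId);
    while (pool.AllocationState != AllocationState.Steady)
    {
        System.Threading.Thread.Sleep(TimeSpan.FromSeconds(10));
        pool.Refresh();   // re-fetch the pool's server-side state
    }
    return pool;
}

A real implementation would also enforce a timeout rather than polling indefinitely.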
protected CloudPool CreatePool() { CloudPool currentPool = this.FindPoolIfExists(); // gotta create a new pool if (currentPool == null) { List <NodeAgentSku> nodeAgentSkus = this.client.PoolOperations.ListNodeAgentSkus().ToList(); Func <ImageReference, bool> ubuntuImageScanner = imageRef => imageRef.Publisher == "Canonical" && imageRef.Offer == "UbuntuServer" && imageRef.Sku.Contains("14.04"); NodeAgentSku ubuntuSku = nodeAgentSkus.First(sku => sku.VerifiedImageReferences.FirstOrDefault(ubuntuImageScanner) != null); ImageReference imageReference = ubuntuSku.VerifiedImageReferences.First(ubuntuImageScanner); VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( imageReference: imageReference, nodeAgentSkuId: ubuntuSku.Id); currentPool = this.client.PoolOperations.CreatePool( poolId: this.PoolId, virtualMachineSize: VMSize, virtualMachineConfiguration: virtualMachineConfiguration, targetDedicated: 1); currentPool.Commit(); } return(WaitForPoolAllocation(this.client, this.PoolId)); }
private void CreateBatchPool(VirtualMachineConfiguration vmc, string vmsize) { try { CloudPool pool = Client.PoolOperations.CreatePool( poolId: _poolId, targetDedicatedComputeNodes: _config.Workers, virtualMachineSize: vmsize, virtualMachineConfiguration: vmc); // Specify the application and version to install on the compute nodes pool.ApplicationPackageReferences = new List <ApplicationPackageReference> { new ApplicationPackageReference { ApplicationId = "NewtonsoftJson", Version = "1.0" } }; pool.Commit(); } catch (BatchException be) { if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { //Pool exists; } else { throw; } } }
/// <summary> /// Adds a new execution pool to the Batch account. /// </summary> /// <param name="poolId">ID of the new pool to create.</param> /// <param name="appId">Application to associate with the pool being created.</param> /// <param name="appVersion">Version of the application to associate with the pool.</param> /// <returns>The newly created pool.</returns> public CloudPool CreatePool(string poolId, string appId, string appVersion) { ImageReference imageReference = new ImageReference(publisher: "MicrosoftWindowsServer", offer: "WindowsServer", sku: "2012-R2-Datacenter-smalldisk", version: "latest"); VirtualMachineConfiguration virtualMachine = new VirtualMachineConfiguration(imageReference: imageReference, nodeAgentSkuId: "batch.node.windows amd64"); CloudPool pool = client.PoolOperations.CreatePool(poolId: poolId, targetDedicatedComputeNodes: 2, virtualMachineSize: "STANDARD_A1", virtualMachineConfiguration: virtualMachine); pool.ApplicationPackageReferences = new List <ApplicationPackageReference> { new ApplicationPackageReference { ApplicationId = appId, Version = appVersion } }; pool.Commit(); return(pool); }
private static void CreateBatchPool(BatchClient batchClient, VirtualMachineConfiguration vmConfiguration) { try { CloudPool pool = batchClient.PoolOperations.CreatePool( poolId: PoolId, targetDedicatedComputeNodes: PoolNodeCount, virtualMachineSize: PoolVMSize, virtualMachineConfiguration: vmConfiguration); pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", PoolId); } else { throw; // Any other exception is unexpected } } }
protected CloudPool CreatePool() { CloudPool currentPool = this.FindPoolIfExists(); if (currentPool == null) { // gotta create a new pool CloudServiceConfiguration passConfiguration = new CloudServiceConfiguration(OSFamily); currentPool = this.client.PoolOperations.CreatePool( this.PoolId, VMSize, passConfiguration, targetDedicatedComputeNodes: 1); StartTask st = new StartTask("cmd /c hostname"); // used for tests of StartTask(info) st.EnvironmentSettings = new List <EnvironmentSetting> { new EnvironmentSetting("key", "value") }; currentPool.StartTask = st; currentPool.Commit(); } return(WaitForPoolAllocation(this.client, this.PoolId)); }
private static void CreateBatchPool(BatchClient batchClient, VirtualMachineConfiguration vmConfiguration) { try { CloudPool pool = batchClient.PoolOperations.CreatePool( poolId: PoolId, targetDedicatedComputeNodes: PoolNodeCount, virtualMachineSize: PoolVMSize, virtualMachineConfiguration: vmConfiguration); // Specify the application and version to install on the compute nodes pool.ApplicationPackageReferences = new List <ApplicationPackageReference> { new ApplicationPackageReference { //ApplicationId = "testbatchapp", ApplicationId = "hpcapp" //, //Version = "1.1" } }; pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", PoolId); } else { throw; // Any other exception is unexpected } } }
/// <summary> /// Creates a new pool. /// </summary> /// <param name="parameters">The parameters to use when creating the pool.</param> public void CreatePool(NewPoolParameters parameters) { if (parameters == null) { throw new ArgumentNullException("parameters"); } PoolOperations poolOperations = parameters.Context.BatchOMClient.PoolOperations; CloudPool pool = poolOperations.CreatePool(poolId: parameters.PoolId, osFamily: parameters.OSFamily, virtualMachineSize: parameters.VirtualMachineSize); pool.DisplayName = parameters.DisplayName; pool.ResizeTimeout = parameters.ResizeTimeout; pool.MaxTasksPerComputeNode = parameters.MaxTasksPerComputeNode; pool.InterComputeNodeCommunicationEnabled = parameters.InterComputeNodeCommunicationEnabled; pool.TargetOSVersion = parameters.TargetOSVersion; if (!string.IsNullOrEmpty(parameters.AutoScaleFormula)) { pool.AutoScaleEnabled = true; pool.AutoScaleEvaluationInterval = parameters.AutoScaleEvaluationInterval; pool.AutoScaleFormula = parameters.AutoScaleFormula; } else if (parameters.TargetDedicated.HasValue) { pool.TargetDedicated = parameters.TargetDedicated; } if (parameters.TaskSchedulingPolicy != null) { pool.TaskSchedulingPolicy = parameters.TaskSchedulingPolicy.omObject; } if (parameters.StartTask != null) { Utils.Utils.StartTaskSyncCollections(parameters.StartTask); pool.StartTask = parameters.StartTask.omObject; } if (parameters.Metadata != null) { pool.Metadata = new List <MetadataItem>(); foreach (DictionaryEntry m in parameters.Metadata) { pool.Metadata.Add(new MetadataItem(m.Key.ToString(), m.Value.ToString())); } } if (parameters.CertificateReferences != null) { pool.CertificateReferences = new List <CertificateReference>(); foreach (PSCertificateReference c in parameters.CertificateReferences) { pool.CertificateReferences.Add(c.omObject); } } WriteVerbose(string.Format(Resources.CreatingPool, parameters.PoolId)); pool.Commit(parameters.AdditionalBehaviors); }
// BATCH CLIENT OPERATIONS - FUNCTION IMPLEMENTATIONS /// <summary> /// Creates the Batch pool. /// </summary> /// <param name="batchClient">A BatchClient object</param> /// <param name="poolId">ID of the CloudPool object to create.</param> private static void CreatePoolIfNotExist(BatchClient batchClient, string poolId) { CloudPool pool = null; try { Console.WriteLine("Creating pool [{0}]...", poolId); ImageReference imageReference = new ImageReference( publisher: "MicrosoftWindowsServer", offer: "WindowsServer", sku: "2012-R2-Datacenter-smalldisk", version: "latest"); VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( imageReference: imageReference, nodeAgentSkuId: "batch.node.windows amd64"); // Create an unbound pool. No pool is actually created in the Batch service until we call // CloudPool.Commit(). This CloudPool instance is therefore considered "unbound," and we can // modify its properties. pool = batchClient.PoolOperations.CreatePool( poolId: poolId, targetDedicatedComputeNodes: DedicatedNodeCount, targetLowPriorityComputeNodes: LowPriorityNodeCount, virtualMachineSize: PoolVMSize, virtualMachineConfiguration: virtualMachineConfiguration); // Specify the application and version to install on the compute nodes // This assumes that a Windows 64-bit zipfile of ffmpeg has been added to Batch account // with Application Id of "ffmpeg" and Version of "3.4". // Download the zipfile https://ffmpeg.zeranoe.com/builds/win64/static/ffmpeg-3.4-win64-static.zip // to upload as application package pool.ApplicationPackageReferences = new List <ApplicationPackageReference> { new ApplicationPackageReference { ApplicationId = appPackageId, Version = appPackageVersion } }; pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", poolId); } else { throw; // Any other exception is unexpected } } }
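The ffmpeg sample above references several constants (DedicatedNodeCount, LowPriorityNodeCount, PoolVMSize, appPackageId, appPackageVersion) that are declared elsewhere in that program. Plausible declarations, with the package values matching the comments in the snippet and the node counts and VM size as illustrative placeholders:

// Assumed settings for the ffmpeg pool; adjust to your quota and region.
private const int DedicatedNodeCount = 2;
private const int LowPriorityNodeCount = 0;
private const string PoolVMSize = "STANDARD_A1_v2";
private const string appPackageId = "ffmpeg";
private const string appPackageVersion = "3.4";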
private static void CreatePool(BatchClient batchClient, VirtualMachineConfiguration vmConfiguration) { CloudPool pool = batchClient.PoolOperations.CreatePool(poolId: PoolId, virtualMachineSize: PoolVMSize, virtualMachineConfiguration: vmConfiguration, targetDedicatedComputeNodes: PoolNodeCount); pool.Commit(); }
public static void ReCreatePool() { Log("Recreate pool"); BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(Settings.batchEndpoint, Settings.batchAccount, Settings.batchKey); using (BatchClient client = BatchClient.Open(cred)) // <-- connect to the cluster { { bool found = false; foreach (var p in client.PoolOperations.ListPools(new ODATADetailLevel(filterClause: "id eq '" + Settings.poolname + "'"))) { found = true; break; } if (found) { Log("Deleting current pool..."); client.PoolOperations.DeletePool(Settings.poolname); Log("Delete command sent."); while (found) { found = false; Thread.Sleep(1000); Log("Waiting for the pool to be deleted."); foreach (var p in client.PoolOperations.ListPools(new ODATADetailLevel(filterClause: "id eq '" + Settings.poolname + "'"))) { found = true; break; } } Log("Pool deleted."); } #region resource file List <ResourceFile> resources = new List <ResourceFile>(); foreach (string blob in StorageHelper.ListBlobs(Settings.resourceContainer)) { string filename = System.IO.Path.GetFileName((new Uri(blob)).LocalPath); resources.Add(new ResourceFile(StorageHelper.GetBlobSASURL(blob), filename)); } #endregion CloudPool pool = client.PoolOperations.CreatePool(Settings.poolname, "4", "medium", 10); pool.StartTask = new StartTask(); pool.StartTask.ResourceFiles = resources; pool.StartTask.CommandLine = @"cmd /c copy *.* %WATASK_TVM_ROOT_DIR%\shared\"; pool.StartTask.WaitForSuccess = true; Log("Creating the new pool..."); pool.Commit(); Log("Pool created."); } } }
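ReCreatePool relies on a StorageHelper class that is not shown. A rough sketch of the two members it uses, written against the classic WindowsAzure.Storage SDK (the Settings.storageConnectionString name and the SAS lifetime are assumptions):

// Hypothetical StorageHelper: lists blob URLs in a container and produces read-only SAS URLs for them.
// Requires Microsoft.WindowsAzure.Storage, Microsoft.WindowsAzure.Storage.Blob, and System.Linq.
public static class StorageHelper
{
    private static readonly CloudBlobClient blobClient =
        CloudStorageAccount.Parse(Settings.storageConnectionString).CreateCloudBlobClient();

    public static IEnumerable<string> ListBlobs(string containerName)
    {
        CloudBlobContainer container = blobClient.GetContainerReference(containerName);
        return container.ListBlobs(useFlatBlobListing: true)
                        .OfType<CloudBlockBlob>()
                        .Select(b => b.Uri.AbsoluteUri);
    }

    public static string GetBlobSASURL(string blobUrl)
    {
        CloudBlockBlob blob = new CloudBlockBlob(new Uri(blobUrl), blobClient.Credentials);
        string sas = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy
        {
            Permissions = SharedAccessBlobPermissions.Read,
            SharedAccessExpiryTime = DateTimeOffset.UtcNow.AddHours(2)
        });
        return blobUrl + sas;
    }
}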
public void Bug1965363_2384616_Wat7OSVersionFeatures() { Action test = () => { using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result) { PoolOperations poolOperations = batchCli.PoolOperations; try { this.testOutputHelper.WriteLine("Listing OS Versions:"); // create pool tests // forget to set CloudServiceConfiguration on Create, get error { CloudPool noArgs = poolOperations.CreatePool("Bug1965363ButNoOSFamily-" + TestUtilities.GetMyName(), PoolFixture.VMSize, default(CloudServiceConfiguration), targetDedicatedComputeNodes: 0); BatchException ex = TestUtilities.AssertThrows <BatchException>(() => noArgs.Commit()); string exStr = ex.ToString(); // we are expecting an exception, assert if the exception is not the correct one. Assert.Contains("cloudServiceConfiguration", exStr); } // create a pool WITH an osFamily { string poolIdHOSF = "Bug1965363HasOSF-" + TestUtilities.GetMyName(); try { CloudPool hasOSF = poolOperations.CreatePool(poolIdHOSF, PoolFixture.VMSize, new CloudServiceConfiguration(PoolFixture.OSFamily), targetDedicatedComputeNodes: 0); hasOSF.Commit(); } finally { poolOperations.DeletePool(poolIdHOSF); } } } catch (Exception ex) { // special case os version because it is a common failure and requires human intervention/editing // test for expired os version Assert.DoesNotContain("The specified OS Version does not exists", ex.ToString()); throw; } } }; SynchronizationContextHelper.RunTest(test, TestTimeout); }
/// <summary> /// This method can create a pool, it is not used in our example. /// The code remains for documentation purposes. /// </summary> public static void PreparePool() { BatchClient batchClient = PrepareConnection(); if (Environment.GetEnvironmentVariable("REGISTRYNAME") != null) { ContainerRegistry containerRegistry = new ContainerRegistry( registryServer: Environment.GetEnvironmentVariable("REGISTRYNAME"), userName: Environment.GetEnvironmentVariable("REGISTRYUSERNAME"), password: Environment.GetEnvironmentVariable("REGISTRYPASSWORD") ); // Create container configuration, prefetching Docker images from the container registry ContainerConfiguration containerConfig = new ContainerConfiguration() { ContainerImageNames = new List <string>() { Environment.GetEnvironmentVariable("WORKERIMAGENAME") }, ContainerRegistries = new List <ContainerRegistry> { containerRegistry } }; ImageReference imageReference = new ImageReference( publisher: "microsoft-azure-batch", offer: "ubuntu-server-container", sku: "16-04-lts", version: "latest"); // VM configuration VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( imageReference: imageReference, nodeAgentSkuId: "batch.node.ubuntu 16.04") { ContainerConfiguration = containerConfig, }; //Create pool CloudPool pool = batchClient.PoolOperations.CreatePool( poolId: "docker", targetDedicatedComputeNodes: 1, virtualMachineSize: "Standard_A2_v2", virtualMachineConfiguration: virtualMachineConfiguration); pool.Commit(); } }
private static CloudPool CreatePool(Settings unzipperSettings, BatchClient client) { //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at: //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx CloudPool pool = client.PoolOperations.CreatePool( poolId: unzipperSettings.PoolId, targetDedicated: unzipperSettings.PoolNodeCount, virtualMachineSize: unzipperSettings.MachineSize, cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4")); pool.MaxTasksPerComputeNode = unzipperSettings.MaxTasksPerNode; Console.WriteLine("Adding pool {0}", unzipperSettings.PoolId); try { pool.Commit(); } catch (AggregateException ae) { // Go through all exceptions and dump useful information ae.Handle(x => { Console.Error.WriteLine("Creating pool ID {0} failed", unzipperSettings.PoolId); if (x is BatchException) { BatchException be = x as BatchException; Console.WriteLine(be.ToString()); Console.WriteLine(); } else { Console.WriteLine(x); } // can't continue without a pool return(false); }); } catch (BatchException be) { if (be.Message.Contains("conflict")) { Console.WriteLine("pool already exists"); } } return(pool); }
public void Pool_WhenReturnedFromServer_HasExpectedUnboundProperties() { const string cloudPoolId = "id-123"; const string osFamily = "2"; const string virtualMachineSize = "4"; const string cloudPoolDisplayName = "pool-display-name-test"; MetadataItem metadataItem = new MetadataItem("foo", "bar"); BatchSharedKeyCredentials credentials = ClientUnitTestCommon.CreateDummySharedKeyCredential(); using (BatchClient client = BatchClient.Open(credentials)) { CloudPool cloudPool = client.PoolOperations.CreatePool(cloudPoolId, virtualMachineSize, new CloudServiceConfiguration(osFamily)); cloudPool.DisplayName = cloudPoolDisplayName; cloudPool.Metadata = new List <MetadataItem> { metadataItem }; Assert.Equal(cloudPoolId, cloudPool.Id); // can set an unbound object Assert.Equal(cloudPool.Metadata.First().Name, metadataItem.Name); Assert.Equal(cloudPool.Metadata.First().Value, metadataItem.Value); cloudPool.Commit(additionalBehaviors: InterceptorFactory.CreateAddPoolRequestInterceptor()); // writing isn't allowed for a cloudPool that is in a read-only state. Assert.Throws <InvalidOperationException>(() => cloudPool.AutoScaleFormula = "Foo"); Assert.Throws <InvalidOperationException>(() => cloudPool.DisplayName = "Foo"); Assert.Throws <InvalidOperationException>(() => cloudPool.CloudServiceConfiguration = null); Assert.Throws <InvalidOperationException>(() => cloudPool.ResizeTimeout = TimeSpan.FromSeconds(10)); Assert.Throws <InvalidOperationException>(() => cloudPool.Metadata = null); Assert.Throws <InvalidOperationException>(() => cloudPool.TargetDedicated = 5); Assert.Throws <InvalidOperationException>(() => cloudPool.VirtualMachineConfiguration = null); Assert.Throws <InvalidOperationException>(() => cloudPool.VirtualMachineSize = "small"); // read is allowed though Assert.Null(cloudPool.AutoScaleFormula); Assert.Equal(cloudPoolDisplayName, cloudPool.DisplayName); Assert.NotNull(cloudPool.CloudServiceConfiguration); Assert.Null(cloudPool.ResizeTimeout); Assert.Equal(1, cloudPool.Metadata.Count); Assert.Null(cloudPool.TargetDedicated); Assert.Null(cloudPool.VirtualMachineConfiguration); Assert.Equal(virtualMachineSize, cloudPool.VirtualMachineSize); } }
private void CreateBatchPool(VirtualMachineConfiguration vmc, string vmsize = "Standard_E2_v3") { try { CloudPool pool = Client.PoolOperations.CreatePool(PoolId, vmsize, vmc, PoolSize); pool.Commit(); } catch (BatchException be) { if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Logger.Warning("The pool " + PoolId + " already exist"); } else { throw; } } }
static void AddApplicationPool(BatchClient client, string code) { var poolId = "applicationpool" + code; IPagedEnumerable <CloudPool> pools = client.PoolOperations.ListPools(); foreach (CloudPool pool in pools) { if (pool.Id.Equals(poolId)) { Console.WriteLine("Pool already available for id : " + pool.Id); return; } } CloudPool newPool = client.PoolOperations.CreatePool( poolId: poolId, targetDedicatedComputeNodes: 3, // 3 compute nodes virtualMachineSize: "small", // single-core, 1.75 GB memory, 225 GB disk cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "3")); newPool.Commit(); Console.WriteLine("Created the pool for Code : " + code); }
private static void CreatePoolIfNeeded(BatchClient client, string poolId) { // go through all the pools and see if the specified pool already exists bool found = false; // use an OData based select clause to limit the amount of data returned. This will result in incomplete // client objects but reduces the amount of data on the wire leading to faster completion when there are // a lot of objects for a given query. No spaces are allowed in the string and property names are case-sensitive. foreach (CloudPool p in client.PoolOperations.ListPools(new ODATADetailLevel(selectClause: "id,currentDedicated"))) { // pools are uniquely identified by their ID if (string.Equals(p.Id, poolId)) { Console.WriteLine("Using existing pool {0}", poolId); found = true; if (p.CurrentDedicated == 0) { Console.WriteLine("There are no compute nodes in this pool. No tasks will be run until at least one node has been added via resizing."); Console.WriteLine("Resizing pool to add 3 nodes. This might take a while..."); p.Resize(3); } break; } } if (!found) { Console.WriteLine("Creating pool: {0}", poolId); // if pool not found, call CreatePool. You can learn more about os families and versions at: // https://azure.microsoft.com/en-us/documentation/articles/cloud-services-guestos-update-matrix/ CloudPool pool = client.PoolOperations.CreatePool(poolId, targetDedicated: 3, virtualMachineSize: "small", osFamily: "3"); pool.Commit(); } }
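Resize is an asynchronous request: when CreatePoolIfNeeded returns, the three requested nodes may still be allocating. If the caller needs them before adding tasks, a short wait loop like the following sketch (not part of the original sample; the interval and output are illustrative) can be appended after the resize or create call:

// Hypothetical follow-up: block until the resize or initial allocation has settled.
CloudPool resized = client.PoolOperations.GetPool(poolId);
while (resized.AllocationState != AllocationState.Steady)
{
    System.Threading.Thread.Sleep(TimeSpan.FromSeconds(15));
    resized.Refresh();
}
Console.WriteLine("Pool {0} now has {1} dedicated compute node(s).", poolId, resized.CurrentDedicated);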
protected CloudPool CreatePool() { CloudPool currentPool = this.FindPoolIfExists(); // gotta create a new pool if (currentPool == null) { var ubuntuImageDetails = GetUbuntuImageDetails(this.client); VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( ubuntuImageDetails.ImageReference, nodeAgentSkuId: ubuntuImageDetails.NodeAgentSku.Id); currentPool = this.client.PoolOperations.CreatePool( poolId: this.PoolId, virtualMachineSize: VMSize, virtualMachineConfiguration: virtualMachineConfiguration, targetDedicatedComputeNodes: 1); currentPool.Commit(); } return(WaitForPoolAllocation(this.client, this.PoolId)); }
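GetUbuntuImageDetails is another fixture helper that is not reproduced here. Based on the ListNodeAgentSkus pattern shown in the earlier variant, it plausibly looks something like this (the tuple shape and the 14.04 filter are assumptions drawn from that variant):

// Hypothetical sketch of GetUbuntuImageDetails built on the node agent SKU listing; needs System.Linq.
private static (ImageReference ImageReference, NodeAgentSku NodeAgentSku) GetUbuntuImageDetails(BatchClient client)
{
    Func<ImageReference, bool> isUbuntu = imageRef =>
        imageRef.Publisher == "Canonical" &&
        imageRef.Offer == "UbuntuServer" &&
        imageRef.Sku.Contains("14.04");

    NodeAgentSku sku = client.PoolOperations.ListNodeAgentSkus()
        .ToList()
        .First(s => s.VerifiedImageReferences.Any(isUbuntu));

    return (sku.VerifiedImageReferences.First(isUbuntu), sku);
}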
public void Bug1965363_2384616_Wat7OSVersionFeatures() { Action test = () => { using (BatchClient batchCli = TestUtilities.OpenBatchClientAsync(TestUtilities.GetCredentialsFromEnvironment()).Result) { PoolOperations poolOperations = batchCli.PoolOperations; try { this.testOutputHelper.WriteLine("Listing OS Versions:"); /* bug 2384616 ListOsVersions hidden for wat 8 * * // test ListOSVersion * foreach (OSVersion curOSV in poolMgr.ListOSVersions()) * { * this.testOutputHelper.WriteLine("Label: " + curOSV.Label); * this.testOutputHelper.WriteLine(" Version: " + curOSV.Version); * this.testOutputHelper.WriteLine(" Family: " + curOSV.Family); * this.testOutputHelper.WriteLine(" FamilyLabel: " + curOSV.FamilyLabel); * this.testOutputHelper.WriteLine(" isDefault: " + curOSV.IsDefault); * this.testOutputHelper.WriteLine(" IsActive: " + curOSV.IsActive); * * string expDate; * * if (curOSV.ExpirationDate.HasValue) * { * expDate = curOSV.ExpirationDate.Value.ToString(); * } * else * { * expDate = "<null/novalue>"; * } * * this.testOutputHelper.WriteLine(" ExpirationDate: " + expDate); * } * */ // create pool tests // forget to set CloudServiceConfiguration on Create, get error { CloudPool noArgs = poolOperations.CreatePool("Bug1965363ButNoOSFamily-" + TestUtilities.GetMyName(), PoolFixture.VMSize, default(CloudServiceConfiguration), targetDedicated: 0); BatchException ex = TestUtilities.AssertThrows <BatchException>(() => noArgs.Commit()); string exStr = ex.ToString(); // we are expecting an exception, assert if the exception is not the correct one. Assert.Contains("cloudServiceConfiguration", exStr); } // create a pool WITH an osFamily { string poolIdHOSF = "Bug1965363HasOSF-" + TestUtilities.GetMyName(); try { CloudPool hasOSF = poolOperations.CreatePool(poolIdHOSF, PoolFixture.VMSize, new CloudServiceConfiguration(PoolFixture.OSFamily), targetDedicated: 0); hasOSF.Commit(); } finally { poolOperations.DeletePool(poolIdHOSF); } } // TODO: ultimately we will either need to find (via list) a family with more than one version or // manually update these strings as OS versions are depricated //See here for other OS versions if this test fails: http://azure.microsoft.com/en-us/documentation/articles/cloud-services-guestos-update-matrix/ const string familyVersion0 = "*"; const string familyVersion1 = "WA-GUEST-OS-4.32_201605-01"; // "UpdatePoolOS" tests (ChangeOSVersion in OM) // PoolManager { string poolIdChangeOSV = "Bug1965363ChangeOSVviaMGR-" + TestUtilities.GetMyName(); try { CloudPool unboundPool = poolOperations.CreatePool( poolIdChangeOSV, PoolFixture.VMSize, new CloudServiceConfiguration(PoolFixture.OSFamily, familyVersion0), // start with version 0 targetDedicated: 0); unboundPool.Commit(); // fetch the bound pool CloudPool boundPool = poolOperations.GetPool(poolIdChangeOSV); Assert.Equal(familyVersion0, boundPool.CloudServiceConfiguration.CurrentOSVersion); // switch to new version poolOperations.ChangeOSVersion(poolIdChangeOSV, familyVersion1); // UpdatePoolOS is has latency??? 
PollForOSVersionChange(boundPool, familyVersion1); // check to make sure the new value is set boundPool.Refresh(); Assert.Equal(familyVersion1, boundPool.CloudServiceConfiguration.CurrentOSVersion); } finally { TestUtilities.DeletePoolIfExistsAsync(batchCli, poolIdChangeOSV).Wait(); } } // ICloudPool { string poolIdChangeOSV = "Bug1965363ChangeOSVviaPool-" + TestUtilities.GetMyName(); try { CloudPool unboundPool = poolOperations.CreatePool( poolIdChangeOSV, PoolFixture.VMSize, new CloudServiceConfiguration(PoolFixture.OSFamily, familyVersion0), // start with version 0 targetDedicated: 0); unboundPool.Commit(); // fetch the bound pool CloudPool boundPool = poolOperations.GetPool(poolIdChangeOSV); Assert.Equal(familyVersion0, boundPool.CloudServiceConfiguration.CurrentOSVersion); // switch to new version boundPool.ChangeOSVersion(familyVersion1); // UpdatePoolOS is has latency??? PollForOSVersionChange(boundPool, familyVersion1); // check to make sure the new value is set boundPool.Refresh(); Assert.Equal(familyVersion1, boundPool.CloudServiceConfiguration.CurrentOSVersion); } finally { TestUtilities.DeletePoolIfExistsAsync(batchCli, poolIdChangeOSV).Wait(); } } // autopoolspec tests { string jobId = "Bug1965363WIName-" + TestUtilities.GetMyName(); // test not setting osversion try { CloudJob unboundJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation()); AutoPoolSpecification aps = new AutoPoolSpecification(); PoolSpecification ps = new PoolSpecification(); // test unbound set constraint ps.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily); // test unbound get constraint this.testOutputHelper.WriteLine("pus.CloudServiceConfiguration.OSFamily == " + ps.CloudServiceConfiguration.OSFamily); ps.VirtualMachineSize = PoolFixture.VMSize; ps.TargetDedicated = 0; // trivial size for testing purposes aps.PoolSpecification = ps; aps.PoolLifetimeOption = PoolLifetimeOption.Job; unboundJob.PoolInformation.AutoPoolSpecification = aps; // commit to test validation unboundJob.Commit(); // get bound job CloudJob boundJob = batchCli.JobOperations.GetJob(jobId); // test bound get constraints this.testOutputHelper.WriteLine(" OSFamily == " + boundJob.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.OSFamily); string targetOSVersion = boundJob.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.TargetOSVersion; if (string.IsNullOrEmpty(targetOSVersion)) { targetOSVersion = "<null or empty"; } this.testOutputHelper.WriteLine(" TargetOSVersion == " + targetOSVersion); } finally { // cleanup TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).Wait(); } { string jobScheduleId = "Bug1965363WINameSettingAndChanging-" + TestUtilities.GetMyName(); // test setting osversion try { AutoPoolSpecification aps = new AutoPoolSpecification(); PoolSpecification ps = new PoolSpecification(); CloudJobSchedule unboundJobSchedule = batchCli.JobScheduleOperations.CreateJobSchedule( jobScheduleId, new Schedule() { RecurrenceInterval = TimeSpan.FromDays(7) }, new JobSpecification(new PoolInformation() { AutoPoolSpecification = aps })); // test unbound set constraint ps.CloudServiceConfiguration = new CloudServiceConfiguration(PoolFixture.OSFamily, familyVersion0); // test unbound get constraint this.testOutputHelper.WriteLine("pus.CloudServiceConfiguration.OSFamily == " + ps.CloudServiceConfiguration.OSFamily); this.testOutputHelper.WriteLine("pus.CloudServiceConfiguration.TargetOSVersion == " + 
ps.CloudServiceConfiguration.TargetOSVersion); ps.VirtualMachineSize = PoolFixture.VMSize; ps.TargetDedicated = 0; // trivial size for testing purposes aps.PoolSpecification = ps; aps.PoolLifetimeOption = PoolLifetimeOption.Job; unboundJobSchedule.Commit(); // get bound job schedule CloudJobSchedule boundJobSchedule = batchCli.JobScheduleOperations.GetJobSchedule(jobScheduleId); // test bound get constraints this.testOutputHelper.WriteLine(" OSFamily == " + boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.OSFamily); this.testOutputHelper.WriteLine(" TargetOSVersion == " + boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.TargetOSVersion); // assert the value is as set above Assert.Equal(familyVersion0, boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.TargetOSVersion); // change values const string altFamily = "3"; const string altOSVersion = "WA-GUEST-OS-3.39_201605-01"; // change values on the bound PUS PoolSpecification boundPS = boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification; boundPS.CloudServiceConfiguration = new CloudServiceConfiguration(altFamily, altOSVersion); // flush changes boundJobSchedule.Commit(); // confirm changes took boundJobSchedule.Refresh(); Assert.Equal(altFamily, boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.OSFamily); Assert.Equal(altOSVersion, boundJobSchedule.JobSpecification.PoolInformation.AutoPoolSpecification.PoolSpecification.CloudServiceConfiguration.TargetOSVersion); } finally { // cleanup TestUtilities.DeleteJobScheduleIfExistsAsync(batchCli, jobScheduleId).Wait(); } } } } catch (Exception ex) { // special case os version beacuse it is a common failure and requires human intervention/editing // test for expired os version Assert.DoesNotContain("The specified OS Version does not exists", ex.ToString()); throw; } } }; SynchronizationContextHelper.RunTest(test, TestTimeout); }
public static void JobMain(string[] args) { //Load the configuration Settings topNWordsConfiguration = Settings.Default; AccountSettings accountSettings = AccountSettings.Default; CloudStorageAccount cloudStorageAccount = new CloudStorageAccount( new StorageCredentials( accountSettings.StorageAccountName, accountSettings.StorageAccountKey), accountSettings.StorageServiceUrl, useHttps: true); StagingStorageAccount stagingStorageAccount = new StagingStorageAccount( accountSettings.StorageAccountName, accountSettings.StorageAccountKey, cloudStorageAccount.BlobEndpoint.ToString()); using (BatchClient client = BatchClient.Open(new BatchSharedKeyCredentials(accountSettings.BatchServiceUrl, accountSettings.BatchAccountName, accountSettings.BatchAccountKey))) { string stagingContainer = null; //OSFamily 4 == OS 2012 R2. You can learn more about os families and versions at: //http://msdn.microsoft.com/en-us/library/azure/ee924680.aspx CloudPool pool = client.PoolOperations.CreatePool( topNWordsConfiguration.PoolId, targetDedicated: topNWordsConfiguration.PoolNodeCount, virtualMachineSize: "small", cloudServiceConfiguration: new CloudServiceConfiguration(osFamily: "4")); Console.WriteLine("Adding pool {0}", topNWordsConfiguration.PoolId); try { pool.Commit(); } catch (AggregateException ae) { // Go through all exceptions and dump useful information ae.Handle(x => { Console.Error.WriteLine("Creating pool ID {0} failed", topNWordsConfiguration.PoolId); if (x is BatchException) { BatchException be = x as BatchException; Console.WriteLine(be.ToString()); Console.WriteLine(); } else { Console.WriteLine(x); } // can't continue without a pool return(false); }); } try { Console.WriteLine("Creating job: " + topNWordsConfiguration.JobId); // get an empty unbound Job CloudJob unboundJob = client.JobOperations.CreateJob(); unboundJob.Id = topNWordsConfiguration.JobId; unboundJob.PoolInformation = new PoolInformation() { PoolId = topNWordsConfiguration.PoolId }; // Commit Job to create it in the service unboundJob.Commit(); // create file staging objects that represent the executable and its dependent assembly to run as the task. // These files are copied to every node before the corresponding task is scheduled to run on that node. FileToStage topNWordExe = new FileToStage(TopNWordsExeName, stagingStorageAccount); FileToStage storageDll = new FileToStage(StorageClientDllName, stagingStorageAccount); // In this sample, the input data is copied separately to Storage and its URI is passed to the task as an argument. // This approach is appropriate when the amount of input data is large such that copying it to every node via FileStaging // is not desired and the number of tasks is small since a large number of readers of the blob might get throttled // by Storage which will lengthen the overall processing time. // // You'll need to observe the behavior and use published techniques for finding the right balance of performance versus // complexity. 
string bookFileUri = UploadBookFileToCloudBlob(accountSettings, topNWordsConfiguration.BookFileName); Console.WriteLine("{0} uploaded to cloud", topNWordsConfiguration.BookFileName); // initialize a collection to hold the tasks that will be submitted in their entirety List <CloudTask> tasksToRun = new List <CloudTask>(topNWordsConfiguration.NumberOfTasks); for (int i = 1; i <= topNWordsConfiguration.NumberOfTasks; i++) { CloudTask task = new CloudTask("task_no_" + i, String.Format("{0} --Task {1} {2} {3} {4}", TopNWordsExeName, bookFileUri, topNWordsConfiguration.TopWordCount, accountSettings.StorageAccountName, accountSettings.StorageAccountKey)); //This is the list of files to stage to a container -- for each job, one container is created and //files all resolve to Azure Blobs by their name (so two tasks with the same named file will create just 1 blob in //the container). task.FilesToStage = new List <IFileStagingProvider> { topNWordExe, storageDll }; tasksToRun.Add(task); } // Commit all the tasks to the Batch Service. Ask AddTask to return information about the files that were staged. // The container information is used later on to remove these files from Storage. ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> > fsArtifactBag = new ConcurrentBag <ConcurrentDictionary <Type, IFileStagingArtifact> >(); client.JobOperations.AddTask(topNWordsConfiguration.JobId, tasksToRun, fileStagingArtifacts: fsArtifactBag); // loop through the bag of artifacts, looking for the one that matches our staged files. Once there, // capture the name of the container holding the files so they can be deleted later on if that option // was configured in the settings. foreach (var fsBagItem in fsArtifactBag) { IFileStagingArtifact fsValue; if (fsBagItem.TryGetValue(typeof(FileToStage), out fsValue)) { SequentialFileStagingArtifact stagingArtifact = fsValue as SequentialFileStagingArtifact; if (stagingArtifact != null) { stagingContainer = stagingArtifact.BlobContainerCreated; Console.WriteLine( "Uploaded files to container: {0} -- you will be charged for their storage unless you delete them.", stagingArtifact.BlobContainerCreated); } } } //Get the job to monitor status. CloudJob job = client.JobOperations.GetJob(topNWordsConfiguration.JobId); Console.Write("Waiting for tasks to complete ... "); // Wait 20 minutes for all tasks to reach the completed state. The long timeout is necessary for the first // time a pool is created in order to allow nodes to be added to the pool and initialized to run tasks. 
IPagedEnumerable <CloudTask> ourTasks = job.ListTasks(new ODATADetailLevel(selectClause: "id")); client.Utilities.CreateTaskStateMonitor().WaitAll(ourTasks, TaskState.Completed, TimeSpan.FromMinutes(20)); Console.WriteLine("tasks are done."); foreach (CloudTask t in ourTasks) { Console.WriteLine("Task " + t.Id); Console.WriteLine("stdout:" + Environment.NewLine + t.GetNodeFile(Constants.StandardOutFileName).ReadAsString()); Console.WriteLine(); Console.WriteLine("stderr:" + Environment.NewLine + t.GetNodeFile(Constants.StandardErrorFileName).ReadAsString()); } } finally { //Delete the pool that we created if (topNWordsConfiguration.ShouldDeletePool) { Console.WriteLine("Deleting pool: {0}", topNWordsConfiguration.PoolId); client.PoolOperations.DeletePool(topNWordsConfiguration.PoolId); } //Delete the job that we created if (topNWordsConfiguration.ShouldDeleteJob) { Console.WriteLine("Deleting job: {0}", topNWordsConfiguration.JobId); client.JobOperations.DeleteJob(topNWordsConfiguration.JobId); } //Delete the containers we created if (topNWordsConfiguration.ShouldDeleteContainer) { DeleteContainers(accountSettings, stagingContainer); } } } }
static void Main(string[] args) { string storageConnectionString = $"DefaultEndpointsProtocol=https;AccountName={StorageAccountName};AccountKey={StorageAccountKey}"; // Retrieve the storage account CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString); // Create the blob client CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient(); const string inputContainerName = "batchinput"; List <string> inputFilePaths = new List <string> { "taskdata0.txt", "taskdata1.txt", "taskdata2.txt" }; // Upload the input files to blob storage List <ResourceFile> inputFiles = new List <ResourceFile>(); foreach (string filePath in inputFilePaths) { inputFiles.Add(UploadFileToContainer(blobClient, inputContainerName, filePath)); } // Get a SAS Url for the output container const string outputContainerName = "batchoutput"; string outputContainerSasUrl = GetOutputContainerSasUrl(blobClient, outputContainerName); // Create the virtual machine image reference ImageReference imageReference = new ImageReference( publisher: "MicrosoftWindowsServer", offer: "WindowsServer", sku: "2016-datacenter-smalldisk", version: "latest"); // Create the virtual machine configuration for the pool VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( imageReference: imageReference, nodeAgentSkuId: "batch.node.windows amd64"); BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(BatchAccountUrl, BatchAccountName, BatchAccountKey); using (BatchClient batchClient = BatchClient.Open(cred)) { Console.WriteLine("Creating pool [{0}]...", PoolId); try { CloudPool pool = batchClient.PoolOperations.CreatePool( poolId: PoolId, targetDedicatedComputeNodes: PoolNodeCount, virtualMachineSize: PoolVMSize, virtualMachineConfiguration: virtualMachineConfiguration); // Specify the application packages for the tasks pool.ApplicationPackageReferences = new List <ApplicationPackageReference> { new ApplicationPackageReference { ApplicationId = "ReadWriteFile", Version = "1" } }; pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", PoolId); } else { throw; // Any other exception is unexpected } } Console.WriteLine("Creating job [{0}]...", JobId); try { CloudJob job = batchClient.JobOperations.CreateJob(); job.Id = JobId; job.PoolInformation = new PoolInformation { PoolId = PoolId }; job.Commit(); } catch (BatchException be) { // Accept the specific error code JobExists as that is expected if the job already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.JobExists) { Console.WriteLine("The job {0} already existed when we tried to create it", JobId); } else { throw; // Any other exception is unexpected } } // Create a collection to hold the tasks that we'll be adding to the job Console.WriteLine("Adding {0} tasks to job [{1}]...", inputFiles.Count, JobId); List <CloudTask> tasks = new List <CloudTask>(); // Create each of the tasks to process one of the input files. 
for (int i = 0; i < inputFiles.Count; i++) { string taskId = String.Format("Task{0}", i); string inputFilename = inputFiles[i].FilePath; string outputFileName = string.Format("out{0}", inputFilename); string taskCommandLine = string.Format("cmd /c %AZ_BATCH_APP_PACKAGE_READWRITEFILE%\\ReadWriteFile.exe {0} {1}", inputFilename, outputFileName); CloudTask task = new CloudTask(taskId, taskCommandLine); // Set the resource files and output files for the task task.ResourceFiles = new List <ResourceFile> { inputFiles[i] }; task.OutputFiles = new List <OutputFile> { new OutputFile( filePattern: outputFileName, destination: new OutputFileDestination(new OutputFileBlobContainerDestination(containerUrl: outputContainerSasUrl, path: outputFileName)), uploadOptions: new OutputFileUploadOptions(OutputFileUploadCondition.TaskCompletion)) }; tasks.Add(task); } // Add all tasks to the job. batchClient.JobOperations.AddTask(JobId, tasks); // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete. TimeSpan timeout = TimeSpan.FromMinutes(30); Console.WriteLine("Monitoring all tasks for 'Completed' state, timeout in {0}...", timeout); IEnumerable <CloudTask> addedTasks = batchClient.JobOperations.ListTasks(JobId); batchClient.Utilities.CreateTaskStateMonitor().WaitAll(addedTasks, TaskState.Completed, timeout); Console.WriteLine("All tasks reached state Completed."); // Print task output Console.WriteLine(); Console.WriteLine("Printing task output..."); IEnumerable <CloudTask> completedtasks = batchClient.JobOperations.ListTasks(JobId); foreach (CloudTask task in completedtasks) { string nodeId = String.Format(task.ComputeNodeInformation.ComputeNodeId); Console.WriteLine("Task: {0}", task.Id); Console.WriteLine("Node: {0}", nodeId); Console.WriteLine("Standard out:"); Console.WriteLine(task.GetNodeFile(Constants.StandardOutFileName).ReadAsString()); } // Clean up Batch resources (if the user so chooses) Console.WriteLine(); Console.Write("Delete job? [yes] no: "); string response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { batchClient.JobOperations.DeleteJob(JobId); } Console.Write("Delete pool? [yes] no: "); response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { batchClient.PoolOperations.DeletePool(PoolId); } } }
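The quickstart-style sample above (and the similar ones later in this listing) refers to account and pool constants (BatchAccountUrl, PoolId, PoolNodeCount, PoolVMSize, JobId, and so on) that are defined at the top of the original files. Representative declarations, with placeholder values where account secrets belong:

// Placeholder configuration assumed by the quickstart samples; substitute your own account values.
private const string BatchAccountName = "<batch account name>";
private const string BatchAccountKey = "<batch account key>";
private const string BatchAccountUrl = "<batch account URL>";
private const string StorageAccountName = "<storage account name>";
private const string StorageAccountKey = "<storage account key>";
private const string PoolId = "DotNetQuickstartPool";
private const string JobId = "DotNetQuickstartJob";
private const int PoolNodeCount = 2;
private const string PoolVMSize = "STANDARD_A1_v2";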
static void Main(string[] args) { BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(url, account, key); string now = DateTime.UtcNow.ToString("r"); using (BatchClient client = BatchClient.Open(cred)) { //client.PoolOperations.EnableAutoScale("demo", "2"); string formula = string.Format(@" $TargetDedicated={1}; lifespan=time()-time(""{0}""); span=TimeInterval_Minute * 60; startup=TimeInterval_Minute * 10; ratio=50; $TargetDedicated=(lifespan>startup?(max($RunningTasks.GetSample(span, ratio), $ActiveTasks.GetSample(span, ratio)) == 0 ? 0 : $TargetDedicated):{1}); ", now, 4); try { CloudPool p = client.PoolOperations.CreatePool("formulasample", "4", "small"); p.AutoScaleEnabled = true; p.AutoScaleFormula = formula; p.Commit(); } catch (Exception ex) { // Go through all exceptions and dump useful information (ex as AggregateException).Handle((x) => { if (x is BatchException) { BatchException be = x as BatchException; if (null != be.RequestInformation && null != be.RequestInformation.AzureError) { // Write the server side error information Console.Error.WriteLine(be.RequestInformation.AzureError.Code); Console.Error.WriteLine(be.RequestInformation.AzureError.Message.Value); if (null != be.RequestInformation.AzureError.Values) { foreach (var v in be.RequestInformation.AzureError.Values) { Console.Error.WriteLine(v.Key + " : " + v.Value); } } } } return(false); }); } //var result = client.PoolOperations.EvaluateAutoScale("demo", formula); //if(result.AutoScaleRun.Error != null) //{ // Console.WriteLine(result.AutoScaleRun.Error.Code + " : " + result.AutoScaleRun.Error.Message); // foreach(var e in result.AutoScaleRun.Error.Values) // { // Console.WriteLine(" " + e.Name + " : " + e.Value); // } //} //Console.WriteLine(result.AutoScaleRun.Results); //Console.ReadLine(); } }
public void Start() { CloudStorageAccount storageAccount = createCloudStorageAccount(); CloudBlobClient cloudBlobClient = storageAccount.CreateCloudBlobClient(); string contName = ConfigurationManager.AppSettings["ContainerInput"]; CloudBlobContainer cloudBlobContainer = getBlobContainer(cloudBlobClient, contName); List <string> files = getInputDataFiles(); List <ResourceFile> inputFiles = new List <ResourceFile>(); foreach (string f in files) { if (!String.IsNullOrEmpty(f)) { inputFiles.Add(GetResource(f, cloudBlobClient, cloudBlobContainer)); } } using (BatchClient client = getBatchClient()) { // Create a Batch pool, VM configuration, Windows Server image string poolId = ConfigurationManager.AppSettings["PoolId"]; int poolNodeCount = 1; string poolVMSize = "STANDARD_A1_v2"; string jobId = "DotNetQuickstartJob"; Console.WriteLine("Creating pool [{0}]...", poolId); ImageReference imageReference = new ImageReference( publisher: "MicrosoftWindowsServer", offer: "WindowsServer", sku: "2012-R2-datacenter-smalldisk", version: "latest"); VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( imageReference: imageReference, nodeAgentSkuId: "batch.node.windows amd64"); try { if (client.PoolOperations.ListPools() != null && client.PoolOperations.ListPools().Count() > 0 && client.PoolOperations.ListPools().First(p => p.Id == poolId) != null) { client.PoolOperations.DeletePoolAsync(poolId).Wait(); } CloudPool pool = client.PoolOperations.CreatePool( poolId: poolId, targetDedicatedComputeNodes: poolNodeCount, virtualMachineSize: poolVMSize, virtualMachineConfiguration: virtualMachineConfiguration); pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", poolId); } else { throw; // Any other exception is unexpected } } catch (Exception ex) { } try { client.JobOperations.DeleteJob(jobId); CloudJob job = client.JobOperations.CreateJob(); job.Id = jobId; job.PoolInformation = new PoolInformation { PoolId = poolId }; job.Commit(); } catch (BatchException be) { // Accept the specific error code JobExists as that is expected if the job already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.JobExists) { Console.WriteLine("The job {0} already existed when we tried to create it", jobId); } else { throw; // Any other exception is unexpected } } // Create a collection to hold the tasks that we'll be adding to the job Console.WriteLine("Adding {0} tasks to job [{1}]...", inputFiles.Count, jobId); List <CloudTask> tasks = new List <CloudTask>(); // Create each of the tasks to process one of the input files. for (int i = 0; i < inputFiles.Count; i++) { string taskId = String.Format("Task{0}", i); string inputFilename = inputFiles[i].FilePath; string taskCommandLine = String.Format("cmd /c type {0}", inputFilename); CloudTask task = new CloudTask(taskId, taskCommandLine); task.ResourceFiles = new List <ResourceFile> { inputFiles[i] }; tasks.Add(task); } // Add all tasks to the job. client.JobOperations.AddTask(jobId, tasks); // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete. 
TimeSpan timeout = TimeSpan.FromMinutes(10); Console.WriteLine("Monitoring all tasks for 'Completed' state, timeout in {0}...", timeout); IEnumerable <CloudTask> addedTasks = client.JobOperations.ListTasks(jobId); client.Utilities.CreateTaskStateMonitor().WaitAll(addedTasks, TaskState.Completed, timeout); Console.WriteLine("All tasks reached state Completed."); // Print task output Console.WriteLine(); Console.WriteLine("Printing task output..."); IEnumerable <CloudTask> completedtasks = client.JobOperations.ListTasks(jobId); Stopwatch timer = new Stopwatch(); timer.Start(); foreach (CloudTask task in completedtasks) { string nodeId = String.Format(task.ComputeNodeInformation.ComputeNodeId); Console.WriteLine("Task: {0}", task.Id); Console.WriteLine("Node: {0}", nodeId); Console.WriteLine("Standard out:"); } // Print out some timing info timer.Stop(); Console.WriteLine(); Console.WriteLine("Sample end: {0}", DateTime.Now); Console.WriteLine("Elapsed time: {0}", timer.Elapsed); // Clean up Storage resources if (cloudBlobContainer != null) { cloudBlobContainer.DeleteIfExistsAsync().Wait(); Console.WriteLine("Container [{0}] deleted.", contName); } else { Console.WriteLine("Container [{0}] does not exist, skipping deletion.", contName); } // Clean up Batch resources (if the user so chooses) Console.WriteLine(); Console.Write("Delete job? [yes] no: "); string response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { client.JobOperations.DeleteJob(jobId); } Console.Write("Delete pool? [yes] no: "); response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { client.PoolOperations.DeletePool(poolId); } } }
static void Main(string[] args) { if (String.IsNullOrEmpty(BatchAccountName) || String.IsNullOrEmpty(BatchAccountKey) || String.IsNullOrEmpty(BatchAccountUrl) || String.IsNullOrEmpty(StorageAccountName) || String.IsNullOrEmpty(StorageAccountKey)) { throw new InvalidOperationException("One or more account credential strings have not been populated. Please ensure that your Batch and Storage account credentials have been specified."); } try { Console.WriteLine("Sample start: {0}", DateTime.Now); Console.WriteLine(); Stopwatch timer = new Stopwatch(); timer.Start(); // Construct the Storage account connection string string storageConnectionString = String.Format("DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}", StorageAccountName, StorageAccountKey); // Retrieve the storage account CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString); // Create the blob client, for use in obtaining references to blob storage containers CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient(); // Use the blob client to create the input container in Azure Storage const string inputContainerName = "input"; CloudBlobContainer container = blobClient.GetContainerReference(inputContainerName); container.CreateIfNotExists(); // The collection of data files that are to be processed by the tasks List <string> inputFilePaths = new List <string> { @"..\..\taskdata0.txt", @"..\..\taskdata1.txt", @"..\..\taskdata2.txt" }; // Upload the data files to Azure Storage. This is the data that will be processed by each of the tasks that are // executed on the compute nodes within the pool. List <ResourceFile> inputFiles = new List <ResourceFile>(); foreach (string filePath in inputFilePaths) { inputFiles.Add(UploadFileToContainer(blobClient, inputContainerName, filePath)); } // Get a Batch client using account creds BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(BatchAccountUrl, BatchAccountName, BatchAccountKey); using (BatchClient batchClient = BatchClient.Open(cred)) { // Create a Batch pool, VM configuration, Windows Server image Console.WriteLine("Creating pool [{0}]...", PoolId); ImageReference imageReference = new ImageReference( publisher: "MicrosoftWindowsServer", offer: "WindowsServer", sku: "2012-R2-datacenter-smalldisk", version: "latest"); VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration( imageReference: imageReference, nodeAgentSkuId: "batch.node.windows amd64"); try { CloudPool pool = batchClient.PoolOperations.CreatePool( poolId: PoolId, targetDedicatedComputeNodes: PoolNodeCount, virtualMachineSize: PoolVMSize, virtualMachineConfiguration: virtualMachineConfiguration); pool.Commit(); } catch (BatchException be) { // Accept the specific error code PoolExists as that is expected if the pool already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) { Console.WriteLine("The pool {0} already existed when we tried to create it", PoolId); } else { throw; // Any other exception is unexpected } } // Create a Batch job Console.WriteLine("Creating job [{0}]...", JobId); try { CloudJob job = batchClient.JobOperations.CreateJob(); job.Id = JobId; job.PoolInformation = new PoolInformation { PoolId = PoolId }; job.Commit(); } catch (BatchException be) { // Accept the specific error code JobExists as that is expected if the job already exists if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.JobExists) { Console.WriteLine("The job {0} already existed when we 
tried to create it", JobId); } else { throw; // Any other exception is unexpected } } // Create a collection to hold the tasks that we'll be adding to the job Console.WriteLine("Adding {0} tasks to job [{1}]...", inputFiles.Count, JobId); List <CloudTask> tasks = new List <CloudTask>(); // Create each of the tasks to process one of the input files. for (int i = 0; i < inputFiles.Count; i++) { string taskId = String.Format("Task{0}", i); string inputFilename = inputFiles[i].FilePath; string taskCommandLine = String.Format("cmd /c type {0}", inputFilename); CloudTask task = new CloudTask(taskId, taskCommandLine); task.ResourceFiles = new List <ResourceFile> { inputFiles[i] }; tasks.Add(task); } // Add all tasks to the job. batchClient.JobOperations.AddTask(JobId, tasks); // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete. TimeSpan timeout = TimeSpan.FromMinutes(30); Console.WriteLine("Monitoring all tasks for 'Completed' state, timeout in {0}...", timeout); IEnumerable <CloudTask> addedTasks = batchClient.JobOperations.ListTasks(JobId); batchClient.Utilities.CreateTaskStateMonitor().WaitAll(addedTasks, TaskState.Completed, timeout); Console.WriteLine("All tasks reached state Completed."); // Print task output Console.WriteLine(); Console.WriteLine("Printing task output..."); IEnumerable <CloudTask> completedtasks = batchClient.JobOperations.ListTasks(JobId); foreach (CloudTask task in completedtasks) { string nodeId = String.Format(task.ComputeNodeInformation.ComputeNodeId); Console.WriteLine("Task: {0}", task.Id); Console.WriteLine("Node: {0}", nodeId); Console.WriteLine("Standard out:"); Console.WriteLine(task.GetNodeFile(Constants.StandardOutFileName).ReadAsString()); } // Print out some timing info timer.Stop(); Console.WriteLine(); Console.WriteLine("Sample end: {0}", DateTime.Now); Console.WriteLine("Elapsed time: {0}", timer.Elapsed); // Clean up Storage resources if (container.DeleteIfExists()) { Console.WriteLine("Container [{0}] deleted.", inputContainerName); } else { Console.WriteLine("Container [{0}] does not exist, skipping deletion.", inputContainerName); } // Clean up Batch resources (if the user so chooses) Console.WriteLine(); Console.Write("Delete job? [yes] no: "); string response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { batchClient.JobOperations.DeleteJob(JobId); } Console.Write("Delete pool? [yes] no: "); response = Console.ReadLine().ToLower(); if (response != "n" && response != "no") { batchClient.PoolOperations.DeletePool(PoolId); } } } finally { Console.WriteLine(); Console.WriteLine("Sample complete, hit ENTER to exit..."); Console.ReadLine(); } }
private static async Task MainAsync() {
    if (String.IsNullOrEmpty(BatchAccountName) || String.IsNullOrEmpty(BatchAccountKey) || String.IsNullOrEmpty(BatchAccountUrl) ||
        String.IsNullOrEmpty(StorageAccountName) || String.IsNullOrEmpty(StorageAccountKey)) {
        throw new InvalidOperationException("One or more account credential strings have not been populated. Please ensure that your Batch and Storage account credentials have been specified.");
    }

    try {
        Console.WriteLine("Sample start: {0}", DateTime.Now);
        Console.WriteLine();
        Stopwatch timer = new Stopwatch();
        timer.Start();

        // Construct the Storage account connection string
        string storageConnectionString = String.Format("DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}", StorageAccountName, StorageAccountKey);

        // Retrieve the storage account
        CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString);

        // Create the blob client, for use in obtaining references to blob storage containers
        CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient();

        // Use the blob client to create the application, input, and output containers in Azure Storage
        const string appContainerName = "application";
        const string inputContainerName = "input";
        const string outputContainerName = "output";
        await CreateContainerIfNotExistAsync(blobClient, appContainerName);
        await CreateContainerIfNotExistAsync(blobClient, inputContainerName);
        await CreateContainerIfNotExistAsync(blobClient, outputContainerName);

        // The collection of data files that are to be processed by the tasks
        List<string> inputFilePaths = new List<string> {
            @"..\..\taskdata1.txt",
            @"..\..\taskdata2.txt",
            @"..\..\taskdata3.txt"
        };

        // Paths to the executable and its dependencies that will be executed by the tasks
        List<string> applicationFilePaths = new List<string> {
            // The project includes a project reference to the task application (WordCounter), allowing us
            // to determine the path of the task application binary dynamically
            typeof(WordCounter.Program).Assembly.Location,
            "Microsoft.WindowsAzure.Storage.dll"
        };

        // Upload the application and its dependencies to Azure Storage. This is the application that will
        // process the data files, and will be executed by each of the tasks on the compute nodes.
        List<ResourceFile> applicationFiles = await UploadFilesToContainerAsync(blobClient, appContainerName, applicationFilePaths);

        // Upload the data files. This is the data that will be processed by each of the tasks that are
        // executed on the compute nodes within the pool.
        List<ResourceFile> inputFiles = await UploadFilesToContainerAsync(blobClient, inputContainerName, inputFilePaths);

        // Obtain a shared access signature that provides write access to the output container to which
        // the tasks will upload their output.
        string outputContainerSasUrl = GetContainerSasUrl(blobClient, outputContainerName, SharedAccessBlobPermissions.Write);

        BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(BatchAccountUrl, BatchAccountName, BatchAccountKey);

        using (BatchClient batchClient = BatchClient.Open(cred)) {
            // Create a Batch pool, VM configuration, Windows Server image
            Console.WriteLine("Creating pool [{0}]...", PoolId);

            ImageReference imageReference = new ImageReference(
                publisher: "MicrosoftWindowsServer",
                offer: "WindowsServer",
                sku: "2012-R2-datacenter-smalldisk",
                version: "latest");

            VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(
                imageReference: imageReference,
                nodeAgentSkuId: "batch.node.windows amd64");

            try {
                CloudPool pool = batchClient.PoolOperations.CreatePool(
                    poolId: PoolId,
                    targetDedicatedComputeNodes: PoolNodeCount,
                    virtualMachineSize: PoolVMSize,
                    virtualMachineConfiguration: virtualMachineConfiguration);

                // Create and assign the StartTask that will be executed when compute nodes join the pool.
                // In this case, we copy the StartTask's resource files (that will be automatically downloaded
                // to the node by the StartTask) into the shared directory that all tasks will have access to.
                pool.StartTask = new StartTask {
                    // Specify a command line for the StartTask that copies the task application files to the
                    // node's shared directory. Every compute node in a Batch pool is configured with a number
                    // of pre-defined environment variables that can be referenced by commands or applications
                    // run by tasks.
                    // Since a successful execution of robocopy can return a non-zero exit code (e.g. 1 when one
                    // or more files were successfully copied), we need to manually exit with 0 for Batch to
                    // recognize StartTask execution success.
                    CommandLine = "cmd /c (robocopy %AZ_BATCH_TASK_WORKING_DIR% %AZ_BATCH_NODE_SHARED_DIR%) ^& IF %ERRORLEVEL% LEQ 1 exit 0",
                    ResourceFiles = applicationFiles,
                    WaitForSuccess = true
                };

                pool.Commit();
            } catch (BatchException be) {
                // Accept the specific error code PoolExists as that is expected if the pool already exists
                if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) {
                    Console.WriteLine("The pool {0} already existed when we tried to create it", PoolId);
                } else {
                    throw; // Any other exception is unexpected
                }
            }

            // Create the job that will run the tasks.
            await CreateJobAsync(batchClient, JobId, PoolId);

            // Add the tasks to the job. We need to supply a container shared access signature for the
            // tasks so that they can upload their output to Azure Storage.
            await AddTasksAsync(batchClient, JobId, inputFiles, outputContainerSasUrl);

            // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete
            await MonitorTasks(batchClient, JobId, TimeSpan.FromMinutes(30));

            // Download the task output files from the output Storage container to a local directory
            await DownloadBlobsFromContainerAsync(blobClient, outputContainerName, Environment.GetEnvironmentVariable("TEMP"));

            // Clean up Storage resources
            await DeleteContainerAsync(blobClient, appContainerName);
            await DeleteContainerAsync(blobClient, inputContainerName);
            await DeleteContainerAsync(blobClient, outputContainerName);

            // Print out some timing info
            timer.Stop();
            Console.WriteLine();
            Console.WriteLine("Sample end: {0}", DateTime.Now);
            Console.WriteLine("Elapsed time: {0}", timer.Elapsed);

            // Clean up Batch resources (if the user so chooses)
            Console.WriteLine();
            Console.Write("Delete job? [yes] no: ");
            string response = Console.ReadLine().ToLower();
            if (response != "n" && response != "no") {
                await batchClient.JobOperations.DeleteJobAsync(JobId);
            }

            Console.Write("Delete pool? [yes] no: ");
            response = Console.ReadLine().ToLower();
            if (response != "n" && response != "no") {
                await batchClient.PoolOperations.DeletePoolAsync(PoolId);
            }
        }
    } catch (Exception e) {
        // Log any failure so errors are not silently swallowed
        Console.WriteLine("An error occurred: {0}", e);
    }
}
static void Main(string[] args) {
    string storageConnectionString = $"DefaultEndpointsProtocol=https;AccountName={StorageAccountName};AccountKey={StorageAccountKey}";

    // Retrieve the storage account
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString);

    // Create the blob client
    CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient();

    // Upload the input files to blob storage
    const string inputContainerName = "batchinput";
    List<string> inputFilePaths = new List<string> { "taskdata0.txt", "taskdata1.txt", "taskdata2.txt" };
    List<ResourceFile> inputFiles = new List<ResourceFile>();
    foreach (string filePath in inputFilePaths) {
        inputFiles.Add(UploadFileToContainer(blobClient, inputContainerName, filePath));
    }

    // Get a SAS URL for the output container
    const string outputContainerName = "batchoutput";
    string outputContainerSasUrl = GetOutputContainerSasUrl(blobClient, outputContainerName);

    // Specify a container registry
    ContainerRegistry containerRegistry = new ContainerRegistry(
        registryServer: RegistryServer,
        userName: RegistryUserName,
        password: RegistryPassword);

    // Create the container configuration, prefetching Docker images from the container registry
    ContainerConfiguration containerConfig = new ContainerConfiguration() {
        ContainerImageNames = ContainerImageNames,
        ContainerRegistries = new List<ContainerRegistry> { containerRegistry }
    };

    // Create the virtual machine image reference - make sure to choose an image that supports containers
    ImageReference imageReference = new ImageReference(
        publisher: "MicrosoftWindowsServer",
        offer: "WindowsServer",
        sku: "2016-datacenter-with-containers",
        version: "latest");

    // Create the virtual machine configuration for the pool and set the container configuration
    VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(
        imageReference: imageReference,
        nodeAgentSkuId: "batch.node.windows amd64");
    virtualMachineConfiguration.ContainerConfiguration = containerConfig;

    BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(BatchAccountUrl, BatchAccountName, BatchAccountKey);

    using (BatchClient batchClient = BatchClient.Open(cred)) {
        Console.WriteLine("Creating pool [{0}]...", PoolId);

        try {
            CloudPool pool = batchClient.PoolOperations.CreatePool(
                poolId: PoolId,
                targetDedicatedComputeNodes: PoolNodeCount,
                virtualMachineSize: PoolVMSize,
                virtualMachineConfiguration: virtualMachineConfiguration);
            pool.Commit();
        } catch (BatchException be) {
            // Accept the specific error code PoolExists as that is expected if the pool already exists
            if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.PoolExists) {
                Console.WriteLine("The pool {0} already existed when we tried to create it", PoolId);
            } else {
                throw; // Any other exception is unexpected
            }
        }

        Console.WriteLine("Creating job [{0}]...", JobId);
        CloudJob job = null;

        try {
            job = batchClient.JobOperations.CreateJob();
            job.Id = JobId;
            job.PoolInformation = new PoolInformation { PoolId = PoolId };

            // Add a job preparation task to remove any existing Docker image
            Console.WriteLine("Adding job preparation task to job [{0}]...", JobId);
            string jobPreparationCmdLine = $"cmd /c docker rmi -f {ContainerImageNames[0]}:latest";
            JobPreparationTask jobPreparationTask = new JobPreparationTask(jobPreparationCmdLine) {
                UserIdentity = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Task))
            };
            job.JobPreparationTask = jobPreparationTask;

            job.Commit();
        } catch (BatchException be) {
            // Accept the specific error code JobExists as that is expected if the job already exists
            if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.JobExists) {
                Console.WriteLine("The job {0} already existed when we tried to create it", JobId);
            } else {
                throw; // Any other exception is unexpected
            }
        }

        if (job != null) {
            // Create a collection to hold the tasks that we'll be adding to the job
            Console.WriteLine("Adding {0} tasks to job [{1}]...", inputFiles.Count, JobId);
            List<CloudTask> tasks = new List<CloudTask>();

            // Create each of the tasks to process one of the input files.
            for (int i = 0; i < inputFiles.Count; i++) {
                string taskId = String.Format("Task{0}", i);
                string inputFilename = inputFiles[i].FilePath;
                string outputFileName = string.Format("out{0}", inputFilename);

                // Override the default entrypoint of the container
                string taskCommandLine = string.Format("C:\\ReadWriteFile\\ReadWriteFile.exe {0} {1}", inputFilename, outputFileName);

                // Specify the container the task will run in
                TaskContainerSettings cmdContainerSettings = new TaskContainerSettings(
                    imageName: "nimccollftacr.azurecr.io/batch/readwritefile");

                CloudTask task = new CloudTask(taskId, taskCommandLine);
                task.ContainerSettings = cmdContainerSettings;

                // Set the resource files and output files for the task
                task.ResourceFiles = new List<ResourceFile> { inputFiles[i] };
                task.OutputFiles = new List<OutputFile> {
                    new OutputFile(
                        filePattern: outputFileName,
                        destination: new OutputFileDestination(new OutputFileBlobContainerDestination(containerUrl: outputContainerSasUrl, path: outputFileName)),
                        uploadOptions: new OutputFileUploadOptions(OutputFileUploadCondition.TaskCompletion))
                };

                // You must elevate the identity of the task in order to run a container
                task.UserIdentity = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Task));
                tasks.Add(task);
            }

            // Add all tasks to the job.
            batchClient.JobOperations.AddTask(JobId, tasks);

            // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete.
            TimeSpan timeout = TimeSpan.FromMinutes(30);
            Console.WriteLine("Monitoring all tasks for 'Completed' state, timeout in {0}...", timeout);

            IEnumerable<CloudTask> addedTasks = batchClient.JobOperations.ListTasks(JobId);
            batchClient.Utilities.CreateTaskStateMonitor().WaitAll(addedTasks, TaskState.Completed, timeout);
            Console.WriteLine("All tasks reached state Completed.");

            // Print task output
            Console.WriteLine();
            Console.WriteLine("Printing task output...");

            IEnumerable<CloudTask> completedTasks = batchClient.JobOperations.ListTasks(JobId);
            foreach (CloudTask task in completedTasks) {
                Console.WriteLine("Task: {0}", task.Id);
                Console.WriteLine("Node: {0}", task.ComputeNodeInformation.ComputeNodeId);
                Console.WriteLine("Standard out:");
                Console.WriteLine(task.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
            }
        }

        // Clean up Batch resources (if the user so chooses)
        Console.WriteLine();
        Console.Write("Delete job? [yes] no: ");
        string response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no") {
            batchClient.JobOperations.DeleteJob(JobId);
        }

        Console.Write("Delete pool? [yes] no: ");
        response = Console.ReadLine().ToLower();
        if (response != "n" && response != "no") {
            batchClient.PoolOperations.DeletePool(PoolId);
        }
    }
}
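The container sample relies on a GetOutputContainerSasUrl helper that is not shown in this listing. A minimal sketch follows, assuming the Microsoft.WindowsAzure.Storage blob types; the helper name matches the call site above, while the two-hour expiry is an illustrative choice. It returns the full container URL with a write-only SAS token appended, which is the form OutputFileBlobContainerDestination expects.

// Hypothetical helper, sketched for illustration: returns a write-only container SAS URL
// that tasks can use to upload their output files.
private static string GetOutputContainerSasUrl(CloudBlobClient blobClient, string containerName) {
    CloudBlobContainer container = blobClient.GetContainerReference(containerName);
    container.CreateIfNotExists();

    // Write-only SAS valid for two hours; adjust the expiry to suit the workload.
    SharedAccessBlobPolicy sasConstraints = new SharedAccessBlobPolicy {
        SharedAccessExpiryTime = DateTime.UtcNow.AddHours(2),
        Permissions = SharedAccessBlobPermissions.Write
    };
    string sasToken = container.GetSharedAccessSignature(sasConstraints);

    return String.Format("{0}{1}", container.Uri, sasToken);
}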
/// <summary>
/// Creates a new pool.
/// </summary>
/// <param name="parameters">The parameters to use when creating the pool.</param>
public void CreatePool(NewPoolParameters parameters) {
    if (parameters == null) {
        throw new ArgumentNullException("parameters");
    }

    PoolOperations poolOperations = parameters.Context.BatchOMClient.PoolOperations;
    CloudPool pool = poolOperations.CreatePool();
    pool.Id = parameters.PoolId;
    pool.VirtualMachineSize = parameters.VirtualMachineSize;
    pool.DisplayName = parameters.DisplayName;
    pool.ResizeTimeout = parameters.ResizeTimeout;
    pool.MaxTasksPerComputeNode = parameters.MaxTasksPerComputeNode;
    pool.InterComputeNodeCommunicationEnabled = parameters.InterComputeNodeCommunicationEnabled;

    if (!string.IsNullOrEmpty(parameters.AutoScaleFormula)) {
        pool.AutoScaleEnabled = true;
        pool.AutoScaleEvaluationInterval = parameters.AutoScaleEvaluationInterval;
        pool.AutoScaleFormula = parameters.AutoScaleFormula;
    } else if (parameters.TargetDedicatedComputeNodes.HasValue || parameters.TargetLowPriorityComputeNodes.HasValue) {
        pool.TargetDedicatedComputeNodes = parameters.TargetDedicatedComputeNodes;
        pool.TargetLowPriorityComputeNodes = parameters.TargetLowPriorityComputeNodes;
    }

    if (parameters.TaskSchedulingPolicy != null) {
        pool.TaskSchedulingPolicy = parameters.TaskSchedulingPolicy.omObject;
    }

    if (parameters.StartTask != null) {
        Utils.Utils.StartTaskSyncCollections(parameters.StartTask);
        pool.StartTask = parameters.StartTask.omObject;
    }

    if (parameters.Metadata != null) {
        pool.Metadata = new List<MetadataItem>();
        foreach (DictionaryEntry m in parameters.Metadata) {
            pool.Metadata.Add(new MetadataItem(m.Key.ToString(), m.Value.ToString()));
        }
    }

    if (parameters.CertificateReferences != null) {
        pool.CertificateReferences = new List<CertificateReference>();
        foreach (PSCertificateReference c in parameters.CertificateReferences) {
            pool.CertificateReferences.Add(c.omObject);
        }
    }

    if (parameters.ApplicationPackageReferences != null) {
        pool.ApplicationPackageReferences = parameters.ApplicationPackageReferences.ToList().ConvertAll(apr => apr.omObject);
    }

    if (parameters.CloudServiceConfiguration != null) {
        pool.CloudServiceConfiguration = parameters.CloudServiceConfiguration.omObject;
    }

    if (parameters.VirtualMachineConfiguration != null) {
        Utils.Utils.VirtualMachineConfigurationSyncCollections(parameters.VirtualMachineConfiguration);
        pool.VirtualMachineConfiguration = parameters.VirtualMachineConfiguration.omObject;
    }

    if (parameters.NetworkConfiguration != null) {
        pool.NetworkConfiguration = parameters.NetworkConfiguration.omObject;
    }

    if (parameters.MountConfiguration != null) {
        pool.MountConfiguration = new List<MountConfiguration>();
        foreach (PSMountConfiguration m in parameters.MountConfiguration) {
            pool.MountConfiguration.Add(m.omObject);
        }
    }

    if (parameters.UserAccounts != null) {
        pool.UserAccounts = parameters.UserAccounts.ToList().ConvertAll(user => user.omObject);
    }

    if (parameters.ApplicationLicenses != null) {
        pool.ApplicationLicenses = parameters.ApplicationLicenses;
    }

    WriteVerbose(string.Format(Resources.CreatingPool, parameters.PoolId));
    pool.Commit(parameters.AdditionalBehaviors);
}
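The cmdlet above treats an auto-scale formula and fixed target node counts as mutually exclusive ways to size the pool. The same choice exists when building an unbound pool directly with the Batch .NET client; the sketch below shows the auto-scale variant, where the pool ID, VM size, evaluation interval, and formula are illustrative values, and batchClient and virtualMachineConfiguration stand in for objects like those created in the earlier samples.

// Illustrative only: an unbound pool sized by an auto-scale formula instead of fixed target node counts.
CloudPool autoScalePool = batchClient.PoolOperations.CreatePool(
    poolId: "autoscale-pool",                  // hypothetical pool ID
    virtualMachineSize: "standard_d2_v3",      // hypothetical VM size
    virtualMachineConfiguration: virtualMachineConfiguration);

autoScalePool.AutoScaleEnabled = true;
autoScalePool.AutoScaleEvaluationInterval = TimeSpan.FromMinutes(5);
// Example formula: scale dedicated nodes with the recent average number of active tasks, capped at 10.
autoScalePool.AutoScaleFormula = "$TargetDedicatedNodes = min(10, avg($ActiveTasks.GetSample(TimeInterval_Minute * 5)));";

autoScalePool.Commit();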