        /// <summary>
        /// Asynchronous method that delays execution until the specified pool reaches the specified state.
        /// </summary>
        /// <param name="client">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="poolId">The ID of the pool to monitor for the specified <see cref="AllocationState"/>.</param>
        /// <param name="targetAllocationState">The allocation state to wait for.</param>
        /// <param name="timeout">The maximum time to wait for the pool to reach the specified state.</param>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        public static async Task WaitForPoolToReachStateAsync(BatchClient client, string poolId, AllocationState targetAllocationState, TimeSpan timeout)
        {
            Console.WriteLine("Waiting for pool {0} to reach allocation state {1}", poolId, targetAllocationState);

            DateTime startTime = DateTime.UtcNow;
            DateTime timeoutAfterThisTimeUtc = startTime.Add(timeout);

            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,allocationState");
            CloudPool pool = await client.PoolOperations.GetPoolAsync(poolId, detail);

            while (pool.AllocationState != targetAllocationState)
            {
                Console.Write(".");

                await Task.Delay(TimeSpan.FromSeconds(10));
                await pool.RefreshAsync(detail);

                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
                {
                    Console.WriteLine();
                    Console.WriteLine("Timed out waiting for pool {0} to reach state {1}", poolId, targetAllocationState);

                    return;
                }
            }

            Console.WriteLine();
        }
        /// <summary>
        /// Prints task information to the console for each of the nodes in the specified pool.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="poolId">The ID of the <see cref="CloudPool"/> containing the nodes whose task information should be printed to the console.</param>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        public static async Task PrintNodeTasksAsync(BatchClient batchClient, string poolId)
        {
            Console.WriteLine("Listing Node Tasks");
            Console.WriteLine("==================");

            ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id,recentTasks");
            IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(poolId, nodeDetail);
            
            await nodes.ForEachAsync(node =>
            {
                Console.WriteLine();
                Console.WriteLine(node.Id + " tasks:");

                if (node.RecentTasks != null && node.RecentTasks.Any())
                {
                    foreach (TaskInformation task in node.RecentTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.TaskId, task.TaskState);
                    }
                }
                else
                {
                    // No tasks found for the node
                    Console.WriteLine("\tNone");
                }
            }).ConfigureAwait(continueOnCapturedContext: false);

            Console.WriteLine("==================");
        }
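
        // A minimal usage sketch for the two helpers above (illustrative only; the pool ID, timeout,
        // and method name below are assumptions, not part of the original sample).
        public static async Task ShowPoolActivityAsync(BatchClient batchClient)
        {
            const string poolId = "ExamplePool"; // hypothetical pool ID

            // Block until pool allocation settles (or the 30-minute timeout elapses)...
            await WaitForPoolToReachStateAsync(batchClient, poolId, AllocationState.Steady, TimeSpan.FromMinutes(30));

            // ...then print the most recent tasks seen on each compute node.
            await PrintNodeTasksAsync(batchClient, poolId);
        }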
Example #3
        /// <summary>
        /// Lists the jobs matching the specified filter options.
        /// </summary>
        /// <param name="options">The options to use when querying for jobs.</param>
        /// <returns>The jobs matching the specified filter options.</returns>
        public IEnumerable<PSCloudJob> ListJobs(ListJobOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single job matching the specified id
            if (!string.IsNullOrEmpty(options.JobId))
            {
                WriteVerbose(string.Format(Resources.GetJobById, options.JobId));
                JobOperations jobOperations = options.Context.BatchOMClient.JobOperations;
                ODATADetailLevel getDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                CloudJob job = jobOperations.GetJob(options.JobId, detailLevel: getDetailLevel, additionalBehaviors: options.AdditionalBehaviors);
                PSCloudJob psJob = new PSCloudJob(job);
                return new PSCloudJob[] { psJob };
            }
            // List jobs using the specified filter
            else
            {
                string jobScheduleId = options.JobSchedule == null ? options.JobScheduleId : options.JobSchedule.Id;
                bool filterByJobSchedule = !string.IsNullOrEmpty(jobScheduleId);
                ODATADetailLevel listDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);

                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = filterByJobSchedule ? string.Format(Resources.GetJobByODataAndJobSChedule, jobScheduleId) : Resources.GetJobByOData;
                    listDetailLevel.FilterClause = options.Filter;
                }
                else
                {
                    verboseLogString = filterByJobSchedule ? string.Format(Resources.GetJobByJobScheduleNoFilter, jobScheduleId) : Resources.GetJobNoFilter;
                }
                WriteVerbose(verboseLogString);

                IPagedEnumerable<CloudJob> jobs = null;
                if (filterByJobSchedule)
                {
                    JobScheduleOperations jobScheduleOperations = options.Context.BatchOMClient.JobScheduleOperations;
                    jobs = jobScheduleOperations.ListJobs(jobScheduleId, listDetailLevel, options.AdditionalBehaviors);
                }
                else
                {
                    JobOperations jobOperations = options.Context.BatchOMClient.JobOperations;
                    jobs = jobOperations.ListJobs(listDetailLevel, options.AdditionalBehaviors);
                }
                Func<CloudJob, PSCloudJob> mappingFunction = j => { return new PSCloudJob(j); };
                return PSPagedEnumerable<PSCloudJob, CloudJob>.CreateWithMaxCount(
                    jobs, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
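
        // The Filter value above flows straight into ODATADetailLevel.FilterClause. For reference,
        // a hand-built detail level for active jobs might look like this (sketch only; the method name
        // and clause strings are illustrative examples of Batch OData syntax, not cmdlet code).
        private static ODATADetailLevel BuildActiveJobsDetailLevel()
        {
            return new ODATADetailLevel(
                filterClause: "state eq 'active'",
                selectClause: "id,state,executionInfo");
        }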
        /// <summary>
        /// Lists the tasks matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for tasks</param>
        /// <returns>The tasks matching the specified filter options</returns>
        public IEnumerable<PSCloudTask> ListTasks(ListTaskOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single task matching the specified name
            if (!string.IsNullOrEmpty(options.TaskName))
            {
                WriteVerbose(string.Format(Resources.GBT_GetByName, options.TaskName, options.JobName, options.WorkItemName));
                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    ICloudTask task = wiManager.GetTask(options.WorkItemName, options.JobName, options.TaskName, additionalBehaviors: options.AdditionalBehaviors);
                    PSCloudTask psTask = new PSCloudTask(task);
                    return new PSCloudTask[] { psTask };
                }
            }
            // List tasks using the specified filter
            else
            {
                string jName = options.Job == null ? options.JobName : options.Job.Name;
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GBT_GetByOData, jName);
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = string.Format(Resources.GBT_GetNoFilter, jName);
                }
                WriteVerbose(verboseLogString);

                IEnumerableAsyncExtended<ICloudTask> tasks = null;
                if (options.Job != null)
                {
                    tasks = options.Job.omObject.ListTasks(odata, options.AdditionalBehaviors);
                }
                else
                {
                    using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                    {
                        tasks = wiManager.ListTasks(options.WorkItemName, options.JobName, odata, options.AdditionalBehaviors);
                    }
                }
                Func<ICloudTask, PSCloudTask> mappingFunction = t => { return new PSCloudTask(t); };
                return PSAsyncEnumerable<PSCloudTask, ICloudTask>.CreateWithMaxCount(
                    tasks, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
        /// <summary>
        /// Lists the task files matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for task files</param>
        /// <returns>The task files matching the specified filter options</returns>
        public IEnumerable<PSTaskFile> ListTaskFiles(ListTaskFileOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single task file matching the specified name
            if (!string.IsNullOrEmpty(options.TaskFileName))
            {
                WriteVerbose(string.Format(Resources.GBTF_GetByName, options.TaskFileName, options.TaskName));
                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    ITaskFile taskFile = wiManager.GetTaskFile(options.WorkItemName, options.JobName, options.TaskName, options.TaskFileName, options.AdditionalBehaviors);
                    PSTaskFile psTaskFile = new PSTaskFile(taskFile);
                    return new PSTaskFile[] { psTaskFile };
                }
            }
            // List task files using the specified filter
            else
            {
                string tName = options.Task == null ? options.TaskName : options.Task.Name;
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GBTF_GetByOData, tName);
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = string.Format(Resources.GBTF_NoFilter, tName);
                }
                WriteVerbose(verboseLogString);

                IEnumerableAsyncExtended<ITaskFile> taskFiles = null;
                if (options.Task != null)
                {
                    taskFiles = options.Task.omObject.ListTaskFiles(options.Recursive, odata, options.AdditionalBehaviors);
                }
                else
                {
                    using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                    {
                        taskFiles = wiManager.ListTaskFiles(options.WorkItemName, options.JobName, options.TaskName, options.Recursive, odata, options.AdditionalBehaviors);
                    }
                }
                Func<ITaskFile, PSTaskFile> mappingFunction = f => { return new PSTaskFile(f); };
                return PSAsyncEnumerable<PSTaskFile, ITaskFile>.CreateWithMaxCount(
                    taskFiles, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
        /// <summary>
        /// Lists the tasks matching the specified filter options.
        /// </summary>
        /// <param name="options">The options to use when querying for tasks.</param>
        /// <returns>The tasks matching the specified filter options.</returns>
        public IEnumerable<PSCloudTask> ListTasks(ListTaskOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single task matching the specified id
            if (!string.IsNullOrEmpty(options.TaskId))
            {
                WriteVerbose(string.Format(Resources.GetTaskById, options.TaskId, options.JobId));
                JobOperations jobOperations = options.Context.BatchOMClient.JobOperations;
                ODATADetailLevel getDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                CloudTask task = jobOperations.GetTask(options.JobId, options.TaskId, detailLevel: getDetailLevel, additionalBehaviors: options.AdditionalBehaviors);
                PSCloudTask psTask = new PSCloudTask(task);
                return new PSCloudTask[] { psTask };
            }
            // List tasks using the specified filter
            else
            {
                string jobId = options.Job == null ? options.JobId : options.Job.Id;
                string verboseLogString = null;
                ODATADetailLevel listDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GetTaskByOData, jobId);
                    listDetailLevel.FilterClause = options.Filter;
                }
                else
                {
                    verboseLogString = string.Format(Resources.GetTaskNoFilter, jobId);
                }
                WriteVerbose(verboseLogString);

                IPagedEnumerable<CloudTask> tasks = null;
                if (options.Job != null)
                {
                    tasks = options.Job.omObject.ListTasks(listDetailLevel, options.AdditionalBehaviors);
                }
                else
                {
                    JobOperations jobOperations = options.Context.BatchOMClient.JobOperations;
                    tasks = jobOperations.ListTasks(options.JobId, listDetailLevel, options.AdditionalBehaviors);
                }
                Func<CloudTask, PSCloudTask> mappingFunction = t => { return new PSCloudTask(t); };
                return PSPagedEnumerable<PSCloudTask, CloudTask>.CreateWithMaxCount(
                    tasks, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
Example #7
        /// <summary>
        /// Lists the jobs matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for jobs</param>
        /// <returns>The jobs matching the specified filter options</returns>
        public IEnumerable<PSCloudJob> ListJobs(ListJobOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            string wiName = options.WorkItem == null ? options.WorkItemName : options.WorkItem.Name;

            // Get the single job matching the specified name
            if (!string.IsNullOrEmpty(options.JobName))
            {
                WriteVerbose(string.Format(Resources.GBJ_GetByName, options.JobName, wiName));
                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    ICloudJob job = wiManager.GetJob(wiName, options.JobName, additionalBehaviors: options.AdditionalBehaviors);
                    PSCloudJob psJob = new PSCloudJob(job);
                    return new PSCloudJob[] { psJob };
                }
            }
            // List jobs using the specified filter
            else
            {
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GBJ_GetByOData, wiName);
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = string.Format(Resources.GBJ_GetNoFilter, wiName);
                }
                WriteVerbose(verboseLogString);

                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    IEnumerableAsyncExtended<ICloudJob> jobs = wiManager.ListJobs(wiName, odata, options.AdditionalBehaviors);
                    Func<ICloudJob, PSCloudJob> mappingFunction = j => { return new PSCloudJob(j); };
                    return PSAsyncEnumerable<PSCloudJob, ICloudJob>.CreateWithMaxCount(
                        jobs, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
                }             
            }
        }
Example #8
        /// <summary>
        /// Lists the VMs matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for VMs</param>
        /// <returns>The VMs matching the specified filter options</returns>
        public IEnumerable<PSVM> ListVMs(ListVMOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            string poolName = options.Pool == null ? options.PoolName : options.Pool.Name;

            // Get the single vm matching the specified name
            if (!string.IsNullOrEmpty(options.VMName))
            {
                WriteVerbose(string.Format(Resources.GBVM_GetByName, options.VMName, poolName));
                using (IPoolManager poolManager = options.Context.BatchOMClient.OpenPoolManager())
                {
                    IVM vm = poolManager.GetVM(poolName, options.VMName, additionalBehaviors: options.AdditionalBehaviors);
                    PSVM psVM = new PSVM(vm);
                    return new PSVM[] { psVM };
                }
            }
            // List vms using the specified filter
            else
            {
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GBVM_GetByOData, poolName);
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = string.Format(Resources.GBVM_NoFilter, poolName);
                }
                WriteVerbose(verboseLogString);

                using (IPoolManager poolManager = options.Context.BatchOMClient.OpenPoolManager())
                {
                    IEnumerableAsyncExtended<IVM> vms = poolManager.ListVMs(poolName, odata, options.AdditionalBehaviors);
                    Func<IVM, PSVM> mappingFunction = v => { return new PSVM(v); };
                    return PSAsyncEnumerable<PSVM, IVM>.CreateWithMaxCount(
                        vms, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
                }
            }
        }
        /// <summary>
        /// Lists the WorkItems matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for WorkItems</param>
        /// <returns>The WorkItems matching the specified filter options</returns>
        public IEnumerable<PSCloudWorkItem> ListWorkItems(ListWorkItemOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single WorkItem matching the specified name
            if (!string.IsNullOrWhiteSpace(options.WorkItemName))
            {
                WriteVerbose(string.Format(Resources.GBWI_GetByName, options.WorkItemName));
                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    ICloudWorkItem workItem = wiManager.GetWorkItem(options.WorkItemName, additionalBehaviors: options.AdditionalBehaviors);
                    PSCloudWorkItem psWorkItem = new PSCloudWorkItem(workItem);
                    return new PSCloudWorkItem[] { psWorkItem };
                }
            }
            // List WorkItems using the specified filter
            else
            {
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = Resources.GBWI_GetByOData;
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = Resources.GBWI_NoFilter;
                }
                WriteVerbose(verboseLogString);

                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    IEnumerableAsyncExtended<ICloudWorkItem> workItems = wiManager.ListWorkItems(odata, options.AdditionalBehaviors);
                    Func<ICloudWorkItem, PSCloudWorkItem> mappingFunction = w => { return new PSCloudWorkItem(w); };
                    return PSAsyncEnumerable<PSCloudWorkItem, ICloudWorkItem>.CreateWithMaxCount(
                        workItems, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
                }
            }
        }
Example #10
        /// <summary>
        /// Lists the pools matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for pools</param>
        /// <returns>The pools matching the specified filter options</returns>
        public IEnumerable<PSCloudPool> ListPools(ListPoolOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single pool matching the specified name
            if (!string.IsNullOrWhiteSpace(options.PoolName))
            {
                WriteVerbose(string.Format(Resources.GBP_GetByName, options.PoolName));
                using (IPoolManager poolManager = options.Context.BatchOMClient.OpenPoolManager())
                {
                    ICloudPool pool = poolManager.GetPool(options.PoolName, additionalBehaviors: options.AdditionalBehaviors);
                    PSCloudPool psPool = new PSCloudPool(pool);
                    return new PSCloudPool[] { psPool };
                }
            }
            // List pools using the specified filter
            else
            {
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = Resources.GBP_GetByOData;
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = Resources.GBP_NoFilter;
                }
                WriteVerbose(verboseLogString);

                using (IPoolManager poolManager = options.Context.BatchOMClient.OpenPoolManager())
                {
                    IEnumerableAsyncExtended<ICloudPool> pools = poolManager.ListPools(odata, options.AdditionalBehaviors);
                    Func<ICloudPool, PSCloudPool> mappingFunction = p => { return new PSCloudPool(p); };
                    return PSAsyncEnumerable<PSCloudPool, ICloudPool>.CreateWithMaxCount(
                        pools, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));            
                }
            }
        }
Example #11
        // Lists the node files under a task.
        private IEnumerable<PSNodeFile> ListNodeFilesByTask(ListNodeFileOptions options)
        {
            // Get the single node file matching the specified name
            if (!string.IsNullOrEmpty(options.NodeFileName))
            {
                WriteVerbose(string.Format(Resources.GBTF_GetByName, options.NodeFileName, options.TaskId));
                JobOperations jobOperations = options.Context.BatchOMClient.JobOperations;
                NodeFile nodeFile = jobOperations.GetNodeFile(options.JobId, options.TaskId, options.NodeFileName, options.AdditionalBehaviors);
                PSNodeFile psNodeFile = new PSNodeFile(nodeFile);
                return new PSNodeFile[] { psNodeFile };
            }
            // List node files using the specified filter
            else
            {
                string taskId = options.Task == null ? options.TaskId : options.Task.Id;
                ODATADetailLevel odata = null;
                string verboseLogString = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GBTF_GetByOData, taskId);
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    verboseLogString = string.Format(Resources.GBTF_NoFilter, taskId);
                }
                WriteVerbose(verboseLogString);

                IPagedEnumerable<NodeFile> nodeFiles = null;
                if (options.Task != null)
                {
                    nodeFiles = options.Task.omObject.ListNodeFiles(options.Recursive, odata, options.AdditionalBehaviors);
                }
                else
                {
                    JobOperations jobOperations = options.Context.BatchOMClient.JobOperations;
                    nodeFiles = jobOperations.ListNodeFiles(options.JobId, options.TaskId, options.Recursive, odata, options.AdditionalBehaviors);
                }
                Func<NodeFile, PSNodeFile> mappingFunction = f => { return new PSNodeFile(f); };
                return PSPagedEnumerable<PSNodeFile, NodeFile>.CreateWithMaxCount(
                    nodeFiles, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
        /// <summary>
        /// Lists the compute nodes matching the specified filter options.
        /// </summary>
        /// <param name="options">The options to use when querying for compute nodes.</param>
        /// <returns>The compute nodes matching the specified filter options.</returns>
        public IEnumerable<PSComputeNode> ListComputeNodes(ListComputeNodeOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            string poolId = options.Pool == null ? options.PoolId : options.Pool.Id;

            // Get the single compute node matching the specified id
            if (!string.IsNullOrEmpty(options.ComputeNodeId))
            {
                WriteVerbose(string.Format(Resources.GetComputeNodeById, options.ComputeNodeId, poolId));
                PoolOperations poolOperations = options.Context.BatchOMClient.PoolOperations;
                ODATADetailLevel getDetailLevel = new ODATADetailLevel(selectClause: options.Select);
                ComputeNode computeNode = poolOperations.GetComputeNode(poolId, options.ComputeNodeId, detailLevel: getDetailLevel, additionalBehaviors: options.AdditionalBehaviors);
                PSComputeNode psComputeNode = new PSComputeNode(computeNode);
                return new PSComputeNode[] { psComputeNode };
            }
            // List compute nodes using the specified filter
            else
            {
                string verboseLogString = null;
                ODATADetailLevel listDetailLevel = new ODATADetailLevel(selectClause: options.Select);
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = string.Format(Resources.GetComputeNodeByOData, poolId);
                    listDetailLevel.FilterClause = options.Filter;
                }
                else
                {
                    verboseLogString = string.Format(Resources.GetComputeNodeNoFilter, poolId);
                }
                WriteVerbose(verboseLogString);

                PoolOperations poolOperations = options.Context.BatchOMClient.PoolOperations;
                IPagedEnumerable<ComputeNode> computeNodes = poolOperations.ListComputeNodes(poolId, listDetailLevel, options.AdditionalBehaviors);
                Func<ComputeNode, PSComputeNode> mappingFunction = c => { return new PSComputeNode(c); };
                return PSPagedEnumerable<PSComputeNode, ComputeNode>.CreateWithMaxCount(
                    computeNodes, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
        /// <summary>
        /// Lists the certificates matching the specified filter options.
        /// </summary>
        /// <param name="options">The options to use when querying for certificates.</param>
        /// <returns>The certificates matching the specified filter options.</returns>
        public IEnumerable<PSCertificate> ListCertificates(ListCertificateOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single certificate matching the specified thumbprint
            if (!string.IsNullOrWhiteSpace(options.Thumbprint))
            {
                WriteVerbose(string.Format(Resources.GetCertificateByThumbprint, options.Thumbprint));
                CertificateOperations certOperations = options.Context.BatchOMClient.CertificateOperations;
                ODATADetailLevel getDetailLevel = new ODATADetailLevel(selectClause: options.Select);
                Certificate certificate = certOperations.GetCertificate(options.ThumbprintAlgorithm, options.Thumbprint,
                    detailLevel: getDetailLevel, additionalBehaviors: options.AdditionalBehaviors);
                PSCertificate psCertificate = new PSCertificate(certificate);
                return new PSCertificate[] { psCertificate };
            }
            // List certificates using the specified filter
            else
            {
                string verboseLogString = null;
                ODATADetailLevel listDetailLevel = new ODATADetailLevel(selectClause: options.Select);
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = Resources.GetCertificatesByFilter;
                    listDetailLevel.FilterClause = options.Filter;
                }
                else
                {
                    verboseLogString = Resources.GetCertificatesNoFilter;
                }
                WriteVerbose(verboseLogString);

                CertificateOperations certOperations = options.Context.BatchOMClient.CertificateOperations;
                IPagedEnumerable<Certificate> certificates = certOperations.ListCertificates(listDetailLevel, options.AdditionalBehaviors);
                Func<Certificate, PSCertificate> mappingFunction = c => { return new PSCertificate(c); };
                return PSPagedEnumerable<PSCertificate, Certificate>.CreateWithMaxCount(
                    certificates, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
Example #14
        /// <summary>
        /// Lists the job schedules matching the specified filter options.
        /// </summary>
        /// <param name="options">The options to use when querying for job schedules.</param>
        /// <returns>The job schedules matching the specified filter options.</returns>
        public IEnumerable<PSCloudJobSchedule> ListJobSchedules(ListJobScheduleOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single job schedule matching the specified id
            if (!string.IsNullOrWhiteSpace(options.JobScheduleId))
            {
                WriteVerbose(string.Format(Resources.GetJobScheduleById, options.JobScheduleId));
                JobScheduleOperations jobScheduleOperations = options.Context.BatchOMClient.JobScheduleOperations;
                ODATADetailLevel getDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                CloudJobSchedule jobSchedule = jobScheduleOperations.GetJobSchedule(options.JobScheduleId, detailLevel: getDetailLevel, additionalBehaviors: options.AdditionalBehaviors);
                PSCloudJobSchedule psJobSchedule = new PSCloudJobSchedule(jobSchedule);
                return new PSCloudJobSchedule[] { psJobSchedule };
            }
            // List job schedules using the specified filter
            else
            {
                string verboseLogString = null;
                ODATADetailLevel listDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = Resources.GetJobScheduleByOData;
                    listDetailLevel.FilterClause = options.Filter;
                }
                else
                {
                    verboseLogString = Resources.GetJobScheduleNoFilter;
                }
                WriteVerbose(verboseLogString);

                JobScheduleOperations jobScheduleOperations = options.Context.BatchOMClient.JobScheduleOperations;
                IPagedEnumerable<CloudJobSchedule> workItems = jobScheduleOperations.ListJobSchedules(listDetailLevel, options.AdditionalBehaviors);
                Func<CloudJobSchedule, PSCloudJobSchedule> mappingFunction = j => { return new PSCloudJobSchedule(j); };
                return PSPagedEnumerable<PSCloudJobSchedule, CloudJobSchedule>.CreateWithMaxCount(
                    workItems, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
Example #16
        /// <summary>
        /// Lists the WorkItems matching the specified filter options
        /// </summary>
        /// <param name="options">The options to use when querying for WorkItems</param>
        /// <returns>The WorkItems matching the specified filter options</returns>
        public IEnumerable<PSCloudWorkItem> ListWorkItems(ListWorkItemOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single WorkItem matching the specified name
            if (!string.IsNullOrWhiteSpace(options.WorkItemName))
            {
                WriteVerbose(string.Format(Resources.GBWI_GetByName, options.WorkItemName));
                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    ICloudWorkItem workItem = wiManager.GetWorkItem(options.WorkItemName, additionalBehaviors: options.AdditionalBehaviors);
                    PSCloudWorkItem psWorkItem = new PSCloudWorkItem(workItem);
                    return new PSCloudWorkItem[] { psWorkItem };
                }
            }
            // List WorkItems using the specified filter
            else
            {
                ODATADetailLevel odata = null;
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    WriteVerbose(Resources.GBWI_GetByOData);
                    odata = new ODATADetailLevel(filterClause: options.Filter);
                }
                else
                {
                    WriteVerbose(Resources.GBWI_NoFilter);
                }

                using (IWorkItemManager wiManager = options.Context.BatchOMClient.OpenWorkItemManager())
                {
                    IEnumerableAsyncExtended<ICloudWorkItem> workItems = wiManager.ListWorkItems(odata, options.AdditionalBehaviors);
                    Func<ICloudWorkItem, PSCloudWorkItem> mappingFunction = w => { return new PSCloudWorkItem(w); };
                    return PSAsyncEnumerable<PSCloudWorkItem, ICloudWorkItem>.CreateWithMaxCount(
                        workItems, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
                }
            }
        }
Example #17
        /// <summary>
        /// Lists the pools matching the specified filter options.
        /// </summary>
        /// <param name="options">The options to use when querying for pools.</param>
        /// <returns>The pools matching the specified filter options.</returns>
        public IEnumerable<PSCloudPool> ListPools(ListPoolOptions options)
        {
            if (options == null)
            {
                throw new ArgumentNullException("options");
            }

            // Get the single pool matching the specified id
            if (!string.IsNullOrWhiteSpace(options.PoolId))
            {
                WriteVerbose(string.Format(Resources.GetPoolById, options.PoolId));
                PoolOperations poolOperations = options.Context.BatchOMClient.PoolOperations;
                ODATADetailLevel getDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                CloudPool pool = poolOperations.GetPool(options.PoolId, detailLevel: getDetailLevel, additionalBehaviors: options.AdditionalBehaviors);
                PSCloudPool psPool = new PSCloudPool(pool);
                return new PSCloudPool[] { psPool };
            }
            // List pools using the specified filter
            else
            {
                string verboseLogString = null;
                ODATADetailLevel listDetailLevel = new ODATADetailLevel(selectClause: options.Select, expandClause: options.Expand);
                if (!string.IsNullOrEmpty(options.Filter))
                {
                    verboseLogString = Resources.GetPoolByOData;
                    listDetailLevel.FilterClause = options.Filter;
                }
                else
                {
                    verboseLogString = Resources.GetPoolNoFilter;
                }
                WriteVerbose(verboseLogString);

                PoolOperations poolOperations = options.Context.BatchOMClient.PoolOperations;
                IPagedEnumerable<CloudPool> pools = poolOperations.ListPools(listDetailLevel, options.AdditionalBehaviors);
                Func<CloudPool, PSCloudPool> mappingFunction = p => { return new PSCloudPool(p); };
                return PSPagedEnumerable<PSCloudPool, CloudPool>.CreateWithMaxCount(
                    pools, mappingFunction, options.MaxCount, () => WriteVerbose(string.Format(Resources.MaxCount, options.MaxCount)));
            }
        }
Example #19
        /// <summary>
        /// List all Azure jobs.
        /// </summary>
        /// <param name="ct">Cancellation token.</param>
        /// <param name="ShowProgress">Function to report progress as percentage in range [0, 100].</param>
        /// <param name="AddJobHandler">Callback which will be run each time a job is loaded.</param>
        public async void ListJobsAsync(CancellationToken ct, Action<double> ShowProgress, Action<JobDetails> AddJobHandler)
        {
            try
            {
                if (batchClient == null || storageClient == null)
                {
                    return;
                }

                ShowProgress(0);

                ODATADetailLevel jobDetailLevel = new ODATADetailLevel {
                    SelectClause = "id,displayName,state,executionInfo,stats", ExpandClause = "stats"
                };

                // Download raw job list via the Azure API.
                List<CloudJob> cloudJobs = await batchClient.JobOperations.ListJobs(jobDetailLevel).ToListAsync(ct);

                if (ct.IsCancellationRequested)
                {
                    return;
                }

                // Parse jobs into a list of JobDetails objects.
                for (int i = 0; i < cloudJobs.Count; i++)
                {
                    if (ct.IsCancellationRequested)
                    {
                        return;
                    }

                    ShowProgress(100.0 * i / cloudJobs.Count);
                    AddJobHandler(await GetJobDetails(cloudJobs[i], ct));
                }
            }
            finally
            {
                ShowProgress(100);
            }
        }
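
        // Illustrative caller for ListJobsAsync (the names below are assumptions, not from the original
        // code). Note that because ListJobsAsync is declared async void, this call returns before the
        // jobs finish loading, and any exception thrown after its first await cannot be observed here.
        public void LoadJobsExample()
        {
            CancellationTokenSource cts = new CancellationTokenSource();
            List<JobDetails> jobs = new List<JobDetails>();

            ListJobsAsync(
                cts.Token,
                progress => Console.WriteLine("Loading jobs: {0:F0}%", progress),
                job => jobs.Add(job));
        }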
Example #20
        /// <summary>
        /// Lists the usage metrics, aggregated by pool across individual time intervals, for the specified account.
        /// </summary>
        /// <param name="options">The options to use when aggregating usage for pools.</param>
        /// <returns>The pool usage metrics matching the specified options.</returns>
        public IEnumerable<PSPoolUsageMetrics> ListPoolUsageMetrics(ListPoolUsageOptions options)
        {
            string verboseLogString = null;
            ODATADetailLevel detailLevel = null;

            if (!string.IsNullOrEmpty(options.Filter))
            {
                verboseLogString = Resources.GetPoolUsageMetricsByFilter;
                detailLevel = new ODATADetailLevel(filterClause: options.Filter);
            }
            else
            {
                verboseLogString = Resources.GetPoolUsageMetricsByNoFilter;
            }

            PoolOperations poolOperations = options.Context.BatchOMClient.PoolOperations;
            IPagedEnumerable<PoolUsageMetrics> poolUsageMetrics =
                poolOperations.ListPoolUsageMetrics(options.StartTime, options.EndTime, detailLevel, options.AdditionalBehaviors);

            return PSPagedEnumerable<PSPoolUsageMetrics, PoolUsageMetrics>.CreateWithMaxCount(
                poolUsageMetrics, p => new PSPoolUsageMetrics(p), Int32.MaxValue, () => WriteVerbose(verboseLogString));
        }
Example #21
        private static void MonitorTasksUntilCompletion(int experimentId, string jobId, Task collectionTask, BatchClient batchClient)
        {
            // Detail level for failed tasks: completed tasks that exited with a non-zero code.
            ODATADetailLevel failedMonitorLevel = new ODATADetailLevel();
            failedMonitorLevel.FilterClause = "(state eq 'completed') and (executionInfo/exitCode ne 0)";
            failedMonitorLevel.SelectClause = "id,displayName,executionInfo";

            // Detail level for counting completed tasks; only the id is needed.
            ODATADetailLevel completedMonitorLevel = new ODATADetailLevel();
            completedMonitorLevel.FilterClause = "(state eq 'completed')";
            completedMonitorLevel.SelectClause = "id";
            do
            {
                Console.WriteLine("Fetching failed tasks...");
                badResults = batchClient.JobOperations.ListTasks(jobId, failedMonitorLevel)
                             .Select(task => new AzureBenchmarkResult
                {
                    AcquireTime         = task.ExecutionInformation.StartTime ?? DateTime.MinValue,
                    BenchmarkFileName   = task.DisplayName,
                    ExitCode            = task.ExecutionInformation.ExitCode,
                    ExperimentID        = experimentId,
                    StdErr              = InfrastructureErrorPrefix + task.ExecutionInformation.FailureInformation.Message,
                    StdErrExtStorageIdx = "",
                    StdOut              = "",
                    StdOutExtStorageIdx = "",
                    NormalizedRuntime   = 0,
                    PeakMemorySizeMB    = 0,
                    Properties          = new Dictionary <string, string>(),
                    Status              = ResultStatus.InfrastructureError,
                    TotalProcessorTime  = TimeSpan.Zero,
                    WallClockTime       = TimeSpan.Zero
                }).ToList();
                Console.WriteLine("Done fetching failed tasks. Got {0}.", badResults.Count);
                Console.WriteLine("Fetching completed tasks...");
                completedTasksCount = batchClient.JobOperations.ListTasks(jobId, completedMonitorLevel).Count();
                Console.WriteLine("Done fetching completed tasks. Got {0}.", completedTasksCount);
            }while (!collectionTask.Wait(30000));
        }
Example #22
        /// <summary>
        /// Deletes an Azure Batch job
        /// </summary>
        /// <param name="tesTaskId">The unique TES task ID</param>
        public async Task DeleteBatchJobAsync(string tesTaskId)
        {
            var jobFilter = new ODATADetailLevel
            {
                FilterClause = $"startswith(id,'{tesTaskId}{BatchJobAttemptSeparator}') and state ne 'deleting'",
                SelectClause = "id"
            };

            var batchJobsToDelete = await batchClient.JobOperations.ListJobs(jobFilter).ToListAsync();

            var count = batchJobsToDelete.Count();

            if (count > 1)
            {
                logger.LogWarning($"Found more than one active job for TES task {tesTaskId}");
            }

            foreach (var job in batchJobsToDelete)
            {
                logger.LogInformation($"Deleting job {job.Id}");
                await batchClient.JobOperations.DeleteJobAsync(job.Id);
            }
        }
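
        // Illustrative only: if BatchJobAttemptSeparator were "-" (an assumption; the real constant is
        // defined elsewhere) and tesTaskId were "task1", the filter clause sent to the Batch service
        // would be:
        //
        //     startswith(id,'task1-') and state ne 'deleting'
        //
        // i.e. every job whose ID begins with the TES task ID plus the separator and that is not
        // already being deleted.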
Example #23
        public async Task TestRequestWhichDoesntSupportFilter()
        {
            using (BatchClient client = await BatchClient.OpenAsync(ClientUnitTestCommon.CreateDummySharedKeyCredential()))
            {
                BatchClientBehavior behavior = new Protocol.RequestInterceptor(request =>
                {
                    PoolGetBatchRequest poolGetRequest = request as PoolGetBatchRequest;
                    poolGetRequest.ServiceRequestFunc = t =>
                    {
                        return Task.FromResult(new AzureOperationResponse<CloudPool, PoolGetHeaders>()
                        {
                            Body = new CloudPool()
                        });
                    };
                });

                const string dummyPoolId = "dummy";
                DetailLevel detailLevel = new ODATADetailLevel(filterClause: "foo");
                ArgumentException e = await Assert.ThrowsAsync<ArgumentException>(async () => await client.PoolOperations.GetPoolAsync(dummyPoolId, detailLevel, new[] { behavior }));

                Assert.Contains("Type Microsoft.Azure.Batch.Protocol.BatchRequests.PoolGetBatchRequest does not support a filter clause.", e.Message);
                Assert.Equal("detailLevel", e.ParamName);
            }
        }
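
        // For contrast with the test above (sketch only, not part of the test suite): a Get request
        // accepts select and expand clauses but not a filter, so a detail level like the following
        // would not trigger the ArgumentException.
        //
        //     DetailLevel selectOnly = new ODATADetailLevel(selectClause: "id,allocationState");
        //     CloudPool pool = await client.PoolOperations.GetPoolAsync("dummy", selectOnly);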
Example #24
        private async Task<bool> ABCheck(BatchClient client)
        {
            bool breturn = true;

            if (client == null)
            {
                throw new ApplicationException("ABCheck - batchclient was null");
            }

            // Are any A or B jobs running? If so, write a message and get out.
            const string ACheck = "(state eq 'Active') and startswith(id, 'JOBA')";
            const string BCheck = "(state eq 'Active') and startswith(id, 'JOBB')";

            ODATADetailLevel detailLevel = new ODATADetailLevel();

            detailLevel.FilterClause = ACheck;
            detailLevel.SelectClause = "id, stats";
            detailLevel.ExpandClause = "stats";

            List<CloudJob> jobs = await client.JobOperations.ListJobs(detailLevel).ToListAsync();

            if (jobs.Count > 0)
            {
                return false;
            }

            detailLevel.FilterClause = BCheck;
            jobs.Clear();
            jobs = await client.JobOperations.ListJobs(detailLevel).ToListAsync();

            if (jobs.Count > 0)
            {
                return false;
            }

            return breturn;
        }
Example #25
        /// <summary>
        /// Prints task information to the console for each of the nodes in the specified pool.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="poolId">The ID of the <see cref="CloudPool"/> containing the nodes whose task information
        /// should be printed to the console.</param>
        private static void PrintNodeTasks(BatchClient batchClient, string poolId)
        {
            ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id,recentTasks");

            // Obtain and print the task information for each of the compute nodes in the pool.
            foreach (ComputeNode node in batchClient.PoolOperations.ListComputeNodes(poolId, nodeDetail))
            {
                Console.WriteLine();
                Console.WriteLine(node.Id + " tasks:");

                if (node.RecentTasks != null && node.RecentTasks.Any())
                {
                    foreach (TaskInformation task in node.RecentTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.TaskId, task.TaskState);
                    }
                }
                else
                {
                    // No tasks found for the node
                    Console.WriteLine("\tNone");
                }
            }
        }
Example #26
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize     = "small";
            const int nodeCount       = 4;
            const int maxTasksPerNode = 4;
            const int taskCount       = 32;

            // Ensure there are enough tasks to help avoid hitting some timeout conditions below
            const int minimumTaskCount = nodeCount * maxTasksPerNode * 2;
            if (taskCount < minimumTaskCount)
            {
                Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
                Console.WriteLine();

                // Not enough tasks, exit the application
                return;
            }
  
            // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
            // values to simulate variable task duration
            const int minPings = 30;
            const int maxPings = 60;

            const string poolId = "ParallelTasksSamplePool";
            const string jobId  = "ParallelTasksSampleJob";

            // Amount of time to wait before timing out (potentially) long-running tasks
            TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);
            
            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                      poolId,
                                                                      nodeSize,
                                                                      nodeCount,
                                                                      maxTasksPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);
                
                // The job's tasks ping localhost a random number of times between minPings and maxPings.
                // Adjust the minPings/maxPings values above to experiment with different task durations.
                Random rand = new Random();
                List<CloudTask> tasks = new List<CloudTask>();
                for (int i = 1; i <= taskCount; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(3, '0');
                    string taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
                // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be 
                // scheduled to execute on a compute node as soon any node has reached Idle state. Because the focus of this sample 
                // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to 
                // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for 
                // parallelization.
                await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Pause again to wait until *all* nodes are running tasks
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Print out task assignment information.
                Console.WriteLine();
                await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);
                Console.WriteLine();

                // Pause execution while we wait for all of the tasks to complete
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(job.ListTasks(),
                                                                                 TaskState.Completed,
                                                                                 longTaskDurationLimit);
                }
                catch (TimeoutException)
                {
                    Console.WriteLine("Operation timed out while waiting for submitted tasks to reach state {0}", TaskState.Completed);
                }

                stopwatch.Stop();

                // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
                // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
                // amount of data transferred, lowering your query response times and increasing performance.
                ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
                IPagedEnumerable<CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);

                // Get a collection of the completed tasks sorted by the compute nodes on which they executed
                List<CloudTask> completedTasks = allTasks
                                                .Where(t => t.State == TaskState.Completed)
                                                .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
                                                .ToList();

                // Print the completed task information
                Console.WriteLine();
                Console.WriteLine("Completed tasks:");
                string lastNodeId = string.Empty;
                foreach (CloudTask task in completedTasks)
                {
                    if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
                    {
                        Console.WriteLine();
                        Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
                    }

                    lastNodeId = task.ComputeNodeInformation.ComputeNodeId;

                    Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                }

                // Get a collection of the uncompleted tasks which may exist if the TaskMonitor timeout was hit
                List<CloudTask> uncompletedTasks = allTasks
                                                   .Where(t => t.State != TaskState.Completed)
                                                   .OrderBy(t => t.Id)
                                                   .ToList();

                // Print a list of uncompleted tasks, if any
                Console.WriteLine();
                Console.WriteLine("Uncompleted tasks:");
                Console.WriteLine();
                if (uncompletedTasks.Any())
                {
                    foreach (CloudTask task in uncompletedTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                    }
                }
                else
                {
                    Console.WriteLine("\t<none>");
                }

                // Print some summary information
                Console.WriteLine();
                Console.WriteLine("             Nodes: " + nodeCount);
                Console.WriteLine("         Node size: " + nodeSize);
                Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
                Console.WriteLine("             Tasks: " + tasks.Count);
                Console.WriteLine("          Duration: " + stopwatch.Elapsed);
                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
        private async Task<List<JobModel>> ListJobsAsync()
        {
            List<JobModel> results = new List<JobModel>();
            var detailLevel = new ODATADetailLevel() { SelectClause = "id,state,creationTime" };
            IPagedEnumerable<CloudJob> jobList = this.JobSchedule.ListJobs(detailLevel);

            await jobList.ForEachAsync(item => results.Add(new JobModel(item)));

            return results;
        }
Example #28
        /// <summary>
        /// Query job info
        /// </summary>
        private async Task QueryJobChangeAsync()
        {
            TraceHelper.TraceEvent(this.sessionid, TraceEventType.Verbose,
                                   "[AzureBatchJobMonitorEntry] Enters QueryTaskInfo method.");
            bool shouldExit = false;

            this.pullJobGap = PullJobMinGap;
            JobState state = JobState.Active;

            Session.Data.JobState currentJobState = Session.Data.JobState.Configuring;
            var    pool    = this.batchClient.PoolOperations.GetPool(AzureBatchConfiguration.BatchPoolName);
            string skuName = pool.VirtualMachineSize;

            TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] VMSize in pool is {0}",
                                   skuName);
            SKU targetSku = Array.Find(this.skus, sku => sku.Name.Equals(skuName, StringComparison.OrdinalIgnoreCase));

            this.nodeCapacity = targetSku.VCPUs;
            TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information,
                                   "[AzureBatchJobMonitor] Node capacity in pool is {0}", nodeCapacity);

            ODATADetailLevel detailLevel = new ODATADetailLevel();

            detailLevel.SelectClause = "affinityId, ipAddress";
            var nodes = await pool.ListComputeNodes(detailLevel).ToListAsync();

            while (true)
            {
                if (shouldExit)
                {
                    break;
                }
                List <TaskInfo> stateChangedTaskList = new List <TaskInfo>();

                try
                {
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Verbose, "[AzureBatchJobMonitor] Starting get job state.");
                    ODATADetailLevel detail = new ODATADetailLevel(selectClause: "state");
                    this.cloudJob = await this.batchClient.JobOperations.GetJobAsync(this.cloudJob.Id);

                    state           = this.cloudJob.State.HasValue ? this.cloudJob.State.Value : state;
                    currentJobState = await AzureBatchJobStateConverter.FromAzureBatchJobAsync(this.cloudJob);

                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Current job state in AzureBatch: JobState = {0}\n", state);
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Current job state in Telepathy: JobState = {0}\n", currentJobState);
                    stateChangedTaskList = await this.GetTaskStateChangeAsync(nodes);

                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Previous job state report to AzureBatchJobMonitorEntry: JobState = {0}\n", previousJobState);
                    if (state == JobState.Completed || state == JobState.Disabled)
                    {
                        if (this.previousJobState == Session.Data.JobState.Canceling)
                        {
                            currentJobState = Session.Data.JobState.Canceled;
                        }
                        shouldExit = true;
                    }
                    else if (this.previousJobState == Session.Data.JobState.Canceling && !shouldExit)
                    {
                        // Override the current job state as Canceling: when all tasks complete, the job state converter would otherwise report the job as Finishing.
                        // If the job was canceling in the previous state and has not yet reached a terminal state, keep reporting the Canceling state to the job monitor entry.
                        currentJobState = Session.Data.JobState.Canceling;
                        TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Overwrite current job state as {0} in Telepathy according to previous job state {1}\n", currentJobState, previousJobState);
                    }
                }
                catch (BatchException e)
                {
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning, "[AzureBatchJobMonitor] BatchException thrown when querying job info: {0}", e);
                    //If the previous job state is canceling and current job is not found, then the job is deleted.
                    if (e.RequestInformation != null & e.RequestInformation.HttpStatusCode != null)
                    {
                        if (e.RequestInformation.HttpStatusCode == System.Net.HttpStatusCode.NotFound)
                        {
                            if (previousJobState == Session.Data.JobState.Canceling)
                            {
                                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning, "[AzureBatchJobMonitor] The queried job has been deleted.");
                            }
                            else
                            {
                                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning, "[AzureBatchJobMonitor] The queried job previous state is {0}, we make its state as canceled because it's no longer exist.", previousJobState);
                            }
                            shouldExit      = true;
                            currentJobState = Session.Data.JobState.Canceled;
                        }
                    }
                }
                catch (Exception e)
                {
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning, "[AzureBatchJobMonitor] Exception thrown when querying job info: {0}", e);
                }

                try
                {
                    if (this.ReportJobStateAction != null)
                    {
                        TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information,
                                               "[AzureBatchJobMonitor] Current job state report to AzureBatchJobMonitorEntry: JobState = {0}\n",
                                               currentJobState);
                        this.ReportJobStateAction(currentJobState, stateChangedTaskList, shouldExit);
                    }
                }
                catch (Exception e)
                {
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning, "[AzureBatchJobMonitor] Exception thrown when report job info: {0}", e);
                }

                this.previousJobState = currentJobState;

                if (!shouldExit)
                {
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Waiting {0} milliseconds and start another round of getting job state info.", this.pullJobGap);

                    // Sleep and pull job again, clear the register pull job flag
                    await Task.Delay(this.pullJobGap);

                    if (this.pullJobGap < PullJobMaxGap)
                    {
                        this.pullJobGap *= 2;
                        if (this.pullJobGap > PullJobMaxGap)
                        {
                            this.pullJobGap = PullJobMaxGap;
                        }
                    }
                }
            }
        }
Example #29
        /// <summary>
        /// Gets the combined state of Azure Batch job, task and pool that corresponds to the given TES task
        /// </summary>
        /// <param name="tesTaskId">The unique TES task ID</param>
        /// <returns>Job state information</returns>
        public async Task <AzureBatchJobAndTaskState> GetBatchJobAndTaskStateAsync(string tesTaskId)
        {
            var       nodeAllocationFailed = false;
            TaskState?taskState            = null;
            int?      taskExitCode         = null;
            TaskFailureInformation taskFailureInformation = null;

            var jobFilter = new ODATADetailLevel
            {
                FilterClause = $"startswith(id,'{tesTaskId}{BatchJobAttemptSeparator}')",
                SelectClause = "*"
            };

            var jobInfos = (await batchClient.JobOperations.ListJobs(jobFilter).ToListAsync())
                           .Select(j => new { Job = j, AttemptNumber = int.Parse(j.Id.Split(BatchJobAttemptSeparator)[1]) });

            if (!jobInfos.Any())
            {
                return(new AzureBatchJobAndTaskState {
                    JobState = null
                });
            }

            if (jobInfos.Count(j => j.Job.State == JobState.Active) > 1)
            {
                return(new AzureBatchJobAndTaskState {
                    MoreThanOneActiveJobFound = true
                });
            }

            var lastJobInfo = jobInfos.OrderBy(j => j.AttemptNumber).Last();

            var job           = lastJobInfo.Job;
            var attemptNumber = lastJobInfo.AttemptNumber;

            if (job.State == JobState.Active)
            {
                try
                {
                    nodeAllocationFailed = job.ExecutionInformation?.PoolId != null &&
                                           (await batchClient.PoolOperations.GetPoolAsync(job.ExecutionInformation.PoolId)).ResizeErrors?.Count > 0;
                }
                catch (Exception ex)
                {
                    // assume that node allocation failed
                    nodeAllocationFailed = true;
                    logger.LogError(ex, $"Failed to determine if the node allocation failed for TesTask {tesTaskId} with PoolId {job.ExecutionInformation?.PoolId}.");
                }
            }
            var jobPreparationTaskExecutionInformation = (await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync()).FirstOrDefault()?.JobPreparationTaskExecutionInformation;
            var jobPreparationTaskExitCode             = jobPreparationTaskExecutionInformation?.ExitCode;
            var jobPreparationTaskState = jobPreparationTaskExecutionInformation?.State;

            try
            {
                var batchTask = await batchClient.JobOperations.GetTaskAsync(job.Id, tesTaskId);

                taskState              = batchTask.State;
                taskExitCode           = batchTask.ExecutionInformation?.ExitCode;
                taskFailureInformation = batchTask.ExecutionInformation?.FailureInformation;
            }
            catch (Exception ex)
            {
                logger.LogError(ex, $"Failed to get task for TesTask {tesTaskId}");
            }

            return(new AzureBatchJobAndTaskState
            {
                JobState = job.State,
                JobPreparationTaskState = jobPreparationTaskState,
                JobPreparationTaskExitCode = jobPreparationTaskExitCode,
                TaskState = taskState,
                MoreThanOneActiveJobFound = false,
                NodeAllocationFailed = nodeAllocationFailed,
                TaskExitCode = taskExitCode,
                TaskFailureInformation = taskFailureInformation,
                AttemptNumber = attemptNumber
            });
        }
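
A brief hedged sketch of how a caller might branch on the combined state returned above; the logging calls and the HandleNodeAllocationFailureAsync helper are illustrative assumptions, not part of the class:

        // Hypothetical consumer of GetBatchJobAndTaskStateAsync; not part of the sample above.
        private async Task InspectTesTaskAsync(string tesTaskId)
        {
            var state = await GetBatchJobAndTaskStateAsync(tesTaskId);

            if (state.MoreThanOneActiveJobFound)
            {
                logger.LogWarning($"TES task {tesTaskId} has more than one active Batch job.");
            }
            else if (state.NodeAllocationFailed)
            {
                await HandleNodeAllocationFailureAsync(tesTaskId); // hypothetical helper, not defined above
            }
            else if (state.TaskState == TaskState.Completed && state.TaskExitCode.HasValue && state.TaskExitCode.Value != 0)
            {
                logger.LogWarning($"TES task {tesTaskId} completed with exit code {state.TaskExitCode}.");
            }
        }
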
Example #30
        static async Task ManageTasks(string[] args)
        {
            int    experimentId = int.Parse(args[0], CultureInfo.InvariantCulture);
            string summaryName  = null;

            if (args.Length > 1)
            {
                summaryName = args[1];
            }
            //Console.WriteLine(String.Format("Params are:\n id: {0}\ncontainer: {8}\ndirectory:{9}\ncategory: {1}\nextensions: {10}\ndomain: {11}\nexec: {2}\nargs: {3}\ntimeout: {4}\nmemlimit: {5}\noutlimit: {6}\nerrlimit: {7}", experimentId, benchmarkCategory, executable, arguments, timeout, memoryLimit, outputLimit, errorLimit, benchmarkContainerUri, benchmarkDirectory, extensionsString, domainString));

            string jobId = Environment.GetEnvironmentVariable(JobIdEnvVariableName);

            var secretStorage = new SecretStorage(Settings.Default.AADApplicationId, Settings.Default.AADApplicationCertThumbprint, Settings.Default.KeyVaultUrl);
            BatchConnectionString credentials = new BatchConnectionString(await secretStorage.GetSecret(Settings.Default.ConnectionStringSecretId));

            Console.WriteLine("Retrieved credentials.");


            var batchCred = new BatchSharedKeyCredentials(credentials.BatchURL, credentials.BatchAccountName, credentials.BatchAccessKey);

            var storage = new AzureExperimentStorage(credentials.WithoutBatchData().ToString());

            var expInfo = await storage.GetExperiment(experimentId);

            string benchmarkContainerUri = expInfo.BenchmarkContainerUri;  // args[1];
            string benchmarkDirectory    = expInfo.BenchmarkDirectory;     // args[2];
            string benchmarkCategory     = expInfo.Category;               // args[3];
            string extensionsString      = expInfo.BenchmarkFileExtension; //args[4];
            string domainString          = expInfo.DomainName;             // args[5];
            string executable            = expInfo.Executable;             // args[6];
            string arguments             = expInfo.Parameters;             // args[7];
            double timeout        = expInfo.BenchmarkTimeout;              // TimeSpan.FromSeconds(double.Parse(args[8]));
            double memoryLimit    = expInfo.MemoryLimitMB;                 // 0; // no limit
            int    maxRepetitions = expInfo.AdaptiveRunMaxRepetitions;
            double maxTime        = expInfo.AdaptiveRunMaxTimeInSeconds;

            //long? outputLimit = null;
            //long? errorLimit = null;
            //if (args.Length > 9)
            //{
            //    memoryLimit = double.Parse(args[9]);
            //    if (args.Length > 10)
            //    {
            //        outputLimit = args[10] == "null" ? null : (long?)long.Parse(args[10]);
            //        if (args.Length > 11)
            //        {
            //            errorLimit = args[11] == "null" ? null : (long?)long.Parse(args[11]);
            //        }
            //    }
            //}

            AzureBenchmarkStorage benchmarkStorage = CreateBenchmarkStorage(benchmarkContainerUri, storage);


            var queue = await storage.CreateResultsQueue(experimentId);

            Console.Write("Created queue");

            await FetchSavedResults(experimentId, storage);

            Console.WriteLine("Fetched existing results");
            var collectionTask = CollectResults(experimentId, storage);

            Console.WriteLine("Started collection thread.");
            Domain             domain = ResolveDomain(domainString);
            SortedSet <string> extensions;

            if (string.IsNullOrEmpty(extensionsString))
            {
                extensions = new SortedSet <string>(domain.BenchmarkExtensions.Distinct());
            }
            else
            {
                extensions = new SortedSet <string>(extensionsString.Split('|').Select(s => s.Trim().TrimStart('.')).Distinct());
            }

            using (BatchClient batchClient = BatchClient.Open(batchCred))
            {
                if (expInfo.TotalBenchmarks <= 0)
                {
                    //not all experiments started
                    ODATADetailLevel detailLevel = new ODATADetailLevel();
                    detailLevel.SelectClause = "id,displayName";

                    Console.WriteLine("Listing existing tasks.");
                    var processedBlobs = new SortedSet <string>(batchClient.JobOperations.ListTasks(jobId, detailLevel)
                                                                .SelectMany(t =>
                    {
                        int id;
                        if (int.TryParse(t.Id, out id))
                        {
                            // we put benchmark file first
                            return(new string[] { t.DisplayName });
                        }
                        return(new string[] { });
                    }));
                    Console.WriteLine("Done!");

                    BlobContinuationToken continuationToken = null;
                    BlobResultSegment     resultSegment     = null;

                    List <Task> starterTasks       = new List <Task>();
                    int         totalBenchmarks    = 0;
                    string      benchmarksPath     = CombineBlobPath(benchmarkDirectory, benchmarkCategory);
                    string      outputQueueUri     = storage.GetOutputQueueSASUri(experimentId, TimeSpan.FromHours(48));
                    string      outputContainerUri = storage.GetOutputContainerSASUri(TimeSpan.FromHours(48));
                    do
                    {
                        resultSegment = await benchmarkStorage.ListBlobsSegmentedAsync(benchmarksPath, continuationToken);

                        Console.WriteLine("Got some blobs");
                        string[] blobNamesToProcess = resultSegment.Results.SelectMany(item =>
                        {
                            var blob = item as CloudBlockBlob;
                            if (blob == null || processedBlobs.Contains(blob.Name))
                            {
                                return new string[] { };
                            }

                            var nameParts = blob.Name.Split('/');
                            var shortnameParts = nameParts[nameParts.Length - 1].Split('.');
                            if (shortnameParts.Length == 1 && !extensions.Contains(""))
                            {
                                return new string[] { };
                            }

                            var ext = shortnameParts[shortnameParts.Length - 1];
                            if (!extensions.Contains(ext))
                            {
                                return new string[] { };
                            }

                            return new string[] { blob.Name };
                        }).ToArray();
                        starterTasks.Add(StartTasksForSegment(timeout.ToString(), experimentId, executable, arguments, memoryLimit, domainString, outputQueueUri, outputContainerUri, null, null, jobId, batchClient, blobNamesToProcess, benchmarksPath, totalBenchmarks, benchmarkStorage, maxRepetitions, maxTime));

                        continuationToken = resultSegment.ContinuationToken;
                        totalBenchmarks  += blobNamesToProcess.Length;
                    }while (continuationToken != null);

                    await storage.SetTotalBenchmarks(experimentId, totalBenchmarks);

                    Program.totalBenchmarks  = totalBenchmarks;
                    totalBenchmarksToProcess = totalBenchmarks;

                    await Task.WhenAll(starterTasks.ToArray());

                    Console.WriteLine("Finished starting tasks");
                }
                else
                {
                    Program.totalBenchmarks  = expInfo.TotalBenchmarks;
                    totalBenchmarksToProcess = expInfo.TotalBenchmarks;
                }

                MonitorTasksUntilCompletion(experimentId, jobId, collectionTask, batchClient);

                if (summaryName != null)
                {
                    Trace.WriteLine(string.Format("Building summary for experiment {0} and summary name {1}...", experimentId, summaryName));
                    AzureSummaryManager manager = new AzureSummaryManager(credentials.WithoutBatchData().ToString(), MEFDomainResolver.Instance);
                    await AppendSummary(summaryName, experimentId, domain, manager);
                }
                else
                {
                    Trace.WriteLine("No summary requested.");
                }
                Console.WriteLine("Closing.");
            }
        }
Example #31
        static async Task ManageRetry(string[] args)
        {
            int    experimentId          = int.Parse(args[0], CultureInfo.InvariantCulture);
            string benchmarkListBlobId   = args[1];
            string benchmarkContainerUri = null;

            if (args.Length > 2)
            {
                benchmarkContainerUri = args[2];
            }

            string jobId = Environment.GetEnvironmentVariable(JobIdEnvVariableName);

            var secretStorage = new SecretStorage(Settings.Default.AADApplicationId, Settings.Default.AADApplicationCertThumbprint, Settings.Default.KeyVaultUrl);
            BatchConnectionString credentials = new BatchConnectionString(await secretStorage.GetSecret(Settings.Default.ConnectionStringSecretId));

            Console.WriteLine("Retrieved credentials.");


            var batchCred = new BatchSharedKeyCredentials(credentials.BatchURL, credentials.BatchAccountName, credentials.BatchAccessKey);

            var storage = new AzureExperimentStorage(credentials.WithoutBatchData().ToString());

            var expInfo = await storage.GetExperiment(experimentId);

            if (benchmarkContainerUri == null)
            {
                if (expInfo.BenchmarkContainerUri != ExperimentDefinition.DefaultContainerUri)
                {
                    throw new ArgumentException("New URI for non-default benchmark container was not provided.");
                }
                else
                {
                    benchmarkContainerUri = ExperimentDefinition.DefaultContainerUri;
                }
            }

            AzureBenchmarkStorage benchmarkStorage = CreateBenchmarkStorage(benchmarkContainerUri, storage);

            var queue = await storage.CreateResultsQueue(experimentId);

            Console.Write("Created queue");

            // We can't tell bad results we got during previous runs on the same experiment from bad results
            // we got during this run when job manager crashed, so we put them all into 'good' list.
            // 'Fresh' (and, therefore, duplicate) bad results will be removed during deduplication.
            goodResults = (await storage.GetAzureExperimentResults(experimentId)).Item1.ToList();
            Console.WriteLine("Fetched existing results");
            Domain domain = ResolveDomain(expInfo.DomainName);


            string benchmarksPath    = CombineBlobPath(expInfo.BenchmarkDirectory, expInfo.Category);
            var    benchmarkListBlob = storage.TempBlobContainer.GetBlockBlobReference(benchmarkListBlobId);

            string[] benchmarkList = (await benchmarkListBlob.DownloadTextAsync()).Split('\n')
                                     .SelectMany(s =>
            {
                s = s.Trim();
                if (string.IsNullOrEmpty(s))
                {
                    return new string[] { };
                }
                else
                {
                    return new string[] { benchmarksPath + s };
                }
            }).ToArray();
            totalBenchmarksToProcess = benchmarkList.Length;
            totalBenchmarks          = expInfo.TotalBenchmarks;
            Console.WriteLine("Retrieved list of benchmarks to re-process. Total: {0}.", totalBenchmarksToProcess);
            var collectionTask = CollectResults(experimentId, storage);

            Console.WriteLine("Started collection thread.");

            using (BatchClient batchClient = BatchClient.Open(batchCred))
            {
                //not all experiments started
                ODATADetailLevel detailLevel = new ODATADetailLevel();
                detailLevel.SelectClause = "id,displayName";

                Console.WriteLine("Listing existing tasks.");
                var processedBlobs = new SortedSet <string>(batchClient.JobOperations.ListTasks(jobId, detailLevel)
                                                            .SelectMany(t =>
                {
                    int id;
                    if (int.TryParse(t.Id, out id))
                    {
                        // we put benchmark file first
                        return(new string[] { t.DisplayName });
                    }
                    return(new string[] { });
                }));
                Console.WriteLine("Done!");

                string   outputQueueUri     = storage.GetOutputQueueSASUri(experimentId, TimeSpan.FromHours(48));
                string   outputContainerUri = storage.GetOutputContainerSASUri(TimeSpan.FromHours(48));
                string[] blobsToProcess     = benchmarkList.Where(b => !processedBlobs.Contains(b)).ToArray();

                if (blobsToProcess.Length > 0)
                {
                    var starterTask = StartTasksForSegment(expInfo.BenchmarkTimeout.ToString(), experimentId, expInfo.Executable, expInfo.Parameters, expInfo.MemoryLimitMB, expInfo.DomainName, outputQueueUri, outputContainerUri, null, null, jobId, batchClient, blobsToProcess, benchmarksPath, 0, benchmarkStorage, expInfo.AdaptiveRunMaxRepetitions, expInfo.AdaptiveRunMaxTimeInSeconds);

                    await starterTask;
                    Console.WriteLine("Finished starting tasks");
                }

                MonitorTasksUntilCompletion(experimentId, jobId, collectionTask, batchClient);
            }

            Console.WriteLine("Deleting blob with benchmark list.");
            await benchmarkListBlob.DeleteIfExistsAsync();

            Console.WriteLine("Closing.");
        }
Example #32
        /// <summary>
        /// Runs the job manager task.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, work item: {1}, job: {2} has started...",
                              this.accountName,
                              this.workItemName,
                              this.jobName);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchCredentials batchCredentials = new BatchCredentials(
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            using (IBatchClient batchClient = BatchClient.Connect(this.configurationSettings.BatchServiceUrl, batchCredentials))
            {
                using (IWorkItemManager workItemManager = batchClient.OpenWorkItemManager())
                {
                    IToolbox toolbox = batchClient.OpenToolbox();

                    //Construct a container SAS to provide the Batch Service access to the files required to
                    //run the mapper and reducer tasks.
                    string containerSas = Helpers.ConstructContainerSas(
                        this.configurationSettings.StorageAccountName,
                        this.configurationSettings.StorageAccountKey,
                        this.configurationSettings.StorageServiceUrl,
                        this.configurationSettings.BlobContainer);

                    //
                    // Submit mapper tasks.
                    //
                    Console.WriteLine("Submitting {0} mapper tasks.", this.configurationSettings.NumberOfMapperTasks);

                    //The collection of tasks to add to the Batch Service.
                    List <ICloudTask> tasksToAdd = new List <ICloudTask>();

                    for (int i = 0; i < this.configurationSettings.NumberOfMapperTasks; i++)
                    {
                        string taskName     = Helpers.GetMapperTaskName(i);
                        string fileBlobName = Helpers.GetSplitFileName(i);
                        string fileBlobPath = Helpers.ConstructBlobSource(containerSas, fileBlobName);

                        string     commandLine       = string.Format("{0} -MapperTask {1}", Constants.TextSearchExe, fileBlobPath);
                        ICloudTask unboundMapperTask = new CloudTask(taskName, commandLine);

                        //The set of files (exe's, dll's and configuration files) required to run the mapper task.
                        IReadOnlyList <string> mapperTaskRequiredFiles = Constants.RequiredExecutableFiles;

                        List <IResourceFile> mapperTaskResourceFiles = Helpers.GetResourceFiles(containerSas, mapperTaskRequiredFiles);

                        unboundMapperTask.ResourceFiles = mapperTaskResourceFiles;

                        tasksToAdd.Add(unboundMapperTask);
                    }

                    //Submit the unbound task collection to the Batch Service.
                    //Use the AddTask method which takes a collection of ICloudTasks for the best performance.
                    await workItemManager.AddTaskAsync(this.workItemName, this.jobName, tasksToAdd);

                    //
                    // Wait for the mapper tasks to complete.
                    //
                    Console.WriteLine("Waiting for the mapper tasks to complete...");

                    //List all the mapper tasks using a name filter.
                    DetailLevel mapperTaskNameFilter = new ODATADetailLevel()
                    {
                        FilterClause = string.Format("startswith(name, '{0}')", Constants.MapperTaskPrefix)
                    };

                    List <ICloudTask> tasksToMonitor = workItemManager.ListTasks(
                        this.workItemName,
                        this.jobName,
                        detailLevel: mapperTaskNameFilter).ToList();

                    //Use the task state monitor to wait for the tasks to complete.
                    ITaskStateMonitor taskStateMonitor = toolbox.CreateTaskStateMonitor();

                    bool timedOut = await taskStateMonitor.WaitAllAsync(tasksToMonitor, TaskState.Completed, TimeSpan.FromMinutes(5));

                    //Get the list of mapper tasks in order to analyze their state and ensure they completed successfully.
                    IEnumerableAsyncExtended <ICloudTask> asyncEnumerable = workItemManager.ListTasks(
                        this.workItemName,
                        this.jobName,
                        detailLevel: mapperTaskNameFilter);
                    IAsyncEnumerator <ICloudTask> asyncEnumerator = asyncEnumerable.GetAsyncEnumerator();

                    //Dump the status of each mapper task.
                    while (await asyncEnumerator.MoveNextAsync())
                    {
                        ICloudTask cloudTask = asyncEnumerator.Current;

                        Console.WriteLine("Task {0} is in state: {1}", cloudTask.Name, cloudTask.State);

                        await Helpers.CheckForTaskSuccessAsync(cloudTask, dumpStandardOutOnTaskSuccess : false);

                        Console.WriteLine();
                    }

                    //If not all the tasks reached the desired state within the timeout then the job manager
                    //cannot continue.
                    if (timedOut)
                    {
                        const string errorMessage = "Mapper tasks did not complete within expected timeout.";
                        Console.WriteLine(errorMessage);

                        throw new TimeoutException(errorMessage);
                    }

                    //
                    // Create the reducer task.
                    //
                    string reducerTaskCommandLine = string.Format("{0} -ReducerTask", Constants.TextSearchExe);

                    Console.WriteLine("Adding the reducer task: {0}", Constants.ReducerTaskName);
                    ICloudTask unboundReducerTask = new CloudTask(Constants.ReducerTaskName, reducerTaskCommandLine);

                    //The set of files (exe's, dll's and configuration files) required to run the reducer task.
                    List <IResourceFile> reducerTaskResourceFiles = Helpers.GetResourceFiles(containerSas, Constants.RequiredExecutableFiles);

                    unboundReducerTask.ResourceFiles = reducerTaskResourceFiles;

                    //Send the request to the Batch Service to add the reducer task.
                    await workItemManager.AddTaskAsync(this.workItemName, this.jobName, unboundReducerTask);

                    //
                    //Wait for the reducer task to complete.
                    //

                    //Get the bound reducer task and monitor it for completion.
                    ICloudTask boundReducerTask = await workItemManager.GetTaskAsync(this.workItemName, this.jobName, Constants.ReducerTaskName);

                    timedOut = await taskStateMonitor.WaitAllAsync(new List <ICloudTask> {
                        boundReducerTask
                    }, TaskState.Completed, TimeSpan.FromMinutes(2));

                    //Refresh the reducer task to get the most recent information about it from the Batch Service.
                    await boundReducerTask.RefreshAsync();

                    //Dump the reducer tasks exit code and scheduling error for debugging purposes.
                    await Helpers.CheckForTaskSuccessAsync(boundReducerTask, dumpStandardOutOnTaskSuccess : true);

                    //Handle the possibility that the reducer task did not complete within the expected timeout.
                    if (timedOut)
                    {
                        const string errorMessage = "Reducer task did not complete within expected timeout.";

                        Console.WriteLine("Task {0} is in state: {1}", boundReducerTask.Name, boundReducerTask.State);

                        Console.WriteLine(errorMessage);
                        throw new TimeoutException(errorMessage);
                    }

                    //The job manager has completed.
                    Console.WriteLine("JobManager completed successfully.");
                }
            }
        }
Example #33
        /// <summary>
        /// Monitors the specified tasks for completion and returns a value indicating whether all tasks completed successfully
        /// within the timeout period.
        /// </summary>
        /// <param name="batchClient">A <see cref="BatchClient"/>.</param>
        /// <param name="jobId">The id of the job containing the tasks that should be monitored.</param>
        /// <param name="timeout">The period of time to wait for the tasks to reach the completed state.</param>
        /// <returns><c>true</c> if all tasks in the specified job completed with an exit code of 0 within the specified timeout period, otherwise <c>false</c>.</returns>

        private static void MonitorTasks(BatchClient batchClient, string jobId, TimeSpan timeout)
        {
            bool         allTasksSuccessful = true;
            const string successMessage     = "All tasks reached state Completed.";
            const string failureMessage     = "One or more tasks failed to reach the Completed state within the timeout period.";

            // Obtain the collection of tasks currently managed by the job.
            // Use a detail level to specify that only the "id" property of each task should be populated.
            // See https://docs.microsoft.com/en-us/azure/batch/batch-efficient-list-queries

            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id");

            IEnumerable <CloudTask> addedTasks = batchClient.JobOperations.ListTasks(jobId, detail);

            Console.WriteLine("Monitoring all tasks for 'Completed' state, timeout in {0}...", timeout.ToString());

            // We use a TaskStateMonitor to monitor the state of our tasks. In this case, we will wait for all tasks to
            // reach the Completed state.

            TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();

            try
            {
                taskStateMonitor.WaitAll(addedTasks, TaskState.Completed, timeout);
            }
            catch (TimeoutException)
            {
                batchClient.JobOperations.TerminateJob(jobId, failureMessage);
                Console.WriteLine(failureMessage);
                return;
            }

            batchClient.JobOperations.TerminateJob(jobId, successMessage);

            // All tasks have reached the "Completed" state, however, this does not guarantee all tasks completed successfully.
            // Here we further check each task's ExecutionInformation property to ensure that it did not encounter a scheduling error
            // or return a non-zero exit code.

            // Update the detail level to populate only the task id and executionInfo properties.
            detail.SelectClause = "id, executionInfo";

            IEnumerable <CloudTask> completedTasks = batchClient.JobOperations.ListTasks(jobId, detail);

            foreach (CloudTask task in completedTasks)
            {
                if (task.ExecutionInformation.Result == TaskExecutionResult.Failure)
                {
                    // A task with failure information set indicates there was a problem with the task. It is important to note that
                    // the task's state can be "Completed," yet still have encountered a failure.

                    allTasksSuccessful = false;

                    Console.WriteLine("WARNING: Task [{0}] encountered a failure: {1}", task.Id, task.ExecutionInformation.FailureInformation.Message);
                    if (task.ExecutionInformation.ExitCode != 0)
                    {
                        // A non-zero exit code may indicate that the application executed by the task encountered an error
                        // during execution. As not every application returns non-zero on failure by default (e.g. robocopy),
                        // your implementation of error checking may differ from this example.

                        Console.WriteLine("WARNING: Task [{0}] returned a non-zero exit code - this may indicate task execution or completion failure.", task.Id);
                    }
                }
            }

            if (allTasksSuccessful)
            {
                Console.WriteLine("Success! All tasks completed successfully within the specified timeout period. Output files uploaded to output container.");
            }
        }
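
As a side note, the efficient-list-query pattern referenced in the comments above (a select clause, optionally combined with a filter clause, on ODATADetailLevel) can be expressed in a single call; a minimal hedged sketch, reusing only properties already shown in these samples:

        // Illustrative sketch (not part of MonitorTasks): list only completed tasks for a job,
        // returning just the id and executionInfo properties to keep the query payload small.
        private static IPagedEnumerable<CloudTask> ListCompletedTasks(BatchClient batchClient, string jobId)
        {
            ODATADetailLevel detail = new ODATADetailLevel(
                filterClause: "state eq 'completed'",
                selectClause: "id,executionInfo");

            return batchClient.JobOperations.ListTasks(jobId, detail);
        }
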
        private async System.Threading.Tasks.Task<List<JobModel>> ListJobsAsync()
        {
            List<JobModel> results = new List<JobModel>();
            var detailLevel = new ODATADetailLevel() { SelectClause = "name,state,creationTime" };
            IEnumerableAsyncExtended<ICloudJob> jobList = this.WorkItem.ListJobs(detailLevel);

            IAsyncEnumerator<ICloudJob> asyncEnumerator = jobList.GetAsyncEnumerator();

            while (await asyncEnumerator.MoveNextAsync())
            {
                results.Add(new JobModel(this, asyncEnumerator.Current));
            }
            return results;
        }
Example #35
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize        = "small";
            const int    nodeCount       = 4;
            const int    maxTasksPerNode = 4;
            const int    taskCount       = 32;

            // Ensure there are enough tasks to help avoid hitting some timeout conditions below
            int minimumTaskCount = nodeCount * maxTasksPerNode * 2;

            if (taskCount < minimumTaskCount)
            {
                Console.WriteLine("You must specify at least two tasks per node core for this sample ({0} tasks in this configuration).", minimumTaskCount);
                Console.WriteLine();

                // Not enough tasks, exit the application
                return;
            }

            // In this sample, the tasks simply ping localhost on the compute nodes; adjust these
            // values to simulate variable task duration
            const int minPings = 30;
            const int maxPings = 60;

            const string poolId = "ParallelTasksSamplePool";
            const string jobId  = "ParallelTasksSampleJob";

            // Amount of time to wait before timing out (potentially) long-running tasks
            TimeSpan longTaskDurationLimit = TimeSpan.FromMinutes(30);

            // Set up access to your Batch account with a BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                                poolId,
                                                                                nodeSize,
                                                                                nodeCount,
                                                                                maxTasksPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // The job's tasks ping localhost a random number of times between minPings and maxPings.
                // Adjust the minPings/maxPings values above to experiment with different task durations.
                Random           rand  = new Random();
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= taskCount; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = "ping -n " + rand.Next(minPings, maxPings + 1).ToString() + " localhost";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Pause execution until the pool is steady and its compute nodes are ready to accept jobs.
                // NOTE: Such a pause is not necessary within your own code. Tasks can be added to a job at any point and will be
                // scheduled to execute on a compute node as soon any node has reached Idle state. Because the focus of this sample
                // is the demonstration of running tasks in parallel on multiple compute nodes, we wait for all compute nodes to
                // complete initialization and reach the Idle state in order to maximize the number of compute nodes available for
                // parallelization.
                await ArticleHelpers.WaitForPoolToReachStateAsync(batchClient, pool.Id, AllocationState.Steady, longTaskDurationLimit);

                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Idle, longTaskDurationLimit);

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Pause again to wait until *all* nodes are running tasks
                await ArticleHelpers.WaitForNodesToReachStateAsync(batchClient, pool.Id, ComputeNodeState.Running, TimeSpan.FromMinutes(2));

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Print out task assignment information.
                Console.WriteLine();
                await GettingStartedCommon.PrintNodeTasksAsync(batchClient, pool.Id);

                Console.WriteLine();

                // Pause execution while we wait for all of the tasks to complete
                Console.WriteLine("Waiting for task completion...");
                Console.WriteLine();

                try
                {
                    await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                        job.ListTasks(),
                        TaskState.Completed,
                        longTaskDurationLimit);
                }
                catch (TimeoutException e)
                {
                    Console.WriteLine(e.ToString());
                }

                stopwatch.Stop();

                // Obtain the tasks, specifying a detail level to limit the number of properties returned for each task.
                // If you have a large number of tasks, specifying a DetailLevel is extremely important in reducing the
                // amount of data transferred, lowering your query response times and increasing performance.
                ODATADetailLevel             detail   = new ODATADetailLevel(selectClause: "id,commandLine,nodeInfo,state");
                IPagedEnumerable <CloudTask> allTasks = batchClient.JobOperations.ListTasks(job.Id, detail);

                // Get a collection of the completed tasks sorted by the compute nodes on which they executed
                List <CloudTask> completedTasks = allTasks
                                                  .Where(t => t.State == TaskState.Completed)
                                                  .OrderBy(t => t.ComputeNodeInformation.ComputeNodeId)
                                                  .ToList();

                // Print the completed task information
                Console.WriteLine();
                Console.WriteLine("Completed tasks:");
                string lastNodeId = string.Empty;
                foreach (CloudTask task in completedTasks)
                {
                    if (!string.Equals(lastNodeId, task.ComputeNodeInformation.ComputeNodeId))
                    {
                        Console.WriteLine();
                        Console.WriteLine(task.ComputeNodeInformation.ComputeNodeId);
                    }

                    lastNodeId = task.ComputeNodeInformation.ComputeNodeId;

                    Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                }

                // Get a collection of the uncompleted tasks which may exist if the TaskMonitor timeout was hit
                List <CloudTask> uncompletedTasks = allTasks
                                                    .Where(t => t.State != TaskState.Completed)
                                                    .OrderBy(t => t.Id)
                                                    .ToList();

                // Print a list of uncompleted tasks, if any
                Console.WriteLine();
                Console.WriteLine("Uncompleted tasks:");
                Console.WriteLine();
                if (uncompletedTasks.Any())
                {
                    foreach (CloudTask task in uncompletedTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.Id, task.CommandLine);
                    }
                }
                else
                {
                    Console.WriteLine("\t<none>");
                }

                // Print some summary information
                Console.WriteLine();
                Console.WriteLine("             Nodes: " + nodeCount);
                Console.WriteLine("         Node size: " + nodeSize);
                Console.WriteLine("Max tasks per node: " + pool.MaxTasksPerComputeNode);
                Console.WriteLine("             Tasks: " + tasks.Count);
                Console.WriteLine("          Duration: " + stopwatch.Elapsed);
                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
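        // A minimal, hedged sketch illustrating the DetailLevel guidance above: the same task listing
        // is timed once without a DetailLevel and once with a narrow select clause. The method name is
        // illustrative, and the snippet assumes the same usings (System.Diagnostics, System.Linq) and
        // Batch types as the surrounding sample.
        private static async Task CompareTaskQueryTimesAsync(BatchClient batchClient, string jobId)
        {
            Stopwatch timer = Stopwatch.StartNew();
            List<CloudTask> fullTasks = await batchClient.JobOperations.ListTasks(jobId).ToListAsync();
            timer.Stop();
            Console.WriteLine("No DetailLevel:    {0} tasks in {1}", fullTasks.Count, timer.Elapsed);

            ODATADetailLevel slimDetail = new ODATADetailLevel(selectClause: "id,state");
            timer.Restart();
            List<CloudTask> slimTasks = await batchClient.JobOperations.ListTasks(jobId, slimDetail).ToListAsync();
            timer.Stop();
            Console.WriteLine("With selectClause: {0} tasks in {1}", slimTasks.Count, timer.Elapsed);
        }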
Example #36
        /// <summary>
        /// Refreshes the status of the ComputeNodes from the Batch service.
        /// </summary>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        private async Task RefreshComputeNodesAsync()
        {
            // Get the list of the pool's compute nodes, selecting only the properties needed to derive the counters below (recent tasks, state, and id)
            DetailLevel detailLevel = new ODATADetailLevel()
                {
                    SelectClause = "recentTasks,state,id"
                };

            IPagedEnumerable<ComputeNode> computeNodeEnumerableAsync = this.Pool.ListComputeNodes(detailLevel);
            List<ComputeNode> computeNodeList = await computeNodeEnumerableAsync.ToListAsync();
            
            this.RunningTasks = 0;
            this.SchedulableComputeNodes = 0;
            foreach (ComputeNode computeNode in computeNodeList)
            {
                if (computeNode.State == ComputeNodeState.Idle || computeNode.State == ComputeNodeState.Running)
                {
                    this.SchedulableComputeNodes++;
                }

                if (computeNode.State == ComputeNodeState.Running && this.Pool.MaxTasksPerComputeNode == 1)
                {
                    this.RunningTasks++;
                }
                else if (this.Pool.MaxTasksPerComputeNode > 1)
                {
                    IEnumerable<TaskInformation> taskInfoList = computeNode.RecentTasks;
                    if (taskInfoList != null)
                    {
                        foreach (TaskInformation ti in taskInfoList)
                        {
                            if (ti.TaskState == TaskState.Running)
                            {
                                this.RunningTasks++;
                            }
                        }
                    }
                }
            }

            Interlocked.Exchange(ref this.computeNodes, computeNodeList); // Thread-safe swap of the cached node list
        }
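        // Hedged usage sketch for the refresh logic above: poll the pool on a fixed interval and report
        // the derived counters. It assumes this class exposes SchedulableComputeNodes and RunningTasks
        // as integer properties, as the assignments above imply; the method name and loop bounds are
        // illustrative only.
        private async Task MonitorPoolLoadAsync(TimeSpan pollInterval, int iterations)
        {
            for (int i = 0; i < iterations; i++)
            {
                await this.RefreshComputeNodesAsync();

                Console.WriteLine(
                    "Schedulable nodes: {0}, running tasks: {1}",
                    this.SchedulableComputeNodes,
                    this.RunningTasks);

                await Task.Delay(pollInterval);
            }
        }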
Example #37
        /// <summary>
        /// Lists the usage metrics, aggregated by pool across individual time intervals, for the specified account.
        /// </summary>
        /// <param name="options">The options to use when aggregating usage for pools.</param>
        public IEnumerable<PSPoolUsageMetrics> ListPoolUsageMetrics(ListPoolUsageOptions options)
        {
            string verboseLogString = null;
            ODATADetailLevel detailLevel = null;

            if (!string.IsNullOrEmpty(options.Filter))
            {
                verboseLogString = Resources.GetPoolUsageMetricsByFilter;
                detailLevel = new ODATADetailLevel(filterClause: options.Filter);
            }
            else
            {
                verboseLogString = Resources.GetPoolUsageMetricsByNoFilter;
            }

            PoolOperations poolOperations = options.Context.BatchOMClient.PoolOperations;
            IPagedEnumerable<PoolUsageMetrics> poolUsageMetrics =
                poolOperations.ListPoolUsageMetrics(options.StartTime, options.EndTime, detailLevel, options.AdditionalBehaviors);

            return PSPagedEnumerable<PSPoolUsageMetrics, PoolUsageMetrics>.CreateWithMaxCount(
                poolUsageMetrics, p => new PSPoolUsageMetrics(p), Int32.MaxValue, () => WriteVerbose(verboseLogString));
        }
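        // Hedged sketch of querying the same usage metrics directly against the Batch client, with a
        // filter restricted to a single pool. The "poolId eq ..." filter follows the Batch OData
        // conventions used elsewhere in these samples but should be treated as an assumption, as are
        // the method name and the properties printed.
        private static async Task PrintUsageForPoolAsync(BatchClient batchClient, string poolId, DateTime startTime, DateTime endTime)
        {
            ODATADetailLevel detail = new ODATADetailLevel(filterClause: string.Format("poolId eq '{0}'", poolId));

            IPagedEnumerable<PoolUsageMetrics> metrics =
                batchClient.PoolOperations.ListPoolUsageMetrics(startTime, endTime, detail);

            await metrics.ForEachAsync(m =>
                Console.WriteLine("{0}: {1} - {2}, {3} core hours", m.PoolId, m.StartTime, m.EndTime, m.TotalCoreHours));
        }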
Example #38
        protected override async Task <SessionAllocateInfoContract> CreateAndSubmitSessionJob(
            SessionStartInfoContract startInfo,
            string endpointPrefix,
            bool durable,
            string callId,
            SecureString securePassword,
            ServiceRegistration registration,
            SessionAllocateInfoContract sessionAllocateInfo,
            string traceSwitchValue,
            string serviceName,
            BrokerConfigurations brokerConfigurations,
            string hostpath)
        {
            try
            {
                bool brokerPerfMode = true; // TODO: implement separated broker mode
                if (brokerPerfMode)
                {
                    TraceHelper.TraceEvent(TraceEventType.Information, "[AzureBatchSessionLauncher] .CreateAndSubmitSessionJob: broker perf mode");
                }

                TraceHelper.TraceEvent(
                    TraceEventType.Information,
                    "[AzureBatchSessionLauncher] .CreateAndSubmitSessionJob: callId={0}, endpointPrefix={1}, durable={2}.",
                    callId,
                    endpointPrefix,
                    durable);
                using (var batchClient = AzureBatchConfiguration.GetBatchClient())
                {
                    var pool = await batchClient.PoolOperations.GetPoolAsync(AzureBatchConfiguration.BatchPoolName);

                    ODATADetailLevel detailLevel = new ODATADetailLevel();
                    detailLevel.SelectClause = "affinityId, ipAddress";
                    //detailLevel.FilterClause = @"state eq 'idle'";
                    var nodes = await pool.ListComputeNodes(detailLevel).ToListAsync();

                    if (nodes.Count < 1)
                    {
                        throw new InvalidOperationException("Compute node count in selected pool is less then 1.");
                    }

                    sessionAllocateInfo.Id = string.Empty;

                    // sessionAllocateInfo.BrokerLauncherEpr = new[] { SessionInternalConstants.BrokerConnectionStringToken };
                    IList <EnvironmentSetting> ConstructEnvironmentVariable()
                    {
                        List <EnvironmentSetting> env = new List <EnvironmentSetting>(); // Can change to set to ensure no unintended overwrite

                        foreach (NameValueConfigurationElement entry in registration.Service.EnvironmentVariables)
                        {
                            env.Add(new EnvironmentSetting(entry.Name, entry.Value));
                        }

                        // pass service serviceInitializationTimeout as job environment variables
                        env.Add(new EnvironmentSetting(Constant.ServiceInitializationTimeoutEnvVar, registration.Service.ServiceInitializationTimeout.ToString()));

                        if (startInfo.ServiceHostIdleTimeout == null)
                        {
                            env.Add(new EnvironmentSetting(Constant.ServiceHostIdleTimeoutEnvVar, registration.Service.ServiceHostIdleTimeout.ToString()));
                        }
                        else
                        {
                            env.Add(new EnvironmentSetting(Constant.ServiceHostIdleTimeoutEnvVar, startInfo.ServiceHostIdleTimeout.ToString()));
                        }

                        if (startInfo.ServiceHangTimeout == null)
                        {
                            env.Add(new EnvironmentSetting(Constant.ServiceHangTimeoutEnvVar, registration.Service.ServiceHangTimeout.ToString()));
                        }
                        else
                        {
                            env.Add(new EnvironmentSetting(Constant.ServiceHangTimeoutEnvVar, startInfo.ServiceHangTimeout.ToString()));
                        }

                        // pass MessageLevelPreemption switcher as job environment variables
                        env.Add(new EnvironmentSetting(Constant.EnableMessageLevelPreemptionEnvVar, registration.Service.EnableMessageLevelPreemption.ToString()));

                        // pass trace switcher to svchost
                        if (!string.IsNullOrEmpty(traceSwitchValue))
                        {
                            env.Add(new EnvironmentSetting(Constant.TraceSwitchValue, traceSwitchValue));
                        }

                        // pass taskcancelgraceperiod as environment variable to svchosts
                        env.Add(new EnvironmentSetting(Constant.CancelTaskGracePeriodEnvVar, Constant.DefaultCancelTaskGracePeriod.ToString()));

                        // pass service config file name to services
                        env.Add(new EnvironmentSetting(Constant.ServiceConfigFileNameEnvVar, serviceName));

                        // pass maxMessageSize to service hosts
                        int maxMessageSize = startInfo.MaxMessageSize.HasValue ? startInfo.MaxMessageSize.Value : registration.Service.MaxMessageSize;

                        env.Add(new EnvironmentSetting(Constant.ServiceConfigMaxMessageEnvVar, maxMessageSize.ToString()));

                        // pass service operation timeout to service hosts
                        int? serviceOperationTimeout = null;

                        if (startInfo.ServiceOperationTimeout.HasValue)
                        {
                            serviceOperationTimeout = startInfo.ServiceOperationTimeout;
                        }
                        else if (brokerConfigurations != null && brokerConfigurations.LoadBalancing != null)
                        {
                            serviceOperationTimeout = brokerConfigurations.LoadBalancing.ServiceOperationTimeout;
                        }

                        if (serviceOperationTimeout.HasValue)
                        {
                            env.Add(new EnvironmentSetting(Constant.ServiceConfigServiceOperatonTimeoutEnvVar, serviceOperationTimeout.Value.ToString()));
                        }

                        if (startInfo.Environments != null)
                        {
                            foreach (KeyValuePair <string, string> entry in startInfo.Environments)
                            {
                                env.Add(new EnvironmentSetting(entry.Key, entry.Value));
                            }
                        }

                        // Each SOA job is assigned a GUID "secret", which is used
                        // to identify soa job owner. When a job running in Azure
                        // tries to access common data, it sends this "secret" together
                        // with a data request to data service.  Data service trusts
                        // the data request only if the job id and job "secret"
                        // match.
                        env.Add(new EnvironmentSetting(Constant.JobSecretEnvVar, Guid.NewGuid().ToString()));

                        // Set CCP_SERVICE_SESSIONPOOL env var of the job
                        if (startInfo.UseSessionPool)
                        {
                            env.Add(new EnvironmentSetting(Constant.ServiceUseSessionPoolEnvVar, bool.TrueString));
                        }

                        void SetBrokerNodeAuthenticationInfo()
                        {
                            // TODO: set the information needed by compute node to authenticate broker node
                            return;
                        }

                        SetBrokerNodeAuthenticationInfo();

                        env.Add(new EnvironmentSetting(BrokerSettingsConstants.Secure, startInfo.Secure.ToString()));
                        env.Add(new EnvironmentSetting(BrokerSettingsConstants.TransportScheme, startInfo.TransportScheme.ToString()));

                        TraceHelper.TraceEvent(
                            TraceEventType.Information,
                            "[AzureBatchSessionLauncher] .CreateAndSubmitSessionJob: callId={0}, set job environment: {1}={2}, {3}={4}.",
                            callId,
                            BrokerSettingsConstants.Secure,
                            startInfo.Secure,
                            BrokerSettingsConstants.TransportScheme,
                            startInfo.TransportScheme);

                        env.Add(new EnvironmentSetting(TelepathyConstants.SchedulerEnvironmentVariableName, Dns.GetHostName()));
                        env.Add(new EnvironmentSetting(Constant.OverrideProcNumEnvVar, "TRUE"));

                        // Establish a link via environment variable between TELEPATHY_SERVICE_WORKING_DIR and AZ_BATCH_JOB_PREP_WORKING_DIR
                        env.Add(new EnvironmentSetting(TelepathyConstants.ServiceWorkingDirEnvVar, AzureBatchPrepJobWorkingDir));
                        return(env);
                    }
                    var environment = ConstructEnvironmentVariable();

                    ResourceFile GetResourceFileReference(string containerName, string blobPrefix)
                    {
                        var          sasToken = AzureStorageUtil.ConstructContainerSas(this.cloudStorageAccount, containerName, SharedAccessBlobPermissions.List | SharedAccessBlobPermissions.Read);
                        ResourceFile rf;

                        if (string.IsNullOrEmpty(blobPrefix))
                        {
                            rf = ResourceFile.FromStorageContainerUrl(sasToken);
                        }
                        else
                        {
                            rf = ResourceFile.FromStorageContainerUrl(sasToken, blobPrefix: blobPrefix);
                        }

                        return(rf);
                    }

                    async Task <string> CreateJobAsync()
                    {
                        //TODO: need a function to test if all parameters are legal.
                        if (startInfo.MaxUnits != null && startInfo.MaxUnits <= 0)
                        {
                            throw new ArgumentException("Maxunit value is invalid.");
                        }
                        string newJobId = AzureBatchSessionJobIdConverter.ConvertToAzureBatchJobId(AzureBatchSessionIdGenerator.GenerateSessionId());

                        Debug.Assert(batchClient != null, nameof(batchClient) + " != null");
                        var job = batchClient.JobOperations.CreateJob(newJobId, new PoolInformation()
                        {
                            PoolId = AzureBatchConfiguration.BatchPoolName
                        });

                        job.JobPreparationTask = new JobPreparationTask(JobPrepCmdLine);
                        job.JobPreparationTask.UserIdentity  = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Task));
                        job.JobPreparationTask.ResourceFiles = new List <ResourceFile>()
                        {
                            GetResourceFileReference(ServiceRegistrationContainer, null),
                            GetResourceFileReference(RuntimeContainer, CcpServiceHostFolder),
                            GetResourceFileReference(ServiceAssemblyContainer, startInfo.ServiceName.ToLower())
                        };

                        job.JobReleaseTask = new JobReleaseTask(JobReleaseCmdLine);
                        job.JobReleaseTask.UserIdentity = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Task));

                        // List<ResourceFile> resourceFiles = new List<ResourceFile>();
                        // resourceFiles.Add(GetResourceFileReference(RuntimeContainer, BrokerFolder));
                        // resourceFiles.Add(GetResourceFileReference(ServiceRegistrationContainer, null));

                        // // job.JobManagerTask = new JobManagerTask("Broker",
                        // // $@"cmd /c {AzureBatchTaskWorkingDirEnvVar}\broker\HpcBroker.exe -d --ServiceRegistrationPath {AzureBatchTaskWorkingDirEnvVar} --AzureStorageConnectionString {AzureBatchConfiguration.SoaBrokerStorageConnectionString} --EnableAzureStorageQueueEndpoint True --SvcHostList {string.Join(",", nodes.Select(n => n.IPAddress))}");
                        // job.JobManagerTask = new JobManagerTask("List",
                        // $@"cmd /c dir & set");
                        // job.JobManagerTask.ResourceFiles = resourceFiles;
                        // job.JobManagerTask.UserIdentity = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Task));

                        // Set Meta Data
                        if (job.Metadata == null)
                        {
                            job.Metadata = new List <MetadataItem>();
                        }

                        Dictionary <string, string> jobMetadata = new Dictionary <string, string>()
                        {
                            { BrokerSettingsConstants.ShareSession, startInfo.ShareSession.ToString() },
                            { BrokerSettingsConstants.Secure, startInfo.Secure.ToString() },
                            { BrokerSettingsConstants.TransportScheme, ((int)startInfo.TransportScheme).ToString() },
                            { BrokerSettingsConstants.UseAzureQueue, (startInfo.UseAzureQueue == true).ToString() },
                        };

                        if (startInfo.ServiceVersion != null)
                        {
                            jobMetadata.Add(BrokerSettingsConstants.ServiceVersion, startInfo.ServiceVersion?.ToString());
                        }

                        if (startInfo.MaxUnits != null)
                        {
                            jobMetadata.Add("MaxUnits", startInfo.MaxUnits.ToString());
                        }

                        Dictionary <string, int?> jobOptionalMetadata = new Dictionary <string, int?>()
                        {
                            { BrokerSettingsConstants.ClientIdleTimeout, startInfo.ClientIdleTimeout },
                            { BrokerSettingsConstants.SessionIdleTimeout, startInfo.SessionIdleTimeout },
                            { BrokerSettingsConstants.MessagesThrottleStartThreshold, startInfo.MessagesThrottleStartThreshold },
                            { BrokerSettingsConstants.MessagesThrottleStopThreshold, startInfo.MessagesThrottleStopThreshold },
                            { BrokerSettingsConstants.ClientConnectionTimeout, startInfo.ClientConnectionTimeout },
                            { BrokerSettingsConstants.ServiceConfigMaxMessageSize, startInfo.MaxMessageSize },
                            { BrokerSettingsConstants.ServiceConfigOperationTimeout, startInfo.ServiceOperationTimeout },
                            { BrokerSettingsConstants.DispatcherCapacityInGrowShrink, startInfo.DispatcherCapacityInGrowShrink }
                        };

                        job.Metadata = job.Metadata.Concat(jobMetadata.Select(p => new MetadataItem(p.Key, p.Value)))
                                       .Concat(jobOptionalMetadata.Where(p => p.Value.HasValue).Select(p => new MetadataItem(p.Key, p.Value.ToString()))).ToList();

                        job.DisplayName = $"{job.Id} - {startInfo.ServiceName} - WCF Service";
                        await job.CommitAsync();

                        return(job.Id);
                    }

                    var jobId = await CreateJobAsync();

                    string sessionId = AzureBatchSessionJobIdConverter.ConvertToSessionId(jobId);
                    if (!sessionId.Equals("-1"))
                    {
                        sessionAllocateInfo.Id = sessionId;
                    }
                    else
                    {
                        TraceHelper.TraceEvent(TraceEventType.Error, "[AzureBatchSessionLauncher] .CreateAndSubmitSessionJob: JobId was failed to parse. callId={0}, jobId={1}.", callId, jobId);
                    }

                    Task AddTasksAsync()
                    {
                        int numTasks = startInfo.MaxUnits != null ? (int)startInfo.MaxUnits : nodes.Count;

                        var comparer = new EnvironmentSettingComparer();

                        CloudTask CreateTask(string taskId)
                        {
                            CloudTask cloudTask = new CloudTask(taskId, $@"cmd /c %{TelepathyConstants.ServiceWorkingDirEnvVar}%\ccpservicehost\CcpServiceHost.exe -standalone");

                            cloudTask.UserIdentity        = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Pool));
                            cloudTask.EnvironmentSettings = cloudTask.EnvironmentSettings == null ? environment : environment.Union(cloudTask.EnvironmentSettings, comparer).ToList();
                            return(cloudTask);
                        }

                        CloudTask CreateBrokerTask(bool direct)
                        {
                            List <ResourceFile> resourceFiles = new List <ResourceFile>();

                            resourceFiles.Add(GetResourceFileReference(RuntimeContainer, BrokerFolder));

                            string cmd;

                            if (direct)
                            {
                                cmd =
                                    $@"cmd /c %{TelepathyConstants.ServiceWorkingDirEnvVar}%\broker\HpcBroker.exe -d --SvcHostList {string.Join(",", nodes.Select(n => n.IPAddress))}";
                            }
                            else
                            {
                                cmd =
                                    $@"cmd /c %{TelepathyConstants.ServiceWorkingDirEnvVar}%\broker\HpcBroker.exe -d --AzureStorageConnectionString {AzureBatchConfiguration.SoaBrokerStorageConnectionString} --EnableAzureStorageQueueEndpoint True --SvcHostList {string.Join(",", nodes.Select(n => n.IPAddress))}";
                            }

                            CloudTask cloudTask = new CloudTask("Broker", cmd);

                            cloudTask.ResourceFiles       = resourceFiles;
                            cloudTask.UserIdentity        = new UserIdentity(new AutoUserSpecification(elevationLevel: ElevationLevel.Admin, scope: AutoUserScope.Pool));
                            cloudTask.EnvironmentSettings = cloudTask.EnvironmentSettings == null ? environment : environment.Union(cloudTask.EnvironmentSettings, comparer).ToList();
                            return(cloudTask);
                        }

                        //TODO: task id type should be changed from int to string
                        var tasks = Enumerable.Range(0, numTasks - 1).Select(_ => CreateTask(Guid.NewGuid().ToString())).ToArray();

                        if (!brokerPerfMode)
                        {
                            tasks = tasks.Union(new[] { CreateBrokerTask(true) }).ToArray();
                        }
                        else
                        {
                            tasks = tasks.Union(new[] { CreateTask(Guid.NewGuid().ToString()) }).ToArray();
                        }

                        return(batchClient.JobOperations.AddTaskAsync(jobId, tasks));
                    }

                    await AddTasksAsync();

                    async Task WaitBatchBrokerLauncher()
                    {
                        var brokerTask = await batchClient.JobOperations.GetTaskAsync(jobId, "Broker");

                        TaskStateMonitor monitor = batchClient.Utilities.CreateTaskStateMonitor();
                        await monitor.WhenAll(new[] { brokerTask }, TaskState.Running, SchedulingTimeout);

                        await brokerTask.RefreshAsync();

                        var brokerNodeIp = nodes.First(n => n.AffinityId == brokerTask.ComputeNodeInformation.AffinityId).IPAddress;

                        sessionAllocateInfo.BrokerLauncherEpr = new[] { SoaHelper.GetBrokerLauncherAddress(brokerNodeIp) };
                    }

                    if (brokerPerfMode)
                    {
                        // If the broker node and the session launcher node are not the same machine, this line should be modified.
                        sessionAllocateInfo.BrokerLauncherEpr = new[] { SoaHelper.GetBrokerLauncherAddress(Environment.MachineName) };
                    }
                    else
                    {
                        await WaitBatchBrokerLauncher();
                    }

                    return(sessionAllocateInfo);
                }
            }
            catch (Exception ex)
            {
                TraceHelper.TraceEvent(TraceEventType.Error, $"[{nameof(AzureBatchSessionLauncher)}] .{nameof(this.CreateAndSubmitSessionJob)}: Exception happens: {ex.ToString()}");
                throw;
            }
        }
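        // The EnvironmentSettingComparer used with Union above is not included in this snippet. A
        // plausible minimal implementation (an assumption, not the original type) compares settings by
        // name only, so Union keeps the first setting seen for each name and drops later duplicates.
        private class EnvironmentSettingNameComparer : IEqualityComparer<EnvironmentSetting>
        {
            public bool Equals(EnvironmentSetting x, EnvironmentSetting y)
            {
                if (ReferenceEquals(x, y)) { return true; }
                if (x == null || y == null) { return false; }

                return string.Equals(x.Name, y.Name, StringComparison.OrdinalIgnoreCase);
            }

            public int GetHashCode(EnvironmentSetting obj)
            {
                return obj?.Name == null ? 0 : StringComparer.OrdinalIgnoreCase.GetHashCode(obj.Name);
            }
        }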
        /// <summary>
        /// Asynchronous method that delays execution until the nodes within the specified pool reach the specified state.
        /// </summary>
        /// <param name="client">A fully intitialized <see cref="BatchClient"/>.</param>
        /// <param name="poolId">The ID of the pool containing the nodes to monitor.</param>
        /// <param name="targetNodeState">The node state to monitor.</param>
        /// <param name="timeout">The maximum time to wait for the nodes to reach the specified state.</param>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        public static async Task WaitForNodesToReachStateAsync(BatchClient client, string poolId, ComputeNodeState targetNodeState, TimeSpan timeout)
        {
            Console.WriteLine("Waiting for nodes to reach state {0}", targetNodeState);

            DateTime startTime = DateTime.UtcNow;
            DateTime timeoutAfterThisTimeUtc = startTime.Add(timeout);

            CloudPool pool = await client.PoolOperations.GetPoolAsync(poolId);

            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,state");
            IEnumerable<ComputeNode> computeNodes = pool.ListComputeNodes(detail);

            while (computeNodes.Any(computeNode => computeNode.State != targetNodeState))
            {
                Console.Write(".");

                await Task.Delay(TimeSpan.FromSeconds(10));
                computeNodes = pool.ListComputeNodes(detail).ToList();

                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
                {
                    Console.WriteLine();
                    Console.WriteLine("Timed out waiting for compute nodes in pool {0} to reach state {1}", poolId, targetNodeState.ToString());

                    return;
                }
            }

            Console.WriteLine();
        }
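        // Hedged usage sketch for the helper above: once a pool has been created or resized, wait first
        // for allocation to settle and then for the nodes themselves to become idle. The method name,
        // pool id, and timeouts are illustrative; WaitForPoolToReachStateAsync is the companion helper
        // defined earlier in these samples.
        private static async Task WaitForPoolReadyAsync(BatchClient batchClient, string poolId)
        {
            await WaitForPoolToReachStateAsync(batchClient, poolId, AllocationState.Steady, TimeSpan.FromMinutes(10));
            await WaitForNodesToReachStateAsync(batchClient, poolId, ComputeNodeState.Idle, TimeSpan.FromMinutes(10));
        }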
Example #40
        public static void Main(string[] args)
        {
            const int taskCount = 5000;

            const string poolId = "poolEffQuery";
            const string jobId  = "jobEffQuery";

            // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CreatePool(batchClient, poolId).Wait();
                CloudPool pool = batchClient.PoolOperations.GetPool(poolId);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = ArticleHelpers.CreateJobAsync(batchClient, poolId, jobId).Result;

                // Configure the tasks we'll be querying. Each task simply echoes the node's
                // name and then exits. We create "large" tasks by setting an environment
                // variable for each that is 2048 bytes in size. This is done simply to
                // increase response time when querying the batch service to more clearly
                // demonstrate query durations.
                List <CloudTask>          tasks = new List <CloudTask>();
                List <EnvironmentSetting> environmentSettings = new List <EnvironmentSetting>();
                environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
                for (int i = 1; i < taskCount + 1; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(5, '0');
                    string    taskCommandLine = "cmd /c echo %COMPUTERNAME%";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    task.EnvironmentSettings = environmentSettings;
                    tasks.Add(task);
                }

                Console.WriteLine();
                Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

                Stopwatch stopwatch = new Stopwatch();
                stopwatch.Start();

                // To reduce the chances of hitting Batch service throttling limits, we add the tasks in
                // one API call as opposed to a separate AddTask call for each. This is crucial if you
                // are adding many tasks to your jobs.
                batchClient.JobOperations.AddTask(job.Id, tasks);

                stopwatch.Stop();
                Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
                Console.ReadLine();
                Console.WriteLine();
                stopwatch.Reset();

                // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
                // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
                // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
                // transferred, lowering your query response times (potentially greatly).

                // Get a subset of the tasks based on different task states
                ODATADetailLevel detail = new ODATADetailLevel();
                detail.FilterClause = "state eq 'active'";
                detail.SelectClause = "id,state";
                QueryTasks(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'running'";
                QueryTasks(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'completed'";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, but limit the properties returned to task id and state only
                detail.FilterClause = null;
                detail.SelectClause = "id,state";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, include id and state, also include the inflated environment settings property
                detail.SelectClause = "id,state,environmentSettings";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, include all standard properties, and expand the statistics
                detail.ExpandClause = "stats";
                detail.SelectClause = null;
                QueryTasks(batchClient, job.Id, detail);

                Console.WriteLine();
                Console.WriteLine("Sample complete, hit ENTER to continue...");
                Console.ReadLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.JobOperations.DeleteJob(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.PoolOperations.DeletePool(pool.Id);
                }
            }
        }
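        // Hedged sketches of the two helpers referenced in Main above but not shown in this excerpt:
        // GetBigString builds a padding string of the requested length, and QueryTasks times a
        // ListTasks call issued with the supplied detail level. Both bodies are assumptions that match
        // how the helpers are used, not the original implementations.
        private static string GetBigString(int size)
        {
            return new string('X', size);
        }

        private static void QueryTasks(BatchClient batchClient, string jobId, ODATADetailLevel detail)
        {
            Stopwatch stopwatch = Stopwatch.StartNew();
            List<CloudTask> tasks = batchClient.JobOperations.ListTasks(jobId, detail).ToList();
            stopwatch.Stop();

            Console.WriteLine("{0} tasks retrieved in {1} (filter: {2}, select: {3}, expand: {4})",
                tasks.Count,
                stopwatch.Elapsed,
                detail.FilterClause,
                detail.SelectClause,
                detail.ExpandClause);
        }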
        /// <summary>
        /// Lists the node agent SKUs matching the specified filter options.
        /// </summary>
        /// <param name="context">The account to use.</param>
        /// <param name="filterClause">The level of detail</param>
        /// <param name="maxCount">The number of results.</param>
        /// <param name="additionalBehaviors">Additional client behaviors to perform.</param>
        /// <returns>The node agent SKUs matching the specified filter.</returns>
        public IEnumerable<PSNodeAgentSku> ListNodeAgentSkus(
            BatchAccountContext context,
            string filterClause = default(string),
            int maxCount = default(int),
            IEnumerable<BatchClientBehavior> additionalBehaviors = null)
        {
            PoolOperations poolOperations = context.BatchOMClient.PoolOperations;
            ODATADetailLevel filterLevel = new ODATADetailLevel(filterClause: filterClause);

            IPagedEnumerable<NodeAgentSku> nodeAgentSkus = poolOperations.ListNodeAgentSkus(filterLevel, additionalBehaviors);
            Func<NodeAgentSku, PSNodeAgentSku> mappingFunction = p => { return new PSNodeAgentSku(p); };

            return PSPagedEnumerable<PSNodeAgentSku, NodeAgentSku>.CreateWithMaxCount(nodeAgentSkus, mappingFunction,
                maxCount, () => WriteVerbose(string.Format(Resources.MaxCount, maxCount)));
        }
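        // Hedged sketch of consuming the underlying node agent SKU listing directly from the Batch
        // client, mirroring the call wrapped above. The method name is illustrative, and the printed
        // properties (Id, OSType) are assumed from the Batch .NET object model.
        private static async Task PrintNodeAgentSkusAsync(BatchClient batchClient)
        {
            IPagedEnumerable<NodeAgentSku> nodeAgentSkus = batchClient.PoolOperations.ListNodeAgentSkus();

            await nodeAgentSkus.ForEachAsync(sku => Console.WriteLine("{0} ({1})", sku.Id, sku.OSType));
        }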
Example #42
        static async Task ManageTasks(string[] args)
        {
            int    experimentId = int.Parse(args[0], CultureInfo.InvariantCulture);
            string summaryName  = null;

            if (args.Length > 1)
            {
                summaryName = args[1];
            }
            //Console.WriteLine(String.Format("Params are:\n id: {0}\ncontainer: {8}\ndirectory:{9}\ncategory: {1}\nextensions: {10}\ndomain: {11}\nexec: {2}\nargs: {3}\ntimeout: {4}\nmemlimit: {5}\noutlimit: {6}\nerrlimit: {7}", experimentId, benchmarkCategory, executable, arguments, timeout, memoryLimit, outputLimit, errorLimit, benchmarkContainerUri, benchmarkDirectory, extensionsString, domainString));
#if DEBUG
            string jobId = "cz3_exp8535";
#else
            string jobId = Environment.GetEnvironmentVariable(JobIdEnvVariableName);
#endif
            Console.WriteLine("Retrieving credentials...");
            var secretStorage = new SecretStorage(Settings.Default.AADApplicationId, Settings.Default.AADApplicationCertThumbprint, Settings.Default.KeyVaultUrl);
            BatchConnectionString credentials = new BatchConnectionString(await secretStorage.GetSecret(Settings.Default.ConnectionStringSecretId));

            var batchCred = new BatchSharedKeyCredentials(credentials.BatchURL, credentials.BatchAccountName, credentials.BatchAccessKey);
            var storage   = new AzureExperimentStorage(credentials.WithoutBatchData().ToString());

            var expInfo = await storage.GetExperiment(experimentId);

            string benchmarkContainerUri = expInfo.BenchmarkContainerUri;  // args[1];
            string benchmarkDirectory    = expInfo.BenchmarkDirectory;     // args[2];
            string benchmarkCategory     = expInfo.Category;               // args[3];
            string extensionsString      = expInfo.BenchmarkFileExtension; //args[4];
            string domainString          = expInfo.DomainName;             // args[5];
            string executable            = expInfo.Executable;             // args[6];
            string arguments             = expInfo.Parameters;             // args[7];
            double timeout        = expInfo.BenchmarkTimeout;              // TimeSpan.FromSeconds(double.Parse(args[8]));
            double memoryLimit    = expInfo.MemoryLimitMB;                 // 0; // no limit
            int    maxRepetitions = expInfo.AdaptiveRunMaxRepetitions;
            double maxTime        = expInfo.AdaptiveRunMaxTimeInSeconds;

            long? outputLimit = 1 * (1024 * 1024); // 1 MB
            long? errorLimit  = 256 * 1024;        // 256 KB

            AzureBenchmarkStorage benchmarkStorage = CreateBenchmarkStorage(benchmarkContainerUri, storage);

            var queue = await storage.CreateResultsQueue(experimentId);

            DateTime before = DateTime.Now;
            Console.Write("Fetching existing results...");
            await FetchSavedResults(experimentId, storage);

            Domain           domain = ResolveDomain(domainString);
            HashSet <string> extensions;
            if (string.IsNullOrEmpty(extensionsString))
            {
                extensions = new HashSet <string>(domain.BenchmarkExtensions.Distinct());
            }
            else
            {
                extensions = new HashSet <string>(extensionsString.Split('|').Select(s => s.Trim().TrimStart('.')).Distinct());
            }

            using (BatchClient batchClient = BatchClient.Open(batchCred))
            {
                // Exclude benchmarks that finished correctly
                var    processedBlobs = new HashSet <string>();
                string prefix         = (benchmarkDirectory.Trim('/') + "/" + benchmarkCategory.Trim('/')).Trim('/');
                foreach (var r in goodResults.Select(g => prefix + "/" + g.BenchmarkFileName))
                {
                    processedBlobs.Add(r.Trim());
                }
                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                // Exclude those that are still in progress
                ODATADetailLevel detailLevel = new ODATADetailLevel();
                detailLevel.FilterClause = "(state eq 'active') or (state eq 'running') or (state eq 'preparing')";
                detailLevel.SelectClause = "id,displayName";

                CloudJob old_job = null;
                try { old_job = batchClient.JobOperations.GetJob(jobId); } catch { /* OK */ }

                if (old_job != null)
                {
                    before = DateTime.Now;
                    Console.Write("Listing existing tasks...");
                    var ts = batchClient.JobOperations.ListTasks(jobId, detailLevel);
                    foreach (CloudTask t in ts)
                    {
                        int id;

                        if (int.TryParse(t.Id, out id))
                        {
                            string n = t.DisplayName.Trim();
                            if (!processedBlobs.Contains(n))
                            {
                                processedBlobs.Add(n);
                            }
                        }
                    }
                    Console.WriteLine(" took {0}.", (DateTime.Now - before));

                    // Create new job if the old one is already sealed off
                    switch (old_job.State)
                    {
                    case Microsoft.Azure.Batch.Common.JobState.Completed:
                    case Microsoft.Azure.Batch.Common.JobState.Deleting:
                    case Microsoft.Azure.Batch.Common.JobState.Disabled:
                    case Microsoft.Azure.Batch.Common.JobState.Disabling:
                    case Microsoft.Azure.Batch.Common.JobState.Terminating:
                    {
                        before = DateTime.Now;
                        Console.Write("Creating fresh job...");
                        PoolInformation pool_info = old_job.PoolInformation;
                        string          new_jid;
                        int             cnt      = 1;
                        bool            have_jid = false;
                        do
                        {
                            new_jid = String.Format("{0}-{1}", jobId, cnt++);
                            try
                            {
                                CloudJob new_job = batchClient.JobOperations.CreateJob(new_jid, pool_info);
                                new_job.OnAllTasksComplete = Microsoft.Azure.Batch.Common.OnAllTasksComplete.NoAction;
                                new_job.OnTaskFailure      = old_job.OnTaskFailure;
                                new_job.Constraints        = old_job.Constraints;
                                new_job.DisplayName        = old_job.DisplayName;
                                new_job.Commit();
                                have_jid = true;
                            }
                            catch (Microsoft.Azure.Batch.Common.BatchException)
                            {
                                Console.Write(".");
                            }
                        } while (!have_jid);
                        jobId = new_jid;
                        Console.WriteLine(" took {0}.", (DateTime.Now - before));
                        break;
                    }
                    }
                }

                BlobContinuationToken continuationToken = null;
                BlobResultSegment     resultSegment     = null;

                before = DateTime.Now;
                Console.Write("Adding tasks...");
                List <Task> starterTasks       = new List <Task>();
                int         benchmarksTotal    = processedBlobs.Count();
                string      benchmarksPath     = CombineBlobPath(benchmarkDirectory, benchmarkCategory);
                string      outputQueueUri     = storage.GetOutputQueueSASUri(experimentId, TimeSpan.FromHours(48));
                string      outputContainerUri = storage.GetOutputContainerSASUri(TimeSpan.FromHours(48));
                do
                {
                    resultSegment = await benchmarkStorage.ListBlobsSegmentedAsync(benchmarksPath, continuationToken);

                    string[] blobNamesToProcess = resultSegment.Results.SelectMany(item =>
                    {
                        var blob = item as CloudBlockBlob;
                        if (blob == null || processedBlobs.Contains(blob.Name))
                        {
                            return new string[] { };
                        }

                        var nameParts      = blob.Name.Split('/');
                        var shortnameParts = nameParts[nameParts.Length - 1].Split('.');
                        if (shortnameParts.Length == 1 && !extensions.Contains(""))
                        {
                            return new string[] { };
                        }

                        var ext = shortnameParts[shortnameParts.Length - 1];
                        if (!extensions.Contains(ext))
                        {
                            return new string[] { };
                        }

                        return new string[] { blob.Name };
                    }).ToArray();
                    starterTasks.Add(StartTasksForSegment(timeout.ToString(), experimentId, executable, arguments, memoryLimit, domainString, outputQueueUri, outputContainerUri, outputLimit, errorLimit, jobId, batchClient, blobNamesToProcess, benchmarksPath, benchmarksTotal, benchmarkStorage, maxRepetitions, maxTime));

                    continuationToken = resultSegment.ContinuationToken;
                    benchmarksTotal  += blobNamesToProcess.Length;
                } while (continuationToken != null);

                await storage.SetBenchmarksTotal(experimentId, benchmarksTotal);

                Program.benchmarksTotal = benchmarksTotal;
                benchmarksToProcess     = benchmarksTotal - goodResults.Count;
                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                before = DateTime.Now;
                Console.Write("Waiting for tasks to start...");
                await Task.WhenAll(starterTasks.ToArray());

                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                CloudJob j = batchClient.JobOperations.GetJob(jobId);
                j.OnAllTasksComplete = Microsoft.Azure.Batch.Common.OnAllTasksComplete.TerminateJob;
                j.CommitChanges();

                before = DateTime.Now;
                Console.Write("Waiting for results...");
                var collectionTask = CollectResults(experimentId, storage);
                Console.WriteLine(" took {0}.", (DateTime.Now - before));

                MonitorTasksUntilCompletion(experimentId, jobId, collectionTask, batchClient, domain);

                if (summaryName != null && expInfo.Creator == "Nightly")
                {
                    Trace.WriteLine(string.Format("Building summary for experiment {0} and summary name {1}...", experimentId, summaryName));
                    AzureSummaryManager manager = new AzureSummaryManager(credentials.WithoutBatchData().ToString(), MEFDomainResolver.Instance);
                    await AppendSummaryAndSendReport(summaryName, experimentId, domain, manager);
                }
                else
                {
                    Trace.WriteLine("No summary requested.");
                }

                try
                {
                    int? amc = storage.GetResultsQueueReference(experimentId).ApproximateMessageCount;

                    if (amc.HasValue && amc.Value == 0)
                    {
                        switch (batchClient.JobOperations.GetJob(jobId).State)
                        {
                        case Microsoft.Azure.Batch.Common.JobState.Completed:
                        case Microsoft.Azure.Batch.Common.JobState.Disabled:
                            Console.WriteLine("Deleting Batch job and results queue.");
                            await batchClient.JobOperations.DeleteJobAsync(jobId);

                            await storage.DeleteResultsQueue(experimentId);

                            break;
                        }
                    }
                }
                catch { /* OK */ }

                Console.WriteLine("Closing.");
            }
        }
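        // Hedged sketch of the CombineBlobPath helper used above. It mirrors the prefix construction
        // earlier in this method (trim surrounding slashes and join with '/'); whether the real helper
        // appends a trailing slash is an assumption made so the value can serve directly as a blob
        // listing prefix.
        private static string CombineBlobPath(string benchmarkDirectory, string benchmarkCategory)
        {
            string path = (benchmarkDirectory.Trim('/') + "/" + benchmarkCategory.Trim('/')).Trim('/');

            return string.IsNullOrEmpty(path) ? string.Empty : path + "/";
        }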
Example #43
        /// <summary>
        /// Prints task information to the console for each of the nodes in the specified pool.
        /// </summary>
        /// <param name="poolId">The ID of the <see cref="CloudPool"/> containing the nodes whose task information
        /// should be printed to the console.</param>
        private static void PrintNodeTasks(BatchClient batchClient, string poolId)
        {
            ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id,recentTasks");

            // Obtain and print the task information for each of the compute nodes in the pool.
            foreach (ComputeNode node in batchClient.PoolOperations.ListComputeNodes(poolId, nodeDetail))
            {
                Console.WriteLine();
                Console.WriteLine(node.Id + " tasks:");

                if (node.RecentTasks != null && node.RecentTasks.Any())
                {
                    foreach (TaskInformation task in node.RecentTasks)
                    {
                        Console.WriteLine("\t{0}: {1}", task.TaskId, task.TaskState);
                    }
                }
                else
                {
                    // No tasks found for the node
                    Console.WriteLine("\tNone");
                }

            }
        }
Example #44
        private static async Task MainAsync()
        {
            string poolId = "JobPrepReleaseSamplePool";
            string jobId  = "JobPrepReleaseSampleJob";

            var settings = Config.LoadAccountSettings();

            // Location of the file that the job tasks will work with, a text file in the
            // node's "shared" directory.
            string taskOutputFile = "$AZ_BATCH_NODE_SHARED_DIR/job_prep_and_release.txt";

            // The job prep task will write the node ID to the text file in the shared directory
            string jobPrepCmdLine = $@"/bin/bash -c ""echo $AZ_BATCH_NODE_ID tasks: > {taskOutputFile}""";

            // Each task then echoes its ID to the same text file
            string taskCmdLine = $@"/bin/bash -c ""echo $AZ_BATCH_TASK_ID >> {taskOutputFile}""";

            // The job release task will then delete the text file from the shared directory
            string jobReleaseCmdLine = $@"/bin/bash -c ""rm {taskOutputFile}""";

            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(settings.BatchServiceUrl, settings.BatchAccountName, settings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                var pool = await BatchUtils.CreatePoolIfNotExistAsync(batchClient, poolId);

                var prepTask = new JobPreparationTask {
                    CommandLine = jobPrepCmdLine
                };
                var releaseTask = new JobReleaseTask {
                    CommandLine = jobReleaseCmdLine
                };

                var job = await BatchUtils.CreateJobIfNotExistAsync(batchClient, pool.Id, jobId, prepTask : prepTask, releaseTask : releaseTask);

                // Create the tasks that the job will execute
                List <CloudTask> tasks = new List <CloudTask>();
                for (int i = 1; i <= 8; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(3, '0');
                    string    taskCommandLine = taskCmdLine;
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
                // submission helps to ensure efficient underlying API calls to the Batch service.
                Console.WriteLine("Submitting tasks and awaiting completion...");
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
                // for the nodes within the pool to be created and started if the pool had not yet been created.
                await batchClient.Utilities.CreateTaskStateMonitor().WhenAll(
                    job.ListTasks(),
                    TaskState.Completed,
                    TimeSpan.FromMinutes(30));

                Console.WriteLine("All tasks completed.");
                Console.WriteLine();

                // Print the contents of the shared text file modified by the job preparation and other tasks.
                ODATADetailLevel nodeDetail          = new ODATADetailLevel(selectClause: "id, state");
                IPagedEnumerable <ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(poolId, nodeDetail);
                await nodes.ForEachAsync(async (node) =>
                {
                    // Check to ensure that the node is Idle before attempting to pull the text file.
                    // If the pool was just created, there is a chance that another node completed all
                    // of the tasks prior to the other node(s) completing their startup procedure.
                    if (node.State == ComputeNodeState.Idle)
                    {
                        var files = await node.ListNodeFiles().ToListAsync();
                        NodeFile sharedTextFile = await node.GetNodeFileAsync("shared/job_prep_and_release.txt");
                        Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Path, node.Id);
                        Console.WriteLine("-------------------------------------------");
                        Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
                    }
                });

                // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
                // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
                // thus you need not call Terminate if you typically delete your jobs upon task completion.
                await batchClient.JobOperations.TerminateJobAsync(job.Id);

                // Wait for the job to reach state "Completed." Note that this wait is not typically necessary in
                // production code, but is done here to enable the checking of the release tasks exit code below.
                await BatchUtils.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

                // Print the exit codes of the prep and release tasks by obtaining their execution info
                List <JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();

                foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
                {
                    Console.WriteLine();
                    Console.WriteLine("{0}: ", info.ComputeNodeId);

                    // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
                    if (info.JobPreparationTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Prep task exit code:    {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
                    }

                    // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
                    if (info.JobReleaseTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
                    }
                }

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    // Note that deleting the job will execute the job release task if the job was not previously terminated
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(poolId);
                }
            }
        }
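        // Hedged sketch of the BatchUtils.WaitForJobToReachStateAsync helper awaited above; it follows
        // the same polling pattern as the pool and node state helpers in these samples, and the
        // ten-second poll interval is an assumption.
        public static async Task WaitForJobToReachStateAsync(BatchClient batchClient, string jobId, JobState targetState, TimeSpan timeout)
        {
            DateTime timeoutAfterThisTimeUtc = DateTime.UtcNow.Add(timeout);

            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id,state");
            CloudJob job = await batchClient.JobOperations.GetJobAsync(jobId, detail);

            while (job.State != targetState)
            {
                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
                {
                    Console.WriteLine("Timed out waiting for job {0} to reach state {1}", jobId, targetState);
                    return;
                }

                await Task.Delay(TimeSpan.FromSeconds(10));
                await job.RefreshAsync(detail);
            }
        }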
        /// <summary>
        /// Runs the job manager task.
        /// </summary>
        public async Task RunAsync()
        {
            Console.WriteLine("JobManager for account: {0}, job: {1} has started...",
                this.accountName,
                this.jobId);
            Console.WriteLine();

            Console.WriteLine("JobManager running with the following settings: ");
            Console.WriteLine("----------------------------------------");
            Console.WriteLine(this.configurationSettings.ToString());

            //Set up the Batch Service credentials used to authenticate with the Batch Service.
            BatchSharedKeyCredentials batchSharedKeyCredentials = new BatchSharedKeyCredentials(
                this.configurationSettings.BatchServiceUrl,
                this.configurationSettings.BatchAccountName,
                this.configurationSettings.BatchAccountKey);

            using (BatchClient batchClient = await BatchClient.OpenAsync(batchSharedKeyCredentials))
            {
                //Construct a container SAS to provide the Batch Service access to the files required to
                //run the mapper and reducer tasks.
                string containerSas = Helpers.ConstructContainerSas(
                    this.configurationSettings.StorageAccountName,
                    this.configurationSettings.StorageAccountKey,
                    this.configurationSettings.StorageServiceUrl,
                    this.configurationSettings.BlobContainer);

                //
                // Submit mapper tasks.
                //
                Console.WriteLine("Submitting {0} mapper tasks.", this.configurationSettings.NumberOfMapperTasks);

                //The collection of tasks to add to the Batch Service.
                List<CloudTask> tasksToAdd = new List<CloudTask>();

                for (int i = 0; i < this.configurationSettings.NumberOfMapperTasks; i++)
                {
                    string taskId = Helpers.GetMapperTaskId(i);
                    string fileBlobName = Helpers.GetSplitFileName(i);
                    string fileBlobPath = Helpers.ConstructBlobSource(containerSas, fileBlobName);

                    string commandLine = string.Format("{0} -MapperTask {1}", Constants.TextSearchExe, fileBlobPath);
                    CloudTask unboundMapperTask = new CloudTask(taskId, commandLine);

                    //The set of files (exes, dlls and configuration files) required to run the mapper task.
                    IReadOnlyList<string> mapperTaskRequiredFiles = Constants.RequiredExecutableFiles;

                    List<ResourceFile> mapperTaskResourceFiles = Helpers.GetResourceFiles(containerSas, mapperTaskRequiredFiles);
                        
                    unboundMapperTask.ResourceFiles = mapperTaskResourceFiles; 

                    tasksToAdd.Add(unboundMapperTask);
                }

                //Submit the unbound task collection to the Batch Service.
                //Use the AddTask method which takes a collection of CloudTasks for the best performance.
                await batchClient.JobOperations.AddTaskAsync(this.jobId, tasksToAdd);

                //
                // Wait for the mapper tasks to complete.
                //
                Console.WriteLine("Waiting for the mapper tasks to complete...");

                //List all the mapper tasks using an id filter.
                DetailLevel mapperTaskIdFilter = new ODATADetailLevel()
                                                        {
                                                            FilterClause = string.Format("startswith(id, '{0}')", Constants.MapperTaskPrefix)
                                                        };

                IEnumerable<CloudTask> tasksToMonitor = batchClient.JobOperations.ListTasks(
                    this.jobId,
                    detailLevel: mapperTaskIdFilter);

                //Use the task state monitor to wait for the tasks to complete.
                TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
                    
                bool timedOut = await taskStateMonitor.WaitAllAsync(tasksToMonitor, TaskState.Completed, TimeSpan.FromMinutes(5));

                //Get the list of mapper tasks in order to analyze their state and ensure they completed successfully.
                IPagedEnumerable<CloudTask> asyncEnumerable = batchClient.JobOperations.ListTasks(
                    this.jobId,
                    detailLevel: mapperTaskIdFilter);

                await asyncEnumerable.ForEachAsync(async cloudTask =>
                                             {
                                                 Console.WriteLine("Task {0} is in state: {1}", cloudTask.Id, cloudTask.State);

                                                 await Helpers.CheckForTaskSuccessAsync(cloudTask, dumpStandardOutOnTaskSuccess: false);

                                                 Console.WriteLine();
                                             });
                
                //If not all the tasks reached the desired state within the timeout then the job manager
                //cannot continue.
                if (timedOut)
                {
                    const string errorMessage = "Mapper tasks did not complete within expected timeout.";
                    Console.WriteLine(errorMessage);
                        
                    throw new TimeoutException(errorMessage);
                }
                    
                //
                // Create the reducer task.
                //
                string reducerTaskCommandLine = string.Format("{0} -ReducerTask", Constants.TextSearchExe);
                    
                Console.WriteLine("Adding the reducer task: {0}", Constants.ReducerTaskId);
                CloudTask unboundReducerTask = new CloudTask(Constants.ReducerTaskId, reducerTaskCommandLine);

                //The set of files (exes, dlls and configuration files) required to run the reducer task.
                List<ResourceFile> reducerTaskResourceFiles = Helpers.GetResourceFiles(containerSas, Constants.RequiredExecutableFiles);

                unboundReducerTask.ResourceFiles = reducerTaskResourceFiles;

                //Send the request to the Batch Service to add the reducer task.
                await batchClient.JobOperations.AddTaskAsync(this.jobId, unboundReducerTask);

                //
                //Wait for the reducer task to complete.
                //

                //Get the bound reducer task and monitor it for completion.
                CloudTask boundReducerTask = await batchClient.JobOperations.GetTaskAsync(this.jobId, Constants.ReducerTaskId);

                timedOut = await taskStateMonitor.WaitAllAsync(new List<CloudTask> {boundReducerTask}, TaskState.Completed, TimeSpan.FromMinutes(2));

                //Refresh the reducer task to get the most recent information about it from the Batch Service.
                await boundReducerTask.RefreshAsync();

                //Dump the reducer task's exit code and scheduling error for debugging purposes.
                await Helpers.CheckForTaskSuccessAsync(boundReducerTask, dumpStandardOutOnTaskSuccess: true);

                //Handle the possibility that the reducer task did not complete within the expected timeout.
                if (timedOut)
                {
                    const string errorMessage = "Reducer task did not complete within expected timeout.";

                    Console.WriteLine("Task {0} is in state: {1}", boundReducerTask.Id, boundReducerTask.State);

                    Console.WriteLine(errorMessage);
                    throw new TimeoutException(errorMessage);
                }
                    
                //The job manager has completed.
                Console.WriteLine("JobManager completed successfully.");
            }
        }
Example #46
        /// <summary>
        /// Queries and prints task information for the specified job.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="jobId">The ID of the job whose tasks should be queried.</param>
        /// <param name="detail">An <see cref="ODATADetailLevel"/> configured with one or more of expand, filter, select clauses.</param>
        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
        private static async Task QueryTasksAsync(BatchClient batchClient, string jobId, ODATADetailLevel detail)
        {
            List <CloudTask> taskList = new List <CloudTask>();

            Stopwatch stopwatch = Stopwatch.StartNew();

            taskList.AddRange(await batchClient.JobOperations.ListTasks(jobId, detail).ToListAsync());

            stopwatch.Stop();

            Console.WriteLine("{0} tasks retrieved in {1} (ExpandClause: {2} | FilterClause: {3} | SelectClause: {4})",
                              taskList.Count,
                              stopwatch.Elapsed,
                              detail.ExpandClause,
                              detail.FilterClause,
                              detail.SelectClause);
        }
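A minimal usage sketch for QueryTasksAsync follows; it assumes an initialized BatchClient, and the job ID and clauses shown are illustrative only.

            // Usage sketch (hypothetical job ID; batchClient is assumed to be initialized elsewhere)
            ODATADetailLevel queryDetail = new ODATADetailLevel();
            queryDetail.FilterClause = "state eq 'completed'";
            queryDetail.SelectClause = "id,state";
            await QueryTasksAsync(batchClient, "MyJobId", queryDetail);

            // Reuse the same detail level object to fetch every task, returning only id and state
            queryDetail.FilterClause = null;
            await QueryTasksAsync(batchClient, "MyJobId", queryDetail);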
Example #47
        /// <summary>
        /// Gets the task info
        /// </summary>
        /// <returns>Returns the task info as a list, one entry per task whose state changed.</returns>
        /// <remarks>
        /// This method returns a list of task info whose state transition time falls within the range [this.lastChangeTime, DateTime.Now].
        /// </remarks>
        private async Task <List <TaskInfo> > GetTaskStateChangeAsync(List <ComputeNode> nodes)
        {
            try
            {
                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Verbose, "[AzureBatchJobMonitor] Query task info...");
                ODATADetailLevel detail = new ODATADetailLevel(filterClause: $"(stateTransitionTime ge datetime'{this.lastChangeTime:O}')", selectClause: "id,nodeInfo,state,stateTransitionTime");
                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Query task info filter clause = {0}\n", detail.FilterClause);
                List <CloudTask> stateChangedTasks = await this.batchClient.JobOperations.ListTasks(this.cloudJob.Id, detail).ToListAsync();

                if (stateChangedTasks.Count == 0)
                {
                    // No service task dispatched yet.
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning,
                                           "[AzureBatchJobMonitorEntry] Failed to get tasks or no task state change.");
                    return(null);
                }

                List <TaskInfo> results = new List <TaskInfo>(stateChangedTasks.Count);
                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] The number of changed state tasks is {0}", stateChangedTasks.Count);
                DateTime lastStateTransitionTime = new DateTime();
                foreach (CloudTask task in stateChangedTasks)
                {
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] task {0} state changed to {1}, at date time = {2}\n", task.Id, task.State, task.StateTransitionTime);
                    TaskState state = task.State.Value;
                    DateTime  stateTransitionTime = task.StateTransitionTime.Value;
                    if (state == TaskState.Running)
                    {
                        TaskInfo info = new TaskInfo();
                        info.Id          = task.Id;
                        info.State       = TaskStateConverter.FromAzureBatchTaskState(task.State.Value);
                        info.MachineName = nodes.First(n => n.AffinityId == task.ComputeNodeInformation.AffinityId)
                                           .IPAddress;
                        info.Capacity       = this.nodeCapacity;
                        info.FirstCoreIndex = Int32.Parse(TelepathyConstants.FirstCoreIndex);
                        TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information, "[AzureBatchJobMonitor] Node capacity in pool is\n", nodeCapacity);
                        results.Add(info);
                    }
                    else if (state == TaskState.Completed)
                    {
                        TaskInfo info = new TaskInfo
                        {
                            Id    = task.Id,
                            State = TaskStateConverter.FromAzureBatchTaskState(task.State.Value)
                        };
                        results.Add(info);
                    }

                    if (DateTime.Compare(lastStateTransitionTime, stateTransitionTime) < 1)
                    {
                        lastStateTransitionTime = stateTransitionTime;
                    }
                }
                this.cloudJob.Refresh();
                this.lastChangeTime = lastStateTransitionTime;
                return(results);
            }
            catch (Exception ex)
            {
                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Warning, "[AzureBatchJobMonitor] Fail when get task info: {0}", ex);
                return(null);
            }
            finally
            {
                TraceHelper.TraceEvent(this.sessionid, TraceEventType.Verbose, "[AzureBatchJobMonitor] Query task info finished.");
            }
        }
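A hedged sketch of how GetTaskStateChangeAsync might be driven from a polling loop follows; the poolId field, the 15-second delay, and the trace call shown are illustrative assumptions rather than part of the original monitor.

            // Polling sketch (this.poolId and the delay interval are assumed for illustration)
            List<ComputeNode> nodes = await this.batchClient.PoolOperations.ListComputeNodes(this.poolId).ToListAsync();
            List<TaskInfo> changedTasks = await this.GetTaskStateChangeAsync(nodes);

            if (changedTasks != null)
            {
                foreach (TaskInfo info in changedTasks)
                {
                    // Hand the state change to whatever component consumes TaskInfo updates
                    TraceHelper.TraceEvent(this.sessionid, TraceEventType.Information,
                        "[AzureBatchJobMonitor] Reporting state change for task {0}", info.Id);
                }
            }

            await Task.Delay(TimeSpan.FromSeconds(15));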
Example #48
        private static async Task MainAsync(string[] args)
        {
            // You may adjust these values to experiment with different compute resource scenarios.
            const string nodeSize         = "standard_d1_v2";
            const int    nodeCount        = 1;
            const int    taskSlotsPerNode = 4;

            // Adjust the task count to experiment with different list operation query durations
            const int taskCount = 5000;

            const string poolId = "EfficientListQueriesSamplePool";
            const string jobId  = "EfficientListQueriesSampleJob";

            var accountSettings = SampleHelpers.LoadAccountSettings();

            // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(
                accountSettings.BatchServiceUrl,
                accountSettings.BatchAccountName,
                accountSettings.BatchAccountKey);

            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(
                    batchClient,
                    poolId,
                    nodeSize,
                    nodeCount,
                    taskSlotsPerNode);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = await ArticleHelpers.CreateJobIfNotExistAsync(batchClient, poolId, jobId);

                // Configure the tasks we'll be querying. Each task simply echoes the node's
                // name and then exits. We create "large" tasks by setting an environment
                // variable for each that is 2048 bytes in size. This is done simply to
                // increase response time when querying the batch service to more clearly
                // demonstrate query durations.
                List <CloudTask>          tasks = new List <CloudTask>();
                List <EnvironmentSetting> environmentSettings = new List <EnvironmentSetting>();
                environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
                for (int i = 1; i < taskCount + 1; i++)
                {
                    string    taskId          = "task" + i.ToString().PadLeft(5, '0');
                    string    taskCommandLine = "cmd /c echo %COMPUTERNAME%";
                    CloudTask task            = new CloudTask(taskId, taskCommandLine);
                    task.EnvironmentSettings = environmentSettings;
                    tasks.Add(task);
                }

                Console.WriteLine();
                Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

                Stopwatch stopwatch = Stopwatch.StartNew();

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task submission
                // helps to ensure efficient underlying API calls to the Batch service.
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                stopwatch.Stop();
                Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
                Console.ReadLine();
                Console.WriteLine();
                stopwatch.Reset();

                // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
                // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
                // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
                // transferred, lowering your query response times (potentially greatly).

                // Get a subset of the tasks based on different task states
                ODATADetailLevel detail = new ODATADetailLevel();
                detail.FilterClause = "state eq 'active'";
                detail.SelectClause = "id,state";
                await QueryTasksAsync(batchClient, job.Id, detail);

                detail.FilterClause = "state eq 'running'";
                await QueryTasksAsync(batchClient, job.Id, detail);

                detail.FilterClause = "state eq 'completed'";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, but limit the properties returned to task id and state only
                detail.FilterClause = null;
                detail.SelectClause = "id,state";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, include id and state, also include the inflated environment settings property
                detail.SelectClause = "id,state,environmentSettings";
                await QueryTasksAsync(batchClient, job.Id, detail);

                // Get all tasks, include all standard properties, and expand the statistics
                detail.ExpandClause = "stats";
                detail.SelectClause = null;
                await QueryTasksAsync(batchClient, job.Id, detail);

                Console.WriteLine();
                Console.WriteLine("Done!");
                Console.WriteLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
Example #49
        /// <summary>
        /// Monitors the specified job's tasks and returns each as they complete. When all
        /// of the tasks in the job have completed, the method returns.
        /// </summary>
        /// <param name="job">The <see cref="CloudJob"/> containing the tasks to monitor.</param>
        /// <returns>One or more completed <see cref="CloudTask"/>.</returns>
        private static IEnumerable<CloudTask> CompletedTasks(CloudJob job)
        {
            HashSet<string> yieldedTasks = new HashSet<string>();

            ODATADetailLevel detailLevel = new ODATADetailLevel();
            detailLevel.SelectClause = "id,state,url";

            while (true)
            {
                List<CloudTask> tasks = job.ListTasks(detailLevel).ToList();

                IEnumerable<CloudTask> newlyCompleted = tasks.Where(t => t.State == Microsoft.Azure.Batch.Common.TaskState.Completed)
                                          .Where(t => !yieldedTasks.Contains(t.Id));

                foreach (CloudTask task in newlyCompleted)
                {
                    yield return task;
                    yieldedTasks.Add(task.Id);
                }

                if (yieldedTasks.Count == tasks.Count)
                {
                    yield break;
                }
            }
        }
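A brief usage sketch for CompletedTasks follows; the job ID is illustrative, and only the id, state and url properties are populated on each returned task because of the select clause above.

            // Usage sketch (hypothetical job ID; batchClient is assumed to be initialized elsewhere)
            CloudJob boundJob = batchClient.JobOperations.GetJob("MyJobId");

            foreach (CloudTask completedTask in CompletedTasks(boundJob))
            {
                // Only id, state and url are populated here due to the select clause used above
                Console.WriteLine("Task {0} has completed.", completedTask.Id);
            }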
Example #50
        /// <summary>
        /// Returns an existing <see cref="CloudJob"/> if found in the Batch account.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="jobId">The <see cref="CloudJob.Id"/> of the desired pool.</param>
        /// <returns>A bound <see cref="CloudJob"/>, or <c>null</c> if the specified <see cref="CloudJob"/> does not exist.</returns>
        public static async Task<CloudJob> GetJobIfExistAsync(BatchClient batchClient, string jobId)
        {
            Console.WriteLine("Checking for existing job {0}...", jobId);

            // Construct a detail level with a filter clause that specifies the job ID so that only
            // a single CloudJob is returned by the Batch service (if that job exists)
            ODATADetailLevel detail = new ODATADetailLevel(filterClause: string.Format("id eq '{0}'", jobId));
            List<CloudJob> jobs = await batchClient.JobOperations.ListJobs(detailLevel: detail).ToListAsync().ConfigureAwait(continueOnCapturedContext: false);
            
            return jobs.FirstOrDefault();
        }
Example #51
        private static void VerifyODataClausesAndReturnMultiplePages <TPage, TListOptions, TListNextOptions, THeaders>(Protocol.IBatchRequest req, ODATADetailLevel expectedDetailLevel)
            where TListOptions : Protocol.Models.IOptions, new()
            where TListNextOptions : Protocol.Models.IOptions, new()
        {
            var listRequest     = req as Protocol.BatchRequest <TListOptions, AzureOperationResponse <IPage <TPage>, THeaders> >;
            var listNextRequest = req as Protocol.BatchRequest <TListNextOptions, AzureOperationResponse <IPage <TPage>, THeaders> >;

            if (listRequest != null)
            {
                listRequest.ServiceRequestFunc = token =>
                {
                    var filter = listRequest.Options as Protocol.Models.IODataFilter;
                    var select = listRequest.Options as Protocol.Models.IODataSelect;
                    var expand = listRequest.Options as Protocol.Models.IODataExpand;

                    Assert.Equal(expectedDetailLevel.FilterClause, filter?.Filter);
                    Assert.Equal(expectedDetailLevel.SelectClause, select?.Select);
                    Assert.Equal(expectedDetailLevel.ExpandClause, expand?.Expand);

                    return(Task.FromResult(new AzureOperationResponse <IPage <TPage>, THeaders>()
                    {
                        Body = new FakePage <TPage>(new List <TPage>(), "Bar")
                    }));
                };
            }

            if (listNextRequest != null)
            {
                listNextRequest.ServiceRequestFunc = token =>
                {
                    return(Task.FromResult(new AzureOperationResponse <IPage <TPage>, THeaders>()
                    {
                        Body = new FakePage <TPage>(new List <TPage>())
                    }));
                };
            }
        }
Example #52
        /// <summary>
        /// calls the two new get-status REST APIs and asserts their values
        ///
        /// 1: add a single quick task (quick because we don't need it to run very long)
        /// 2: this forces a victim compute node to run the JobPrep
        /// 3: poll for this compute node, ignore others (sharedPool.size probably > 1)
        /// 4: check status of JobPrep
        /// 4a: assert as many values as makes sense... this is not a retry test
        /// 5: JobPrep succeeds, task runs
        /// 6: poll for JobRelease.. it is long running
        /// 7: assert as many values as makes sense.
        /// </summary>
        /// <param name="batchCli"></param>
        private void TestGetPrepReleaseStatusCalls(BatchClient batchCli, CloudJobSchedule boundJobSchedule, string sharedPool, IEnumerable <ResourceFile> correctResFiles)
        {
            // Need this often enough, let's just pull it out
            string jobId = boundJobSchedule.ExecutionInformation.RecentJob.Id;

            PoolOperations        poolOps = batchCli.PoolOperations;
            JobScheduleOperations jobScheduleOperations = batchCli.JobScheduleOperations;
            {
                DateTime beforeJobPrepRuns = DateTime.UtcNow;  // used to test start time

                // need a task to force JobPrep
                CloudTask sillyTask = new CloudTask("forceJobPrep", "cmd /c hostname");

                // add the task
                batchCli.JobOperations.AddTask(jobId, sillyTask);

                bool keepLooking = true;

                while (keepLooking)
                {
                    this.testOutputHelper.WriteLine("Waiting for task to be scheduled.");

                    foreach (CloudTask curTask in batchCli.JobOperations.GetJob(jobId).ListTasks())
                    {
                        if (curTask.State != TaskState.Active)
                        {
                            keepLooking = false;

                            break;
                        }
                    }

                    Thread.Sleep(1000);
                }

                List <JobPreparationAndReleaseTaskExecutionInformation> jobPrepStatusList = new List <JobPreparationAndReleaseTaskExecutionInformation>();
                while (jobPrepStatusList.Count == 0)
                {
                    jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId).ToList();
                }
                JobPreparationAndReleaseTaskExecutionInformation jptei = jobPrepStatusList.First();

                ComputeNode victimComputeNodeRunningPrepAndRelease = poolOps.GetComputeNode(sharedPool, jptei.ComputeNodeId);

                // job prep tests
                {
                    Assert.NotNull(jptei);
                    Assert.Equal(0, jptei.JobPreparationTaskExecutionInformation.RetryCount);
                    Assert.True(beforeJobPrepRuns < jptei.JobPreparationTaskExecutionInformation.StartTime + TimeSpan.FromSeconds(10));  // test that the start time is reasonable -- allow 10s of wiggle room
                    Assert.Null(jptei.JobPreparationTaskExecutionInformation.FailureInformation);

                    this.testOutputHelper.WriteLine("");
                    this.testOutputHelper.WriteLine("listing files for compute node: " + victimComputeNodeRunningPrepAndRelease.Id);

                    // Filter the list to reduce noise
                    List <NodeFile> filteredListJobPrep = new List <NodeFile>();

                    foreach (NodeFile curTF in victimComputeNodeRunningPrepAndRelease.ListNodeFiles(recursive: true))
                    {
                        // Filter on the job schedule ID (jsId) since we only run one job per schedule in this test.
                        if (curTF.Path.IndexOf(boundJobSchedule.Id, StringComparison.InvariantCultureIgnoreCase) >= 0)
                        {
                            this.testOutputHelper.WriteLine("    name:" + curTF.Path + ", size: " + ((curTF.IsDirectory.HasValue && curTF.IsDirectory.Value) ? "<dir>" : curTF.Properties.ContentLength.ToString()));

                            filteredListJobPrep.Add(curTF);
                        }
                    }

                    // confirm resource files made it
                    foreach (ResourceFile curCorrectRF in correctResFiles)
                    {
                        bool found = false;

                        foreach (NodeFile curTF in filteredListJobPrep)
                        {
                            // look for the resfile filepath in the taskfile name
                            found |= curTF.Path.IndexOf(curCorrectRF.FilePath, StringComparison.InvariantCultureIgnoreCase) >= 0;
                        }
                        Assert.True(found, "Looking for resourcefile: " + curCorrectRF.FilePath);
                    }

                    // poll for completion
                    while (JobPreparationTaskState.Completed != jptei.JobPreparationTaskExecutionInformation.State)
                    {
                        this.testOutputHelper.WriteLine("waiting for jopPrep to complete");
                        Thread.Sleep(2000);

                        // refresh the state info
                        ODATADetailLevel detailLevel = new ODATADetailLevel()
                        {
                            FilterClause = string.Format("nodeId eq '{0}'", victimComputeNodeRunningPrepAndRelease.Id)
                        };
                        jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId, detailLevel: detailLevel).ToList();

                        jptei = jobPrepStatusList.First();
                    }

                    // need success
                    Assert.Equal(0, jptei.JobPreparationTaskExecutionInformation.ExitCode);

                    // check stdout to confirm prep ran

                    //Why do I have to use the hardcoded string job-1 here...?
                    string stdOutFileSpec = Path.Combine("workitems", boundJobSchedule.Id, "job-1", boundJobSchedule.JobSpecification.JobPreparationTask.Id, Constants.StandardOutFileName);
                    string stdOut         = victimComputeNodeRunningPrepAndRelease.GetNodeFile(stdOutFileSpec).ReadAsString();

                    string stdErrFileSpec = Path.Combine("workitems", boundJobSchedule.Id, "job-1", boundJobSchedule.JobSpecification.JobPreparationTask.Id, Constants.StandardErrorFileName);

                    string stdErr = string.Empty;

                    try
                    {
                        stdErr = victimComputeNodeRunningPrepAndRelease.GetNodeFile(stdErrFileSpec).ReadAsString();
                    }
                    catch (Exception)
                    {
                        //Swallow any exceptions here since stderr may not exist
                    }

                    this.testOutputHelper.WriteLine(stdOut);
                    this.testOutputHelper.WriteLine(stdErr);

                    Assert.True(!string.IsNullOrWhiteSpace(stdOut));
                    Assert.Contains("jobpreparation", stdOut.ToLower());
                }

                // JobPrep tests completed. Let JobPrep finish, the task run, and then wait for JobRelease

                TaskStateMonitor tsm = batchCli.Utilities.CreateTaskStateMonitor();

                // spam/logging interceptor
                Protocol.RequestInterceptor consoleSpammer =
                    new Protocol.RequestInterceptor((x) =>
                {
                    this.testOutputHelper.WriteLine("TestGetPrepReleaseStatusCalls: waiting for JobPrep and task to complete");

                    ODATADetailLevel detailLevel = new ODATADetailLevel()
                    {
                        FilterClause = string.Format("nodeId eq '{0}'", victimComputeNodeRunningPrepAndRelease.Id)
                    };
                    jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId, detailLevel: detailLevel).ToList();
                    JobPreparationAndReleaseTaskExecutionInformation jpteiInterceptor =
                        jobPrepStatusList.First();

                    this.testOutputHelper.WriteLine("    JobPrep.State: " + jpteiInterceptor.JobPreparationTaskExecutionInformation.State);

                    this.testOutputHelper.WriteLine("");
                });

                // Waiting for the task to complete ensures that JobRelease will run.
                tsm.WaitAll(
                    batchCli.JobOperations.GetJob(jobId).ListTasks(additionalBehaviors: new[] { consoleSpammer }),
                    TaskState.Completed,
                    TimeSpan.FromSeconds(120),
                    additionalBehaviors: new[] { consoleSpammer });

                // trigger JobRelease
                batchCli.JobOperations.TerminateJob(jobId, terminateReason: "die! I want JobRelease to run!");

                // Now that the task has completed, we are racing with the JobRelease task... but it is sleeping, so we can catch it
                while (true)
                {
                    ODATADetailLevel detailLevel = new ODATADetailLevel()
                    {
                        FilterClause = string.Format("nodeId eq '{0}'", victimComputeNodeRunningPrepAndRelease.Id)
                    };
                    jobPrepStatusList = batchCli.JobOperations.ListJobPreparationAndReleaseTaskStatus(jobId, detailLevel: detailLevel).ToList();
                    JobPreparationAndReleaseTaskExecutionInformation jrtei = jobPrepStatusList.FirstOrDefault();

                    if ((jrtei == null) || (null == jrtei.JobReleaseTaskExecutionInformation))
                    {
                        Thread.Sleep(2000);
                    }
                    else
                    {
                        Assert.NotNull(jrtei);
                        if (jrtei.JobReleaseTaskExecutionInformation.State != JobReleaseTaskState.Completed)
                        {
                            this.testOutputHelper.WriteLine("JobReleaseTask state is: " + jrtei.JobReleaseTaskExecutionInformation.State);

                            Thread.Sleep(5000);
                        }
                        else
                        {
                            this.testOutputHelper.WriteLine("JobRelease commpleted!");

                            // we are done
                            break;
                        }
                    }
                }
            }
        }
Example #53
        /// <summary>
        /// Monitors the specified tasks for completion and returns a value indicating whether all tasks completed successfully
        /// within the timeout period.
        /// </summary>
        /// <param name="batchClient">A <see cref="BatchClient"/>.</param>
        /// <param name="jobId">The id of the job containing the tasks that should be monitored.</param>
        /// <param name="timeout">The period of time to wait for the tasks to reach the completed state.</param>
        /// <returns><c>true</c> if all tasks in the specified job completed with an exit code of 0 within the specified timeout period, otherwise <c>false</c>.</returns>
        private static async Task <bool> MonitorTasks(BatchClient batchClient, string jobId, TimeSpan timeout)
        {
            bool         allTasksSuccessful = true;
            const string successMessage     = "All tasks reached state Completed.";
            const string failureMessage     = "One or more tasks failed to reach the Completed state within the timeout period.";

            // Obtain the collection of tasks currently managed by the job. Note that we use a detail level to
            // specify that only the "id" property of each task should be populated. Using a detail level for
            // all list operations helps to lower response time from the Batch service.
            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id");
            List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId, detail).ToListAsync();

            Console.WriteLine("Awaiting task completion, timeout in {0}...", timeout.ToString());

            // We use a TaskStateMonitor to monitor the state of our tasks. In this case, we will wait for all tasks to
            // reach the Completed state.
            TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();

            try
            {
                await taskStateMonitor.WhenAll(tasks, TaskState.Completed, timeout);
            }
            catch (TimeoutException)
            {
                await batchClient.JobOperations.TerminateJobAsync(jobId, failureMessage);

                Console.WriteLine(failureMessage);
                return(false);
            }

            await batchClient.JobOperations.TerminateJobAsync(jobId, successMessage);

            // All tasks have reached the "Completed" state, however, this does not guarantee all tasks completed successfully.
            // Here we further check each task's ExecutionInfo property to ensure that it did not encounter a scheduling error
            // or return a non-zero exit code.

            // Update the detail level to populate only the task id and executionInfo properties.
            // We refresh the tasks below, and need only this information for each task.
            detail.SelectClause = "id, executionInfo";

            foreach (CloudTask task in tasks)
            {
                // Populate the task's properties with the latest info from the Batch service
                await task.RefreshAsync(detail);

                if (task.ExecutionInformation.SchedulingError != null)
                {
                    // A scheduling error indicates a problem starting the task on the node. It is important to note that
                    // the task's state can be "Completed," yet still have encountered a scheduling error.

                    allTasksSuccessful = false;

                    Console.WriteLine("WARNING: Task [{0}] encountered a scheduling error: {1}", task.Id, task.ExecutionInformation.SchedulingError.Message);
                }
                else if (task.ExecutionInformation.ExitCode != 0)
                {
                    // A non-zero exit code may indicate that the application executed by the task encountered an error
                    // during execution. As not every application returns non-zero on failure by default (e.g. robocopy),
                    // your implementation of error checking may differ from this example.

                    allTasksSuccessful = false;

                    Console.WriteLine("WARNING: Task [{0}] returned a non-zero exit code - this may indicate task execution or completion failure.", task.Id);
                }
            }

            if (allTasksSuccessful)
            {
                Console.WriteLine("Success! All tasks completed successfully within the specified timeout period.");
            }

            return(allTasksSuccessful);
        }
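A short usage sketch for MonitorTasks follows; the job ID and the 30-minute timeout are illustrative assumptions.

            // Usage sketch (hypothetical job ID and timeout)
            bool allTasksSucceeded = await MonitorTasks(batchClient, "MyJobId", TimeSpan.FromMinutes(30));

            if (!allTasksSucceeded)
            {
                Console.WriteLine("One or more tasks did not complete successfully; inspect each task's ExecutionInformation for details.");
            }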
Example #54
        /// <summary>
        /// Gets the combined state of Azure Batch job, task and pool that corresponds to the given TES task
        /// </summary>
        /// <param name="tesTaskId">The unique TES task ID</param>
        /// <returns>Job state information</returns>
        public async Task <AzureBatchJobAndTaskState> GetBatchJobAndTaskStateAsync(string tesTaskId)
        {
            try
            {
                var    nodeAllocationFailed                       = false;
                string nodeErrorCode                              = null;
                IEnumerable <string> nodeErrorDetails             = null;
                var activeJobWithMissingAutoPool                  = false;
                ComputeNodeState?        nodeState                = null;
                TaskState?               taskState                = null;
                TaskExecutionInformation taskExecutionInformation = null;

                var jobFilter = new ODATADetailLevel
                {
                    FilterClause = $"startswith(id,'{tesTaskId}{BatchJobAttemptSeparator}')",
                    SelectClause = "*"
                };

                var jobInfos = (await batchClient.JobOperations.ListJobs(jobFilter).ToListAsync())
                               .Select(j => new { Job = j, AttemptNumber = int.Parse(j.Id.Split(BatchJobAttemptSeparator)[1]) });

                if (!jobInfos.Any())
                {
                    return(new AzureBatchJobAndTaskState {
                        JobState = null
                    });
                }

                if (jobInfos.Count(j => j.Job.State == JobState.Active) > 1)
                {
                    return(new AzureBatchJobAndTaskState {
                        MoreThanOneActiveJobFound = true
                    });
                }

                var lastJobInfo = jobInfos.OrderBy(j => j.AttemptNumber).Last();

                var job           = lastJobInfo.Job;
                var attemptNumber = lastJobInfo.AttemptNumber;

                if (job.State == JobState.Active && job.ExecutionInformation?.PoolId != null)
                {
                    var poolFilter = new ODATADetailLevel
                    {
                        FilterClause = $"id eq '{job.ExecutionInformation.PoolId}'",
                        SelectClause = "*"
                    };

                    var pool = (await batchClient.PoolOperations.ListPools(poolFilter).ToListAsync()).FirstOrDefault();

                    if (pool != null)
                    {
                        nodeAllocationFailed = pool.ResizeErrors?.Count > 0;

                        var node = (await pool.ListComputeNodes().ToListAsync()).FirstOrDefault();

                        if (node != null)
                        {
                            nodeState = node.State;
                            var nodeError = node.Errors?.FirstOrDefault();
                            nodeErrorCode    = nodeError?.Code;
                            nodeErrorDetails = nodeError?.ErrorDetails?.Select(e => e.Value);
                        }
                    }
                    else
                    {
                        if (job.CreationTime.HasValue && DateTime.UtcNow.Subtract(job.CreationTime.Value) > TimeSpan.FromMinutes(30))
                        {
                            activeJobWithMissingAutoPool = true;
                        }
                    }
                }

                try
                {
                    var batchTask = await batchClient.JobOperations.GetTaskAsync(job.Id, tesTaskId);

                    taskState = batchTask.State;
                    taskExecutionInformation = batchTask.ExecutionInformation;
                }
                catch (Exception ex)
                {
                    logger.LogError(ex, $"Failed to get task for TesTask {tesTaskId}");
                }

                return(new AzureBatchJobAndTaskState
                {
                    MoreThanOneActiveJobFound = false,
                    ActiveJobWithMissingAutoPool = activeJobWithMissingAutoPool,
                    AttemptNumber = attemptNumber,
                    NodeAllocationFailed = nodeAllocationFailed,
                    NodeErrorCode = nodeErrorCode,
                    NodeErrorDetails = nodeErrorDetails,
                    NodeState = nodeState,
                    JobState = job.State,
                    JobStartTime = job.ExecutionInformation?.StartTime,
                    JobEndTime = job.ExecutionInformation?.EndTime,
                    JobSchedulingError = job.ExecutionInformation?.SchedulingError,
                    TaskState = taskState,
                    TaskExecutionResult = taskExecutionInformation?.Result,
                    TaskStartTime = taskExecutionInformation?.StartTime,
                    TaskEndTime = taskExecutionInformation?.EndTime,
                    TaskExitCode = taskExecutionInformation?.ExitCode,
                    TaskFailureInformation = taskExecutionInformation?.FailureInformation,
                    TaskContainerState = taskExecutionInformation?.ContainerInformation?.State,
                    TaskContainerError = taskExecutionInformation?.ContainerInformation?.Error
                });
            }
            catch (Exception ex)
            {
                logger.LogError(ex, $"GetBatchJobAndTaskStateAsync failed for TesTask {tesTaskId}");
                throw;
            }
        }
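A hedged sketch of consuming the combined state returned by GetBatchJobAndTaskStateAsync follows; the TES task ID is illustrative, and the handling shown is only one plausible way to interpret the returned fields.

            // Usage sketch (hypothetical TES task ID)
            AzureBatchJobAndTaskState combinedState = await this.GetBatchJobAndTaskStateAsync("tes-task-0001");

            if (combinedState.MoreThanOneActiveJobFound)
            {
                logger.LogWarning("More than one active Batch job was found for the TES task.");
            }
            else if (combinedState.NodeAllocationFailed)
            {
                logger.LogWarning($"Node allocation failed with error code {combinedState.NodeErrorCode}");
            }
            else
            {
                logger.LogInformation($"Job state: {combinedState.JobState}, task state: {combinedState.TaskState}");
            }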
Example #55
        /// <summary>
        /// Monitors the specified tasks for completion and returns a value indicating whether all tasks completed successfully
        /// within the timeout period.
        /// </summary>
        /// <param name="batchClient">A <see cref="BatchClient"/>.</param>
        /// <param name="jobId">The id of the job containing the tasks that should be monitored.</param>
        /// <param name="timeout">The period of time to wait for the tasks to reach the completed state.</param>
        /// <returns><c>true</c> if all tasks in the specified job completed with an exit code of 0 within the specified timeout period, otherwise <c>false</c>.</returns>
        private static async Task<bool> MonitorTasks(BatchClient batchClient, string jobId, TimeSpan timeout)
        {
            bool allTasksSuccessful = true;
            const string successMessage = "All tasks reached state Completed.";
            const string failureMessage = "One or more tasks failed to reach the Completed state within the timeout period.";

            // Obtain the collection of tasks currently managed by the job. Note that we use a detail level to
            // specify that only the "id" property of each task should be populated. Using a detail level for
            // all list operations helps to lower response time from the Batch service.
            ODATADetailLevel detail = new ODATADetailLevel(selectClause: "id");
            List<CloudTask> tasks = await batchClient.JobOperations.ListTasks(jobId, detail).ToListAsync();

            Console.WriteLine("Awaiting task completion, timeout in {0}...", timeout.ToString());

            // We use a TaskStateMonitor to monitor the state of our tasks. In this case, we will wait for all tasks to
            // reach the Completed state.
            TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();
            bool timedOut = await taskStateMonitor.WhenAllAsync(tasks, TaskState.Completed, timeout);

            if (timedOut)
            {
                allTasksSuccessful = false;

                await batchClient.JobOperations.TerminateJobAsync(jobId, failureMessage);

                Console.WriteLine(failureMessage);
            }
            else
            {
                await batchClient.JobOperations.TerminateJobAsync(jobId, successMessage);

                // All tasks have reached the "Completed" state, however, this does not guarantee all tasks completed successfully.
                // Here we further check each task's ExecutionInfo property to ensure that it did not encounter a scheduling error
                // or return a non-zero exit code.

                // Update the detail level to populate only the task id and executionInfo properties.
                // We refresh the tasks below, and need only this information for each task.
                detail.SelectClause = "id, executionInfo";

                foreach (CloudTask task in tasks)
                {
                    // Populate the task's properties with the latest info from the Batch service
                    await task.RefreshAsync(detail);

                    if (task.ExecutionInformation.SchedulingError != null)
                    {
                        // A scheduling error indicates a problem starting the task on the node. It is important to note that
                        // the task's state can be "Completed," yet still have encountered a scheduling error.

                        allTasksSuccessful = false;

                        Console.WriteLine("WARNING: Task [{0}] encountered a scheduling error: {1}", task.Id, task.ExecutionInformation.SchedulingError.Message);
                    }
                    else if (task.ExecutionInformation.ExitCode != 0)
                    {
                        // A non-zero exit code may indicate that the application executed by the task encountered an error
                        // during execution. As not every application returns non-zero on failure by default (e.g. robocopy),
                        // your implementation of error checking may differ from this example.

                        allTasksSuccessful = false;

                        Console.WriteLine("WARNING: Task [{0}] returned a non-zero exit code - this may indicate task execution or completion failure.", task.Id);
                    }
                }
            }

            if (allTasksSuccessful)
            {
                Console.WriteLine("Success! All tasks completed successfully within the specified timeout period.");
            }

            return allTasksSuccessful;
        }
Example #56
        /// <summary>
        /// Gets the list of jobs submitted to Azure.
        /// </summary>
        /// <returns>List of Jobs. Null if the thread is asked to cancel, or if unable to update the progress bar.</returns>
        private List <JobDetails> ListJobs()
        {
            try
            {
                view.ShowLoadingProgressBar();
                view.JobLoadProgress = 0;
            } catch (NullReferenceException)
            {
                return(null);
            } catch (Exception e)
            {
                ShowError(e.ToString());
            }

            List <JobDetails> jobs = new List <JobDetails>();
            var pools          = batchClient.PoolOperations.ListPools();
            var jobDetailLevel = new ODATADetailLevel {
                SelectClause = "id,displayName,state,executionInfo,stats", ExpandClause = "stats"
            };

            IPagedEnumerable <CloudJob> cloudJobs = null;

            // Attempt to download raw job list. If this fails more than 3 times, return.
            int numTries = 0;

            while (numTries < 4 && cloudJobs == null)
            {
                try
                {
                    cloudJobs = batchClient.JobOperations.ListJobs(jobDetailLevel);
                }
                catch (Exception e)
                {
                    if (numTries >= 3)
                    {
                        ShowError("Unable to retrieve job list: " + e.ToString());
                        return(new List <JobDetails>());
                    }
                } finally
                {
                    numTries++;
                }
            }

            // Parse jobs into a list of JobDetails objects.

            var length = cloudJobs.Count();
            int i      = 0;

            foreach (var cloudJob in cloudJobs)
            {
                if (FetchJobs.CancellationPending)
                {
                    return(null);
                }
                try
                {
                    view.JobLoadProgress = 100.0 * i / length;
                } catch (NullReferenceException)
                {
                    return(null);
                } catch (Exception e)
                {
                    ShowError(e.ToString());
                }

                string owner = GetAzureMetaData("job-" + cloudJob.Id, "Owner");

                long   numTasks    = 1;
                double jobProgress = 0;
                try
                {
                    TaskCounts tasks = batchClient.JobOperations.GetJobTaskCounts(cloudJob.Id);
                    numTasks = tasks.Active + tasks.Running + tasks.Completed;
                    // if there are no tasks, set progress to 100%
                    jobProgress = numTasks == 0 ? 100 : 100.0 * tasks.Completed / numTasks;
                } catch (Exception e)
                {
                    // sometimes an exception is thrown when retrieving the task counts
                    // could be due to the job not being submitted correctly
                    ShowError(e.ToString());

                    numTasks    = -1;
                    jobProgress = 100;
                }

                // if cpu time is unavailable, set this field to 0
                TimeSpan cpu = cloudJob.Statistics == null ? TimeSpan.Zero : cloudJob.Statistics.KernelCpuTime + cloudJob.Statistics.UserCpuTime;
                var      job = new JobDetails
                {
                    Id          = cloudJob.Id,
                    DisplayName = cloudJob.DisplayName,
                    State       = cloudJob.State.ToString(),
                    Owner       = owner,
                    NumSims     = numTasks - 1, // subtract one because one of these is the job manager
                    Progress    = jobProgress,
                    CpuTime     = cpu
                };

                if (cloudJob.ExecutionInformation != null)
                {
                    job.StartTime = cloudJob.ExecutionInformation.StartTime;
                    job.EndTime   = cloudJob.ExecutionInformation.EndTime;
                }
                jobs.Add(job);
                i++;
            }
            view.HideLoadingProgressBar();
            if (jobs == null)
            {
                return(new List <JobDetails>());
            }
            return(jobs);
        }
        private async Task WaitForMapperTasksToCompleteAsync(BatchClient batchClient)
        {
            Console.WriteLine("Waiting for the mapper tasks to complete...");

            //List all the mapper tasks using an id filter.
            DetailLevel mapperTaskIdFilter = new ODATADetailLevel()
            {
                FilterClause = string.Format("startswith(id, '{0}')", Constants.MapperTaskPrefix)
            };

            IEnumerable<CloudTask> tasksToMonitor = batchClient.JobOperations.ListTasks(
                this.jobId,
                detailLevel: mapperTaskIdFilter);

            // Use the task state monitor to wait for the tasks to complete.  Monitoring the tasks
            // for completion is necessary if you are using KillJobOnCompletion = TRUE, as otherwise when the job manager
            // exits it will kill all of the tasks that are still running under the job.
            TaskStateMonitor taskStateMonitor = batchClient.Utilities.CreateTaskStateMonitor();

            bool timedOut = await taskStateMonitor.WaitAllAsync(tasksToMonitor, TaskState.Completed, TimeSpan.FromMinutes(5));

            //Get the list of mapper tasks in order to analyze their state and ensure they completed successfully.
            IPagedEnumerable<CloudTask> asyncEnumerable = batchClient.JobOperations.ListTasks(
                this.jobId,
                detailLevel: mapperTaskIdFilter);

            await asyncEnumerable.ForEachAsync(async cloudTask =>
            {
                Console.WriteLine("Task {0} is in state: {1}", cloudTask.Id, cloudTask.State);

                await Helpers.CheckForTaskSuccessAsync(cloudTask, dumpStandardOutOnTaskSuccess: false);

                Console.WriteLine();
            });

            //If not all the tasks reached the desired state within the timeout then the job manager
            //cannot continue.
            if (timedOut)
            {
                const string errorMessage = "Mapper tasks did not complete within expected timeout.";
                Console.WriteLine(errorMessage);

                throw new TimeoutException(errorMessage);
            }
        }
Example #58
        /// <summary>
        /// Queries and prints task information for the specified job.
        /// </summary>
        /// <param name="batchClient">A fully initialized <see cref="BatchClient"/>.</param>
        /// <param name="jobId">The ID of the job whose tasks should be queried.</param>
        /// <param name="detail">An <see cref="ODATADetailLevel"/> configured with one or more of expand, filter, select clauses.</param>
        private static void QueryTasks(BatchClient batchClient, string jobId, ODATADetailLevel detail)
        {
            List<CloudTask> taskList = new List<CloudTask>();

            Stopwatch stopwatch = new Stopwatch();
            stopwatch.Start();

            try
            {
                taskList.AddRange(batchClient.JobOperations.ListTasks(jobId, detail).ToList());
            }
            catch (AggregateException ex)
            {
                AggregateException ax = ex.Flatten();

                Console.WriteLine(ax.Message);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
            finally
            {
                stopwatch.Stop();
            }
            
            Console.WriteLine("{0} tasks retrieved in {1} (ExpandClause: {2} | FilterClause: {3} | SelectClause: {4})",
                        taskList.Count, 
                        stopwatch.Elapsed, 
                        detail.ExpandClause, 
                        detail.FilterClause, 
                        detail.SelectClause);
        }
Example #59
        private static async Task MainAsync(string[] args)
        {
            const string poolId = "JobPrepReleaseSamplePool";
            const string jobId  = "JobPrepReleaseSampleJob";

            // Location of the file that the job tasks will work with, a text file in the
            // node's "shared" directory.
            const string taskOutputFile = "%AZ_BATCH_NODE_SHARED_DIR%\\job_prep_and_release.txt";

            // The job prep task will write the node ID to the text file in the shared directory
            const string jobPrepCmdLine = "cmd /c echo %AZ_BATCH_NODE_ID% tasks: >" + taskOutputFile;

            // Each task then echoes its ID to the same text file
            const string taskCmdLine = "cmd /c echo   %AZ_BATCH_TASK_ID% >> " + taskOutputFile;

            // The job release task will then delete the text file from the shared directory
            const string jobReleaseCmdLine = "cmd /c del " + taskOutputFile;

            // Configure your AccountSettings in the Microsoft.Azure.Batch.Samples.Common project within this solution
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);

            // Initialize the BatchClient for access to your Batch account
            using (BatchClient batchClient = await BatchClient.OpenAsync(cred))
            {
                // Create a CloudPool (or obtain an existing pool with the specified ID)
                CloudPool pool = await ArticleHelpers.CreatePoolIfNotExistAsync(batchClient,
                                                                                poolId,
                                                                                "small",
                                                                                2,
                                                                                1);

                // Create a CloudJob (or obtain an existing job with the specified ID)
                CloudJob job = await SampleHelpers.GetJobIfExistAsync(batchClient, jobId);

                if (job == null)
                {
                    Console.WriteLine("Job {0} not found, creating...", jobId);

                    CloudJob unboundJob = batchClient.JobOperations.CreateJob(jobId, new PoolInformation()
                    {
                        PoolId = poolId
                    });

                    // Configure and assign the job preparation task
                    unboundJob.JobPreparationTask = new JobPreparationTask {
                        CommandLine = jobPrepCmdLine
                    };

                    // Configure and assign the job release task
                    unboundJob.JobReleaseTask = new JobReleaseTask {
                        CommandLine = jobReleaseCmdLine
                    };

                    await unboundJob.CommitAsync();

                    // Get the bound version of the job with all of its properties populated
                    job = await batchClient.JobOperations.GetJobAsync(jobId);
                }

                // Create the tasks that the job will execute
                List<CloudTask> tasks = new List<CloudTask>();
                for (int i = 1; i <= 8; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(3, '0');
                    string taskCommandLine = taskCmdLine;
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    tasks.Add(task);
                }

                // Add the tasks in one API call as opposed to a separate AddTask call for each. Bulk task
                // submission helps to ensure efficient underlying API calls to the Batch service.
                Console.WriteLine("Submitting tasks and awaiting completion...");
                await batchClient.JobOperations.AddTaskAsync(job.Id, tasks);

                // Wait for the tasks to complete before proceeding. The long timeout here is to allow time
                // for the nodes within the pool to be created and started if the pool had not yet been created.
                if (await batchClient.Utilities.CreateTaskStateMonitor().WhenAllAsync(job.ListTasks(),
                                                                                      TaskState.Completed,
                                                                                      TimeSpan.FromMinutes(30)))
                {
                    Console.WriteLine("Operation timed out while waiting for submitted tasks to reach state {0}", TaskState.Completed);

                    return;
                }
                else
                {
                    Console.WriteLine("All tasks completed.");
                    Console.WriteLine();
                }

                // Print the contents of the shared text file modified by the job preparation and other tasks.
                ODATADetailLevel nodeDetail = new ODATADetailLevel(selectClause: "id, state");
                IPagedEnumerable<ComputeNode> nodes = batchClient.PoolOperations.ListComputeNodes(pool.Id, nodeDetail);
                await nodes.ForEachAsync(async (node) =>
                {
                    // Check that the node is Idle before attempting to pull the text file. If the pool
                    // was just created, one node may have completed all of the tasks before the other
                    // node(s) finished their startup procedure.
                    if (node.State == ComputeNodeState.Idle)
                    {
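                        // Node file paths are relative to the node's root directory; "shared" is the
                        // directory the tasks reference via %AZ_BATCH_NODE_SHARED_DIR%.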
                        NodeFile sharedTextFile = await node.GetNodeFileAsync("shared\\job_prep_and_release.txt");
                        Console.WriteLine("Contents of {0} on {1}:", sharedTextFile.Name, node.Id);
                        Console.WriteLine("-------------------------------------------");
                        Console.WriteLine(await sharedTextFile.ReadAsStringAsync());
                    }
                });

                // Terminate the job to mark it as Completed; this will initiate the Job Release Task on any node
                // that executed job tasks. Note that the Job Release Task is also executed when a job is deleted,
                // thus you need not call Terminate if you typically delete your jobs upon task completion.
                await batchClient.JobOperations.TerminateJobAsync(job.Id);

                // Wait for the job to reach the "Completed" state. This wait is not typically necessary in
                // production code, but is done here so that the release task exit codes can be checked below.
                await ArticleHelpers.WaitForJobToReachStateAsync(batchClient, job.Id, JobState.Completed, TimeSpan.FromMinutes(2));

                // Print the exit codes of the prep and release tasks by obtaining their execution info
                List<JobPreparationAndReleaseTaskExecutionInformation> prepReleaseInfo = await batchClient.JobOperations.ListJobPreparationAndReleaseTaskStatus(job.Id).ToListAsync();

                foreach (JobPreparationAndReleaseTaskExecutionInformation info in prepReleaseInfo)
                {
                    Console.WriteLine();
                    Console.WriteLine("{0}: ", info.ComputeNodeId);

                    // If no tasks were scheduled to run on the node, the JobPreparationTaskExecutionInformation will be null
                    if (info.JobPreparationTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Prep task exit code:    {0}", info.JobPreparationTaskExecutionInformation.ExitCode);
                    }

                    // If no tasks were scheduled to run on the node, the JobReleaseTaskExecutionInformation will be null
                    if (info.JobReleaseTaskExecutionInformation != null)
                    {
                        Console.WriteLine("  Release task exit code: {0}", info.JobReleaseTaskExecutionInformation.ExitCode);
                    }
                }

                // Clean up the resources we've created in the Batch account
                Console.WriteLine();
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    // Note that deleting the job will execute the job release task if the job was not previously terminated
                    await batchClient.JobOperations.DeleteJobAsync(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    await batchClient.PoolOperations.DeletePoolAsync(pool.Id);
                }
            }
        }
Example #60
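        /// <summary>
        /// Demonstrates efficient task queries: adds several thousand tasks to a job, then lists them
        /// with different ODATADetailLevel filter, select, and expand clauses to compare query durations.
        /// </summary>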
        public static void Main(string[] args)
        {
            const int taskCount = 5000;

            const string poolId = "poolEffQuery";
            const string jobId  = "jobEffQuery";

            // Set up the credentials required by the BatchClient. Configure your AccountSettings in the
            // Microsoft.Azure.Batch.Samples.Common project within this solution.
            BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(AccountSettings.Default.BatchServiceUrl,
                                                                           AccountSettings.Default.BatchAccountName,
                                                                           AccountSettings.Default.BatchAccountKey);
            
            using (BatchClient batchClient = BatchClient.Open(cred))
            {
                // Create a CloudPool, or obtain an existing pool with the specified ID
                CreatePool(batchClient, poolId).Wait();
                CloudPool pool = batchClient.PoolOperations.GetPool(poolId);

                // Create a CloudJob, or obtain an existing job with the specified ID
                CloudJob job = ArticleHelpers.CreateJobAsync(batchClient, poolId, jobId).Result;

                // Configure the tasks we'll be querying. Each task simply echoes the node's
                // name and then exits. We create "large" tasks by giving each one a 2048-byte
                // environment variable; this increases response time when querying the Batch
                // service and more clearly demonstrates query durations.
                List<CloudTask> tasks = new List<CloudTask>();
                List<EnvironmentSetting> environmentSettings = new List<EnvironmentSetting>();
                environmentSettings.Add(new EnvironmentSetting("BIGENV", GetBigString(2048)));
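                // Every task below shares this same settings list, so each task carries the
                // 2 KB BIGENV value when its properties are retrieved from the service.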
                for (int i = 1; i < taskCount + 1; i++)
                {
                    string taskId = "task" + i.ToString().PadLeft(5, '0');
                    string taskCommandLine = "cmd /c echo %COMPUTERNAME%";
                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    task.EnvironmentSettings = environmentSettings;
                    tasks.Add(task);
                }

                Console.WriteLine();
                Console.WriteLine("Adding {0} tasks to job {1}...", taskCount, job.Id);

                Stopwatch stopwatch = new Stopwatch();
                stopwatch.Start();

                // To reduce the chances of hitting Batch service throttling limits, we add the tasks in
                // one API call as opposed to a separate AddTask call for each. This is crucial if you
                // are adding many tasks to your jobs.
                batchClient.JobOperations.AddTask(job.Id, tasks);

                stopwatch.Stop();
                Console.WriteLine("{0} tasks added in {1}, hit ENTER to query tasks...", taskCount, stopwatch.Elapsed);
                Console.ReadLine();
                Console.WriteLine();
                stopwatch.Reset();

                // Obtain the tasks, specifying different detail levels to demonstrate limiting the number of tasks returned
                // and the amount of data returned for each. If your job tasks number in the thousands or have "large" properties
                // (such as our big environment variable), specifying a DetailLevel is important in reducing the amount of data
                // transferred, lowering your query response times (potentially greatly).

                // Get a subset of the tasks based on different task states
                ODATADetailLevel detail = new ODATADetailLevel();
                detail.FilterClause = "state eq 'active'";
                detail.SelectClause = "id,state";
                QueryTasks(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'running'";
                QueryTasks(batchClient, job.Id, detail);
                detail.FilterClause = "state eq 'completed'";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, but limit the properties returned to task id and state only
                detail.FilterClause = null;
                detail.SelectClause = "id,state";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, include id and state, also include the inflated environment settings property
                detail.SelectClause = "id,state,environmentSettings";
                QueryTasks(batchClient, job.Id, detail);

                // Get all tasks, include all standard properties, and expand the statistics
                detail.ExpandClause = "stats";
                detail.SelectClause = null;
                QueryTasks(batchClient, job.Id, detail);

                Console.WriteLine();
                Console.WriteLine("Sample complete, hit ENTER to continue...");
                Console.ReadLine();

                // Clean up the resources we've created in the Batch account
                Console.WriteLine("Delete job? [yes] no");
                string response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.JobOperations.DeleteJob(job.Id);
                }

                Console.WriteLine("Delete pool? [yes] no");
                response = Console.ReadLine().ToLower();
                if (response != "n" && response != "no")
                {
                    batchClient.PoolOperations.DeletePool(pool.Id);
                }
            }
        }
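
        // GetBigString is referenced above but not included in this listing. A minimal sketch of
        // such a helper, assuming it simply produces a filler string of the requested length:
        private static string GetBigString(int size)
        {
            // Build a string of 'size' characters to inflate each task's environment settings.
            return new string('a', size);
        }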