/// <summary>
/// Sample: create a durable session against EchoService, send a batch of
/// requests, cancel half of the running service tasks mid-flight, and then
/// retrieve whatever responses survive.
/// </summary>
static void Main(string[] args)
{
    // Change the headnode name here.
    const string headnode = "[headnode]";
    const string serviceName = "EchoService";
    const int numRequests = 8;

    SessionStartInfo info = new SessionStartInfo(headnode, serviceName);

    // The sample code needs at least 2 cores in the cluster.
    info.SessionResourceUnitType = SessionUnitType.Core;
    info.MaximumUnits = 2;
    info.MinimumUnits = 2;

    Console.Write("Creating a session for EchoService...");
    using (DurableSession session = DurableSession.CreateSession(info))
    {
        Console.WriteLine("done session id = {0}", session.Id);
        NetTcpBinding binding = new NetTcpBinding(SecurityMode.Transport);

        using (BrokerClient<IService1> client = new BrokerClient<IService1>(session, binding))
        {
            Console.Write("Sending {0} requests...", numRequests);
            for (int i = 0; i < numRequests; i++)
            {
                // Each request asks the service to echo after a 5 second delay;
                // the loop index is attached as user data to correlate responses.
                EchoOnExitRequest request = new EchoOnExitRequest(new TimeSpan(0, 0, 5));
                client.SendRequest<EchoOnExitRequest>(request, i);
            }
            client.EndRequests();
            Console.WriteLine("done");

            // Cancel half of the service tasks while the requests are processing.
            ThreadPool.QueueUserWorkItem(delegate
            {
                // Wait 3 seconds before trying to cancel service tasks.
                // (Comment previously claimed 5 seconds; the code sleeps 3.)
                Thread.Sleep(3 * 1000);
                try
                {
                    // Dispose the scheduler connection when done, consistent with
                    // the other samples in this file.
                    using (IScheduler scheduler = new Scheduler())
                    {
                        try
                        {
                            scheduler.Connect(headnode);
                        }
                        catch (Exception e)
                        {
                            Console.WriteLine("Error connecting store.{0}", e.ToString());
                            return;
                        }

                        // The broker session is backed by an HPC service job; fetch
                        // its id so we can open the job and enumerate its tasks.
                        int jobId = session.GetProperty<int>("HPC_ServiceJobId");
                        ISchedulerJob job = scheduler.OpenJob(jobId);
                        job.Refresh();
                        ISchedulerCollection taskList = job.GetTaskList(null, null, true);

                        int onFlag = 0;
                        foreach (ISchedulerTask task in taskList)
                        {
                            // Cancel every other service task (i.e. half of them).
                            if (onFlag++ % 2 == 0)
                            {
                                try
                                {
                                    if (task.State == TaskState.Running)
                                    {
                                        Console.WriteLine("Try to cancel task {0}", task.TaskId);
                                        job.CancelTask(task.TaskId);
                                        job.Commit();
                                    }
                                }
                                catch (Exception ex)
                                {
                                    Console.WriteLine("Got exception when trying to cancel task {0}:{1}", task.TaskId, ex.Message);
                                }
                            }
                        }
                    }
                }
                catch (Exception ex)
                {
                    Console.WriteLine("Exception when trying to cancel the service tasks. {0}", ex.Message);
                }
            });

            Console.WriteLine("Retrieving responses...");
            try
            {
                int count = 0;
                foreach (var response in client.GetResponses<EchoOnExitResponse>())
                {
                    try
                    {
                        // Accessing Result throws if the underlying request failed
                        // (e.g. its service task was cancelled above).
                        string reply = response.Result.EchoOnExitResult;
                        Console.WriteLine("\tReceived response for request {0}: {1}", response.GetUserData<int>(), reply);
                        count++;
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine("Error occurred while processing {0}-th request: {1}", response.GetUserData<int>(), ex.Message);
                    }
                }
                Console.WriteLine("Done retrieving responses.{0}/{1} responses retrieved ", count, numRequests);
            }
            catch (SessionException ex)
            {
                Console.WriteLine("SessionException while getting responses: {0}", ex.Message);
            }
            catch (Exception ex)
            {
                Console.WriteLine("Exception while getting responses: {0}", ex.Message);
            }
        }

        // Close connections and delete messages stored in the system.
        session.Close();

        Console.WriteLine("Press any key to exit.");
        Console.ReadKey();
    }
}
/// <summary>
/// Main entrypoint for Activation Filter
/// </summary>
/// <param name="args">
/// Expect only a single argument containing an XML file
/// </param>
/// <returns>
/// int value from Activation Filter
/// </returns>
static int Main(string[] args)
{
    string clusterName = Environment.GetEnvironmentVariable("CCP_SCHEDULER");

    if (setupLogFile() != 0)
    {
        return (int)AFReturnValue.FilterFailure;
    }

    int retval = (int)AFReturnValue.FilterFailure;
    try
    {
        // If the job is submitted outside peak business hours, no change is necessary
        if (DuringOffHours())
        {
            logFile.WriteLine("AF: During Off Peak Hours, job starting");
            return (int)AFReturnValue.StartJob;
        }

        // Currently during peak business hours
        // Check if user is authorized to start a job during these hours
        // If not, delay the start of the job until off peak hours are in play

        // Check that there is only one argument. If more than 1 argument exists,
        // put a warning in the log file, but try to process it anyway
        if (args.Length != 1)
        {
            logFile.WriteLine("Only 1 parameter expected containing the name of the job xml file");
            logFile.WriteLine("Received {0} parameters", args.Length);

            // If no parameters exist, cannot parse XML file
            if (args.Length == 0)
            {
                return (int)AFReturnValue.FilterFailure;
            }
        }

        String fileName = args[0];

        // Load the job file as an XmlDocument.
        XmlDocument doc = new XmlDocument();
        doc.Load(fileName);
        XmlNamespaceManager nsMgr = new XmlNamespaceManager(doc.NameTable);
        nsMgr.AddNamespace("hpc", xmlNameSpace);

        // Find the job node in the XML document.
        XmlNode jobXML = doc.SelectSingleNode("/hpc:Job", nsMgr);
        if (jobXML == null)
        {
            throw new Exception("No job in the xml file");
        }

        // Find the User attribute for the job. Guard against a missing
        // attribute so we log a clear message instead of crashing with a
        // NullReferenceException (which would be swallowed into FilterFailure
        // without explanation).
        XmlAttributeCollection attrCol = jobXML.Attributes;
        XmlAttribute userAttr = attrCol["UserName"];
        if (userAttr == null)
        {
            throw new Exception("Job xml has no UserName attribute");
        }
        string user = userAttr.Value;

        // If user does not have permission to run jobs during peak hours, adjust HoldUntil if needed
        if (!PeakHoursUser(user))
        {
            string jobIdString = attrCol["Id"].Value;
            int jobId;
            Int32.TryParse(jobIdString, out jobId);
            if (jobId != 0)
            {
                using (IScheduler scheduler = new Scheduler())
                {
                    scheduler.Connect(clusterName);
                    ISchedulerJob job = scheduler.OpenJob(jobId);
                    DateTime peakEnd = DateTime.Today.AddHours((double)endhours);

                    // If the job is not already set to delay until off peak hours, set it
                    // This property should be null, but could be non-null if some other
                    // thread has set it after scheduling called the activation filter
                    if ((job.HoldUntil == null) || (job.HoldUntil < peakEnd))
                    {
                        job.SetHoldUntil(peakEnd);
                        job.Commit();
                        logFile.WriteLine("Delay job {0} until off peak hours", jobId);
                    }
                    else
                    {
                        logFile.WriteLine("Job {0} already set to {1}", jobId, job.HoldUntil);
                    }
                    scheduler.Close();
                } // using scheduler
            }
            else
            {
                logFile.WriteLine("jobId == 0, delaying job by default duration");
            }
            retval = (int)AFReturnValue.HoldJobUntil;
        }
        else
        {
            logFile.WriteLine("Job to run during peak hours");
            retval = (int)AFReturnValue.StartJob;
        }
    }
    catch (IOException e)
    {
        logFile.WriteLine("Error Loading the XmlFile");
        logFile.WriteLine(e.ToString());
        retval = (int)AFReturnValue.FilterFailure;
    }
    catch (Exception e)
    {
        logFile.WriteLine(e.ToString());
        retval = (int)AFReturnValue.FilterFailure;
    }
    finally
    {
        logFile.Close();
    }
    return (retval);
}
/// <summary>
/// Sample: submit a parametric sweep job (equivalent to
/// "job submit /parametric:1-500 echo *"), watch its progress in real time,
/// then override Progress and ProgressMessage manually.
/// </summary>
static void Main(string[] args)
{
    string clusterName = Environment.GetEnvironmentVariable("CCP_SCHEDULER");

    // Create a scheduler object used to connect to the scheduler.
    using (IScheduler scheduler = new Scheduler())
    {
        // Connect to the scheduler.
        Console.WriteLine("Connecting to cluster {0}", clusterName);
        scheduler.Connect(clusterName);

        // Create a job equivalent to the cmdline string: job submit /parametric:1-500 "echo *"
        Console.WriteLine("Creating parametric sweep job");

        // First create a SchedulerJob object
        ISchedulerJob job = scheduler.CreateJob();
        // and a task object.
        ISchedulerTask task = job.CreateTask();

        // Set the command line to "echo *"
        task.CommandLine = "echo *";

        // and set the parametric task settings: sweep 1..500 stepping by 1.
        task.Type = TaskType.ParametricSweep;
        task.StartValue = 1;
        task.IncrementValue = 1;
        task.EndValue = 500;

        // Add the task to the job.
        job.AddTask(task);

        // Create an event handler so that we know when the job starts running.
        job.OnJobState += new EventHandler<JobStateEventArg>(job_OnJobState);

        // And submit. You will be prompted for your credentials if they
        // aren't already cached.
        Console.WriteLine("Submitting job...");
        scheduler.SubmitJob(job, null, null);
        Console.WriteLine("Job submitted");

        // Wait for the job to start running.
        jobStatus.WaitOne();
        jobStatus.Reset();

        // You can get realtime updates on the job through the api;
        // we'll keep checking every second for 5 seconds.
        for (int i = 0; i < 5; i++)
        {
            // Refresh the job object with updates from the cluster.
            job.Refresh();
            Console.Write("Current job progress: " + job.Progress);
            Console.SetCursorPosition(0, Console.CursorTop);

            // Check again after a second.
            Thread.Sleep(1 * 1000);
        }

        // This field isn't read-only: you can specify your own progress
        // value depending on your needs.
        // (The original wrapped comment lost its "//" on the continuation
        // line, leaving bare text in code position.)
        Console.WriteLine();
        Console.WriteLine("Manually changing job progress");
        job.Progress = 0;
        // Commit the changes to the server.
        job.Commit();
        Console.WriteLine("Current job progress: " + job.Progress);

        // You can also set progress messages, which will also be viewable
        // in the Job Management UI.
        Console.WriteLine("Setting job progress message");
        job.ProgressMessage = "Job is still running";
        // Commit the changes to the server.
        job.Commit();
        Console.WriteLine("Progress message: " + job.ProgressMessage);

        // Wait for the job to finish.
        Console.WriteLine("Waiting for the job to finish...");
        jobStatus.WaitOne();

        // job.Progress will no longer increment automatically;
        // the job will finish regardless of the value of job.Progress.
        Console.WriteLine("Finished job progress: " + job.Progress);

        // Close the scheduler connection.
        scheduler.Close();
    }
}
/// <summary>
/// Entry point for an activation filter.
/// </summary>
/// <param name="jobXml">XML stream containing the job in question.</param>
/// <param name="schedulerPass">Scheduling pass number (unused here).</param>
/// <param name="jobIndex">Index of the job in this pass (unused here).</param>
/// <param name="backfill">Whether the job is being backfilled (unused here).</param>
/// <param name="resourceCount">Resources offered to the job (unused here).</param>
/// <returns>An ActivationFilterResponse telling the scheduler what to do.</returns>
public ActivationFilterResponse FilterActivation(Stream jobXml, int schedulerPass, int jobIndex, bool backfill, int resourceCount)
{
    if (setupLogFile() != 0)
    {
        return (ActivationFilterResponse.FailJob);
    }

    ActivationFilterResponse retval = ActivationFilterResponse.FailJob;
    try
    {
        // If the job is submitted outside peak business hours, no change is necessary
        if (DuringOffHours())
        {
            logFile.WriteLine("AF: During Off Peak Hours, job starting");
            return (ActivationFilterResponse.StartJob);
        }

        // Currently during peak business hours
        // Check if user is authorized to start a job during these hours
        // If not, delay the start of the job until off peak hours are in play

        // Load the job file as an XmlDocument.
        XmlDocument doc = new XmlDocument();
        doc.Load(jobXml);
        XmlNamespaceManager nsMgr = new XmlNamespaceManager(doc.NameTable);
        nsMgr.AddNamespace("hpc", xmlNameSpace);

        // Find the job node in the XML document.
        // (Renamed from "jobXML", which differed from the "jobXml" stream
        // parameter only by letter case.)
        XmlNode jobNode = doc.SelectSingleNode("/hpc:Job", nsMgr);
        if (jobNode == null)
        {
            throw new Exception("No job in the xml file");
        }

        // Find the User attribute for the job. Guard against a missing
        // attribute so we log a clear message instead of crashing with a
        // NullReferenceException (which would be swallowed into FailJob
        // without explanation).
        XmlAttributeCollection attrCol = jobNode.Attributes;
        XmlAttribute userAttr = attrCol["User"];
        if (userAttr == null)
        {
            throw new Exception("Job xml has no User attribute");
        }
        string user = userAttr.Value;

        // If user does not have permission to run jobs during peak hours, adjust HoldUntil if needed
        if (!PeakHoursUser(user))
        {
            string jobIdString = attrCol["Id"].Value;
            int jobId;
            Int32.TryParse(jobIdString, out jobId);
            if (jobId != 0)
            {
                using (IScheduler scheduler = new Scheduler())
                {
                    // The filter runs on the head node, so connect locally.
                    scheduler.Connect("localhost");
                    ISchedulerJob job = scheduler.OpenJob(jobId);
                    DateTime peakEnd = DateTime.Today.AddHours((double)endhours);

                    // If the job is not already set to delay until off peak hours, set it
                    // This property should be null, but could be non-null if some other
                    // thread has set it after scheduling called the activation filter
                    if ((job.HoldUntil == null) || (job.HoldUntil < peakEnd))
                    {
                        job.SetHoldUntil(peakEnd);
                        job.Commit();
                        logFile.WriteLine("Delay job {0} until off peak hours", jobId);
                    }
                    else
                    {
                        logFile.WriteLine("Job {0} already set to {1}", jobId, job.HoldUntil);
                    }
                    scheduler.Close();
                } // using scheduler
            }
            else
            {
                logFile.WriteLine("jobId == 0, delaying job by default duration");
            }
            retval = ActivationFilterResponse.HoldJobReleaseResourcesAllowOtherJobsToSchedule;
        }
        else
        {
            logFile.WriteLine("Job to run during peak hours");
            retval = ActivationFilterResponse.StartJob;
        }
    }
    catch (IOException e)
    {
        logFile.WriteLine("Error Loading the XmlFile");
        logFile.WriteLine(e.ToString());
        retval = ActivationFilterResponse.FailJob;
    }
    catch (Exception e)
    {
        logFile.WriteLine(e.ToString());
        retval = ActivationFilterResponse.FailJob;
    }
    finally
    {
        logFile.Close();
    }
    return (retval);
}