public virtual void AddEntry(IDistributable entry, double probability, bool unique, bool always, bool active)
 {
     AddEntry(entry, probability);
     entry.AlwaysDrop = always;
     entry.IsUnique   = unique;
     entry.IsActive   = active;
 }
Example #2
        /// <summary>
        /// Submits the job and waits for it to complete.
        /// When it submits, it creates a log entry file in the cluster working directory, named according to the run name. This file is deleted
        /// when the job finishes successfully, so long as we're still waiting for it to finish. If SubmitAndWait is called and this file already
        /// exists, then it is assumed that the job we want to submit was already submitted, so we wait for it to finish rather than submit again.
        /// </summary>
        /// <param name="clusterArgs"></param>
        /// <param name="distributableObj"></param>
        /// <param name="maxSubmitAfterTasksFail"></param>
        /// <param name="OnSubmittedCallbackOrNull"></param>
        public static void SubmitAndWait(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj, int maxSubmitAfterTasksFail = 0, Action OnSubmittedCallbackOrNull = null)
        {
            using (ParallelOptionsScope.Suspend())
            {
                FileInfo logEntryFile = HpcLibSettings.GetLogEntryFile(clusterArgs);
                if (logEntryFile.Exists)
                {
                    Console.WriteLine(Resource.Job_already_exists, logEntryFile.FullName);
                    clusterArgs = HpcLibSettings.LoadLogEntryFile(logEntryFile).ClusterArgs;
                }
                else
                {
                    Submit(clusterArgs, distributableObj);
                    Console.WriteLine(Resource.Wait_Writing_log);
                    HpcLibSettings.WriteLogEntryToClusterDirectory(clusterArgs);
                }

                if (OnSubmittedCallbackOrNull != null)
                {
                    OnSubmittedCallbackOrNull();
                }

                JobState jobState = WaitForJobInternal(clusterArgs, maxSubmitAfterTasksFail);

                logEntryFile.Delete();  // job finished successfully, so we can delete this. Even if failed or canceled, we assume that we'll want to overwrite in the future.

                if (jobState != JobState.Finished)
                {
                    throw new Exception("Job " + jobState);
                }
            }
        }
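A minimal usage sketch of the method above, assuming ClusterSubmitterArgs can be constructed and configured directly (Name and TaskCount are properties used elsewhere in this code) and that myJob is an IDistributable supplied by the caller; both names are hypothetical:

    ClusterSubmitterArgs clusterArgs = new ClusterSubmitterArgs() { Name = "myRun", TaskCount = 100 };
    // Blocks until the job reaches a terminal state and throws if that state is not Finished.
    ClusterSubmitter.SubmitAndWait(clusterArgs, myJob, maxSubmitAfterTasksFail: 1,
        OnSubmittedCallbackOrNull: () => Console.WriteLine("Submitted job " + clusterArgs.JobID));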
Example #3
        public static void SubmitAndWait(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj, int maxSubmitAfterTasksFail = 0)
        {
            using (ParallelOptionsScope.Suspend())
            {
                int numberOfTries = 0;

retry:

                Submit(clusterArgs, distributableObj);

                JobWaitingParams jobWaitingParams = WaitForJobInternal(clusterArgs);

                if (jobWaitingParams.JobState == v2008R2.Properties.JobState.Canceled)
                {
                    throw new Exception("Job canceled.");
                }
                else if (jobWaitingParams.JobState == v2008R2.Properties.JobState.Failed)
                {
                    if (numberOfTries < maxSubmitAfterTasksFail)
                    {
                        ++numberOfTries;
                        Console.WriteLine("Job failed, trying again...");
                        goto retry;
                    }
                    throw new Exception("Job failed.");
                }
                //HpcLib.HpcLib.CopyFiles(new List<String> { "" }, _remoteTaskOutDir, TASK_OUT_DIR);
            }
        }
        private bool AddItemToResults(List <IDistributable> results, IDistributable entry)
        {
            if (entry.IsUnique && results.Contains(entry))
            {
                return(false);
            }

            if (!(entry is NullDistributableValue))
            {
                if (entry is DistributableTable)
                {
                    results.AddRange(((IDistributableTable)entry).GetResults());
                }
                else
                {
                    IDistributable entity = entry;
                    if (entry is IDistributableCreator)
                    {
                        entity = ((IDistributableCreator)entry).CreateInstance();
                    }

                    results.Add(entity);
                }
            }

            return(true);
        }
Example #5
 /// <summary>
 /// Distributes the task over HPC
 /// </summary>
 /// <param name="distributableObject">distributable task</param>
 public override void Distribute(IDistributable distributableObject)
 {
     this.Name = distributableObject.JobName;
     ClusterSubmitter.SubmitAndWait(this, distributableObject, MaxSubmitAfterTasksFail, OnSubmitted);
     CopyResults.AddRange(ArgumentCollection.EnumerateValuesOfTypeFromParsable <OutputFile>(distributableObject).Select(file => file.ToString()).Distinct().Where(s => s != "-"));
     if (CopyResults.Count > 0)
     {
         HpcLib.CopyFiles(CopyResults, ExternalRemoteDirectoryName, Environment.CurrentDirectory);
     }
 }
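The result-copying step at the end can be exercised on its own; a minimal sketch, assuming the job has already finished, with hypothetical file names and share path, and calling HpcLib.CopyFiles exactly as the method body does:

    List<string> copyResults = new List<string>() { "results.tsv", "summary.txt" };  // hypothetical output file names
    string externalRemoteDirectoryName = @"\\cluster\share\myRun";                    // hypothetical cluster working share
    if (copyResults.Count > 0)
    {
        // Copies each listed file from the cluster share back into the local working directory.
        HpcLib.CopyFiles(copyResults, externalRemoteDirectoryName, Environment.CurrentDirectory);
    }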
Example #6
        private static void SubmitInternal(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
        {
            lock (_submitterLockObj)  // for now, just let one thread submit at a time.
            {
                if (clusterArgs.Archive != null)
                {
                    MBT.Escience.FileUtils.ArchiveExes(clusterArgs.Archive);
                }

                //ArgumentCollection argsToUse = (ArgumentCollection)applicationArgs.Clone();

                CopyExes(clusterArgs);

                clusterArgs.StdErrDirName = CreateUniqueDirectory(clusterArgs.ExternalRemoteDirectoryName, "Stderr", clusterArgs.Name);
                clusterArgs.StdOutDirName = CreateUniqueDirectory(clusterArgs.ExternalRemoteDirectoryName, "Stdout", clusterArgs.Name);


                if (clusterArgs.CopyInputFiles.Count > 0)
                {
                    CopyInputFiles(clusterArgs.CopyInputFiles, clusterArgs.ExternalRemoteDirectoryName);
                }

                using (ParallelOptionsScope.Suspend())
                {
                    switch (clusterArgs.Version)
                    {
                    case 1:
                        SubmitViaAPI1(clusterArgs, distributableObj);
                        break;

                    case 2:
                        Console.Error.WriteLine("Api2 and 3 are the same. Submitting via Api3.");
                        SubmitViaAPI3(clusterArgs, distributableObj);
                        break;

                    case 3:
                        SubmitViaAPI3(clusterArgs, distributableObj);
                        break;

                    default:
                        throw new NotSupportedException(string.Format("Cluster version {0} is not supported.", clusterArgs.Version));
                    }
                }
                Console.WriteLine("Processed job to cluster {0} with path {1}", clusterArgs.Cluster, clusterArgs.ExternalRemoteDirectoryName);


                Console.WriteLine("Writing log file");
                HpcLibSettings.TryWriteToLog(clusterArgs);
                Console.WriteLine("Writing log entry to cluster directory");
                HpcLibSettings.WriteLogEntryToClusterDirectory(clusterArgs);
                Console.WriteLine("Done");
            }
            return;
        }
Example #7
        /// <summary>
        /// Runs Tasks locally on distributableObject.
        /// </summary>
        /// <param name="distributableObject">The object that will run the tasks.</param>
        public void Distribute(IDistributable distributableObject)
        {
            using (ParallelOptionsScope.Create(ParallelOptions))
            {
                distributableObject.RunTasks(Tasks, TaskCount);

                if (Cleanup)
                {
                    distributableObject.Cleanup(TaskCount);
                }
            }
        }
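A minimal sketch of driving this local distributor directly, assuming the Distribute method above belongs to the Distribute.Locally class used elsewhere in this code and that myJob is an IDistributable supplied by the caller:

    Distribute.Locally local = new Distribute.Locally()
    {
        TaskCount       = 10,
        Tasks           = new RangeCollection(0, 9),  // run task pieces 0 through 9 in this process
        Cleanup         = true,
        ParallelOptions = new ParallelOptions() { MaxDegreeOfParallelism = Environment.ProcessorCount }
    };
    local.Distribute(myJob);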
Example #8
        private static void SubmitInternal(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
        {
            lock (_submitterLockObj)  // for now, just let one thread submit at a time.
            {
                if (string.IsNullOrEmpty(clusterArgs.Name))
                {
                    clusterArgs.Name = distributableObj.JobName;
                }

                CopyExes(clusterArgs);

                clusterArgs.StdErrDirName = CreateUniqueDirectory(clusterArgs.ExternalRemoteDirectoryName, "Stderr", distributableObj.JobName);
                clusterArgs.StdOutDirName = CreateUniqueDirectory(clusterArgs.ExternalRemoteDirectoryName, "Stdout", distributableObj.JobName);

                if (clusterArgs.CopyInputFiles != null)
                {
                    if (!(distributableObj is DistributableWrapper))
                    {
                        clusterArgs.CopyInputFiles.AddRange(ArgumentCollection.EnumerateValuesOfTypeFromParsable <InputFile>(distributableObj).Select(file => file.ToString()));
                    }

                    if (clusterArgs.CopyInputFiles.Count > 0)
                    {
                        CopyInputFiles(clusterArgs.CopyInputFiles, clusterArgs.ExternalRemoteDirectoryName);
                    }
                }

                using (ParallelOptionsScope.Suspend())
                {
                    switch (clusterArgs.Version)
                    {
                    case 3:
                        SubmitViaAPI3(clusterArgs, distributableObj);
                        break;

                    default:
                        throw new NotSupportedException(string.Format("Cluster version {0} is not supported.", clusterArgs.Version));
                    }
                }
                Console.WriteLine(Resource.Processed_job, clusterArgs.Cluster, clusterArgs.ExternalRemoteDirectoryName);


                Console.WriteLine(Resource.Writing_log_file);
                HpcLibSettings.TryWriteToLog(clusterArgs);

                Console.WriteLine(Resource.Done);
            }
            return;
        }
Example #9
        private static void SubmitViaAPI1(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
        {
            Console.WriteLine("Submitting using API version 1");

            v1.ICluster cluster = new v1.Cluster();
            cluster.Connect(clusterArgs.Cluster);


            foreach (v1.ITask task in EnumerateTasks(clusterArgs, distributableObj))
            {
                v1.IJob job = CreateJobApi1(cluster, (v1.JobPriority)clusterArgs.ApiPriority, task, task.Name);
                cluster.QueueJob(job, clusterArgs.Username, clusterArgs.Password, true, 0);
            }
            Console.WriteLine();
        }
Example #10
        /// <summary>
        /// Runs Tasks locally on distributableObject.
        /// </summary>
        /// <param name="distributableObject">The object that will run the tasks.</param>
        public void Distribute(IDistributable distributableObject)
        {
            Console.CancelKeyPress += (sender, eventArgs) =>
            {
                distributableObject.Cancel();
                Environment.ExitCode = -1073741510; // exit by control break
            };

            using (ParallelOptionsScope.Create(ParallelOptions))
            {
                distributableObject.RunTasks(Tasks, TaskCount);

                if (Cleanup)
                    distributableObject.Cleanup(TaskCount);
            }
        }
        /// <summary>
        /// Runs Tasks locally on distributableObject.
        /// </summary>
        /// <param name="distributableObject">The object that will run the tasks.</param>
        public void Distribute(IDistributable distributableObject)
        {
            Console.CancelKeyPress += (sender, eventArgs) =>
            {
                distributableObject.Cancel();
                Environment.ExitCode = -1073741510; // exit by control break
            };

            using (ParallelOptionsScope.Create(ParallelOptions))
            {
                distributableObject.RunTasks(Tasks, TaskCount);

                if (Cleanup)
                {
                    distributableObject.Cleanup(TaskCount);
                }
            }
        }
Example #12
 /// <summary>
 /// Submits the ArgumentCollection to the cluster, telling the cluster to run whichever exe is currently running, using a new set of args that divides the work up into tasks.
 /// </summary>
 /// <param name="clusterArgs">cluster args</param>
 /// <param name="distributableObj">distributable objects</param>
 public static void Submit(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
 {
     for (int numTries = 0; numTries < clusterArgs.MaxSubmitTries; numTries++)
     {
         try
         {
             SubmitInternal(clusterArgs, distributableObj);
             return;
         }
         catch (Exception exception)
         {
             Console.WriteLine(Resource.Error_Submitting + clusterArgs.Cluster + ": " + exception.Message);
             Console.WriteLine(string.Format(CultureInfo.CurrentCulture, "numTry={0} of {1}", numTries, clusterArgs.MaxSubmitTries));
             Console.WriteLine(exception.StackTrace);
             Console.WriteLine(Resource.User_CluserHelp);
             Thread.Sleep(new TimeSpan(0, 10, 0));
         }
     }
     throw new Exception("max number of cluster submitter tries (" + clusterArgs.MaxSubmitTries + ") exceeded");
 }
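For a fire-and-forget submission the same arguments can be passed without waiting for completion; a minimal sketch, reusing the hypothetical clusterArgs and myJob from the earlier sketch:

    // Retries SubmitInternal up to clusterArgs.MaxSubmitTries times, sleeping ten minutes between failed attempts.
    ClusterSubmitter.Submit(clusterArgs, myJob);
    Console.WriteLine("Queued job " + clusterArgs.JobID + " on cluster " + clusterArgs.Cluster);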
Example #13
 public static void Submit(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
 {
     for (int numTries = 0; numTries < clusterArgs.MaxSubmitTries; numTries++)
     {
         try
         {
             SubmitInternal(clusterArgs, distributableObj);
             return;
         }
         catch (Exception exception)
         {
             Console.WriteLine("\n\nError submitting to cluster " + clusterArgs.Cluster + ": " + exception.Message);
             Console.WriteLine("numTry=" + numTries + " of " + clusterArgs.MaxSubmitTries);
             Console.WriteLine(exception.StackTrace);
             Console.WriteLine("\n\nUse -cluster help to see usage.");
             Thread.Sleep(new TimeSpan(0, 10, 0));
         }
     }
     throw new Exception("max number of cluster submitter tries (" + clusterArgs.MaxSubmitTries + ") exceeded");
 }
Example #14
        public void Deliver(IDistributable distributable)
        {
            var endpoints = _endpointRepository.GetEndpointsForProfile(distributable.ProfileName);

            foreach (var endpoint in endpoints)
            {
                try
                {
                    DeliverToEndpoint((TDistributable)distributable, endpoint);

                    _deliveryRepository?.RecordDelivery(new Delivery(distributable.Id, endpoint.Id));

                    OnSuccess((TDistributable)distributable, endpoint);

                    //TODO: Mark file as delivered to endpoint.
                    //Not all deliveries may be idempotent so we only ever want to deliver a file once.
                }
                catch (Exception exception)
                {
                    //Call virtual OnError method and then continue to the next endpoint delivery.
                    OnError((TDistributable)distributable, endpoint, exception);
                }
            }
        }
Example #15
 /// <summary>
 /// Distribute.
 /// </summary>
 /// <param name="distributableObject"></param>
 public void Distribute(IDistributable distributableObject)
 {
     throw new NotImplementedException();
 }
Example #16
        private static v2008R2.ISchedulerTask AddCleanupTaskToJob(ClusterSubmitterArgs clusterArgs, v2008R2.IScheduler scheduler, v2008R2.ISchedulerJob job, IDistributable distributableJob)
        {
            v2008R2.ISchedulerCollection taskList        = job.GetTaskList(scheduler.CreateFilterCollection(), scheduler.CreateSortCollection(), true);
            v2008R2.IStringCollection    dependencyTasks = scheduler.CreateStringCollection();

            if (!clusterArgs.OnlyDoCleanup)
            {
                dependencyTasks.Add(((v2008R2.ISchedulerTask)taskList[0]).Name);
            }
            v2008R2.ISchedulerTask cleanupTask = CreateCleanupTask(job, clusterArgs.ExternalRemoteDirectoryName, clusterArgs.StdErrDirName, clusterArgs.StdOutDirName, "cleanup", true);

            Distribute.Locally local = new Distribute.Locally()
            {
                Cleanup         = true,
                TaskCount       = clusterArgs.TaskCount,
                Tasks           = new RangeCollection(),
                ParallelOptions = new ParallelOptions()
                {
                    MaxDegreeOfParallelism = 1
                }
            };

            Distribute.Distribute distributeExe = new Distribute.Distribute()
            {
                Distributor   = local,
                Distributable = distributableJob
            };

            string exeName = distributableJob is DistributableWrapper ? clusterArgs.ExeName : distributeExe.GetType().Assembly.GetName().Name;

            //args.AddOptionalFlag("cleanup");
            //args.AddOptional("tasks", "empty");
            string taskCommandLine = string.Format("{0}\\{1} {2}", clusterArgs.ExeRelativeDirectoryName, exeName, CreateTaskString(distributeExe, clusterArgs.MinimalCommandLine));

            cleanupTask.CommandLine = taskCommandLine;

            if (!clusterArgs.OnlyDoCleanup)
            {
                cleanupTask.DependsOn = dependencyTasks;
            }
            job.AddTask(cleanupTask);
            return(cleanupTask);
        }
Example #17
        private static v2008R2.ISchedulerTask CreateTask(int?taskNumber, ClusterSubmitterArgs clusterArgs, v2008R2.ISchedulerJob job, IDistributable distributableObj, v2008R2.IStringCollection nodesToUse)
        {
            Distribute.Locally local = new Distribute.Locally()
            {
                Cleanup         = false,
                TaskCount       = clusterArgs.TaskCount,
                Tasks           = taskNumber.HasValue ? new RangeCollection(taskNumber.Value) : null,
                ParallelOptions = new ParallelOptions()
                {
                    MaxDegreeOfParallelism = 1
                }
            };

            v2008R2.ISchedulerTask task = job.CreateTask();
            if (nodesToUse != null)
            {
                task.RequiredNodes = nodesToUse;
            }
            if (clusterArgs.NumCoresPerTask != null)
            {
                task.MinimumNumberOfCores = clusterArgs.NumCoresPerTask.Value;
                task.MaximumNumberOfCores = clusterArgs.NumCoresPerTask.Value;
                task.MaximumNumberOfNodes = 1;
                local.ParallelOptions.MaxDegreeOfParallelism = clusterArgs.NumCoresPerTask.Value;
            }
            else if (clusterArgs.IsExclusive)
            {
                //task.MinimumNumberOfCores = 1;
                //task.MaximumNumberOfCores = 8;
                //task.MaximumNumberOfNodes = 1;
            }
            task.WorkDirectory = clusterArgs.ExternalRemoteDirectoryName;

            Distribute.Distribute distributeExe = new Distribute.Distribute()
            {
                Distributable = distributableObj,
                Distributor   = local
            };

            string taskArgString = CreateTaskString(distributeExe, clusterArgs.MinimalCommandLine);
            string exeName       = distributeExe.Distributable is DistributableWrapper ? clusterArgs.ExeName : distributeExe.GetType().Assembly.GetName().Name;

            string taskCommandLine = null;

            if (clusterArgs.UseMPI)
            {
                taskCommandLine = string.Format("mpiexec -n {0} {1}\\{2} {3}", clusterArgs.NumCoresPerTask, clusterArgs.ExeRelativeDirectoryName, exeName, taskArgString);
            }
            else
            {
                taskCommandLine = string.Format("{0}\\{1} {2}", clusterArgs.ExeRelativeDirectoryName, exeName, taskArgString);
            }
            task.CommandLine = taskCommandLine;

            string taskNumberAsString = taskNumber.HasValue ? taskNumber.Value.ToString() : "*";

            task.Name           = Helper.CreateDelimitedString(" ", clusterArgs.Name, taskNumberAsString);
            task.StdErrFilePath = string.Format(@"{0}\{1}.txt", clusterArgs.StdErrDirName, taskNumberAsString);
            task.StdOutFilePath = string.Format(@"{0}\{1}.txt", clusterArgs.StdOutDirName, taskNumberAsString);

            if (task.StdErrFilePath.Length >= 160)
            {
                Console.WriteLine("Caution, std error file path is {0} characters, which will probably cause HPC to crash.", task.StdErrFilePath.Length);
            }

            return(task);
        }
Example #18
        private static v1.ITask CreateTask(ClusterSubmitterArgs clusterArgs, int taskNum, IDistributable distributableObj)
        {
            v1.ITask task = new v1.Task();
            task.WorkDirectory = clusterArgs.InternalRemoteDirectory;

            Distribute.Locally local = new Distribute.Locally()
            {
                Cleanup         = false,
                TaskCount       = clusterArgs.TaskCount,
                ParallelOptions = new ParallelOptions()
                {
                    MaxDegreeOfParallelism = 1
                },
                Tasks = new RangeCollection(taskNum)
            };

            Distribute.Distribute distributeExe = new Distribute.Distribute()
            {
                Distributable = distributableObj,
                Distributor   = local
            };

            string taskString = CreateTaskString(distributeExe, clusterArgs.MinimalCommandLine);
            string exeName    = distributableObj is DistributableWrapper ? clusterArgs.ExeName : distributeExe.GetType().Assembly.GetName().Name;

            string taskCommandLine = string.Format("{0}\\{1} {2}", clusterArgs.ExeRelativeDirectoryName, exeName, taskString);

            task.CommandLine = taskCommandLine;

            task.Name        = Helper.CreateDelimitedString(" ", clusterArgs.Name, taskNum);
            task.IsExclusive = false;
            task.MinimumNumberOfProcessors = 1;
            task.MaximumNumberOfProcessors = 1;

            task.Stderr = string.Format(@"{0}\{1}.txt", clusterArgs.StdErrDirName, taskNum);
            task.Stdout = string.Format(@"{0}\{1}.txt", clusterArgs.StdOutDirName, taskNum);

            task.Runtime = "Infinite";
            return(task);
        }
Example #19
        private static void SubmitViaAPI3(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
        {
            Console.WriteLine(string.Format("Connecting to cluster {0} using API version 3 .", clusterArgs.Cluster));

            using (v2008R2.IScheduler scheduler = new v2008R2.Scheduler())
            {
                scheduler.Connect(clusterArgs.Cluster);
                v2008R2.ISchedulerJob job = scheduler.CreateJob();
                job.Name     = clusterArgs.Name;
                job.Priority = (v2008R2.Properties.JobPriority)clusterArgs.ApiPriority;

                if (clusterArgs.JobTemplate != null)
                {
                    Microsoft.Hpc.Scheduler.IStringCollection jobTemplates = scheduler.GetJobTemplateList();
                    string decodedJobTemplate = HttpUtility.UrlDecode(clusterArgs.JobTemplate);
                    if (jobTemplates.Contains(decodedJobTemplate))
                    {
                        job.SetJobTemplate(decodedJobTemplate);
                    }
                    else
                    {
                        Console.WriteLine("Job template '" + decodedJobTemplate + "' does not exist at specified cluster. Existing templates are:");
                        foreach (var template in jobTemplates)
                        {
                            Console.Write("'" + template + "' ");
                        }
                        Console.WriteLine("\nUsing Default job template...");
                    }
                }


                if (clusterArgs.NumCoresPerTask != null)
                {
                    clusterArgs.IsExclusive = false;
                }

                v2008R2.IStringCollection nodesToUse = null;

                if (clusterArgs.NodeExclusionList != null && clusterArgs.NodeExclusionList.Count > 0)
                {
                    nodesToUse = GetNodesToUse(clusterArgs, scheduler, job);
                }
                else if (clusterArgs.NodesToUseList != null && clusterArgs.NodesToUseList.Count > 0)
                {
                    nodesToUse = scheduler.CreateStringCollection();
                    foreach (string nodeName in clusterArgs.NodesToUseList)
                    {
                        nodesToUse.Add(nodeName);
                    }
                }
                else if (clusterArgs.NumCoresPerTask != null)
                {
                    job.AutoCalculateMax = true;
                    job.AutoCalculateMin = true;
                }
                else if (clusterArgs.IsExclusive)
                {
                    job.UnitType = Microsoft.Hpc.Scheduler.Properties.JobUnitType.Node;
                    if (clusterArgs.MinimumNumberOfNodes != null)
                    {
                        job.MaximumNumberOfNodes = clusterArgs.MaximumNumberOfNodes.Value;
                        job.MinimumNumberOfNodes = clusterArgs.MinimumNumberOfNodes.Value;
                    }
                }
                else if (clusterArgs.MinimumNumberOfCores != null)
                {
                    Helper.CheckCondition(clusterArgs.MaximumNumberOfCores != null, "must provide both MinCores and MaxCores, not just one");
                    job.MaximumNumberOfCores = clusterArgs.MaximumNumberOfCores.Value;
                    job.MinimumNumberOfCores = clusterArgs.MinimumNumberOfCores.Value;
                    job.AutoCalculateMax     = false;
                    job.AutoCalculateMin     = false;
                }
                else
                {
                    job.AutoCalculateMax = true;
                    job.AutoCalculateMin = true;
                }


                //bool checkIfValid = ValidateParamsOrNull != null;

                if (!clusterArgs.OnlyDoCleanup)
                {
                    if (clusterArgs.TaskRange.IsContiguous())
                    {
                        if (clusterArgs.TaskRange.LastElement > clusterArgs.TaskCount - 1)
                        {
                            clusterArgs.TaskRange = new RangeCollection(clusterArgs.TaskRange.FirstElement, clusterArgs.TaskCount - 1);
                        }
                        v2008R2.ISchedulerTask task = CreateTask(null, clusterArgs, job, distributableObj, nodesToUse);

                        task.IsParametric = true; // IsParametric is marked as obsolete, but is it still necessary for submitting to a v2 cluster?

                        //task.Type = TaskType.ParametricSweep;

                        task.StartValue = 0;
                        task.EndValue   = clusterArgs.TaskCount - 1;

                        job.AddTask(task);
                    }
                    else
                    {
                        job.AddTasks(clusterArgs.TaskRange.Select(taskNum => CreateTask((int)taskNum, clusterArgs, job, distributableObj, nodesToUse)).ToArray());
                    }
                }
                else
                {
                    clusterArgs.Cleanup = true;
                }

                v2008R2.ISchedulerTask cleanupTask = null;
                if (clusterArgs.Cleanup)
                {
                    cleanupTask = AddCleanupTaskToJob(clusterArgs, scheduler, job, distributableObj);
                }

                Console.WriteLine("Submitting job.");
                scheduler.SubmitJob(job, null, null);
                clusterArgs.JobID = job.Id;
                Console.WriteLine(job.Name + " submitted.");
            }
        }
Example #20
 public virtual void Distribute(IDistributable distributableObject)
 {
     ClusterSubmitter.Submit(this, distributableObject);
 }
Example #21
        private static IEnumerable <v1.ITask> EnumerateTasks(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
        {
            //bool checkIfValid = ValidateParamsOrNull != null;


            for (int pieceIndex = 0; pieceIndex < clusterArgs.TaskCount; ++pieceIndex)
            {
                if (clusterArgs.TaskRange.Contains(pieceIndex))
                {
                    ArgumentCollection thisTasksArgs;
                    //if (TryCreateTaskArgsAndValidate(args,  pieceIndex.ToString(), out thisTasksArgs))
                    {
                        v1.ITask task = CreateTask(clusterArgs, pieceIndex, distributableObj);
                        yield return(task);
                    }
                }
            }
        }
Example #22
        private static ISchedulerTask CreateTask(int?taskNumber, ClusterSubmitterArgs clusterArgs, ISchedulerJob job, IDistributable distributableObj, IStringCollection nodesToUse)
        {
            Locally local = new Locally()
            {
                Cleanup   = false,
                TaskCount = clusterArgs.TaskCount,
                Tasks     = taskNumber.HasValue ? new RangeCollection(taskNumber.Value) : null,
            };

            ISchedulerTask task = job.CreateTask();

            if (nodesToUse != null)
            {
                task.RequiredNodes = nodesToUse;
            }
            if (clusterArgs.NumCoresPerTask != null)
            {
                task.MinimumNumberOfCores = clusterArgs.NumCoresPerTask.Value;
                task.MaximumNumberOfCores = clusterArgs.NumCoresPerTask.Value;
                task.MaximumNumberOfNodes = 1;
                local.ParallelOptions.MaxDegreeOfParallelism = clusterArgs.NumCoresPerTask.Value;
            }
            if (!clusterArgs.IsExclusive)
            {
                local.ParallelOptions = new ParallelOptions()
                {
                    MaxDegreeOfParallelism = 1
                };
            }

            task.WorkDirectory = clusterArgs.ExternalRemoteDirectoryName;

            DistributeApp.Distribute distributeExe = new DistributeApp.Distribute()
            {
                Distributable = distributableObj,
                Distributor   = local
            };

            string taskArgString = CreateTaskString(distributeExe);
            string exeName       = distributeExe.Distributable is DistributableWrapper ? clusterArgs.ExeName : distributeExe.GetType().Assembly.GetName().Name;

            string taskCommandLine = null;

            if (clusterArgs.UseMPI)
            {
                taskCommandLine = string.Format("mpiexec -n {0} {1}\\{2} {3}", clusterArgs.NumCoresPerTask, clusterArgs.ExeRelativeDirectoryName, exeName, taskArgString);
            }
            else
            {
                taskCommandLine = string.Format("{0}\\{1} {2}", clusterArgs.ExeRelativeDirectoryName, exeName, taskArgString);
            }
            task.CommandLine = taskCommandLine;

            string taskNumberAsString = taskNumber.HasValue ? taskNumber.Value.ToString() : "*";

            task.Name = Helper.CreateDelimitedString(" ", distributableObj.JobName, taskNumberAsString);
            Console.WriteLine(Resource.StdOutRelativeDirName + clusterArgs.StdOutRelativeDirName);
            task.StdErrFilePath = string.Format(@"{0}\{1}.txt", clusterArgs.StdErrRelativeDirName, taskNumberAsString);
            task.StdOutFilePath = string.Format(@"{0}\{1}.txt", clusterArgs.StdOutRelativeDirName, taskNumberAsString);

            Console.WriteLine(Resource.CreateTask, task.CommandLine.Length, task.CommandLine);
            if (task.StdErrFilePath.Length >= 160)
            {
                Console.WriteLine(Resource.Caution, task.StdErrFilePath.Length);
            }

            return(task);
        }
Example #23
 public Distribution(IDistributable tenant) : this()
 {
     Tenants.Add(tenant);
 }
Example #24
 /// <summary>
 /// Distribute.
 /// </summary>
 /// <param name="distributableObject"></param>
 public void Distribute(IDistributable distributableObject)
 {
     throw new NotImplementedException();
 }
Example #25
            private void ThreadMethod()
            {
                actions = new List <Action>();
                CancellationToken tok = cts.Token;

                Action[] arrActions = new Action[0];
                int      x = 0, f = 0;

                while (!tok.IsCancellationRequested)
                {
                    if (ActionsChanged)
                    {
                        lock (Tenants)
                        {
                            ActionsChanged = false;

                            actions.Clear();

                            f = Tenants.Count;

                            if (f == 0)
                            {
                                zero = true;
                                many = false;
                                return;
                            }
                            if (f == 1)
                            {
                                zero = false;
                                many = false;
                            }
                            else
                            {
                                zero = false;
                                many = true;
                                for (x = 0; x < f; x++)
                                {
                                    IDistributable t = Tenants[x];

                                    if (workRepeat > 1)
                                    {
                                        actions.Add(() =>
                                        {
                                            for (int r = 1; r <= workRepeat; r++)
                                            {
                                                t.DoWork();

                                                if (r == workRepeat)
                                                {
                                                    break;
                                                }
                                                if (workIdleSleepTime < 0)
                                                {
                                                    continue;
                                                }

                                                Thread.Sleep(workIdleSleepTime);
                                            }
                                        });
                                    }
                                    else
                                    {
                                        if (workIdleSleepTime < 0)
                                        {
                                            actions.Add(t.DoWork);
                                        }
                                        else
                                        {
                                            actions.Add(() =>
                                            {
                                                t.DoWork();
                                                Thread.Sleep(workIdleSleepTime);
                                            });
                                        }
                                    }
                                }
                            }

                            f          = 0;
                            arrActions = actions.ToArray();
                            continue;
                        }
                    }

                    lock (lockObj)
                    {
                        if (Tenants.Count == 0)
                        {
                            Thread.Sleep(5);
                        }
                        else if (many)
                        {
                            Parallel.Invoke(arrActions);
                            if (sleepDivisor < 0)
                            {
                                continue;
                            }
                        }
                        else
                        {
                            Tenants[0].DoWork();
                        }
                    }

                    if (f == sleepDivisor)
                    {
                        Thread.Sleep(idleSleepTime);
                        f = 0;
                    }
                    else
                    {
                        f++;
                    }
                }
            }
 public virtual void AddEntry(IDistributable entry)
 {
     Contents.Add(entry);
     entry.Attach(this);
 }
Example #27
        private static void SubmitViaAPI3(ClusterSubmitterArgs clusterArgs, IDistributable distributableObj)
        {
            Console.WriteLine(string.Format("Connecting to cluster {0} using API version 3 .", clusterArgs.Cluster));

            using (IScheduler scheduler = new Scheduler())
            {
                scheduler.Connect(clusterArgs.Cluster);
                ISchedulerJob job = scheduler.CreateJob();

                job.Name     = distributableObj.JobName;
                job.Priority = clusterArgs.Priority;

                if (clusterArgs.JobTemplate != null)
                {
                    Microsoft.Hpc.Scheduler.IStringCollection jobTemplates = scheduler.GetJobTemplateList();
                    string decodedJobTemplate = System.Web.HttpUtility.UrlDecode(clusterArgs.JobTemplate);
                    if (jobTemplates.Contains(decodedJobTemplate))
                    {
                        job.SetJobTemplate(decodedJobTemplate);
                    }
                    else
                    {
                        Console.WriteLine(string.Format(Resource.Job_template, decodedJobTemplate));
                        foreach (var template in jobTemplates)
                        {
                            Console.Write("'" + template + "' ");
                        }
                        Console.WriteLine(Resource.SubmitViaAPI3);
                    }
                }


                if (clusterArgs.NumCoresPerTask != null)
                {
                    clusterArgs.IsExclusive = false;
                }

                IStringCollection nodesToUse = null;

                if (clusterArgs.NodeExclusionList != null && clusterArgs.NodeExclusionList.Count > 0)
                {
                    nodesToUse = GetNodesToUse(clusterArgs, scheduler, job);
                }
                else if (clusterArgs.NodesToUseList != null && clusterArgs.NodesToUseList.Count > 0)
                {
                    nodesToUse = scheduler.CreateStringCollection();
                    foreach (string nodeName in clusterArgs.NodesToUseList)
                    {
                        nodesToUse.Add(nodeName);
                    }
                }
                else if (clusterArgs.NumCoresPerTask != null)
                {
                    job.AutoCalculateMax = true;
                    job.AutoCalculateMin = true;
                }
                else if (clusterArgs.IsExclusive)
                {
                    job.UnitType = Microsoft.Hpc.Scheduler.Properties.JobUnitType.Node;
                    if (clusterArgs.MinimumNumberOfNodes != null)
                    {
                        job.MaximumNumberOfNodes = clusterArgs.MaximumNumberOfNodes.Value;
                        job.MinimumNumberOfNodes = clusterArgs.MinimumNumberOfNodes.Value;
                    }
                }
                else if (clusterArgs.MinimumNumberOfCores != null)
                {
                    if (clusterArgs.MaximumNumberOfCores == null)
                    {
                        job.AutoCalculateMax = true;
                    }
                    else
                    {
                        job.AutoCalculateMax     = false;
                        job.MaximumNumberOfCores = clusterArgs.MaximumNumberOfCores.Value;
                    }
                    job.MaximumNumberOfCores = clusterArgs.MaximumNumberOfCores ?? Math.Max(clusterArgs.TaskCount, scheduler.GetCounters().TotalCores);
                    job.MinimumNumberOfCores = clusterArgs.MinimumNumberOfCores.Value;
                    job.AutoCalculateMin     = false;
                }
                else
                {
                    job.AutoCalculateMax = true;
                    job.AutoCalculateMin = true;
                }

                if (!clusterArgs.OnlyDoCleanup)
                {
                    if (clusterArgs.TaskRange.IsContiguous())
                    {
                        if (clusterArgs.TaskRange.LastElement > clusterArgs.TaskCount - 1)
                        {
                            clusterArgs.TaskRange = new RangeCollection(clusterArgs.TaskRange.FirstElement, clusterArgs.TaskCount - 1);
                        }
                        ISchedulerTask task = CreateTask(null, clusterArgs, job, distributableObj, nodesToUse);

                        task.Type = TaskType.ParametricSweep;

                        task.StartValue = 0;
                        task.EndValue   = clusterArgs.TaskCount - 1;

                        job.AddTask(task);
                    }
                    else
                    {
                        job.AddTasks(clusterArgs.TaskRange.Select(taskNum => CreateTask((int)taskNum, clusterArgs, job, distributableObj, nodesToUse)).ToArray());
                    }
                }
                else
                {
                    clusterArgs.Cleanup = true;
                }

                ISchedulerTask cleanupTask = null;
                if (clusterArgs.Cleanup)
                {
                    cleanupTask = AddCleanupTaskToJob(clusterArgs, scheduler, job, distributableObj);
                }

                Console.WriteLine(Resource.Submitting_job);
                scheduler.SubmitJob(job, null, null);
                clusterArgs.JobID = job.Id;
                Console.WriteLine(job.Name + Resource.submitted);
            }
        }
 public virtual void AddEntry(IDistributable entry, double probability)
 {
     AddEntry(entry);
     entry.Probability = probability;
 }
 public virtual void RemoveEntry(IDistributable entry)
 {
     Contents.Remove(entry);
     entry.Detach(this);
 }
Example #30
 /// <summary>
 /// Distributes the task over HPC
 /// </summary>
 /// <param name="distributableObject">distributable tasks</param>
 public virtual void Distribute(IDistributable distributableObject)
 {
     this.Name = distributableObject.JobName;
     ClusterSubmitter.Submit(this, distributableObject);
 }