/// <summary>
/// Builds a fully-stubbed mock <c>Job</c> for tests: a real PB-backed JobId
/// (app id derived from the current time, job id 1), a mocked JobReport with
/// fixed start/finish times, and canned name/user/queue/state/task counters.
/// </summary>
/// <returns>a Mockito mock of <c>Job</c> with all getters stubbed</returns>
private Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job GetJob()
{
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    // Build a real (non-mock) JobId so that id formatting/serialization works.
    JobId jobId = new JobIdPBImpl();
    ApplicationId appId = ApplicationIdPBImpl.NewInstance(Runtime.CurrentTimeMillis(), 4);
    jobId.SetAppId(appId);
    jobId.SetId(1);
    Org.Mockito.Mockito.When(job.GetID()).ThenReturn(jobId);
    // Mocked report with fixed timestamps (elapsed time of 5 ms).
    JobReport report = Org.Mockito.Mockito.Mock<JobReport>();
    Org.Mockito.Mockito.When(report.GetStartTime()).ThenReturn(100010L);
    Org.Mockito.Mockito.When(report.GetFinishTime()).ThenReturn(100015L);
    Org.Mockito.Mockito.When(job.GetReport()).ThenReturn(report);
    Org.Mockito.Mockito.When(job.GetName()).ThenReturn("JobName");
    Org.Mockito.Mockito.When(job.GetUserName()).ThenReturn("UserName");
    Org.Mockito.Mockito.When(job.GetQueueName()).ThenReturn("QueueName");
    Org.Mockito.Mockito.When(job.GetState()).ThenReturn(JobState.Succeeded);
    Org.Mockito.Mockito.When(job.GetTotalMaps()).ThenReturn(3);
    Org.Mockito.Mockito.When(job.GetCompletedMaps()).ThenReturn(2);
    Org.Mockito.Mockito.When(job.GetTotalReduces()).ThenReturn(2);
    // NOTE: the original stubbed GetCompletedReduces() twice with the same
    // value; the redundant duplicate stubbing has been removed.
    Org.Mockito.Mockito.When(job.GetCompletedReduces()).ThenReturn(1);
    return job;
}
/// <summary>
/// Populates job summary fields from a <c>Job</c>. Attempt counters,
/// average times, uberized flag, diagnostics and ACLs are filled in only
/// for history-server jobs (<c>CompletedJob</c>).
/// </summary>
/// <param name="job">the job to summarize</param>
public JobInfo(Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job)
{
    this.id = MRApps.ToString(job.GetID());
    JobReport jobReport = job.GetReport();
    this.mapsTotal = job.GetTotalMaps();
    this.mapsCompleted = job.GetCompletedMaps();
    this.reducesTotal = job.GetTotalReduces();
    this.reducesCompleted = job.GetCompletedReduces();
    this.submitTime = jobReport.GetSubmitTime();
    this.startTime = jobReport.GetStartTime();
    this.finishTime = jobReport.GetFinishTime();
    this.name = job.GetName().ToString();
    this.queue = job.GetQueueName();
    this.user = job.GetUserName();
    this.state = job.GetState().ToString();
    this.acls = new AList<ConfEntryInfo>();
    // Everything below is only meaningful for completed (history) jobs.
    if (!(job is CompletedJob))
    {
        return;
    }
    // Zero the aggregates before CountTasksAndAttempts accumulates into them.
    avgMapTime = 0L;
    avgReduceTime = 0L;
    avgShuffleTime = 0L;
    avgMergeTime = 0L;
    failedReduceAttempts = 0;
    killedReduceAttempts = 0;
    successfulReduceAttempts = 0;
    failedMapAttempts = 0;
    killedMapAttempts = 0;
    successfulMapAttempts = 0;
    CountTasksAndAttempts(job);
    this.uberized = job.IsUber();
    // Concatenate all diagnostic messages into a single string.
    this.diagnostics = string.Empty;
    IList<string> diagList = job.GetDiagnostics();
    if (diagList != null && !diagList.IsEmpty())
    {
        StringBuilder sb = new StringBuilder();
        foreach (string message in diagList)
        {
            sb.Append(message);
        }
        this.diagnostics = sb.ToString();
    }
    // Expose the job ACLs as displayable name/value entries.
    IDictionary<JobACL, AccessControlList> aclMap = job.GetJobACLs();
    if (aclMap != null)
    {
        foreach (KeyValuePair<JobACL, AccessControlList> aclEntry in aclMap)
        {
            this.acls.AddItem(new ConfEntryInfo(aclEntry.Key.GetAclName(), aclEntry.Value.GetAclString()));
        }
    }
}
/// <summary>
/// Populates job info for display. Basic progress fields are visible to any
/// user; attempt counters, diagnostics and ACLs are filled in only when the
/// caller has access.
/// </summary>
/// <param name="job">the job to summarize</param>
/// <param name="hasAccess">whether the caller may see ACL-protected details</param>
public JobInfo(Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job, bool hasAccess)
{
    // ok for any user to see
    this.id = MRApps.ToString(job.GetID());
    JobReport jobReport = job.GetReport();
    this.startTime = jobReport.GetStartTime();
    this.finishTime = jobReport.GetFinishTime();
    this.elapsedTime = Times.Elapsed(this.startTime, this.finishTime);
    if (this.elapsedTime == -1)
    {
        this.elapsedTime = 0;
    }
    this.name = job.GetName().ToString();
    this.user = job.GetUserName();
    this.state = job.GetState();
    this.mapsTotal = job.GetTotalMaps();
    this.mapsCompleted = job.GetCompletedMaps();
    this.mapProgress = jobReport.GetMapProgress() * 100;
    this.mapProgressPercent = StringHelper.Percent(jobReport.GetMapProgress());
    this.reducesTotal = job.GetTotalReduces();
    this.reducesCompleted = job.GetCompletedReduces();
    this.reduceProgress = jobReport.GetReduceProgress() * 100;
    this.reduceProgressPercent = StringHelper.Percent(jobReport.GetReduceProgress());
    this.acls = new AList<ConfEntryInfo>();
    // these should only be seen if acls allow
    if (!hasAccess)
    {
        return;
    }
    this.diagnostics = string.Empty;
    CountTasksAndAttempts(job);
    this.uberized = job.IsUber();
    // Concatenate all diagnostic messages into a single string.
    IList<string> diagList = job.GetDiagnostics();
    if (diagList != null && !diagList.IsEmpty())
    {
        StringBuilder sb = new StringBuilder();
        foreach (string message in diagList)
        {
            sb.Append(message);
        }
        this.diagnostics = sb.ToString();
    }
    // Expose the job ACLs as displayable name/value entries.
    IDictionary<JobACL, AccessControlList> aclMap = job.GetJobACLs();
    if (aclMap != null)
    {
        foreach (KeyValuePair<JobACL, AccessControlList> aclEntry in aclMap)
        {
            this.acls.AddItem(new ConfEntryInfo(aclEntry.Key.GetAclName(), aclEntry.Value.GetAclString()));
        }
    }
}
/// <summary>
/// Returns the runtime threshold beyond which an attempt of the given task
/// is considered slow, or <c>long.MaxValue</c> when there is not yet enough
/// completed-task data to speculate.
/// </summary>
/// <param name="taskID">the task whose threshold is requested</param>
/// <returns>the outlier runtime threshold, or <c>long.MaxValue</c></returns>
public virtual long ThresholdRuntime(TaskId taskID)
{
    JobId jobID = taskID.GetJobId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobID);
    TaskType type = taskID.GetTaskType();
    DataStatistics statistics = DataStatisticsForTask(taskID);
    bool isMap = type == TaskType.Map;
    int completedOfType = isMap ? job.GetCompletedMaps() : job.GetCompletedReduces();
    int totalOfType = isMap ? job.GetTotalMaps() : job.GetTotalReduces();
    // Not enough absolute completions yet — refuse to speculate.
    if (completedOfType < MinimumCompleteNumberToSpeculate)
    {
        return long.MaxValue;
    }
    // Not a large enough fraction of the job has completed yet.
    if (((float)completedOfType) / totalOfType < MinimumCompleteProportionToSpeculate)
    {
        return long.MaxValue;
    }
    // No statistics collected for this task — nothing to base a threshold on.
    if (statistics == null)
    {
        return long.MaxValue;
    }
    return (long)statistics.Outlier(slowTaskRelativeTresholds[job]);
}
// Runs one uber subtask in-process: announces the attempt as launched, updates
// the uber-task job counters, delegates to RunSubtask, and translates failures
// into the appropriate attempt events. Finally removes the attempt's future
// from the tracking map.
// NOTE(review): ordering is significant — GetRemoteTask() must be read before
// the launched event is handled (see comment below).
private void RunTask(ContainerRemoteLaunchEvent launchEv, IDictionary<TaskAttemptID, MapOutputFile> localMapFiles)
{
    TaskAttemptId attemptID = launchEv.GetTaskAttemptID();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        this._enclosing.context.GetAllJobs()[attemptID.GetTaskId().GetJobId()];
    int numMapTasks = job.GetTotalMaps();
    int numReduceTasks = job.GetTotalReduces();
    // YARN (tracking) Task:
    Task ytask = job.GetTask(attemptID.GetTaskId());
    // classic mapred Task:
    Task remoteTask = launchEv.GetRemoteTask();
    // after "launching," send launched event to task attempt to move
    // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
    // do getRemoteTask() call first)
    // There is no port number because we are not really talking to a task
    // tracker. The shuffle is just done through local files. So the
    // port number is set to -1 in this case.
    this._enclosing.context.GetEventHandler().Handle(
        new TaskAttemptContainerLaunchedEvent(attemptID, -1));
    if (numMapTasks == 0)
    {
        this.doneWithMaps = true;
    }
    try
    {
        if (remoteTask.IsMapOrReduce())
        {
            // Count this launch in the uber-task job counters.
            JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.GetTaskId().GetJobId());
            jce.AddCounterUpdate(JobCounter.TotalLaunchedUbertasks, 1);
            if (remoteTask.IsMapTask())
            {
                jce.AddCounterUpdate(JobCounter.NumUberSubmaps, 1);
            }
            else
            {
                jce.AddCounterUpdate(JobCounter.NumUberSubreduces, 1);
            }
            this._enclosing.context.GetEventHandler().Handle(jce);
        }
        this.RunSubtask(remoteTask, ytask.GetType(), attemptID, numMapTasks,
            (numReduceTasks > 0), localMapFiles);
    }
    catch (RuntimeException)
    {
        JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.GetTaskId().GetJobId());
        jce.AddCounterUpdate(JobCounter.NumFailedUbertasks, 1);
        this._enclosing.context.GetEventHandler().Handle(jce);
        // this is our signal that the subtask failed in some way, so
        // simulate a failed JVM/container and send a container-completed
        // event to task attempt (i.e., move state machine from RUNNING
        // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
        this._enclosing.context.GetEventHandler().Handle(
            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TaContainerCompleted));
    }
    catch (IOException ioe)
    {
        // if umbilical itself barfs (in error-handler of runSubMap()),
        // we're pretty much hosed, so do what YarnChild main() does
        // (i.e., exit clumsily--but can never happen, so no worries!)
        LocalContainerLauncher.Log.Fatal("oopsie... this can never happen: " + StringUtils
            .StringifyException(ioe));
        ExitUtil.Terminate(-1);
    }
    finally
    {
        // remove my future
        if (Sharpen.Collections.Remove(this.futures, attemptID) != null)
        {
            LocalContainerLauncher.Log.Info("removed attempt " + attemptID + " from the futures to keep track of");
        }
    }
}
/// <summary>
/// Reports the number of completed reduces. On the history server the job
/// has finished, so this always equals the total reduce count (PartialJob
/// makes the same completed-equals-total assumption).
/// </summary>
/// <returns>the total number of reduces for this job</returns>
public override int GetCompletedReduces()
{
    int totalReduces = job.GetTotalReduces();
    return totalReduces;
}
/// <summary>Delegates the total-reduce count to the underlying mocked job.</summary>
/// <returns>the mocked job's total number of reduces</returns>
public virtual int GetTotalReduces()
{
    int totalReduces = mockJob.GetTotalReduces();
    return totalReduces;
}
/// <summary>
/// Verifies that a container clean-up event delivered while a local (uber)
/// map task is still running causes the launcher to kill the attempt: a
/// never-ending mocked MapTask is launched, a cleanup event is pumped in,
/// and the test waits on a latch tripped by the resulting attempt event.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestKillJob()
{
    JobConf conf = new JobConf();
    AppContext context = Org.Mockito.Mockito.Mock<AppContext>();
    // a simple event handler solely to detect the container cleaned event
    CountDownLatch isDone = new CountDownLatch(1);
    EventHandler handler = new _EventHandler_106(isDone);
    Org.Mockito.Mockito.When(context.GetEventHandler()).ThenReturn(handler);
    // create and start the launcher
    LocalContainerLauncher launcher = new LocalContainerLauncher(context,
        Org.Mockito.Mockito.Mock<TaskUmbilicalProtocol>());
    launcher.Init(conf);
    launcher.Start();
    // create mocked job, task, and task attempt
    // a single-mapper job
    JobId jobId = MRBuilderUtils.NewJobId(Runtime.CurrentTimeMillis(), 1, 1);
    TaskId taskId = MRBuilderUtils.NewTaskId(jobId, 1, TaskType.Map);
    TaskAttemptId taId = MRBuilderUtils.NewTaskAttemptId(taskId, 0);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Org.Mockito.Mockito.When(job.GetTotalMaps()).ThenReturn(1);
    Org.Mockito.Mockito.When(job.GetTotalReduces()).ThenReturn(0);
    IDictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job> jobs =
        new Dictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    jobs[jobId] = job;
    // app context returns the one and only job
    Org.Mockito.Mockito.When(context.GetAllJobs()).ThenReturn(jobs);
    Task ytask = Org.Mockito.Mockito.Mock<Task>();
    Org.Mockito.Mockito.When(ytask.GetType()).ThenReturn(TaskType.Map);
    Org.Mockito.Mockito.When(job.GetTask(taskId)).ThenReturn(ytask);
    // create a sleeping mapper that runs beyond the test timeout
    MapTask mapTask = Org.Mockito.Mockito.Mock<MapTask>();
    Org.Mockito.Mockito.When(mapTask.IsMapOrReduce()).ThenReturn(true);
    Org.Mockito.Mockito.When(mapTask.IsMapTask()).ThenReturn(true);
    TaskAttemptID taskID = TypeConverter.FromYarn(taId);
    Org.Mockito.Mockito.When(mapTask.GetTaskID()).ThenReturn(taskID);
    Org.Mockito.Mockito.When(mapTask.GetJobID()).ThenReturn(((JobID)taskID.GetJobID()));
    // sleep for a long time
    Org.Mockito.Mockito.DoAnswer(new _Answer_152()).When(mapTask).Run(
        Matchers.IsA<JobConf>(), Matchers.IsA<TaskUmbilicalProtocol>());
    // pump in a task attempt launch event
    ContainerLauncherEvent launchEvent = new ContainerRemoteLaunchEvent(taId, null,
        CreateMockContainer(), mapTask);
    launcher.Handle(launchEvent);
    Sharpen.Thread.Sleep(200);
    // now pump in a container clean-up event
    ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null,
        null, ContainerLauncher.EventType.ContainerRemoteCleanup);
    launcher.Handle(cleanupEvent);
    // wait for the event to fire: this should be received promptly
    isDone.Await();
    launcher.Close();
}