public virtual void SetUp() {
    AppContext context = Org.Mockito.Mockito.Mock<AppContext>();
    Org.Mockito.Mockito.When(context.GetApplicationID()).ThenReturn(ApplicationId.NewInstance(0, 0));
    Org.Mockito.Mockito.When(context.GetApplicationName()).ThenReturn("AppName");
    Org.Mockito.Mockito.When(context.GetUser()).ThenReturn("User");
    Org.Mockito.Mockito.When(context.GetStartTime()).ThenReturn(Runtime.CurrentTimeMillis());
    job = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Task task = Org.Mockito.Mockito.Mock<Task>();
    Org.Mockito.Mockito.When(job.GetTask(Any<TaskId>())).ThenReturn(task);
    JobId jobID = MRApps.ToJobID("job_01_01");
    Org.Mockito.Mockito.When(context.GetJob(jobID)).ThenReturn(job);
    Org.Mockito.Mockito.When(job.CheckAccess(Any<UserGroupInformation>(), Any<JobACL>())).ThenReturn(true);
    Org.Apache.Hadoop.Mapreduce.V2.App.Webapp.App app = new Org.Apache.Hadoop.Mapreduce.V2.App.Webapp.App(context);
    Configuration configuration = new Configuration();
    ctx = Org.Mockito.Mockito.Mock<Controller.RequestContext>();
    appController = new AppControllerForTest(app, configuration, ctx);
    appController.GetProperty()[AMParams.JobId] = "job_01_01";
    appController.GetProperty()[AMParams.TaskId] = "task_01_01_m01_01";
}
/// <summary>Absorbs one TaskAttemptStatus</summary>
/// <param name="reportedStatus">
/// the status report that we got from a task attempt
/// that we want to fold into the speculation data for this job
/// </param>
/// <param name="timestamp">
/// the time this status corresponds to. This matters
/// because statuses contain progress.
/// </param>
protected internal virtual void StatusUpdate(TaskAttemptStatusUpdateEvent.TaskAttemptStatus reportedStatus, long timestamp) {
    string stateString = reportedStatus.taskState.ToString();
    TaskAttemptId attemptID = reportedStatus.id;
    TaskId taskID = attemptID.GetTaskId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(taskID.GetJobId());
    if (job == null) {
        return;
    }
    Task task = job.GetTask(taskID);
    if (task == null) {
        return;
    }
    estimator.UpdateAttempt(reportedStatus, timestamp);
    if (stateString.Equals(TaskAttemptState.Running.ToString())) {
        runningTasks.PutIfAbsent(taskID, true);
    } else {
        runningTasks.Remove(taskID, true);
        if (!stateString.Equals(TaskAttemptState.Starting.ToString())) {
            Sharpen.Collections.Remove(runningTaskAttemptStatistics, attemptID);
        }
    }
}
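// Hypothetical driver showing how StatusUpdate above is expected to be fed.
// This is a sketch only: the speculator/clock wiring and variable names are
// assumptions, but the fields set below are the ones the speculator methods
// in this file actually read.
TaskAttemptStatusUpdateEvent.TaskAttemptStatus reported = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
reported.id = attemptID;                       // the attempt being reported on
reported.progress = 0.5f;                      // fraction of work completed
reported.taskState = TaskAttemptState.Running; // keeps taskID in runningTasks
speculator.StatusUpdate(reported, clock.GetTime());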
public virtual void UpdateAttempt(TaskAttemptStatusUpdateEvent.TaskAttemptStatus status, long timestamp) {
    TaskAttemptId attemptID = status.id;
    TaskId taskID = attemptID.GetTaskId();
    JobId jobID = taskID.GetJobId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobID);
    if (job == null) {
        return;
    }
    Task task = job.GetTask(taskID);
    if (task == null) {
        return;
    }
    // startTimes may not yet hold this attempt; use a nullable long so the
    // missing-key case is representable (a bare long cannot be null in C#)
    long? boxedStart = startTimes[attemptID];
    long start = boxedStart == null ? long.MinValue : boxedStart.Value;
    TaskAttempt taskAttempt = task.GetAttempt(attemptID);
    if (taskAttempt.GetState() == TaskAttemptState.Succeeded) {
        bool isNew = false;
        // is this a new success?
        lock (doneTasks) {
            if (!doneTasks.Contains(task)) {
                doneTasks.AddItem(task);
                isNew = true;
            }
        }
        // It's a new completion.
        // Note that if a task completes twice [because of a previous speculation
        // and a race, or a success followed by loss of the machine with the
        // local data] we only count the first one.
        if (isNew) {
            long finish = timestamp;
            if (start > 1L && finish > 1L && start <= finish) {
                long duration = finish - start;
                DataStatistics statistics = DataStatisticsForTask(taskID);
                if (statistics != null) {
                    statistics.Add(duration);
                }
            }
        }
    }
}
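// A minimal sketch (not the production class) of the running-statistics
// container that the statistics.Add(duration) call above feeds: Hadoop's
// DataStatistics keeps a count, a sum, and a sum of squares so mean and
// variance are O(1) to read.
internal class DataStatisticsSketch {
    private long count;
    private double sum;
    private double sumSquares;

    public void Add(double value) {
        count++;
        sum += value;
        sumSquares += value * value;
    }

    public double Mean() {
        return count == 0 ? 0.0 : sum / count;
    }

    public double Var() {
        // E[X^2] - E[X]^2, clamped at zero to absorb floating-point error
        double mean = Mean();
        return count == 0 ? 0.0 : System.Math.Max(sumSquares / count - mean * mean, 0.0);
    }
}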
/// <exception cref="System.IO.IOException"/>
public virtual GetTaskReportResponse GetTaskReport(GetTaskReportRequest request) {
    TaskId taskId = request.GetTaskId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = this.VerifyAndGetJob(taskId.GetJobId(), true);
    GetTaskReportResponse response = this.recordFactory.NewRecordInstance<GetTaskReportResponse>();
    response.SetTaskReport(job.GetTask(taskId).GetReport());
    return response;
}
internal virtual bool CanSpeculate(AppContext context, TaskId taskID) {
    // This class rejects speculating any task that already has speculations,
    // or isn't running.
    // Subclasses should call TaskSpeculationPredicate.CanSpeculate(...), but
    // can be even more restrictive.
    JobId jobID = taskID.GetJobId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobID);
    Task task = job.GetTask(taskID);
    return task.GetAttempts().Count == 1;
}
protected internal virtual DataStatistics DataStatisticsForTask(TaskId taskID) {
    JobId jobID = taskID.GetJobId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobID);
    if (job == null) {
        return null;
    }
    Task task = job.GetTask(taskID);
    if (task == null) {
        return null;
    }
    return task.GetType() == TaskType.Map
        ? mapperStatistics[job]
        : task.GetType() == TaskType.Reduce
            ? reducerStatistics[job]
            : null;
}
/// <exception cref="System.IO.IOException"/>
public virtual void TestCommitWindow() {
    SystemClock clock = new SystemClock();
    Task mockTask = Org.Mockito.Mockito.Mock<Task>();
    Org.Mockito.Mockito.When(mockTask.CanCommit(Matchers.Any<TaskAttemptId>())).ThenReturn(true);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job mockJob = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Org.Mockito.Mockito.When(mockJob.GetTask(Matchers.Any<TaskId>())).ThenReturn(mockTask);
    AppContext appCtx = Org.Mockito.Mockito.Mock<AppContext>();
    Org.Mockito.Mockito.When(appCtx.GetJob(Matchers.Any<JobId>())).ThenReturn(mockJob);
    Org.Mockito.Mockito.When(appCtx.GetClock()).ThenReturn(clock);
    JobTokenSecretManager secret = Org.Mockito.Mockito.Mock<JobTokenSecretManager>();
    RMHeartbeatHandler rmHeartbeatHandler = Org.Mockito.Mockito.Mock<RMHeartbeatHandler>();
    TaskHeartbeatHandler hbHandler = Org.Mockito.Mockito.Mock<TaskHeartbeatHandler>();
    TaskAttemptListenerImpl listener = new _MockTaskAttemptListenerImpl_254(hbHandler, appCtx, secret, rmHeartbeatHandler);
    Configuration conf = new Configuration();
    listener.Init(conf);
    listener.Start();
    // verify commit not allowed when RM heartbeat has not occurred recently
    TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.Reduce, 1, 0);
    bool canCommit = listener.CanCommit(tid);
    NUnit.Framework.Assert.IsFalse(canCommit);
    Org.Mockito.Mockito.Verify(mockTask, Org.Mockito.Mockito.Never()).CanCommit(Matchers.Any<TaskAttemptId>());
    // verify commit allowed when RM heartbeat is recent
    Org.Mockito.Mockito.When(rmHeartbeatHandler.GetLastHeartbeatTime()).ThenReturn(clock.GetTime());
    canCommit = listener.CanCommit(tid);
    NUnit.Framework.Assert.IsTrue(canCommit);
    Org.Mockito.Mockito.Verify(mockTask, Org.Mockito.Mockito.Times(1)).CanCommit(Matchers.Any<TaskAttemptId>());
    listener.Stop();
}
public virtual JobTaskCounterInfo GetSingleTaskCounters(HttpServletRequest hsr, string jid, string tid) {
    Init();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = AMWebServices.GetJobFromJobIdString(jid, ctx);
    CheckAccess(job, hsr);
    TaskId taskID = MRApps.ToTaskID(tid);
    if (taskID == null) {
        throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.GetTask(taskID);
    if (task == null) {
        throw new NotFoundException("task not found with id " + tid);
    }
    return new JobTaskCounterInfo(task);
}
/// <summary>
/// Convert a task id string to an actual task and handle all the error
/// checking.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Yarn.Webapp.NotFoundException"/>
public static Task GetTaskFromTaskIdString(string tid, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job) {
    TaskId taskID;
    Task task;
    try {
        taskID = MRApps.ToTaskID(tid);
    } catch (YarnRuntimeException e) {
        // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
        // anymore but keeping it for now just in case other stuff starts failing.
        // Also, the webservice should ideally return BadRequest (HTTP:400) when
        // the id is malformed instead of NotFound (HTTP:404). The webserver on
        // top of which AMWebServices is built seems to automatically do that for
        // unhandled exceptions
        throw new NotFoundException(e.Message);
    } catch (FormatException ne) {
        throw new NotFoundException(ne.Message);
    } catch (ArgumentException e) {
        throw new NotFoundException(e.Message);
    }
    if (taskID == null) {
        throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    task = job.GetTask(taskID);
    if (task == null) {
        throw new NotFoundException("task not found with id " + tid);
    }
    return task;
}
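// Typical call site (a sketch; GetJobFromJobIdString appears elsewhere in
// this codebase with this signature): resolve the job first, then let the
// helper above translate and validate the task id, surfacing any problem as
// a NotFoundException that the web layer maps to HTTP 404.
Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = AMWebServices.GetJobFromJobIdString(jid, ctx);
Task task = AMWebServices.GetTaskFromTaskIdString(tid, job);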
private long StoredPerAttemptValue(IDictionary<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
    TaskId taskID = attemptID.GetTaskId();
    JobId jobID = taskID.GetJobId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobID);
    Task task = job.GetTask(taskID);
    if (task == null) {
        return -1L;
    }
    TaskAttempt taskAttempt = task.GetAttempt(attemptID);
    if (taskAttempt == null) {
        return -1L;
    }
    AtomicLong estimate = data[taskAttempt];
    return estimate == null ? -1L : estimate.Get();
}
/// <summary>Child checking whether it can commit.</summary>
/// <remarks>
/// Child checking whether it can commit.
/// <br />
/// Commit is a two-phased protocol. First the attempt informs the
/// ApplicationMaster that it is commit-pending via
/// <see cref="CommitPending(TaskAttemptID, TaskStatus)"/>. Then it repeatedly
/// polls the ApplicationMaster via
/// <see cref="CanCommit(TaskAttemptID)"/> until commit is granted. This is
/// a legacy from the centralized commit protocol handling by the JobTracker.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
public virtual bool CanCommit(TaskAttemptID taskAttemptID) {
    Log.Info("Commit go/no-go request from " + taskAttemptID.ToString());
    // An attempt is asking if it can commit its output. This can be decided
    // only by the task which is managing the multiple attempts. So redirect the
    // request there.
    TaskAttemptId attemptID = TypeConverter.ToYarn(taskAttemptID);
    taskHeartbeatHandler.Progressing(attemptID);
    // tell task to retry later if AM has not heard from RM within the commit
    // window to help avoid double-committing in a split-brain situation
    long now = context.GetClock().GetTime();
    if (now - rmHeartbeatHandler.GetLastHeartbeatTime() > commitWindowMs) {
        return false;
    }
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(attemptID.GetTaskId().GetJobId());
    Task task = job.GetTask(attemptID.GetTaskId());
    return task.CanCommit(attemptID);
}
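// Child-side view of the two-phase protocol described above (a sketch: the
// real loop lives in the task runtime, and the one-second poll interval is
// an assumption). Signal commit-pending once, then poll CanCommit until the
// AM, via the task that owns the attempts, grants the commit.
umbilical.CommitPending(taskAttemptID, taskStatus);
while (!umbilical.CanCommit(taskAttemptID)) {
    Sharpen.Thread.Sleep(1000); // retry later; the AM may be inside the RM-heartbeat commit window
}
// only now is it safe to promote this attempt's output to its final location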
public virtual Task GetTask(TaskId taskID) {
    return mockJob.GetTask(taskID);
}
/// <exception cref="System.Exception"/>
public virtual void TestKillJob() {
    JobConf conf = new JobConf();
    AppContext context = Org.Mockito.Mockito.Mock<AppContext>();
    // a simple event handler solely to detect the container cleaned event
    CountDownLatch isDone = new CountDownLatch(1);
    EventHandler handler = new _EventHandler_106(isDone);
    Org.Mockito.Mockito.When(context.GetEventHandler()).ThenReturn(handler);
    // create and start the launcher
    LocalContainerLauncher launcher = new LocalContainerLauncher(context, Org.Mockito.Mockito.Mock<TaskUmbilicalProtocol>());
    launcher.Init(conf);
    launcher.Start();
    // create mocked job, task, and task attempt
    // a single-mapper job
    JobId jobId = MRBuilderUtils.NewJobId(Runtime.CurrentTimeMillis(), 1, 1);
    TaskId taskId = MRBuilderUtils.NewTaskId(jobId, 1, TaskType.Map);
    TaskAttemptId taId = MRBuilderUtils.NewTaskAttemptId(taskId, 0);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Org.Mockito.Mockito.When(job.GetTotalMaps()).ThenReturn(1);
    Org.Mockito.Mockito.When(job.GetTotalReduces()).ThenReturn(0);
    IDictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job> jobs = new Dictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    jobs[jobId] = job;
    // app context returns the one and only job
    Org.Mockito.Mockito.When(context.GetAllJobs()).ThenReturn(jobs);
    Task ytask = Org.Mockito.Mockito.Mock<Task>();
    Org.Mockito.Mockito.When(ytask.GetType()).ThenReturn(TaskType.Map);
    Org.Mockito.Mockito.When(job.GetTask(taskId)).ThenReturn(ytask);
    // create a sleeping mapper that runs beyond the test timeout
    MapTask mapTask = Org.Mockito.Mockito.Mock<MapTask>();
    Org.Mockito.Mockito.When(mapTask.IsMapOrReduce()).ThenReturn(true);
    Org.Mockito.Mockito.When(mapTask.IsMapTask()).ThenReturn(true);
    TaskAttemptID taskID = TypeConverter.FromYarn(taId);
    Org.Mockito.Mockito.When(mapTask.GetTaskID()).ThenReturn(taskID);
    Org.Mockito.Mockito.When(mapTask.GetJobID()).ThenReturn((JobID)taskID.GetJobID());
    // the mocked Run() sleeps for a long time
    Org.Mockito.Mockito.DoAnswer(new _Answer_152()).When(mapTask).Run(Matchers.IsA<JobConf>(), Matchers.IsA<TaskUmbilicalProtocol>());
    // pump in a task attempt launch event
    ContainerLauncherEvent launchEvent = new ContainerRemoteLaunchEvent(taId, null, CreateMockContainer(), mapTask);
    launcher.Handle(launchEvent);
    Sharpen.Thread.Sleep(200);
    // now pump in a container clean-up event
    ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null, null, ContainerLauncher.EventType.ContainerRemoteCleanup);
    launcher.Handle(cleanupEvent);
    // wait for the event to fire: this should be received promptly
    isDone.Await();
    launcher.Close();
}
private void PopulateMembers(AppContext ctx) {
    JobId jobID = null;
    TaskId taskID = null;
    string tid = $(AMParams.TaskId);
    if (!tid.IsEmpty()) {
        taskID = MRApps.ToTaskID(tid);
        jobID = taskID.GetJobId();
    } else {
        string jid = $(AMParams.JobId);
        if (!jid.IsEmpty()) {
            jobID = MRApps.ToJobID(jid);
        }
    }
    if (jobID == null) {
        return;
    }
    job = ctx.GetJob(jobID);
    if (job == null) {
        return;
    }
    if (taskID != null) {
        task = job.GetTask(taskID);
        if (task == null) {
            return;
        }
        foreach (KeyValuePair<TaskAttemptId, TaskAttempt> entry in task.GetAttempts()) {
            long value = 0;
            Counters counters = entry.Value.GetCounters();
            CounterGroup group = (counters != null) ? counters.GetGroup($(AMParams.CounterGroup)) : null;
            if (group != null) {
                Counter c = group.FindCounter($(AMParams.CounterName));
                if (c != null) {
                    value = c.GetValue();
                }
            }
            values[MRApps.ToString(entry.Key)] = value;
        }
        return;
    }
    // Get all types of counters
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    foreach (KeyValuePair<TaskId, Task> entry in tasks) {
        long value = 0;
        Counters counters = entry.Value.GetCounters();
        CounterGroup group = (counters != null) ? counters.GetGroup($(AMParams.CounterGroup)) : null;
        if (group != null) {
            Counter c = group.FindCounter($(AMParams.CounterName));
            if (c != null) {
                value = c.GetValue();
            }
        }
        values[MRApps.ToString(entry.Key)] = value;
    }
}
/* ************************************************************* */
// This is the code section that runs periodically and adds speculations for
// those jobs that need them.
// This can return a few magic values for tasks that shouldn't speculate:
// returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
//    consider speculating this task
// returns ALREADY_SPECULATING if that is true. This has priority.
// returns TOO_NEW if our companion task hasn't gotten any information
// returns PROGRESS_IS_GOOD if the task is sailing through
// returns NOT_RUNNING if the task is not running
//
// All of these values are negative. Any value that should be allowed to
// speculate is 0 or positive.
private long SpeculationValue(TaskId taskID, long now) {
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(taskID.GetJobId());
    Task task = job.GetTask(taskID);
    IDictionary<TaskAttemptId, TaskAttempt> attempts = task.GetAttempts();
    long acceptableRuntime = long.MinValue;
    long result = long.MinValue;
    if (!mayHaveSpeculated.Contains(taskID)) {
        acceptableRuntime = estimator.ThresholdRuntime(taskID);
        if (acceptableRuntime == long.MaxValue) {
            return OnSchedule;
        }
    }
    TaskAttemptId runningTaskAttemptID = null;
    int numberRunningAttempts = 0;
    foreach (TaskAttempt taskAttempt in attempts.Values) {
        if (taskAttempt.GetState() == TaskAttemptState.Running || taskAttempt.GetState() == TaskAttemptState.Starting) {
            if (++numberRunningAttempts > 1) {
                return AlreadySpeculating;
            }
            runningTaskAttemptID = taskAttempt.GetID();
            long estimatedRunTime = estimator.EstimatedRuntime(runningTaskAttemptID);
            long taskAttemptStartTime = estimator.AttemptEnrolledTime(runningTaskAttemptID);
            if (taskAttemptStartTime > now) {
                // This background process ran before we could process the task
                // attempt status change that chronicles the attempt start
                return TooNew;
            }
            long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
            long estimatedReplacementEndTime = now + estimator.EstimatedNewAttemptRuntime(taskID);
            float progress = taskAttempt.GetProgress();
            DefaultSpeculator.TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics[runningTaskAttemptID];
            if (data == null) {
                runningTaskAttemptStatistics[runningTaskAttemptID] = new DefaultSpeculator.TaskAttemptHistoryStatistics(estimatedRunTime, progress, now);
            } else {
                if (estimatedRunTime == data.GetEstimatedRunTime() && progress == data.GetProgress()) {
                    // Stats are unchanged since the last update
                    if (data.NotHeartbeatedInAWhile(now)) {
                        // Stats have stagnated for a while, simulate heart-beat.
                        TaskAttemptStatusUpdateEvent.TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
                        taskAttemptStatus.id = runningTaskAttemptID;
                        taskAttemptStatus.progress = progress;
                        taskAttemptStatus.taskState = taskAttempt.GetState();
                        // Now simulate the heart-beat
                        HandleAttempt(taskAttemptStatus);
                    }
                } else {
                    // Stats have changed - update our data structure
                    data.SetEstimatedRunTime(estimatedRunTime);
                    data.SetProgress(progress);
                    data.ResetHeartBeatTime(now);
                }
            }
            if (estimatedEndTime < now) {
                return ProgressIsGood;
            }
            if (estimatedReplacementEndTime >= estimatedEndTime) {
                return TooLateToSpeculate;
            }
            result = estimatedEndTime - estimatedReplacementEndTime;
        }
    }
    // If we are here, there's at most one task attempt.
    if (numberRunningAttempts == 0) {
        return NotRunning;
    }
    if (acceptableRuntime == long.MinValue) {
        acceptableRuntime = estimator.ThresholdRuntime(taskID);
        if (acceptableRuntime == long.MaxValue) {
            return OnSchedule;
        }
    }
    return result;
}
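// Worked example of the decision arithmetic above (illustrative numbers
// only): an attempt that enrolled at t=0 and is estimated to run 400s has
// estimatedEndTime = 400s. At now = 100s, if a fresh attempt is expected to
// take 150s, then estimatedReplacementEndTime = 100 + 150 = 250s. Since
// 250 < 400, result = 400 - 250 = 150: positive, so this task is a
// speculation candidate, and larger gaps are more attractive to speculate.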
public override Task GetTask(TaskId taskId) {
    return job.GetTask(taskId);
}
private void GetCounters(AppContext ctx) {
    JobId jobID = null;
    TaskId taskID = null;
    string tid = $(AMParams.TaskId);
    if (!tid.IsEmpty()) {
        taskID = MRApps.ToTaskID(tid);
        jobID = taskID.GetJobId();
    } else {
        string jid = $(AMParams.JobId);
        if (jid != null && !jid.IsEmpty()) {
            jobID = MRApps.ToJobID(jid);
        }
    }
    if (jobID == null) {
        return;
    }
    job = ctx.GetJob(jobID);
    if (job == null) {
        return;
    }
    if (taskID != null) {
        task = job.GetTask(taskID);
        if (task == null) {
            return;
        }
        total = task.GetCounters();
        return;
    }
    // Get all types of counters
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    total = job.GetAllCounters();
    bool needTotalCounters = false;
    if (total == null) {
        total = new Counters();
        needTotalCounters = true;
    }
    map = new Counters();
    reduce = new Counters();
    foreach (Task t in tasks.Values) {
        Counters counters = t.GetCounters();
        if (counters == null) {
            continue;
        }
        switch (t.GetType()) {
            case TaskType.Map: {
                map.IncrAllCounters(counters);
                break;
            }
            case TaskType.Reduce: {
                reduce.IncrAllCounters(counters);
                break;
            }
        }
        if (needTotalCounters) {
            total.IncrAllCounters(counters);
        }
    }
}
public override void UpdateAttempt(TaskAttemptStatusUpdateEvent.TaskAttemptStatus status, long timestamp) {
    base.UpdateAttempt(status, timestamp);
    TaskAttemptId attemptID = status.id;
    TaskId taskID = attemptID.GetTaskId();
    JobId jobID = taskID.GetJobId();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobID);
    if (job == null) {
        return;
    }
    Task task = job.GetTask(taskID);
    if (task == null) {
        return;
    }
    TaskAttempt taskAttempt = task.GetAttempt(attemptID);
    if (taskAttempt == null) {
        return;
    }
    // use a nullable long: the attempt may not have a recorded start time yet,
    // and a bare long cannot be compared against null in C#
    long? boxedStart = startTimes[attemptID];
    long start = boxedStart == null ? long.MinValue : boxedStart.Value;
    // We need to do two things.
    // 1: If this is a completion, we accumulate statistics in the superclass
    // 2: If this is not a completion, we learn more about it.
    //
    // This is not a completion, but we're cooking.
    //
    if (taskAttempt.GetState() == TaskAttemptState.Running) {
        // See if this task is already in the registry
        AtomicLong estimateContainer = attemptRuntimeEstimates[taskAttempt];
        AtomicLong estimateVarianceContainer = attemptRuntimeEstimateVariances[taskAttempt];
        if (estimateContainer == null) {
            if (attemptRuntimeEstimates[taskAttempt] == null) {
                attemptRuntimeEstimates[taskAttempt] = new AtomicLong();
                estimateContainer = attemptRuntimeEstimates[taskAttempt];
            }
        }
        if (estimateVarianceContainer == null) {
            attemptRuntimeEstimateVariances.PutIfAbsent(taskAttempt, new AtomicLong());
            estimateVarianceContainer = attemptRuntimeEstimateVariances[taskAttempt];
        }
        long estimate = -1;
        long varianceEstimate = -1;
        // This code assumes that we'll never consider starting a third
        // speculative task attempt if two are already running for this task
        if (start > 0 && timestamp > start) {
            estimate = (long)((timestamp - start) / Math.Max(0.0001, status.progress));
            varianceEstimate = (long)(estimate * status.progress / 10);
        }
        if (estimateContainer != null) {
            estimateContainer.Set(estimate);
        }
        if (estimateVarianceContainer != null) {
            estimateVarianceContainer.Set(varianceEstimate);
        }
    }
}
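// Self-contained illustration of the extrapolation above, with hypothetical
// inputs (the 0.0001 floor guards against division by zero at 0% progress):
long start = 10000L;      // when the attempt was first seen
long timestamp = 40000L;  // time of this status report
float progress = 0.25f;   // fraction of work done so far
long estimate = (long)((timestamp - start) / Math.Max(0.0001, progress));
long varianceEstimate = (long)(estimate * progress / 10);
// estimate == 120000 (projected total runtime in ms), varianceEstimate == 3000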
public virtual void TestHsController() {
    AppContext ctx = Org.Mockito.Mockito.Mock<AppContext>();
    ApplicationId appId = ApplicationIdPBImpl.NewInstance(0, 5);
    Org.Mockito.Mockito.When(ctx.GetApplicationID()).ThenReturn(appId);
    AppForTest app = new AppForTest(ctx);
    Configuration config = new Configuration();
    Controller.RequestContext requestCtx = Org.Mockito.Mockito.Mock<Controller.RequestContext>();
    TestBlocks.HsControllerForTest controller = new TestBlocks.HsControllerForTest(app, config, requestCtx);
    controller.Index();
    NUnit.Framework.Assert.AreEqual("JobHistory", controller.Get(Params.Title, string.Empty));
    NUnit.Framework.Assert.AreEqual(typeof(HsJobPage), controller.JobPage());
    NUnit.Framework.Assert.AreEqual(typeof(HsCountersPage), controller.CountersPage());
    NUnit.Framework.Assert.AreEqual(typeof(HsTasksPage), controller.TasksPage());
    NUnit.Framework.Assert.AreEqual(typeof(HsTaskPage), controller.TaskPage());
    NUnit.Framework.Assert.AreEqual(typeof(HsAttemptsPage), controller.AttemptsPage());
    controller.Set(AMParams.JobId, "job_01_01");
    controller.Set(AMParams.TaskId, "task_01_01_m01_01");
    controller.Set(AMParams.TaskType, "m");
    controller.Set(AMParams.AttemptState, "State");
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Task task = Org.Mockito.Mockito.Mock<Task>();
    Org.Mockito.Mockito.When(job.GetTask(Matchers.Any<TaskId>())).ThenReturn(task);
    JobId jobID = MRApps.ToJobID("job_01_01");
    Org.Mockito.Mockito.When(ctx.GetJob(jobID)).ThenReturn(job);
    Org.Mockito.Mockito.When(job.CheckAccess(Matchers.Any<UserGroupInformation>(), Matchers.Any<JobACL>())).ThenReturn(true);
    controller.Job();
    NUnit.Framework.Assert.AreEqual(typeof(HsJobPage), controller.GetClazz());
    controller.JobCounters();
    NUnit.Framework.Assert.AreEqual(typeof(HsCountersPage), controller.GetClazz());
    controller.TaskCounters();
    NUnit.Framework.Assert.AreEqual(typeof(HsCountersPage), controller.GetClazz());
    controller.Tasks();
    NUnit.Framework.Assert.AreEqual(typeof(HsTasksPage), controller.GetClazz());
    controller.Task();
    NUnit.Framework.Assert.AreEqual(typeof(HsTaskPage), controller.GetClazz());
    controller.Attempts();
    NUnit.Framework.Assert.AreEqual(typeof(HsAttemptsPage), controller.GetClazz());
    NUnit.Framework.Assert.AreEqual(typeof(HsConfPage), controller.ConfPage());
    NUnit.Framework.Assert.AreEqual(typeof(HsAboutPage), controller.AboutPage());
    controller.About();
    NUnit.Framework.Assert.AreEqual(typeof(HsAboutPage), controller.GetClazz());
    controller.Logs();
    NUnit.Framework.Assert.AreEqual(typeof(HsLogsPage), controller.GetClazz());
    controller.Nmlogs();
    NUnit.Framework.Assert.AreEqual(typeof(AggregatedLogsPage), controller.GetClazz());
    NUnit.Framework.Assert.AreEqual(typeof(HsSingleCounterPage), controller.SingleCounterPage());
    controller.SingleJobCounter();
    NUnit.Framework.Assert.AreEqual(typeof(HsSingleCounterPage), controller.GetClazz());
    controller.SingleTaskCounter();
    NUnit.Framework.Assert.AreEqual(typeof(HsSingleCounterPage), controller.GetClazz());
}