/// <summary>Test cleaning of old history files.</summary>
/// <remarks>
/// Tests cleaning of old history files. Files should be deleted after one week
/// by default.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestDeleteFileInfo()
{
    Log.Info("STARTING testDeleteFileInfo");
    try
    {
        Configuration conf = new Configuration();
        conf.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
            typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
        RackResolver.Init(conf);
        MRApp app = new TestJobHistoryEvents.MRAppWithHistory(1, 1, true,
            this.GetType().FullName, true);
        app.Submit(conf);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
            app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
        JobId jobId = job.GetID();
        app.WaitForState(job, JobState.Succeeded);
        // make sure all events are flushed
        app.WaitForState(Service.STATE.Stopped);
        HistoryFileManager hfm = new HistoryFileManager();
        hfm.Init(conf);
        HistoryFileManager.HistoryFileInfo fileInfo = hfm.GetFileInfo(jobId);
        hfm.InitExisting();
        // wait for the files to move from the done_intermediate directory
        // to the done directory
        while (fileInfo.IsMovePending())
        {
            Sharpen.Thread.Sleep(300);
        }
        NUnit.Framework.Assert.IsNotNull(hfm.jobListCache.Values());
        // try to remove fileInfo
        hfm.Clean();
        // check that fileInfo is not deleted yet
        NUnit.Framework.Assert.IsFalse(fileInfo.IsDeleted());
        // set the maximum history age so the file becomes eligible for deletion
        hfm.SetMaxHistoryAge(-1);
        hfm.Clean();
        hfm.Stop();
        NUnit.Framework.Assert.IsTrue("Thread pool shutdown",
            hfm.moveToDoneExecutor.IsTerminated());
        // now it should be deleted
        NUnit.Framework.Assert.IsTrue("file should be deleted", fileInfo.IsDeleted());
    }
    finally
    {
        Log.Info("FINISHED testDeleteFileInfo");
    }
}
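// Hedged illustration (not part of the original test): the one-week default
// mentioned in the remarks above comes from the history server's max-age
// setting. Assuming Hadoop's standard "mapreduce.jobhistory.max-age-ms" key,
// shortening the retention for a test could look like this sketch:
private static Configuration ConfWithShortHistoryAge()
{
    Configuration conf = new Configuration();
    // keep finished job history for one day instead of the default week
    conf.SetLong("mapreduce.jobhistory.max-age-ms", 24L * 60 * 60 * 1000);
    return conf;
}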
/// <exception cref="System.Exception"/>
public virtual void TestScanningOldDirs()
{
    Log.Info("STARTING testScanningOldDirs");
    try
    {
        Configuration conf = new Configuration();
        conf.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
            typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
        RackResolver.Init(conf);
        MRApp app = new TestJobHistoryEvents.MRAppWithHistory(1, 1, true,
            this.GetType().FullName, true);
        app.Submit(conf);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
            app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
        JobId jobId = job.GetID();
        Log.Info("JOBID is " + TypeConverter.FromYarn(jobId).ToString());
        app.WaitForState(job, JobState.Succeeded);
        // make sure all events are flushed
        app.WaitForState(Service.STATE.Stopped);
        TestJobHistoryParsing.HistoryFileManagerForTest hfm =
            new TestJobHistoryParsing.HistoryFileManagerForTest();
        hfm.Init(conf);
        HistoryFileManager.HistoryFileInfo fileInfo = hfm.GetFileInfo(jobId);
        NUnit.Framework.Assert.IsNotNull("Unable to locate job history", fileInfo);
        // force the manager to "forget" the job
        hfm.DeleteJobFromJobListCache(fileInfo);
        int msecPerSleep = 10;
        int msecToSleep = 10 * 1000;
        while (fileInfo.IsMovePending() && msecToSleep > 0)
        {
            NUnit.Framework.Assert.IsFalse(fileInfo.DidMoveFail());
            msecToSleep -= msecPerSleep;
            Sharpen.Thread.Sleep(msecPerSleep);
        }
        NUnit.Framework.Assert.IsTrue("Timeout waiting for history move",
            msecToSleep > 0);
        fileInfo = hfm.GetFileInfo(jobId);
        hfm.Stop();
        NUnit.Framework.Assert.IsNotNull("Unable to locate old job history", fileInfo);
        NUnit.Framework.Assert.IsTrue("HistoryFileManager not shutdown properly",
            hfm.moveToDoneExecutor.IsTerminated());
    }
    finally
    {
        Log.Info("FINISHED testScanningOldDirs");
    }
}
/// <summary>Simple test of some JobHistory methods.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestJobHistoryMethods()
{
    Log.Info("STARTING testJobHistoryMethods");
    try
    {
        Configuration configuration = new Configuration();
        configuration.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
            typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
        RackResolver.Init(configuration);
        MRApp app = new TestJobHistoryEvents.MRAppWithHistory(1, 1, true,
            this.GetType().FullName, true);
        app.Submit(configuration);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
            app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
        app.WaitForState(job, JobState.Succeeded);
        JobHistory jobHistory = new JobHistory();
        jobHistory.Init(configuration);
        // method getAllJobs
        NUnit.Framework.Assert.AreEqual(1, jobHistory.GetAllJobs().Count);
        // and with an ApplicationId
        NUnit.Framework.Assert.AreEqual(1, jobHistory.GetAllJobs(app.GetAppID()).Count);
        JobsInfo jobsinfo = jobHistory.GetPartialJobs(0L, 10L, null, "default", 0L,
            Runtime.CurrentTimeMillis() + 1, 0L, Runtime.CurrentTimeMillis() + 1,
            JobState.Succeeded);
        NUnit.Framework.Assert.AreEqual(1, jobsinfo.GetJobs().Count);
        NUnit.Framework.Assert.IsNotNull(jobHistory.GetApplicationAttemptId());
        // test the application id
        NUnit.Framework.Assert.AreEqual("application_0_0000",
            jobHistory.GetApplicationID().ToString());
        NUnit.Framework.Assert.AreEqual("Job History Server",
            jobHistory.GetApplicationName());
        // the following methods are not implemented by JobHistory and return null
        NUnit.Framework.Assert.IsNull(jobHistory.GetEventHandler());
        NUnit.Framework.Assert.IsNull(jobHistory.GetClock());
        NUnit.Framework.Assert.IsNull(jobHistory.GetClusterInfo());
    }
    finally
    {
        Log.Info("FINISHED testJobHistoryMethods");
    }
}
/// <exception cref="System.Exception"/>
protected override void ServiceInit(Configuration conf)
{
    resourceTrackerAddress = conf.GetSocketAddr(YarnConfiguration.RmBindHost,
        YarnConfiguration.RmResourceTrackerAddress,
        YarnConfiguration.DefaultRmResourceTrackerAddress,
        YarnConfiguration.DefaultRmResourceTrackerPort);
    RackResolver.Init(conf);
    nextHeartBeatInterval = conf.GetLong(YarnConfiguration.RmNmHeartbeatIntervalMs,
        YarnConfiguration.DefaultRmNmHeartbeatIntervalMs);
    if (nextHeartBeatInterval <= 0)
    {
        throw new YarnRuntimeException("Invalid Configuration. "
            + YarnConfiguration.RmNmHeartbeatIntervalMs + " should be larger than 0.");
    }
    minAllocMb = conf.GetInt(YarnConfiguration.RmSchedulerMinimumAllocationMb,
        YarnConfiguration.DefaultRmSchedulerMinimumAllocationMb);
    minAllocVcores = conf.GetInt(YarnConfiguration.RmSchedulerMinimumAllocationVcores,
        YarnConfiguration.DefaultRmSchedulerMinimumAllocationVcores);
    minimumNodeManagerVersion = conf.Get(YarnConfiguration.RmNodemanagerMinimumVersion,
        YarnConfiguration.DefaultRmNodemanagerMinimumVersion);
    base.ServiceInit(conf);
}
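// Hedged example (not from this file): the heartbeat interval validated above is
// read from configuration; with Hadoop's standard key names it could be set in
// yarn-site.xml roughly like this:
//
//   <property>
//     <name>yarn.resourcemanager.nodemanagers.heartbeat-interval-ms</name>
//     <value>1000</value>
//   </property>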
// Test reports of JobHistoryServer. The history server should get log files
// from MRApp and read them.
/// <exception cref="System.Exception"/>
public virtual void TestReports()
{
    Configuration config = new Configuration();
    config.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
        typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
    RackResolver.Init(config);
    MRApp app = new TestJobHistoryEvents.MRAppWithHistory(1, 1, true,
        this.GetType().FullName, true);
    app.Submit(config);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
    app.WaitForState(job, JobState.Succeeded);
    historyServer = new JobHistoryServer();
    historyServer.Init(config);
    historyServer.Start();
    // find the JobHistory service
    JobHistory jobHistory = null;
    foreach (Org.Apache.Hadoop.Service.Service service in historyServer.GetServices())
    {
        if (service is JobHistory)
        {
            jobHistory = (JobHistory)service;
        }
    }
    IDictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job> jobs =
        jobHistory.GetAllJobs();
    NUnit.Framework.Assert.AreEqual(1, jobs.Count);
    NUnit.Framework.Assert.AreEqual("job_0_0000",
        jobs.Keys.GetEnumerator().Next().ToString());
    Task task = job.GetTasks().Values.GetEnumerator().Next();
    TaskAttempt attempt = task.GetAttempts().Values.GetEnumerator().Next();
    HistoryClientService historyService = historyServer.GetClientService();
    MRClientProtocol protocol = historyService.GetClientHandler();
    // test getTaskAttemptReport
    GetTaskAttemptReportRequest gtarRequest =
        recordFactory.NewRecordInstance<GetTaskAttemptReportRequest>();
    TaskAttemptId taId = attempt.GetID();
    taId.SetTaskId(task.GetID());
    taId.GetTaskId().SetJobId(job.GetID());
    gtarRequest.SetTaskAttemptId(taId);
    GetTaskAttemptReportResponse response = protocol.GetTaskAttemptReport(gtarRequest);
    NUnit.Framework.Assert.AreEqual("container_0_0000_01_000000",
        response.GetTaskAttemptReport().GetContainerId().ToString());
    NUnit.Framework.Assert.IsTrue(
        response.GetTaskAttemptReport().GetDiagnosticInfo().IsEmpty());
    // counters
    NUnit.Framework.Assert.IsNotNull(response.GetTaskAttemptReport().GetCounters()
        .GetCounter(TaskCounter.PhysicalMemoryBytes));
    NUnit.Framework.Assert.AreEqual(taId.ToString(),
        response.GetTaskAttemptReport().GetTaskAttemptId().ToString());
    // test getTaskReport
    GetTaskReportRequest request =
        recordFactory.NewRecordInstance<GetTaskReportRequest>();
    TaskId taskId = task.GetID();
    taskId.SetJobId(job.GetID());
    request.SetTaskId(taskId);
    GetTaskReportResponse reportResponse = protocol.GetTaskReport(request);
    NUnit.Framework.Assert.AreEqual(string.Empty,
        reportResponse.GetTaskReport().GetDiagnosticsList().GetEnumerator().Next());
    // progress
    NUnit.Framework.Assert.AreEqual(1.0f,
        reportResponse.GetTaskReport().GetProgress(), 0.01);
    // the report has the corrected taskId
    NUnit.Framework.Assert.AreEqual(taskId.ToString(),
        reportResponse.GetTaskReport().GetTaskId().ToString());
    // the task state should be SUCCEEDED
    NUnit.Framework.Assert.AreEqual(TaskState.Succeeded,
        reportResponse.GetTaskReport().GetTaskState());
    // for an invalid job id, an IOException should be thrown
    GetTaskReportsRequest gtreportsRequest =
        recordFactory.NewRecordInstance<GetTaskReportsRequest>();
    gtreportsRequest.SetJobId(
        TypeConverter.ToYarn(JobID.ForName("job_1415730144495_0001")));
    gtreportsRequest.SetTaskType(TaskType.Reduce);
    try
    {
        protocol.GetTaskReports(gtreportsRequest);
        NUnit.Framework.Assert.Fail("IOException not thrown for invalid job id");
    }
    catch (IOException)
    {
        // expected
    }
    // test getTaskAttemptCompletionEvents
    GetTaskAttemptCompletionEventsRequest taskAttemptRequest =
        recordFactory.NewRecordInstance<GetTaskAttemptCompletionEventsRequest>();
    taskAttemptRequest.SetJobId(job.GetID());
    GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse =
        protocol.GetTaskAttemptCompletionEvents(taskAttemptRequest);
    NUnit.Framework.Assert.AreEqual(0,
        taskAttemptCompletionEventsResponse.GetCompletionEventCount());
    // test getDiagnostics
    GetDiagnosticsRequest diagnosticRequest =
        recordFactory.NewRecordInstance<GetDiagnosticsRequest>();
    diagnosticRequest.SetTaskAttemptId(taId);
    GetDiagnosticsResponse diagnosticResponse = protocol.GetDiagnostics(diagnosticRequest);
    // oddly, the diagnostics list of a successful attempt contains a single empty string
    NUnit.Framework.Assert.AreEqual(1, diagnosticResponse.GetDiagnosticsCount());
    NUnit.Framework.Assert.AreEqual(string.Empty, diagnosticResponse.GetDiagnostics(0));
}
// Key -> Priority
// Value -> Map
//   Key -> ResourceName (e.g., nodename, rackname, *)
//   Value -> Map
//     Key -> Resource Capability
//     Value -> ResourceRequest
// pendingRelease holds historical release requests. A request is removed only
// when the RM sends a completedContainer.
// How is it different from release? --> release is per allocate() request.
/// <exception cref="System.Exception"/>
protected override void ServiceInit(Configuration conf)
{
    RackResolver.Init(conf);
    base.ServiceInit(conf);
}
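// Hedged sketch (not taken from this source): in Sharpen-style C#, the nested
// table the comment above describes could be declared roughly as below. The
// field name "remoteRequestsTable" is an assumed placeholder borrowed from the
// analogous MapReduce AM code, not verified against this codebase.
private readonly IDictionary<Priority, IDictionary<string, IDictionary<Resource, ResourceRequest>>>
    remoteRequestsTable =
        new Dictionary<Priority, IDictionary<string, IDictionary<Resource, ResourceRequest>>>();
// A lookup walks the levels in order: priority, then resource name
// (a node, a rack, or "*" for any host), then the requested capability.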
/// <exception cref="System.Exception"/>
public virtual void TestDiagnosticsForKilledJob()
{
    Log.Info("STARTING testDiagnosticsForKilledJob");
    try
    {
        Configuration conf = new Configuration();
        conf.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
            typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
        RackResolver.Init(conf);
        MRApp app = new TestJobHistoryParsing.MRAppWithHistoryWithJobKilled(2, 1, true,
            this.GetType().FullName, true);
        app.Submit(conf);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
            app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
        JobId jobId = job.GetID();
        app.WaitForState(job, JobState.Killed);
        // make sure all events are flushed
        app.WaitForState(Service.STATE.Stopped);
        JobHistory jobHistory = new JobHistory();
        jobHistory.Init(conf);
        HistoryFileManager.HistoryFileInfo fileInfo = jobHistory.GetJobFileInfo(jobId);
        JobHistoryParser parser;
        JobHistoryParser.JobInfo jobInfo;
        lock (fileInfo)
        {
            Path historyFilePath = fileInfo.GetHistoryFile();
            FSDataInputStream @in = null;
            FileContext fc = null;
            try
            {
                fc = FileContext.GetFileContext(conf);
                @in = fc.Open(fc.MakeQualified(historyFilePath));
            }
            catch (IOException ioe)
            {
                Log.Info("Can not open history file: " + historyFilePath, ioe);
                throw new Exception("Can not open History File");
            }
            parser = new JobHistoryParser(@in);
            jobInfo = parser.Parse();
        }
        Exception parseException = parser.GetParseException();
        NUnit.Framework.Assert.IsNull("Caught an unexpected exception " + parseException,
            parseException);
        IList<string> originalDiagnostics = job.GetDiagnostics();
        string historyError = jobInfo.GetErrorInfo();
        NUnit.Framework.Assert.IsTrue("No original diagnostics for a killed job",
            originalDiagnostics != null && !originalDiagnostics.IsEmpty());
        NUnit.Framework.Assert.IsNotNull("No history error info for a killed job",
            historyError);
        foreach (string diagString in originalDiagnostics)
        {
            NUnit.Framework.Assert.IsTrue(historyError.Contains(diagString));
        }
        NUnit.Framework.Assert.IsTrue("No killed message in diagnostics",
            historyError.Contains(JobImpl.JobKilledDiag));
    }
    finally
    {
        Log.Info("FINISHED testDiagnosticsForKilledJob");
    }
}
/// <exception cref="System.Exception"/>
public virtual void TestHistoryParsingForFailedAttempts()
{
    Log.Info("STARTING testHistoryParsingForFailedAttempts");
    try
    {
        Configuration conf = new Configuration();
        conf.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
            typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
        RackResolver.Init(conf);
        MRApp app = new TestJobHistoryParsing.MRAppWithHistoryWithFailedAttempt(2, 1, true,
            this.GetType().FullName, true);
        app.Submit(conf);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
            app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
        JobId jobId = job.GetID();
        app.WaitForState(job, JobState.Succeeded);
        // make sure all events are flushed
        app.WaitForState(Service.STATE.Stopped);
        JobHistory jobHistory = new JobHistory();
        jobHistory.Init(conf);
        HistoryFileManager.HistoryFileInfo fileInfo = jobHistory.GetJobFileInfo(jobId);
        JobHistoryParser parser;
        JobHistoryParser.JobInfo jobInfo;
        lock (fileInfo)
        {
            Path historyFilePath = fileInfo.GetHistoryFile();
            FSDataInputStream @in = null;
            FileContext fc = null;
            try
            {
                fc = FileContext.GetFileContext(conf);
                @in = fc.Open(fc.MakeQualified(historyFilePath));
            }
            catch (IOException ioe)
            {
                Log.Info("Can not open history file: " + historyFilePath, ioe);
                throw new Exception("Can not open History File");
            }
            parser = new JobHistoryParser(@in);
            jobInfo = parser.Parse();
        }
        Exception parseException = parser.GetParseException();
        NUnit.Framework.Assert.IsNull("Caught an unexpected exception " + parseException,
            parseException);
        int numFailedAttempts = 0;
        IDictionary<TaskID, JobHistoryParser.TaskInfo> allTasks = jobInfo.GetAllTasks();
        foreach (Task task in job.GetTasks().Values)
        {
            JobHistoryParser.TaskInfo taskInfo =
                allTasks[TypeConverter.FromYarn(task.GetID())];
            foreach (TaskAttempt taskAttempt in task.GetAttempts().Values)
            {
                JobHistoryParser.TaskAttemptInfo taskAttemptInfo =
                    taskInfo.GetAllTaskAttempts()[TypeConverter.FromYarn(taskAttempt.GetID())];
                // verify the rack-name for all task attempts
                NUnit.Framework.Assert.AreEqual("rack-name is incorrect",
                    taskAttemptInfo.GetRackname(), RackName);
                if (taskAttemptInfo.GetTaskStatus().Equals("FAILED"))
                {
                    numFailedAttempts++;
                }
            }
        }
        NUnit.Framework.Assert.AreEqual("Number of failed attempts doesn't match.", 2,
            numFailedAttempts);
    }
    finally
    {
        Log.Info("FINISHED testHistoryParsingForFailedAttempts");
    }
}
/// <exception cref="System.Exception"/>
private void CheckHistoryParsing(int numMaps, int numReduces, int numSuccessfulMaps)
{
    Configuration conf = new Configuration();
    conf.Set(MRJobConfig.UserName, Runtime.GetProperty("user.name"));
    long amStartTimeEst = Runtime.CurrentTimeMillis();
    conf.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
        typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
    RackResolver.Init(conf);
    MRApp app = new TestJobHistoryEvents.MRAppWithHistory(numMaps, numReduces, true,
        this.GetType().FullName, true);
    app.Submit(conf);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
    JobId jobId = job.GetID();
    Log.Info("JOBID is " + TypeConverter.FromYarn(jobId).ToString());
    app.WaitForState(job, JobState.Succeeded);
    // make sure all events are flushed
    app.WaitForState(Service.STATE.Stopped);
    string jobhistoryDir = JobHistoryUtils.GetHistoryIntermediateDoneDirForUser(conf);
    FileContext fc = null;
    try
    {
        fc = FileContext.GetFileContext(conf);
    }
    catch (IOException ioe)
    {
        Log.Info("Can not get FileContext", ioe);
        throw new Exception("Can not get File Context");
    }
    if (numMaps == numSuccessfulMaps)
    {
        string summaryFileName = JobHistoryUtils.GetIntermediateSummaryFileName(jobId);
        Path summaryFile = new Path(jobhistoryDir, summaryFileName);
        string jobSummaryString = GetJobSummary(fc, summaryFile);
        NUnit.Framework.Assert.IsNotNull(jobSummaryString);
        NUnit.Framework.Assert.IsTrue(jobSummaryString.Contains("resourcesPerMap=100"));
        NUnit.Framework.Assert.IsTrue(jobSummaryString.Contains("resourcesPerReduce=100"));
        // the summary is a comma-separated list of key=value pairs
        IDictionary<string, string> jobSummaryElements = new Dictionary<string, string>();
        StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
        while (strToken.HasMoreTokens())
        {
            string keypair = strToken.NextToken();
            jobSummaryElements[keypair.Split("=")[0]] = keypair.Split("=")[1];
        }
        NUnit.Framework.Assert.AreEqual("JobId does not match", jobId.ToString(),
            jobSummaryElements["jobId"]);
        NUnit.Framework.Assert.AreEqual("JobName does not match", "test",
            jobSummaryElements["jobName"]);
        NUnit.Framework.Assert.IsTrue("submitTime should not be 0",
            long.Parse(jobSummaryElements["submitTime"]) != 0);
        NUnit.Framework.Assert.IsTrue("launchTime should not be 0",
            long.Parse(jobSummaryElements["launchTime"]) != 0);
        NUnit.Framework.Assert.IsTrue("firstMapTaskLaunchTime should not be 0",
            long.Parse(jobSummaryElements["firstMapTaskLaunchTime"]) != 0);
        NUnit.Framework.Assert.IsTrue("firstReduceTaskLaunchTime should not be 0",
            long.Parse(jobSummaryElements["firstReduceTaskLaunchTime"]) != 0);
        NUnit.Framework.Assert.IsTrue("finishTime should not be 0",
            long.Parse(jobSummaryElements["finishTime"]) != 0);
        NUnit.Framework.Assert.AreEqual("Mismatch in num map slots", numSuccessfulMaps,
            System.Convert.ToInt32(jobSummaryElements["numMaps"]));
        NUnit.Framework.Assert.AreEqual("Mismatch in num reduce slots", numReduces,
            System.Convert.ToInt32(jobSummaryElements["numReduces"]));
        NUnit.Framework.Assert.AreEqual("User does not match",
            Runtime.GetProperty("user.name"), jobSummaryElements["user"]);
        NUnit.Framework.Assert.AreEqual("Queue does not match", "default",
            jobSummaryElements["queue"]);
        NUnit.Framework.Assert.AreEqual("Status does not match", "SUCCEEDED",
            jobSummaryElements["status"]);
    }
    JobHistory jobHistory = new JobHistory();
    jobHistory.Init(conf);
    HistoryFileManager.HistoryFileInfo fileInfo = jobHistory.GetJobFileInfo(jobId);
    JobHistoryParser.JobInfo jobInfo;
    long numFinishedMaps;
    lock (fileInfo)
    {
        Path historyFilePath = fileInfo.GetHistoryFile();
        FSDataInputStream @in = null;
        Log.Info("JobHistoryFile is: " + historyFilePath);
        try
        {
            @in = fc.Open(fc.MakeQualified(historyFilePath));
        }
        catch (IOException ioe)
        {
            Log.Info("Can not open history file: " + historyFilePath, ioe);
            throw new Exception("Can not open History File");
        }
        JobHistoryParser parser = new JobHistoryParser(@in);
        EventReader realReader = new EventReader(@in);
        EventReader reader = Org.Mockito.Mockito.Mock<EventReader>();
        if (numMaps == numSuccessfulMaps)
        {
            reader = realReader;
        }
        else
        {
            // Hack! Use a mocked reader that stops delivering events after the
            // expected number of successful maps, to simulate a truncated history.
            AtomicInteger numFinishedEvents = new AtomicInteger(0);
            Org.Mockito.Mockito.When(reader.GetNextEvent()).ThenAnswer(
                new _Answer_257(realReader, numFinishedEvents, numSuccessfulMaps));
        }
        jobInfo = parser.Parse(reader);
        numFinishedMaps = ComputeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps);
        if (numFinishedMaps != numMaps)
        {
            Exception parseException = parser.GetParseException();
            NUnit.Framework.Assert.IsNotNull("Didn't get expected parse exception",
                parseException);
        }
    }
    NUnit.Framework.Assert.AreEqual("Incorrect username",
        Runtime.GetProperty("user.name"), jobInfo.GetUsername());
    NUnit.Framework.Assert.AreEqual("Incorrect jobName", "test", jobInfo.GetJobname());
    NUnit.Framework.Assert.AreEqual("Incorrect queuename", "default",
        jobInfo.GetJobQueueName());
    NUnit.Framework.Assert.AreEqual("incorrect conf path", "test",
        jobInfo.GetJobConfPath());
    NUnit.Framework.Assert.AreEqual("incorrect finishedMap", numSuccessfulMaps,
        numFinishedMaps);
    NUnit.Framework.Assert.AreEqual("incorrect finishedReduces", numReduces,
        jobInfo.GetFinishedReduces());
    NUnit.Framework.Assert.AreEqual("incorrect uberized", job.IsUber(),
        jobInfo.GetUberized());
    IDictionary<TaskID, JobHistoryParser.TaskInfo> allTasks = jobInfo.GetAllTasks();
    int totalTasks = allTasks.Count;
    NUnit.Framework.Assert.AreEqual("total number of tasks is incorrect",
        (numMaps + numReduces), totalTasks);
    // verify amInfo
    NUnit.Framework.Assert.AreEqual(1, jobInfo.GetAMInfos().Count);
    NUnit.Framework.Assert.AreEqual(MRApp.NmHost,
        jobInfo.GetAMInfos()[0].GetNodeManagerHost());
    JobHistoryParser.AMInfo amInfo = jobInfo.GetAMInfos()[0];
    NUnit.Framework.Assert.AreEqual(MRApp.NmPort, amInfo.GetNodeManagerPort());
    NUnit.Framework.Assert.AreEqual(MRApp.NmHttpPort, amInfo.GetNodeManagerHttpPort());
    NUnit.Framework.Assert.AreEqual(1, amInfo.GetAppAttemptId().GetAttemptId());
    NUnit.Framework.Assert.AreEqual(amInfo.GetAppAttemptId(),
        amInfo.GetContainerId().GetApplicationAttemptId());
    NUnit.Framework.Assert.IsTrue(amInfo.GetStartTime() <= Runtime.CurrentTimeMillis()
        && amInfo.GetStartTime() >= amStartTimeEst);
    ContainerId fakeCid = MRApp.NewContainerId(-1, -1, -1, -1);
    // assert at the taskAttempt level
    foreach (JobHistoryParser.TaskInfo taskInfo in allTasks.Values)
    {
        int taskAttemptCount = taskInfo.GetAllTaskAttempts().Count;
        NUnit.Framework.Assert.AreEqual("total number of task attempts", 1,
            taskAttemptCount);
        JobHistoryParser.TaskAttemptInfo taInfo =
            taskInfo.GetAllTaskAttempts().Values.GetEnumerator().Next();
        NUnit.Framework.Assert.IsNotNull(taInfo.GetContainerId());
        // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
        NUnit.Framework.Assert.IsFalse(taInfo.GetContainerId().Equals(fakeCid));
    }
    // deep compare Job and JobInfo
    foreach (Task task in job.GetTasks().Values)
    {
        JobHistoryParser.TaskInfo taskInfo_1 =
            allTasks[TypeConverter.FromYarn(task.GetID())];
        NUnit.Framework.Assert.IsNotNull("TaskInfo not found", taskInfo_1);
        foreach (TaskAttempt taskAttempt in task.GetAttempts().Values)
        {
            JobHistoryParser.TaskAttemptInfo taskAttemptInfo =
                taskInfo_1.GetAllTaskAttempts()[TypeConverter.FromYarn(taskAttempt.GetID())];
            NUnit.Framework.Assert.IsNotNull("TaskAttemptInfo not found", taskAttemptInfo);
            NUnit.Framework.Assert.AreEqual("Incorrect shuffle port for task attempt",
                taskAttempt.GetShufflePort(), taskAttemptInfo.GetShufflePort());
            if (numMaps == numSuccessfulMaps)
            {
                NUnit.Framework.Assert.AreEqual(MRApp.NmHost, taskAttemptInfo.GetHostname());
                NUnit.Framework.Assert.AreEqual(MRApp.NmPort, taskAttemptInfo.GetPort());
                // verify the rack-name
                NUnit.Framework.Assert.AreEqual("rack-name is incorrect",
                    taskAttemptInfo.GetRackname(), RackName);
            }
        }
    }
    // test output for HistoryViewer
    TextWriter stdps = System.Console.Out;
    try
    {
        Runtime.SetOut(new TextWriter(outContent));
        HistoryViewer viewer;
        lock (fileInfo)
        {
            viewer = new HistoryViewer(
                fc.MakeQualified(fileInfo.GetHistoryFile()).ToString(), conf, true);
        }
        viewer.Print();
        foreach (JobHistoryParser.TaskInfo taskInfo_1 in allTasks.Values)
        {
            string test = (taskInfo_1.GetTaskStatus() == null
                ? string.Empty : taskInfo_1.GetTaskStatus())
                + " " + taskInfo_1.GetTaskType()
                + " task list for " + taskInfo_1.GetTaskId().GetJobID();
            NUnit.Framework.Assert.IsTrue(outContent.ToString().IndexOf(test) > 0);
            NUnit.Framework.Assert.IsTrue(
                outContent.ToString().IndexOf(taskInfo_1.GetTaskId().ToString()) > 0);
        }
    }
    finally
    {
        Runtime.SetOut(stdps);
    }
}
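// Hedged alternative (not in the original tests): the tokenizer loop in
// CheckHistoryParsing indexes Split("=")[1] and would throw on a token without
// an '='. A more defensive sketch of the same key=value parsing, skipping
// malformed pairs, could look like this:
private static IDictionary<string, string> ParseJobSummary(string summary)
{
    IDictionary<string, string> elements = new Dictionary<string, string>();
    foreach (string pair in summary.Split(','))
    {
        int eq = pair.IndexOf('=');
        if (eq > 0)
        {
            // key is everything before the first '=', value everything after
            elements[pair.Substring(0, eq)] = pair.Substring(eq + 1);
        }
    }
    return elements;
}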