/// <summary>Tests cleaning of old history files.</summary>
/// <remarks>
/// Tests cleaning of old history files. Files should be deleted after 1 week by
/// default; once the max history age is forced negative, a subsequent clean pass
/// must delete the file.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestDeleteFileInfo()
{
    Log.Info("STARTING testDeleteFileInfo");
    try
    {
        Configuration conf = new Configuration();
        conf.SetClass(CommonConfigurationKeysPublic.NetTopologyNodeSwitchMappingImplKey,
            typeof(TestJobHistoryParsing.MyResolver), typeof(DNSToSwitchMapping));
        RackResolver.Init(conf);
        // Run a tiny 1-map/1-reduce job to completion so a history file is produced.
        MRApp app = new TestJobHistoryEvents.MRAppWithHistory(1, 1, true,
            this.GetType().FullName, true);
        app.Submit(conf);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.GetContext().GetAllJobs().Values
            .GetEnumerator().Next();
        JobId jobId = job.GetID();
        app.WaitForState(job, JobState.Succeeded);
        // make sure all events are flushed
        app.WaitForState(Service.STATE.Stopped);
        HistoryFileManager hfm = new HistoryFileManager();
        hfm.Init(conf);
        HistoryFileManager.HistoryFileInfo fileInfo = hfm.GetFileInfo(jobId);
        hfm.InitExisting();
        // Wait for the files to move from the done_intermediate directory to the
        // done directory. The wait is bounded so a stuck move fails the test
        // instead of hanging the suite forever.
        int remainingMs = 30000;
        while (fileInfo.IsMovePending() && remainingMs > 0)
        {
            Sharpen.Thread.Sleep(300);
            remainingMs -= 300;
        }
        NUnit.Framework.Assert.IsFalse("history file move still pending after timeout",
            fileInfo.IsMovePending());
        NUnit.Framework.Assert.IsNotNull(hfm.jobListCache.Values());
        // Try to remove fileInfo: with the default max history age the file is
        // still young enough that a clean pass must leave it alone.
        hfm.Clean();
        NUnit.Framework.Assert.IsFalse(fileInfo.IsDeleted());
        // Force every history file to be considered expired.
        hfm.SetMaxHistoryAge(-1);
        hfm.Clean();
        hfm.Stop();
        NUnit.Framework.Assert.IsTrue("Thread pool shutdown",
            hfm.moveToDoneExecutor.IsTerminated());
        // should be deleted !
        NUnit.Framework.Assert.IsTrue("file should be deleted ", fileInfo.IsDeleted());
    }
    finally
    {
        Log.Info("FINISHED testDeleteFileInfo");
    }
}
/// <summary>
/// Looks up the fully loaded job for the given id, consulting the loaded-job
/// cache first and falling back to loading it from its history file.
/// </summary>
/// <param name="jobId">id of the job to look up</param>
/// <returns>
/// the loaded job, or null when no history file exists for the id or the
/// file has been deleted since the job was cached
/// </returns>
public virtual Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job GetFullJob(JobId jobId)
{
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Looking for Job " + jobId);
    }
    try
    {
        HistoryFileManager.HistoryFileInfo fileInfo = hsManager.GetFileInfo(jobId);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job result = null;
        if (fileInfo != null)
        {
            // NOTE(review): this assumes the cache indexer yields null for a missing
            // key (Java Map.get semantics via Sharpen); a standard Dictionary
            // indexer would throw KeyNotFoundException instead — confirm the
            // declared type of loadedJobCache.
            result = loadedJobCache[jobId];
            if (result == null)
            {
                // Cache miss: load the job from its history file.
                result = LoadJob(fileInfo);
            }
            else
            {
                if (fileInfo.IsDeleted())
                {
                    // The backing history file is gone; drop the stale cache entry.
                    Sharpen.Collections.Remove(loadedJobCache, jobId);
                    result = null;
                }
            }
        }
        else
        {
            // No history file for this id: evict any stale cache entry.
            Sharpen.Collections.Remove(loadedJobCache, jobId);
        }
        return(result);
    }
    catch (IOException e)
    {
        throw new YarnRuntimeException(e);
    }
}
/// <summary>
/// Tests that refreshing the loaded-job cache picks up a new cache size from
/// configuration: with size 2 the oldest of three jobs is evicted, and after
/// growing the size to 3 all three jobs fit in the cache.
/// </summary>
public virtual void TestRefreshLoadedJobCache()
{
    HistoryFileManager historyManager = Org.Mockito.Mockito.Mock<HistoryFileManager>();
    jobHistory = Org.Mockito.Mockito.Spy(new JobHistory());
    Org.Mockito.Mockito.DoReturn(historyManager).When(jobHistory).CreateHistoryFileManager();
    Configuration conf = new Configuration();
    // Set the cache size to 2
    conf.Set(JHAdminConfig.MrHistoryLoadedJobCacheSize, "2");
    jobHistory.Init(conf);
    jobHistory.Start();
    CachedHistoryStorage storage = Org.Mockito.Mockito.Spy(
        (CachedHistoryStorage)jobHistory.GetHistoryStorage());
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job[] jobs =
        new Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job[3];
    JobId[] jobIds = new JobId[3];
    for (int i = 0; i < 3; i++)
    {
        jobs[i] = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
        jobIds[i] = Org.Mockito.Mockito.Mock<JobId>();
        Org.Mockito.Mockito.When(jobs[i].GetID()).ThenReturn(jobIds[i]);
    }
    HistoryFileManager.HistoryFileInfo fileInfo =
        Org.Mockito.Mockito.Mock<HistoryFileManager.HistoryFileInfo>();
    Org.Mockito.Mockito.When(historyManager.GetFileInfo(Any<JobId>())).ThenReturn(fileInfo);
    Org.Mockito.Mockito.When(fileInfo.LoadJob()).ThenReturn(jobs[0]).ThenReturn(jobs[1])
        .ThenReturn(jobs[2]);
    // getFullJob will put the job in the cache if it isn't there
    for (int i_1 = 0; i_1 < 3; i_1++)
    {
        storage.GetFullJob(jobs[i_1].GetID());
    }
    IDictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job> jobCache =
        storage.GetLoadedJobCache();
    // job0 should have been purged since cache size is 2. The cache is keyed by
    // JobId, so key membership must be checked with ContainsKey —
    // IDictionary<K,V>.Contains expects a KeyValuePair, not a bare key.
    NUnit.Framework.Assert.IsFalse(jobCache.ContainsKey(jobs[0].GetID()));
    NUnit.Framework.Assert.IsTrue(jobCache.ContainsKey(jobs[1].GetID())
        && jobCache.ContainsKey(jobs[2].GetID()));
    // Setting cache size to 3
    conf.Set(JHAdminConfig.MrHistoryLoadedJobCacheSize, "3");
    Org.Mockito.Mockito.DoReturn(conf).When(storage).CreateConf();
    Org.Mockito.Mockito.When(fileInfo.LoadJob()).ThenReturn(jobs[0]).ThenReturn(jobs[1])
        .ThenReturn(jobs[2]);
    jobHistory.RefreshLoadedJobCache();
    for (int i_2 = 0; i_2 < 3; i_2++)
    {
        storage.GetFullJob(jobs[i_2].GetID());
    }
    jobCache = storage.GetLoadedJobCache();
    // All three jobs should be in cache since its size is now 3
    for (int i_3 = 0; i_3 < 3; i_3++)
    {
        NUnit.Framework.Assert.IsTrue(jobCache.ContainsKey(jobs[i_3].GetID()));
    }
}
/// <summary>
/// Helper method for test cases: exposes the history file info that the
/// history file manager holds for the given job id.
/// </summary>
/// <param name="jobId">id of the job whose file info is wanted</param>
/// <returns>the file info tracked for the job by the history file manager</returns>
/// <exception cref="System.IO.IOException"/>
internal virtual HistoryFileManager.HistoryFileInfo GetJobFileInfo(JobId jobId)
{
    // Pure delegation — all lookup logic lives in the history file manager.
    return hsManager.GetFileInfo(jobId);
}