public virtual void TestConfigurationBlock()
{
    // Mock a job whose conf file holds a single known key/value pair.
    AppContext ctx = Org.Mockito.Mockito.Mock<AppContext>();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Configuration configuration = new Configuration();
    configuration.Set("Key for test", "Value for test");
    Path path = new Path("conf");
    Org.Mockito.Mockito.When(job.GetConfFile()).ThenReturn(path);
    Org.Mockito.Mockito.When(job.LoadConfFile()).ThenReturn(configuration);
    Org.Mockito.Mockito.When(ctx.GetJob(Any<JobId>())).ThenReturn(job);
    TestBlocks.ConfBlockForTest configurationBlock = new TestBlocks.ConfBlockForTest(this, ctx);
    PrintWriter writer = new PrintWriter(data);
    HtmlBlock.Block htmlBlock = new BlockForTest(new TestBlocks.HtmlBlockForTest(this), writer, 0, false);
    // Without a job id the block can only render an error message.
    configurationBlock.Render(htmlBlock);
    writer.Flush();
    NUnit.Framework.Assert.IsTrue(data.ToString().Contains("Sorry, can't do anything without a JobID"));
    // Once a job id is supplied, the configuration contents must show up.
    configurationBlock.AddParameter(AMParams.JobId, "job_01_01");
    data.Reset();
    configurationBlock.Render(htmlBlock);
    writer.Flush();
    string rendered = data.ToString();
    NUnit.Framework.Assert.IsTrue(rendered.Contains("Key for test"));
    NUnit.Framework.Assert.IsTrue(rendered.Contains("Value for test"));
}
/// <summary>Resolves a job by its string id and wraps it in a JobInfo with the caller's access flag.</summary>
public virtual JobInfo GetJob(HttpServletRequest hsr, string jid)
{
    Init();
    // Throws if the id cannot be resolved against the application context.
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job resolved = GetJobFromJobIdString(jid, appCtx);
    bool viewAllowed = HasAccess(resolved, hsr);
    return new JobInfo(resolved, viewAllowed);
}
public virtual void SetUp()
{
    // Build an application context with one mocked job/task pair that the
    // controller under test can resolve by id.
    AppContext context = Org.Mockito.Mockito.Mock<AppContext>();
    Org.Mockito.Mockito.When(context.GetApplicationID()).ThenReturn(ApplicationId.NewInstance(0, 0));
    Org.Mockito.Mockito.When(context.GetApplicationName()).ThenReturn("AppName");
    Org.Mockito.Mockito.When(context.GetUser()).ThenReturn("User");
    Org.Mockito.Mockito.When(context.GetStartTime()).ThenReturn(Runtime.CurrentTimeMillis());
    job = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Task mockTask = Org.Mockito.Mockito.Mock<Task>();
    Org.Mockito.Mockito.When(job.GetTask(Any<TaskId>())).ThenReturn(mockTask);
    JobId parsedJobId = MRApps.ToJobID("job_01_01");
    Org.Mockito.Mockito.When(context.GetJob(parsedJobId)).ThenReturn(job);
    // Grant access unconditionally so the controller paths are reachable.
    Org.Mockito.Mockito.When(job.CheckAccess(Any<UserGroupInformation>(), Any<JobACL>())).ThenReturn(true);
    Org.Apache.Hadoop.Mapreduce.V2.App.Webapp.App app =
        new Org.Apache.Hadoop.Mapreduce.V2.App.Webapp.App(context);
    Configuration conf = new Configuration();
    ctx = Org.Mockito.Mockito.Mock<Controller.RequestContext>();
    appController = new AppControllerForTest(app, conf, ctx);
    appController.GetProperty()[AMParams.JobId] = "job_01_01";
    appController.GetProperty()[AMParams.TaskId] = "task_01_01_m01_01";
}
/// <summary>
/// Configures and launches a job through this MRApp, optionally enabling
/// map/reduce speculative execution, and writes the resulting job.xml into
/// the staging directory.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job Submit(Configuration conf, bool mapSpeculative, bool reduceSpeculative)
{
    // Default the submitting user to the current login if not configured.
    string user = conf.Get(MRJobConfig.UserName, UserGroupInformation.GetCurrentUser().GetShortUserName());
    conf.Set(MRJobConfig.UserName, user);
    conf.Set(MRJobConfig.MrAmStagingDir, testAbsPath.ToString());
    conf.SetBoolean(MRJobConfig.MrAmCreateJhIntermediateBaseDir, true);
    // TODO: fix the bug where the speculator gets events with
    // not-fully-constructed objects. For now, disable speculative exec
    conf.SetBoolean(MRJobConfig.MapSpeculative, mapSpeculative);
    conf.SetBoolean(MRJobConfig.ReduceSpeculative, reduceSpeculative);
    Init(conf);
    Start();
    DefaultMetricsSystem.Shutdown();
    // The app creates exactly one job; grab it from the context.
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job submitted =
        GetContext().GetAllJobs().Values.GetEnumerator().Next();
    if (assignedQueue != null)
    {
        submitted.SetQueueName(assignedQueue);
    }
    // Write job.xml
    string jobFilePath = MRApps.GetJobFile(conf, user, TypeConverter.FromYarn(submitted.GetID()));
    Log.Info("Writing job conf to " + jobFilePath);
    new FilePath(jobFilePath).GetParentFile().Mkdirs();
    conf.WriteXml(new FileOutputStream(jobFilePath));
    return submitted;
}
private float GetReduceProgress()
{
    // Reduce progress is simulated in two phases: the first half tracks map
    // completion; once all maps are done, the remaining half ramps up with
    // wall-clock time over twice the configured runtime.
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        this._enclosing.myAppContext.GetJob(this.myAttemptID.GetTaskId().GetJobId());
    float runtime = this.GetCodeRuntime();
    ICollection<Task> mapTasks = job.GetTasks(TaskType.Map).Values;
    int totalMaps = mapTasks.Count;
    int finishedMaps = 0;
    foreach (Task mapTask in mapTasks)
    {
        if (mapTask.IsFinished())
        {
            finishedMaps++;
        }
    }
    if (totalMaps != finishedMaps)
    {
        // Maps still running: scale linearly up to 50%.
        return ((float)finishedMaps) / totalMaps * 0.5F;
    }
    // All maps done: pin the earliest shuffle-completion time, then grow
    // from 0.5 toward 1.0 as simulated time elapses.
    this.shuffleCompletedTime = Math.Min(this.shuffleCompletedTime, this._enclosing.clock.GetTime());
    float elapsed = (float)(this._enclosing.clock.GetTime() - this.shuffleCompletedTime);
    return Math.Min(elapsed / (runtime * 2000.0F) + 0.5F, 1.0F);
}
public virtual void TestHsTasksBlock()
{
    // Register a single mocked reduce task under its id.
    Task task = GetTask(0);
    IDictionary<TaskId, Task> tasks = new Dictionary<TaskId, Task>();
    tasks[task.GetID()] = task;
    AppContext ctx = Org.Mockito.Mockito.Mock<AppContext>();
    AppForTest app = new AppForTest(ctx);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Org.Mockito.Mockito.When(job.GetTasks()).ThenReturn(tasks);
    app.SetJob(job);
    TestBlocks.HsTasksBlockForTest block = new TestBlocks.HsTasksBlockForTest(this, app);
    block.AddParameter(AMParams.TaskType, "r");
    PrintWriter writer = new PrintWriter(data);
    HtmlBlock.Block htmlBlock = new BlockForTest(new TestBlocks.HtmlBlockForTest(this), writer, 0, false);
    block.Render(htmlBlock);
    writer.Flush();
    // The rendered page must contain the task id, its state and timings.
    string rendered = data.ToString();
    NUnit.Framework.Assert.IsTrue(rendered.Contains("task_0_0001_r_000000"));
    NUnit.Framework.Assert.IsTrue(rendered.Contains("SUCCEEDED"));
    NUnit.Framework.Assert.IsTrue(rendered.Contains("100001"));
    NUnit.Framework.Assert.IsTrue(rendered.Contains("100011"));
    NUnit.Framework.Assert.IsTrue(rendered.Contains(string.Empty));
}
/// <summary>
/// REST endpoint: returns the tasks of a job, optionally filtered by task
/// type ("m" or "r").
/// </summary>
public virtual TasksInfo GetJobTasks(HttpServletRequest hsr, string jid, string type)
{
    Init();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = AMWebServices.GetJobFromJobIdString(jid, ctx);
    CheckAccess(job, hsr);
    // Parse the optional type filter once, before the loop: the filter does
    // not depend on the task (the original re-parsed it on every iteration),
    // and this also rejects a bad filter even when the job has no tasks.
    TaskType ttype = null;
    if (type != null && !type.IsEmpty())
    {
        try
        {
            ttype = MRApps.TaskType(type);
        }
        catch (YarnRuntimeException)
        {
            throw new BadRequestException("tasktype must be either m or r");
        }
    }
    TasksInfo allTasks = new TasksInfo();
    foreach (Task task in job.GetTasks().Values)
    {
        // Skip tasks that do not match the requested type.
        if (ttype != null && task.GetType() != ttype)
        {
            continue;
        }
        allTasks.Add(new TaskInfo(task));
    }
    return allTasks;
}
public virtual void TestMRWebAppSSLDisabled()
{
    MRApp app = new _MRApp_175(2, 2, true, this.GetType().FullName, true);
    Configuration conf = new Configuration();
    // MR is explicitly disabling SSL, even though setting as HTTPS_ONLY
    conf.Set(YarnConfiguration.YarnHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(conf);
    string hostPort = NetUtils.GetHostPortString(
        ((MRClientService)app.GetClientService()).GetWebApp().GetListenerAddress());
    // http:// should be accessible
    Uri httpUrl = new Uri("http://" + hostPort);
    HttpURLConnection httpConn = (HttpURLConnection)httpUrl.OpenConnection();
    InputStream responseStream = httpConn.GetInputStream();
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    IOUtils.CopyBytes(responseStream, buffer, 1024);
    NUnit.Framework.Assert.IsTrue(buffer.ToString().Contains("MapReduce Application"));
    // https:// is not accessible.
    Uri httpsUrl = new Uri("https://" + hostPort);
    try
    {
        HttpURLConnection httpsConn = (HttpURLConnection)httpsUrl.OpenConnection();
        httpsConn.GetInputStream();
        NUnit.Framework.Assert.Fail("https:// is not accessible, expected to fail");
    }
    catch (Exception e)
    {
        NUnit.Framework.Assert.IsTrue(e is SSLException);
    }
    app.WaitForState(job, JobState.Succeeded);
    app.VerifyCompleted();
}
/// <summary>
/// Splits mocked jobs into the "full" and "partial" views that the history
/// server would expose for the same job.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static MockHistoryJobs.JobsPair Split(IDictionary<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job> mocked)
{
    MockHistoryJobs.JobsPair result = new MockHistoryJobs.JobsPair();
    result.full = Maps.NewHashMap();
    result.partial = Maps.NewHashMap();
    foreach (KeyValuePair<JobId, Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job> entry in mocked)
    {
        JobId id = entry.Key;
        // use MockCompletedJob to set everything below to make sure
        // consistent with what history server would do
        MockHistoryJobs.MockCompletedJob completed = new MockHistoryJobs.MockCompletedJob(entry.Value);
        result.full[id] = completed;
        JobReport report = completed.GetReport();
        JobIndexInfo info = new JobIndexInfo(report.GetStartTime(), report.GetFinishTime(),
            completed.GetUserName(), completed.GetName(), id, completed.GetCompletedMaps(),
            completed.GetCompletedReduces(), completed.GetState().ToString());
        info.SetJobStartTime(report.GetStartTime());
        info.SetQueueName(completed.GetQueueName());
        result.partial[id] = new PartialJob(info, id);
    }
    return result;
}
public virtual void TestMRWebAppRedirection()
{
    // Exercise the proxy redirect for both the http and https front ends.
    string[] schemePrefix = new string[] { WebAppUtils.HttpPrefix, WebAppUtils.HttpsPrefix };
    foreach (string scheme in schemePrefix)
    {
        MRApp app = new _MRApp_227(2, 2, true, this.GetType().FullName, true);
        Configuration conf = new Configuration();
        conf.Set(YarnConfiguration.ProxyAddress, "9.9.9.9");
        conf.Set(YarnConfiguration.YarnHttpPolicyKey,
            scheme.Equals(WebAppUtils.HttpsPrefix)
                ? HttpConfig.Policy.HttpsOnly.ToString()
                : HttpConfig.Policy.HttpOnly.ToString());
        webProxyBase = "/proxy/" + app.GetAppID();
        conf.Set("hadoop.http.filter.initializers", typeof(TestAMWebApp.TestAMFilterInitializer).FullName);
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(conf);
        string hostPort = NetUtils.GetHostPortString(
            ((MRClientService)app.GetClientService()).GetWebApp().GetListenerAddress());
        Uri httpUrl = new Uri("http://" + hostPort + "/mapreduce");
        HttpURLConnection conn = (HttpURLConnection)httpUrl.OpenConnection();
        conn.SetInstanceFollowRedirects(false);
        conn.Connect();
        // The AM must answer with a 302 pointing at the proxy URL.
        string expectedURL = scheme + conf.Get(YarnConfiguration.ProxyAddress)
            + ProxyUriUtils.GetPath(app.GetAppID(), "/mapreduce");
        NUnit.Framework.Assert.AreEqual(expectedURL, conn.GetHeaderField(HttpHeaders.Location));
        NUnit.Framework.Assert.AreEqual(HttpStatus.ScMovedTemporarily, conn.GetResponseCode());
        app.WaitForState(job, JobState.Succeeded);
        app.VerifyCompleted();
    }
}
public virtual void TestKillJob()
{
    CountDownLatch latch = new CountDownLatch(1);
    // The blocking app starts the job but its single task stalls on the latch.
    MRApp app = new TestKill.BlockingMRApp(1, 0, latch);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(new Configuration());
    // Wait for the job to reach RUNNING before killing it.
    app.WaitForState(job, JobState.Running);
    // Send the kill signal to the job.
    app.GetContext().GetEventHandler().Handle(new JobEvent(job.GetID(), JobEventType.JobKill));
    // Unblock the task so the kill can propagate.
    latch.CountDown();
    app.WaitForState(job, JobState.Killed);
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    NUnit.Framework.Assert.AreEqual("No of tasks is not correct", 1, tasks.Count);
    Task onlyTask = tasks.Values.GetEnumerator().Next();
    NUnit.Framework.Assert.AreEqual("Task state not correct", TaskState.Killed,
        onlyTask.GetReport().GetTaskState());
    IDictionary<TaskAttemptId, TaskAttempt> attempts =
        tasks.Values.GetEnumerator().Next().GetAttempts();
    NUnit.Framework.Assert.AreEqual("No of attempts is not correct", 1, attempts.Count);
    IEnumerator<TaskAttempt> it = attempts.Values.GetEnumerator();
    NUnit.Framework.Assert.AreEqual("Attempt state not correct", TaskAttemptState.Killed,
        it.Next().GetReport().GetTaskAttemptState());
}
/// <summary>Builds task reports for every task of the requested type in a job.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual GetTaskReportsResponse GetTaskReports(GetTaskReportsRequest request)
{
    JobId jobId = request.GetJobId();
    TaskType taskType = request.GetTaskType();
    GetTaskReportsResponse response = this.recordFactory.NewRecordInstance<GetTaskReportsResponse>();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = this.VerifyAndGetJob(jobId, JobACL.ViewJob, true);
    ICollection<Task> tasks = job.GetTasks(taskType).Values;
    MRClientService.Log.Info("Getting task report for " + taskType + " " + jobId
        + ". Report-size will be " + tasks.Count);
    // Take lock to allow only one call, otherwise heap will blow up because
    // of counters in the report when there are multiple callers.
    lock (this.getTaskReportsLock)
    {
        foreach (Task task in tasks)
        {
            response.AddTaskReport(task.GetReport());
        }
    }
    return response;
}
public virtual void TestReduceFailureMaxPercent()
{
    // With reduce-failures-max-percent at 20%, one failed reduce out of four
    // (25%) must fail the job.
    // NOTE: the original set MapMaxAttempts twice and set ReduceMaxAttempts
    // to 2 before immediately overwriting it with 1; the dead/duplicate sets
    // are removed here — the effective configuration is unchanged.
    MRApp app = new TestFail.MockFirstFailingTaskMRApp(2, 4);
    Configuration conf = new Configuration();
    // Reduce the number of attempts so the test runs faster; a single map
    // attempt plus a 50% map-failure tolerance means no failure comes from maps.
    conf.SetInt(MRJobConfig.MapMaxAttempts, 1);
    conf.SetInt(MRJobConfig.MapFailuresMaxPercent, 50);
    conf.SetInt(MRJobConfig.ReduceFailuresMaxpercent, 20);
    conf.SetInt(MRJobConfig.ReduceMaxAttempts, 1);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(conf);
    app.WaitForState(job, JobState.Failed);
    // Setting the failure percentage to 25% (1/4 is 25) will make the job
    // successful.
    app = new TestFail.MockFirstFailingTaskMRApp(2, 4);
    conf = new Configuration();
    conf.SetInt(MRJobConfig.MapMaxAttempts, 1);
    conf.SetInt(MRJobConfig.MapFailuresMaxPercent, 50);
    conf.SetInt(MRJobConfig.ReduceFailuresMaxpercent, 25);
    conf.SetInt(MRJobConfig.ReduceMaxAttempts, 1);
    job = app.Submit(conf);
    app.WaitForState(job, JobState.Succeeded);
}
/// <summary>Absorbs one TaskAttemptStatus</summary>
/// <param name="reportedStatus">
/// the status report that we got from a task attempt
/// that we want to fold into the speculation data for this job
/// </param>
/// <param name="timestamp">
/// the time this status corresponds to. This matters
/// because statuses contain progress.
/// </param>
protected internal virtual void StatusUpdate(TaskAttemptStatusUpdateEvent.TaskAttemptStatus reportedStatus, long timestamp)
{
    string stateString = reportedStatus.taskState.ToString();
    TaskAttemptId attemptID = reportedStatus.id;
    TaskId taskID = attemptID.GetTaskId();
    // Silently drop reports for jobs or tasks the context no longer knows.
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(taskID.GetJobId());
    if (job == null)
    {
        return;
    }
    Task task = job.GetTask(taskID);
    if (task == null)
    {
        return;
    }
    // Feed the progress sample to the runtime estimator first.
    estimator.UpdateAttempt(reportedStatus, timestamp);
    if (stateString.Equals(TaskAttemptState.Running.ToString()))
    {
        // Track RUNNING attempts so they can be considered for speculation.
        runningTasks.PutIfAbsent(taskID, true);
    }
    else
    {
        runningTasks.Remove(taskID, true);
        // STARTING attempts may still transition to RUNNING, so their
        // statistics are kept; anything else is gone for good.
        if (!stateString.Equals(TaskAttemptState.Starting.ToString()))
        {
            Sharpen.Collections.Remove(runningTaskAttemptStatistics, attemptID);
        }
    }
}
public virtual void TestTaskFailWithUnusedContainer()
{
    MRApp app = new TestFail.MRAppWithFailingTaskAndUnusedContainer();
    Configuration conf = new Configuration();
    int maxAttempts = 1;
    conf.SetInt(MRJobConfig.MapMaxAttempts, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.SetBoolean(MRJobConfig.JobUbertaskEnable, false);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(conf);
    app.WaitForState(job, JobState.Running);
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    NUnit.Framework.Assert.AreEqual("Num tasks is not correct", 1, tasks.Count);
    Task onlyTask = tasks.Values.GetEnumerator().Next();
    app.WaitForState(onlyTask, TaskState.Scheduled);
    IDictionary<TaskAttemptId, TaskAttempt> attempts =
        tasks.Values.GetEnumerator().Next().GetAttempts();
    NUnit.Framework.Assert.AreEqual("Num attempts is not correct", maxAttempts, attempts.Count);
    TaskAttempt attempt = attempts.Values.GetEnumerator().Next();
    app.WaitForInternalState((TaskAttemptImpl)attempt, TaskAttemptStateInternal.Assigned);
    // Completing the container before the attempt ran must fail the attempt
    // and, with a single allowed attempt, the whole job.
    app.GetDispatcher().GetEventHandler().Handle(
        new TaskAttemptEvent(attempt.GetID(), TaskAttemptEventType.TaContainerCompleted));
    app.WaitForState(job, JobState.Failed);
}
public virtual void TestFailTask()
{
    // First attempt fails, second attempt passes, so the job succeeds.
    MRApp app = new TestFail.MockFirstFailingAttemptMRApp(1, 0);
    Configuration conf = new Configuration();
    // this test requires two task attempts, but uberization overrides max to 1
    conf.SetBoolean(MRJobConfig.JobUbertaskEnable, false);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(conf);
    app.WaitForState(job, JobState.Succeeded);
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    NUnit.Framework.Assert.AreEqual("Num tasks is not correct", 1, tasks.Count);
    Task onlyTask = tasks.Values.GetEnumerator().Next();
    NUnit.Framework.Assert.AreEqual("Task state not correct", TaskState.Succeeded,
        onlyTask.GetReport().GetTaskState());
    IDictionary<TaskAttemptId, TaskAttempt> attempts =
        tasks.Values.GetEnumerator().Next().GetAttempts();
    NUnit.Framework.Assert.AreEqual("Num attempts is not correct", 2, attempts.Count);
    // One attempt must have failed and the other succeeded, in order.
    IEnumerator<TaskAttempt> it = attempts.Values.GetEnumerator();
    NUnit.Framework.Assert.AreEqual("Attempt state not correct", TaskAttemptState.Failed,
        it.Next().GetReport().GetTaskAttemptState());
    NUnit.Framework.Assert.AreEqual("Attempt state not correct", TaskAttemptState.Succeeded,
        it.Next().GetReport().GetTaskAttemptState());
}
public virtual void TestTimedOutTask()
{
    // All task attempts time out, which must fail the job.
    MRApp app = new TestFail.TimeOutTaskMRApp(1, 0);
    Configuration conf = new Configuration();
    int maxAttempts = 2;
    conf.SetInt(MRJobConfig.MapMaxAttempts, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.SetBoolean(MRJobConfig.JobUbertaskEnable, false);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(conf);
    app.WaitForState(job, JobState.Failed);
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    NUnit.Framework.Assert.AreEqual("Num tasks is not correct", 1, tasks.Count);
    Task onlyTask = tasks.Values.GetEnumerator().Next();
    NUnit.Framework.Assert.AreEqual("Task state not correct", TaskState.Failed,
        onlyTask.GetReport().GetTaskState());
    IDictionary<TaskAttemptId, TaskAttempt> attempts =
        tasks.Values.GetEnumerator().Next().GetAttempts();
    NUnit.Framework.Assert.AreEqual("Num attempts is not correct", maxAttempts, attempts.Count);
    // Every attempt must have ended FAILED after timing out.
    foreach (TaskAttempt attempt in attempts.Values)
    {
        NUnit.Framework.Assert.AreEqual("Attempt state not correct", TaskAttemptState.Failed,
            attempt.GetReport().GetTaskAttemptState());
    }
}
public virtual void TestCommitPending()
{
    MRApp app = new MRApp(1, 0, false, this.GetType().FullName, true);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(new Configuration());
    app.WaitForState(job, JobState.Running);
    NUnit.Framework.Assert.AreEqual("Num tasks not correct", 1, job.GetTasks().Count);
    IEnumerator<Task> taskIter = job.GetTasks().Values.GetEnumerator();
    Task task = taskIter.Next();
    app.WaitForState(task, TaskState.Running);
    TaskAttempt attempt = task.GetAttempts().Values.GetEnumerator().Next();
    app.WaitForState(attempt, TaskAttemptState.Running);
    // Send the commit-pending signal to the attempt.
    app.GetContext().GetEventHandler().Handle(
        new TaskAttemptEvent(attempt.GetID(), TaskAttemptEventType.TaCommitPending));
    app.WaitForState(attempt, TaskAttemptState.CommitPending);
    // A duplicate commit-pending signal must leave the attempt at COMMIT_PENDING.
    app.GetContext().GetEventHandler().Handle(
        new TaskAttemptEvent(attempt.GetID(), TaskAttemptEventType.TaCommitPending));
    app.WaitForState(attempt, TaskAttemptState.CommitPending);
    // The done signal then lets the job finish.
    app.GetContext().GetEventHandler().Handle(
        new TaskAttemptEvent(task.GetAttempts().Values.GetEnumerator().Next().GetID(),
            TaskAttemptEventType.TaDone));
    app.WaitForState(job, JobState.Succeeded);
}
public virtual void TestZeroMapReduces()
{
    // A job with no map and no reduce tasks must still complete successfully.
    MRApp app = new MRApp(0, 0, true, this.GetType().FullName, true);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(new Configuration());
    app.WaitForState(job, JobState.Succeeded);
}
/// <summary>
/// REST endpoint: lists all attempts of a task; reduce attempts carry extra
/// shuffle/merge timing information.
/// </summary>
public virtual TaskAttemptsInfo GetJobTaskAttempts(HttpServletRequest hsr, string jid, string tid)
{
    Init();
    TaskAttemptsInfo attempts = new TaskAttemptsInfo();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = AMWebServices.GetJobFromJobIdString(jid, ctx);
    CheckAccess(job, hsr);
    Task task = AMWebServices.GetTaskFromTaskIdString(tid, job);
    foreach (TaskAttempt ta in task.GetAttempts().Values)
    {
        if (ta == null)
        {
            continue;
        }
        if (task.GetType() == TaskType.Reduce)
        {
            attempts.Add(new ReduceTaskAttemptInfo(ta, task.GetType()));
        }
        else
        {
            attempts.Add(new TaskAttemptInfo(ta, task.GetType(), false));
        }
    }
    return attempts;
}
/// <summary>
/// Builds a fully-stubbed job mock with fixed report values for rendering
/// tests (3 maps / 2 completed, 2 reduces / 1 completed, SUCCEEDED state).
/// </summary>
private Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job GetJob()
{
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    JobId jobId = new JobIdPBImpl();
    ApplicationId appId = ApplicationIdPBImpl.NewInstance(Runtime.CurrentTimeMillis(), 4);
    jobId.SetAppId(appId);
    jobId.SetId(1);
    Org.Mockito.Mockito.When(job.GetID()).ThenReturn(jobId);
    JobReport report = Org.Mockito.Mockito.Mock<JobReport>();
    Org.Mockito.Mockito.When(report.GetStartTime()).ThenReturn(100010L);
    Org.Mockito.Mockito.When(report.GetFinishTime()).ThenReturn(100015L);
    Org.Mockito.Mockito.When(job.GetReport()).ThenReturn(report);
    Org.Mockito.Mockito.When(job.GetName()).ThenReturn("JobName");
    Org.Mockito.Mockito.When(job.GetUserName()).ThenReturn("UserName");
    Org.Mockito.Mockito.When(job.GetQueueName()).ThenReturn("QueueName");
    Org.Mockito.Mockito.When(job.GetState()).ThenReturn(JobState.Succeeded);
    Org.Mockito.Mockito.When(job.GetTotalMaps()).ThenReturn(3);
    Org.Mockito.Mockito.When(job.GetCompletedMaps()).ThenReturn(2);
    Org.Mockito.Mockito.When(job.GetTotalReduces()).ThenReturn(2);
    // The original stubbed GetCompletedReduces twice; once is sufficient.
    Org.Mockito.Mockito.When(job.GetCompletedReduces()).ThenReturn(1);
    return job;
}
/// <summary>Looks up a job as the login user and enforces view-job access.</summary>
/// <exception cref="System.IO.IOException"/>
private Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job VerifyAndGetJob(JobId jobID, bool exceptionThrow)
{
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = null;
    try
    {
        // Resolve the job under the login user's credentials.
        UserGroupInformation loginUgi = UserGroupInformation.GetLoginUser();
        job = loginUgi.DoAs(new _PrivilegedExceptionAction_205(this, jobID));
    }
    catch (Exception e)
    {
        throw new IOException(e);
    }
    if (job == null && exceptionThrow)
    {
        throw new IOException("Unknown Job " + jobID);
    }
    if (job != null)
    {
        // Caller must hold view-job permission on the resolved job.
        JobACL operation = JobACL.ViewJob;
        this.CheckAccess(job, operation);
    }
    return job;
}
/// <summary>REST endpoint: returns the counters of a job.</summary>
public virtual JobCounterInfo GetJobCounters(HttpServletRequest hsr, string jid)
{
    Init();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = AMWebServices.GetJobFromJobIdString(jid, ctx);
    CheckAccess(job, hsr);
    JobCounterInfo counters = new JobCounterInfo(this.ctx, job);
    return counters;
}
/// <summary>Rejects callers that lack view permission on the given job.</summary>
private void CheckAccess(Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job, HttpServletRequest request)
{
    bool permitted = HasAccess(job, request);
    if (!permitted)
    {
        throw new WebApplicationException(Response.Status.Unauthorized);
    }
}
/// <summary>
/// Scans all jobs needing containers of the given task type and, if the
/// speculation budget allows, launches a speculative attempt for the task
/// with the highest speculation value in each job.
/// </summary>
/// <param name="type">map or reduce; selects which container-needs table to scan</param>
/// <returns>the number of speculative attempts actually added</returns>
private int MaybeScheduleASpeculation(TaskType type)
{
    int successes = 0;
    long now = clock.GetTime();
    ConcurrentMap<JobId, AtomicInteger> containerNeeds = type == TaskType.Map ? mapContainerNeeds : reduceContainerNeeds;
    foreach (KeyValuePair<JobId, AtomicInteger> jobEntry in containerNeeds)
    {
        // This race conditon is okay. If we skip a speculation attempt we
        // should have tried because the event that lowers the number of
        // containers needed to zero hasn't come through, it will next time.
        // Also, if we miss the fact that the number of containers needed was
        // zero but increased due to a failure it's not too bad to launch one
        // container prematurely.
        if (jobEntry.Value.Get() > 0)
        {
            continue;
        }
        int numberSpeculationsAlready = 0;
        int numberRunningTasks = 0;
        // loop through the tasks of the kind
        Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = context.GetJob(jobEntry.Key);
        IDictionary<TaskId, Task> tasks = job.GetTasks(type);
        // Budget: at least the configured minimum, scaled by total task count.
        int numberAllowedSpeculativeTasks = (int)Math.Max(minimumAllowedSpeculativeTasks, proportionTotalTasksSpeculatable * tasks.Count);
        TaskId bestTaskID = null;
        long bestSpeculationValue = -1L;
        // this loop is potentially pricey.
        // TODO track the tasks that are potentially worth looking at
        foreach (KeyValuePair<TaskId, Task> taskEntry in tasks)
        {
            long mySpeculationValue = SpeculationValue(taskEntry.Key, now);
            if (mySpeculationValue == AlreadySpeculating)
            {
                ++numberSpeculationsAlready;
            }
            if (mySpeculationValue != NotRunning)
            {
                ++numberRunningTasks;
            }
            // Track the single most speculation-worthy task in this job.
            if (mySpeculationValue > bestSpeculationValue)
            {
                bestTaskID = taskEntry.Key;
                bestSpeculationValue = mySpeculationValue;
            }
        }
        // The budget may also grow with the number of currently running tasks.
        numberAllowedSpeculativeTasks = (int)Math.Max(numberAllowedSpeculativeTasks, proportionRunningTasksSpeculatable * numberRunningTasks);
        // If we found a speculation target, fire it off
        if (bestTaskID != null && numberAllowedSpeculativeTasks > numberSpeculationsAlready)
        {
            AddSpeculativeAttempt(bestTaskID);
            ++successes;
        }
    }
    return(successes);
}
public virtual void TestHistoryEvents()
{
    // Run a 2-map/1-reduce job with history logging, then re-read the job
    // through the JobHistory service and verify the parsed counts.
    Configuration conf = new Configuration();
    MRApp app = new TestJobHistoryEvents.MRAppWithHistory(2, 1, true, this.GetType().FullName, true);
    app.Submit(conf);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        app.GetContext().GetAllJobs().Values.GetEnumerator().Next();
    JobId jobId = job.GetID();
    Log.Info("JOBID is " + TypeConverter.FromYarn(jobId).ToString());
    app.WaitForState(job, JobState.Succeeded);
    // make sure all events are flushed
    app.WaitForState(Service.STATE.Stopped);
    /*
     * Use HistoryContext to read logged events and verify the number of
     * completed maps
     */
    HistoryContext context = new JobHistory();
    // test start and stop states
    ((JobHistory)context).Init(conf);
    ((JobHistory)context).Start();
    NUnit.Framework.Assert.IsTrue(context.GetStartTime() > 0);
    NUnit.Framework.Assert.AreEqual(((JobHistory)context).GetServiceState(), Service.STATE.Started);
    // get job before stopping JobHistory
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job parsedJob = context.GetJob(jobId);
    // stop JobHistory
    ((JobHistory)context).Stop();
    NUnit.Framework.Assert.AreEqual(((JobHistory)context).GetServiceState(), Service.STATE.Stopped);
    NUnit.Framework.Assert.AreEqual("CompletedMaps not correct", 2, parsedJob.GetCompletedMaps());
    NUnit.Framework.Assert.AreEqual(Runtime.GetProperty("user.name"), parsedJob.GetUserName());
    IDictionary<TaskId, Task> tasks = parsedJob.GetTasks();
    NUnit.Framework.Assert.AreEqual("No of tasks not correct", 3, tasks.Count);
    foreach (Task task in tasks.Values)
    {
        VerifyTask(task);
    }
    IDictionary<TaskId, Task> maps = parsedJob.GetTasks(TaskType.Map);
    NUnit.Framework.Assert.AreEqual("No of maps not correct", 2, maps.Count);
    IDictionary<TaskId, Task> reduces = parsedJob.GetTasks(TaskType.Reduce);
    NUnit.Framework.Assert.AreEqual("No of reduces not correct", 1, reduces.Count);
    NUnit.Framework.Assert.AreEqual("CompletedReduce not correct", 1, parsedJob.GetCompletedReduces());
    // Fixed: the assertion message was split across a line break and
    // misspelled ("currect") in the original.
    NUnit.Framework.Assert.AreEqual("Job state not correct", JobState.Succeeded, parsedJob.GetState());
}
public virtual void TestMapReduce()
{
    // A 2-map/2-reduce job must run to completion under the submitting user.
    MRApp app = new MRApp(2, 2, true, this.GetType().FullName, true);
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(new Configuration());
    app.WaitForState(job, JobState.Succeeded);
    app.VerifyCompleted();
    NUnit.Framework.Assert.AreEqual(Runtime.GetProperty("user.name"), job.GetUserName());
}
/// <summary>Wraps a mock job with an ACL set that grants view access only to FriendlyUser.</summary>
public MockJobForAcls(Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job mockJob, Configuration conf)
{
    this.mockJob = mockJob;
    this.conf = conf;
    this.jobAcls = new Dictionary<JobACL, AccessControlList>();
    this.jobAcls[JobACL.ViewJob] = new AccessControlList(FriendlyUser);
    this.aclsMgr = new JobACLsManager(conf);
}
/// <summary>REST endpoint: returns info for a single task of a job.</summary>
public virtual TaskInfo GetJobTask(HttpServletRequest hsr, string jid, string tid)
{
    Init();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = AMWebServices.GetJobFromJobIdString(jid, ctx);
    CheckAccess(job, hsr);
    Task task = AMWebServices.GetTaskFromTaskIdString(tid, job);
    TaskInfo info = new TaskInfo(task);
    return info;
}
public virtual void TestNames()
{
    // Drives the MRAppMetrics counters through a fixed sequence of job/task
    // lifecycle transitions and then checks the aggregated metric values.
    // The exact order and number of calls below is what CheckMetrics pins.
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = Org.Mockito.Mockito.Mock<Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job>();
    Task mapTask = MockitoMaker.Make(MockitoMaker.Stub<Task>().Returning(TaskType.Map).from.GetType());
    Task reduceTask = MockitoMaker.Make(MockitoMaker.Stub<Task>().Returning(TaskType.Reduce).from.GetType());
    MRAppMetrics metrics = MRAppMetrics.Create();
    // Three jobs submitted and moved into the preparing state.
    metrics.SubmittedJob(job);
    metrics.WaitingTask(mapTask);
    metrics.WaitingTask(reduceTask);
    metrics.PreparingJob(job);
    metrics.SubmittedJob(job);
    metrics.WaitingTask(mapTask);
    metrics.WaitingTask(reduceTask);
    metrics.PreparingJob(job);
    metrics.SubmittedJob(job);
    metrics.WaitingTask(mapTask);
    metrics.WaitingTask(reduceTask);
    metrics.PreparingJob(job);
    metrics.EndPreparingJob(job);
    metrics.EndPreparingJob(job);
    metrics.EndPreparingJob(job);
    // Job 1: map task fails, job fails.
    metrics.RunningJob(job);
    metrics.LaunchedTask(mapTask);
    metrics.RunningTask(mapTask);
    metrics.FailedTask(mapTask);
    metrics.EndWaitingTask(reduceTask);
    metrics.EndRunningTask(mapTask);
    metrics.EndRunningJob(job);
    metrics.FailedJob(job);
    // Job 2: map task is killed, job is killed.
    metrics.RunningJob(job);
    metrics.LaunchedTask(mapTask);
    metrics.RunningTask(mapTask);
    metrics.KilledTask(mapTask);
    metrics.EndWaitingTask(reduceTask);
    metrics.EndRunningTask(mapTask);
    metrics.EndRunningJob(job);
    metrics.KilledJob(job);
    // Job 3: both tasks complete, job completes.
    metrics.RunningJob(job);
    metrics.LaunchedTask(mapTask);
    metrics.RunningTask(mapTask);
    metrics.CompletedTask(mapTask);
    metrics.EndRunningTask(mapTask);
    metrics.LaunchedTask(reduceTask);
    metrics.RunningTask(reduceTask);
    metrics.CompletedTask(reduceTask);
    metrics.EndRunningTask(reduceTask);
    metrics.EndRunningJob(job);
    metrics.CompletedJob(job);
    // Expected aggregate counts after the sequence above.
    CheckMetrics(3, 1, 1, 1, 0, 0, 3, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0);
}