/// <summary>
/// Handles a container-request event by "allocating" the uber-AM's own
/// container to the requesting task attempt (no real allocation happens;
/// the task runs inside the AM's container).
/// </summary>
/// <param name="event">the allocator event; only ContainerReq is acted on</param>
public virtual void Handle(ContainerAllocatorEvent @event)
{
    if (@event.GetType() != ContainerAllocator.EventType.ContainerReq)
    {
        return;
    }
    Log.Info("Processing the event " + @event.ToString());
    // Reuse the AM's own container id for the "assigned" container,
    // since the subtask will execute in this process.
    ContainerId assignedId = ContainerId.NewContainerId(
        GetContext().GetApplicationAttemptId(), this.containerId.GetContainerId());
    Container assigned = recordFactory.NewRecordInstance<Container>();
    assigned.SetId(assignedId);
    assigned.SetNodeId(NodeId.NewInstance(this.nmHost, this.nmPort));
    // No real NM token is involved for an in-process "container".
    assigned.SetContainerToken(null);
    assigned.SetNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort);
    if (@event.GetAttemptID().GetTaskId().GetTaskType() == TaskType.Map)
    {
        JobCounterUpdateEvent counterUpdate = new JobCounterUpdateEvent(
            @event.GetAttemptID().GetTaskId().GetJobId());
        // TODO: OTHER_LOCAL_MAP is used as a catch-all classification for now.
        counterUpdate.AddCounterUpdate(JobCounter.OtherLocalMaps, 1);
        eventHandler.Handle(counterUpdate);
    }
    // Move the task attempt's state machine forward with the assignment.
    eventHandler.Handle(new TaskAttemptContainerAssignedEvent(
        @event.GetAttemptID(), assigned, applicationACLs));
}
/// <summary>
/// Runs a single uber-subtask in-process: reports the "container" as
/// launched, updates ubertask counters, executes the subtask, and on
/// failure simulates a container failure so the attempt's state machine
/// proceeds to FAILED. Always removes the attempt's future when done.
/// </summary>
/// <param name="launchEv">carries the attempt id and the classic mapred task</param>
/// <param name="localMapFiles">map-output files shared with local reduces</param>
private void RunTask(ContainerRemoteLaunchEvent launchEv,
    IDictionary<TaskAttemptID, MapOutputFile> localMapFiles)
{
    TaskAttemptId attemptId = launchEv.GetTaskAttemptID();
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job =
        this._enclosing.context.GetAllJobs()[attemptId.GetTaskId().GetJobId()];
    int mapCount = job.GetTotalMaps();
    int reduceCount = job.GetTotalReduces();
    // YARN (tracking) task:
    Task trackingTask = job.GetTask(attemptId.GetTaskId());
    // classic mapred task — must be fetched BEFORE the launched event below,
    // because that event nukes the remote task on the attempt.
    Task classicTask = launchEv.GetRemoteTask();
    // After "launching", move the attempt from ASSIGNED to RUNNING. There is
    // no task tracker to talk to — shuffle goes through local files — so the
    // port is reported as -1.
    this._enclosing.context.GetEventHandler().Handle(
        new TaskAttemptContainerLaunchedEvent(attemptId, -1));
    if (mapCount == 0)
    {
        this.doneWithMaps = true;
    }
    try
    {
        if (classicTask.IsMapOrReduce())
        {
            JobCounterUpdateEvent launchedCounters =
                new JobCounterUpdateEvent(attemptId.GetTaskId().GetJobId());
            launchedCounters.AddCounterUpdate(JobCounter.TotalLaunchedUbertasks, 1);
            launchedCounters.AddCounterUpdate(
                classicTask.IsMapTask()
                    ? JobCounter.NumUberSubmaps
                    : JobCounter.NumUberSubreduces,
                1);
            this._enclosing.context.GetEventHandler().Handle(launchedCounters);
        }
        this.RunSubtask(classicTask, trackingTask.GetType(), attemptId, mapCount,
            reduceCount > 0, localMapFiles);
    }
    catch (RuntimeException)
    {
        JobCounterUpdateEvent failedCounters =
            new JobCounterUpdateEvent(attemptId.GetTaskId().GetJobId());
        failedCounters.AddCounterUpdate(JobCounter.NumFailedUbertasks, 1);
        this._enclosing.context.GetEventHandler().Handle(failedCounters);
        // The subtask failed: simulate a failed JVM/container so the attempt
        // moves RUNNING -> FAIL_CONTAINER_CLEANUP (and ultimately FAILED).
        this._enclosing.context.GetEventHandler().Handle(
            new TaskAttemptEvent(attemptId, TaskAttemptEventType.TaContainerCompleted));
    }
    catch (IOException ioe)
    {
        // If the umbilical itself barfs (in the error-handler of runSubMap),
        // we're pretty much hosed — do what YarnChild main() does.
        LocalContainerLauncher.Log.Fatal("oopsie... this can never happen: "
            + StringUtils.StringifyException(ioe));
        ExitUtil.Terminate(-1);
    }
    finally
    {
        // Drop this attempt's future regardless of outcome.
        if (Sharpen.Collections.Remove(this.futures, attemptId) != null)
        {
            LocalContainerLauncher.Log.Info("removed attempt " + attemptId
                + " from the futures to keep track of");
        }
    }
}