public virtual void Setup() { // mocked generics
    Log.Info(">>>> " + name.GetMethodName());
    job = new JobConf();
    job.SetBoolean(MRJobConfig.ShuffleFetchRetryEnabled, false);
    jobWithRetry = new JobConf();
    jobWithRetry.SetBoolean(MRJobConfig.ShuffleFetchRetryEnabled, true);
    id = TaskAttemptID.ForName("attempt_0_1_r_1_1");
    ss = Org.Mockito.Mockito.Mock<ShuffleSchedulerImpl>();
    mm = Org.Mockito.Mockito.Mock<MergeManagerImpl>();
    r = Org.Mockito.Mockito.Mock<Reporter>();
    metrics = Org.Mockito.Mockito.Mock<ShuffleClientMetrics>();
    except = Org.Mockito.Mockito.Mock<ExceptionReporter>();
    key = JobTokenSecretManager.CreateSecretKey(new byte[] { 0, 0, 0, 0 });
    connection = Org.Mockito.Mockito.Mock<HttpURLConnection>();
    allErrs = Org.Mockito.Mockito.Mock<Counters.Counter>();
    Org.Mockito.Mockito.When(r.GetCounter(Matchers.AnyString(), Matchers.AnyString())).ThenReturn(allErrs);
    AList<TaskAttemptID> maps = new AList<TaskAttemptID>(1);
    maps.AddItem(map1ID);
    maps.AddItem(map2ID);
    Org.Mockito.Mockito.When(ss.GetMapsForHost(host)).ThenReturn(maps);
}
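// Hedged illustration (not from the source): with the stubbing above in place, the
// mocked scheduler returns both map attempts for the host, and a fetcher under test
// would drain exactly this list. Only members declared in Setup are referenced.
var pendingMaps = ss.GetMapsForHost(host); // yields [map1ID, map2ID] per the stub above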
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public override RecordReader CreateRecordReader(InputSplit split, TaskAttemptContext taskContext) {
    try {
        if (!rrCstrMap.Contains(ident)) {
            throw new IOException("No RecordReader for " + ident);
        }
        Configuration conf = GetConf(taskContext.GetConfiguration());
        TaskAttemptContext context = new TaskAttemptContextImpl(conf,
            TaskAttemptID.ForName(conf.Get(MRJobConfig.TaskAttemptId)),
            new Parser.WrappedStatusReporter(taskContext));
        return rrCstrMap[ident].NewInstance(id, inf.CreateRecordReader(split, context), cmpcl);
    } catch (MemberAccessException e) {
        throw new IOException(e);
    } catch (InstantiationException e) {
        throw new IOException(e);
    } catch (TargetInvocationException e) {
        throw new IOException(e);
    }
}
/*
 * (non-Javadoc)
 *
 * @see
 * org.apache.hadoop.mapred.MapReduceBase#configure(org.apache.hadoop.mapred
 * .JobConf)
 */
public override void Configure(JobConf conf) { // MapReduceBase
    try {
        config = new ConfigExtractor(conf);
        ConfigExtractor.DumpOptions(config);
        filesystem = config.GetBaseDirectory().GetFileSystem(conf);
    } catch (Exception e) {
        Log.Error("Unable to setup slive " + StringUtils.StringifyException(e));
        throw new RuntimeException("Unable to setup slive configuration", e);
    }
    if (conf.Get(MRJobConfig.TaskAttemptId) != null) {
        this.taskId = TaskAttemptID.ForName(conf.Get(MRJobConfig.TaskAttemptId)).GetTaskID().GetId();
    } else {
        // So that branch-1/0.20 can run this same code as well
        this.taskId = TaskAttemptID.ForName(conf.Get("mapred.task.id")).GetTaskID().GetId();
    }
}
/// <exception cref="System.Exception"/>
private string ReadStdOut(JobConf conf) {
    TaskAttemptID taskId = (TaskAttemptID)TaskAttemptID.ForName(conf.Get(MRJobConfig.TaskAttemptId));
    FilePath stdOut = TaskLog.GetTaskLogFile(taskId, false, TaskLog.LogName.Stdout);
    return ReadFile(stdOut);
}
public virtual void SetDatum(object odatum) {
    this.datum = (TaskFailed)odatum;
    this.id = TaskID.ForName(datum.taskid.ToString());
    this.taskType = TaskType.ValueOf(datum.taskType.ToString());
    this.finishTime = datum.finishTime;
    this.error = datum.error.ToString();
    this.failedDueToAttempt = datum.failedDueToAttempt == null
        ? null
        : TaskAttemptID.ForName(datum.failedDueToAttempt.ToString());
    this.status = datum.status.ToString();
    this.counters = EventReader.FromAvro(datum.counters);
}
public virtual void SetDatum(object oDatum) {
    this.datum = (TaskAttemptFinished)oDatum;
    this.attemptId = TaskAttemptID.ForName(datum.attemptId.ToString());
    this.taskType = TaskType.ValueOf(datum.taskType.ToString());
    this.taskStatus = datum.taskStatus.ToString();
    this.finishTime = datum.finishTime;
    this.rackName = datum.rackname.ToString();
    this.hostname = datum.hostname.ToString();
    this.state = datum.state.ToString();
    this.counters = EventReader.FromAvro(datum.counters);
}
public virtual void SetDatum(object oDatum) {
    this.datum = (TaskFinished)oDatum;
    this.taskid = TaskID.ForName(datum.taskid.ToString());
    if (datum.successfulAttemptId != null) {
        this.successfulAttemptId = TaskAttemptID.ForName(datum.successfulAttemptId.ToString());
    }
    this.finishTime = datum.finishTime;
    this.taskType = TaskType.ValueOf(datum.taskType.ToString());
    this.status = datum.status.ToString();
    this.counters = EventReader.FromAvro(datum.counters);
}
public virtual void SetDatum(object odatum) {
    this.datum = (TaskAttemptUnsuccessfulCompletion)odatum;
    this.attemptId = TaskAttemptID.ForName(datum.attemptId.ToString());
    this.taskType = TaskType.ValueOf(datum.taskType.ToString());
    this.finishTime = datum.finishTime;
    this.hostname = datum.hostname.ToString();
    this.rackName = datum.rackname.ToString();
    this.port = datum.port;
    this.status = datum.status.ToString();
    this.error = datum.error.ToString();
    this.counters = EventReader.FromAvro(datum.counters);
    this.clockSplits = AvroArrayUtils.FromAvro(datum.clockSplits);
    this.cpuUsages = AvroArrayUtils.FromAvro(datum.cpuUsages);
    this.vMemKbytes = AvroArrayUtils.FromAvro(datum.vMemKbytes);
    this.physMemKbytes = AvroArrayUtils.FromAvro(datum.physMemKbytes);
}
public virtual void SetDatum(object oDatum) {
    this.datum = (MapAttemptFinished)oDatum;
    this.attemptId = TaskAttemptID.ForName(datum.attemptId.ToString());
    this.taskType = TaskType.ValueOf(datum.taskType.ToString());
    this.taskStatus = datum.taskStatus.ToString();
    this.mapFinishTime = datum.mapFinishTime;
    this.finishTime = datum.finishTime;
    this.hostname = datum.hostname.ToString();
    this.rackName = datum.rackname.ToString();
    this.port = datum.port;
    this.state = datum.state.ToString();
    this.counters = EventReader.FromAvro(datum.counters);
    this.clockSplits = AvroArrayUtils.FromAvro(datum.clockSplits);
    this.cpuUsages = AvroArrayUtils.FromAvro(datum.cpuUsages);
    this.vMemKbytes = AvroArrayUtils.FromAvro(datum.vMemKbytes);
    this.physMemKbytes = AvroArrayUtils.FromAvro(datum.physMemKbytes);
}
/// <summary>clean previous std error and outs</summary>
private void InitStdOut(JobConf configuration) {
    TaskAttemptID taskId = (TaskAttemptID)TaskAttemptID.ForName(configuration.Get(MRJobConfig.TaskAttemptId));
    FilePath stdOut = TaskLog.GetTaskLogFile(taskId, false, TaskLog.LogName.Stdout);
    FilePath stdErr = TaskLog.GetTaskLogFile(taskId, false, TaskLog.LogName.Stderr);
    // prepare folder
    if (!stdOut.GetParentFile().Exists()) {
        stdOut.GetParentFile().Mkdirs();
    } else {
        // clean logs
        stdOut.DeleteOnExit();
        stdErr.DeleteOnExit();
    }
}
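// Hedged sketch (not from the source) tying the two log helpers together, ReadStdOut
// above and InitStdOut here; the attempt id string is a hypothetical value in the
// same format used by the Setup method at the top of this listing.
JobConf logConf = new JobConf();
logConf.Set(MRJobConfig.TaskAttemptId, "attempt_0_1_r_1_1"); // hypothetical attempt id
InitStdOut(logConf);                     // clear any stdout/stderr left by a previous run
// ... run the task under test ...
string taskStdOut = ReadStdOut(logConf); // read back what the child wrote to stdout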
/// <exception cref="System.Exception"/>
private void TestProfilerInternal(bool useDefault) {
    if (!(new FilePath(MiniMRYarnCluster.Appjar)).Exists()) {
        Log.Info("MRAppJar " + MiniMRYarnCluster.Appjar + " not found. Not running test.");
        return;
    }
    SleepJob sleepJob = new SleepJob();
    JobConf sleepConf = new JobConf(mrCluster.GetConfig());
    sleepConf.SetProfileEnabled(true);
    sleepConf.SetProfileTaskRange(true, ProfiledTaskId.ToString());
    sleepConf.SetProfileTaskRange(false, ProfiledTaskId.ToString());
    if (!useDefault) {
        // use hprof for map to profile.out
        sleepConf.Set(MRJobConfig.TaskMapProfileParams,
            "-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n," + "file=%s");
        // use Xprof for reduce to stdout
        sleepConf.Set(MRJobConfig.TaskReduceProfileParams, "-Xprof");
    }
    sleepJob.SetConf(sleepConf);
    // 2-map-2-reduce SleepJob
    Job job = sleepJob.CreateJob(2, 2, 500, 1, 500, 1);
    job.SetJarByClass(typeof(SleepJob));
    job.AddFileToClassPath(AppJar); // The AppMaster jar itself.
    job.WaitForCompletion(true);
    JobId jobId = TypeConverter.ToYarn(job.GetJobID());
    ApplicationId appID = jobId.GetAppId();
    int pollElapsed = 0;
    while (true) {
        Sharpen.Thread.Sleep(1000);
        pollElapsed += 1000;
        if (TerminalRmAppStates.Contains(mrCluster.GetResourceManager().GetRMContext().GetRMApps()[appID].GetState())) {
            break;
        }
        if (pollElapsed >= 60000) {
            Log.Warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    NUnit.Framework.Assert.AreEqual(RMAppState.Finished,
        mrCluster.GetResourceManager().GetRMContext().GetRMApps()[appID].GetState());
    // Job finished, verify logs
    Configuration nmConf = mrCluster.GetNodeManager(0).GetConfig();
    string appIdStr = appID.ToString();
    string appIdSuffix = Sharpen.Runtime.Substring(appIdStr, "application_".Length, appIdStr.Length);
    string containerGlob = "container_" + appIdSuffix + "_*_*";
    IDictionary<TaskAttemptID, Path> taLogDirs = new Dictionary<TaskAttemptID, Path>();
    Sharpen.Pattern taskPattern = Sharpen.Pattern.Compile(".*Task:(attempt_" + appIdSuffix + "_[rm]_" + "[0-9]+_[0-9]+).*");
    foreach (string logDir in nmConf.GetTrimmedStrings(YarnConfiguration.NmLogDirs)) {
        // filter out MRAppMaster and create attemptId->logDir map
        foreach (FileStatus fileStatus in localFs.GlobStatus(new Path(logDir + Path.Separator
            + appIdStr + Path.Separator + containerGlob + Path.Separator + TaskLog.LogName.Syslog))) {
            BufferedReader br = new BufferedReader(new InputStreamReader(localFs.Open(fileStatus.GetPath())));
            string line;
            while ((line = br.ReadLine()) != null) {
                Matcher m = taskPattern.Matcher(line);
                if (m.Matches()) {
                    // found Task done message
                    taLogDirs[TaskAttemptID.ForName(m.Group(1))] = fileStatus.GetPath().GetParent();
                    break;
                }
            }
            br.Close();
        }
    }
    NUnit.Framework.Assert.AreEqual(4, taLogDirs.Count); // all 4 attempts found
    foreach (KeyValuePair<TaskAttemptID, Path> dirEntry in taLogDirs) {
        TaskAttemptID tid = dirEntry.Key;
        Path profilePath = new Path(dirEntry.Value, TaskLog.LogName.Profile.ToString());
        Path stdoutPath = new Path(dirEntry.Value, TaskLog.LogName.Stdout.ToString());
        if (useDefault || tid.GetTaskType() == TaskType.Map) {
            if (tid.GetTaskID().GetId() == ProfiledTaskId) {
                // verify profile.out
                BufferedReader br = new BufferedReader(new InputStreamReader(localFs.Open(profilePath)));
                string line = br.ReadLine();
                NUnit.Framework.Assert.IsTrue("No hprof content found!",
                    line != null && line.StartsWith("JAVA PROFILE"));
                br.Close();
                NUnit.Framework.Assert.AreEqual(0L, localFs.GetFileStatus(stdoutPath).GetLen());
            } else {
                NUnit.Framework.Assert.IsFalse("hprof file should not exist", localFs.Exists(profilePath));
            }
        } else {
            NUnit.Framework.Assert.IsFalse("hprof file should not exist", localFs.Exists(profilePath));
            if (tid.GetTaskID().GetId() == ProfiledTaskId) {
                // reducer is profiled with Xprof
                BufferedReader br = new BufferedReader(new InputStreamReader(localFs.Open(stdoutPath)));
                bool flatProfFound = false;
                string line;
                while ((line = br.ReadLine()) != null) {
                    if (line.StartsWith("Flat profile")) {
                        flatProfFound = true;
                        break;
                    }
                }
                br.Close();
                NUnit.Framework.Assert.IsTrue("Xprof flat profile not found!", flatProfFound);
            } else {
                NUnit.Framework.Assert.AreEqual(0L, localFs.GetFileStatus(stdoutPath).GetLen());
            }
        }
    }
}
/// <summary>Start the child process to handle the task for us.</summary>
/// <param name="conf">the task's configuration</param>
/// <param name="recordReader">the fake record reader to update progress with</param>
/// <param name="output">the collector to send output to</param>
/// <param name="reporter">the reporter for the task</param>
/// <param name="outputKeyClass">the class of the output keys</param>
/// <param name="outputValueClass">the class of the output values</param>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
internal Application(JobConf conf, RecordReader<FloatWritable, NullWritable> recordReader,
    OutputCollector<K2, V2> output, Reporter reporter, Type outputKeyClass, Type outputValueClass) {
    serverSocket = Sharpen.Extensions.CreateServerSocket(0);
    IDictionary<string, string> env = new Dictionary<string, string>();
    // add TMPDIR environment variable with the value of java.io.tmpdir
    env["TMPDIR"] = Runtime.GetProperty("java.io.tmpdir");
    env[Submitter.Port] = Sharpen.Extensions.ToString(serverSocket.GetLocalPort());
    // Add token to the environment if security is enabled
    Org.Apache.Hadoop.Security.Token.Token<JobTokenIdentifier> jobToken =
        TokenCache.GetJobToken(conf.GetCredentials());
    // This password is used as shared secret key between this application and
    // child pipes process
    byte[] password = jobToken.GetPassword();
    string localPasswordFile = new FilePath(".") + Path.Separator + "jobTokenPassword";
    WritePasswordToLocalFile(localPasswordFile, password, conf);
    env["hadoop.pipes.shared.secret.location"] = localPasswordFile;
    IList<string> cmd = new AList<string>();
    string interpretor = conf.Get(Submitter.Interpretor);
    if (interpretor != null) {
        cmd.AddItem(interpretor);
    }
    string executable = DistributedCache.GetLocalCacheFiles(conf)[0].ToString();
    if (!FileUtil.CanExecute(new FilePath(executable))) {
        // LinuxTaskController sets +x permissions on all distcache files already.
        // In case of DefaultTaskController, set permissions here.
        FileUtil.Chmod(executable, "u+x");
    }
    cmd.AddItem(executable);
    // wrap the command in a stdout/stderr capture
    // we are starting map/reduce task of the pipes job. this is not a cleanup
    // attempt.
    TaskAttemptID taskid = (TaskAttemptID)TaskAttemptID.ForName(conf.Get(MRJobConfig.TaskAttemptId));
    FilePath stdout = TaskLog.GetTaskLogFile(taskid, false, TaskLog.LogName.Stdout);
    FilePath stderr = TaskLog.GetTaskLogFile(taskid, false, TaskLog.LogName.Stderr);
    long logLength = TaskLog.GetTaskLogLength(conf);
    cmd = TaskLog.CaptureOutAndError(null, cmd, stdout, stderr, logLength, false);
    process = RunClient(cmd, env);
    clientSocket = serverSocket.Accept();
    string challenge = GetSecurityChallenge();
    string digestToSend = CreateDigest(password, challenge);
    string digestExpected = CreateDigest(password, digestToSend);
    handler = new OutputHandler<K2, V2>(output, reporter, recordReader, digestExpected);
    K2 outputKey = (K2)ReflectionUtils.NewInstance(outputKeyClass, conf);
    V2 outputValue = (V2)ReflectionUtils.NewInstance(outputValueClass, conf);
    downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, outputKey, outputValue, conf);
    downlink.Authenticate(digestToSend, challenge);
    WaitForAuthentication();
    Log.Debug("Authentication succeeded");
    downlink.Start();
    downlink.SetJobConf(conf);
}
/// <exception cref="System.IO.IOException"/>
private TaskAttemptID[] CopyMapOutput(MapHost host, DataInputStream input,
    ICollection<TaskAttemptID> remaining, bool canRetry) {
    MapOutput<K, V> mapOutput = null;
    TaskAttemptID mapId = null;
    long decompressedLength = -1;
    long compressedLength = -1;
    try {
        long startTime = Time.MonotonicNow();
        int forReduce = -1;
        // Read the shuffle header
        try {
            ShuffleHeader header = new ShuffleHeader();
            header.ReadFields(input);
            mapId = TaskAttemptID.ForName(header.mapId);
            compressedLength = header.compressedLength;
            decompressedLength = header.uncompressedLength;
            forReduce = header.forReduce;
        } catch (ArgumentException e) {
            badIdErrs.Increment(1);
            Log.Warn("Invalid map id ", e);
            // Don't know which one was bad, so consider all of them as bad
            return Sharpen.Collections.ToArray(remaining, new TaskAttemptID[remaining.Count]);
        }
        InputStream @is = input;
        @is = CryptoUtils.WrapIfNecessary(jobConf, @is, compressedLength);
        compressedLength -= CryptoUtils.CryptoPadding(jobConf);
        decompressedLength -= CryptoUtils.CryptoPadding(jobConf);
        // Do some basic sanity verification
        if (!VerifySanity(compressedLength, decompressedLength, forReduce, remaining, mapId)) {
            return new TaskAttemptID[] { mapId };
        }
        if (Log.IsDebugEnabled()) {
            Log.Debug("header: " + mapId + ", len: " + compressedLength + ", decomp len: " + decompressedLength);
        }
        // Get the location for the map output - either in-memory or on-disk
        try {
            mapOutput = merger.Reserve(mapId, decompressedLength, id);
        } catch (IOException ioe) {
            // kill this reduce attempt
            ioErrs.Increment(1);
            scheduler.ReportLocalError(ioe);
            return EmptyAttemptIdArray;
        }
        // Check if we can shuffle *now* ...
        if (mapOutput == null) {
            Log.Info("fetcher#" + id + " - MergeManager returned status WAIT ...");
            // Not an error but wait to process data.
            return EmptyAttemptIdArray;
        }
        // The codec for lz0,lz4,snappy,bz2,etc. throw java.lang.InternalError
        // on decompression failures. Catching and re-throwing as IOException
        // to allow fetch failure logic to be processed
        try {
            // Go!
            Log.Info("fetcher#" + id + " about to shuffle output of map " + mapOutput.GetMapId()
                + " decomp: " + decompressedLength + " len: " + compressedLength + " to " + mapOutput.GetDescription());
            mapOutput.Shuffle(host, @is, compressedLength, decompressedLength, metrics, reporter);
        } catch (InternalError e) {
            Log.Warn("Failed to shuffle for fetcher#" + id, e);
            throw new IOException(e);
        }
        // Inform the shuffle scheduler
        long endTime = Time.MonotonicNow();
        // Reset retryStartTime as map task make progress if retried before.
        retryStartTime = 0;
        scheduler.CopySucceeded(mapId, host, compressedLength, startTime, endTime, mapOutput);
        // Note successful shuffle
        remaining.Remove(mapId);
        metrics.SuccessFetch();
        return null;
    } catch (IOException ioe) {
        if (mapOutput != null) {
            mapOutput.Abort();
        }
        if (canRetry) {
            CheckTimeoutOrRetry(host, ioe);
        }
        ioErrs.Increment(1);
        if (mapId == null || mapOutput == null) {
            Log.Warn("fetcher#" + id + " failed to read map header" + mapId + " decomp: "
                + decompressedLength + ", " + compressedLength, ioe);
            if (mapId == null) {
                return Sharpen.Collections.ToArray(remaining, new TaskAttemptID[remaining.Count]);
            } else {
                return new TaskAttemptID[] { mapId };
            }
        }
        Log.Warn("Failed to shuffle output of " + mapId + " from " + host.GetHostName(), ioe);
        // Inform the shuffle-scheduler
        metrics.FailedFetch();
        return new TaskAttemptID[] { mapId };
    }
}
public static TaskAttemptId ToTaskAttemptID(string taid) {
    return TypeConverter.ToYarn(TaskAttemptID.ForName(taid));
}
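// Hedged call-site sketch (not from the source): converting a serialized MapReduce
// attempt id string into the YARN-side TaskAttemptId. The id value is hypothetical
// but follows the "attempt_<cluster>_<job>_<m|r>_<task>_<attempt>" layout used
// elsewhere in this listing.
TaskAttemptId yarnAttemptId = ToTaskAttemptID("attempt_0_1_r_1_1");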
/// <exception cref="System.Exception"/>
public virtual int Run(string[] argv) {
    int exitCode = -1;
    if (argv.Length < 1) {
        DisplayUsage(string.Empty);
        return exitCode;
    }
    // process arguments
    string cmd = argv[0];
    string submitJobFile = null;
    string jobid = null;
    string taskid = null;
    string historyFile = null;
    string counterGroupName = null;
    string counterName = null;
    JobPriority jp = null;
    string taskType = null;
    string taskState = null;
    int fromEvent = 0;
    int nEvents = 0;
    bool getStatus = false;
    bool getCounter = false;
    bool killJob = false;
    bool listEvents = false;
    bool viewHistory = false;
    bool viewAllHistory = false;
    bool listJobs = false;
    bool listAllJobs = false;
    bool listActiveTrackers = false;
    bool listBlacklistedTrackers = false;
    bool displayTasks = false;
    bool killTask = false;
    bool failTask = false;
    bool setJobPriority = false;
    bool logs = false;
    if ("-submit".Equals(cmd)) {
        if (argv.Length != 2) {
            DisplayUsage(cmd);
            return exitCode;
        }
        submitJobFile = argv[1];
    } else if ("-status".Equals(cmd)) {
        if (argv.Length != 2) {
            DisplayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        getStatus = true;
    } else if ("-counter".Equals(cmd)) {
        if (argv.Length != 4) {
            DisplayUsage(cmd);
            return exitCode;
        }
        getCounter = true;
        jobid = argv[1];
        counterGroupName = argv[2];
        counterName = argv[3];
    } else if ("-kill".Equals(cmd)) {
        if (argv.Length != 2) {
            DisplayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        killJob = true;
    } else if ("-set-priority".Equals(cmd)) {
        if (argv.Length != 3) {
            DisplayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        try {
            jp = JobPriority.ValueOf(argv[2]);
        } catch (ArgumentException iae) {
            Log.Info(iae);
            DisplayUsage(cmd);
            return exitCode;
        }
        setJobPriority = true;
    } else if ("-events".Equals(cmd)) {
        if (argv.Length != 4) {
            DisplayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        fromEvent = System.Convert.ToInt32(argv[2]);
        nEvents = System.Convert.ToInt32(argv[3]);
        listEvents = true;
    } else if ("-history".Equals(cmd)) {
        if (argv.Length != 2 && !(argv.Length == 3 && "all".Equals(argv[1]))) {
            DisplayUsage(cmd);
            return exitCode;
        }
        viewHistory = true;
        if (argv.Length == 3 && "all".Equals(argv[1])) {
            viewAllHistory = true;
            historyFile = argv[2];
        } else {
            historyFile = argv[1];
        }
    } else if ("-list".Equals(cmd)) {
        if (argv.Length != 1 && !(argv.Length == 2 && "all".Equals(argv[1]))) {
            DisplayUsage(cmd);
            return exitCode;
        }
        if (argv.Length == 2 && "all".Equals(argv[1])) {
            listAllJobs = true;
        } else {
            listJobs = true;
        }
    } else if ("-kill-task".Equals(cmd)) {
        if (argv.Length != 2) {
            DisplayUsage(cmd);
            return exitCode;
        }
        killTask = true;
        taskid = argv[1];
    } else if ("-fail-task".Equals(cmd)) {
        if (argv.Length != 2) {
            DisplayUsage(cmd);
            return exitCode;
        }
        failTask = true;
        taskid = argv[1];
    } else if ("-list-active-trackers".Equals(cmd)) {
        if (argv.Length != 1) {
            DisplayUsage(cmd);
            return exitCode;
        }
        listActiveTrackers = true;
    } else if ("-list-blacklisted-trackers".Equals(cmd)) {
        if (argv.Length != 1) {
            DisplayUsage(cmd);
            return exitCode;
        }
        listBlacklistedTrackers = true;
    } else if ("-list-attempt-ids".Equals(cmd)) {
        if (argv.Length != 4) {
            DisplayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        taskType = argv[2];
        taskState = argv[3];
        displayTasks = true;
        if (!taskTypes.Contains(StringUtils.ToUpperCase(taskType))) {
            System.Console.Out.WriteLine("Error: Invalid task-type: " + taskType);
            DisplayUsage(cmd);
            return exitCode;
        }
        if (!taskStates.Contains(StringUtils.ToLowerCase(taskState))) {
            System.Console.Out.WriteLine("Error: Invalid task-state: " + taskState);
            DisplayUsage(cmd);
            return exitCode;
        }
    } else if ("-logs".Equals(cmd)) {
        if (argv.Length == 2 || argv.Length == 3) {
            logs = true;
            jobid = argv[1];
            if (argv.Length == 3) {
                taskid = argv[2];
            } else {
                taskid = null;
            }
        } else {
            DisplayUsage(cmd);
            return exitCode;
        }
    } else {
        DisplayUsage(cmd);
        return exitCode;
    }
    // initialize cluster
    cluster = CreateCluster();
    // Submit the request
    try {
        if (submitJobFile != null) {
            Job job = Job.GetInstance(new JobConf(submitJobFile));
            job.Submit();
            System.Console.Out.WriteLine("Created job " + job.GetJobID());
            exitCode = 0;
        } else if (getStatus) {
            Job job = cluster.GetJob(JobID.ForName(jobid));
            if (job == null) {
                System.Console.Out.WriteLine("Could not find job " + jobid);
            } else {
                Counters counters = job.GetCounters();
                System.Console.Out.WriteLine();
                System.Console.Out.WriteLine(job);
                if (counters != null) {
                    System.Console.Out.WriteLine(counters);
                } else {
                    System.Console.Out.WriteLine("Counters not available. Job is retired.");
                }
                exitCode = 0;
            }
        } else if (getCounter) {
            Job job = cluster.GetJob(JobID.ForName(jobid));
            if (job == null) {
                System.Console.Out.WriteLine("Could not find job " + jobid);
            } else {
                Counters counters = job.GetCounters();
                if (counters == null) {
                    System.Console.Out.WriteLine("Counters not available for retired job " + jobid);
                    exitCode = -1;
                } else {
                    System.Console.Out.WriteLine(GetCounter(counters, counterGroupName, counterName));
                    exitCode = 0;
                }
            }
        } else if (killJob) {
            Job job = cluster.GetJob(JobID.ForName(jobid));
            if (job == null) {
                System.Console.Out.WriteLine("Could not find job " + jobid);
            } else {
                JobStatus jobStatus = job.GetStatus();
                if (jobStatus.GetState() == JobStatus.State.Failed) {
                    System.Console.Out.WriteLine("Could not mark the job " + jobid + " as killed, as it has already failed.");
                    exitCode = -1;
                } else if (jobStatus.GetState() == JobStatus.State.Killed) {
                    System.Console.Out.WriteLine("The job " + jobid + " has already been killed.");
                    exitCode = -1;
                } else if (jobStatus.GetState() == JobStatus.State.Succeeded) {
                    System.Console.Out.WriteLine("Could not kill the job " + jobid + ", as it has already succeeded.");
                    exitCode = -1;
                } else {
                    job.KillJob();
                    System.Console.Out.WriteLine("Killed job " + jobid);
                    exitCode = 0;
                }
            }
        } else if (setJobPriority) {
            Job job = cluster.GetJob(JobID.ForName(jobid));
            if (job == null) {
                System.Console.Out.WriteLine("Could not find job " + jobid);
            } else {
                job.SetPriority(jp);
                System.Console.Out.WriteLine("Changed job priority.");
                exitCode = 0;
            }
        } else if (viewHistory) {
            ViewHistory(historyFile, viewAllHistory);
            exitCode = 0;
        } else if (listEvents) {
            ListEvents(cluster.GetJob(JobID.ForName(jobid)), fromEvent, nEvents);
            exitCode = 0;
        } else if (listJobs) {
            ListJobs(cluster);
            exitCode = 0;
        } else if (listAllJobs) {
            ListAllJobs(cluster);
            exitCode = 0;
        } else if (listActiveTrackers) {
            ListActiveTrackers(cluster);
            exitCode = 0;
        } else if (listBlacklistedTrackers) {
            ListBlacklistedTrackers(cluster);
            exitCode = 0;
        } else if (displayTasks) {
            DisplayTasks(cluster.GetJob(JobID.ForName(jobid)), taskType, taskState);
            exitCode = 0;
        } else if (killTask) {
            TaskAttemptID taskID = TaskAttemptID.ForName(taskid);
            Job job = cluster.GetJob(taskID.GetJobID());
            if (job == null) {
                System.Console.Out.WriteLine("Could not find job " + jobid);
            } else if (job.KillTask(taskID, false)) {
                System.Console.Out.WriteLine("Killed task " + taskid);
                exitCode = 0;
            } else {
                System.Console.Out.WriteLine("Could not kill task " + taskid);
                exitCode = -1;
            }
        } else if (failTask) {
            TaskAttemptID taskID = TaskAttemptID.ForName(taskid);
            Job job = cluster.GetJob(taskID.GetJobID());
            if (job == null) {
                System.Console.Out.WriteLine("Could not find job " + jobid);
            } else if (job.KillTask(taskID, true)) {
                System.Console.Out.WriteLine("Killed task " + taskID + " by failing it");
                exitCode = 0;
            } else {
                System.Console.Out.WriteLine("Could not fail task " + taskid);
                exitCode = -1;
            }
        } else if (logs) {
            try {
                JobID jobID = JobID.ForName(jobid);
                TaskAttemptID taskAttemptID = TaskAttemptID.ForName(taskid);
                LogParams logParams = cluster.GetLogParams(jobID, taskAttemptID);
                LogCLIHelpers logDumper = new LogCLIHelpers();
                logDumper.SetConf(GetConf());
                exitCode = logDumper.DumpAContainersLogs(logParams.GetApplicationId(),
                    logParams.GetContainerId(), logParams.GetNodeId(), logParams.GetOwner());
            } catch (IOException e) {
                if (e is RemoteException) {
                    throw;
                }
                System.Console.Out.WriteLine(e.Message);
            }
        }
    } catch (RemoteException re) {
        IOException unwrappedException = re.UnwrapRemoteException();
        if (unwrappedException is AccessControlException) {
            System.Console.Out.WriteLine(unwrappedException.Message);
        } else {
            throw;
        }
    } finally {
        cluster.Close();
    }
    return exitCode;
}
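// Hedged usage sketch (not from the source): the same Run method can be driven
// programmatically, for example to kill a single attempt. The attempt id below is
// the hypothetical one reused throughout this listing.
int rc = Run(new string[] { "-kill-task", "attempt_0_1_r_1_1" });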
/// <summary>Get the attempt id</summary>
public virtual TaskAttemptID GetTaskAttemptId() {
    return TaskAttemptID.ForName(datum.attemptId.ToString());
}
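// Hedged sketch of the round trip this accessor relies on: TaskAttemptID.ForName
// parses the canonical string form, and the component accessors used elsewhere in
// this listing read it back. The id value itself is hypothetical.
TaskAttemptID parsed = TaskAttemptID.ForName("attempt_0_1_r_1_1");
int taskNumber = parsed.GetTaskID().GetId(); // task index within the job
TaskType kind = parsed.GetTaskType();        // map or reduce
JobID owningJob = parsed.GetJobID();         // job the attempt belongs to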
/// <summary>
/// <inheritDoc/>
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual RecordWriter<K, V> GetRecordWriter(FileSystem filesystem, JobConf job, string name, Progressable progress) {
    RecordWriter<K, V> w = base.GetRecordWriter(new TaskAttemptContextImpl(job,
        TaskAttemptID.ForName(job.Get(MRJobConfig.TaskAttemptId))));
    DBOutputFormat.DBRecordWriter writer = (DBOutputFormat.DBRecordWriter)w;
    try {
        return new DBOutputFormat.DBRecordWriter(this, writer.GetConnection(), writer.GetStatement());
    } catch (SQLException se) {
        throw new IOException(se);
    }
}