/// <summary>
/// Builds an <c>Nfs3Metrics</c> source for the given gateway and registers it with the
/// default metrics system.
/// </summary>
/// <param name="conf">configuration supplying the session id and percentile intervals</param>
/// <param name="gatewayName">name the gateway's metrics are published under</param>
/// <returns>the registered <c>Nfs3Metrics</c> instance</returns>
public static Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3Metrics Create(Configuration conf
	, string gatewayName)
{
	// The session id tags every emitted record so runs can be correlated.
	string sessionId = conf.Get(DFSConfigKeys.DfsMetricsSessionIdKey);
	MetricsSystem metricsSystem = DefaultMetricsSystem.Instance();
	JvmMetrics jvmMetrics = JvmMetrics.Create(gatewayName, sessionId, metricsSystem);
	// Percentile measurement is [50th,75th,90th,95th,99th] currently
	int[] percentileIntervals = conf.GetInts(NfsConfigKeys.NfsMetricsPercentilesIntervalsKey);
	Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3Metrics metrics =
		new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3Metrics(gatewayName, sessionId, percentileIntervals, jvmMetrics);
	return metricsSystem.Register(metrics);
}
/// <summary>
/// Builds a <c>NameNodeMetrics</c> source named after the namenode role and registers it
/// with the default metrics system.
/// </summary>
/// <param name="conf">configuration supplying the session id and percentile intervals</param>
/// <param name="r">the namenode role; its string form becomes the process name tag</param>
/// <returns>the registered <c>NameNodeMetrics</c> instance</returns>
public static Org.Apache.Hadoop.Hdfs.Server.Namenode.Metrics.NameNodeMetrics Create
	(Configuration conf, HdfsServerConstants.NamenodeRole r)
{
	string sessionId = conf.Get(DFSConfigKeys.DfsMetricsSessionIdKey);
	string processName = r.ToString();
	MetricsSystem metricsSystem = DefaultMetricsSystem.Instance();
	JvmMetrics jvmMetrics = JvmMetrics.Create(processName, sessionId, metricsSystem);
	// Percentile measurement is off by default, by watching no intervals
	int[] percentileIntervals = conf.GetInts(DFSConfigKeys.DfsMetricsPercentilesIntervalsKey);
	Org.Apache.Hadoop.Hdfs.Server.Namenode.Metrics.NameNodeMetrics metrics =
		new Org.Apache.Hadoop.Hdfs.Server.Namenode.Metrics.NameNodeMetrics(processName, sessionId, percentileIntervals, jvmMetrics);
	return metricsSystem.Register(metrics);
}
/// <summary>
/// Sets up metrics for the local job runner: starts JVM metrics and creates the
/// "jobtracker" record (the legacy name, kept for compatibility) in the "mapred"
/// context, registering this instance as its periodic updater.
/// </summary>
/// <param name="conf">job configuration supplying the session id</param>
public LocalJobRunnerMetrics(JobConf conf)
{
	string session = conf.GetSessionId();
	// Initiate JVM metrics; publish under the JobTracker name for compatibility.
	JvmMetrics.Init("JobTracker", session);
	MetricsContext metricsContext = MetricsUtil.GetContext("mapred");
	// Record name is "jobtracker" so existing dashboards keep working.
	metricsRecord = MetricsUtil.CreateRecord(metricsContext, "jobtracker");
	metricsRecord.SetTag("sessionId", session);
	// This object will be called back on each metrics period.
	metricsContext.RegisterUpdater(this);
}
/// <summary>
/// Builds a <c>DataNodeMetrics</c> source for the given datanode and registers it with
/// the default metrics system under a "DataNodeActivity-..." name.
/// </summary>
/// <param name="conf">configuration supplying the session id and percentile intervals</param>
/// <param name="dnName">datanode name; when empty a random placeholder name is used</param>
/// <returns>the registered <c>DataNodeMetrics</c> instance</returns>
public static Org.Apache.Hadoop.Hdfs.Server.Datanode.Metrics.DataNodeMetrics Create
	(Configuration conf, string dnName)
{
	string sessionId = conf.Get(DFSConfigKeys.DfsMetricsSessionIdKey);
	MetricsSystem metricsSystem = DefaultMetricsSystem.Instance();
	JvmMetrics jvmMetrics = JvmMetrics.Create("DataNode", sessionId, metricsSystem);
	// Colons are illegal in metrics source names, so host:port becomes host-port.
	string sourceName;
	if (dnName.IsEmpty())
	{
		sourceName = "DataNodeActivity-" + "UndefinedDataNodeName" + DFSUtil.GetRandom().Next();
	}
	else
	{
		sourceName = "DataNodeActivity-" + dnName.Replace(':', '-');
	}
	// Percentile measurement is off by default, by watching no intervals
	int[] percentileIntervals = conf.GetInts(DFSConfigKeys.DfsMetricsPercentilesIntervalsKey);
	Org.Apache.Hadoop.Hdfs.Server.Datanode.Metrics.DataNodeMetrics metrics =
		new Org.Apache.Hadoop.Hdfs.Server.Datanode.Metrics.DataNodeMetrics(sourceName, sessionId, percentileIntervals, jvmMetrics);
	return metricsSystem.Register(sourceName, null, metrics);
}
/// <summary>Start listening for edits via RPC.</summary>
/// <remarks>
/// Bring-up order matters: journal dir validation, metrics, Kerberos login (before any
/// server opens a port), MXBean registration, then the HTTP and RPC servers.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
public virtual void Start()
{
	Preconditions.CheckState(!IsStarted(), "JN already running");
	ValidateAndCreateJournalDir(localDir);
	DefaultMetricsSystem.Initialize("JournalNode");
	string sessionId = conf.Get(DFSConfigKeys.DfsMetricsSessionIdKey);
	JvmMetrics.Create("JournalNode", sessionId, DefaultMetricsSystem.Instance());
	// Log in before binding servers so they come up with the right credentials.
	IPEndPoint rpcAddress = JournalNodeRpcServer.GetAddress(conf);
	SecurityUtil.Login(conf, DFSConfigKeys.DfsJournalnodeKeytabFileKey, DFSConfigKeys
		.DfsJournalnodeKerberosPrincipalKey, rpcAddress.GetHostName());
	RegisterJNMXBean();
	httpServer = new JournalNodeHttpServer(conf, this);
	httpServer.Start();
	httpServerURI = httpServer.GetServerURI().ToString();
	rpcServer = new JournalNodeRpcServer(conf, this);
	rpcServer.Start();
}
/// <summary>
/// Wires up the ApplicationHistoryServer's child services in dependency order:
/// timeline store, token secret manager and data manager first, then the generic
/// history services, and finally the metrics system.
/// </summary>
/// <param name="conf">service configuration, forwarded to each created sub-service</param>
/// <exception cref="System.Exception"/>
protected override void ServiceInit(Configuration conf)
{
	// init timeline services first
	timelineStore = CreateTimelineStore(conf);
	// AddIfService only registers the store if it is itself a Service.
	AddIfService(timelineStore);
	secretManagerService = CreateTimelineDelegationTokenSecretManagerService(conf);
	AddService(secretManagerService);
	timelineDataManager = CreateTimelineDataManager(conf);
	AddService(timelineDataManager);
	// init generic history service afterwards
	aclsManager = CreateApplicationACLsManager(conf);
	historyManager = CreateApplicationHistoryManager(conf);
	ahsClientService = CreateApplicationHistoryClientService(historyManager);
	AddService(ahsClientService);
	// historyManager is cast because its declared type is not a Service.
	AddService((Org.Apache.Hadoop.Service.Service)historyManager);
	DefaultMetricsSystem.Initialize("ApplicationHistoryServer");
	JvmMetrics.InitSingleton("ApplicationHistoryServer", null);
	// Parent init actually starts the lifecycle of everything added above.
	base.ServiceInit(conf);
}
/// <summary>
/// Creates the datanode metrics source: tags the registry with the session id and
/// allocates one <c>MutableQuantiles</c> per configured interval for each latency
/// metric (packet ack RTT, flush, fsync, network-blocked and transfer times, and
/// RamDisk eviction/lazy-persist windows).
/// </summary>
/// <param name="name">metrics source name (kept for registration/lookup)</param>
/// <param name="sessionId">session id used to tag all emitted records</param>
/// <param name="intervals">rollover intervals (seconds) for percentile estimation; empty disables percentiles</param>
/// <param name="jvmMetrics">the JVM metrics source associated with this datanode</param>
public DataNodeMetrics(string name, string sessionId, int[] intervals, JvmMetrics
	 jvmMetrics)
{
	// RamDisk metrics on read/write
	// RamDisk metrics on eviction
	// RamDisk metrics on lazy persist
	this.name = name;
	this.jvmMetrics = jvmMetrics;
	registry.Tag(MsInfo.SessionId, sessionId);
	// One quantile estimator per interval for every latency metric below.
	int len = intervals.Length;
	packetAckRoundTripTimeNanosQuantiles = new MutableQuantiles[len];
	flushNanosQuantiles = new MutableQuantiles[len];
	fsyncNanosQuantiles = new MutableQuantiles[len];
	sendDataPacketBlockedOnNetworkNanosQuantiles = new MutableQuantiles[len];
	sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
	ramDiskBlocksEvictionWindowMsQuantiles = new MutableQuantiles[len];
	ramDiskBlocksLazyPersistWindowMsQuantiles = new MutableQuantiles[len];
	for (int i = 0; i < len; i++)
	{
		// Each metric name embeds its interval, e.g. "flushNanos60s".
		int interval = intervals[i];
		packetAckRoundTripTimeNanosQuantiles[i] = registry.NewQuantiles("packetAckRoundTripTimeNanos"
			 + interval + "s", "Packet Ack RTT in ns", "ops", "latency", interval);
		flushNanosQuantiles[i] = registry.NewQuantiles("flushNanos" + interval + "s", "Disk flush latency in ns"
			, "ops", "latency", interval);
		fsyncNanosQuantiles[i] = registry.NewQuantiles("fsyncNanos" + interval + "s", "Disk fsync latency in ns"
			, "ops", "latency", interval);
		sendDataPacketBlockedOnNetworkNanosQuantiles[i] = registry.NewQuantiles("sendDataPacketBlockedOnNetworkNanos"
			 + interval + "s", "Time blocked on network while sending a packet in ns", "ops"
			, "latency", interval);
		sendDataPacketTransferNanosQuantiles[i] = registry.NewQuantiles("sendDataPacketTransferNanos"
			 + interval + "s", "Time reading from disk and writing to network while sending " +
			"a packet in ns", "ops", "latency", interval);
		ramDiskBlocksEvictionWindowMsQuantiles[i] = registry.NewQuantiles("ramDiskBlocksEvictionWindows"
			 + interval + "s", "Time between the RamDisk block write and eviction in ms", "ops"
			, "latency", interval);
		ramDiskBlocksLazyPersistWindowMsQuantiles[i] = registry.NewQuantiles("ramDiskBlocksLazyPersistWindows"
			 + interval + "s", "Time between the RamDisk block write and disk persist in ms"
			, "ops", "latency", interval);
	}
}
/// <summary>
/// Creates the namenode metrics source: tags the registry with process name and
/// session id, and allocates one quantile estimator per configured interval for
/// journal syncs, block reports and cache reports.
/// </summary>
/// <param name="processName">namenode role name used as the process tag</param>
/// <param name="sessionId">session id used to tag all emitted records</param>
/// <param name="intervals">rollover intervals (seconds) for percentile estimation; empty disables percentiles</param>
/// <param name="jvmMetrics">the JVM metrics source associated with this namenode</param>
internal NameNodeMetrics(string processName, string sessionId, int[] intervals, JvmMetrics
	 jvmMetrics)
{
	this.jvmMetrics = jvmMetrics;
	registry.Tag(MsInfo.ProcessName, processName).Tag(MsInfo.SessionId, sessionId);
	int count = intervals.Length;
	syncsQuantiles = new MutableQuantiles[count];
	blockReportQuantiles = new MutableQuantiles[count];
	cacheReportQuantiles = new MutableQuantiles[count];
	for (int idx = 0; idx < count; idx++)
	{
		// Each metric name embeds its interval, e.g. "syncs60s".
		int window = intervals[idx];
		syncsQuantiles[idx] = registry.NewQuantiles(
			"syncs" + window + "s", "Journal syncs", "ops", "latency", window);
		blockReportQuantiles[idx] = registry.NewQuantiles(
			"blockReport" + window + "s", "Block report", "ops", "latency", window);
		cacheReportQuantiles[idx] = registry.NewQuantiles(
			"cacheReport" + window + "s", "Cache report", "ops", "latency", window);
	}
}
/// <summary>
/// Wires up the SharedCacheManager's child services: the store first (everything else
/// depends on it), then cleaner, uploader, client and admin protocol services, the web
/// UI, and finally the metrics system.
/// </summary>
/// <param name="conf">service configuration, forwarded to each created sub-service</param>
/// <exception cref="System.Exception"/>
protected override void ServiceInit(Configuration conf)
{
	this.store = CreateSCMStoreService(conf);
	AddService(store);
	CleanerService cleanerService = CreateCleanerService(store);
	AddService(cleanerService);
	SharedCacheUploaderService uploaderService = CreateNMCacheUploaderSCMProtocolService(store);
	AddService(uploaderService);
	ClientProtocolService clientService = CreateClientProtocolService(store);
	AddService(clientService);
	SCMAdminProtocolService adminService = CreateSCMAdminProtocolService(cleanerService);
	AddService(adminService);
	SCMWebServer webServer = CreateSCMWebServer(this);
	AddService(webServer);
	// init metrics
	DefaultMetricsSystem.Initialize("SharedCacheManager");
	JvmMetrics.InitSingleton("SharedCacheManager", null);
	// Parent init drives the lifecycle of everything added above.
	base.ServiceInit(conf);
}
/// <summary>
/// Creates the NFS3 gateway metrics source: tags the registry with the session id and
/// allocates one quantile estimator per configured interval for the read, write and
/// commit procedures.
/// </summary>
/// <param name="name">metrics source name (kept for registration/lookup)</param>
/// <param name="sessionId">session id used to tag all emitted records</param>
/// <param name="intervals">rollover intervals (seconds) for percentile estimation; empty disables percentiles</param>
/// <param name="jvmMetrics">the JVM metrics source associated with this gateway</param>
public Nfs3Metrics(string name, string sessionId, int[] intervals, JvmMetrics jvmMetrics
	)
{
	// All mutable rates are in nanoseconds
	// No metric for nullProcedure;
	this.name = name;
	this.jvmMetrics = jvmMetrics;
	registry.Tag(MsInfo.SessionId, sessionId);
	int count = intervals.Length;
	readNanosQuantiles = new MutableQuantiles[count];
	writeNanosQuantiles = new MutableQuantiles[count];
	commitNanosQuantiles = new MutableQuantiles[count];
	for (int idx = 0; idx < count; idx++)
	{
		// Each metric name embeds its interval, e.g. "readProcessNanos60s".
		int window = intervals[idx];
		readNanosQuantiles[idx] = registry.NewQuantiles(
			"readProcessNanos" + window + "s", "Read process in ns", "ops", "latency", window);
		writeNanosQuantiles[idx] = registry.NewQuantiles(
			"writeProcessNanos" + window + "s", "Write process in ns", "ops", "latency", window);
		commitNanosQuantiles[idx] = registry.NewQuantiles(
			"commitProcessNanos" + window + "s", "Commit process in ns", "ops", "latency", window);
	}
}
/// <summary>
/// Registers a fresh <c>MRAppMetrics</c> source with the given metrics system,
/// initializing the JVM metrics singleton under the application-master name first.
/// </summary>
/// <param name="ms">the metrics system to register with</param>
/// <returns>the registered <c>MRAppMetrics</c> instance</returns>
public static MRAppMetrics Create(MetricsSystem ms)
{
	JvmMetrics.InitSingleton("MRAppMaster", null);
	MRAppMetrics appMetrics = new MRAppMetrics();
	return ms.Register(appMetrics);
}
/// <summary>
/// Entry point of a YARN child task JVM. Parses the AM's address and the task/JVM ids
/// from <paramref name="args"/>, establishes the umbilical RPC connection as the job
/// owner, polls the AM for a task, then runs it under the container user's UGI.
/// Cleanup, error reporting back over the umbilical, and metrics/log shutdown happen
/// in the catch/finally blocks.
/// </summary>
/// <param name="args">[0] AM host, [1] AM port, [2] first task attempt id, [3] JVM id (long)</param>
/// <exception cref="System.Exception"/>
public static void Main(string[] args)
{
	Sharpen.Thread.SetDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler
		());
	Log.Debug("Child starting");
	JobConf job = new JobConf(MRJobConfig.JobConfFile);
	// Initing with our JobConf allows us to avoid loading confs twice
	Limits.Init(job);
	UserGroupInformation.SetConfiguration(job);
	string host = args[0];
	int port = System.Convert.ToInt32(args[1]);
	IPEndPoint address = NetUtils.CreateSocketAddrForHost(host, port);
	TaskAttemptID firstTaskid = ((TaskAttemptID)TaskAttemptID.ForName(args[2]));
	long jvmIdLong = long.Parse(args[3]);
	JVMId jvmId = new JVMId(((JobID)firstTaskid.GetJobID()), firstTaskid.GetTaskType(
		) == TaskType.Map, jvmIdLong);
	// initialize metrics
	DefaultMetricsSystem.Initialize(StringUtils.Camelize(firstTaskid.GetTaskType().ToString
		()) + "Task");
	// Security framework already loaded the tokens into current ugi
	Credentials credentials = UserGroupInformation.GetCurrentUser().GetCredentials();
	Log.Info("Executing with tokens:");
	foreach (Org.Apache.Hadoop.Security.Token.Token <object> token in credentials.GetAllTokens
		())
	{
		Log.Info(token);
	}
	// Create TaskUmbilicalProtocol as actual task owner.
	UserGroupInformation taskOwner = UserGroupInformation.CreateRemoteUser(((JobID)firstTaskid
		.GetJobID()).ToString());
	Org.Apache.Hadoop.Security.Token.Token <JobTokenIdentifier> jt = TokenCache.GetJobToken
		(credentials);
	// Point the job token at the AM's address before the RPC proxy is built.
	SecurityUtil.SetTokenService(jt, address);
	taskOwner.AddToken(jt);
	TaskUmbilicalProtocol umbilical = taskOwner.DoAs(new _PrivilegedExceptionAction_108
		(address, job));
	// report non-pid to application master
	JvmContext context = new JvmContext(jvmId, "-1000");
	Log.Debug("PID: " + Sharpen.Runtime.GetEnv()["JVM_PID"]);
	Task task = null;
	UserGroupInformation childUGI = null;
	ScheduledExecutorService logSyncer = null;
	try
	{
		// NOTE(review): idleLoopCount appears unused — likely Sharpen-translation residue.
		int idleLoopCount = 0;
		JvmTask myTask = null;
		// poll for new task
		// Backoff grows 0, 500, 1000ms and is capped at 1500ms per iteration.
		for (int idle = 0; null == myTask; ++idle)
		{
			long sleepTimeMilliSecs = Math.Min(idle * 500, 1500);
			Log.Info("Sleeping for " + sleepTimeMilliSecs + "ms before retrying again. Got null now."
				);
			TimeUnit.Milliseconds.Sleep(sleepTimeMilliSecs);
			myTask = umbilical.GetTask(context);
		}
		if (myTask.ShouldDie())
		{
			// AM told this JVM to exit; finally block still runs for cleanup.
			return;
		}
		task = myTask.GetTask();
		YarnChild.taskid = task.GetTaskID();
		// Create the job-conf and set credentials
		ConfigureTask(job, task, credentials, jt);
		// Initiate Java VM metrics
		JvmMetrics.InitSingleton(jvmId.ToString(), job.GetSessionId());
		// Run the task as the container user, not the job-token owner.
		childUGI = UserGroupInformation.CreateRemoteUser(Runtime.Getenv(ApplicationConstants.Environment
			.User.ToString()));
		// Add tokens to new user so that it may execute its task correctly.
		childUGI.AddCredentials(credentials);
		// set job classloader if configured before invoking the task
		MRApps.SetJobClassLoader(job);
		logSyncer = TaskLog.CreateLogSyncer();
		// Create a final reference to the task for the doAs block
		Task taskFinal = task;
		childUGI.DoAs(new _PrivilegedExceptionAction_158(taskFinal, job, umbilical));
	}
	catch (FSError e)
	{
		// use job-specified working directory
		// run the task
		Log.Fatal("FSError from child", e);
		// Don't report over the umbilical while the JVM is already shutting down.
		if (!ShutdownHookManager.Get().IsShutdownInProgress())
		{
			umbilical.FsError(taskid, e.Message);
		}
	}
	catch (Exception exception)
	{
		Log.Warn("Exception running child : " + StringUtils.StringifyException(exception)
			);
		try
		{
			if (task != null)
			{
				// do cleanup for the task
				if (childUGI == null)
				{
					// no need to job into doAs block
					task.TaskCleanup(umbilical);
				}
				else
				{
					Task taskFinal = task;
					childUGI.DoAs(new _PrivilegedExceptionAction_183(taskFinal, umbilical));
				}
			}
		}
		catch (Exception e)
		{
			// Best-effort cleanup: log and continue to failure reporting below.
			Log.Info("Exception cleaning up: " + StringUtils.StringifyException(e));
		}
		// Report back any failures, for diagnostic purposes
		if (taskid != null)
		{
			if (!ShutdownHookManager.Get().IsShutdownInProgress())
			{
				umbilical.FatalError(taskid, StringUtils.StringifyException(exception));
			}
		}
	}
	catch (Exception throwable)
	{
		// NOTE(review): this clause duplicates the catch above (Java Throwable was
		// translated to Exception by Sharpen) — confirm it is reachable/compiles.
		Log.Fatal("Error running child : " + StringUtils.StringifyException(throwable));
		if (taskid != null)
		{
			if (!ShutdownHookManager.Get().IsShutdownInProgress())
			{
				Exception tCause = throwable.InnerException;
				string cause = tCause == null ? throwable.Message : StringUtils.StringifyException
					(tCause);
				umbilical.FatalError(taskid, cause);
			}
		}
	}
	finally
	{
		RPC.StopProxy(umbilical);
		DefaultMetricsSystem.Shutdown();
		// Shutting down log syncer flushes any pending task-log writes.
		TaskLog.SyncLogsShutdown(logSyncer);
	}
}
/// <summary>
/// Starts the JobHistoryServer: brings up the metrics system and JVM metrics first,
/// then delegates to the parent to start the registered child services.
/// </summary>
/// <exception cref="System.Exception"/>
protected override void ServiceStart()
{
	DefaultMetricsSystem.Initialize("JobHistoryServer");
	JvmMetrics.InitSingleton("JobHistoryServer", null);
	base.ServiceStart();
}
/// <summary>
/// Registers a fresh <c>NodeManagerMetrics</c> source with the given metrics system,
/// creating the NodeManager JVM metrics source first.
/// </summary>
/// <param name="ms">the metrics system to register with</param>
/// <returns>the registered <c>NodeManagerMetrics</c> instance</returns>
internal static NodeManagerMetrics Create(MetricsSystem ms)
{
	JvmMetrics.Create("NodeManager", null, ms);
	NodeManagerMetrics nodeManagerMetrics = new NodeManagerMetrics();
	return ms.Register(nodeManagerMetrics);
}
/// <summary>Initialize SecondaryNameNode.</summary>
/// <remarks>
/// Order matters: security login happens before any network service is created; the
/// checkpoint storage is recovered before the namesystem is built on top of it; the
/// HTTP server is started last and its actual bound addresses are written back into
/// the configuration.
/// </remarks>
/// <param name="conf">configuration for addresses, security and checkpoint settings</param>
/// <param name="commandLineOpts">parsed CLI options; controls checkpoint formatting</param>
/// <exception cref="System.IO.IOException"/>
private void Initialize(Configuration conf, SecondaryNameNode.CommandLineOpts commandLineOpts
	)
{
	IPEndPoint infoSocAddr = GetHttpAddress(conf);
	string infoBindAddress = infoSocAddr.GetHostName();
	UserGroupInformation.SetConfiguration(conf);
	if (UserGroupInformation.IsSecurityEnabled())
	{
		// Kerberos login must precede any RPC/HTTP setup below.
		SecurityUtil.Login(conf, DFSConfigKeys.DfsSecondaryNamenodeKeytabFileKey, DFSConfigKeys
			.DfsSecondaryNamenodeKerberosPrincipalKey, infoBindAddress);
	}
	// initiate Java VM metrics
	DefaultMetricsSystem.Initialize("SecondaryNameNode");
	JvmMetrics.Create("SecondaryNameNode", conf.Get(DFSConfigKeys.DfsMetricsSessionIdKey
		), DefaultMetricsSystem.Instance());
	// Create connection to the namenode.
	shouldRun = true;
	nameNodeAddr = NameNode.GetServiceAddress(conf, true);
	this.conf = conf;
	this.namenode = NameNodeProxies.CreateNonHAProxy <NamenodeProtocol>(conf, nameNodeAddr
		, UserGroupInformation.GetCurrentUser(), true).GetProxy();
	// initialize checkpoint directories
	fsName = GetInfoServer();
	checkpointDirs = FSImage.GetCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
	checkpointEditsDirs = FSImage.GetCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary"
		);
	checkpointImage = new SecondaryNameNode.CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs
		);
	// Recover (or format, if requested) local checkpoint storage, then drop
	// any temporary edits left over from an interrupted checkpoint.
	checkpointImage.RecoverCreate(commandLineOpts.ShouldFormat());
	checkpointImage.DeleteTempEdits();
	namesystem = new FSNamesystem(conf, checkpointImage, true);
	// Disable quota checks
	namesystem.dir.DisableQuotaChecks();
	// Initialize other scheduling parameters from the configuration
	checkpointConf = new CheckpointConf(conf);
	IPEndPoint httpAddr = infoSocAddr;
	string httpsAddrString = conf.GetTrimmed(DFSConfigKeys.DfsNamenodeSecondaryHttpsAddressKey
		, DFSConfigKeys.DfsNamenodeSecondaryHttpsAddressDefault);
	IPEndPoint httpsAddr = NetUtils.CreateSocketAddr(httpsAddrString);
	HttpServer2.Builder builder = DFSUtil.HttpServerTemplateForNNAndJN(conf, httpAddr
		, httpsAddr, "secondary", DFSConfigKeys.DfsSecondaryNamenodeKerberosInternalSpnegoPrincipalKey
		, DFSConfigKeys.DfsSecondaryNamenodeKeytabFileKey);
	nameNodeStatusBeanName = MBeans.Register("SecondaryNameNode", "SecondaryNameNodeInfo"
		, this);
	infoServer = builder.Build();
	// Expose this node and the checkpoint image to the image-transfer servlet.
	infoServer.SetAttribute("secondary.name.node", this);
	infoServer.SetAttribute("name.system.image", checkpointImage);
	infoServer.SetAttribute(JspHelper.CurrentConf, conf);
	infoServer.AddInternalServlet("imagetransfer", ImageServlet.PathSpec, typeof(ImageServlet
		), true);
	infoServer.Start();
	Log.Info("Web server init done");
	HttpConfig.Policy policy = DFSUtil.GetHttpPolicy(conf);
	// Write the actually-bound addresses back so other components see real ports.
	int connIdx = 0;
	if (policy.IsHttpEnabled())
	{
		IPEndPoint httpAddress = infoServer.GetConnectorAddress(connIdx++);
		conf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, NetUtils.GetHostPortString
			(httpAddress));
	}
	if (policy.IsHttpsEnabled())
	{
		IPEndPoint httpsAddress = infoServer.GetConnectorAddress(connIdx);
		conf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpsAddressKey, NetUtils.GetHostPortString
			(httpsAddress));
	}
	legacyOivImageDir = conf.Get(DFSConfigKeys.DfsNamenodeLegacyOivImageDirKey);
	Log.Info("Checkpoint Period :" + checkpointConf.GetPeriod() + " secs " + "(" +
		checkpointConf.GetPeriod() / 60 + " min)");
	Log.Info("Log Size Trigger :" + checkpointConf.GetTxnCount() + " txns");
}