public virtual void TestNameNode()
{
    int numDatanodes = 2;
    cluster = new MiniDFSCluster.Builder(config).NumDataNodes(numDatanodes).Build();
    cluster.WaitActive();
    WriteFile(cluster.GetFileSystem(), new Path("/test1"), 2);
    JMXGet jmx = new JMXGet();
    string serviceName = "NameNode";
    jmx.SetService(serviceName);
    jmx.Init(); // default lists namenode mbeans only
    NUnit.Framework.Assert.IsTrue("error printAllValues", CheckPrintAllValues(jmx));
    // get some data from a different source
    NUnit.Framework.Assert.AreEqual(numDatanodes, System.Convert.ToInt32(jmx.GetValue("NumLiveDataNodes")));
    MetricsAsserts.AssertGauge("CorruptBlocks", long.Parse(jmx.GetValue("CorruptBlocks")),
        MetricsAsserts.GetMetrics("FSNamesystem"));
    NUnit.Framework.Assert.AreEqual(numDatanodes, System.Convert.ToInt32(jmx.GetValue("NumOpenConnections")));
    cluster.Shutdown();
    MBeanServerConnection mbsc = ManagementFactory.GetPlatformMBeanServer();
    ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
    ICollection<ObjectName> names = mbsc.QueryNames(query, null);
    NUnit.Framework.Assert.IsTrue("No beans should be registered for " + serviceName, names.IsEmpty());
}
private void CheckMetrics(/*job*/ int jobsSubmitted, int jobsCompleted, int jobsFailed,
    int jobsKilled, int jobsPreparing, int jobsRunning, /*map*/ int mapsLaunched,
    int mapsCompleted, int mapsFailed, int mapsKilled, int mapsRunning, int mapsWaiting,
    /*reduce*/ int reducesLaunched, int reducesCompleted, int reducesFailed,
    int reducesKilled, int reducesRunning, int reducesWaiting)
{
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics("MRAppMetrics");
    MetricsAsserts.AssertCounter("JobsSubmitted", jobsSubmitted, rb);
    MetricsAsserts.AssertCounter("JobsCompleted", jobsCompleted, rb);
    MetricsAsserts.AssertCounter("JobsFailed", jobsFailed, rb);
    MetricsAsserts.AssertCounter("JobsKilled", jobsKilled, rb);
    MetricsAsserts.AssertGauge("JobsPreparing", jobsPreparing, rb);
    MetricsAsserts.AssertGauge("JobsRunning", jobsRunning, rb);
    MetricsAsserts.AssertCounter("MapsLaunched", mapsLaunched, rb);
    MetricsAsserts.AssertCounter("MapsCompleted", mapsCompleted, rb);
    MetricsAsserts.AssertCounter("MapsFailed", mapsFailed, rb);
    MetricsAsserts.AssertCounter("MapsKilled", mapsKilled, rb);
    MetricsAsserts.AssertGauge("MapsRunning", mapsRunning, rb);
    MetricsAsserts.AssertGauge("MapsWaiting", mapsWaiting, rb);
    MetricsAsserts.AssertCounter("ReducesLaunched", reducesLaunched, rb);
    MetricsAsserts.AssertCounter("ReducesCompleted", reducesCompleted, rb);
    MetricsAsserts.AssertCounter("ReducesFailed", reducesFailed, rb);
    MetricsAsserts.AssertCounter("ReducesKilled", reducesKilled, rb);
    MetricsAsserts.AssertGauge("ReducesRunning", reducesRunning, rb);
    MetricsAsserts.AssertGauge("ReducesWaiting", reducesWaiting, rb);
}
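// A minimal usage sketch, not part of the original tests: assuming an MRAppMetrics
// source has already been registered by the test fixture, the helper above could be
// used to assert that no job, map, or reduce activity has been recorded yet. The
// method name and the all-zero expectations below are illustrative only.
public virtual void TestNoActivityRecordedYet()
{
    CheckMetrics(/*job*/ 0, 0, 0, 0, 0, 0, /*map*/ 0, 0, 0, 0, 0, 0, /*reduce*/ 0, 0, 0, 0, 0, 0);
}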
public virtual void TestMissingBlock()
{
    // Create a file with a single block with a single replica
    Path file = GetTestPath("testMissingBlocks");
    CreateFile(file, 100, (short)1);
    // Corrupt the only replica of the block to result in a missing block
    LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), file.ToString(), 0, 1).Get(0);
    cluster.GetNamesystem().WriteLock();
    try
    {
        bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
    }
    finally
    {
        cluster.GetNamesystem().WriteUnlock();
    }
    UpdateMetrics();
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("UnderReplicatedBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingReplOneBlocks", 1L, rb);
    fs.Delete(file, true);
    WaitForDnMetricValue(NsMetrics, "UnderReplicatedBlocks", 0L);
}
public virtual void TestStaleNodes()
{
    // Set two datanodes as stale
    for (int i = 0; i < 2; i++)
    {
        DataNode dn = cluster.GetDataNodes()[i];
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
        long staleInterval = Conf.GetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey,
            DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalDefault);
        DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager()
            .GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
    }
    // Let the HeartbeatManager check the heartbeats
    BlockManagerTestUtil.CheckHeartbeat(cluster.GetNameNode().GetNamesystem().GetBlockManager());
    MetricsAsserts.AssertGauge("StaleDataNodes", 2, MetricsAsserts.GetMetrics(NsMetrics));
    // Reset stale datanodes
    for (int i = 0; i < 2; i++)
    {
        DataNode dn = cluster.GetDataNodes()[i];
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, false);
        DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager()
            .GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, 0);
    }
    // Let the HeartbeatManager refresh the stale-node count
    BlockManagerTestUtil.CheckHeartbeat(cluster.GetNameNode().GetNamesystem().GetBlockManager());
    MetricsAsserts.AssertGauge("StaleDataNodes", 0, MetricsAsserts.GetMetrics(NsMetrics));
}
/// <exception cref="System.Exception"/>
public virtual void TestJournal()
{
    MetricsRecordBuilder metrics = MetricsAsserts.GetMetrics(journal.GetMetricsForTests().GetName());
    MetricsAsserts.AssertCounter("BatchesWritten", 0L, metrics);
    MetricsAsserts.AssertCounter("BatchesWrittenWhileLagging", 0L, metrics);
    MetricsAsserts.AssertGauge("CurrentLagTxns", 0L, metrics);
    IPCLoggerChannel ch = new IPCLoggerChannel(conf, FakeNsinfo, journalId, jn.GetBoundIpcAddress());
    ch.NewEpoch(1).Get();
    ch.SetEpoch(1);
    ch.StartLogSegment(1, NameNodeLayoutVersion.CurrentLayoutVersion).Get();
    ch.SendEdits(1L, 1, 1, Sharpen.Runtime.GetBytesForString("hello", Charsets.Utf8)).Get();
    metrics = MetricsAsserts.GetMetrics(journal.GetMetricsForTests().GetName());
    MetricsAsserts.AssertCounter("BatchesWritten", 1L, metrics);
    MetricsAsserts.AssertCounter("BatchesWrittenWhileLagging", 0L, metrics);
    MetricsAsserts.AssertGauge("CurrentLagTxns", 0L, metrics);
    ch.SetCommittedTxId(100L);
    ch.SendEdits(1L, 2, 1, Sharpen.Runtime.GetBytesForString("goodbye", Charsets.Utf8)).Get();
    metrics = MetricsAsserts.GetMetrics(journal.GetMetricsForTests().GetName());
    MetricsAsserts.AssertCounter("BatchesWritten", 2L, metrics);
    MetricsAsserts.AssertCounter("BatchesWrittenWhileLagging", 1L, metrics);
    MetricsAsserts.AssertGauge("CurrentLagTxns", 98L, metrics);
}
public virtual void TestFinalState()
{
    StartupProgressTestHelper.SetStartupProgressForFinalState(startupProgress);
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(metrics, true);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("ElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertGauge("PercentComplete", 1.0f, builder);
    MetricsAsserts.AssertCounter("LoadingFsImageCount", 100L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("LoadingFsImageTotal", 100L, builder);
    MetricsAsserts.AssertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
    MetricsAsserts.AssertCounter("LoadingEditsCount", 200L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("LoadingEditsTotal", 200L, builder);
    MetricsAsserts.AssertGauge("LoadingEditsPercentComplete", 1.0f, builder);
    MetricsAsserts.AssertCounter("SavingCheckpointCount", 300L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("SavingCheckpointElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("SavingCheckpointTotal", 300L, builder);
    MetricsAsserts.AssertGauge("SavingCheckpointPercentComplete", 1.0f, builder);
    MetricsAsserts.AssertCounter("SafeModeCount", 400L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("SafeModeElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("SafeModeTotal", 400L, builder);
    MetricsAsserts.AssertGauge("SafeModePercentComplete", 1.0f, builder);
}
public virtual void TestSnapshottableDirs()
{
    cluster.GetNamesystem().GetSnapshotManager().SetAllowNestedSnapshots(true);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 0, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 0L, MetricsAsserts.GetMetrics(NnMetrics));
    MetricsAsserts.AssertCounter("DisallowSnapshotOps", 0L, MetricsAsserts.GetMetrics(NnMetrics));
    // Allow snapshots for directories, and check the metrics
    hdfs.AllowSnapshot(sub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 1, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    Path sub2 = new Path(dir, "sub2");
    Path file = new Path(sub2, "file");
    DFSTestUtil.CreateFile(hdfs, file, 1024, Replication, seed);
    hdfs.AllowSnapshot(sub2);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 2L, MetricsAsserts.GetMetrics(NnMetrics));
    Path subsub1 = new Path(sub1, "sub1sub1");
    Path subfile = new Path(subsub1, "file");
    DFSTestUtil.CreateFile(hdfs, subfile, 1024, Replication, seed);
    hdfs.AllowSnapshot(subsub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 3, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 3L, MetricsAsserts.GetMetrics(NnMetrics));
    // Set an already snapshottable directory to snapshottable, should not
    // change the metrics
    hdfs.AllowSnapshot(sub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 3, MetricsAsserts.GetMetrics(NsMetrics));
    // But the number of allowSnapshot operations still increases
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 4L, MetricsAsserts.GetMetrics(NnMetrics));
    // Disallow the snapshot for snapshottable directories, then check the
    // metrics again
    hdfs.DisallowSnapshot(sub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("DisallowSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // delete subsub1, snapshottable directories should be 1
    hdfs.Delete(subsub1, true);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 1, MetricsAsserts.GetMetrics(NsMetrics));
    // list all the snapshottable directories
    SnapshottableDirectoryStatus[] status = hdfs.GetSnapshottableDirListing();
    NUnit.Framework.Assert.AreEqual(1, status.Length);
    MetricsAsserts.AssertCounter("ListSnapshottableDirOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
}
public static void CheckApps(MetricsSource source, int submitted, int pending, int running,
    int completed, int failed, int killed, bool all)
{
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(source, all);
    MetricsAsserts.AssertCounter("AppsSubmitted", submitted, rb);
    MetricsAsserts.AssertGauge("AppsPending", pending, rb);
    MetricsAsserts.AssertGauge("AppsRunning", running, rb);
    MetricsAsserts.AssertCounter("AppsCompleted", completed, rb);
    MetricsAsserts.AssertCounter("AppsFailed", failed, rb);
    MetricsAsserts.AssertCounter("AppsKilled", killed, rb);
}
public virtual void TestMetricsInitializedOnRMInit()
{
    YarnConfiguration conf = new YarnConfiguration();
    conf.SetClass(YarnConfiguration.RmScheduler, typeof(FifoScheduler), typeof(ResourceScheduler));
    MockRM rm = new MockRM(conf);
    QueueMetrics metrics = rm.GetResourceScheduler().GetRootQueueMetrics();
    CheckApps(metrics, 0, 0, 0, 0, 0, 0, true);
    MetricsAsserts.AssertGauge("ReservedContainers", 0, metrics);
}
public virtual void TestSnapshots()
{
    cluster.GetNamesystem().GetSnapshotManager().SetAllowNestedSnapshots(true);
    MetricsAsserts.AssertGauge("Snapshots", 0, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 0L, MetricsAsserts.GetMetrics(NnMetrics));
    // Creating a snapshot for a non-snapshottable directory fails, so the Snapshots
    // gauge should not change (the CreateSnapshotOps counter still increases)
    try
    {
        hdfs.CreateSnapshot(sub1, "s1");
    }
    catch (Exception)
    {
    }
    MetricsAsserts.AssertGauge("Snapshots", 0, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // Create snapshot for sub1
    hdfs.AllowSnapshot(sub1);
    hdfs.CreateSnapshot(sub1, "s1");
    MetricsAsserts.AssertGauge("Snapshots", 1, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 2L, MetricsAsserts.GetMetrics(NnMetrics));
    hdfs.CreateSnapshot(sub1, "s2");
    MetricsAsserts.AssertGauge("Snapshots", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 3L, MetricsAsserts.GetMetrics(NnMetrics));
    hdfs.GetSnapshotDiffReport(sub1, "s1", "s2");
    MetricsAsserts.AssertCounter("SnapshotDiffReportOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // Create snapshot for a directory under sub1
    Path subsub1 = new Path(sub1, "sub1sub1");
    Path subfile = new Path(subsub1, "file");
    DFSTestUtil.CreateFile(hdfs, subfile, 1024, Replication, seed);
    hdfs.AllowSnapshot(subsub1);
    hdfs.CreateSnapshot(subsub1, "s11");
    MetricsAsserts.AssertGauge("Snapshots", 3, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 4L, MetricsAsserts.GetMetrics(NnMetrics));
    // delete snapshot
    hdfs.DeleteSnapshot(sub1, "s2");
    MetricsAsserts.AssertGauge("Snapshots", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("DeleteSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // rename snapshot
    hdfs.RenameSnapshot(sub1, "s1", "NewS1");
    MetricsAsserts.AssertGauge("Snapshots", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("RenameSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
}
public virtual void TestMutableRates()
{
    MetricsRecordBuilder rb = MetricsAsserts.MockMetricsRecordBuilder();
    MetricsRegistry registry = new MetricsRegistry("test");
    MutableRates rates = new MutableRates(registry);
    rates.Init(typeof(TestMutableMetrics.TestProtocol));
    registry.Snapshot(rb, false);
    MetricsAsserts.AssertCounter("FooNumOps", 0L, rb);
    MetricsAsserts.AssertGauge("FooAvgTime", 0.0, rb);
    MetricsAsserts.AssertCounter("BarNumOps", 0L, rb);
    MetricsAsserts.AssertGauge("BarAvgTime", 0.0, rb);
}
public virtual void TestDatanodeReport()
{
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500); // 0.5s
    conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).Build();
    try
    {
        // wait until the cluster is up
        cluster.WaitActive();
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        IList<DataNode> datanodes = cluster.GetDataNodes();
        DFSClient client = cluster.GetFileSystem().dfs;
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.All, client, datanodes, bpid);
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.Live, client, datanodes, bpid);
        AssertReports(0, HdfsConstants.DatanodeReportType.Dead, client, datanodes, bpid);
        // bring down one datanode
        DataNode last = datanodes[datanodes.Count - 1];
        Log.Info("XXX shutdown datanode " + last.GetDatanodeUuid());
        last.Shutdown();
        DatanodeInfo[] nodeInfo = client.DatanodeReport(HdfsConstants.DatanodeReportType.Dead);
        while (nodeInfo.Length != 1)
        {
            try
            {
                Sharpen.Thread.Sleep(500);
            }
            catch (Exception)
            {
            }
            nodeInfo = client.DatanodeReport(HdfsConstants.DatanodeReportType.Dead);
        }
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.All, client, datanodes, null);
        AssertReports(NumOfDatanodes - 1, HdfsConstants.DatanodeReportType.Live, client, datanodes, null);
        AssertReports(1, HdfsConstants.DatanodeReportType.Dead, client, datanodes, null);
        Sharpen.Thread.Sleep(5000);
        MetricsAsserts.AssertGauge("ExpiredHeartbeats", 1, MetricsAsserts.GetMetrics("FSNamesystem"));
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestTransactionAndCheckpointMetrics()
{
    long lastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    fs.Mkdirs(new Path(TestRootDirPath, "/tmp"));
    UpdateMetrics();
    MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 2L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 2L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 2L, MetricsAsserts.GetMetrics(NsMetrics));
    cluster.GetNameNodeRpc().RollEditLog();
    UpdateMetrics();
    MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 4L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 4L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    cluster.GetNameNodeRpc().SaveNamespace();
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
    UpdateMetrics();
    long newLastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics(NsMetrics));
    NUnit.Framework.Assert.IsTrue(lastCkptTime < newLastCkptTime);
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 6L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
}
public virtual void TestExcessBlocks()
{
    Path file = GetTestPath("testExcessBlocks");
    CreateFile(file, 100, (short)2);
    NameNodeAdapter.SetReplication(namesystem, file.ToString(), (short)1);
    UpdateMetrics();
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("ExcessBlocks", 1L, rb);
    // verify ExcessBlocks metric is decremented and
    // excessReplicateMap is cleared after deleting a file
    fs.Delete(file, true);
    rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("ExcessBlocks", 0L, rb);
    NUnit.Framework.Assert.IsTrue(bm.excessReplicateMap.IsEmpty());
}
private void CheckMetrics(int launched, int completed, int failed, int killed, int initing,
    int running, int allocatedGB, int allocatedContainers, int availableGB,
    int allocatedVCores, int availableVCores)
{
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics("NodeManagerMetrics");
    MetricsAsserts.AssertCounter("ContainersLaunched", launched, rb);
    MetricsAsserts.AssertCounter("ContainersCompleted", completed, rb);
    MetricsAsserts.AssertCounter("ContainersFailed", failed, rb);
    MetricsAsserts.AssertCounter("ContainersKilled", killed, rb);
    MetricsAsserts.AssertGauge("ContainersIniting", initing, rb);
    MetricsAsserts.AssertGauge("ContainersRunning", running, rb);
    MetricsAsserts.AssertGauge("AllocatedGB", allocatedGB, rb);
    MetricsAsserts.AssertGauge("AllocatedVCores", allocatedVCores, rb);
    MetricsAsserts.AssertGauge("AllocatedContainers", allocatedContainers, rb);
    MetricsAsserts.AssertGauge("AvailableGB", availableGB, rb);
    MetricsAsserts.AssertGauge("AvailableVCores", availableVCores, rb);
}
public static void CheckResources(MetricsSource source, int allocatedMB, int allocatedCores,
    int allocCtnrs, long aggreAllocCtnrs, long aggreReleasedCtnrs, int availableMB,
    int availableCores, int pendingMB, int pendingCores, int pendingCtnrs, int reservedMB,
    int reservedCores, int reservedCtnrs)
{
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(source);
    MetricsAsserts.AssertGauge("AllocatedMB", allocatedMB, rb);
    MetricsAsserts.AssertGauge("AllocatedVCores", allocatedCores, rb);
    MetricsAsserts.AssertGauge("AllocatedContainers", allocCtnrs, rb);
    MetricsAsserts.AssertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb);
    MetricsAsserts.AssertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb);
    MetricsAsserts.AssertGauge("AvailableMB", availableMB, rb);
    MetricsAsserts.AssertGauge("AvailableVCores", availableCores, rb);
    MetricsAsserts.AssertGauge("PendingMB", pendingMB, rb);
    MetricsAsserts.AssertGauge("PendingVCores", pendingCores, rb);
    MetricsAsserts.AssertGauge("PendingContainers", pendingCtnrs, rb);
    MetricsAsserts.AssertGauge("ReservedMB", reservedMB, rb);
    MetricsAsserts.AssertGauge("ReservedVCores", reservedCores, rb);
    MetricsAsserts.AssertGauge("ReservedContainers", reservedCtnrs, rb);
}
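// A hedged usage sketch, not taken from the original tests: following the pattern of
// TestMetricsInitializedOnRMInit above, a hypothetical check could assert that the root
// queue of a freshly constructed MockRM (with no NodeManagers registered) reports zero
// for every resource gauge and aggregate counter. The test name and all argument values
// below are illustrative assumptions, not assertions from a real test case.
public virtual void TestRootQueueResourcesStartAtZero()
{
    YarnConfiguration conf = new YarnConfiguration();
    MockRM rm = new MockRM(conf);
    QueueMetrics metrics = rm.GetResourceScheduler().GetRootQueueMetrics();
    CheckResources(metrics, 0, 0, 0, 0L, 0L, 0, 0, 0, 0, 0, 0, 0, 0);
}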
public virtual void TestInitialState()
{
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(metrics, true);
    MetricsAsserts.AssertCounter("ElapsedTime", 0L, builder);
    MetricsAsserts.AssertGauge("PercentComplete", 0.0f, builder);
    MetricsAsserts.AssertCounter("LoadingFsImageCount", 0L, builder);
    MetricsAsserts.AssertCounter("LoadingFsImageElapsedTime", 0L, builder);
    MetricsAsserts.AssertCounter("LoadingFsImageTotal", 0L, builder);
    MetricsAsserts.AssertGauge("LoadingFsImagePercentComplete", 0.0f, builder);
    MetricsAsserts.AssertCounter("LoadingEditsCount", 0L, builder);
    MetricsAsserts.AssertCounter("LoadingEditsElapsedTime", 0L, builder);
    MetricsAsserts.AssertCounter("LoadingEditsTotal", 0L, builder);
    MetricsAsserts.AssertGauge("LoadingEditsPercentComplete", 0.0f, builder);
    MetricsAsserts.AssertCounter("SavingCheckpointCount", 0L, builder);
    MetricsAsserts.AssertCounter("SavingCheckpointElapsedTime", 0L, builder);
    MetricsAsserts.AssertCounter("SavingCheckpointTotal", 0L, builder);
    MetricsAsserts.AssertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
    MetricsAsserts.AssertCounter("SafeModeCount", 0L, builder);
    MetricsAsserts.AssertCounter("SafeModeElapsedTime", 0L, builder);
    MetricsAsserts.AssertCounter("SafeModeTotal", 0L, builder);
    MetricsAsserts.AssertGauge("SafeModePercentComplete", 0.0f, builder);
}
public virtual void TestFileAdd()
{
    // Add a file whose length spans multiple blocks
    Path file = GetTestPath("testFileAdd");
    CreateFile(file, 3200, (short)3);
    long blockCount = 32;
    int blockCapacity = namesystem.GetBlockCapacity();
    UpdateMetrics();
    MetricsAsserts.AssertGauge("BlockCapacity", blockCapacity, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NnMetrics);
    // File create operations count is 1;
    // the number of files created equals the depth of the <code>file</code> path
    MetricsAsserts.AssertCounter("CreateFileOps", 1L, rb);
    MetricsAsserts.AssertCounter("FilesCreated", (long)file.Depth(), rb);
    UpdateMetrics();
    long filesTotal = file.Depth() + 1; // Add 1 for root
    rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("FilesTotal", filesTotal, rb);
    MetricsAsserts.AssertGauge("BlocksTotal", blockCount, rb);
    fs.Delete(file, true);
    filesTotal--; // reduce the file count for the deleted file
    rb = WaitForDnMetricValue(NsMetrics, "FilesTotal", filesTotal);
    MetricsAsserts.AssertGauge("BlocksTotal", 0L, rb);
    MetricsAsserts.AssertGauge("PendingDeletionBlocks", 0L, rb);
    rb = MetricsAsserts.GetMetrics(NnMetrics);
    // Delete file operations and the number of files deleted must both be 1
    MetricsAsserts.AssertCounter("DeleteFileOps", 1L, rb);
    MetricsAsserts.AssertCounter("FilesDeleted", 1L, rb);
}
/// <summary>
/// Wait for the named gauge value from the metrics source to reach the
/// desired value.
/// </summary>
/// <remarks>
/// There's an initial delay, then a spin cycle of sleep and poll. Because
/// all the tests use a shared FS instance, these tests are not independent;
/// that's why the initial sleep is in there.
/// </remarks>
/// <param name="source">metrics source</param>
/// <param name="name">gauge name</param>
/// <param name="expected">expected value</param>
/// <returns>the last metrics record polled</returns>
/// <exception cref="System.Exception">if something went wrong.</exception>
private MetricsRecordBuilder WaitForDnMetricValue(string source, string name, long expected)
{
    MetricsRecordBuilder rb;
    long gauge;
    // initial wait
    WaitForDeletion();
    // lots of retries are allowed for slow systems; fast ones will still exit early
    int retries = (DatanodeCount + 1) * WaitGaugeValueRetries;
    rb = MetricsAsserts.GetMetrics(source);
    gauge = MetricsAsserts.GetLongGauge(name, rb);
    while (gauge != expected && (--retries > 0))
    {
        Sharpen.Thread.Sleep(DfsReplicationInterval * 500);
        rb = MetricsAsserts.GetMetrics(source);
        gauge = MetricsAsserts.GetLongGauge(name, rb);
    }
    // at this point the assertion is valid or the retry count ran out
    MetricsAsserts.AssertGauge(name, expected, rb);
    return rb;
}
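// A brief usage sketch, mirroring the calls already made in TestMissingBlock and
// TestFileAdd above: after deleting a file, a test can block until the relevant
// namesystem gauge settles and then assert related gauges on the returned record.
// The snippet is illustrative and belongs inside a test method:
//
//     fs.Delete(file, true);
//     MetricsRecordBuilder rb = WaitForDnMetricValue(NsMetrics, "UnderReplicatedBlocks", 0L);
//     MetricsAsserts.AssertGauge("PendingDeletionBlocks", 0L, rb);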