/// <summary>
/// Asserts that the "FsyncCount" counter reported by the dn-th datanode of
/// the cluster equals the expected value.
/// </summary>
private void CheckSyncMetric(MiniDFSCluster cluster, int dn, long value)
{
    DataNode node = cluster.GetDataNodes()[dn];
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(node.GetMetrics().Name());
    MetricsAsserts.AssertCounter("FsyncCount", value, builder);
}
/// <summary>
/// Asserts the full set of MRAppMetrics values for jobs, maps, and reduces.
/// Terminal states (submitted/completed/failed/killed/launched) are checked
/// as counters; in-flight states (preparing/running/waiting) as gauges.
/// NOTE(review): 18 positional int parameters are easy to misorder at call
/// sites — keep argument order aligned with the assertion order below.
/// </summary>
private void CheckMetrics(int jobsSubmitted, int jobsCompleted, int jobsFailed,
    int jobsKilled, int jobsPreparing, int jobsRunning, int mapsLaunched,
    int mapsCompleted, int mapsFailed, int mapsKilled, int mapsRunning,
    int mapsWaiting, int reducesLaunched, int reducesCompleted, int reducesFailed,
    int reducesKilled, int reducesRunning, int reducesWaiting)
{
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics("MRAppMetrics");
    // Job metrics
    MetricsAsserts.AssertCounter("JobsSubmitted", jobsSubmitted, rb);
    MetricsAsserts.AssertCounter("JobsCompleted", jobsCompleted, rb);
    MetricsAsserts.AssertCounter("JobsFailed", jobsFailed, rb);
    MetricsAsserts.AssertCounter("JobsKilled", jobsKilled, rb);
    MetricsAsserts.AssertGauge("JobsPreparing", jobsPreparing, rb);
    MetricsAsserts.AssertGauge("JobsRunning", jobsRunning, rb);
    // Map task metrics
    MetricsAsserts.AssertCounter("MapsLaunched", mapsLaunched, rb);
    MetricsAsserts.AssertCounter("MapsCompleted", mapsCompleted, rb);
    MetricsAsserts.AssertCounter("MapsFailed", mapsFailed, rb);
    MetricsAsserts.AssertCounter("MapsKilled", mapsKilled, rb);
    MetricsAsserts.AssertGauge("MapsRunning", mapsRunning, rb);
    MetricsAsserts.AssertGauge("MapsWaiting", mapsWaiting, rb);
    // Reduce task metrics
    MetricsAsserts.AssertCounter("ReducesLaunched", reducesLaunched, rb);
    MetricsAsserts.AssertCounter("ReducesCompleted", reducesCompleted, rb);
    MetricsAsserts.AssertCounter("ReducesFailed", reducesFailed, rb);
    MetricsAsserts.AssertCounter("ReducesKilled", reducesKilled, rb);
    MetricsAsserts.AssertGauge("ReducesRunning", reducesRunning, rb);
    MetricsAsserts.AssertGauge("ReducesWaiting", reducesWaiting, rb);
}
/// <summary>
/// Verifies that annotated metric fields (counters, gauges, rates, and
/// per-key rate stats) are snapshotted with the expected names,
/// descriptions, and value types after being bumped exactly once each.
/// </summary>
public virtual void TestFields()
{
    TestMetricsAnnotations.MyMetrics metrics = new TestMetricsAnnotations.MyMetrics();
    MetricsSource source = MetricsAnnotations.MakeSource(metrics);
    // Bump every metric exactly once so each snapshot value is 1.
    metrics.c1.Incr();
    metrics.c2.Incr();
    metrics.g1.Incr();
    metrics.g2.Incr();
    metrics.g3.Incr();
    metrics.r1.Add(1);
    metrics.s1.Add(1);
    metrics.rs1.Add("rs1", 1);
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(source);
    // Field names are used verbatim as descriptions unless the annotation
    // supplies one (see "Counter2 desc" and "g3 desc" below).
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("C1", "C1"), 1);
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("Counter2", "Counter2 desc"), 1L);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G1", "G1"), 1);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G2", "G2"), 1);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G3", "g3 desc"), 1L);
    // Rate-type metrics expand into a NumOps counter plus an AvgTime gauge.
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("R1NumOps", "Number of ops for r1"), 1L);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("R1AvgTime", "Average time for r1"), 1.0);
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("S1NumOps", "Number of ops for s1"), 1L);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("S1AvgTime", "Average time for s1"), 1.0);
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("Rs1NumOps", "Number of ops for rs1"), 1L);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("Rs1AvgTime", "Average time for rs1"), 1.0);
}
/// <summary>
/// After driving startup progress to its final state, every phase
/// (fsimage load, edits load, checkpoint save, safe mode) must report
/// count == total, 100% completion, and a non-negative elapsed time.
/// </summary>
public virtual void TestFinalState()
{
    StartupProgressTestHelper.SetStartupProgressForFinalState(startupProgress);
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(metrics, true);
    // Overall progress: done, elapsed time merely sane (>= 0).
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("ElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertGauge("PercentComplete", 1.0f, builder);
    // LoadingFsImage phase: 100 of 100 units completed.
    MetricsAsserts.AssertCounter("LoadingFsImageCount", 100L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("LoadingFsImageTotal", 100L, builder);
    MetricsAsserts.AssertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
    // LoadingEdits phase: 200 of 200.
    MetricsAsserts.AssertCounter("LoadingEditsCount", 200L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("LoadingEditsTotal", 200L, builder);
    MetricsAsserts.AssertGauge("LoadingEditsPercentComplete", 1.0f, builder);
    // SavingCheckpoint phase: 300 of 300.
    MetricsAsserts.AssertCounter("SavingCheckpointCount", 300L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("SavingCheckpointElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("SavingCheckpointTotal", 300L, builder);
    MetricsAsserts.AssertGauge("SavingCheckpointPercentComplete", 1.0f, builder);
    // SafeMode phase: 400 of 400.
    MetricsAsserts.AssertCounter("SafeModeCount", 400L, builder);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("SafeModeElapsedTime", builder) >= 0L);
    MetricsAsserts.AssertCounter("SafeModeTotal", 400L, builder);
    MetricsAsserts.AssertGauge("SafeModePercentComplete", 1.0f, builder);
}
/// <summary>
/// Exercises JMXGet against the NameNode service: prints all values,
/// cross-checks JMX attribute values against the metrics system, and
/// verifies that all NameNode MBeans are unregistered after cluster
/// shutdown.
/// </summary>
public virtual void TestNameNode()
{
    int numDatanodes = 2;
    cluster = new MiniDFSCluster.Builder(config).NumDataNodes(numDatanodes).Build();
    cluster.WaitActive();
    WriteFile(cluster.GetFileSystem(), new Path("/test1"), 2);
    JMXGet jmx = new JMXGet();
    string serviceName = "NameNode";
    jmx.SetService(serviceName);
    // default lists namenode mbeans only
    jmx.Init();
    NUnit.Framework.Assert.IsTrue("error printAllValues", CheckPrintAllValues(jmx));
    // get some data from different source: JMX values must agree with both
    // the cluster configuration and the FSNamesystem metrics snapshot.
    NUnit.Framework.Assert.AreEqual(numDatanodes, System.Convert.ToInt32(jmx.GetValue("NumLiveDataNodes")));
    MetricsAsserts.AssertGauge("CorruptBlocks", long.Parse(jmx.GetValue("CorruptBlocks")), MetricsAsserts.GetMetrics("FSNamesystem"));
    NUnit.Framework.Assert.AreEqual(numDatanodes, System.Convert.ToInt32(jmx.GetValue("NumOpenConnections")));
    cluster.Shutdown();
    // After shutdown no MBeans for the service should remain registered.
    MBeanServerConnection mbsc = ManagementFactory.GetPlatformMBeanServer();
    ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
    ICollection<ObjectName> names = mbsc.QueryNames(query, null);
    NUnit.Framework.Assert.IsTrue("No beans should be registered for " + serviceName, names.IsEmpty());
}
/// <summary>
/// Writes a file longer than int.MaxValue and checks that the datanode's
/// BytesWritten counter matches the file length (64-bit accounting) and
/// that at least one incremental block report was sent.
/// </summary>
public virtual void TestDataNodeMetrics()
{
    Configuration conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    try
    {
        FileSystem fs = cluster.GetFileSystem();
        // One byte past int.MaxValue exercises 64-bit byte counting.
        long fileLen = int.MaxValue + 1L;
        DFSTestUtil.CreateFile(fs, new Path("/tmp.txt"), fileLen, (short)1, 1L);
        IList<DataNode> datanodes = cluster.GetDataNodes();
        NUnit.Framework.Assert.AreEqual(datanodes.Count, 1);
        DataNode datanode = datanodes[0];
        MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
        MetricsAsserts.AssertCounter("BytesWritten", fileLen, builder);
        NUnit.Framework.Assert.IsTrue("Expected non-zero number of incremental block reports",
            MetricsAsserts.GetLongCounter("IncrementalBlockReportsNumOps", builder) > 0);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// GetBlockLocations counter semantics: unchanged by file creation,
/// incremented by one for every open-and-read of a file.
/// </summary>
public virtual void TestGetBlockLocationMetric()
{
    const string MetricName = "GetBlockLocations";
    Path filePath = new Path(TestRootDirPath, "file1.dat");
    // Fresh cluster: no read/create/open operations yet, so the counter is 0.
    MetricsAsserts.AssertCounter(MetricName, 0L, MetricsAsserts.GetMetrics(NnMetrics));
    // Creating a file does not consult block locations; still 0.
    CreateFile(filePath, 100, (short)2);
    UpdateMetrics();
    MetricsAsserts.AssertCounter(MetricName, 0L, MetricsAsserts.GetMetrics(NnMetrics));
    // A single open-and-read bumps the counter to 1.
    ReadFile(fs, filePath);
    UpdateMetrics();
    MetricsAsserts.AssertCounter(MetricName, 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // Two further reads advance it to 3.
    ReadFile(fs, filePath);
    ReadFile(fs, filePath);
    UpdateMetrics();
    MetricsAsserts.AssertCounter(MetricName, 3L, MetricsAsserts.GetMetrics(NnMetrics));
}
/// <summary>
/// Caches then uncaches a single file and verifies the datanode reports
/// exactly one BlocksCached and one BlocksUncached event, with no further
/// cache messages after quiescing.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestUncacheQuiesces()
{
    // Create a file
    Path fileName = new Path("/testUncacheQuiesces");
    int fileLen = 4096;
    DFSTestUtil.CreateFile(fs, fileName, fileLen, (short)1, unchecked((int)(0xFDFD)));
    // Cache it
    DistributedFileSystem dfs = cluster.GetFileSystem();
    dfs.AddCachePool(new CachePoolInfo("pool"));
    dfs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPool("pool").SetPath(fileName).SetReplication((short)3).Build());
    GenericTestUtils.WaitFor(new _Supplier_484(), 1000, 30000);
    // Uncache it
    dfs.RemoveCacheDirective(1);
    GenericTestUtils.WaitFor(new _Supplier_495(), 1000, 30000);
    // Make sure that no additional messages were sent
    Sharpen.Thread.Sleep(10000);
    MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
    // Use the upper-case L suffix: the original lowercase "1l" literal is
    // easily misread as the digit 11 and is flagged by style analyzers.
    MetricsAsserts.AssertCounter("BlocksCached", 1L, dnMetrics);
    MetricsAsserts.AssertCounter("BlocksUncached", 1L, dnMetrics);
}
/// <summary>
/// Corrupts the only replica of a single-replica file and verifies the
/// namesystem gauges report one under-replicated, one missing, and one
/// missing-with-replication-one block; deleting the file must bring
/// UnderReplicatedBlocks back to zero.
/// </summary>
public virtual void TestMissingBlock()
{
    // Create a file with a single block and a single replica
    // (replication factor 1, see the (short)1 below).
    Path file = GetTestPath("testMissingBlocks");
    CreateFile(file, 100, (short)1);
    // Corrupt the only replica of the block to result in a missing block
    LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), file.ToString(), 0, 1).Get(0);
    // Marking a block corrupt mutates block-manager state, so hold the
    // namesystem write lock for the duration.
    cluster.GetNamesystem().WriteLock();
    try
    {
        bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
    }
    finally
    {
        cluster.GetNamesystem().WriteUnlock();
    }
    UpdateMetrics();
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("UnderReplicatedBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingReplOneBlocks", 1L, rb);
    // Removing the file clears the under-replication; wait for the metric
    // to settle rather than asserting immediately.
    fs.Delete(file, true);
    WaitForDnMetricValue(NsMetrics, "UnderReplicatedBlocks", 0L);
}
/// <summary>
/// Marks two datanodes stale (heartbeats disabled and last-update pushed
/// past the stale interval), checks the StaleDataNodes gauge reads 2, then
/// resets them and checks it returns to 0.
/// </summary>
public virtual void TestStaleNodes()
{
    // Set two datanodes as stale
    for (int i = 0; i < 2; i++)
    {
        DataNode dn = cluster.GetDataNodes()[i];
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
        long staleInterval = Conf.GetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey, DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalDefault);
        DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager().GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        // Push the last heartbeat just past the stale threshold.
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
    }
    // Let HeartbeatManager to check heartbeat
    BlockManagerTestUtil.CheckHeartbeat(cluster.GetNameNode().GetNamesystem().GetBlockManager());
    MetricsAsserts.AssertGauge("StaleDataNodes", 2, MetricsAsserts.GetMetrics(NsMetrics));
    // Reset stale datanodes
    for (int i_1 = 0; i_1 < 2; i_1++)
    {
        DataNode dn = cluster.GetDataNodes()[i_1];
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, false);
        DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager().GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, 0);
    }
    // Let HeartbeatManager to refresh
    BlockManagerTestUtil.CheckHeartbeat(cluster.GetNameNode().GetNamesystem().GetBlockManager());
    MetricsAsserts.AssertGauge("StaleDataNodes", 0, MetricsAsserts.GetMetrics(NsMetrics));
}
/// <summary>
/// Drives the journal through epoch setup, a log segment, and two edit
/// batches, checking BatchesWritten, BatchesWrittenWhileLagging, and
/// CurrentLagTxns after each step.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestJournal()
{
    // Baseline: nothing written yet, no lag.
    MetricsRecordBuilder metrics = MetricsAsserts.GetMetrics(journal.GetMetricsForTests().GetName());
    MetricsAsserts.AssertCounter("BatchesWritten", 0L, metrics);
    MetricsAsserts.AssertCounter("BatchesWrittenWhileLagging", 0L, metrics);
    MetricsAsserts.AssertGauge("CurrentLagTxns", 0L, metrics);
    IPCLoggerChannel ch = new IPCLoggerChannel(conf, FakeNsinfo, journalId, jn.GetBoundIpcAddress());
    ch.NewEpoch(1).Get();
    ch.SetEpoch(1);
    ch.StartLogSegment(1, NameNodeLayoutVersion.CurrentLayoutVersion).Get();
    ch.SendEdits(1L, 1, 1, Sharpen.Runtime.GetBytesForString("hello", Charsets.Utf8)).Get();
    // One batch written; the journal is not behind any committed txid.
    metrics = MetricsAsserts.GetMetrics(journal.GetMetricsForTests().GetName());
    MetricsAsserts.AssertCounter("BatchesWritten", 1L, metrics);
    MetricsAsserts.AssertCounter("BatchesWrittenWhileLagging", 0L, metrics);
    MetricsAsserts.AssertGauge("CurrentLagTxns", 0L, metrics);
    // Declare txid 100 committed: the next batch (txid 2) is written while
    // lagging, and the lag gauge reads 100 - 2 = 98 transactions.
    ch.SetCommittedTxId(100L);
    ch.SendEdits(1L, 2, 1, Sharpen.Runtime.GetBytesForString("goodbye", Charsets.Utf8)).Get();
    metrics = MetricsAsserts.GetMetrics(journal.GetMetricsForTests().GetName());
    MetricsAsserts.AssertCounter("BatchesWritten", 2L, metrics);
    MetricsAsserts.AssertCounter("BatchesWrittenWhileLagging", 1L, metrics);
    MetricsAsserts.AssertGauge("CurrentLagTxns", 98L, metrics);
}
/// <summary>
/// Polls the datanode's metrics; returns true once at least one block has
/// been reported as uncached.
/// </summary>
public bool Get()
{
    string sourceName = TestFsDatasetCache.dn.GetMetrics().Name();
    MetricsRecordBuilder metrics = MetricsAsserts.GetMetrics(sourceName);
    return MetricsAsserts.GetLongCounter("BlocksUncached", metrics) > 0;
}
/// <summary>
/// Asserts the hit/cleared/updated counters for the retry cache named by
/// the cacheName field.
/// </summary>
private void CheckMetrics(long hit, long cleared, long updated)
{
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics("RetryCache." + cacheName);
    MetricsAsserts.AssertCounter("CacheHit", hit, builder);
    MetricsAsserts.AssertCounter("CacheCleared", cleared, builder);
    MetricsAsserts.AssertCounter("CacheUpdated", updated, builder);
}
/// <summary>
/// Tracks the SnapshottableDirectories gauge and the Allow/Disallow/List
/// snapshot op counters as directories are made snapshottable, re-allowed,
/// disallowed, and deleted. Op counters count every RPC (including
/// no-op re-allows); the gauge reflects the current directory count.
/// </summary>
public virtual void TestSnapshottableDirs()
{
    cluster.GetNamesystem().GetSnapshotManager().SetAllowNestedSnapshots(true);
    // Initial state: nothing snapshottable, no ops recorded.
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 0, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 0L, MetricsAsserts.GetMetrics(NnMetrics));
    MetricsAsserts.AssertCounter("DisallowSnapshotOps", 0L, MetricsAsserts.GetMetrics(NnMetrics));
    // Allow snapshots for directories, and check the metrics
    hdfs.AllowSnapshot(sub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 1, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    Path sub2 = new Path(dir, "sub2");
    Path file = new Path(sub2, "file");
    DFSTestUtil.CreateFile(hdfs, file, 1024, Replication, seed);
    hdfs.AllowSnapshot(sub2);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 2L, MetricsAsserts.GetMetrics(NnMetrics));
    // Nested snapshottable directory (enabled above) also counts.
    Path subsub1 = new Path(sub1, "sub1sub1");
    Path subfile = new Path(subsub1, "file");
    DFSTestUtil.CreateFile(hdfs, subfile, 1024, Replication, seed);
    hdfs.AllowSnapshot(subsub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 3, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 3L, MetricsAsserts.GetMetrics(NnMetrics));
    // Set an already snapshottable directory to snapshottable, should not
    // change the metrics
    hdfs.AllowSnapshot(sub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 3, MetricsAsserts.GetMetrics(NsMetrics));
    // But the number of allowSnapshot operations still increases
    MetricsAsserts.AssertCounter("AllowSnapshotOps", 4L, MetricsAsserts.GetMetrics(NnMetrics));
    // Disallow the snapshot for snapshottable directories, then check the
    // metrics again
    hdfs.DisallowSnapshot(sub1);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("DisallowSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // delete subsub1, snapshottable directories should be 1
    hdfs.Delete(subsub1, true);
    MetricsAsserts.AssertGauge("SnapshottableDirectories", 1, MetricsAsserts.GetMetrics(NsMetrics));
    // list all the snapshottable directories
    SnapshottableDirectoryStatus[] status = hdfs.GetSnapshottableDirListing();
    NUnit.Framework.Assert.AreEqual(1, status.Length);
    MetricsAsserts.AssertCounter("ListSnapshottableDirOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
}
/// <summary>
/// Caches and then uncaches each block of a 5-block file, verifying after
/// every heartbeat response that cache usage matches expectations and that
/// the BlocksCached/BlocksUncached counters keep advancing.
/// </summary>
/// <exception cref="System.Exception"/>
private void TestCacheAndUncacheBlock()
{
    Log.Info("beginning testCacheAndUncacheBlock");
    int NumBlocks = 5;
    // Nothing cached before the test starts.
    DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
    NUnit.Framework.Assert.AreEqual(0, fsd.GetNumBlocksCached());
    // Write a test file
    Path testFile = new Path("/testCacheBlock");
    long testFileLen = BlockSize * NumBlocks;
    DFSTestUtil.CreateFile(fs, testFile, testFileLen, (short)1, unchecked((long)(0xABBAl)));
    // Get the details of the written file
    HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.GetFileBlockLocations(testFile, 0, testFileLen);
    NUnit.Framework.Assert.AreEqual("Unexpected number of blocks", NumBlocks, locs.Length);
    long[] blockSizes = GetBlockSizes(locs);
    // Check initial state
    long cacheCapacity = fsd.GetCacheCapacity();
    long cacheUsed = fsd.GetCacheUsed();
    long current = 0;
    NUnit.Framework.Assert.AreEqual("Unexpected cache capacity", CacheCapacity, cacheCapacity);
    NUnit.Framework.Assert.AreEqual("Unexpected amount of cache used", current, cacheUsed);
    MetricsRecordBuilder dnMetrics;
    long numCacheCommands = 0;
    long numUncacheCommands = 0;
    // Cache each block in succession, checking each time
    for (int i = 0; i < NumBlocks; i++)
    {
        SetHeartbeatResponse(CacheBlock(locs[i]));
        // Usage grows by this block's size; cached-block count by one.
        current = DFSTestUtil.VerifyExpectedCacheUsage(current + blockSizes[i], i + 1, fsd);
        dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
        long cmds = MetricsAsserts.GetLongCounter("BlocksCached", dnMetrics);
        NUnit.Framework.Assert.IsTrue("Expected more cache requests from the NN (" + cmds + " <= " + numCacheCommands + ")", cmds > numCacheCommands);
        numCacheCommands = cmds;
    }
    // Uncache each block in succession, again checking each time
    for (int i_1 = 0; i_1 < NumBlocks; i_1++)
    {
        SetHeartbeatResponse(UncacheBlock(locs[i_1]));
        current = DFSTestUtil.VerifyExpectedCacheUsage(current - blockSizes[i_1], NumBlocks - 1 - i_1, fsd);
        dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
        long cmds = MetricsAsserts.GetLongCounter("BlocksUncached", dnMetrics);
        NUnit.Framework.Assert.IsTrue("Expected more uncache requests from the NN", cmds > numUncacheCommands);
        numUncacheCommands = cmds;
    }
    Log.Info("finishing testCacheAndUncacheBlock");
}
/// <summary>
/// Starts an RPC server with a refreshed service ACL and makes a ping
/// call. Depending on expectFailure, asserts that exactly one
/// authorization failure or success was recorded; authentication counters
/// must stay at zero since authentication is not enabled here.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void DoRPCs(Configuration conf, bool expectFailure)
{
    Server server = new RPC.Builder(conf).SetProtocol(typeof(TestRPC.TestProtocol)).SetInstance(new TestRPC.TestImpl()).SetBindAddress(Address).SetPort(0).SetNumHandlers(5).SetVerbose(true).Build();
    server.RefreshServiceAcl(conf, new TestRPC.TestPolicyProvider());
    TestRPC.TestProtocol proxy = null;
    server.Start();
    IPEndPoint addr = NetUtils.GetConnectAddress(server);
    try
    {
        proxy = RPC.GetProxy<TestRPC.TestProtocol>(TestRPC.TestProtocol.versionID, addr, conf);
        proxy.Ping();
        if (expectFailure)
        {
            NUnit.Framework.Assert.Fail("Expect RPC.getProxy to fail with AuthorizationException!");
        }
    }
    catch (RemoteException e)
    {
        // An authorization rejection surfaces as a wrapped RemoteException;
        // anything else (or an exception when success was expected) is rethrown.
        if (expectFailure)
        {
            Assert.True(e.UnwrapRemoteException() is AuthorizationException);
        }
        else
        {
            throw;
        }
    }
    finally
    {
        server.Stop();
        if (proxy != null)
        {
            RPC.StopProxy(proxy);
        }
        // Metrics are checked in finally so they are validated on both the
        // success and failure paths.
        MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(server.rpcMetrics.Name());
        if (expectFailure)
        {
            MetricsAsserts.AssertCounter("RpcAuthorizationFailures", 1L, rb);
        }
        else
        {
            MetricsAsserts.AssertCounter("RpcAuthorizationSuccesses", 1L, rb);
        }
        //since we don't have authentication turned ON, we should see
        // 0 for the authentication successes and 0 for failure
        MetricsAsserts.AssertCounter("RpcAuthenticationFailures", 0L, rb);
        MetricsAsserts.AssertCounter("RpcAuthenticationSuccesses", 0L, rb);
    }
}
/// <summary>
/// Verifies class-level metric annotations produce the expected record
/// name/description and context tag.
/// </summary>
public virtual void TestClasses()
{
    MetricsSource annotatedSource = MetricsAnnotations.MakeSource(new TestMetricsAnnotations.MyMetrics3());
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(annotatedSource);
    MetricsCollector collector = builder.Parent();
    Org.Mockito.Mockito.Verify(collector).AddRecord(Interns.Info("MyMetrics3", "My metrics"));
    Org.Mockito.Mockito.Verify(builder).Add(Interns.Tag(MsInfo.Context, "foo"));
}
/// <summary>
/// Asserts the standard application metrics on the given source. Terminal
/// states are counters; pending/running are point-in-time gauges.
/// </summary>
public static void CheckApps(MetricsSource source, int submitted, int pending, int running, int completed, int failed, int killed, bool all)
{
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics(source, all);
    MetricsAsserts.AssertCounter("AppsSubmitted", submitted, builder);
    MetricsAsserts.AssertGauge("AppsPending", pending, builder);
    MetricsAsserts.AssertGauge("AppsRunning", running, builder);
    MetricsAsserts.AssertCounter("AppsCompleted", completed, builder);
    MetricsAsserts.AssertCounter("AppsFailed", failed, builder);
    MetricsAsserts.AssertCounter("AppsKilled", killed, builder);
}
/// <summary>
/// Root queue metrics must be zeroed as soon as the ResourceManager is
/// constructed, before any application activity.
/// </summary>
public virtual void TestMetricsInitializedOnRMInit()
{
    YarnConfiguration conf = new YarnConfiguration();
    conf.SetClass(YarnConfiguration.RmScheduler, typeof(FifoScheduler), typeof(ResourceScheduler));
    MockRM rm = new MockRM(conf);
    QueueMetrics rootMetrics = rm.GetResourceScheduler().GetRootQueueMetrics();
    CheckApps(rootMetrics, 0, 0, 0, 0, 0, 0, true);
    MetricsAsserts.AssertGauge("ReservedContainers", 0, rootMetrics);
}
/// <summary>
/// Checks the GetGroups quantile gauges (when UGI metrics are still
/// registered) and shuts the cluster down.
/// </summary>
public virtual void TearDown()
{
    MetricsSource ugiSource = DefaultMetricsSystem.Instance().GetSource("UgiMetrics");
    // Run only once since the UGI metrics is cleaned up during teardown
    if (ugiSource != null)
    {
        MetricsAsserts.AssertQuantileGauges("GetGroups1s", MetricsAsserts.GetMetrics(ugiSource));
    }
    cluster.Shutdown();
}
/// <summary>
/// Tracks the Snapshots gauge and the Create/Delete/Rename snapshot op
/// counters plus SnapshotDiffReportOps. Op counters count every attempted
/// RPC — including a create on a non-snapshottable directory that fails —
/// while the gauge reflects only snapshots that actually exist.
/// </summary>
public virtual void TestSnapshots()
{
    cluster.GetNamesystem().GetSnapshotManager().SetAllowNestedSnapshots(true);
    MetricsAsserts.AssertGauge("Snapshots", 0, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 0L, MetricsAsserts.GetMetrics(NnMetrics));
    // Create a snapshot for a non-snapshottable directory, thus should not
    // change the metrics
    try
    {
        hdfs.CreateSnapshot(sub1, "s1");
    }
    catch (Exception)
    {
        // Expected: sub1 is not snapshottable yet; the failed attempt still
        // bumps CreateSnapshotOps below.
    }
    MetricsAsserts.AssertGauge("Snapshots", 0, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // Create snapshot for sub1
    hdfs.AllowSnapshot(sub1);
    hdfs.CreateSnapshot(sub1, "s1");
    MetricsAsserts.AssertGauge("Snapshots", 1, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 2L, MetricsAsserts.GetMetrics(NnMetrics));
    hdfs.CreateSnapshot(sub1, "s2");
    MetricsAsserts.AssertGauge("Snapshots", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 3L, MetricsAsserts.GetMetrics(NnMetrics));
    hdfs.GetSnapshotDiffReport(sub1, "s1", "s2");
    MetricsAsserts.AssertCounter("SnapshotDiffReportOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // Create snapshot for a directory under sub1
    Path subsub1 = new Path(sub1, "sub1sub1");
    Path subfile = new Path(subsub1, "file");
    DFSTestUtil.CreateFile(hdfs, subfile, 1024, Replication, seed);
    hdfs.AllowSnapshot(subsub1);
    hdfs.CreateSnapshot(subsub1, "s11");
    MetricsAsserts.AssertGauge("Snapshots", 3, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("CreateSnapshotOps", 4L, MetricsAsserts.GetMetrics(NnMetrics));
    // delete snapshot
    hdfs.DeleteSnapshot(sub1, "s2");
    MetricsAsserts.AssertGauge("Snapshots", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("DeleteSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
    // rename snapshot: gauge unchanged, rename op counted.
    hdfs.RenameSnapshot(sub1, "s1", "NewS1");
    MetricsAsserts.AssertGauge("Snapshots", 2, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertCounter("RenameSnapshotOps", 1L, MetricsAsserts.GetMetrics(NnMetrics));
}
/// <summary>
/// Test that capacity metrics are exported and pass
/// basic sanity tests: total capacity is non-zero and is fully accounted
/// for by used + remaining + non-DFS used.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestCapacityMetrics()
{
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);
    long capacityTotal = MetricsAsserts.GetLongGauge("CapacityTotal", rb);
    // Use real test assertions instead of Debug.Assert: Debug.Assert is
    // compiled out of release builds, which would silently skip these checks.
    NUnit.Framework.Assert.AreNotEqual(0L, capacityTotal);
    long capacityUsed = MetricsAsserts.GetLongGauge("CapacityUsed", rb);
    long capacityRemaining = MetricsAsserts.GetLongGauge("CapacityRemaining", rb);
    long capacityUsedNonDFS = MetricsAsserts.GetLongGauge("CapacityUsedNonDFS", rb);
    // The three components must account for the full capacity.
    NUnit.Framework.Assert.AreEqual(capacityTotal, capacityUsed + capacityRemaining + capacityUsedNonDFS);
}
/// <summary>
/// FilesInGetListingOps grows by the number of entries returned from each
/// directory listing.
/// </summary>
public virtual void TestFilesInGetListingOps()
{
    CreateFile("/tmp1/t1", 3200, (short)3);
    CreateFile("/tmp1/t2", 3200, (short)3);
    CreateFile("/tmp2/t1", 3200, (short)3);
    CreateFile("/tmp2/t2", 3200, (short)3);
    // Listing /tmp1 returns its two files.
    cluster.GetNameNodeRpc().GetListing("/tmp1", HdfsFileStatus.EmptyName, false);
    MetricsAsserts.AssertCounter("FilesInGetListingOps", 2L, MetricsAsserts.GetMetrics(NnMetrics));
    // Listing /tmp2 adds two more.
    cluster.GetNameNodeRpc().GetListing("/tmp2", HdfsFileStatus.EmptyName, false);
    MetricsAsserts.AssertCounter("FilesInGetListingOps", 4L, MetricsAsserts.GetMetrics(NnMetrics));
}
/// <summary>
/// A freshly initialized MutableRates registry must snapshot zero ops and
/// a zero average time for every protocol method.
/// </summary>
public virtual void TestMutableRates()
{
    MetricsRecordBuilder builder = MetricsAsserts.MockMetricsRecordBuilder();
    MetricsRegistry registry = new MetricsRegistry("test");
    MutableRates rates = new MutableRates(registry);
    rates.Init(typeof(TestMutableMetrics.TestProtocol));
    registry.Snapshot(builder, false);
    MetricsAsserts.AssertCounter("FooNumOps", 0L, builder);
    MetricsAsserts.AssertGauge("FooAvgTime", 0.0, builder);
    MetricsAsserts.AssertCounter("BarNumOps", 0L, builder);
    MetricsAsserts.AssertGauge("BarAvgTime", 0.0, builder);
}
/// <summary>
/// Verifies datanode reports (ALL/LIVE/DEAD) before and after shutting
/// down one datanode, and that the ExpiredHeartbeats gauge records the
/// dead node once its heartbeat expires.
/// </summary>
public virtual void TestDatanodeReport()
{
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500);
    // 0.5s recheck + 1s heartbeat so the dead node is detected quickly.
    conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).Build();
    try
    {
        //wait until the cluster is up
        cluster.WaitActive();
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        IList<DataNode> datanodes = cluster.GetDataNodes();
        DFSClient client = cluster.GetFileSystem().dfs;
        // All nodes alive: ALL == LIVE == NumOfDatanodes, DEAD == 0.
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.All, client, datanodes, bpid);
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.Live, client, datanodes, bpid);
        AssertReports(0, HdfsConstants.DatanodeReportType.Dead, client, datanodes, bpid);
        // bring down one datanode
        DataNode last = datanodes[datanodes.Count - 1];
        Log.Info("XXX shutdown datanode " + last.GetDatanodeUuid());
        last.Shutdown();
        // Poll until the namenode notices exactly one dead node.
        DatanodeInfo[] nodeInfo = client.DatanodeReport(HdfsConstants.DatanodeReportType.Dead);
        while (nodeInfo.Length != 1)
        {
            try
            {
                Sharpen.Thread.Sleep(500);
            }
            catch (Exception)
            {
                // Interrupted while waiting; just re-poll.
            }
            nodeInfo = client.DatanodeReport(HdfsConstants.DatanodeReportType.Dead);
        }
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.All, client, datanodes, null);
        AssertReports(NumOfDatanodes - 1, HdfsConstants.DatanodeReportType.Live, client, datanodes, null);
        AssertReports(1, HdfsConstants.DatanodeReportType.Dead, client, datanodes, null);
        // Give the heartbeat monitor time to record the expired heartbeat.
        Sharpen.Thread.Sleep(5000);
        MetricsAsserts.AssertGauge("ExpiredHeartbeats", 1, MetricsAsserts.GetMetrics("FSNamesystem"));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Tracks transaction/checkpoint gauges through a mkdir, an edit-log roll,
/// and a saveNamespace checkpoint. Each operation (and each log roll /
/// safe-mode transition) advances LastWrittenTransactionId; a roll resets
/// TransactionsSinceLastLogRoll, and a checkpoint resets
/// TransactionsSinceLastCheckpoint and advances LastCheckpointTime.
/// </summary>
public virtual void TestTransactionAndCheckpointMetrics()
{
    // Baseline: one transaction since startup, no checkpoint yet this run.
    long lastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    // One mkdir -> one more transaction (2 total).
    fs.Mkdirs(new Path(TestRootDirPath, "/tmp"));
    UpdateMetrics();
    MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 2L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 2L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 2L, MetricsAsserts.GetMetrics(NsMetrics));
    // Rolling the edit log writes two more transactions (segment end/start)
    // and resets the since-last-roll gauge to 1.
    cluster.GetNameNodeRpc().RollEditLog();
    UpdateMetrics();
    MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 4L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 4L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    // saveNamespace (inside safe mode) checkpoints the namespace: the
    // checkpoint time advances and the since-checkpoint count resets.
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    cluster.GetNameNodeRpc().SaveNamespace();
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
    UpdateMetrics();
    long newLastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics(NsMetrics));
    NUnit.Framework.Assert.IsTrue(lastCkptTime < newLastCkptTime);
    MetricsAsserts.AssertGauge("LastWrittenTransactionId", 6L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics(NsMetrics));
    MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
}
/// <summary>
/// The TransactionsNumOps counter must advance after a create-then-read
/// cycle on a new file.
/// </summary>
public virtual void TestReadWriteOps()
{
    long opsBefore = MetricsAsserts.GetLongCounter("TransactionsNumOps", MetricsAsserts.GetMetrics(NnMetrics));
    Path dataFile = new Path(TestRootDirPath, "ReadData.dat");
    //Perform create file operation
    CreateFile(dataFile, 1024 * 1024, (short)2);
    // Perform read file operation on earlier created file
    ReadFile(fs, dataFile);
    long opsAfter = MetricsAsserts.GetLongCounter("TransactionsNumOps", MetricsAsserts.GetMetrics(NnMetrics));
    NUnit.Framework.Assert.IsTrue(opsAfter > opsBefore);
}
/// <summary>
/// Polls the datanode's metrics; returns true once the expected total
/// number of blocks has been cached, logging progress either way.
/// </summary>
public bool Get()
{
    MetricsRecordBuilder metrics = MetricsAsserts.GetMetrics(TestFsDatasetCache.dn.GetMetrics().Name());
    long blocksCached = MetricsAsserts.GetLongCounter("BlocksCached", metrics);
    if (blocksCached == TotalBlocksPerCache)
    {
        TestFsDatasetCache.Log.Info(TotalBlocksPerCache + " blocks are now cached.");
        return (true);
    }
    TestFsDatasetCache.Log.Info("waiting for " + TotalBlocksPerCache + " to " + "be cached. Right now only " + blocksCached + " blocks are cached.");
    return (false);
}
/// <summary>
/// When group lookups have occurred, verifies the GetGroups op count,
/// a non-negative average time, and (after waiting out one rollover
/// interval) the updated percentile gauges.
/// </summary>
/// <exception cref="System.Exception"/>
private static void VerifyGroupMetrics(long groups)
{
    MetricsRecordBuilder builder = MetricsAsserts.GetMetrics("UgiMetrics");
    if (groups <= 0)
    {
        return;
    }
    MetricsAsserts.AssertCounterGt("GetGroupsNumOps", groups - 1, builder);
    double avgTime = MetricsAsserts.GetDoubleGauge("GetGroupsAvgTime", builder);
    Assert.True(avgTime >= 0.0);
    // Sleep for an interval+slop to let the percentiles rollover
    Thread.Sleep((PercentilesInterval + 1) * 1000);
    // Check that the percentiles were updated
    MetricsAsserts.AssertQuantileGauges("GetGroups1s", builder);
}
/// <summary>
/// Verifies that annotated getter methods are snapshotted as gauges,
/// counters, and tags with the expected names and value types.
/// </summary>
public virtual void TestMethods()
{
    TestMetricsAnnotations.MyMetrics2 metrics = new TestMetricsAnnotations.MyMetrics2();
    MetricsSource source = MetricsAnnotations.MakeSource(metrics);
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(source);
    // Gauges of each numeric type (int, long, float, double).
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G1", "G1"), 1);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G2", "G2"), 2L);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G3", "G3"), 3.0f);
    Org.Mockito.Mockito.Verify(rb).AddGauge(Interns.Info("G4", "G4"), 4.0);
    // Counters (int and long) and a string tag.
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("C1", "C1"), 1);
    Org.Mockito.Mockito.Verify(rb).AddCounter(Interns.Info("C2", "C2"), 2L);
    Org.Mockito.Mockito.Verify(rb).Tag(Interns.Info("T1", "T1"), "t1");
}