/// <summary>
/// Verifies that once startup progress is driven to its terminal state, every
/// phase metric (FsImage load, edits load, checkpoint save, safe mode) reports
/// its final counter totals, 100% completion, and a non-negative elapsed time.
/// </summary>
public virtual void TestFinalState()
{
    StartupProgressTestHelper.SetStartupProgressForFinalState(startupProgress);
    MetricsRecordBuilder record = MetricsAsserts.GetMetrics(metrics, true);
    // Overall startup: finished, elapsed time recorded.
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("ElapsedTime", record) >= 0L);
    MetricsAsserts.AssertGauge("PercentComplete", 1.0f, record);
    // Phase: loading the FsImage.
    MetricsAsserts.AssertCounter("LoadingFsImageCount", 100L, record);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("LoadingFsImageElapsedTime", record) >= 0L);
    MetricsAsserts.AssertCounter("LoadingFsImageTotal", 100L, record);
    MetricsAsserts.AssertGauge("LoadingFsImagePercentComplete", 1.0f, record);
    // Phase: loading edits.
    MetricsAsserts.AssertCounter("LoadingEditsCount", 200L, record);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("LoadingEditsElapsedTime", record) >= 0L);
    MetricsAsserts.AssertCounter("LoadingEditsTotal", 200L, record);
    MetricsAsserts.AssertGauge("LoadingEditsPercentComplete", 1.0f, record);
    // Phase: saving the checkpoint.
    MetricsAsserts.AssertCounter("SavingCheckpointCount", 300L, record);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("SavingCheckpointElapsedTime", record) >= 0L);
    MetricsAsserts.AssertCounter("SavingCheckpointTotal", 300L, record);
    MetricsAsserts.AssertGauge("SavingCheckpointPercentComplete", 1.0f, record);
    // Phase: safe mode.
    MetricsAsserts.AssertCounter("SafeModeCount", 400L, record);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("SafeModeElapsedTime", record) >= 0L);
    MetricsAsserts.AssertCounter("SafeModeTotal", 400L, record);
    MetricsAsserts.AssertGauge("SafeModePercentComplete", 1.0f, record);
}
/// <summary>
/// Writes a file larger than 2 GB through a single simulated DataNode and
/// verifies that the 64-bit BytesWritten counter and the incremental block
/// report counter advance as expected.
/// </summary>
public virtual void TestDataNodeMetrics()
{
    Configuration conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    try
    {
        FileSystem fs = cluster.GetFileSystem();
        // Deliberately larger than int.MaxValue to exercise the long-valued counter.
        long LongFileLen = int.MaxValue + 1L;
        DFSTestUtil.CreateFile(fs, new Path("/tmp.txt"), LongFileLen, (short)1, 1L);
        IList<DataNode> datanodes = cluster.GetDataNodes();
        // FIX: expected value must be the first argument of Assert.AreEqual;
        // the original passed (actual, expected), which produces a misleading
        // failure message.
        NUnit.Framework.Assert.AreEqual(1, datanodes.Count);
        DataNode datanode = datanodes[0];
        MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
        MetricsAsserts.AssertCounter("BytesWritten", LongFileLen, rb);
        NUnit.Framework.Assert.IsTrue("Expected non-zero number of incremental block reports",
            MetricsAsserts.GetLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Supplier used for polling: returns true once the DataNode's
/// "BlocksUncached" metric has advanced past zero.
/// </summary>
public bool Get()
{
    MetricsRecordBuilder metricsRecord = MetricsAsserts.GetMetrics(TestFsDatasetCache.dn.GetMetrics().Name());
    long uncachedSoFar = MetricsAsserts.GetLongCounter("BlocksUncached", metricsRecord);
    return uncachedSoFar > 0;
}
/// <summary>
/// Caches every block of a freshly written file one at a time, checking cache
/// usage and the "BlocksCached" metric after each step, then uncaches them in
/// the same fashion while watching "BlocksUncached".
/// </summary>
/// <exception cref="System.Exception"/>
private void TestCacheAndUncacheBlock()
{
    Log.Info("beginning testCacheAndUncacheBlock");
    int NumBlocks = 5;
    DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
    NUnit.Framework.Assert.AreEqual(0, fsd.GetNumBlocksCached());
    // Write a test file
    Path testFile = new Path("/testCacheBlock");
    long testFileLen = BlockSize * NumBlocks;
    DFSTestUtil.CreateFile(fs, testFile, testFileLen, (short)1, unchecked((long)(0xABBAL)));
    // Get the details of the written file
    HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.GetFileBlockLocations(testFile, 0, testFileLen);
    NUnit.Framework.Assert.AreEqual("Unexpected number of blocks", NumBlocks, locs.Length);
    long[] blockSizes = GetBlockSizes(locs);
    // Check initial state: nothing cached yet.
    long cacheCapacity = fsd.GetCacheCapacity();
    long cacheUsed = fsd.GetCacheUsed();
    long current = 0;
    NUnit.Framework.Assert.AreEqual("Unexpected cache capacity", CacheCapacity, cacheCapacity);
    NUnit.Framework.Assert.AreEqual("Unexpected amount of cache used", current, cacheUsed);
    MetricsRecordBuilder dnMetrics;
    long numCacheCommands = 0;
    long numUncacheCommands = 0;
    // Cache each block in succession, checking each time
    for (int idx = 0; idx < NumBlocks; idx++)
    {
        SetHeartbeatResponse(CacheBlock(locs[idx]));
        current = DFSTestUtil.VerifyExpectedCacheUsage(current + blockSizes[idx], idx + 1, fsd);
        dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
        long cmds = MetricsAsserts.GetLongCounter("BlocksCached", dnMetrics);
        NUnit.Framework.Assert.IsTrue("Expected more cache requests from the NN (" + cmds
            + " <= " + numCacheCommands + ")", cmds > numCacheCommands);
        numCacheCommands = cmds;
    }
    // Uncache each block in succession, again checking each time
    for (int idx = 0; idx < NumBlocks; idx++)
    {
        SetHeartbeatResponse(UncacheBlock(locs[idx]));
        current = DFSTestUtil.VerifyExpectedCacheUsage(current - blockSizes[idx], NumBlocks - 1 - idx, fsd);
        dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
        long cmds = MetricsAsserts.GetLongCounter("BlocksUncached", dnMetrics);
        NUnit.Framework.Assert.IsTrue("Expected more uncache requests from the NN", cmds > numUncacheCommands);
        numUncacheCommands = cmds;
    }
    Log.Info("finishing testCacheAndUncacheBlock");
}
/// <summary>
/// Supplier used for polling: returns true once exactly TotalBlocksPerCache
/// blocks are reported cached by the DataNode's "BlocksCached" metric;
/// logs progress while waiting.
/// </summary>
public bool Get()
{
    MetricsRecordBuilder metricsRecord = MetricsAsserts.GetMetrics(TestFsDatasetCache.dn.GetMetrics().Name());
    long cachedSoFar = MetricsAsserts.GetLongCounter("BlocksCached", metricsRecord);
    if (cachedSoFar == TotalBlocksPerCache)
    {
        TestFsDatasetCache.Log.Info(TotalBlocksPerCache + " blocks are now cached.");
        return true;
    }
    TestFsDatasetCache.Log.Info("waiting for " + TotalBlocksPerCache + " to "
        + "be cached. Right now only " + cachedSoFar + " blocks are cached.");
    return false;
}
/// <summary>
/// Creates and then reads a file, verifying that the NameNode's
/// "TransactionsNumOps" counter advances past its starting value.
/// </summary>
public virtual void TestReadWriteOps()
{
    MetricsRecordBuilder before = MetricsAsserts.GetMetrics(NnMetrics);
    long startWriteCounter = MetricsAsserts.GetLongCounter("TransactionsNumOps", before);
    Path dataFile = new Path(TestRootDirPath, "ReadData.dat");
    // Perform create file operation
    CreateFile(dataFile, 1024 * 1024, (short)2);
    // Perform read file operation on earlier created file
    ReadFile(fs, dataFile);
    MetricsRecordBuilder after = MetricsAsserts.GetMetrics(NnMetrics);
    NUnit.Framework.Assert.IsTrue(MetricsAsserts.GetLongCounter("TransactionsNumOps", after) > startWriteCounter);
}
/// <summary>
/// Writes and reads 50 files through a single simulated DataNode and verifies
/// that the cumulative "TotalWriteTime" and "TotalReadTime" metrics both
/// increase over their starting values.
/// </summary>
public virtual void TestDataNodeTimeSpend()
{
    Configuration conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    try
    {
        FileSystem fs = cluster.GetFileSystem();
        IList<DataNode> datanodes = cluster.GetDataNodes();
        // FIX: expected value must be the first argument of Assert.AreEqual;
        // the original passed (actual, expected), which produces a misleading
        // failure message.
        NUnit.Framework.Assert.AreEqual(1, datanodes.Count);
        DataNode datanode = datanodes[0];
        MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
        long LongFileLen = 1024 * 1024 * 10;
        long startWriteValue = MetricsAsserts.GetLongCounter("TotalWriteTime", rb);
        long startReadValue = MetricsAsserts.GetLongCounter("TotalReadTime", rb);
        for (int x = 0; x < 50; x++)
        {
            DFSTestUtil.CreateFile(fs, new Path("/time.txt." + x), LongFileLen, (short)1, Time.MonotonicNow());
        }
        for (int x = 0; x < 50; x++)
        {
            // Read for its side effect on the metrics; the content is irrelevant.
            DFSTestUtil.ReadFile(fs, new Path("/time.txt." + x));
        }
        MetricsRecordBuilder rbNew = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
        long endWriteValue = MetricsAsserts.GetLongCounter("TotalWriteTime", rbNew);
        long endReadValue = MetricsAsserts.GetLongCounter("TotalReadTime", rbNew);
        NUnit.Framework.Assert.IsTrue(endReadValue > startReadValue);
        NUnit.Framework.Assert.IsTrue(endWriteValue > startWriteValue);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Issues 1000 Ping/Echo RPC round trips and verifies that queue-time and
/// processing-time counters and their per-interval quantile gauges are
/// populated.
/// </summary>
public virtual void TestRpcMetrics()
{
    Configuration configuration = new Configuration();
    int interval = 1;
    configuration.SetBoolean(CommonConfigurationKeys.RpcMetricsQuantileEnable, true);
    configuration.Set(CommonConfigurationKeys.RpcMetricsPercentilesIntervalsKey, string.Empty + interval);
    Server server = new RPC.Builder(configuration)
        .SetProtocol(typeof(TestRPC.TestProtocol))
        .SetInstance(new TestRPC.TestImpl())
        .SetBindAddress(Address)
        .SetPort(0)
        .SetNumHandlers(5)
        .SetVerbose(true)
        .Build();
    server.Start();
    TestRPC.TestProtocol proxy = null;
    try
    {
        // FIX: proxy creation moved inside the try block. In the original,
        // an exception from RPC.GetProxy would skip the finally clause and
        // leave the started server running (resource leak in the test JVM).
        proxy = RPC.GetProxy<TestRPC.TestProtocol>(TestRPC.TestProtocol.versionID,
            server.GetListenerAddress(), configuration);
        for (int i = 0; i < 1000; i++)
        {
            proxy.Ping();
            proxy.Echo(string.Empty + i);
        }
        MetricsRecordBuilder rpcMetrics = MetricsAsserts.GetMetrics(server.GetRpcMetrics().Name());
        Assert.True("Expected non-zero rpc queue time",
            MetricsAsserts.GetLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
        Assert.True("Expected non-zero rpc processing time",
            MetricsAsserts.GetLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
        MetricsAsserts.AssertQuantileGauges("RpcQueueTime" + interval + "s", rpcMetrics);
        MetricsAsserts.AssertQuantileGauges("RpcProcessingTime" + interval + "s", rpcMetrics);
    }
    finally
    {
        if (proxy != null)
        {
            RPC.StopProxy(proxy);
        }
        server.Stop();
    }
}
/// <summary>
/// Fills the cache with one big file, confirms a small file then cannot be
/// cached, uncaches the big file, and verifies the small file becomes
/// cacheable (regression test for HDFS-6107).
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestReCacheAfterUncache()
{
    int TotalBlocksPerCache = Ints.CheckedCast(CacheCapacity / BlockSize);
    BlockReaderTestUtil.EnableHdfsCachingTracing();
    // Capacity must be an exact multiple of the block size for this test.
    NUnit.Framework.Assert.AreEqual(0, CacheCapacity % BlockSize);
    // Create a small file
    Path SmallFile = new Path("/smallFile");
    DFSTestUtil.CreateFile(fs, SmallFile, BlockSize, (short)1, unchecked((int)(0xcafe)));
    // Create a file that will take up the whole cache
    Path BigFile = new Path("/bigFile");
    DFSTestUtil.CreateFile(fs, BigFile, TotalBlocksPerCache * BlockSize, (short)1, unchecked((int)(0xbeef)));
    DistributedFileSystem dfs = cluster.GetFileSystem();
    dfs.AddCachePool(new CachePoolInfo("pool"));
    long bigCacheDirectiveId = dfs.AddCacheDirective(
        new CacheDirectiveInfo.Builder().SetPool("pool").SetPath(BigFile).SetReplication((short)1).Build());
    // Wait until the big file occupies the entire cache.
    GenericTestUtils.WaitFor(new _Supplier_532(TotalBlocksPerCache), 1000, 30000);
    // Try to cache a smaller file. It should fail.
    long shortCacheDirectiveId = dfs.AddCacheDirective(
        new CacheDirectiveInfo.Builder().SetPool("pool").SetPath(SmallFile).SetReplication((short)1).Build());
    // Give the DataNode ample time; the cached count must not change.
    Sharpen.Thread.Sleep(10000);
    MetricsRecordBuilder metricsAfterWait = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
    NUnit.Framework.Assert.AreEqual(TotalBlocksPerCache,
        MetricsAsserts.GetLongCounter("BlocksCached", metricsAfterWait));
    // Uncache the big file and verify that the small file can now be
    // cached (regression test for HDFS-6107)
    dfs.RemoveCacheDirective(bigCacheDirectiveId);
    GenericTestUtils.WaitFor(new _Supplier_560(dfs, shortCacheDirectiveId), 1000, 30000);
    dfs.RemoveCacheDirective(shortCacheDirectiveId);
}
/// <summary>
/// Verify that the DataNode sends a single incremental block report covering
/// all of its storages rather than one report per storage.
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void TestDataNodeDoesNotSplitReports()
{
    LocatedBlocks blocks = CreateFileGetBlocks(GenericTestUtils.GetMethodName());
    Assert.AssertThat(cluster.GetDataNodes().Count, IS.Is(1));
    // Remove all blocks from the DataNode.
    foreach (LocatedBlock deletedBlock in blocks.GetLocatedBlocks())
    {
        dn0.NotifyNamenodeDeletedBlock(deletedBlock.GetBlock(), deletedBlock.GetStorageIDs()[0]);
    }
    Log.Info("Triggering report after deleting blocks");
    long opsBeforeReport = MetricsAsserts.GetLongCounter("BlockReceivedAndDeletedOps",
        MetricsAsserts.GetMetrics(NnMetrics));
    // Trigger a report to the NameNode and give it a few seconds.
    DataNodeTestUtils.TriggerBlockReport(dn0);
    Sharpen.Thread.Sleep(5000);
    // Ensure that NameNodeRpcServer.blockReceivedAndDeletes is invoked
    // exactly once after we triggered the report.
    MetricsAsserts.AssertCounter("BlockReceivedAndDeletedOps", opsBeforeReport + 1,
        MetricsAsserts.GetMetrics(NnMetrics));
}
/// <summary>
/// Writes through a two-node pipeline and verifies the head DataNode records
/// downstream-ack round-trip metrics ("PacketAckRoundTripTimeNanos") and
/// their per-interval quantile gauges.
/// </summary>
public virtual void TestRoundTripAckMetric()
{
    int datanodeCount = 2;
    int interval = 1;
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsMetricsPercentilesIntervalsKey, string.Empty + interval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(datanodeCount).Build();
    try
    {
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        // Open a file and get the head of the pipeline
        Path testFile = new Path("/testRoundTripAckMetric.txt");
        FSDataOutputStream fsout = fs.Create(testFile, (short)datanodeCount);
        DFSOutputStream dout = (DFSOutputStream)fsout.GetWrappedStream();
        // Slow down the writes to catch the write pipeline
        dout.SetChunksPerPacket(5);
        dout.SetArtificialSlowdown(3000);
        fsout.Write(new byte[10000]);
        DatanodeInfo[] pipeline = null;
        int count = 0;
        while (pipeline == null && count < 5)
        {
            pipeline = dout.GetPipeline();
            System.Console.Out.WriteLine("Waiting for pipeline to be created.");
            Sharpen.Thread.Sleep(1000);
            count++;
        }
        // FIX: guard before indexing. In the original, if the pipeline never
        // appeared within the retry window, pipeline[0] threw a raw
        // NullReferenceException instead of a clear test failure.
        NUnit.Framework.Assert.IsNotNull("Pipeline was not created within the retry window", pipeline);
        // Get the head node that should be receiving downstream acks
        DatanodeInfo headInfo = pipeline[0];
        DataNode headNode = null;
        foreach (DataNode datanode in cluster.GetDataNodes())
        {
            if (datanode.GetDatanodeId().Equals(headInfo))
            {
                headNode = datanode;
                break;
            }
        }
        NUnit.Framework.Assert.IsNotNull("Could not find the head of the datanode write pipeline", headNode);
        // FIX: actually close the file as the comment below describes; without
        // the close, the final packet's ack may never be received before the
        // metric check (matches the upstream Hadoop version of this test).
        fsout.Close();
        // Close the file and wait for the metrics to rollover
        Sharpen.Thread.Sleep((interval + 1) * 1000);
        // Check the ack was received
        MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(headNode.GetMetrics().Name());
        NUnit.Framework.Assert.IsTrue("Expected non-zero number of acks",
            MetricsAsserts.GetLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
        MetricsAsserts.AssertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Supplier used for polling: returns true once the NameNode has received one
/// "StorageBlockReportOps" operation per storage of the single DataNode.
/// </summary>
public bool Get()
{
    long reportOps = MetricsAsserts.GetLongCounter("StorageBlockReportOps",
        MetricsAsserts.GetMetrics(TestSafeMode.NnMetrics));
    return reportOps == this._enclosing.cluster.GetStoragesPerDatanode();
}