public virtual void TestRoundTripAckMetric()
{
    int datanodeCount = 2;
    int interval = 1;
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsMetricsPercentilesIntervalsKey, string.Empty + interval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(datanodeCount).Build();
    try
    {
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        // Open a file and get the head of the pipeline
        Path testFile = new Path("/testRoundTripAckMetric.txt");
        FSDataOutputStream fsout = fs.Create(testFile, (short)datanodeCount);
        DFSOutputStream dout = (DFSOutputStream)fsout.GetWrappedStream();
        // Slow down the writes to catch the write pipeline
        dout.SetChunksPerPacket(5);
        dout.SetArtificialSlowdown(3000);
        fsout.Write(new byte[10000]);
        DatanodeInfo[] pipeline = null;
        int count = 0;
        while (pipeline == null && count < 5)
        {
            pipeline = dout.GetPipeline();
            System.Console.Out.WriteLine("Waiting for pipeline to be created.");
            Sharpen.Thread.Sleep(1000);
            count++;
        }
        // Get the head node that should be receiving downstream acks
        DatanodeInfo headInfo = pipeline[0];
        DataNode headNode = null;
        foreach (DataNode datanode in cluster.GetDataNodes())
        {
            if (datanode.GetDatanodeId().Equals(headInfo))
            {
                headNode = datanode;
                break;
            }
        }
        NUnit.Framework.Assert.IsNotNull("Could not find the head of the datanode write pipeline", headNode);
        // Close the file and wait for the metrics to rollover
        Sharpen.Thread.Sleep((interval + 1) * 1000);
        // Check the ack was received
        MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(headNode.GetMetrics().Name());
        NUnit.Framework.Assert.IsTrue("Expected non-zero number of acks", MetricsAsserts.GetLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
        MetricsAsserts.AssertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
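// A minimal sketch of the pipeline-wait loop used in TestRoundTripAckMetric above,
// factored into a hypothetical helper (not part of the original test). It assumes only
// APIs already used in this file: DFSOutputStream.GetPipeline() and Sharpen.Thread.Sleep().
private static DatanodeInfo[] WaitForPipeline(DFSOutputStream stream, int attempts)
{
    DatanodeInfo[] pipeline = stream.GetPipeline();
    while (pipeline == null && attempts-- > 0)
    {
        // The pipeline is only available once the client has started streaming packets.
        System.Console.Out.WriteLine("Waiting for pipeline to be created.");
        Sharpen.Thread.Sleep(1000);
        pipeline = stream.GetPipeline();
    }
    return pipeline;
}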
/// <summary>
/// Write to one file, then kill one datanode in the pipeline and then
/// close the file.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void SimpleTest(int datanodeToKill)
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
    conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 5000);
    int myMaxNodes = 5;
    System.Console.Out.WriteLine("SimpleTest starting with DataNode to Kill " + datanodeToKill);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(myMaxNodes).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    short repl = 3;
    Path filename = new Path("simpletest.dat");
    try
    {
        // create a file and write one block of data
        System.Console.Out.WriteLine("SimpleTest creating file " + filename);
        FSDataOutputStream stm = CreateFile(fs, filename, repl);
        DFSOutputStream dfstream = (DFSOutputStream)(stm.GetWrappedStream());
        // these are test settings
        dfstream.SetChunksPerPacket(5);
        dfstream.SetArtificialSlowdown(3000);
        long myseed = AppendTestUtil.NextLong();
        byte[] buffer = AppendTestUtil.RandomBytes(myseed, fileSize);
        int mid = fileSize / 4;
        stm.Write(buffer, 0, mid);
        DatanodeInfo[] targets = dfstream.GetPipeline();
        int count = 5;
        while (count-- > 0 && targets == null)
        {
            try
            {
                System.Console.Out.WriteLine("SimpleTest: Waiting for pipeline to be created.");
                Sharpen.Thread.Sleep(1000);
            }
            catch (Exception)
            {
            }
            targets = dfstream.GetPipeline();
        }
        if (targets == null)
        {
            int victim = AppendTestUtil.NextInt(myMaxNodes);
            System.Console.Out.WriteLine("SimpleTest stopping datanode random " + victim);
            cluster.StopDataNode(victim);
        }
        else
        {
            int victim = datanodeToKill;
            System.Console.Out.WriteLine("SimpleTest stopping datanode " + targets[victim]);
            cluster.StopDataNode(targets[victim].GetXferAddr());
        }
        System.Console.Out.WriteLine("SimpleTest stopping datanode complete");
        // write some more data to file, close and verify
        stm.Write(buffer, mid, fileSize - mid);
        stm.Close();
        CheckFile(fs, filename, repl, numBlocks, fileSize, myseed);
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Simple Workload exception " + e);
        Sharpen.Runtime.PrintStackTrace(e);
        NUnit.Framework.Assert.IsTrue(e.ToString(), false);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
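// A minimal usage sketch (hypothetical name, not from the original class): SimpleTest
// takes the pipeline index of the datanode to kill, so with repl = 3 a driver could
// exercise killing the head, middle, and tail of the write pipeline like this.
public virtual void TestSimpleKillEachPipelinePosition()
{
    SimpleTest(0);  // kill the head of the pipeline
    SimpleTest(1);  // kill the middle datanode
    SimpleTest(2);  // kill the tail of the pipeline
}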