/// <summary>
/// Verifies that a datanode which missed appends (it was stopped while the
/// file's generation stamp was bumped) is not reported among the block
/// locations after it restarts and its block report arrives.
/// </summary>
/// <remarks>
/// Scenario: write a file on a 3-node cluster, stop one datanode, append
/// twice (bumping the genstamp past the stopped node's replica), re-open the
/// file for append so the last block is under construction, restart the
/// stale datanode, and assert its address is absent from the block locations.
/// Replace-datanode-on-failure is disabled so the pipeline keeps running
/// with the remaining two nodes.
/// </remarks>
public virtual void TestFailedAppendBlockRejection()
{
    Configuration conf = new HdfsConfiguration();
    // Keep the 2-node pipeline after one datanode is stopped instead of
    // replacing the failed node.
    conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    DistributedFileSystem fs = null;
    try
    {
        fs = cluster.GetFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream @out = fs.Create(path);
        @out.WriteBytes("hello\n");
        @out.Close();
        // stop one datanode
        MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
        string dnAddress = dnProp.datanode.GetXferAddress().ToString();
        // GetXferAddress().ToString() yields "/host:port"; strip the slash
        // so it matches the names returned by GetFileBlockLocations.
        if (dnAddress.StartsWith("/"))
        {
            dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
        }
        // append again to bump genstamps
        for (int i = 0; i < 2; i++)
        {
            @out = fs.Append(path);
            @out.WriteBytes("helloagain\n");
            @out.Close();
        }
        // re-open and make the block state as underconstruction
        @out = fs.Append(path);
        cluster.RestartDataNode(dnProp, true);
        // wait till the block report comes
        Sharpen.Thread.Sleep(2000);
        // check the block locations, this should not contain restarted datanode
        BlockLocation[] locations = fs.GetFileBlockLocations(path, 0, long.MaxValue);
        string[] names = locations[0].GetNames();
        bool staleNodePresent = false;
        foreach (string node in names)
        {
            if (node.Equals(dnAddress))
            {
                staleNodePresent = true;
                break;
            }
        }
        // Close the under-construction stream BEFORE asserting so a failing
        // assertion does not leak the open append stream (the original code
        // closed it only after Assert.Fail, which would have thrown past it).
        @out.Close();
        if (staleNodePresent)
        {
            NUnit.Framework.Assert.Fail("Failed append should not be present in latest block locations."
                );
        }
    }
    finally
    {
        IOUtils.CloseStream(fs);
        cluster.Shutdown();
    }
}