/// <exception cref="System.Exception"/> public virtual void TestFadviseSkippedForSmallReads() { // start a cluster Log.Info("testFadviseSkippedForSmallReads"); tracker.Clear(); Configuration conf = new HdfsConfiguration(); conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindReadsKey, true); conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindWritesKey, true); MiniDFSCluster cluster = null; string TestPath = "/test"; int TestPathLen = MaxTestFileLen; FSDataInputStream fis = null; try { cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build(); cluster.WaitActive(); FileSystem fs = cluster.GetFileSystem(); // create new file CreateHdfsFile(fs, new Path(TestPath), TestPathLen, null); // Since the DataNode was configured with drop-behind, and we didn't // specify any policy, we should have done drop-behind. ExtendedBlock block = cluster.GetNameNode().GetRpcServer().GetBlockLocations(TestPath , 0, long.MaxValue).Get(0).GetBlock(); string fadvisedFileName = cluster.GetBlockFile(0, block).GetName(); TestCachingStrategy.Stats stats = tracker.GetStats(fadvisedFileName); stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize); stats.Clear(); stats.AssertNotDroppedInRange(0, TestPathLen); // read file fis = fs.Open(new Path(TestPath)); byte[] buf = new byte[17]; fis.ReadFully(4096, buf, 0, buf.Length); // we should not have dropped anything because of the small read. stats = tracker.GetStats(fadvisedFileName); stats.AssertNotDroppedInRange(0, TestPathLen - WritePacketSize); } finally { IOUtils.Cleanup(null, fis); if (cluster != null) { cluster.Shutdown(); } } }
/// <summary>
/// Test the scenario where the DataNode defaults to not dropping the cache,
/// but our client defaults are set.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestClientDefaults()
{
    // start a cluster
    Log.Info("testClientDefaults");
    tracker.Clear();
    Configuration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindReadsKey, false);
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindWritesKey, false);
    conf.SetBoolean(DFSConfigKeys.DfsClientCacheDropBehindReads, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientCacheDropBehindWrites, true);
    MiniDFSCluster cluster = null;
    string TestPath = "/test";
    int TestPathLen = MaxTestFileLen;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        // create new file
        CreateHdfsFile(fs, new Path(TestPath), TestPathLen, null);
        // verify that we dropped everything from the cache during file creation.
        ExtendedBlock block = cluster.GetNameNode().GetRpcServer().GetBlockLocations(
            TestPath, 0, long.MaxValue).Get(0).GetBlock();
        string fadvisedFileName = cluster.GetBlockFile(0, block).GetName();
        TestCachingStrategy.Stats stats = tracker.GetStats(fadvisedFileName);
        stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize);
        stats.Clear();
        // read file
        ReadHdfsFile(fs, new Path(TestPath), long.MaxValue, null);
        // verify that we dropped everything from the cache.
        NUnit.Framework.Assert.IsNotNull(stats);
        stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
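// Illustrative sketch (not part of the original suite): the test above enables
// drop-behind through the DfsClientCacheDropBehindReads/Writes configuration keys;
// a client can also request it per stream. The SetDropBehind calls below assume the
// C# port mirrors the Java CanSetDropBehind API on FSDataOutputStream and
// FSDataInputStream, and the helper name is hypothetical.
/// <exception cref="System.Exception"/>
private static void WriteAndReadWithDropBehindHint(FileSystem fs, Path path, byte[] buffer)
{
    FSDataOutputStream fos = fs.Create(path);
    try
    {
        // Hint that the pipeline should fadvise(DONTNEED) behind completed writes.
        fos.SetDropBehind(true);
        fos.Write(buffer, 0, buffer.Length);
    }
    finally
    {
        IOUtils.Cleanup(null, fos);
    }
    FSDataInputStream fis = fs.Open(path);
    try
    {
        // Hint that the DataNode should drop cached data behind completed reads.
        fis.SetDropBehind(true);
        fis.ReadFully(0, buffer, 0, buffer.Length);
    }
    finally
    {
        IOUtils.Cleanup(null, fis);
    }
}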
/// <exception cref="Org.Apache.Hadoop.IO.Nativeio.NativeIOException"/> public override void PosixFadviseIfPossible(string name, FileDescriptor fd, long offset, long len, int flags) { if ((len < 0) || (len > int.MaxValue)) { throw new RuntimeException("invalid length of " + len + " passed to posixFadviseIfPossible" ); } if ((offset < 0) || (offset > int.MaxValue)) { throw new RuntimeException("invalid offset of " + offset + " passed to posixFadviseIfPossible" ); } TestCachingStrategy.Stats stats = map[name]; if (stats == null) { stats = new TestCachingStrategy.Stats(name); map[name] = stats; } stats.Fadvise((int)offset, (int)len, flags); base.PosixFadviseIfPossible(name, fd, offset, len, flags); }
/// <exception cref="System.Exception"/> public virtual void TestNoFadviseAfterWriteThenRead() { // start a cluster Log.Info("testNoFadviseAfterWriteThenRead"); tracker.Clear(); Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; string TestPath = "/test"; int TestPathLen = MaxTestFileLen; try { cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build(); cluster.WaitActive(); FileSystem fs = cluster.GetFileSystem(); // create new file CreateHdfsFile(fs, new Path(TestPath), TestPathLen, false); // verify that we did not drop everything from the cache during file creation. ExtendedBlock block = cluster.GetNameNode().GetRpcServer().GetBlockLocations(TestPath , 0, long.MaxValue).Get(0).GetBlock(); string fadvisedFileName = cluster.GetBlockFile(0, block).GetName(); TestCachingStrategy.Stats stats = tracker.GetStats(fadvisedFileName); NUnit.Framework.Assert.IsNull(stats); // read file ReadHdfsFile(fs, new Path(TestPath), long.MaxValue, false); // verify that we dropped everything from the cache. NUnit.Framework.Assert.IsNull(stats); } finally { if (cluster != null) { cluster.Shutdown(); } } }