/// <summary>
/// Verifies that cache activity quiesces after an uncache: one block is
/// cached, then uncached, and after a further quiet period the datanode
/// metrics still show exactly one cache and one uncache event.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestUncacheQuiesces()
{
    // Create a file
    Path fileName = new Path("/testUncacheQuiesces");
    int fileLen = 4096;
    DFSTestUtil.CreateFile(fs, fileName, fileLen, (short)1, unchecked((int)(0xFDFD)));
    // Cache it
    DistributedFileSystem dfs = cluster.GetFileSystem();
    dfs.AddCachePool(new CachePoolInfo("pool"));
    dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
        .SetPool("pool").SetPath(fileName).SetReplication((short)3).Build());
    GenericTestUtils.WaitFor(new _Supplier_484(), 1000, 30000);
    // Uncache it (the first directive added gets id 1)
    dfs.RemoveCacheDirective(1);
    GenericTestUtils.WaitFor(new _Supplier_495(), 1000, 30000);
    // Make sure that no additional messages were sent
    Sharpen.Thread.Sleep(10000);
    MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
    // Use an uppercase 'L' suffix for long literals: lowercase 'l' is easily
    // misread as the digit '1'.
    MetricsAsserts.AssertCounter("BlocksCached", 1L, dnMetrics);
    MetricsAsserts.AssertCounter("BlocksUncached", 1L, dnMetrics);
}
/// <summary>
/// Exercises re-caching after an uncache: a directive that fills the whole
/// cache is removed, after which a previously uncacheable smaller file
/// should become cacheable (regression test for HDFS-6107).
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestReCacheAfterUncache()
{
    int totalBlocksPerCache = Ints.CheckedCast(CacheCapacity / BlockSize);
    BlockReaderTestUtil.EnableHdfsCachingTracing();
    NUnit.Framework.Assert.AreEqual(0, CacheCapacity % BlockSize);
    // A small file, plus a file big enough to occupy the entire cache.
    Path smallFile = new Path("/smallFile");
    DFSTestUtil.CreateFile(fs, smallFile, BlockSize, (short)1, unchecked((int)(0xcafe)));
    Path bigFile = new Path("/bigFile");
    DFSTestUtil.CreateFile(fs, bigFile, totalBlocksPerCache * BlockSize, (short)1,
        unchecked((int)(0xbeef)));
    DistributedFileSystem dfs = cluster.GetFileSystem();
    dfs.AddCachePool(new CachePoolInfo("pool"));
    // Cache the big file and wait until every cache block is in use.
    long bigDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
        .SetPool("pool").SetPath(bigFile).SetReplication((short)1).Build());
    GenericTestUtils.WaitFor(new _Supplier_532(totalBlocksPerCache), 1000, 30000);
    // Try to cache a smaller file. It should fail since the cache is full.
    long smallDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
        .SetPool("pool").SetPath(smallFile).SetReplication((short)1).Build());
    Sharpen.Thread.Sleep(10000);
    MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
    NUnit.Framework.Assert.AreEqual(totalBlocksPerCache,
        MetricsAsserts.GetLongCounter("BlocksCached", dnMetrics));
    // Uncache the big file and verify that the small file can now be
    // cached (regression test for HDFS-6107)
    dfs.RemoveCacheDirective(bigDirectiveId);
    GenericTestUtils.WaitFor(new _Supplier_560(dfs, smallDirectiveId), 1000, 30000);
    dfs.RemoveCacheDirective(smallDirectiveId);
}
/// <summary>
/// Removes every cache directive whose path matches the -path argument.
/// Individual removal failures are reported but do not stop the iteration.
/// </summary>
/// <param name="conf">configuration used to obtain the filesystem</param>
/// <param name="args">remaining command-line arguments; must supply -path</param>
/// <returns>0 on success, 1 on a usage error, 2 if any removal failed</returns>
/// <exception cref="System.IO.IOException"/>
public virtual int Run(Configuration conf, IList<string> args)
{
    string path = StringUtils.PopOptionWithArgument("-path", args);
    if (path == null)
    {
        System.Console.Error.WriteLine("You must specify a path with -path.");
        return 1;
    }
    if (!args.IsEmpty())
    {
        System.Console.Error.WriteLine("Can't understand argument: " + args[0]);
        System.Console.Error.WriteLine("Usage is " + GetShortUsage());
        return 1;
    }
    int exitCode = 0;
    try
    {
        DistributedFileSystem dfs = AdminHelper.GetDFS(conf);
        RemoteIterator<CacheDirectiveEntry> iter = dfs.ListCacheDirectives(
            new CacheDirectiveInfo.Builder().SetPath(new Path(path)).Build());
        while (iter.HasNext())
        {
            CacheDirectiveEntry entry = iter.Next();
            try
            {
                dfs.RemoveCacheDirective(entry.GetInfo().GetId());
                System.Console.Out.WriteLine("Removed cache directive " + entry.GetInfo().GetId());
            }
            catch (IOException e)
            {
                // Keep going: report this failure and try the remaining directives.
                System.Console.Error.WriteLine(AdminHelper.PrettifyException(e));
                exitCode = 2;
            }
        }
    }
    catch (IOException e)
    {
        System.Console.Error.WriteLine(AdminHelper.PrettifyException(e));
        exitCode = 2;
    }
    if (exitCode == 0)
    {
        System.Console.Out.WriteLine("Removed every cache directive with path " + path);
    }
    return exitCode;
}
/// <summary>
/// Test that when we have an uncache request, and the client refuses to release
/// the replica for a long time, we will un-mlock it.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRevocation()
{
    Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
    BlockReaderTestUtil.EnableHdfsCachingTracing();
    BlockReaderTestUtil.EnableShortCircuitShmTracing();
    Configuration conf = GetDefaultConf();
    // Set a really short revocation timeout.
    conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationTimeoutMs, 250L);
    // Poll very often
    conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationPollingMs, 2L);
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        DistributedFileSystem dfs = cluster.GetFileSystem();
        // Create and cache a file.
        string TestFile = "/test_file2";
        DFSTestUtil.CreateFile(dfs, new Path(TestFile), BlockSize, (short)1,
            unchecked((int)(0xcafe)));
        dfs.AddCachePool(new CachePoolInfo("pool"));
        long cacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
            .SetPool("pool").SetPath(new Path(TestFile)).SetReplication((short)1).Build());
        FsDatasetSpi<object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
        DFSTestUtil.VerifyExpectedCacheUsage(BlockSize, 1, fsd);
        // Mmap the file.
        FSDataInputStream @in = dfs.Open(new Path(TestFile));
        ByteBuffer buf = @in.Read(null, BlockSize, EnumSet.NoneOf<ReadOption>());
        // Attempt to uncache file. The file should get uncached even though the
        // client still holds the mmapped buffer, because the short revocation
        // timeout configured above forces the datanode to un-mlock it.
        Log.Info("removing cache directive {}", cacheDirectiveId);
        dfs.RemoveCacheDirective(cacheDirectiveId);
        Log.Info("finished removing cache directive {}", cacheDirectiveId);
        Sharpen.Thread.Sleep(1000);
        DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
        // Cleanup
        @in.ReleaseBuffer(buf);
        @in.Close();
    }
    finally
    {
        // Always tear the cluster down, even when a verification above throws,
        // so a failed run does not leak the MiniDFSCluster's resources.
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>Remove a CacheDirective by delegating to the wrapped filesystem.</summary>
/// <param name="id">identifier of the CacheDirectiveInfo to remove</param>
/// <exception cref="System.IO.IOException">if the directive could not be removed</exception>
public virtual void RemoveCacheDirective(long id)
{
    dfs.RemoveCacheDirective(id);
}
/// <summary>
/// Test that we can zero-copy read cached data even without disabling
/// checksums.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestZeroCopyReadOfCachedData()
{
    BlockReaderTestUtil.EnableShortCircuitShmTracing();
    BlockReaderTestUtil.EnableBlockReaderFactoryTracing();
    BlockReaderTestUtil.EnableHdfsCachingTracing();
    int TestFileLength = BlockSize;
    Path TestPath = new Path("/a");
    int RandomSeed = 23453;
    HdfsConfiguration conf = InitZeroCopyTest();
    // Checksums stay ENABLED: the point of this test is that zero-copy reads
    // of cached data work without skipping checksums.
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
    string Context = "testZeroCopyReadOfCachedData";
    conf.Set(DFSConfigKeys.DfsClientContext, Context);
    // Size the lockable-memory limit so the whole test file fits in the cache
    // (rounded up to the OS page size required by mlock).
    conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey, DFSTestUtil.RoundUpToMultiple
        (TestFileLength, (int)NativeIO.POSIX.GetCacheManipulator().GetOperatingSystemPageSize
        ()));
    MiniDFSCluster cluster = null;
    ByteBuffer result = null;
    ByteBuffer result2 = null;
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    cluster.WaitActive();
    FsDatasetSpi<object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
    DistributedFileSystem fs = cluster.GetFileSystem();
    DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
    // Expected file contents, derived from the same seed used to create it.
    byte[] original = DFSTestUtil.CalculateFileContentsFromSeed(RandomSeed, TestFileLength
        );
    // Prior to caching, the file can't be read via zero-copy
    FSDataInputStream fsIn = fs.Open(TestPath);
    try
    {
        result = fsIn.Read(null, TestFileLength / 2, EnumSet.NoneOf<ReadOption>());
        NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
    }
    catch (NotSupportedException)
    {
    }
    // expected
    // Cache the file
    fs.AddCachePool(new CachePoolInfo("pool1"));
    long directiveId = fs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPath(
        TestPath).SetReplication((short)1).SetPool("pool1").Build());
    int numBlocks = (int)Math.Ceil((double)TestFileLength / BlockSize);
    // Wait until the datanode reports the whole file as cached.
    DFSTestUtil.VerifyExpectedCacheUsage(DFSTestUtil.RoundUpToMultiple(TestFileLength
        , BlockSize), numBlocks, cluster.GetDataNodes()[0].GetFSDataset());
    // Once cached, the same stream should succeed at a zero-copy read.
    try
    {
        result = fsIn.Read(null, TestFileLength, EnumSet.NoneOf<ReadOption>());
    }
    catch (NotSupportedException)
    {
        NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
            );
    }
    Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
        (result));
    // Test that files opened after the cache operation has finished
    // still get the benefits of zero-copy (regression test for HDFS-6086)
    FSDataInputStream fsIn2 = fs.Open(TestPath);
    try
    {
        result2 = fsIn2.Read(null, TestFileLength, EnumSet.NoneOf<ReadOption>());
    }
    catch (NotSupportedException)
    {
        NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
            );
    }
    Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
        (result2));
    fsIn2.ReleaseBuffer(result2);
    fsIn2.Close();
    // check that the replica is anchored
    ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);
    ShortCircuitCache cache = ClientContext.Get(Context, new DFSClient.Conf(conf)).GetShortCircuitCache
        ();
    WaitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
    // Uncache the replica. Order matters below: the replica stays anchored
    // until the outstanding zero-copy buffer is released, and cache usage
    // only drops to zero after the anchor is gone.
    fs.RemoveCacheDirective(directiveId);
    WaitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
    fsIn.ReleaseBuffer(result);
    WaitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
    DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
    fsIn.Close();
    fs.Close();
    cluster.Shutdown();
}