Example #1
        /// <exception cref="System.Exception"/>
        public virtual void TestUncacheQuiesces()
        {
            // Create a file
            Path fileName = new Path("/testUncacheQuiesces");
            int  fileLen  = 4096;

            DFSTestUtil.CreateFile(fs, fileName, fileLen, (short)1, unchecked ((int)(0xFDFD)));
            // Cache it
            DistributedFileSystem dfs = cluster.GetFileSystem();

            dfs.AddCachePool(new CachePoolInfo("pool"));
            dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPool("pool").SetPath(fileName).SetReplication((short)3).Build());
            GenericTestUtils.WaitFor(new _Supplier_484(), 1000, 30000);
            // Uncache it
            dfs.RemoveCacheDirective(1);
            GenericTestUtils.WaitFor(new _Supplier_495(), 1000, 30000);
            // Make sure that no additional messages were sent
            Sharpen.Thread.Sleep(10000);
            MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());

            MetricsAsserts.AssertCounter("BlocksCached", 1l, dnMetrics);
            MetricsAsserts.AssertCounter("BlocksUncached", 1l, dnMetrics);
        }
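        // The _Supplier_484 / _Supplier_495 arguments above are Sharpen-generated names for
        // the anonymous callbacks that GenericTestUtils.WaitFor polls until they return true.
        // Below is a rough sketch of what such a callback does; the Supplier<bool> interface
        // with a Get() method and the exact metric check are assumptions based on the
        // surrounding test code, not the original anonymous class bodies.
        private sealed class _CounterReachedSupplier : Supplier<bool>
        {
            private readonly DataNode dn;
            private readonly string counter;   // e.g. "BlocksCached" or "BlocksUncached"
            private readonly long expected;

            public _CounterReachedSupplier(DataNode dn, string counter, long expected)
            {
                this.dn = dn;
                this.counter = counter;
                this.expected = expected;
            }

            public bool Get()
            {
                // Re-read the datanode metrics on every poll and report whether the
                // counter has reached the expected value yet.
                MetricsRecordBuilder m = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());
                return MetricsAsserts.GetLongCounter(counter, m) == expected;
            }
        }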
        /// <summary>
        /// Test that when we have an uncache request, and the client refuses to release
        /// the replica for a long time, we will un-mlock it.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRevocation()
        {
            Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            Configuration conf = GetDefaultConf();

            // Set a really short revocation timeout.
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationTimeoutMs, 250L);
            // Poll very often
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationPollingMs, 2L);
            MiniDFSCluster cluster = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            DistributedFileSystem dfs = cluster.GetFileSystem();
            // Create and cache a file.
            string TestFile = "/test_file2";

            DFSTestUtil.CreateFile(dfs, new Path(TestFile), BlockSize, (short)1, unchecked((int)(0xcafe)));
            dfs.AddCachePool(new CachePoolInfo("pool"));
            long cacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPool("pool").SetPath(new Path(TestFile)).SetReplication((short)1).Build());
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();

            DFSTestUtil.VerifyExpectedCacheUsage(BlockSize, 1, fsd);
            // Mmap the file.
            FSDataInputStream @in = dfs.Open(new Path(TestFile));
            ByteBuffer        buf = @in.Read(null, BlockSize, EnumSet.NoneOf <ReadOption>());

            // Attempt to uncache file.  The file should get uncached.
            Log.Info("removing cache directive {}", cacheDirectiveId);
            dfs.RemoveCacheDirective(cacheDirectiveId);
            Log.Info("finished removing cache directive {}", cacheDirectiveId);
            Sharpen.Thread.Sleep(1000);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            // Cleanup
            @in.ReleaseBuffer(buf);
            @in.Close();
            cluster.Shutdown();
        }
Example #3
        /// <exception cref="System.Exception"/>
        public virtual void TestReCacheAfterUncache()
        {
            int TotalBlocksPerCache = Ints.CheckedCast(CacheCapacity / BlockSize);

            BlockReaderTestUtil.EnableHdfsCachingTracing();
            NUnit.Framework.Assert.AreEqual(0, CacheCapacity % BlockSize);
            // Create a small file
            Path SmallFile = new Path("/smallFile");

            DFSTestUtil.CreateFile(fs, SmallFile, BlockSize, (short)1, unchecked((int)(0xcafe)));
            // Create a file that will take up the whole cache
            Path BigFile = new Path("/bigFile");

            DFSTestUtil.CreateFile(fs, BigFile, TotalBlocksPerCache * BlockSize, (short)1, unchecked((int)(0xbeef)));
            DistributedFileSystem dfs = cluster.GetFileSystem();

            dfs.AddCachePool(new CachePoolInfo("pool"));
            long bigCacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPool("pool").SetPath(BigFile).SetReplication((short)1).Build());

            GenericTestUtils.WaitFor(new _Supplier_532(TotalBlocksPerCache), 1000, 30000);
            // Try to cache a smaller file.  It should fail.
            long shortCacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPool("pool").SetPath(SmallFile).SetReplication((short)1).Build());

            Sharpen.Thread.Sleep(10000);
            MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(dn.GetMetrics().Name());

            NUnit.Framework.Assert.AreEqual(TotalBlocksPerCache,
                MetricsAsserts.GetLongCounter("BlocksCached", dnMetrics));
            // Uncache the big file and verify that the small file can now be
            // cached (regression test for HDFS-6107)
            dfs.RemoveCacheDirective(bigCacheDirectiveId);
            GenericTestUtils.WaitFor(new _Supplier_560(dfs, shortCacheDirectiveId), 1000, 30000);
            dfs.RemoveCacheDirective(shortCacheDirectiveId);
        }
Example #4
 /// <summary>Add a cache pool.</summary>
 /// <param name="info">The request to add a cache pool.</param>
 /// <exception cref="System.IO.IOException">
 ///
 /// If the request could not be completed.
 /// </exception>
 public virtual void AddCachePool(CachePoolInfo info)
 {
     dfs.AddCachePool(info);
 }
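The wrapper above simply forwards to DistributedFileSystem.AddCachePool. A minimal usage sketch follows; the pool name, owner, group, mode, and limit are illustrative values, and the setters are the ones exercised by the -addPool command shown in Example #5 below.

// Minimal usage sketch with illustrative values; the setters mirror those
// used by the -addPool command in Example #5.
CachePoolInfo poolInfo = new CachePoolInfo("reportsPool");
poolInfo.SetOwnerName("hdfs");
poolInfo.SetGroupName("analysts");
poolInfo.SetMode(new FsPermission(Convert.ToInt16("755", 8)));   // rwxr-xr-x
poolInfo.SetLimit(256L * 1024 * 1024);                           // 256 MB cache limit

// Throws System.IO.IOException if the request could not be completed.
AddCachePool(poolInfo);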
Example #5
            /// <exception cref="System.IO.IOException"/>
            public virtual int Run(Configuration conf, IList <string> args)
            {
                string name = StringUtils.PopFirstNonOption(args);

                if (name == null)
                {
                    System.Console.Error.WriteLine("You must specify a name when creating a " + "cache pool."
                                                   );
                    return(1);
                }
                CachePoolInfo info  = new CachePoolInfo(name);
                string        owner = StringUtils.PopOptionWithArgument("-owner", args);

                if (owner != null)
                {
                    info.SetOwnerName(owner);
                }
                string group = StringUtils.PopOptionWithArgument("-group", args);

                if (group != null)
                {
                    info.SetGroupName(group);
                }
                string modeString = StringUtils.PopOptionWithArgument("-mode", args);

                if (modeString != null)
                {
                    // Parse the permission string as an octal value (e.g. "755").
                    short mode = Convert.ToInt16(modeString, 8);
                    info.SetMode(new FsPermission(mode));
                }
                string limitString = StringUtils.PopOptionWithArgument("-limit", args);
                // ParseLimitString returns null when -limit is not supplied, so use a nullable long.
                long? limit = AdminHelper.ParseLimitString(limitString);

                if (limit != null)
                {
                    info.SetLimit(limit.Value);
                }
                string maxTtlString = StringUtils.PopOptionWithArgument("-maxTtl", args);

                try
                {
                    // ParseTtlString returns null when -maxTtl is not supplied, so use a nullable long.
                    long? maxTtl = AdminHelper.ParseTtlString(maxTtlString);
                    if (maxTtl != null)
                    {
                        info.SetMaxRelativeExpiryMs(maxTtl.Value);
                    }
                }
                catch (IOException e)
                {
                    System.Console.Error.WriteLine("Error while parsing maxTtl value: " + e.Message);
                    return(1);
                }
                if (!args.IsEmpty())
                {
                    System.Console.Error.Write("Can't understand arguments: " + Joiner.On(" ").Join(args
                                                                                                    ) + "\n");
                    System.Console.Error.WriteLine("Usage is " + GetShortUsage());
                    return(1);
                }
                DistributedFileSystem dfs = AdminHelper.GetDFS(conf);

                try
                {
                    dfs.AddCachePool(info);
                }
                catch (IOException e)
                {
                    System.Console.Error.WriteLine(AdminHelper.PrettifyException(e));
                    return(2);
                }
                System.Console.Out.WriteLine("Successfully added cache pool " + name + ".");
                return(0);
            }
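This Run method backs the cacheadmin -addPool subcommand. Below is a hedged sketch of driving it programmatically; the enclosing command class name (AddCachePoolCommand) is hypothetical, the option values are illustrative, and the "7d" TTL format is assumed to be accepted by AdminHelper.ParseTtlString, but the option names match the ones parsed above.

// Hedged sketch: invoking the command with the same option strings the CLI would
// pass. "AddCachePoolCommand" is a hypothetical name for the enclosing command class.
Configuration conf = new Configuration();
IList<string> args = new List<string>
{
    "reportsPool",           // positional pool name
    "-owner", "hdfs",
    "-group", "analysts",
    "-mode",  "755",         // parsed as an octal permission
    "-limit", "268435456",   // handed to AdminHelper.ParseLimitString
    "-maxTtl", "7d"          // handed to AdminHelper.ParseTtlString
};

int exitCode = new AddCachePoolCommand().Run(conf, args);   // 0 on success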
        /// <summary>
        /// Test that we can zero-copy read cached data even without disabling
        /// checksums.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestZeroCopyReadOfCachedData()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            BlockReaderTestUtil.EnableBlockReaderFactoryTracing();
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            int  TestFileLength    = BlockSize;
            Path TestPath          = new Path("/a");
            int  RandomSeed        = 23453;
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            string Context = "testZeroCopyReadOfCachedData";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey,
                DFSTestUtil.RoundUpToMultiple(TestFileLength,
                    (int)NativeIO.POSIX.GetCacheManipulator().GetOperatingSystemPageSize()));
            MiniDFSCluster cluster = null;
            ByteBuffer     result  = null;
            ByteBuffer     result2 = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
            DistributedFileSystem fs  = cluster.GetFileSystem();

            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            byte[] original = DFSTestUtil.CalculateFileContentsFromSeed(RandomSeed, TestFileLength);
            // Prior to caching, the file can't be read via zero-copy
            FSDataInputStream fsIn = fs.Open(TestPath);

            try
            {
                result = fsIn.Read(null, TestFileLength / 2, EnumSet.NoneOf <ReadOption>());
                NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
            }
            catch (NotSupportedException)
            {
            }
            // expected
            // Cache the file
            fs.AddCachePool(new CachePoolInfo("pool1"));
            long directiveId = fs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPath(TestPath).SetReplication((short)1).SetPool("pool1").Build());
            int numBlocks = (int)Math.Ceiling((double)TestFileLength / BlockSize);

            DFSTestUtil.VerifyExpectedCacheUsage(DFSTestUtil.RoundUpToMultiple(TestFileLength, BlockSize),
                numBlocks, cluster.GetDataNodes()[0].GetFSDataset());
            try
            {
                result = fsIn.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray(result));
            // Test that files opened after the cache operation has finished
            // still get the benefits of zero-copy (regression test for HDFS-6086)
            FSDataInputStream fsIn2 = fs.Open(TestPath);

            try
            {
                result2 = fsIn2.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray(result2));
            fsIn2.ReleaseBuffer(result2);
            fsIn2.Close();
            // check that the replica is anchored
            ExtendedBlock     firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);
            ShortCircuitCache cache      = ClientContext.Get(Context, new DFSClient.Conf(conf)).GetShortCircuitCache();

            WaitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
            // Uncache the replica
            fs.RemoveCacheDirective(directiveId);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
            fsIn.ReleaseBuffer(result);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            fsIn.Close();
            fs.Close();
            cluster.Shutdown();
        }
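The core pattern the test exercises is the zero-copy read path: open the file, call Read with a null buffer pool and no ReadOptions, and release the buffer when done. A condensed sketch follows; the path and length are illustrative, and it assumes the file's block is already cached.

// Condensed sketch of the zero-copy read pattern exercised above; the path and
// length are illustrative, and the block is assumed to already be cached.
FSDataInputStream stream = fs.Open(new Path("/cachedData"));
ByteBuffer mapped = null;
try
{
    // Without the SKIP_CHECKSUMS read option this succeeds only for a block
    // that is mlocked in the datanode cache, which is what the test verifies.
    mapped = stream.Read(null, 4096, EnumSet.NoneOf<ReadOption>());
    // ... consume the mapped ByteBuffer ...
}
finally
{
    if (mapped != null)
    {
        stream.ReleaseBuffer(mapped);   // un-anchors the cached replica
    }
    stream.Close();
}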