public virtual void TestChooseReplicaToDelete()
        {
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, SmallBlockSize);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                fs      = cluster.GetFileSystem();
                FSNamesystem namesystem = cluster.GetNamesystem();
                conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 300);
                cluster.StartDataNodes(conf, 1, true, null, null, null);
                DataNode             lastDN = cluster.GetDataNodes()[3];
                DatanodeRegistration dnReg  = DataNodeTestUtils.GetDNRegistrationForBP(lastDN, namesystem
                                                                                       .GetBlockPoolId());
                string lastDNid = dnReg.GetDatanodeUuid();
                Path   fileName = new Path("/foo2");
                DFSTestUtil.CreateFile(fs, fileName, SmallFileLength, (short)4, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)4);
                // Wait for tolerable number of heartbeats plus one
                DatanodeDescriptor nodeInfo = null;
                long lastHeartbeat          = 0;
                long waitTime = DFSConfigKeys.DfsHeartbeatIntervalDefault * 1000 * (DFSConfigKeys
                                                                                    .DfsNamenodeTolerateHeartbeatMultiplierDefault + 1);
                do
                {
                    nodeInfo      = namesystem.GetBlockManager().GetDatanodeManager().GetDatanode(dnReg);
                    lastHeartbeat = nodeInfo.GetLastUpdateMonotonic();
                }while (Time.MonotonicNow() - lastHeartbeat < waitTime);
                fs.SetReplication(fileName, (short)3);
                BlockLocation[] locs = fs.GetFileBlockLocations(fs.GetFileStatus(fileName), 0, long.MaxValue
                                                                );
                // All replicas scheduled for deletion should be on lastDN, and none
                // should actually be deleted, because lastDN is not sending heartbeats.
                namesystem.ReadLock();
                ICollection <Block> dnBlocks = namesystem.GetBlockManager().excessReplicateMap[lastDNid
                                               ];
                NUnit.Framework.Assert.AreEqual("Replicas on node " + lastDNid + " should have been deleted"
                                                , SmallFileLength / SmallBlockSize, dnBlocks.Count);
                namesystem.ReadUnlock();
                foreach (BlockLocation location in locs)
                {
                    NUnit.Framework.Assert.AreEqual("Block should still have 4 replicas", 4, location
                                                    .GetNames().Length);
                }
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        private static Configuration CreateConf()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 1L);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 10000L);
            return(conf);
        }
Example #3
        public static void SetUp()
        {
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            // High value of replication interval
            // so that blocks remain under-replicated
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            conf.SetLong(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
            cluster.WaitActive();
            fileSys = cluster.GetFileSystem();
            nnRpc   = cluster.GetNameNodeRpc();
        }
        private static Configuration GetDefaultConf()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 50);
            conf.SetLong(DFSConfigKeys.DfsCachereportIntervalMsecKey, 250);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey, TestFsDatasetCache.CacheCapacity
                         );
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(), "sock"
                                                                        ).GetAbsolutePath());
            return(conf);
        }
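GetDefaultConf() above references a sockDir fixture for the domain-socket path. A hedged sketch of that fixture, assuming sockDir is a TemporarySocketDirectory as in the upstream Hadoop tests; the setup/teardown method names are assumptions.

        private static TemporarySocketDirectory sockDir;

        public static void InitSocketDir()
        {
            // Temporary directory that hosts the UNIX domain socket used by
            // short-circuit reads; created before any test runs.
            sockDir = new TemporarySocketDirectory();
        }

        public static void CleanupSocketDir()
        {
            // Deletes the directory (and its socket file) after the tests finish.
            sockDir.Close();
        }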
Example #5
        /// <exception cref="System.Exception"/>
        public virtual void TestMinBlockSizeLimit()
        {
            long          blockSize = 4096;
            Configuration conf      = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeMinBlockSizeKey, blockSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                // Try with min block size
                fs.Create(new Path("/testmblock1"), true, 4096, (short)3, blockSize);
                try
                {
                    // Try with min block size - 1
                    fs.Create(new Path("/testmblock2"), true, 4096, (short)3, blockSize - 1);
                    System.Diagnostics.Debug.Assert(false, "Expected IOException after creating a file with small"
                                                    + " blocks ");
                }
                catch (IOException e)
                {
                    GenericTestUtils.AssertExceptionContains("Specified block size is less", e);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        public virtual void TestFinalizedReplicas()
        {
            // bring up a cluster of 3
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs = cluster.GetFileSystem();

            try
            {
                // test finalized replicas
                string      TopDir = "/test";
                DFSTestUtil util   = new DFSTestUtil.Builder().SetName("TestDatanodeRestart").SetNumFiles
                                         (2).Build();
                util.CreateFiles(fs, TopDir, (short)3);
                util.WaitReplication(fs, TopDir, (short)3);
                util.CheckFiles(fs, TopDir);
                cluster.RestartDataNodes();
                cluster.WaitActive();
                util.CheckFiles(fs, TopDir);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #7
        /// <summary>
        /// The test verifies the number of outstanding replication requests for a
        /// given DN shouldn't exceed the limit set by configuration property
        /// dfs.namenode.replication.max-streams-hard-limit.
        /// </summary>
        /// <remarks>
        /// The test verifies the number of outstanding replication requests for a
        /// given DN shouldn't exceed the limit set by configuration property
        /// dfs.namenode.replication.max-streams-hard-limit.
        /// The test does the followings:
        /// 1. Create a mini cluster with 2 DNs. Set large heartbeat interval so that
        /// replication requests won't be picked by any DN right away.
        /// 2. Create a file with 10 blocks and replication factor 2. Thus each
        /// of the 2 DNs have one replica of each block.
        /// 3. Add a DN to the cluster for later replication.
        /// 4. Remove a DN that has data.
        /// 5. Ask BlockManager to compute the replication work. This will assign
        /// replication requests to the only DN that has data.
        /// 6. Make sure the number of pending replication requests of that DN don't
        /// exceed the limit.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestNumberOfBlocksToBeReplicated()
        {
            // 1 min timeout
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeMinBlockSizeKey, 0);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 1);
            // Large value to make sure the pending replication request can stay in
            // DatanodeDescriptor.replicateBlocks before test timeout.
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 100);
            // Make sure BlockManager can pull all blocks from UnderReplicatedBlocks via
            // chooseUnderReplicatedBlocks at once.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationWorkMultiplierPerIteration, 5);
            int            NumOfBlocks = 10;
            short          RepFactor   = 2;
            string         FileName    = "/testFile";
            Path           FilePath    = new Path(FileName);
            MiniDFSCluster cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(RepFactor)
                                         .Build();

            try
            {
                // create a file with 10 blocks with a replication factor of 2
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, FilePath, NumOfBlocks, RepFactor, 1L);
                DFSTestUtil.WaitReplication(fs, FilePath, RepFactor);
                cluster.StartDataNodes(conf, 1, true, null, null, null, null);
                BlockManager  bm = cluster.GetNamesystem().GetBlockManager();
                ExtendedBlock b  = DFSTestUtil.GetFirstBlock(fs, FilePath);
                IEnumerator <DatanodeStorageInfo> storageInfos = bm.blocksMap.GetStorages(b.GetLocalBlock
                                                                                              ()).GetEnumerator();
                DatanodeDescriptor firstDn  = storageInfos.Next().GetDatanodeDescriptor();
                DatanodeDescriptor secondDn = storageInfos.Next().GetDatanodeDescriptor();
                bm.GetDatanodeManager().RemoveDatanode(firstDn);
                NUnit.Framework.Assert.AreEqual(NumOfBlocks, bm.GetUnderReplicatedNotMissingBlocks
                                                    ());
                bm.ComputeDatanodeWork();
                NUnit.Framework.Assert.IsTrue("The number of blocks to be replicated should be less than "
                                              + "or equal to " + bm.replicationStreamsHardLimit, secondDn.GetNumberOfBlocksToBeReplicated
                                                  () <= bm.replicationStreamsHardLimit);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #8
        public virtual void TestDeadDatanode()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).Build();
            cluster.WaitActive();
            string poolId = cluster.GetNamesystem().GetBlockPoolId();
            // wait for datanode to be marked live
            DataNode             dn  = cluster.GetDataNodes()[0];
            DatanodeRegistration reg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes
                                                                                    ()[0], poolId);

            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), true, 20000);
            // Shutdown and wait for datanode to be marked dead
            dn.Shutdown();
            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), false, 20000);
            DatanodeProtocol dnp = cluster.GetNameNodeRpc();

            ReceivedDeletedBlockInfo[] blocks = new ReceivedDeletedBlockInfo[] {
                new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null)
            };
            StorageReceivedDeletedBlocks[] storageBlocks = new StorageReceivedDeletedBlocks[] {
                new StorageReceivedDeletedBlocks(reg.GetDatanodeUuid(), blocks)
            };
            // Ensure blockReceived call from dead datanode is rejected with IOException
            try
            {
                dnp.BlockReceivedAndDeleted(reg, poolId, storageBlocks);
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure blockReport from dead datanode is rejected with IOException
            StorageBlockReport[] report = new StorageBlockReport[] {
                new StorageBlockReport(new DatanodeStorage(reg.GetDatanodeUuid()), BlockListAsLongs.Empty)
            };
            try
            {
                dnp.BlockReport(reg, poolId, report, new BlockReportContext(1, 0, Runtime.NanoTime
                                                                                ()));
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure heartbeat from dead datanode is rejected with a command
            // that asks datanode to register again
            StorageReport[] rep = new StorageReport[] {
                new StorageReport(new DatanodeStorage(reg.GetDatanodeUuid()), false, 0, 0, 0, 0)
            };
            DatanodeCommand[] cmd = dnp.SendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null).GetCommands();
            NUnit.Framework.Assert.AreEqual(1, cmd.Length);
            NUnit.Framework.Assert.AreEqual(cmd[0].GetAction(), RegisterCommand.Register.GetAction());
        }
        public static HdfsConfiguration InitZeroCopyTest()
        {
            Assume.AssumeTrue(NativeIO.IsAvailable());
            Assume.AssumeTrue(SystemUtils.IsOsUnix);
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetInt(DFSConfigKeys.DfsClientMmapCacheSize, 3);
            conf.SetLong(DFSConfigKeys.DfsClientMmapCacheTimeoutMs, 100);
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(), "TestRequestMmapAccess._PORT.sock"
                                                                        ).GetAbsolutePath());
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, true);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetLong(DFSConfigKeys.DfsCachereportIntervalMsecKey, 1000);
            conf.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 1000);
            return(conf);
        }
        public virtual void TestRecoverReplicas()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            cluster.WaitActive();
            try
            {
                FileSystem fs = cluster.GetFileSystem();
                for (int i = 0; i < 4; i++)
                {
                    Path fileName = new Path("/test" + i);
                    DFSTestUtil.CreateFile(fs, fileName, 1, (short)1, 0L);
                    DFSTestUtil.WaitReplication(fs, fileName, (short)1);
                }
                string   bpid = cluster.GetNamesystem().GetBlockPoolId();
                DataNode dn   = cluster.GetDataNodes()[0];
                IEnumerator <ReplicaInfo> replicasItor = Dataset(dn).volumeMap.Replicas(bpid).GetEnumerator
                                                             ();
                ReplicaInfo replica = replicasItor.Next();
                CreateUnlinkTmpFile(replica, true, true);   // rename block file
                CreateUnlinkTmpFile(replica, false, true);  // rename meta file
                replica = replicasItor.Next();
                CreateUnlinkTmpFile(replica, true, false);  // copy block file
                CreateUnlinkTmpFile(replica, false, false); // copy meta file
                replica = replicasItor.Next();
                CreateUnlinkTmpFile(replica, true, true);   // rename block file
                CreateUnlinkTmpFile(replica, false, false); // copy meta file
                cluster.RestartDataNodes();
                cluster.WaitActive();
                dn = cluster.GetDataNodes()[0];
                // check volumeMap: 4 finalized replicas
                ICollection <ReplicaInfo> replicas = Dataset(dn).volumeMap.Replicas(bpid);
                NUnit.Framework.Assert.AreEqual(4, replicas.Count);
                replicasItor = replicas.GetEnumerator();
                while (replicasItor.HasNext())
                {
                    NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Finalized, replicasItor
                                                    .Next().GetState());
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #11
        /*
         * Return a configuration object with low timeouts for testing and
         * a topology script set (which enables rack awareness).
         */
        private Configuration GetConf()
        {
            Configuration conf = new HdfsConfiguration();

            // Lower the heartbeat interval so the NN quickly learns of dead or
            // decommissioned DNs, and quickly issues replication and invalidation
            // commands (as replies to heartbeats).
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            // Have the NN ReplicationMonitor compute the replication and
            // invalidation commands to send DNs every second.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1);
            // Have the NN check for pending replications every second so it
            // quickly schedules additional replicas as they are identified.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 1);
            // The DNs report blocks every second.
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            // Indicates we have multiple racks
            conf.Set(DFSConfigKeys.NetTopologyScriptFileNameKey, "xyz");
            return(conf);
        }
        internal static Configuration CreateConf()
        {
            Configuration conf = new HdfsConfiguration();

            TestBalancer.InitConf(conf);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            conf.Set(CommonConfigurationKeysPublic.NetTopologyImplKey, typeof(NetworkTopologyWithNodeGroup
                                                                              ).FullName);
            conf.Set(DFSConfigKeys.DfsBlockReplicatorClassnameKey, typeof(BlockPlacementPolicyWithNodeGroup
                                                                          ).FullName);
            return(conf);
        }
        public virtual void TestWithAllCorruptReplicas()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fs         = cluster.GetFileSystem();
            FSNamesystem   namesystem = cluster.GetNamesystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                CorruptBlock(cluster, fs, fileName, 0, block);
                CorruptBlock(cluster, fs, fileName, 1, block);
                CorruptBlock(cluster, fs, fileName, 2, block);
                // wait for 3 seconds so that all block reports are processed.
                try
                {
                    Sharpen.Thread.Sleep(3000);
                }
                catch (Exception)
                {
                }
                NUnit.Framework.Assert.AreEqual(0, CountReplicas(namesystem, block).LiveReplicas(
                                                    ));
                NUnit.Framework.Assert.AreEqual(3, CountReplicas(namesystem, block).CorruptReplicas
                                                    ());
                namesystem.SetReplication(fileName.ToString(), (short)1);
                // wait for 3 seconds so that all block reports are processed.
                try
                {
                    Sharpen.Thread.Sleep(3000);
                }
                catch (Exception)
                {
                }
                NUnit.Framework.Assert.AreEqual(0, CountReplicas(namesystem, block).LiveReplicas(
                                                    ));
                NUnit.Framework.Assert.AreEqual(3, CountReplicas(namesystem, block).CorruptReplicas
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #14
        /// <exception cref="System.Exception"/>
        public virtual void TestMaxBlocksPerFileLimit()
        {
            Configuration conf = new HdfsConfiguration();
            // Make a small block size and a low limit
            long blockSize = 4096;
            long numBlocks = 2;

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, blockSize);
            conf.SetLong(DFSConfigKeys.DfsNamenodeMaxBlocksPerFileKey, numBlocks);
            MiniDFSCluster       cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem           fs      = cluster.GetFileSystem();
            HdfsDataOutputStream fout    = (HdfsDataOutputStream)fs.Create(new Path("/testmaxfilelimit"
                                                                                    ));

            try
            {
                // Write maximum number of blocks
                fout.Write(new byte[(int)blockSize * (int)numBlocks]);
                fout.Hflush();
                // Try to write one more block
                try
                {
                    fout.Write(new byte[1]);
                    fout.Hflush();
                    System.Diagnostics.Debug.Assert(false, "Expected IOException after writing too many blocks"
                                                    );
                }
                catch (IOException e)
                {
                    GenericTestUtils.AssertExceptionContains("File has reached the limit" + " on maximum number of"
                                                             , e);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #15
        public virtual void Setup()
        {
            StaticMapping.ResetMap();
            Configuration conf = new HdfsConfiguration();

            string[] racks = new string[] { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
            string[] hosts = new string[] { "/host0", "/host1", "/host2", "/host3", "/host4" };
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Racks(racks).Hosts(hosts
                                                                                          ).Build();
            cluster.WaitActive();
            nameNodeRpc = cluster.GetNameNodeRpc();
            namesystem  = cluster.GetNamesystem();
            perm        = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission
                                               .GetDefault());
        }
        // test rbw replicas persist across DataNode restarts
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestRbwReplicas()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            cluster.WaitActive();
            try
            {
                TestRbwReplicas(cluster, false);
                TestRbwReplicas(cluster, true);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        public virtual void TestStartNNWithTrashEmptier()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = new HdfsConfiguration();

            // enable both trash emptier and dropping response
            conf.SetLong("fs.trash.interval", 360);
            conf.SetInt(DFSConfigKeys.DfsClientTestDropNamenodeResponseNumKey, 2);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology
                                                                          ()).NumDataNodes(0).Build();
                cluster.WaitActive();
                cluster.TransitionToActive(0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestByAddingAnExtraDataNode()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            FileSystem     fs         = cluster.GetFileSystem();
            FSNamesystem   namesystem = cluster.GetNamesystem();

            MiniDFSCluster.DataNodeProperties dnPropsFourth = cluster.StopDataNode(3);
            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                CorruptBlock(cluster, fs, fileName, 0, block);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                NUnit.Framework.Assert.AreEqual(2, CountReplicas(namesystem, block).LiveReplicas(
                                                    ));
                NUnit.Framework.Assert.AreEqual(1, CountReplicas(namesystem, block).CorruptReplicas
                                                    ());
                cluster.RestartDataNode(dnPropsFourth);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                NUnit.Framework.Assert.AreEqual(3, CountReplicas(namesystem, block).LiveReplicas(
                                                    ));
                NUnit.Framework.Assert.AreEqual(0, CountReplicas(namesystem, block).CorruptReplicas
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        public virtual void TestPendingAndInvalidate()
        {
            Configuration Conf = new HdfsConfiguration();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
            Conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DfsReplicationInterval);
            Conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, DfsReplicationInterval
                        );
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(DatanodeCount
                                                                                   ).Build();

            cluster.WaitActive();
            FSNamesystem          namesystem = cluster.GetNamesystem();
            BlockManager          bm         = namesystem.GetBlockManager();
            DistributedFileSystem fs         = cluster.GetFileSystem();

            try
            {
                // 1. create a file
                Path filePath = new Path("/tmp.txt");
                DFSTestUtil.CreateFile(fs, filePath, 1024, (short)3, 0L);
                // 2. disable the heartbeats
                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
                }
                // 3. mark a couple of blocks as corrupt
                LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), filePath
                                                                       .ToString(), 0, 1).Get(0);
                cluster.GetNamesystem().WriteLock();
                try
                {
                    bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID"
                                                 , "TEST");
                    bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[1], "STORAGE_ID"
                                                 , "TEST");
                }
                finally
                {
                    cluster.GetNamesystem().WriteUnlock();
                }
                BlockManagerTestUtil.ComputeAllPendingWork(bm);
                BlockManagerTestUtil.UpdateState(bm);
                NUnit.Framework.Assert.AreEqual(bm.GetPendingReplicationBlocksCount(), 1L);
                NUnit.Framework.Assert.AreEqual(bm.pendingReplications.GetNumReplicas(block.GetBlock
                                                                                          ().GetLocalBlock()), 2);
                // 4. delete the file
                fs.Delete(filePath, true);
                // retry at most 10 times, each time sleep for 1s. Note that 10s is much
                // less than the default pending record timeout (5~10min)
                int  retries    = 10;
                long pendingNum = bm.GetPendingReplicationBlocksCount();
                while (pendingNum != 0 && retries-- > 0)
                {
                    Sharpen.Thread.Sleep(1000);
                    // let NN do the deletion
                    BlockManagerTestUtil.UpdateState(bm);
                    pendingNum = bm.GetPendingReplicationBlocksCount();
                }
                NUnit.Framework.Assert.AreEqual(pendingNum, 0L);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        public virtual void TestBlockReceived()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs       = cluster.GetFileSystem();
                FSNamesystem          fsn        = cluster.GetNamesystem();
                BlockManager          blkManager = fsn.GetBlockManager();
                string file       = "/tmp.txt";
                Path   filePath   = new Path(file);
                short  replFactor = 1;
                DFSTestUtil.CreateFile(hdfs, filePath, 1024L, replFactor, 0);
                // temporarily stop the heartbeat
                AList <DataNode> datanodes = cluster.GetDataNodes();
                for (int i = 0; i < DatanodeCount; i++)
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i], true);
                }
                hdfs.SetReplication(filePath, (short)DatanodeCount);
                BlockManagerTestUtil.ComputeAllPendingWork(blkManager);
                NUnit.Framework.Assert.AreEqual(1, blkManager.pendingReplications.Size());
                INodeFile fileNode = fsn.GetFSDirectory().GetINode4Write(file).AsFile();
                Block[]   blocks   = fileNode.GetBlocks();
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 1, blkManager.pendingReplications
                                                .GetNumReplicas(blocks[0]));
                LocatedBlock locatedBlock = hdfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                DatanodeInfo existingDn   = (locatedBlock.GetLocations())[0];
                int          reportDnNum  = 0;
                string       poolId       = cluster.GetNamesystem().GetBlockPoolId();
                // let two datanodes (other than the one that already has the data)
                // report to the NN
                for (int i_1 = 0; i_1 < DatanodeCount && reportDnNum < 2; i_1++)
                {
                    if (!datanodes[i_1].GetDatanodeId().Equals(existingDn))
                    {
                        DatanodeRegistration dnR = datanodes[i_1].GetDNRegistrationForBP(poolId);
                        StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[] {
                            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored", new ReceivedDeletedBlockInfo[] {
                                new ReceivedDeletedBlockInfo(blocks[0], ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty)
                            })
                        };
                        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                        reportDnNum++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 3, blkManager.pendingReplications
                                                .GetNumReplicas(blocks[0]));
                // let the same datanodes report again
                for (int i_2 = 0; i_2 < DatanodeCount && reportDnNum < 2; i_2++)
                {
                    if (!datanodes[i_2].GetDatanodeId().Equals(existingDn))
                    {
                        DatanodeRegistration dnR = datanodes[i_2].GetDNRegistrationForBP(poolId);
                        StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[] {
                            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored", new ReceivedDeletedBlockInfo[] {
                                new ReceivedDeletedBlockInfo(blocks[0], ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty)
                            })
                        };
                        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                        reportDnNum++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 3, blkManager.pendingReplications
                                                .GetNumReplicas(blocks[0]));
                // re-enable heartbeats on all datanodes and trigger them
                for (int i_3 = 0; i_3 < DatanodeCount; i_3++)
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i_3], false);
                    DataNodeTestUtils.TriggerHeartbeat(datanodes[i_3]);
                }
                Sharpen.Thread.Sleep(5000);
                NUnit.Framework.Assert.AreEqual(0, blkManager.pendingReplications.Size());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestProcessPendingReplications()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Timeout);
            MiniDFSCluster      cluster = null;
            Block               block;
            BlockInfoContiguous blockInfo;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
                cluster.WaitActive();
                FSNamesystem             fsn                 = cluster.GetNamesystem();
                BlockManager             blkManager          = fsn.GetBlockManager();
                PendingReplicationBlocks pendingReplications = blkManager.pendingReplications;
                UnderReplicatedBlocks    neededReplications  = blkManager.neededReplications;
                BlocksMap blocksMap = blkManager.blocksMap;
                //
                // Add 1 block to pendingReplications with GenerationStamp = 0.
                //
                block     = new Block(1, 1, 0);
                blockInfo = new BlockInfoContiguous(block, (short)3);
                pendingReplications.Increment(block, DatanodeStorageInfo.ToDatanodeDescriptors(DFSTestUtil
                                                                                               .CreateDatanodeStorageInfos(1)));
                BlockCollection bc = Org.Mockito.Mockito.Mock <BlockCollection>();
                Org.Mockito.Mockito.DoReturn((short)3).When(bc).GetBlockReplication();
                // Place into blocksmap with GenerationStamp = 1
                blockInfo.SetGenerationStamp(1);
                blocksMap.AddBlockCollection(blockInfo, bc);
                NUnit.Framework.Assert.AreEqual("Size of pendingReplications ", 1, pendingReplications
                                                .Size());
                // Add a second block to pendingReplications that has no
                // corresponding entry in blocksmap
                block = new Block(2, 2, 0);
                pendingReplications.Increment(block, DatanodeStorageInfo.ToDatanodeDescriptors(DFSTestUtil
                                                                                               .CreateDatanodeStorageInfos(1)));
                // verify 2 blocks in pendingReplications
                NUnit.Framework.Assert.AreEqual("Size of pendingReplications ", 2, pendingReplications
                                                .Size());
                //
                // Wait for everything to timeout.
                //
                while (pendingReplications.Size() > 0)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(100);
                    }
                    catch (Exception)
                    {
                    }
                }
                //
                // Verify that block moves to neededReplications
                //
                while (neededReplications.Size() == 0)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(100);
                    }
                    catch (Exception)
                    {
                    }
                }
                // Verify that the generation stamp we will try to replicate
                // is now 1
                foreach (Block b in neededReplications)
                {
                    NUnit.Framework.Assert.AreEqual("Generation stamp is 1 ", 1, b.GetGenerationStamp
                                                        ());
                }
                // Verify size of neededReplications is exactly 1.
                NUnit.Framework.Assert.AreEqual("size of neededReplications is 1 ", 1, neededReplications
                                                .Size());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #22
        public virtual void TestFileLimit()
        {
            Configuration conf       = new HdfsConfiguration();
            int           maxObjects = 5;

            conf.SetLong(DFSConfigKeys.DfsNamenodeMaxObjectsKey, maxObjects);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            int currentNodes = 0;

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            FSNamesystem   namesys = cluster.GetNamesystem();

            try
            {
                //
                // check that / exists
                //
                Path path = new Path("/");
                NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory
                                                  ());
                currentNodes = 1; // root inode
                //
                // verify that we can create the specified number of files. We leave
                // one for the "/". Each file takes an inode and a block.
                //
                for (int i = 0; i < maxObjects / 2; i++)
                {
                    Path file = new Path("/filestatus" + i);
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                    currentNodes += 2; // two more objects for this creation.
                }
                // verify that creating another file fails
                bool hitException = false;
                try
                {
                    Path file = new Path("/filestatus");
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed file limit", hitException);
                // delete one file
                Path file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0);
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // now, we should be able to create a new file
                CreateFile(fs, file0);
                System.Console.Out.WriteLine("Created file " + file0 + " again.");
                currentNodes += 2;
                // delete the file again
                file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0 + " again.");
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // create two directories in place of the file that we deleted
                Path dir = new Path("/dir0/dir1");
                fs.Mkdirs(dir);
                System.Console.Out.WriteLine("Created directories " + dir);
                currentNodes += 2;
                WaitForLimit(namesys, currentNodes);
                // verify that creating another directory fails
                hitException = false;
                try
                {
                    fs.Mkdirs(new Path("dir.fail"));
                    System.Console.Out.WriteLine("Created directory should not have succeeded.");
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed dir limit", hitException);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
        public virtual void TestWaitForRegistrationOnRestart()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsDatanodeBpReadyTimeoutKey, 5);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 5000);
            // This makes the datanode appear registered to the NN, but it won't be
            // able to get to the saved dn reg internally.
            DataNodeFaultInjector dnFaultInjector = new _DataNodeFaultInjector_224();
            DataNodeFaultInjector oldDnInjector   = DataNodeFaultInjector.Get();

            DataNodeFaultInjector.Set(dnFaultInjector);
            MiniDFSCluster cluster = null;
            long           start   = 0;
            Path           file    = new Path("/reg");

            try
            {
                int numDNs = 1;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDNs).Build();
                cluster.WaitActive();
                start = Runtime.CurrentTimeMillis();
                FileSystem fileSys = cluster.GetFileSystem();
                try
                {
                    DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)1, 0L);
                    // It is a bug if this does not fail.
                    throw new IOException("Did not fail!");
                }
                catch (RemoteException e)
                {
                    long elapsed = Runtime.CurrentTimeMillis() - start;
                    // timers have at-least semantics, so it should be at least 5 seconds.
                    if (elapsed < 5000 || elapsed > 10000)
                    {
                        throw new IOException(elapsed + " seconds passed.", e);
                    }
                }
                DataNodeFaultInjector.Set(oldDnInjector);
                // this should succeed now.
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)1, 0L);
                // turn it back to under-construction, so that the client calls
                // getReplicaVisibleLength() rpc method against the datanode.
                fileSys.Append(file);
                // back to simulating unregistered node.
                DataNodeFaultInjector.Set(dnFaultInjector);
                byte[] buffer = new byte[8];
                start = Runtime.CurrentTimeMillis();
                try
                {
                    fileSys.Open(file).Read(0L, buffer, 0, 1);
                    throw new IOException("Did not fail!");
                }
                catch (IOException e)
                {
                    long elapsed = Runtime.CurrentTimeMillis() - start;
                    if (e.Message.Contains("readBlockLength"))
                    {
                        throw new IOException("Failed, but with unexpected exception:", e);
                    }
                    // timers have at-least semantics, so it should be at least 5 seconds.
                    if (elapsed < 5000 || elapsed > 10000)
                    {
                        throw new IOException(elapsed + " seconds passed.", e);
                    }
                }
                DataNodeFaultInjector.Set(oldDnInjector);
                fileSys.Open(file).Read(0L, buffer, 0, 1);
            }
            finally
            {
                DataNodeFaultInjector.Set(oldDnInjector);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
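The body of _DataNodeFaultInjector_224 used above is elided by the listing. A hedged reconstruction of what such an injector might look like; the overridden hook name (NoRegistration) is an assumption, not confirmed by this listing.

        private sealed class _DataNodeFaultInjector_224 : DataNodeFaultInjector
        {
            /// <exception cref="System.IO.IOException"/>
            public override void NoRegistration()
            {
                // Pretend the saved datanode registration cannot be found, so the
                // DN looks registered to the NN but fails internal registration
                // lookups until the injector is swapped back out.
                throw new IOException("no registration found for testing");
            }
        }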
        public virtual void Test2GBMmapLimit()
        {
            Assume.AssumeTrue(BlockReaderTestUtil.ShouldTestLargeFiles());
            HdfsConfiguration conf = InitZeroCopyTest();
            long TestFileLength    = 2469605888L;

            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "NULL");
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, TestFileLength);
            MiniDFSCluster cluster  = null;
            Path           TestPath = new Path("/a");
            string         Context  = "test2GBMmapLimit";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            FSDataInputStream fsIn  = null;
            FSDataInputStream fsIn2 = null;
            ByteBuffer        buf1  = null;
            ByteBuffer        buf2  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, unchecked ((int)(0xB
                                                                                                )));
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                buf1 = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(1, buf1.Remaining());
                fsIn.ReleaseBuffer(buf1);
                buf1 = null;
                fsIn.Seek(2147483640L);
                buf1 = fsIn.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(7, buf1.Remaining());
                NUnit.Framework.Assert.AreEqual(int.MaxValue, buf1.Limit());
                fsIn.ReleaseBuffer(buf1);
                buf1 = null;
                NUnit.Framework.Assert.AreEqual(2147483647L, fsIn.GetPos());
                try
                {
                    buf1 = fsIn.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                    NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
                }
                catch (NotSupportedException)
                {
                    // expected; can't read past the 2GB boundary.
                }
                fsIn.Close();
                fsIn = null;
                // Now create another file with normal-sized blocks, and verify we
                // can read past 2GB
                Path TestPath2 = new Path("/b");
                conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 268435456L);
                DFSTestUtil.CreateFile(fs, TestPath2, 1024 * 1024, TestFileLength, 268435456L, (short
                                                                                                )1, unchecked ((int)(0xA)));
                fsIn2 = fs.Open(TestPath2);
                fsIn2.Seek(2147483640L);
                buf2 = fsIn2.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(8, buf2.Remaining());
                NUnit.Framework.Assert.AreEqual(2147483648L, fsIn2.GetPos());
                fsIn2.ReleaseBuffer(buf2);
                buf2 = null;
                buf2 = fsIn2.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(1024, buf2.Remaining());
                NUnit.Framework.Assert.AreEqual(2147484672L, fsIn2.GetPos());
                fsIn2.ReleaseBuffer(buf2);
                buf2 = null;
            }
            finally
            {
                if (buf1 != null)
                {
                    fsIn.ReleaseBuffer(buf1);
                }
                if (buf2 != null)
                {
                    fsIn2.ReleaseBuffer(buf2);
                }
                IOUtils.Cleanup(null, fsIn, fsIn2);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #25
        public virtual void TestVolumeSize()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            // Set aside a fifth of the total capacity as reserved
            long reserved = 10000;

            conf.SetLong(DFSConfigKeys.DfsDatanodeDuReservedKey, reserved);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                FSNamesystem    namesystem = cluster.GetNamesystem();
                DatanodeManager dm         = cluster.GetNamesystem().GetBlockManager().GetDatanodeManager
                                                 ();
                // Ensure the data reported for each data node is right
                IList <DatanodeDescriptor> live = new AList <DatanodeDescriptor>();
                IList <DatanodeDescriptor> dead = new AList <DatanodeDescriptor>();
                dm.FetchDatanodes(live, dead, false);
                NUnit.Framework.Assert.IsTrue(live.Count == 1);
                long  used;
                long  remaining;
                long  configCapacity;
                long  nonDFSUsed;
                long  bpUsed;
                float percentUsed;
                float percentRemaining;
                float percentBpUsed;
                foreach (DatanodeDescriptor datanode in live)
                {
                    used             = datanode.GetDfsUsed();
                    remaining        = datanode.GetRemaining();
                    nonDFSUsed       = datanode.GetNonDfsUsed();
                    configCapacity   = datanode.GetCapacity();
                    percentUsed      = datanode.GetDfsUsedPercent();
                    percentRemaining = datanode.GetRemainingPercent();
                    bpUsed           = datanode.GetBlockPoolUsed();
                    percentBpUsed    = datanode.GetBlockPoolUsedPercent();
                    Log.Info("Datanode configCapacity " + configCapacity + " used " + used + " non DFS used "
                             + nonDFSUsed + " remaining " + remaining + " perentUsed " + percentUsed + " percentRemaining "
                             + percentRemaining);
                    NUnit.Framework.Assert.IsTrue(configCapacity == (used + remaining + nonDFSUsed));
                    NUnit.Framework.Assert.IsTrue(percentUsed == DFSUtil.GetPercentUsed(used, configCapacity
                                                                                        ));
                    NUnit.Framework.Assert.IsTrue(percentRemaining == DFSUtil.GetPercentRemaining(remaining
                                                                                                  , configCapacity));
                    NUnit.Framework.Assert.IsTrue(percentBpUsed == DFSUtil.GetPercentUsed(bpUsed, configCapacity
                                                                                          ));
                }
                DF df = new DF(new FilePath(cluster.GetDataDirectory()), conf);
                //
                // Currently the data node creates two data directories in the
                // MiniDFSCluster, and each data directory reports the full disk
                // capacity of the volume it resides on. Hence the capacity
                // reported by the data node is twice the disk capacity.
                //
                // So multiply the disk capacity and reserved space by two
                // to account for this.
                //
                int  numOfDataDirs = 2;
                long diskCapacity  = numOfDataDirs * df.GetCapacity();
                reserved        *= numOfDataDirs;
                configCapacity   = namesystem.GetCapacityTotal();
                used             = namesystem.GetCapacityUsed();
                nonDFSUsed       = namesystem.GetNonDfsUsedSpace();
                remaining        = namesystem.GetCapacityRemaining();
                percentUsed      = namesystem.GetPercentUsed();
                percentRemaining = namesystem.GetPercentRemaining();
                bpUsed           = namesystem.GetBlockPoolUsedSpace();
                percentBpUsed    = namesystem.GetPercentBlockPoolUsed();
                Log.Info("Data node directory " + cluster.GetDataDirectory());
                Log.Info("Name node diskCapacity " + diskCapacity + " configCapacity " + configCapacity
                         + " reserved " + reserved + " used " + used + " remaining " + remaining + " nonDFSUsed "
                         + nonDFSUsed + " remaining " + remaining + " percentUsed " + percentUsed + " percentRemaining "
                         + percentRemaining + " bpUsed " + bpUsed + " percentBpUsed " + percentBpUsed);
                // Ensure the total capacity reported excludes the reserved space
                NUnit.Framework.Assert.IsTrue(configCapacity == diskCapacity - reserved);
                // Ensure used, remaining and non-DFS used add up to the total capacity
                NUnit.Framework.Assert.IsTrue(configCapacity == (used + remaining + nonDFSUsed));
                // Ensure percent used is calculated based on used and present capacity
                NUnit.Framework.Assert.IsTrue(percentUsed == DFSUtil.GetPercentUsed(used, configCapacity));
                // Ensure percent block pool used is calculated based on bpUsed and present capacity
                NUnit.Framework.Assert.IsTrue(percentBpUsed == DFSUtil.GetPercentUsed(bpUsed, configCapacity));
                // Ensure percent remaining is calculated based on remaining and present capacity
                NUnit.Framework.Assert.IsTrue(percentRemaining == ((float)remaining * 100.0f) / (float)configCapacity);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
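The assertions above reduce to a small amount of arithmetic. Below is a minimal standalone sketch of the invariants, using plain .NET types rather than Hadoop classes; the percent formulas are assumed to match what DFSUtil computes, and every number is illustrative, not taken from the test.
        // Hypothetical helper, not part of the test file above.
        internal static class CapacityMath
        {
            // Assumed to match DFSUtil.GetPercentUsed: share of capacity consumed.
            public static float PercentUsed(long used, long capacity)
            {
                return capacity <= 0 ? 100.0f : (used * 100.0f) / capacity;
            }

            // Assumed to match DFSUtil.GetPercentRemaining.
            public static float PercentRemaining(long remaining, long capacity)
            {
                return capacity <= 0 ? 0.0f : (remaining * 100.0f) / capacity;
            }

            public static void Demo()
            {
                int  numOfDataDirs  = 2;       // MiniDFSCluster creates two data dirs per node
                long reservedPerDir = 10000;   // dfs.datanode.du.reserved
                long diskCapacity   = numOfDataDirs * 1000000L;
                long reserved       = numOfDataDirs * reservedPerDir;

                long configCapacity = diskCapacity - reserved;   // reserved space excluded
                long used           = 250000;
                long nonDFSUsed     = 50000;
                long remaining      = configCapacity - used - nonDFSUsed;

                // The central invariant the test asserts:
                System.Diagnostics.Debug.Assert(configCapacity == used + remaining + nonDFSUsed);
                float percentUsed      = PercentUsed(used, configCapacity);           // ~12.6f
                float percentRemaining = PercentRemaining(remaining, configCapacity); // ~84.8f
            }
        }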
Example #26
        /// <exception cref="System.Exception"/>
        private void TestTriggerBlockReport(bool incremental)
        {
            Configuration conf = new HdfsConfiguration();

            // Set a really long value for dfs.blockreport.intervalMsec and
            // dfs.heartbeat.interval, so that incremental block reports and heartbeats
            // won't be sent during this test unless they're triggered
            // manually.
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 10800000L);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1080L);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            FileSystem fs = cluster.GetFileSystem();
            DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(cluster
                                                                                         .GetDataNodes()[0], cluster.GetNameNode());

            DFSTestUtil.CreateFile(fs, new Path("/abc"), 16, (short)1, 1L);
            // We should get 1 incremental block report.
            Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000).Times(1)).BlockReceivedAndDeleted
                (Matchers.Any <DatanodeRegistration>(), Matchers.AnyString(), Matchers.Any <StorageReceivedDeletedBlocks
                                                                                            []>());
            // We should not receive any further incremental or full block reports,
            // since the interval we configured is so long.
            for (int i = 0; i < 3; i++)
            {
                Sharpen.Thread.Sleep(10);
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Times(0)).BlockReport(Matchers.Any
                                                                                          <DatanodeRegistration>(), Matchers.AnyString(), Matchers.Any <StorageBlockReport[]
                                                                                                                                                        >(), Org.Mockito.Mockito.AnyObject <BlockReportContext>());
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Times(1)).BlockReceivedAndDeleted
                    (Matchers.Any <DatanodeRegistration>(), Matchers.AnyString(), Matchers.Any <StorageReceivedDeletedBlocks
                                                                                                []>());
            }
            // Create a fake block deletion notification on the DataNode.
            // This will be sent with the next incremental block report.
            ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(new Block(5678, 512,
                                                                                   1000), ReceivedDeletedBlockInfo.BlockStatus.DeletedBlock, null);
            DataNode       datanode    = cluster.GetDataNodes()[0];
            BPServiceActor actor       = datanode.GetAllBpOs()[0].GetBPServiceActors()[0];
            string         storageUuid = datanode.GetFSDataset().GetVolumes()[0].GetStorageID();

            actor.NotifyNamenodeDeletedBlock(rdbi, storageUuid);
            // Manually trigger a block report.
            datanode.TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental(incremental
                                                                                        ).Build());
            // triggerBlockReport returns before the block report is
            // actually sent.  Wait for it to be sent here.
            if (incremental)
            {
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000).Times(2)).BlockReceivedAndDeleted
                    (Matchers.Any <DatanodeRegistration>(), Matchers.AnyString(), Matchers.Any <StorageReceivedDeletedBlocks
                                                                                                []>());
            }
            else
            {
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000)).BlockReport(Matchers.Any
                                                                                                <DatanodeRegistration>(), Matchers.AnyString(), Matchers.Any <StorageBlockReport[]
                                                                                                                                                              >(), Org.Mockito.Mockito.AnyObject <BlockReportContext>());
            }
            cluster.Shutdown();
        }
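Distilled from the test above, the manual trigger pattern on its own. This is a hedged usage sketch that calls only APIs already exercised in this file; TriggerBlockReport merely enqueues the request, and the report itself is delivered asynchronously by the BPServiceActor thread.
        // Hypothetical helper, not part of the original test.
        private static void TriggerBothReports(MiniDFSCluster cluster)
        {
            DataNode datanode = cluster.GetDataNodes()[0];
            // Incremental: flushes any pending received/deleted block notifications.
            datanode.TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental(true).Build());
            // Full: reports every block the node currently stores.
            datanode.TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental(false).Build());
            // Both calls return immediately; verification must wait, e.g. with
            // Org.Mockito.Mockito.Timeout as the test does above.
        }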
        public virtual void TestPurgingWithNameEditsDirAfterFailure()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeNumExtraEditsRetainedKey, 0);
            FilePath sd0 = new FilePath(TestRootDir, "nn0");
            FilePath sd1 = new FilePath(TestRootDir, "nn1");
            FilePath cd0 = new FilePath(sd0, "current");
            FilePath cd1 = new FilePath(sd1, "current");

            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Joiner.On(",").Join(sd0, sd1));
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs(false
                                                                                             ).Format(true).Build();
                NameNode nn = cluster.GetNameNode();
                DoSaveNamespace(nn);
                Log.Info("After first save, images 0 and 2 should exist in both dirs");
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (0), NNStorage.GetImageFileName(2));
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (0), NNStorage.GetImageFileName(2));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (1, 2), NNStorage.GetInProgressEditsFileName(3));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (1, 2), NNStorage.GetInProgressEditsFileName(3));
                DoSaveNamespace(nn);
                Log.Info("After second save, image 0 should be purged, " + "and image 4 should exist in both."
                         );
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (3, 4), NNStorage.GetInProgressEditsFileName(5));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (3, 4), NNStorage.GetInProgressEditsFileName(5));
                Log.Info("Failing first storage dir by chmodding it");
                NUnit.Framework.Assert.AreEqual(0, FileUtil.Chmod(cd0.GetAbsolutePath(), "000"));
                DoSaveNamespace(nn);
                Log.Info("Restoring accessibility of first storage dir");
                NUnit.Framework.Assert.AreEqual(0, FileUtil.Chmod(cd0.GetAbsolutePath(), "755"));
                Log.Info("nothing should have been purged in first storage dir");
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (3, 4), NNStorage.GetInProgressEditsFileName(5));
                Log.Info("fsimage_2 should be purged in second storage dir");
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (4), NNStorage.GetImageFileName(6));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (5, 6), NNStorage.GetInProgressEditsFileName(7));
                Log.Info("On next save, we should purge logs from the failed dir," + " but not images, since the image directory is in failed state."
                         );
                DoSaveNamespace(nn);
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (6), NNStorage.GetImageFileName(8));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*", NNStorage.GetFinalizedEditsFileName
                                                      (7, 8), NNStorage.GetInProgressEditsFileName(9));
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*", NNStorage.GetImageFileName
                                                      (2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*", NNStorage.GetInProgressEditsFileName
                                                      (9));
            }
            finally
            {
                FileUtil.Chmod(cd0.GetAbsolutePath(), "755");
                Log.Info("Shutting down...");
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
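The glob assertions above match the NameNode's on-disk naming scheme. Here is a small sketch of that scheme, assuming the standard 19-digit zero-padded transaction ids; these helpers are hypothetical stand-ins for what NNStorage.GetImageFileName and its siblings return.
        // Hypothetical stand-ins, assuming the usual NameNode storage layout.
        private static string ImageFileName(long txid)
        {
            // e.g. fsimage_0000000000000000002
            return string.Format("fsimage_{0:D19}", txid);
        }

        private static string FinalizedEditsFileName(long startTxid, long endTxid)
        {
            // e.g. edits_0000000000000000001-0000000000000000002
            return string.Format("edits_{0:D19}-{1:D19}", startTxid, endTxid);
        }

        private static string InProgressEditsFileName(long startTxid)
        {
            // e.g. edits_inprogress_0000000000000000003
            return string.Format("edits_inprogress_{0:D19}", startTxid);
        }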
Example #28
        public virtual void TestBlockReplacement()
        {
            Configuration Conf = new HdfsConfiguration();

            string[] InitialRacks      = new string[] { "/RACK0", "/RACK1", "/RACK2" };
            string[] NewRacks          = new string[] { "/RACK2" };
            short    ReplicationFactor = (short)3;
            int      DefaultBlockSize  = 1024;
            Random   r = new Random();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            Conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            Conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 500);
            cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(ReplicationFactor).Racks(
                InitialRacks).Build();
            try
            {
                cluster.WaitActive();
                FileSystem fs       = cluster.GetFileSystem();
                Path       fileName = new Path("/tmp.txt");
                // create a file with one block
                DFSTestUtil.CreateFile(fs, fileName, DefaultBlockSize, ReplicationFactor, r.NextLong
                                           ());
                DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
                // get all datanodes
                IPEndPoint           addr          = new IPEndPoint("localhost", cluster.GetNameNodePort());
                DFSClient            client        = new DFSClient(addr, Conf);
                IList <LocatedBlock> locatedBlocks = client.GetNamenode().GetBlockLocations("/tmp.txt"
                                                                                            , 0, DefaultBlockSize).GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, locatedBlocks.Count);
                LocatedBlock   block    = locatedBlocks[0];
                DatanodeInfo[] oldNodes = block.GetLocations();
                NUnit.Framework.Assert.AreEqual(3, oldNodes.Length);
                ExtendedBlock b = block.GetBlock();
                // add a fourth datanode to the cluster
                cluster.StartDataNodes(Conf, 1, true, null, NewRacks);
                cluster.WaitActive();
                DatanodeInfo[] datanodes = client.DatanodeReport(HdfsConstants.DatanodeReportType
                                                                 .All);
                // find out the new node
                DatanodeInfo newNode = null;
                foreach (DatanodeInfo node in datanodes)
                {
                    bool isNewNode = true;
                    foreach (DatanodeInfo oldNode in oldNodes)
                    {
                        if (node.Equals(oldNode))
                        {
                            isNewNode = false;
                            break;
                        }
                    }
                    if (isNewNode)
                    {
                        newNode = node;
                        break;
                    }
                }
                NUnit.Framework.Assert.IsTrue(newNode != null);
                DatanodeInfo         source  = null;
                AList <DatanodeInfo> proxies = new AList <DatanodeInfo>(2);
                foreach (DatanodeInfo node_1 in datanodes)
                {
                    if (node_1 != newNode)
                    {
                        if (node_1.GetNetworkLocation().Equals(newNode.GetNetworkLocation()))
                        {
                            source = node_1;
                        }
                        else
                        {
                            proxies.AddItem(node_1);
                        }
                    }
                }
                // Current state: newNode is on RACK2, and "source" is the other DN on RACK2.
                // The two datanodes on RACK0 and RACK1 are in "proxies".
                // "source" and both "proxies" contain the block, while newNode doesn't yet.
                NUnit.Framework.Assert.IsTrue(source != null && proxies.Count == 2);
                // start to replace the block
                // case 1: proxySource does not contain the block
                Log.Info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
                NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, newNode, proxies[0]));
                // case 2: destination already contains the block
                Log.Info("Testcase 2: Destination " + proxies[1] + " contains the block " + b);
                NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, proxies[0], proxies[1]));
                // case 3: correct case
                Log.Info("Testcase 3: Source=" + source + " Proxy=" + proxies[0] + " Destination="
                         + newNode);
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, source, proxies[0], newNode));
                // after cluster has time to resolve the over-replication,
                // block locations should contain two proxies and newNode
                // but not source
                CheckBlocks(new DatanodeInfo[] { newNode, proxies[0], proxies[1] }, fileName.ToString
                                (), DefaultBlockSize, ReplicationFactor, client);
                // case 4: proxies.get(0) is not a valid del hint
                // expect either source or newNode replica to be deleted instead
                Log.Info("Testcase 4: invalid del hint " + proxies[0]);
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, proxies[0], proxies[1], source));
                // after cluster has time to resolve the over-replication,
                // block locations should contain two proxies,
                // and either source or newNode, but not both.
                CheckBlocks(Sharpen.Collections.ToArray(proxies, new DatanodeInfo[proxies.Count])
                            , fileName.ToString(), DefaultBlockSize, ReplicationFactor, client);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
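The nested loops that pick out the newly started datanode can be expressed more compactly. Below is a hypothetical LINQ rewrite over the same types; the behavior is meant to be identical to the loops above.
        // Hypothetical LINQ rewrite of the new-node scan above.
        private static DatanodeInfo FindNewNode(DatanodeInfo[] datanodes, DatanodeInfo[] oldNodes)
        {
            // The first reported node that is not among the original block locations.
            return System.Linq.Enumerable.FirstOrDefault(
                datanodes, node => !System.Linq.Enumerable.Contains(oldNodes, node));
        }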
        public virtual void TestProcessOverReplicateBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, 100L);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString
                         (2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                // corrupt the block on datanode 0
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // remove the block scanner's cursor file to trigger a fresh block scan
                FilePath scanCursor = new FilePath(new FilePath(MiniDFSCluster.GetFinalizedDir(cluster
                                                                                               .GetInstanceStorageDir(0, 0), cluster.GetNamesystem().GetBlockPoolId()).GetParent
                                                                    ()).GetParent(), "scanner.cursor");
                // wait up to one minute for the deletion to succeed
                for (int i = 0; !scanCursor.Delete(); i++)
                {
                    NUnit.Framework.Assert.IsTrue("Could not delete " + scanCursor.GetAbsolutePath()
                                                  + " in one minute", i < 60);
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                }
                // restart the datanode so the corrupt replica will be detected
                cluster.RestartDataNode(dnProps);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                string     blockPoolId     = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeID corruptDataNode = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes
                                                                                          ()[2], blockPoolId);
                FSNamesystem     namesystem = cluster.GetNamesystem();
                BlockManager     bm         = namesystem.GetBlockManager();
                HeartbeatManager hm         = bm.GetDatanodeManager().GetHeartbeatManager();
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        // set each live datanode's remaining space to 0, so its replica
                        // will be chosen for deletion when the over-replication is processed
                        string corruptMachineName = corruptDataNode.GetXferAddr();
                        foreach (DatanodeDescriptor datanode in hm.GetDatanodes())
                        {
                            if (!corruptMachineName.Equals(datanode.GetXferAddr()))
                            {
                                datanode.GetStorageInfos()[0].SetUtilizationForTesting(100L, 100L, 0, 100L);
                                datanode.UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(datanode
                                                                                                           ), 0L, 0L, 0, 0, null);
                            }
                        }
                        // decrease the replication factor to 1
                        NameNodeAdapter.SetReplication(namesystem, fileName.ToString(), (short)1);
                        // the corrupt replica must not be chosen as the excess one;
                        // without the 4910 fix the number of live replicas would be 0 and the block would be lost
                        NUnit.Framework.Assert.AreEqual(1, bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
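This test depends on how the NameNode picks the excess replica once replication is lowered: corrupt replicas are excluded from that choice, and among the healthy ones a node with little free space (or a stale heartbeat) is preferred, which is why the test zeroes the live nodes' remaining space. Below is a loose, illustrative model of that preference only, with plain stand-in types rather than Hadoop classes; the real logic lives in the block placement policy and also weighs heartbeat staleness.
        // Plain stand-in type, not a Hadoop class.
        internal class ReplicaInfo
        {
            public string Node;
            public long   RemainingBytes;
            public bool   Corrupt;
        }

        private static ReplicaInfo ChooseExcessReplica(System.Collections.Generic.IList<ReplicaInfo> replicas)
        {
            ReplicaInfo choice = null;
            foreach (ReplicaInfo r in replicas)
            {
                if (r.Corrupt)
                {
                    continue; // corrupt replicas are invalidated separately, never picked as excess
                }
                if (choice == null || r.RemainingBytes < choice.RemainingBytes)
                {
                    choice = r; // prefer the node with the least free space
                }
            }
            return choice;
        }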
        /// <summary>
        /// Test that we can zero-copy read cached data even without disabling
        /// checksums.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestZeroCopyReadOfCachedData()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            BlockReaderTestUtil.EnableBlockReaderFactoryTracing();
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            int  TestFileLength    = BlockSize;
            Path TestPath          = new Path("/a");
            int  RandomSeed        = 23453;
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            string Context = "testZeroCopyReadOfCachedData";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey, DFSTestUtil.RoundUpToMultiple
                             (TestFileLength, (int)NativeIO.POSIX.GetCacheManipulator().GetOperatingSystemPageSize
                                 ()));
            MiniDFSCluster cluster = null;
            ByteBuffer     result  = null;
            ByteBuffer     result2 = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
            DistributedFileSystem fs  = cluster.GetFileSystem();

            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            byte[] original = DFSTestUtil.CalculateFileContentsFromSeed(RandomSeed, TestFileLength
                                                                        );
            // Prior to caching, the file can't be read via zero-copy
            FSDataInputStream fsIn = fs.Open(TestPath);

            try
            {
                result = fsIn.Read(null, TestFileLength / 2, EnumSet.NoneOf <ReadOption>());
                NUnit.Framework.Assert.Fail("expected NotSupportedException");
            }
            catch (NotSupportedException)
            {
                // expected: the data is not cached yet, so zero-copy reads are unavailable
            }
            // Cache the file
            fs.AddCachePool(new CachePoolInfo("pool1"));
            long directiveId = fs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPath(
                                                        TestPath).SetReplication((short)1).SetPool("pool1").Build());
            int numBlocks = (int)Math.Ceiling((double)TestFileLength / BlockSize);

            DFSTestUtil.VerifyExpectedCacheUsage(DFSTestUtil.RoundUpToMultiple(TestFileLength
                                                                               , BlockSize), numBlocks, cluster.GetDataNodes()[0].GetFSDataset());
            try
            {
                result = fsIn.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
                                         (result));
            // Test that files opened after the cache operation has finished
            // still get the benefits of zero-copy (regression test for HDFS-6086)
            FSDataInputStream fsIn2 = fs.Open(TestPath);

            try
            {
                result2 = fsIn2.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
                                         (result2));
            fsIn2.ReleaseBuffer(result2);
            fsIn2.Close();
            // check that the replica is anchored
            ExtendedBlock     firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);
            ShortCircuitCache cache      = ClientContext.Get(Context, new DFSClient.Conf(conf)).GetShortCircuitCache
                                               ();

            WaitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
            // Uncache the replica
            fs.RemoveCacheDirective(directiveId);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
            fsIn.ReleaseBuffer(result);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            fsIn.Close();
            fs.Close();
            cluster.Shutdown();
        }
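Finally, the zero-copy read contract this test exercises, distilled into a hedged usage sketch: every ByteBuffer a successful Read hands out must go back through ReleaseBuffer before the stream is closed, and a NotSupportedException signals that the request cannot be satisfied zero-copy (here, data not yet cached while checksums are enabled). Only APIs already used in this file appear; the helper itself is hypothetical.
        // Hypothetical helper, not part of the original test.
        private static void ReadOneZeroCopyBuffer(DistributedFileSystem fs, Path path, int maxLength)
        {
            FSDataInputStream stream = fs.Open(path);
            ByteBuffer buf = null;
            try
            {
                // Throws NotSupportedException if the read cannot be done zero-copy.
                buf = stream.Read(null, maxLength, EnumSet.NoneOf <ReadOption>());
                // ... consume buf here ...
            }
            finally
            {
                if (buf != null)
                {
                    stream.ReleaseBuffer(buf);
                }
                stream.Close();
            }
        }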