Example #1
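        /// <summary>
        /// Starts a single-DataNode MiniDFSCluster with the given number of storages,
        /// optionally writes a file, then sends a heartbeat whose storage report omits
        /// one storage and asserts how many storages the NameNode still tracks.
        /// </summary>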
        /// <exception cref="System.IO.IOException"/>
        private static void RunTest(string testCaseName, bool createFiles, int numInitialStorages
                                    , int expectedStoragesAfterTest)
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf)
                    .NumDataNodes(1)
                    .StoragesPerDatanode(numInitialStorages)
                    .Build();
                cluster.WaitActive();
                DataNode dn0 = cluster.GetDataNodes()[0];
                // Ensure NN knows about the storage.
                DatanodeID         dnId         = dn0.GetDatanodeId();
                DatanodeDescriptor dnDescriptor = cluster.GetNamesystem().GetBlockManager()
                                                         .GetDatanodeManager().GetDatanode(dnId);
                Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(numInitialStorages));
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeRegistration dnReg = dn0.GetDNRegistrationForBP(bpid);
                DataNodeTestUtils.TriggerBlockReport(dn0);
                if (createFiles)
                {
                    Path path = new Path("/", testCaseName);
                    DFSTestUtil.CreateFile(cluster.GetFileSystem(), path, 1024, (short)1,
                                           unchecked((int)(0x1BAD5EED)));
                    DataNodeTestUtils.TriggerBlockReport(dn0);
                }
                // Generate a fake StorageReport that is missing one storage.
                StorageReport[] reports       = dn0.GetFSDataset().GetStorageReports(bpid);
                StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1];
                System.Array.Copy(reports, 0, prunedReports, 0, prunedReports.Length);
                // Stop the DataNode and send fake heartbeat with missing storage.
                cluster.StopDataNode(0);
                cluster.GetNameNodeRpc().SendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0, 0, null);
                // Check that the missing storage was pruned.
                Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(expectedStoragesAfterTest));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #2
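        /// <summary>
        /// Verifies that DatanodeDescriptor.NumBlocks() tracks additions and removals:
        /// duplicate adds and removals of unknown blocks must leave the counter unchanged.
        /// </summary>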
        public virtual void TestBlocksCounter()
        {
            DatanodeDescriptor dd = BlockManagerTestUtil.GetLocalDatanodeDescriptor(true);

            NUnit.Framework.Assert.AreEqual(0, dd.NumBlocks());
            BlockInfoContiguous blk  = new BlockInfoContiguous(new Block(1L), (short)1);
            BlockInfoContiguous blk1 = new BlockInfoContiguous(new Block(2L), (short)2);

            DatanodeStorageInfo[] storages = dd.GetStorageInfos();
            NUnit.Framework.Assert.IsTrue(storages.Length > 0);
            // add first block
            NUnit.Framework.Assert.IsTrue(storages[0].AddBlock(blk) == DatanodeStorageInfo.AddBlockResult.Added);
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // remove a non-existent block
            NUnit.Framework.Assert.IsFalse(dd.RemoveBlock(blk1));
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // add the same block again
            NUnit.Framework.Assert.IsFalse(storages[0].AddBlock(blk) == DatanodeStorageInfo.AddBlockResult.Added);
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // add second block
            NUnit.Framework.Assert.IsTrue(storages[0].AddBlock(blk1) == DatanodeStorageInfo.AddBlockResult.Added);
            NUnit.Framework.Assert.AreEqual(2, dd.NumBlocks());
            // remove first block
            NUnit.Framework.Assert.IsTrue(dd.RemoveBlock(blk));
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // remove second block
            NUnit.Framework.Assert.IsTrue(dd.RemoveBlock(blk1));
            NUnit.Framework.Assert.AreEqual(0, dd.NumBlocks());
        }
Example #3
 /// <summary>
 /// Pushes synthetic utilization figures into the node's first storage and refreshes
 /// the descriptor's heartbeat state from the resulting storage reports.
 /// </summary>
 private static void UpdateHeartbeatWithUsage(DatanodeDescriptor dn, long capacity,
                                              long dfsUsed, long remaining, long blockPoolUsed,
                                              long dnCacheCapacity, long dnCacheUsed,
                                              int xceiverCount, int volFailures)
 {
     dn.GetStorageInfos()[0].SetUtilizationForTesting(capacity, dfsUsed, remaining, blockPoolUsed);
     dn.UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(dn), dnCacheCapacity,
                        dnCacheUsed, xceiverCount, volFailures, null);
 }
Example #4
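        /// <summary>
        /// In startup safe mode, the first full block report for a storage should be
        /// processed and counted once; a repeated report must not bump the count, and a
        /// re-registration resets the storage so the next report is processed again.
        /// </summary>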
        public virtual void TestSafeModeIBR()
        {
            DatanodeDescriptor  node = Org.Mockito.Mockito.Spy(nodes[0]);
            DatanodeStorageInfo ds   = node.GetStorageInfos()[0];

            node.isAlive = true;
            DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, string.Empty);

            // pretend to be in safemode
            Org.Mockito.Mockito.DoReturn(true).When(fsn).IsInStartupSafeMode();
            // register new node
            bm.GetDatanodeManager().RegisterDatanode(nodeReg);
            bm.GetDatanodeManager().AddDatanode(node);
            // swap in spy
            NUnit.Framework.Assert.AreEqual(node, bm.GetDatanodeManager().GetDatanode(node));
            NUnit.Framework.Assert.AreEqual(0, ds.GetBlockReportCount());
            // send block report, should be processed
            Org.Mockito.Mockito.Reset(node);
            bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()), BlockListAsLongs.Empty,
                             null, false);
            NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
            // send block report again, should NOT be processed
            Org.Mockito.Mockito.Reset(node);
            bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()), BlockListAsLongs.Empty,
                             null, false);
            NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
            // re-register as if node restarted, should update existing node
            bm.GetDatanodeManager().RemoveDatanode(node);
            Org.Mockito.Mockito.Reset(node);
            bm.GetDatanodeManager().RegisterDatanode(nodeReg);
            Org.Mockito.Mockito.Verify(node).UpdateRegInfo(nodeReg);
            // send block report, should be processed after restart
            Org.Mockito.Mockito.Reset(node);
            bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()), BlockListAsLongs.Empty,
                             null, false);
            // Reinitialize as registration with empty storage list pruned
            // node.storageMap.
            ds = node.GetStorageInfos()[0];
            NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
        }
Example #5
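        /// <summary>
        /// Builds a StorageReport for every DatanodeStorageInfo attached to the given
        /// descriptor, copying its state, type and current usage figures.
        /// </summary>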
        public static StorageReport[] GetStorageReportsForDatanode(DatanodeDescriptor dnd
                                                                   )
        {
            AList <StorageReport> reports = new AList <StorageReport>();

            foreach (DatanodeStorageInfo storage in dnd.GetStorageInfos())
            {
                DatanodeStorage dns = new DatanodeStorage(storage.GetStorageID(), storage.GetState(),
                                                          storage.GetStorageType());
                StorageReport report = new StorageReport(dns, false, storage.GetCapacity(), storage.GetDfsUsed(),
                                                         storage.GetRemaining(), storage.GetBlockPoolUsed());
                reports.AddItem(report);
            }
            return(Sharpen.Collections.ToArray(reports, StorageReport.EmptyArray));
        }
Example #6
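        /// <summary>
        /// Caps each storage of the single DataNode at 64k of remaining space and then
        /// attempts to create a 100k file, expecting block allocation to fail with a
        /// "nodes instead of ... minReplication" error.
        /// </summary>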
        public virtual void TestStorageWithRemainingCapacity()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = FileSystem.Get(conf);
            Path           file1   = null;

            try
            {
                cluster.WaitActive();
                FSNamesystem         namesystem = cluster.GetNamesystem();
                string               poolId     = namesystem.GetBlockPoolId();
                DatanodeRegistration nodeReg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0],
                                                                                        poolId);
                DatanodeDescriptor dd = NameNodeAdapter.GetDatanode(namesystem, nodeReg);
                // By default, MiniDFSCluster will create 1 datanode with 2 storages.
                // Assign 64k of remaining capacity to each storage and then create a 100k file.
                foreach (DatanodeStorageInfo storage in dd.GetStorageInfos())
                {
                    storage.SetUtilizationForTesting(65536, 0, 65536, 0);
                }
                // Sum of the remaining capacity of both storages.
                dd.SetRemaining(131072);
                file1 = new Path("testRemainingStorage.dat");
                try
                {
                    DFSTestUtil.CreateFile(fs, file1, 102400, 102400, 102400, (short)1,
                                           unchecked((int)(0x1BAD5EED)));
                }
                catch (RemoteException re)
                {
                    GenericTestUtils.AssertExceptionContains("nodes instead of " + "minReplication",
                                                             re);
                }
            }
            finally
            {
                // Clean up
                NUnit.Framework.Assert.IsTrue(fs.Exists(file1));
                fs.Delete(file1, true);
                NUnit.Framework.Assert.IsTrue(!fs.Exists(file1));
                cluster.Shutdown();
            }
        }
Example #7
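            /// <summary>
            /// Polling predicate from TestNameNodePrunesMissingStorages: returns true
            /// once the storage with storageIdToRemove no longer appears in the
            /// descriptor's storage list.
            /// </summary>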
            public bool Get()
            {
                DatanodeDescriptor dnDescriptor = cluster.GetNamesystem().GetBlockManager().GetDatanodeManager()
                                                         .GetDatanode(datanodeToRemoveStorageFrom.GetDatanodeUuid());

                NUnit.Framework.Assert.IsNotNull(dnDescriptor);
                DatanodeStorageInfo[] infos = dnDescriptor.GetStorageInfos();
                foreach (DatanodeStorageInfo info in infos)
                {
                    if (info.GetStorageID().Equals(storageIdToRemove))
                    {
                        TestNameNodePrunesMissingStorages.Log.Info("Still found storage " + storageIdToRemove +
                                                                   " on " + info + ".");
                        return(false);
                    }
                }
                NUnit.Framework.Assert.AreEqual(NumStoragesPerDn - 1, infos.Length);
                return(true);
            }
Example #8
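        /// <summary>
        /// In startup safe mode, a full block report from a storage that already claims
        /// to hold blocks should still be processed and increment the report count.
        /// </summary>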
        public virtual void TestSafeModeIBRAfterIncremental()
        {
            DatanodeDescriptor  node = Org.Mockito.Mockito.Spy(nodes[0]);
            DatanodeStorageInfo ds   = node.GetStorageInfos()[0];

            node.isAlive = true;
            DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, string.Empty);

            // pretend to be in safemode
            Org.Mockito.Mockito.DoReturn(true).When(fsn).IsInStartupSafeMode();
            // register new node
            bm.GetDatanodeManager().RegisterDatanode(nodeReg);
            bm.GetDatanodeManager().AddDatanode(node);
            // swap in spy
            NUnit.Framework.Assert.AreEqual(node, bm.GetDatanodeManager().GetDatanode(node));
            NUnit.Framework.Assert.AreEqual(0, ds.GetBlockReportCount());
            // send block report while pretending to already have blocks
            Org.Mockito.Mockito.Reset(node);
            Org.Mockito.Mockito.DoReturn(1).When(node).NumBlocks();
            bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()), BlockListAsLongs.Empty,
                             null, false);
            NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
        }
Example #9
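 /// <summary>
 /// Marks a live node as decommissioning: updates the HeartbeatManager stats, logs the
 /// block count of each storage, records the start time and queues the node for the
 /// decommission monitor. Nodes already decommissioning or decommissioned are skipped.
 /// </summary>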
 public virtual void StartDecommission(DatanodeDescriptor node)
 {
     if (!node.IsDecommissionInProgress() && !node.IsDecommissioned())
     {
         // Update DN stats maintained by HeartbeatManager
         hbManager.StartDecommission(node);
         // hbManager.startDecommission will set dead node to decommissioned.
         if (node.IsDecommissionInProgress())
         {
             foreach (DatanodeStorageInfo storage in node.GetStorageInfos())
             {
                  Log.Info("Starting decommission of {} {} with {} blocks", node, storage, storage.NumBlocks());
             }
             node.decommissioningStatus.SetStartTime(Time.MonotonicNow());
             pendingNodes.AddItem(node);
         }
     }
     else
     {
          Log.Trace("startDecommission: Node {} in {}, nothing to do.", node, node.GetAdminState());
     }
 }
Example #10
        /// <summary>choose local node of localMachine as the target.</summary>
        /// <remarks>
        /// choose local node of localMachine as the target.
        /// if localMachine is not available, choose a node on the same nodegroup or
        /// rack instead.
        /// </remarks>
        /// <returns>the chosen node</returns>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockPlacementPolicy.NotEnoughReplicasException"/>
        protected internal override DatanodeStorageInfo ChooseLocalStorage(Node localMachine,
                                                                           ICollection<Node> excludedNodes, long blocksize,
                                                                           int maxNodesPerRack, IList<DatanodeStorageInfo> results,
                                                                           bool avoidStaleNodes, EnumMap<StorageType, int> storageTypes,
                                                                           bool fallbackToLocalRack)
        {
            // if no local machine, randomly choose one node
            if (localMachine == null)
            {
                return(ChooseRandom(NodeBase.Root, excludedNodes, blocksize, maxNodesPerRack,
                                    results, avoidStaleNodes, storageTypes));
            }
            // otherwise try local machine first
            if (localMachine is DatanodeDescriptor)
            {
                DatanodeDescriptor localDataNode = (DatanodeDescriptor)localMachine;
                if (excludedNodes.AddItem(localMachine))
                {
                    // was not in the excluded list
                    for (IEnumerator<KeyValuePair<StorageType, int>> iter = storageTypes.GetEnumerator();
                         iter.HasNext();)
                    {
                        KeyValuePair<StorageType, int> entry = iter.Next();
                        foreach (DatanodeStorageInfo localStorage in DFSUtil.Shuffle(localDataNode.GetStorageInfos()))
                        {
                            StorageType type = entry.Key;
                            if (AddIfIsGoodTarget(localStorage, excludedNodes, blocksize, maxNodesPerRack,
                                                  false, results, avoidStaleNodes, type) >= 0)
                            {
                                int num = entry.Value;
                                if (num == 1)
                                {
                                    iter.Remove();
                                }
                                else
                                {
                                    entry.SetValue(num - 1);
                                }
                                return(localStorage);
                            }
                        }
                    }
                }
            }
            // try a node on local node group
            DatanodeStorageInfo chosenStorage = ChooseLocalNodeGroup((NetworkTopologyWithNodeGroup)clusterMap,
                                                                     localMachine, excludedNodes, blocksize,
                                                                     maxNodesPerRack, results, avoidStaleNodes,
                                                                     storageTypes);

            if (chosenStorage != null)
            {
                return(chosenStorage);
            }
            if (!fallbackToLocalRack)
            {
                return(null);
            }
            // try a node on local rack
            return(ChooseLocalRack(localMachine, excludedNodes, blocksize, maxNodesPerRack,
                                   results, avoidStaleNodes, storageTypes));
        }
Example #11
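        /// <summary>
        /// In startup safe mode, an incremental block report received before the first
        /// full block report must not mark the storage as reported; the subsequent full
        /// report is then processed and every block ends up in the expected state.
        /// </summary>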
        public virtual void TestSafeModeIBRBeforeFirstFullBR()
        {
            // pretend to be in safemode
            Org.Mockito.Mockito.DoReturn(true).When(fsn).IsInStartupSafeMode();
            DatanodeDescriptor  node = nodes[0];
            DatanodeStorageInfo ds   = node.GetStorageInfos()[0];

            node.isAlive = true;
            DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, string.Empty);

            // register new node
            bm.GetDatanodeManager().RegisterDatanode(nodeReg);
            bm.GetDatanodeManager().AddDatanode(node);
            NUnit.Framework.Assert.AreEqual(node, bm.GetDatanodeManager().GetDatanode(node));
            NUnit.Framework.Assert.AreEqual(0, ds.GetBlockReportCount());
            // Build an incremental report
            IList <ReceivedDeletedBlockInfo> rdbiList = new AList <ReceivedDeletedBlockInfo>();

            // Build a full report
            BlockListAsLongs.Builder builder = BlockListAsLongs.Builder();
            // blk_42 is finalized.
            long receivedBlockId = 42; // arbitrary
            BlockInfoContiguous receivedBlock = AddBlockToBM(receivedBlockId);

            rdbiList.AddItem(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
                                                          ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null));
            builder.Add(new FinalizedReplica(receivedBlock, null, null));
            // blk_43 is under construction.
            long receivingBlockId = 43;
            BlockInfoContiguous receivingBlock = AddUcBlockToBM(receivingBlockId);

            rdbiList.AddItem(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
                                                          ReceivedDeletedBlockInfo.BlockStatus.ReceivingBlock, null));
            builder.Add(new ReplicaBeingWritten(receivingBlock, null, null, null));
            // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
            long receivingReceivedBlockId = 44;
            BlockInfoContiguous receivingReceivedBlock = AddBlockToBM(receivingReceivedBlockId);

            rdbiList.AddItem(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
                                                          ReceivedDeletedBlockInfo.BlockStatus.ReceivingBlock, null));
            rdbiList.AddItem(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
                                                          ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null));
            builder.Add(new FinalizedReplica(receivingReceivedBlock, null, null));
            // blk_45 is not in full BR, because it's deleted.
            long ReceivedDeletedBlockId = 45;

            rdbiList.AddItem(new ReceivedDeletedBlockInfo(new Block(ReceivedDeletedBlockId),
                                                          ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null));
            rdbiList.AddItem(new ReceivedDeletedBlockInfo(new Block(ReceivedDeletedBlockId),
                                                          ReceivedDeletedBlockInfo.BlockStatus.DeletedBlock, null));
            // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
            long existedBlockId = 46;
            BlockInfoContiguous existedBlock = AddBlockToBM(existedBlockId);

            builder.Add(new FinalizedReplica(existedBlock, null, null));
            // process IBR and full BR
            StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
                new DatanodeStorage(ds.GetStorageID()),
                Sharpen.Collections.ToArray(rdbiList, new ReceivedDeletedBlockInfo[rdbiList.Count]));

            bm.ProcessIncrementalBlockReport(node, srdb);
            // Make sure it's the first full report
            NUnit.Framework.Assert.AreEqual(0, ds.GetBlockReportCount());
            bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()), builder.Build(), null, false);
            NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
            // verify the storage info is correct
            NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(new Block(receivedBlockId)).FindStorageInfo(ds) >= 0);
            NUnit.Framework.Assert.IsTrue(((BlockInfoContiguousUnderConstruction)bm.GetStoredBlock(
                new Block(receivingBlockId))).GetNumExpectedLocations() > 0);
            NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(new Block(receivingReceivedBlockId)).FindStorageInfo(ds) >= 0);
            NUnit.Framework.Assert.IsNull(bm.GetStoredBlock(new Block(ReceivedDeletedBlockId)));
            NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(new Block(existedBlock)).FindStorageInfo(ds) >= 0);
        }
Example #12
        /// <summary>
        /// Registers a block under recovery on dd1 and checks the BlockRecoveryCommand
        /// returned with the next heartbeat, varying which replicas look stale.
        /// </summary>
        public virtual void TestHeartbeatBlockRecovery()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                cluster.WaitActive();
                FSNamesystem     namesystem = cluster.GetNamesystem();
                HeartbeatManager hm = namesystem.GetBlockManager().GetDatanodeManager().GetHeartbeatManager();
                string poolId = namesystem.GetBlockPoolId();
                DatanodeRegistration nodeReg1 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0],
                                                                                         poolId);
                DatanodeDescriptor dd1 = NameNodeAdapter.GetDatanode(namesystem, nodeReg1);
                dd1.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
                DatanodeRegistration nodeReg2 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[1],
                                                                                         poolId);
                DatanodeDescriptor dd2 = NameNodeAdapter.GetDatanode(namesystem, nodeReg2);
                dd2.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
                DatanodeRegistration nodeReg3 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[2],
                                                                                         poolId);
                DatanodeDescriptor dd3 = NameNodeAdapter.GetDatanode(namesystem, nodeReg3);
                dd3.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem);
                        NameNodeAdapter.SendHeartBeat(nodeReg2, dd2, namesystem);
                        NameNodeAdapter.SendHeartBeat(nodeReg3, dd3, namesystem);
                        // Test with all alive nodes.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd1, 0);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd2, 0);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
                        DatanodeStorageInfo[] storages = new DatanodeStorageInfo[] {
                            dd1.GetStorageInfos()[0], dd2.GetStorageInfos()[0], dd3.GetStorageInfos()[0] };
                        BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
                            new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
                            HdfsServerConstants.BlockUCState.UnderRecovery, storages);
                        dd1.AddBlockToBeRecovered(blockInfo);
                        DatanodeCommand[] cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction());
                        BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand)cmds[0];
                        NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                        DatanodeInfo[] recoveringNodes = Sharpen.Collections.ToArray(
                            recoveryCommand.GetRecoveringBlocks(),
                            new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                        NUnit.Framework.Assert.AreEqual(3, recoveringNodes.Length);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd2);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[2], dd3);
                        // Test with one stale node.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd1, 0);
                        // More than the default stale interval of 30 seconds.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -40 * 1000);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
                        blockInfo = new BlockInfoContiguousUnderConstruction(
                            new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
                            HdfsServerConstants.BlockUCState.UnderRecovery, storages);
                        dd1.AddBlockToBeRecovered(blockInfo);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction());
                        recoveryCommand = (BlockRecoveryCommand)cmds[0];
                        NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                        recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks(),
                            new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                        NUnit.Framework.Assert.AreEqual(2, recoveringNodes.Length);
                        // dd2 is skipped.
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd3);
                        // Test with all stale nodes.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -60 * 1000);
                        // More than the default stale interval of 30 seconds.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -40 * 1000);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -80 * 1000);
                        blockInfo = new BlockInfoContiguousUnderConstruction(
                            new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
                            HdfsServerConstants.BlockUCState.UnderRecovery, storages);
                        dd1.AddBlockToBeRecovered(blockInfo);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction());
                        recoveryCommand = (BlockRecoveryCommand)cmds[0];
                        NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                        recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks(),
                            new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                        // Only dd1 is included since it heartbeated and hence is not stale
                        // when the list of recovery blocks is constructed.
                        NUnit.Framework.Assert.AreEqual(3, recoveringNodes.Length);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd2);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[2], dd3);
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }