/// <summary>
/// Registers a new under-construction block (replication 3) in the block
/// manager's blocks map, backed by a mocked owning collection.
/// </summary>
/// <param name="blkId">raw block id to wrap</param>
/// <returns>the under-construction block that was added</returns>
private BlockInfoContiguous AddUcBlockToBM(long blkId)
{
    BlockInfoContiguousUnderConstruction ucBlock =
        new BlockInfoContiguousUnderConstruction(new Block(blkId), (short)3);
    // The blocks map needs an owning collection; a mock that reports
    // replication factor 3 is all this test requires.
    BlockCollection collection = Org.Mockito.Mockito.Mock<BlockCollection>();
    Org.Mockito.Mockito.DoReturn((short)3).When(collection).GetBlockReplication();
    bm.blocksMap.AddBlockCollection(ucBlock, collection);
    return ucBlock;
}
/// <summary>Store block recovery work.</summary>
/// <remarks>
/// Enqueues the block for lease recovery unless it is already queued,
/// so the same block is never scheduled twice.
/// </remarks>
internal virtual void AddBlockToBeRecovered(BlockInfoContiguousUnderConstruction block)
{
    if (!recoverBlocks.Contains(block))
    {
        recoverBlocks.Offer(block);
        return;
    }
    // Duplicate request: log and drop it rather than queue it again.
    BlockManager.Log.Info(block + " is already in the recovery queue");
}
/// <summary>
/// Drives InitializeBlockRecovery through four attempts with varying
/// last-heartbeat offsets and checks which datanode receives the lease
/// recovery command each time.
/// </summary>
public virtual void TestInitializeBlockRecovery()
{
    // Three storages on three distinct (alive) datanodes.
    DatanodeStorageInfo storage1 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.1", "s1");
    DatanodeDescriptor node1 = storage1.GetDatanodeDescriptor();
    DatanodeStorageInfo storage2 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.2", "s2");
    DatanodeDescriptor node2 = storage2.GetDatanodeDescriptor();
    DatanodeStorageInfo storage3 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.3", "s3");
    DatanodeDescriptor node3 = storage3.GetDatanodeDescriptor();
    node1.isAlive = node2.isAlive = node3.isAlive = true;
    BlockInfoContiguousUnderConstruction ucBlock = new BlockInfoContiguousUnderConstruction(
        new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
        HdfsServerConstants.BlockUCState.UnderConstruction,
        new DatanodeStorageInfo[] { storage1, storage2, storage3 });

    // Recovery attempt #1: node2 is expected to be chosen as primary.
    DFSTestUtil.ResetLastUpdatesWithOffset(node1, -3 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node3, -2 * 1000);
    ucBlock.InitializeBlockRecovery(1);
    BlockInfoContiguousUnderConstruction[] recoveryList = node2.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(recoveryList[0], ucBlock);

    // Recovery attempt #2: node1 is expected to be chosen as primary.
    DFSTestUtil.ResetLastUpdatesWithOffset(node1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node3, -3 * 1000);
    ucBlock.InitializeBlockRecovery(2);
    recoveryList = node1.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(recoveryList[0], ucBlock);

    // Recovery attempt #3: node3 is expected to be chosen as primary.
    DFSTestUtil.ResetLastUpdatesWithOffset(node1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node3, -3 * 1000);
    ucBlock.InitializeBlockRecovery(3);
    recoveryList = node3.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(recoveryList[0], ucBlock);

    // Recovery attempt #4.
    // Reset everything. And again pick DN with most recent heart beat.
    DFSTestUtil.ResetLastUpdatesWithOffset(node1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(node3, 0);
    ucBlock.InitializeBlockRecovery(3);
    recoveryList = node3.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(recoveryList[0], ucBlock);
}
/// <summary>Convert a complete block to an under construction block.</summary>
/// <param name="s">the construction state to apply</param>
/// <param name="targets">expected replica locations</param>
/// <returns>BlockInfoUnderConstruction - an under construction block.</returns>
public virtual BlockInfoContiguousUnderConstruction ConvertToBlockUnderConstruction(
    HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets)
{
    if (!IsComplete())
    {
        // Already under construction: just refresh state, locations, and owner.
        BlockInfoContiguousUnderConstruction existing =
            (BlockInfoContiguousUnderConstruction)this;
        existing.SetBlockUCState(s);
        existing.SetExpectedLocations(targets);
        existing.SetBlockCollection(GetBlockCollection());
        return existing;
    }
    // Complete block: wrap it in a new under-construction instance that
    // inherits this block's collection and replication factor.
    BlockInfoContiguousUnderConstruction converted =
        new BlockInfoContiguousUnderConstruction(
            this, GetBlockCollection().GetBlockReplication(), s, targets);
    converted.SetBlockCollection(GetBlockCollection());
    return converted;
}
/// <summary>
/// Verifies that a heartbeat from a datanode holding pending block-recovery
/// work returns a DNA_RECOVERBLOCK command, and that stale replicas (no
/// heartbeat within the default 30s stale interval) are filtered out of the
/// recovering-block locations — unless every replica is stale.
/// </summary>
public virtual void TestHeartbeatBlockRecovery()
{
    Configuration conf = new HdfsConfiguration();
    // Real mini-cluster with 3 datanodes; shut down in the outer finally.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    try
    {
        cluster.WaitActive();
        FSNamesystem namesystem = cluster.GetNamesystem();
        HeartbeatManager hm = namesystem.GetBlockManager().GetDatanodeManager().GetHeartbeatManager();
        string poolId = namesystem.GetBlockPoolId();
        // Resolve the namenode-side descriptor for each datanode and give
        // each one a registered storage.
        DatanodeRegistration nodeReg1 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId);
        DatanodeDescriptor dd1 = NameNodeAdapter.GetDatanode(namesystem, nodeReg1);
        dd1.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
        DatanodeRegistration nodeReg2 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[1], poolId);
        DatanodeDescriptor dd2 = NameNodeAdapter.GetDatanode(namesystem, nodeReg2);
        dd2.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
        DatanodeRegistration nodeReg3 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[2], poolId);
        DatanodeDescriptor dd3 = NameNodeAdapter.GetDatanode(namesystem, nodeReg3);
        dd3.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
        try
        {
            // Hold the namesystem write lock and the heartbeat manager's
            // monitor while manipulating heartbeat state directly.
            namesystem.WriteLock();
            lock (hm)
            {
                NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem);
                NameNodeAdapter.SendHeartBeat(nodeReg2, dd2, namesystem);
                NameNodeAdapter.SendHeartBeat(nodeReg3, dd3, namesystem);
                // Test with all alive nodes.
                DFSTestUtil.ResetLastUpdatesWithOffset(dd1, 0);
                DFSTestUtil.ResetLastUpdatesWithOffset(dd2, 0);
                DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
                DatanodeStorageInfo[] storages = new DatanodeStorageInfo[]
                {
                    dd1.GetStorageInfos()[0],
                    dd2.GetStorageInfos()[0],
                    dd3.GetStorageInfos()[0]
                };
                // An under-recovery block whose expected locations span all
                // three datanodes; dd1 is handed the recovery work.
                BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction
                    (new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
                    HdfsServerConstants.BlockUCState.UnderRecovery, storages);
                dd1.AddBlockToBeRecovered(blockInfo);
                // The next heartbeat from dd1 must carry exactly one command:
                // a block-recovery command listing all three locations.
                DatanodeCommand[] cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction());
                BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand)cmds[0];
                NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                DatanodeInfo[] recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks(),
                    new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                NUnit.Framework.Assert.AreEqual(3, recoveringNodes.Length);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd2);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[2], dd3);
                // Test with one stale node.
                DFSTestUtil.ResetLastUpdatesWithOffset(dd1, 0);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
                blockInfo = new BlockInfoContiguousUnderConstruction(new Block(0, 0, GenerationStamp
                    .LastReservedStamp), (short)3, HdfsServerConstants.BlockUCState.UnderRecovery, storages);
                dd1.AddBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction());
                recoveryCommand = (BlockRecoveryCommand)cmds[0];
                NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks(),
                    new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                NUnit.Framework.Assert.AreEqual(2, recoveringNodes.Length);
                // dd2 is skipped.
                NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd3);
                // Test with all stale node.
                DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -60 * 1000);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -80 * 1000);
                blockInfo = new BlockInfoContiguousUnderConstruction(new Block(0, 0, GenerationStamp
                    .LastReservedStamp), (short)3, HdfsServerConstants.BlockUCState.UnderRecovery, storages);
                dd1.AddBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction());
                recoveryCommand = (BlockRecoveryCommand)cmds[0];
                NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks(),
                    new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                // NOTE(review): the assertion below expects all three nodes.
                // Presumably when every expected location is stale the
                // staleness filter is bypassed so recovery can still proceed
                // — confirm against the namenode's recovery-location logic.
                NUnit.Framework.Assert.AreEqual(3, recoveringNodes.Length);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd2);
                NUnit.Framework.Assert.AreEqual(recoveringNodes[2], dd3);
            }
        }
        finally
        {
            namesystem.WriteUnlock();
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}