/// <exception cref="System.IO.IOException"/> /// <exception cref="System.Exception"/> protected internal bool VerifyDeletedBlocks(LocatedBlocks locatedBlocks) { Log.Info("Verifying replica has no saved copy after deletion."); TriggerBlockReport(); while (DataNodeTestUtils.GetPendingAsyncDeletions(cluster.GetDataNodes()[0]) > 0L ) { Sharpen.Thread.Sleep(1000); } string bpid = cluster.GetNamesystem().GetBlockPoolId(); IList <FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes( ); // Make sure deleted replica does not have a copy on either finalized dir of // transient volume or finalized dir of non-transient volume foreach (FsVolumeSpi v in volumes) { FsVolumeImpl volume = (FsVolumeImpl)v; FilePath targetDir = (v.IsTransientStorage()) ? volume.GetBlockPoolSlice(bpid).GetFinalizedDir () : volume.GetBlockPoolSlice(bpid).GetLazypersistDir(); if (VerifyBlockDeletedFromDir(targetDir, locatedBlocks) == false) { return(false); } } return(true); }
public virtual void TestChooseReplicaToDelete()
{
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try
    {
        Configuration conf = new HdfsConfiguration();
        conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, SmallBlockSize);
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        fs = cluster.GetFileSystem();
        FSNamesystem namesystem = cluster.GetNamesystem();
        conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 300);
        cluster.StartDataNodes(conf, 1, true, null, null, null);
        DataNode lastDN = cluster.GetDataNodes()[3];
        DatanodeRegistration dnReg = DataNodeTestUtils.GetDNRegistrationForBP(
            lastDN, namesystem.GetBlockPoolId());
        string lastDNid = dnReg.GetDatanodeUuid();
        Path fileName = new Path("/foo2");
        DFSTestUtil.CreateFile(fs, fileName, SmallFileLength, (short)4, 0L);
        DFSTestUtil.WaitReplication(fs, fileName, (short)4);
        // Wait for tolerable number of heartbeats plus one
        DatanodeDescriptor nodeInfo = null;
        long lastHeartbeat = 0;
        long waitTime = DFSConfigKeys.DfsHeartbeatIntervalDefault * 1000 *
            (DFSConfigKeys.DfsNamenodeTolerateHeartbeatMultiplierDefault + 1);
        do
        {
            nodeInfo = namesystem.GetBlockManager().GetDatanodeManager().GetDatanode(dnReg);
            lastHeartbeat = nodeInfo.GetLastUpdateMonotonic();
        }
        while (Time.MonotonicNow() - lastHeartbeat < waitTime);
        fs.SetReplication(fileName, (short)3);
        BlockLocation[] locs = fs.GetFileBlockLocations(
            fs.GetFileStatus(fileName), 0, long.MaxValue);
        // All replicas for deletion should be scheduled on lastDN, and should not
        // actually be deleted, because lastDN does not heartbeat.
        namesystem.ReadLock();
        ICollection<Block> dnBlocks = namesystem.GetBlockManager().excessReplicateMap[lastDNid];
        NUnit.Framework.Assert.AreEqual(
            "Replicas on node " + lastDNid + " should have been deleted",
            SmallFileLength / SmallBlockSize, dnBlocks.Count);
        namesystem.ReadUnlock();
        foreach (BlockLocation location in locs)
        {
            NUnit.Framework.Assert.AreEqual("Block should still have 4 replicas",
                4, location.GetNames().Length);
        }
    }
    finally
    {
        if (fs != null)
        {
            fs.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void Pipeline_01()
{
    string MethodName = GenericTestUtils.GetMethodName();
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Running " + MethodName);
    }
    Path filePath = new Path("/" + MethodName + ".dat");
    DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Invoking append but doing nothing otherwise...");
    }
    FSDataOutputStream ofs = fs.Append(filePath);
    ofs.WriteBytes("Some more stuff to write");
    ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
    IList<LocatedBlock> lb = cluster.GetNameNodeRpc()
        .GetBlockLocations(filePath.ToString(), FileSize - 1, FileSize).GetLocatedBlocks();
    string bpid = cluster.GetNamesystem().GetBlockPoolId();
    foreach (DataNode dn in cluster.GetDataNodes())
    {
        Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId());
        NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r != null);
        NUnit.Framework.Assert.AreEqual(
            "Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",
            HdfsServerConstants.ReplicaState.Rbw, r.GetState());
    }
    ofs.Close();
}
public virtual void TestStaleNodes()
{
    // Mark two datanodes as stale
    for (int i = 0; i < 2; i++)
    {
        DataNode dn = cluster.GetDataNodes()[i];
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
        long staleInterval = Conf.GetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey,
            DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalDefault);
        DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager()
            .GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
    }
    // Let the HeartbeatManager check heartbeats
    BlockManagerTestUtil.CheckHeartbeat(
        cluster.GetNameNode().GetNamesystem().GetBlockManager());
    MetricsAsserts.AssertGauge("StaleDataNodes", 2, MetricsAsserts.GetMetrics(NsMetrics));
    // Reset the stale datanodes
    for (int i_1 = 0; i_1 < 2; i_1++)
    {
        DataNode dn = cluster.GetDataNodes()[i_1];
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, false);
        DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager()
            .GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, 0);
    }
    // Let the HeartbeatManager refresh
    BlockManagerTestUtil.CheckHeartbeat(
        cluster.GetNameNode().GetNamesystem().GetBlockManager());
    MetricsAsserts.AssertGauge("StaleDataNodes", 0, MetricsAsserts.GetMetrics(NsMetrics));
}
public virtual void TestDeletingBlocks()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).Build();
    try
    {
        cluster.WaitActive();
        DataNode dn = cluster.GetDataNodes()[0];
        FsDatasetImpl ds = (FsDatasetImpl)DataNodeTestUtils.GetFSDataset(dn);
        FsVolumeImpl vol = ds.GetVolumes()[0];
        ExtendedBlock eb;
        ReplicaInfo info;
        IList<Block> blockList = new AList<Block>();
        for (int i = 1; i <= 63; i++)
        {
            eb = new ExtendedBlock(Blockpool, i, 1, 1000 + i);
            info = new FinalizedReplica(eb.GetLocalBlock(), vol,
                vol.GetCurrentDir().GetParentFile());
            ds.volumeMap.Add(Blockpool, info);
            info.GetBlockFile().CreateNewFile();
            info.GetMetaFile().CreateNewFile();
            blockList.AddItem(info);
        }
        ds.Invalidate(Blockpool, Sharpen.Collections.ToArray(blockList, new Block[0]));
        try
        {
            Sharpen.Thread.Sleep(1000);
        }
        catch (Exception)
        {
            // Nothing to do
        }
        NUnit.Framework.Assert.IsTrue(ds.IsDeletingBlock(Blockpool, blockList[0].GetBlockId()));
        blockList.Clear();
        eb = new ExtendedBlock(Blockpool, 64, 1, 1064);
        info = new FinalizedReplica(eb.GetLocalBlock(), vol,
            vol.GetCurrentDir().GetParentFile());
        ds.volumeMap.Add(Blockpool, info);
        info.GetBlockFile().CreateNewFile();
        info.GetMetaFile().CreateNewFile();
        blockList.AddItem(info);
        ds.Invalidate(Blockpool, Sharpen.Collections.ToArray(blockList, new Block[0]));
        try
        {
            Sharpen.Thread.Sleep(1000);
        }
        catch (Exception)
        {
            // Nothing to do
        }
        NUnit.Framework.Assert.IsFalse(ds.IsDeletingBlock(Blockpool, blockList[0].GetBlockId()));
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestArrayOutOfBoundsException()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        Path FilePath = new Path("/tmp.txt");
        long FileLen = 1L;
        DFSTestUtil.CreateFile(fs, FilePath, FileLen, (short)2, 1L);
        // get the block
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        FilePath storageDir = cluster.GetInstanceStorageDir(0, 0);
        FilePath dataDir = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
        NUnit.Framework.Assert.IsTrue("Data directory does not exist", dataDir.Exists());
        ExtendedBlock blk = GetBlock(bpid, dataDir);
        if (blk == null)
        {
            storageDir = cluster.GetInstanceStorageDir(0, 1);
            dataDir = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
            blk = GetBlock(bpid, dataDir);
        }
        NUnit.Framework.Assert.IsFalse(
            "Data directory does not contain any blocks or there was an IO error",
            blk == null);
        // start a third datanode
        cluster.StartDataNodes(conf, 1, true, null, null);
        AList<DataNode> datanodes = cluster.GetDataNodes();
        NUnit.Framework.Assert.AreEqual(3, datanodes.Count);
        DataNode dataNode = datanodes[2];
        // report a corrupted block from the third datanode
        DatanodeRegistration dnR = DataNodeTestUtils.GetDNRegistrationForBP(
            dataNode, blk.GetBlockPoolId());
        FSNamesystem ns = cluster.GetNamesystem();
        ns.WriteLock();
        try
        {
            cluster.GetNamesystem().GetBlockManager().FindAndMarkBlockAsCorrupt(
                blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
        }
        finally
        {
            ns.WriteUnlock();
        }
        // open the file
        fs.Open(FilePath);
        // clean up
        fs.Delete(FilePath, false);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestPendingDeleteUnknownBlocks()
{
    int fileNum = 5;  // 5 files
    Path[] files = new Path[fileNum];
    MiniDFSCluster.DataNodeProperties[] dnprops =
        new MiniDFSCluster.DataNodeProperties[Replication];
    // create a group of files, each file containing 1 block
    for (int i = 0; i < fileNum; i++)
    {
        files[i] = new Path("/file" + i);
        DFSTestUtil.CreateFile(dfs, files[i], Blocksize, Replication, i);
    }
    // wait until all DataNodes have replicas
    WaitForReplication();
    for (int i_1 = Replication - 1; i_1 >= 0; i_1--)
    {
        dnprops[i_1] = cluster.StopDataNode(i_1);
    }
    Sharpen.Thread.Sleep(2000);
    // delete 2 files; we still have 3 files remaining so that we can cover
    // every DN storage
    for (int i_2 = 0; i_2 < 2; i_2++)
    {
        dfs.Delete(files[i_2], true);
    }
    // restart the NameNode
    cluster.RestartNameNode(false);
    InvalidateBlocks invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(
        cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks");
    InvalidateBlocks mockIb = Org.Mockito.Mockito.Spy(invalidateBlocks);
    Org.Mockito.Mockito.DoReturn(1L).When(mockIb).GetInvalidationDelay();
    Whitebox.SetInternalState(cluster.GetNamesystem().GetBlockManager(),
        "invalidateBlocks", mockIb);
    NUnit.Framework.Assert.AreEqual(0L, cluster.GetNamesystem().GetPendingDeletionBlocks());
    // restart the DataNodes
    for (int i_3 = 0; i_3 < Replication; i_3++)
    {
        cluster.RestartDataNode(dnprops[i_3], true);
    }
    cluster.WaitActive();
    for (int i_4 = 0; i_4 < Replication; i_4++)
    {
        DataNodeTestUtils.TriggerBlockReport(cluster.GetDataNodes()[i_4]);
    }
    Sharpen.Thread.Sleep(2000);
    // make sure we have received block reports by checking the total block count
    NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
    NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetPendingDeletionBlocks());
    cluster.RestartNameNode(true);
    Sharpen.Thread.Sleep(6000);
    NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
    NUnit.Framework.Assert.AreEqual(0, cluster.GetNamesystem().GetPendingDeletionBlocks());
}
/// <exception cref="System.IO.IOException"/> public static void CheckMetaInfo(ExtendedBlock b, DataNode dn) { Block metainfo = DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(b.GetBlockPoolId (), b.GetBlockId()); NUnit.Framework.Assert.AreEqual(b.GetBlockId(), metainfo.GetBlockId()); NUnit.Framework.Assert.AreEqual(b.GetNumBytes(), metainfo.GetNumBytes()); }
/// <exception cref="System.IO.IOException"/> /// <exception cref="System.Exception"/> private void TriggerHeartbeats(IList <DataNode> datanodes) { foreach (DataNode dn in datanodes) { DataNodeTestUtils.TriggerHeartbeat(dn); } Sharpen.Thread.Sleep(100); }
public virtual void TestDeadDatanode()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500);
    conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
    cluster = new MiniDFSCluster.Builder(conf).Build();
    cluster.WaitActive();
    string poolId = cluster.GetNamesystem().GetBlockPoolId();
    // wait for the datanode to be marked live
    DataNode dn = cluster.GetDataNodes()[0];
    DatanodeRegistration reg = DataNodeTestUtils.GetDNRegistrationForBP(
        cluster.GetDataNodes()[0], poolId);
    DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), true, 20000);
    // shut down and wait for the datanode to be marked dead
    dn.Shutdown();
    DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.GetNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = new ReceivedDeletedBlockInfo[]
    {
        new ReceivedDeletedBlockInfo(new Block(0),
            ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null)
    };
    StorageReceivedDeletedBlocks[] storageBlocks = new StorageReceivedDeletedBlocks[]
    {
        new StorageReceivedDeletedBlocks(reg.GetDatanodeUuid(), blocks)
    };
    // Ensure a blockReceived call from a dead datanode is rejected with an IOException
    try
    {
        dnp.BlockReceivedAndDeleted(reg, poolId, storageBlocks);
        NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
    }
    catch (IOException)
    {
        // Expected
    }
    // Ensure a blockReport from a dead datanode is rejected with an IOException
    StorageBlockReport[] report = new StorageBlockReport[]
    {
        new StorageBlockReport(new DatanodeStorage(reg.GetDatanodeUuid()),
            BlockListAsLongs.Empty)
    };
    try
    {
        dnp.BlockReport(reg, poolId, report,
            new BlockReportContext(1, 0, Runtime.NanoTime()));
        NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
    }
    catch (IOException)
    {
        // Expected
    }
    // Ensure a heartbeat from a dead datanode is rejected with a command
    // that asks the datanode to register again
    StorageReport[] rep = new StorageReport[]
    {
        new StorageReport(new DatanodeStorage(reg.GetDatanodeUuid()), false, 0, 0, 0, 0)
    };
    DatanodeCommand[] cmd = dnp.SendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null).GetCommands();
    NUnit.Framework.Assert.AreEqual(1, cmd.Length);
    NUnit.Framework.Assert.AreEqual(RegisterCommand.Register.GetAction(), cmd[0].GetAction());
}
public virtual void TestNoSpaceArchive()
{
    Log.Info("testNoSpaceArchive");
    TestStorageMover.PathPolicyMap pathPolicyMap = new TestStorageMover.PathPolicyMap(0);
    TestStorageMover.NamespaceScheme nsScheme = pathPolicyMap.NewNamespaceScheme();
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    TestStorageMover.MigrationTest test = new TestStorageMover.MigrationTest(
        this, clusterScheme, nsScheme);
    try
    {
        test.RunBasicTest(false);
        // create 2 cold files with replication 3
        short replication = 3;
        for (int i = 0; i < 2; i++)
        {
            Path p = new Path(pathPolicyMap.cold, "file" + i);
            DFSTestUtil.CreateFile(test.dfs, p, BlockSize, replication, 0L);
            WaitForAllReplicas(replication, p, test.dfs);
        }
        // set all the ARCHIVE volumes to full
        foreach (DataNode dn in test.cluster.GetDataNodes())
        {
            SetVolumeFull(dn, StorageType.Archive);
            DataNodeTestUtils.TriggerHeartbeat(dn);
        }
        {
            // test increasing replication: new replicas cannot be created
            // since there is no more ARCHIVE space.
            Path file0 = new Path(pathPolicyMap.cold, "file0");
            TestStorageMover.Replication r = test.GetReplication(file0);
            NUnit.Framework.Assert.AreEqual(0, r.disk);
            short newReplication = (short)5;
            test.dfs.SetReplication(file0, newReplication);
            Sharpen.Thread.Sleep(10000);
            test.VerifyReplication(file0, 0, r.archive);
        }
        {
            // test creating a hot file
            Path p = new Path(pathPolicyMap.hot, "foo");
            DFSTestUtil.CreateFile(test.dfs, p, BlockSize, (short)3, 0L);
        }
        {
            // test moving a cold file to warm
            Path file1 = new Path(pathPolicyMap.cold, "file1");
            test.dfs.Rename(file1, pathPolicyMap.warm);
            test.Migrate();
            test.Verify(true);
        }
    }
    finally
    {
        test.ShutdownCluster();
    }
}
/// <exception cref="System.IO.IOException"/> internal static long GetTotalDfsUsed(MiniDFSCluster cluster) { long total = 0; foreach (DataNode node in cluster.GetDataNodes()) { total += DataNodeTestUtils.GetFSDataset(node).GetDfsUsed(); } return(total); }
public virtual void TestNoSpaceDisk()
{
    Log.Info("testNoSpaceDisk");
    TestStorageMover.PathPolicyMap pathPolicyMap = new TestStorageMover.PathPolicyMap(0);
    TestStorageMover.NamespaceScheme nsScheme = pathPolicyMap.NewNamespaceScheme();
    Configuration conf = new Configuration(DefaultConf);
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        conf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    TestStorageMover.MigrationTest test = new TestStorageMover.MigrationTest(
        this, clusterScheme, nsScheme);
    try
    {
        test.RunBasicTest(false);
        // create 2 hot files with replication 3
        short replication = 3;
        for (int i = 0; i < 2; i++)
        {
            Path p = new Path(pathPolicyMap.hot, "file" + i);
            DFSTestUtil.CreateFile(test.dfs, p, BlockSize, replication, 0L);
            WaitForAllReplicas(replication, p, test.dfs);
        }
        // set all the DISK volumes to full
        foreach (DataNode dn in test.cluster.GetDataNodes())
        {
            SetVolumeFull(dn, StorageType.Disk);
            DataNodeTestUtils.TriggerHeartbeat(dn);
        }
        // test increasing replication. Since DISK is full,
        // new replicas should be stored in ARCHIVE as a fallback storage.
        Path file0 = new Path(pathPolicyMap.hot, "file0");
        TestStorageMover.Replication r = test.GetReplication(file0);
        short newReplication = (short)5;
        test.dfs.SetReplication(file0, newReplication);
        Sharpen.Thread.Sleep(10000);
        test.VerifyReplication(file0, r.disk, newReplication - r.disk);
        // test creating a cold file and then increasing its replication
        Path p_1 = new Path(pathPolicyMap.cold, "foo");
        DFSTestUtil.CreateFile(test.dfs, p_1, BlockSize, replication, 0L);
        test.VerifyReplication(p_1, 0, replication);
        test.dfs.SetReplication(p_1, newReplication);
        Sharpen.Thread.Sleep(10000);
        test.VerifyReplication(p_1, 0, newReplication);
        // test moving a hot file to warm
        Path file1 = new Path(pathPolicyMap.hot, "file1");
        test.dfs.Rename(file1, pathPolicyMap.warm);
        test.Migrate();
        test.VerifyFile(new Path(pathPolicyMap.warm, "file1"), Warm.GetId());
    }
    finally
    {
        test.ShutdownCluster();
    }
}
public bool Get()
{
    foreach (DataNode dn in cluster.GetDataNodes())
    {
        if (DataNodeTestUtils.GetPendingAsyncDeletions(dn) > 0)
        {
            return false;
        }
    }
    return true;
}
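// Illustrative usage, not in the original file: this Get() satisfies the
// Supplier<bool> contract that GenericTestUtils.WaitFor polls (see the zombie
// storage test below), so a caller can block until every datanode has drained
// its async-deletion queue. The poll interval and timeout here are arbitrary:
//
//   GenericTestUtils.WaitFor(supplier, 100, 10000);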
// let the NN finish deletion
/// <summary>Verify block locations after running the migration tool.</summary>
/// <exception cref="System.Exception"/>
internal virtual void Verify(bool verifyAll)
{
    foreach (DataNode dn in this.cluster.GetDataNodes())
    {
        DataNodeTestUtils.TriggerBlockReport(dn);
    }
    if (verifyAll)
    {
        this.VerifyNamespace();
    }
}
/// <summary>Stop the heartbeat of a datanode in the MiniDFSCluster</summary>
/// <param name="cluster">The MiniDFSCluster</param>
/// <param name="hostName">The hostname of the datanode to be stopped</param>
/// <returns>The DataNode whose heartbeat has been stopped, or null if no datanode matches</returns>
private DataNode StopDataNodeHeartbeat(MiniDFSCluster cluster, string hostName)
{
    foreach (DataNode dn in cluster.GetDataNodes())
    {
        if (dn.GetDatanodeId().GetHostName().Equals(hostName))
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
            return dn;
        }
    }
    return null;
}
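// Illustrative usage, not in the original file: stop one node's heartbeat, run
// the scenario that depends on the node looking dead or stale, then re-enable
// it. The hostname literal is hypothetical:
//
//   DataNode stopped = StopDataNodeHeartbeat(cluster, "host1.example.com");
//   NUnit.Framework.Assert.IsNotNull("No datanode matched the hostname", stopped);
//   // ... exercise the stale-node behavior ...
//   DataNodeTestUtils.SetHeartbeatsDisabledForTests(stopped, false);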
/// <summary>Corrupt a block on a data node.</summary>
/// <remarks>
/// Corrupt a block on a data node. Replace the block file content with the byte
/// sequence 0, 1, 2, ... BLOCK_SIZE-1.
/// </remarks>
/// <param name="block">the ExtendedBlock to be corrupted</param>
/// <param name="dn">the data node where the block needs to be corrupted</param>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="System.IO.IOException"/>
private static void CorruptBlock(ExtendedBlock block, DataNode dn)
{
    FilePath f = DataNodeTestUtils.GetBlockFile(dn, block.GetBlockPoolId(),
        block.GetLocalBlock());
    RandomAccessFile raFile = new RandomAccessFile(f, "rw");
    byte[] bytes = new byte[(int)BlockSize];
    for (int i = 0; i < BlockSize; i++)
    {
        bytes[i] = unchecked((byte)(i));
    }
    raFile.Write(bytes);
    raFile.Close();
}
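// Illustrative usage, not in the original file: corrupt the first replica of a
// file's first block. The path is hypothetical, and "fs"/"cluster" are assumed
// to be the enclosing test's fields, as in the TC7 test below:
//
//   ExtendedBlock blk = DFSTestUtil.GetFirstBlock(fs, new Path("/toCorrupt"));
//   LocatedBlock lb = fs.dfs.GetNamenode()
//       .GetBlockLocations("/toCorrupt", 0L, 1L).Get(0);
//   DataNode dn = cluster.GetDataNode(lb.GetLocations()[0].GetIpcPort());
//   CorruptBlock(blk, dn);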
/// <exception cref="System.Exception"/> public override void DoAnAction() { foreach (DataNode dn in this._enclosing.cluster.GetDataNodes()) { DataNodeTestUtils.TriggerDeletionReport(dn); DataNodeTestUtils.TriggerHeartbeat(dn); } for (int i = 0; i < 2; i++) { NameNode nn = this._enclosing.cluster.GetNameNode(i); BlockManagerTestUtil.ComputeAllPendingWork(nn.GetNamesystem().GetBlockManager()); } Sharpen.Thread.Sleep(interval); }
/// <exception cref="System.IO.IOException"/> private int GetTrueReplication(MiniDFSCluster cluster, ExtendedBlock block) { int count = 0; foreach (DataNode dn in cluster.GetDataNodes()) { if (DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(block.GetBlockPoolId(), block .GetBlockId()) != null) { count++; } } return(count); }
/// <summary>TC11: Racing rename</summary>
/// <exception cref="System.Exception"/>
private void TestTC11(bool appendToNewBlock)
{
    Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));
    System.Console.Out.WriteLine("p=" + p);
    // a. Create file and write one block of data. Close file.
    int len1 = (int)BlockSize;
    {
        FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
        AppendTestUtil.Write(@out, 0, len1);
        @out.Close();
    }
    // b. Reopen file in "append" mode. Append half a block of data.
    FSDataOutputStream out_1 = appendToNewBlock
        ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
        : fs.Append(p);
    int len2 = (int)BlockSize / 2;
    AppendTestUtil.Write(out_1, len1, len2);
    out_1.Hflush();
    // c. Rename file to file.new.
    Path pnew = new Path(p + ".new");
    NUnit.Framework.Assert.IsTrue(fs.Rename(p, pnew));
    // d. Close file handle that was opened in (b).
    out_1.Close();
    // check block sizes
    long len = fs.GetFileStatus(pnew).GetLen();
    LocatedBlocks locatedblocks = fs.dfs.GetNamenode()
        .GetBlockLocations(pnew.ToString(), 0L, len);
    int numblock = locatedblocks.LocatedBlockCount();
    for (int i = 0; i < numblock; i++)
    {
        LocatedBlock lb = locatedblocks.Get(i);
        ExtendedBlock blk = lb.GetBlock();
        long size = lb.GetBlockSize();
        if (i < numblock - 1)
        {
            NUnit.Framework.Assert.AreEqual(BlockSize, size);
        }
        foreach (DatanodeInfo datanodeinfo in lb.GetLocations())
        {
            DataNode dn = cluster.GetDataNode(datanodeinfo.GetIpcPort());
            Block metainfo = DataNodeTestUtils.GetFSDataset(dn)
                .GetStoredBlock(blk.GetBlockPoolId(), blk.GetBlockId());
            NUnit.Framework.Assert.AreEqual(size, metainfo.GetNumBytes());
        }
    }
}
/// <summary>TC7: Corrupted replicas are present.</summary>
/// <exception cref="System.IO.IOException">an exception might be thrown</exception>
/// <exception cref="System.Exception"/>
private void TestTC7(bool appendToNewBlock)
{
    short repl = 2;
    Path p = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));
    System.Console.Out.WriteLine("p=" + p);
    // a. Create file with replication factor of 2. Write half a block of data. Close file.
    int len1 = (int)(BlockSize / 2);
    {
        FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
        AppendTestUtil.Write(@out, 0, len1);
        @out.Close();
    }
    DFSTestUtil.WaitReplication(fs, p, repl);
    // b. Log into one datanode that has one replica of this block.
    //    Find the block file on this datanode and truncate it to zero size.
    LocatedBlocks locatedblocks = fs.dfs.GetNamenode()
        .GetBlockLocations(p.ToString(), 0L, len1);
    NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
    LocatedBlock lb = locatedblocks.Get(0);
    ExtendedBlock blk = lb.GetBlock();
    NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
    DatanodeInfo[] datanodeinfos = lb.GetLocations();
    NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
    DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
    FilePath f = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(),
        blk.GetLocalBlock());
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" +
        blk.GetNumBytes() + ")");
    NUnit.Framework.Assert.AreEqual(len1, raf.Length());
    raf.SetLength(0);
    raf.Close();
    // c. Open file in "append" mode. Append a new block worth of data. Close file.
    int len2 = (int)BlockSize;
    {
        FSDataOutputStream @out = appendToNewBlock
            ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
            : fs.Append(p);
        AppendTestUtil.Write(@out, len1, len2);
        @out.Close();
    }
    // d. Reopen file and read two blocks' worth of data.
    AppendTestUtil.Check(fs, p, len1 + len2);
}
/// <exception cref="System.IO.IOException"/> private static void RunTest(string testCaseName, bool createFiles, int numInitialStorages , int expectedStoragesAfterTest) { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).StoragesPerDatanode(numInitialStorages ).Build(); cluster.WaitActive(); DataNode dn0 = cluster.GetDataNodes()[0]; // Ensure NN knows about the storage. DatanodeID dnId = dn0.GetDatanodeId(); DatanodeDescriptor dnDescriptor = cluster.GetNamesystem().GetBlockManager().GetDatanodeManager ().GetDatanode(dnId); Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(numInitialStorages )); string bpid = cluster.GetNamesystem().GetBlockPoolId(); DatanodeRegistration dnReg = dn0.GetDNRegistrationForBP(bpid); DataNodeTestUtils.TriggerBlockReport(dn0); if (createFiles) { Path path = new Path("/", testCaseName); DFSTestUtil.CreateFile(cluster.GetFileSystem(), path, 1024, (short)1, unchecked (( int)(0x1BAD5EED))); DataNodeTestUtils.TriggerBlockReport(dn0); } // Generate a fake StorageReport that is missing one storage. StorageReport[] reports = dn0.GetFSDataset().GetStorageReports(bpid); StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1]; System.Array.Copy(reports, 0, prunedReports, 0, prunedReports.Length); // Stop the DataNode and send fake heartbeat with missing storage. cluster.StopDataNode(0); cluster.GetNameNodeRpc().SendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0, 0, null ); // Check that the missing storage was pruned. Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(expectedStoragesAfterTest )); } finally { if (cluster != null) { cluster.Shutdown(); } } }
public virtual void TestStorageWithRemainingCapacity()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = FileSystem.Get(conf);
    Path file1 = null;
    try
    {
        cluster.WaitActive();
        FSNamesystem namesystem = cluster.GetNamesystem();
        string poolId = namesystem.GetBlockPoolId();
        DatanodeRegistration nodeReg = DataNodeTestUtils.GetDNRegistrationForBP(
            cluster.GetDataNodes()[0], poolId);
        DatanodeDescriptor dd = NameNodeAdapter.GetDatanode(namesystem, nodeReg);
        // By default, MiniDFSCluster creates 1 datanode with 2 storages.
        // Assign 64k of remaining capacity to each storage, then try to
        // create a file of 100k.
        foreach (DatanodeStorageInfo storage in dd.GetStorageInfos())
        {
            storage.SetUtilizationForTesting(65536, 0, 65536, 0);
        }
        // sum of the remaining capacity of both storages
        dd.SetRemaining(131072);
        file1 = new Path("testRemainingStorage.dat");
        try
        {
            DFSTestUtil.CreateFile(fs, file1, 102400, 102400, 102400, (short)1,
                unchecked((int)(0x1BAD5EED)));
        }
        catch (RemoteException re)
        {
            GenericTestUtils.AssertExceptionContains("nodes instead of " + "minReplication", re);
        }
    }
    finally
    {
        // Clean up
        NUnit.Framework.Assert.IsTrue(fs.Exists(file1));
        fs.Delete(file1, true);
        NUnit.Framework.Assert.IsTrue(!fs.Exists(file1));
        cluster.Shutdown();
    }
}
public virtual void TestWriteToRbw()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).Build();
    try
    {
        cluster.WaitActive();
        DataNode dn = cluster.GetDataNodes()[0];
        FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.GetFSDataset(dn);
        // set up the replicas map
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        ExtendedBlock[] blocks = Setup(bpid, dataSet);
        // test writeToRbw
        TestWriteToRbw(dataSet, blocks);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <exception cref="System.IO.IOException"/> private void TestDataNodeRedirect(Path path) { // Create the file if (hdfs.Exists(path)) { hdfs.Delete(path, true); } FSDataOutputStream @out = hdfs.Create(path, (short)1); @out.WriteBytes("0123456789"); @out.Close(); // Get the path's block location so we can determine // if we were redirected to the right DN. BlockLocation[] locations = hdfs.GetFileBlockLocations(path, 0, 10); string xferAddr = locations[0].GetNames()[0]; // Connect to the NN to get redirected Uri u = hftpFs.GetNamenodeURL("/data" + ServletUtil.EncodePath(path.ToUri().GetPath ()), "ugi=userx,groupy"); HttpURLConnection conn = (HttpURLConnection)u.OpenConnection(); HttpURLConnection.SetFollowRedirects(true); conn.Connect(); conn.GetInputStream(); bool @checked = false; // Find the datanode that has the block according to locations // and check that the URL was redirected to this DN's info port foreach (DataNode node in cluster.GetDataNodes()) { DatanodeRegistration dnR = DataNodeTestUtils.GetDNRegistrationForBP(node, blockPoolId ); if (dnR.GetXferAddr().Equals(xferAddr)) { @checked = true; NUnit.Framework.Assert.AreEqual(dnR.GetInfoPort(), conn.GetURL().Port); } } NUnit.Framework.Assert.IsTrue("The test never checked that location of " + "the block and hftp desitnation are the same" , @checked); }
public virtual void TestRBWReportArrivesAfterEdits()
{
    CountDownLatch brFinished = new CountDownLatch(1);
    // The delayer counts down brFinished to inform the test that our block
    // report went through.
    GenericTestUtils.DelayAnswer delayer = new _DelayAnswer_521(brFinished, Log);
    FSDataOutputStream @out = fs.Create(TestFilePath);
    try
    {
        AppendTestUtil.Write(@out, 0, 10);
        @out.Hflush();
        DataNode dn = cluster.GetDataNodes()[0];
        DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(dn, nn2);
        Org.Mockito.Mockito.DoAnswer(delayer).When(spy).BlockReport(
            Org.Mockito.Mockito.AnyObject<DatanodeRegistration>(),
            Org.Mockito.Mockito.AnyString(),
            Org.Mockito.Mockito.AnyObject<StorageBlockReport[]>(),
            Org.Mockito.Mockito.AnyObject<BlockReportContext>());
        dn.ScheduleAllBlockReport(0);
        delayer.WaitForCall();
    }
    finally
    {
        IOUtils.CloseStream(@out);
    }
    cluster.TransitionToStandby(0);
    cluster.TransitionToActive(1);
    delayer.Proceed();
    brFinished.Await();
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
    BlockManagerTestUtil.UpdateState(nn2.GetNamesystem().GetBlockManager());
    NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetCorruptReplicaBlocks());
    NUnit.Framework.Assert.AreEqual(0, nn2.GetNamesystem().GetCorruptReplicaBlocks());
    DFSTestUtil.ReadFile(fs, TestFilePath);
}
public virtual void TestBlockSynchronization()
{
    int OrgFileSize = 3000;
    Configuration conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
    cluster.WaitActive();
    // create a file
    DistributedFileSystem dfs = cluster.GetFileSystem();
    string filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
    NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
    DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
    // get block info for the last block
    LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(
        dfs.dfs.GetNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
    NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
    // connect to the data nodes
    DataNode[] datanodes = new DataNode[ReplicationNum];
    for (int i = 0; i < ReplicationNum; i++)
    {
        datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
        NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
    }
    // verify block info
    ExtendedBlock lastblock = locatedblock.GetBlock();
    DataNode.Log.Info("newblocks=" + lastblock);
    for (int i_1 = 0; i_1 < ReplicationNum; i_1++)
    {
        CheckMetaInfo(lastblock, datanodes[i_1]);
    }
    DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName,
        new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Append)));
    // expire the lease to trigger block recovery.
    WaitLeaseRecovery(cluster);
    Block[] updatedmetainfo = new Block[ReplicationNum];
    long oldSize = lastblock.GetNumBytes();
    lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(
        dfs.dfs.GetNamenode(), filestr).GetBlock();
    long currentGS = lastblock.GetGenerationStamp();
    for (int i_2 = 0; i_2 < ReplicationNum; i_2++)
    {
        updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2])
            .GetStoredBlock(lastblock.GetBlockPoolId(), lastblock.GetBlockId());
        NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(),
            updatedmetainfo[i_2].GetBlockId());
        NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
        NUnit.Framework.Assert.AreEqual(currentGS,
            updatedmetainfo[i_2].GetGenerationStamp());
    }
    // verify that lease recovery does not occur when the namenode is in safemode
    System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    dfs.Create(filepath, (short)1);
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
    WaitLeaseRecovery(cluster);
    // verify that we still cannot recover the lease
    LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());
    NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1",
        lm.CountLease() == 1);
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
}
/// <summary>
/// Regression test for HDFS-7960.<p/>
/// Shutting down a datanode, removing a storage directory, and restarting
/// the DataNode should not produce zombie storages.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRemovingStorageDoesNotProduceZombies()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
    int NumStoragesPerDn = 2;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
        .StoragesPerDatanode(NumStoragesPerDn).Build();
    try
    {
        cluster.WaitActive();
        foreach (DataNode dn in cluster.GetDataNodes())
        {
            NUnit.Framework.Assert.AreEqual(NumStoragesPerDn,
                cluster.GetNamesystem().GetBlockManager().GetDatanodeManager()
                    .GetDatanode(dn.GetDatanodeId()).GetStorageInfos().Length);
        }
        // Create a file which will end up on all 3 datanodes.
        Path TestPath = new Path("/foo1");
        DistributedFileSystem fs = cluster.GetFileSystem();
        DFSTestUtil.CreateFile(fs, TestPath, 1024, (short)3, unchecked((int)(0xcafecafe)));
        foreach (DataNode dn_1 in cluster.GetDataNodes())
        {
            DataNodeTestUtils.TriggerBlockReport(dn_1);
        }
        ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, new Path("/foo1"));
        cluster.GetNamesystem().WriteLock();
        string storageIdToRemove;
        string datanodeUuid;
        // Find the first storage which this block is in.
        try
        {
            IEnumerator<DatanodeStorageInfo> storageInfoIter = cluster.GetNamesystem()
                .GetBlockManager().GetStorages(block.GetLocalBlock()).GetEnumerator();
            NUnit.Framework.Assert.IsTrue(storageInfoIter.HasNext());
            DatanodeStorageInfo info = storageInfoIter.Next();
            storageIdToRemove = info.GetStorageID();
            datanodeUuid = info.GetDatanodeDescriptor().GetDatanodeUuid();
        }
        finally
        {
            cluster.GetNamesystem().WriteUnlock();
        }
        // Find the DataNode which holds that first storage.
        DataNode datanodeToRemoveStorageFrom;
        int datanodeToRemoveStorageFromIdx = 0;
        while (true)
        {
            if (datanodeToRemoveStorageFromIdx >= cluster.GetDataNodes().Count)
            {
                NUnit.Framework.Assert.Fail("failed to find datanode with uuid " + datanodeUuid);
                datanodeToRemoveStorageFrom = null;
                break;
            }
            DataNode dn_2 = cluster.GetDataNodes()[datanodeToRemoveStorageFromIdx];
            if (dn_2.GetDatanodeUuid().Equals(datanodeUuid))
            {
                datanodeToRemoveStorageFrom = dn_2;
                break;
            }
            datanodeToRemoveStorageFromIdx++;
        }
        // Find the volume within the datanode which holds that first storage.
        IList<FsVolumeSpi> volumes = datanodeToRemoveStorageFrom.GetFSDataset().GetVolumes();
        NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, volumes.Count);
        string volumeDirectoryToRemove = null;
        foreach (FsVolumeSpi volume in volumes)
        {
            if (volume.GetStorageID().Equals(storageIdToRemove))
            {
                volumeDirectoryToRemove = volume.GetBasePath();
            }
        }
        // Shut down the datanode and remove the volume.
        // Replace the volume directory with a regular file, which will
        // cause a volume failure. (If we merely removed the directory,
        // it would be re-initialized with a new storage ID.)
        NUnit.Framework.Assert.IsNotNull(volumeDirectoryToRemove);
        datanodeToRemoveStorageFrom.Shutdown();
        FileUtil.FullyDelete(new FilePath(volumeDirectoryToRemove));
        FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
        try
        {
            fos.Write(1);
        }
        finally
        {
            fos.Close();
        }
        cluster.RestartDataNode(datanodeToRemoveStorageFromIdx);
        // Wait for the NameNode to remove the storage.
        Log.Info("waiting for the datanode to remove " + storageIdToRemove);
        GenericTestUtils.WaitFor(new _Supplier_227(cluster, datanodeToRemoveStorageFrom,
            storageIdToRemove, NumStoragesPerDn), 10, 30000);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestPendingAndInvalidate()
{
    Configuration Conf = new HdfsConfiguration();
    Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
    Conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DfsReplicationInterval);
    Conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, DfsReplicationInterval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf)
        .NumDataNodes(DatanodeCount).Build();
    cluster.WaitActive();
    FSNamesystem namesystem = cluster.GetNamesystem();
    BlockManager bm = namesystem.GetBlockManager();
    DistributedFileSystem fs = cluster.GetFileSystem();
    try
    {
        // 1. create a file
        Path filePath = new Path("/tmp.txt");
        DFSTestUtil.CreateFile(fs, filePath, 1024, (short)3, 0L);
        // 2. disable the heartbeats
        foreach (DataNode dn in cluster.GetDataNodes())
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
        }
        // 3. mark a couple of blocks as corrupt
        LocatedBlock block = NameNodeAdapter.GetBlockLocations(
            cluster.GetNameNode(), filePath.ToString(), 0, 1).Get(0);
        cluster.GetNamesystem().WriteLock();
        try
        {
            bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0],
                "STORAGE_ID", "TEST");
            bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[1],
                "STORAGE_ID", "TEST");
        }
        finally
        {
            cluster.GetNamesystem().WriteUnlock();
        }
        BlockManagerTestUtil.ComputeAllPendingWork(bm);
        BlockManagerTestUtil.UpdateState(bm);
        NUnit.Framework.Assert.AreEqual(1L, bm.GetPendingReplicationBlocksCount());
        NUnit.Framework.Assert.AreEqual(2,
            bm.pendingReplications.GetNumReplicas(block.GetBlock().GetLocalBlock()));
        // 4. delete the file
        fs.Delete(filePath, true);
        // retry at most 10 times, each time sleeping for 1s. Note that 10s is much
        // less than the default pending record timeout (5~10 min)
        int retries = 10;
        long pendingNum = bm.GetPendingReplicationBlocksCount();
        while (pendingNum != 0 && retries-- > 0)
        {
            Sharpen.Thread.Sleep(1000);  // let the NN do the deletion
            BlockManagerTestUtil.UpdateState(bm);
            pendingNum = bm.GetPendingReplicationBlocksCount();
        }
        NUnit.Framework.Assert.AreEqual(0L, pendingNum);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestBlockReceived()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
        cluster.WaitActive();
        DistributedFileSystem hdfs = cluster.GetFileSystem();
        FSNamesystem fsn = cluster.GetNamesystem();
        BlockManager blkManager = fsn.GetBlockManager();
        string file = "/tmp.txt";
        Path filePath = new Path(file);
        short replFactor = 1;
        DFSTestUtil.CreateFile(hdfs, filePath, 1024L, replFactor, 0);
        // temporarily stop the heartbeats
        AList<DataNode> datanodes = cluster.GetDataNodes();
        for (int i = 0; i < DatanodeCount; i++)
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i], true);
        }
        hdfs.SetReplication(filePath, (short)DatanodeCount);
        BlockManagerTestUtil.ComputeAllPendingWork(blkManager);
        NUnit.Framework.Assert.AreEqual(1, blkManager.pendingReplications.Size());
        INodeFile fileNode = fsn.GetFSDirectory().GetINode4Write(file).AsFile();
        Block[] blocks = fileNode.GetBlocks();
        NUnit.Framework.Assert.AreEqual(DatanodeCount - 1,
            blkManager.pendingReplications.GetNumReplicas(blocks[0]));
        LocatedBlock locatedBlock = hdfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
        DatanodeInfo existingDn = (locatedBlock.GetLocations())[0];
        int reportDnNum = 0;
        string poolId = cluster.GetNamesystem().GetBlockPoolId();
        // let two datanodes (other than the one that already has the data)
        // report to the NN
        for (int i_1 = 0; i_1 < DatanodeCount && reportDnNum < 2; i_1++)
        {
            if (!datanodes[i_1].GetDatanodeId().Equals(existingDn))
            {
                DatanodeRegistration dnR = datanodes[i_1].GetDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[]
                {
                    new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                        new ReceivedDeletedBlockInfo[]
                        {
                            new ReceivedDeletedBlockInfo(blocks[0],
                                ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock,
                                string.Empty)
                        })
                };
                cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        NUnit.Framework.Assert.AreEqual(DatanodeCount - 3,
            blkManager.pendingReplications.GetNumReplicas(blocks[0]));
        // let the same datanodes report again; duplicate RECEIVED_BLOCK reports
        // must not change the pending-replication count. Reset the counter so
        // this loop actually runs (without the reset it would be dead code).
        reportDnNum = 0;
        for (int i_2 = 0; i_2 < DatanodeCount && reportDnNum < 2; i_2++)
        {
            if (!datanodes[i_2].GetDatanodeId().Equals(existingDn))
            {
                DatanodeRegistration dnR = datanodes[i_2].GetDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[]
                {
                    new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                        new ReceivedDeletedBlockInfo[]
                        {
                            new ReceivedDeletedBlockInfo(blocks[0],
                                ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock,
                                string.Empty)
                        })
                };
                cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        NUnit.Framework.Assert.AreEqual(DatanodeCount - 3,
            blkManager.pendingReplications.GetNumReplicas(blocks[0]));
        // re-enable the heartbeats for the datanode that has data
        for (int i_3 = 0; i_3 < DatanodeCount; i_3++)
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i_3], false);
            DataNodeTestUtils.TriggerHeartbeat(datanodes[i_3]);
        }
        Sharpen.Thread.Sleep(5000);
        NUnit.Framework.Assert.AreEqual(0, blkManager.pendingReplications.Size());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}