/// <summary>
/// Verifies that a data volume removed via DataNode reconfiguration can be
/// added back afterwards — this only works if the removal step cleaned up
/// all block-pool-level metadata for the removed directory.
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Sharpen.TimeoutException"/>
/// <exception cref="System.Exception"/>
/// <exception cref="Org.Apache.Hadoop.Conf.ReconfigurationException"/>
public virtual void TestAddBackRemovedVolume()
{
    StartDFSCluster(1, 2);
    // Create some data on every volume.
    CreateFile(new Path("/test"), 32);
    DataNode dn = cluster.GetDataNodes()[0];
    Configuration conf = dn.GetConf();
    string oldDataDir = conf.Get(DFSConfigKeys.DfsDatanodeDataDirKey);
    // Split once instead of once per element (the original called Split twice).
    string[] dataDirs = oldDataDir.Split(",");
    string keepDataDir = dataDirs[0];
    string removeDataDir = dataDirs[1];
    // Drop the second volume from the datanode's configuration.
    dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, keepDataDir);
    for (int i = 0; i < cluster.GetNumNameNodes(); i++)
    {
        string bpid = cluster.GetNamesystem(i).GetBlockPoolId();
        BlockPoolSliceStorage bpsStorage = dn.GetStorage().GetBPStorage(bpid);
        // Make sure that there is no block pool level storage under removeDataDir.
        for (int j = 0; j < bpsStorage.GetNumStorageDirs(); j++)
        {
            Storage.StorageDirectory sd = bpsStorage.GetStorageDir(j);
            NUnit.Framework.Assert.IsFalse(sd.GetRoot().GetAbsolutePath().StartsWith(
                new FilePath(removeDataDir).GetAbsolutePath()));
        }
        // NUnit's convention is AreEqual(expected, actual); the original had the
        // arguments swapped, which produces a misleading failure message.
        NUnit.Framework.Assert.AreEqual(1, dn.GetStorage().GetBPStorage(bpid).GetNumStorageDirs());
    }
    // Bring the removed directory back. This only succeeds if all metadata about
    // this directory was removed in the previous step.
    dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, oldDataDir);
}
/// <summary>
/// Regression test for a deleted block being wrongly re-reported across an HA
/// failover: creates a single-replica file, moves its only replica from dn0 to
/// dn1 via replaceBlock, reports the deletion from dn0, then fails over from
/// namenode 0 to namenode 1 and asserts the new active namenode sees exactly
/// one location for the block.
/// </summary>
public virtual void TestDeletedBlockWhenAddBlockIsInEdit()
{
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(1).Build();
    DFSClient client = null;
    try
    {
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual("Number of namenodes is not 2", 2, cluster.GetNumNameNodes());
        // Transitioning the namenode 0 to active.
        cluster.TransitionToActive(0);
        NUnit.Framework.Assert.IsTrue("Namenode 0 should be in active state", cluster.GetNameNode(0).IsActiveState());
        NUnit.Framework.Assert.IsTrue("Namenode 1 should be in standby state", cluster.GetNameNode(1).IsStandbyState());
        // Trigger heartbeat to mark DatanodeStorageInfo#heartbeatedSinceFailover
        // to true.
        DataNodeTestUtils.TriggerHeartbeat(cluster.GetDataNodes()[0]);
        FileSystem fs = cluster.GetFileSystem(0);
        // Trigger blockReport to mark DatanodeStorageInfo#blockContentsStale
        // to false.
        cluster.GetDataNodes()[0].TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental(false).Build());
        Path fileName = new Path("/tmp.txt");
        // Create a file with one block.
        DFSTestUtil.CreateFile(fs, fileName, 10L, (short)1, 1234L);
        DFSTestUtil.WaitReplication(fs, fileName, (short)1);
        client = new DFSClient(cluster.GetFileSystem(0).GetUri(), conf);
        IList<LocatedBlock> locatedBlocks = client.GetNamenode().GetBlockLocations("/tmp.txt", 0, 10L).GetLocatedBlocks();
        // AreEqual instead of IsTrue(x == y): a failure then reports both the
        // expected and actual values, consistent with the assertions below.
        NUnit.Framework.Assert.AreEqual(1, locatedBlocks.Count);
        NUnit.Framework.Assert.AreEqual(1, locatedBlocks[0].GetLocations().Length);
        // Add a second datanode to the cluster.
        cluster.StartDataNodes(conf, 1, true, null, null, null, null);
        NUnit.Framework.Assert.AreEqual("Number of datanodes should be 2", 2, cluster.GetDataNodes().Count);
        DataNode dn0 = cluster.GetDataNodes()[0];
        DataNode dn1 = cluster.GetDataNodes()[1];
        string activeNNBPId = cluster.GetNamesystem(0).GetBlockPoolId();
        DatanodeDescriptor sourceDnDesc = NameNodeAdapter.GetDatanode(cluster.GetNamesystem(0), dn0.GetDNRegistrationForBP(activeNNBPId));
        DatanodeDescriptor destDnDesc = NameNodeAdapter.GetDatanode(cluster.GetNamesystem(0), dn1.GetDNRegistrationForBP(activeNNBPId));
        ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
        // Move the single replica from dn0 to dn1.
        Log.Info("replaceBlock: " + ReplaceBlock(block, (DatanodeInfo)sourceDnDesc, (DatanodeInfo)sourceDnDesc, (DatanodeInfo)destDnDesc));
        // Waiting for the FsDatasetAsyncDiskService to delete the block.
        // NOTE(review): a fixed 3s sleep is inherently flaky; a wait-for-condition
        // helper would be more robust if one is available in this test harness.
        Sharpen.Thread.Sleep(3000);
        // Triggering the incremental block report to report the deleted block to
        // namenode.
        cluster.GetDataNodes()[0].TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental(true).Build());
        cluster.TransitionToStandby(0);
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue("Namenode 1 should be in active state", cluster.GetNameNode(1).IsActiveState());
        NUnit.Framework.Assert.IsTrue("Namenode 0 should be in standby state", cluster.GetNameNode(0).IsStandbyState());
        client.Close();
        // Opening a new client for the new active namenode.
        client = new DFSClient(cluster.GetFileSystem(1).GetUri(), conf);
        IList<LocatedBlock> locatedBlocks1 = client.GetNamenode().GetBlockLocations("/tmp.txt", 0, 10L).GetLocatedBlocks();
        NUnit.Framework.Assert.AreEqual(1, locatedBlocks1.Count);
        NUnit.Framework.Assert.AreEqual("The block should be only on 1 datanode ", 1, locatedBlocks1[0].GetLocations().Length);
    }
    finally
    {
        IOUtils.Cleanup(null, client);
        cluster.Shutdown();
    }
}