/// <summary>
/// Verifies that a DataNode configured to tolerate one failed volume starts
/// up when one of its two data directories cannot be used, and that its
/// storage info reflects only the healthy directory.
/// </summary>
public virtual void TestValidVolumesAtStartup()
{
    // The chmod-based failure injection below only works on POSIX systems.
    Assume.AssumeTrue(!Runtime.GetProperty("os.name").StartsWith("Windows"));
    // Make sure no DNs are running.
    cluster.ShutdownDataNodes();
    // Allow the DN to tolerate exactly one failed volume.
    conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
    // Subdirectories under distinct parents are used so that only a single
    // data dir's parent injects a failure.
    FilePath baseDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "badData");
    FilePath goodParent = new FilePath(baseDir, "data1");
    FilePath goodDir = new FilePath(goodParent, "1");
    goodDir.Mkdirs();
    // Force an IOE to occur on one of the dfs.data.dir.
    FilePath badParent = new FilePath(baseDir, "data2");
    PrepareDirToFail(badParent);
    FilePath badDir = new FilePath(badParent, "2");
    // Start one DN with a manually managed pair of data dirs: one good, one bad.
    string dataDirs = goodDir.GetPath() + "," + badDir.GetPath();
    conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDirs);
    cluster.StartDataNodes(conf, 1, false, null, null);
    cluster.WaitActive();
    try
    {
        NUnit.Framework.Assert.IsTrue("The DN should have started up fine.", cluster.IsDataNodeUp());
        DataNode dataNode = cluster.GetDataNodes()[0];
        string storageInfo = DataNodeTestUtils.GetFSDataset(dataNode).GetStorageInfo();
        // Only the healthy directory should appear in the DN's storage info.
        NUnit.Framework.Assert.IsTrue("The DN should have started with this directory",
            storageInfo.Contains(goodDir.GetPath()));
        NUnit.Framework.Assert.IsFalse("The DN shouldn't have a bad directory.",
            storageInfo.Contains(badDir.GetPath()));
    }
    finally
    {
        cluster.ShutdownDataNodes();
        // Restore permissions on the failure-injected dir so cleanup can proceed.
        FileUtil.Chmod(badParent.ToString(), "755");
    }
}
/// <summary>
/// Verifies that when a block is duplicated onto DEFAULT storage, the
/// directory scanner resolves the conflict by removing the transient
/// (RAM_DISK) replica and keeping the persistent one.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestDeleteBlockOnTransientStorage()
{
    // One DN with both a RAM_DISK and a DEFAULT volume.
    cluster = new MiniDFSCluster.Builder(Conf).StorageTypes(new StorageType[] { StorageType
        .RamDisk, StorageType.Default }).NumDataNodes(1).Build();
    try
    {
        cluster.WaitActive();
        bpid = cluster.GetNamesystem().GetBlockPoolId();
        DataNode dataNode = cluster.GetDataNodes()[0];
        fds = DataNodeTestUtils.GetFSDataset(cluster.GetDataNodes()[0]);
        client = cluster.GetFileSystem().GetClient();
        scanner = new DirectoryScanner(dataNode, fds, Conf);
        // Retain scan diffs so the Scan(...) helper can assert on them.
        scanner.SetRetainDiffs(true);
        // Stop the lazy writer so it does not persist the RAM_DISK replica
        // behind the scanner's back during this test.
        FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
        // Create a file on RAM_DISK
        IList<LocatedBlock> blocks = CreateFile(GenericTestUtils.GetMethodName(), BlockLength
            , true);
        // Ensure no difference between volumeMap and disk.
        Scan(1, 0, 0, 0, 0, 0);
        // Make a copy of the block on DEFAULT storage and ensure that it is
        // picked up by the scanner.
        DuplicateBlock(blocks[0].GetBlock().GetBlockId());
        Scan(2, 1, 0, 0, 0, 0, 1);
        // Ensure that the copy on RAM_DISK was deleted.
        VerifyStorageType(blocks[0].GetBlock().GetBlockId(), false);
        Scan(1, 0, 0, 0, 0, 0);
    }
    finally
    {
        // Shut the scanner down first; null the fields so a shared teardown
        // (if any) does not double-shutdown.
        if (scanner != null)
        {
            scanner.Shutdown();
            scanner = null;
        }
        cluster.Shutdown();
        cluster = null;
    }
}
/// <summary>
/// Exercises the DirectoryScanner against 14 ordered scenarios of
/// disk/memory divergence (missing meta files, missing block files, orphan
/// files, truncations), checking the scanner's reported diff after each one.
/// </summary>
/// <param name="parallelism">number of scanner threads to configure</param>
/// <exception cref="System.Exception"/>
public virtual void RunTest(int parallelism)
{
    cluster = new MiniDFSCluster.Builder(Conf).Build();
    try
    {
        cluster.WaitActive();
        bpid = cluster.GetNamesystem().GetBlockPoolId();
        fds = DataNodeTestUtils.GetFSDataset(cluster.GetDataNodes()[0]);
        client = cluster.GetFileSystem().GetClient();
        Conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanThreadsKey, parallelism);
        DataNode dataNode = cluster.GetDataNodes()[0];
        scanner = new DirectoryScanner(dataNode, fds, Conf);
        // Retain diffs so the Scan(...) helper can assert on them.
        scanner.SetRetainDiffs(true);
        // Add files with 100 blocks
        CreateFile(GenericTestUtils.GetMethodName(), BlockLength * 100, false);
        // NOTE(review): Scan's positional arguments appear to be expected
        // counts of (totalBlocks, diffSize, missingMeta, missingBlock,
        // missingMemory, mismatch) — confirm against Scan's declaration.
        long totalBlocks = 100;
        // Test1: No difference between volumeMap and disk
        Scan(100, 0, 0, 0, 0, 0);
        // Test2: block metafile is missing
        long blockId = DeleteMetaFile();
        Scan(totalBlocks, 1, 1, 0, 0, 1);
        VerifyGenStamp(blockId, GenerationStamp.GrandfatherGenerationStamp);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test3: block file is missing
        blockId = DeleteBlockFile();
        Scan(totalBlocks, 1, 0, 1, 0, 0);
        totalBlocks--;
        VerifyDeletion(blockId);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test4: A block file exists for which there is no metafile and
        // a block in memory
        blockId = CreateBlockFile();
        totalBlocks++;
        Scan(totalBlocks, 1, 1, 0, 1, 0);
        VerifyAddition(blockId, GenerationStamp.GrandfatherGenerationStamp, 0);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test5: A metafile exists for which there is no block file and
        // a block in memory
        blockId = CreateMetaFile();
        Scan(totalBlocks + 1, 1, 0, 1, 1, 0);
        // The scanner should have deleted the orphan meta file.
        FilePath metafile = new FilePath(GetMetaFile(blockId));
        NUnit.Framework.Assert.IsTrue(!metafile.Exists());
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test6: A block file and metafile exists for which there is no block in
        // memory
        blockId = CreateBlockMetaFile();
        totalBlocks++;
        Scan(totalBlocks, 1, 0, 0, 1, 0);
        VerifyAddition(blockId, DefaultGenStamp, 0);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test7: Delete bunch of metafiles
        for (int i = 0; i < 10; i++)
        {
            blockId = DeleteMetaFile();
        }
        Scan(totalBlocks, 10, 10, 0, 0, 10);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test8: Delete bunch of block files
        for (int i_1 = 0; i_1 < 10; i_1++)
        {
            blockId = DeleteBlockFile();
        }
        Scan(totalBlocks, 10, 0, 10, 0, 0);
        totalBlocks -= 10;
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test9: create a bunch of blocks files
        for (int i_2 = 0; i_2 < 10; i_2++)
        {
            blockId = CreateBlockFile();
        }
        totalBlocks += 10;
        Scan(totalBlocks, 10, 10, 0, 10, 0);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test10: create a bunch of metafiles
        for (int i_3 = 0; i_3 < 10; i_3++)
        {
            blockId = CreateMetaFile();
        }
        Scan(totalBlocks + 10, 10, 0, 10, 10, 0);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test11: create a bunch block files and meta files
        for (int i_4 = 0; i_4 < 10; i_4++)
        {
            blockId = CreateBlockMetaFile();
        }
        totalBlocks += 10;
        Scan(totalBlocks, 10, 0, 0, 10, 0);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test12: truncate block files to test block length mismatch
        for (int i_5 = 0; i_5 < 10; i_5++)
        {
            TruncateBlockFile();
        }
        Scan(totalBlocks, 10, 0, 0, 0, 10);
        Scan(totalBlocks, 0, 0, 0, 0, 0);
        // Test13: all the conditions combined
        CreateMetaFile();
        CreateBlockFile();
        CreateBlockMetaFile();
        DeleteMetaFile();
        DeleteBlockFile();
        TruncateBlockFile();
        Scan(totalBlocks + 3, 6, 2, 2, 3, 2);
        Scan(totalBlocks + 1, 0, 0, 0, 0, 0);
        // Test14: validate clean shutdown of DirectoryScanner
        ////assertTrue(scanner.getRunStatus()); //assumes "real" FSDataset, not sim
        scanner.Shutdown();
        NUnit.Framework.Assert.IsFalse(scanner.GetRunStatus());
    }
    finally
    {
        // Shutdown is idempotent via the null guard; Test14 may already have
        // stopped the scanner.
        if (scanner != null)
        {
            scanner.Shutdown();
            scanner = null;
        }
        cluster.Shutdown();
    }
}
/// <summary>Test write a file, verifies and closes it.</summary>
/// <remarks>
/// Test write a file, verifies and closes it. Then a couple of random blocks
/// is removed and BlockReport is forced; the FSNamesystem is pushed to
/// recalculate required DN's activities such as replications and so on.
/// The number of missing and under-replicated blocks should be the same in
/// case of a single-DN cluster.
/// </remarks>
/// <exception cref="System.IO.IOException">in case of errors</exception>
public virtual void BlockReport_02()
{
    string MethodName = GenericTestUtils.GetMethodName();
    Log.Info("Running test " + MethodName);
    Path filePath = new Path("/" + MethodName + ".dat");
    DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
    // mock around with newly created blocks and delete some
    FilePath dataDir = new FilePath(cluster.GetDataDirectory());
    NUnit.Framework.Assert.IsTrue(dataDir.IsDirectory());
    IList<ExtendedBlock> blocks2Remove = new AList<ExtendedBlock>();
    IList<int> removedIndex = new AList<int>();
    IList<LocatedBlock> lBlocks = cluster.GetNameNodeRpc().GetBlockLocations(filePath
        .ToString(), FileStart, FileSize).GetLocatedBlocks();
    // Pick two distinct random block indices to delete.
    // NOTE(review): this loop never terminates if the file produced fewer
    // than 2 blocks — presumably FileSize guarantees at least 2; confirm.
    while (removedIndex.Count != 2)
    {
        int newRemoveIndex = rand.Next(lBlocks.Count);
        if (!removedIndex.Contains(newRemoveIndex))
        {
            removedIndex.AddItem(newRemoveIndex);
        }
    }
    foreach (int aRemovedIndex in removedIndex)
    {
        blocks2Remove.AddItem(lBlocks[aRemovedIndex].GetBlock());
    }
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Number of blocks allocated " + lBlocks.Count);
    }
    DataNode dn0 = cluster.GetDataNodes()[DnN0];
    // Unfinalize each chosen block and remove its on-disk files so the next
    // block report sees them as missing.
    foreach (ExtendedBlock b in blocks2Remove)
    {
        if (Log.IsDebugEnabled())
        {
            Log.Debug("Removing the block " + b.GetBlockName());
        }
        foreach (FilePath f in FindAllFiles(dataDir, new BlockReportTestBase.MyFileFilter
            (this, b.GetBlockName(), true)))
        {
            DataNodeTestUtils.GetFSDataset(dn0).UnfinalizeBlock(b);
            if (!f.Delete())
            {
                Log.Warn("Couldn't delete " + b.GetBlockName());
            }
            else
            {
                Log.Debug("Deleted file " + f.ToString());
            }
        }
    }
    // Give the DN time to notice the deletions before forcing the report.
    WaitTil(TimeUnit.Seconds.ToMillis(DnRescanExtraWait));
    // all blocks belong to the same file, hence same BP
    string poolId = cluster.GetNamesystem().GetBlockPoolId();
    DatanodeRegistration dnR = dn0.GetDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = GetBlockReports(dn0, poolId, false, false);
    SendBlockReports(dnR, poolId, reports);
    // Push the NN to recompute replication work from the new report.
    BlockManagerTestUtil.GetComputedDatanodeWork(cluster.GetNamesystem().GetBlockManager
        ());
    PrintStats();
    // With a single DN, each removed block is both missing and under-replicated.
    NUnit.Framework.Assert.AreEqual("Wrong number of MissingBlocks is found", blocks2Remove
        .Count, cluster.GetNamesystem().GetMissingBlocksCount());
    NUnit.Framework.Assert.AreEqual("Wrong number of UnderReplicatedBlocks is found",
        blocks2Remove.Count, cluster.GetNamesystem().GetUnderReplicatedBlocks());
}