/// <summary>
/// Confirm that FSImage files in all StorageDirectory are the same,
/// and non-empty, and there are the expected number of them.
/// </summary>
/// <param name="fsn">- the FSNamesystem being checked.</param>
/// <param name="numImageDirs">- the configured number of StorageDirectory of type IMAGE.
/// </param>
/// <returns>- the md5 hash of the most recent FSImage files, which must all be the same.
/// </returns>
/// <exception cref="System.Exception">
/// if image files are empty or different,
/// if less than two StorageDirectory are provided, or if the
/// actual number of StorageDirectory is less than configured.
/// </exception>
public static string CheckImages(FSNamesystem fsn, int numImageDirs)
{
    NNStorage storage = fsn.GetFSImage().GetStorage();
    // NNStorage drops any failed StorageDirectory from its storageDirs list,
    // so a count below the configured number means some directory failed Upgrade.
    int actualImageDirs = storage.GetNumStorageDirs(NNStorage.NameNodeDirType.Image);
    NUnit.Framework.Assert.AreEqual("Some StorageDirectories failed Upgrade", numImageDirs, actualImageDirs);
    NUnit.Framework.Assert.IsTrue("Not enough fsimage copies in MiniDFSCluster to test parallel write", numImageDirs > 1);
    // Gather each image SD's "current/" directory: same-named files across the
    // directories must hash identically, and all must agree on the newest image.
    IList<FilePath> currentDirs = FSImageTestUtil.GetCurrentDirs(storage, NNStorage.NameNodeDirType.Image);
    FSImageTestUtil.AssertParallelFilesAreIdentical(currentDirs, Sharpen.Collections.EmptySet<string>());
    FSImageTestUtil.AssertSameNewestImage(currentDirs);
    // Every copy is identical at this point, so hashing the newest image
    // from the first storage directory is representative of them all.
    Storage.StorageDirectory firstImageDir = storage.DirIterator(NNStorage.NameNodeDirType.Image).Next();
    FilePath newestImage = FSImageTestUtil.FindLatestImageFile(firstImageDir);
    string md5Hash = FSImageTestUtil.GetImageFileMD5IgnoringTxId(newestImage);
    System.Console.Error.WriteLine("md5 of " + newestImage + ": " + md5Hash);
    return md5Hash;
}
/// <summary>
/// End-to-end test of the dfs.namenode.name.dir / dfs.namenode.edits.dir
/// configuration keys across four NameNode restarts: (1) a single combined
/// name-and-edits dir, (2) additional separate name and edits dirs added,
/// (3) only the separate dirs, and (4) the (now-emptied) combined dir
/// re-added, which must NOT be read from. After each phase the test writes
/// and verifies files and forces a SecondaryNameNode checkpoint.
/// </summary>
public virtual void TestNameEditsConfigs()
{
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    // Directory layout under base_dir: separate name/edits dirs, a combined
    // name_and_edits dir, and the matching secondary (checkpoint) dirs.
    FilePath newNameDir = new FilePath(base_dir, "name");
    FilePath newEditsDir = new FilePath(base_dir, "edits");
    FilePath nameAndEdits = new FilePath(base_dir, "name_and_edits");
    FilePath checkpointNameDir = new FilePath(base_dir, "secondname");
    FilePath checkpointEditsDir = new FilePath(base_dir, "secondedits");
    FilePath checkpointNameAndEdits = new FilePath(base_dir, "second_name_and_edits");
    // "current/" subdirs used later to assert cross-directory consistency.
    ImmutableList<FilePath> allCurrentDirs = ImmutableList.Of(new FilePath(nameAndEdits
        , "current"), new FilePath(newNameDir, "current"), new FilePath(newEditsDir, "current"
        ), new FilePath(checkpointNameAndEdits, "current"), new FilePath(checkpointNameDir
        , "current"), new FilePath(checkpointEditsDir, "current"));
    // Subset of the above that holds fsimage files (edits-only dirs excluded).
    ImmutableList<FilePath> imageCurrentDirs = ImmutableList.Of(new FilePath(nameAndEdits
        , "current"), new FilePath(newNameDir, "current"), new FilePath(checkpointNameAndEdits
        , "current"), new FilePath(checkpointNameDir, "current"));
    // Phase 1: start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameAndEdits.GetPath
        ());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointNameAndEdits.GetPath
        ());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    // Manage our own dfs directories
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs
        (false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        WriteFile(fileSys, file1, replication);
        CheckFile(fileSys, file1, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        // NOTE(review): if fileSys.Close() throws, the cluster/secondary
        // shutdowns are skipped — consider nested try/finally. Left as-is.
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // Phase 2: start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    NUnit.Framework.Assert.IsTrue(newNameDir.Mkdir());
    NUnit.Framework.Assert.IsTrue(newEditsDir.Mkdir());
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir
        .GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir
        .GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() +
        "," + checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath
        () + "," + checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    // Manage our own dfs directories. Do not format.
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false
        ).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        // file1 must have survived the restart from the shared dir.
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
        CheckFile(fileSys, file1, replication);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file2, replication);
        CheckFile(fileSys, file2, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // All six current/ dirs must now hold identical copies of same-named
    // files (VERSION differs by dir type and is excluded).
    FSImageTestUtil.AssertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.Of("VERSION"
        ));
    FSImageTestUtil.AssertSameNewestImage(imageCurrentDirs);
    // Phase 3: now remove common directory both have and start namenode with
    // separate name and edits dirs
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, newNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath
        ());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false
        ).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
        CheckFile(fileSys, file2, replication);
        CleanupFile(fileSys, file2);
        WriteFile(fileSys, file3, replication);
        CheckFile(fileSys, file3, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // No edit logs in new name dir
    CheckImageAndEditsFilesExistence(newNameDir, true, false);
    CheckImageAndEditsFilesExistence(newEditsDir, false, true);
    CheckImageAndEditsFilesExistence(checkpointNameDir, true, false);
    CheckImageAndEditsFilesExistence(checkpointEditsDir, false, true);
    // Phase 4: add old name_and_edits dir. File system should not read image or edits
    // from old dir
    NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(nameAndEdits, "current"
        )));
    NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(checkpointNameAndEdits
        , "current")));
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir
        .GetPath());
    // NOTE(review): relies on nameAndEdits' implicit ToString here, unlike the
    // explicit .GetPath() used everywhere else — presumably equivalent; verify.
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits + "," + newEditsDir.GetPath
        ());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() +
        "," + checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath
        () + "," + checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false
        ).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        // Only file3 should be visible: the emptied old dirs must be ignored.
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
        CheckFile(fileSys, file3, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // The re-added combined dirs must have been repopulated with both
    // image and edits files by the restart/checkpoint.
    CheckImageAndEditsFilesExistence(nameAndEdits, true, true);
    CheckImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}