/// <summary>
/// Confirm that the FSImage files in all storage directories are identical,
/// non-empty, and present in the expected number.
/// </summary>
/// <param name="fsn">the FSNamesystem being checked.</param>
/// <param name="numImageDirs">the configured number of StorageDirectory of type IMAGE.</param>
/// <returns>the MD5 hash of the most recent FSImage files, which must all be the same.</returns>
/// <exception cref="System.Exception">
/// if image files are empty or differ, if fewer than two StorageDirectory
/// are provided, or if the actual number of StorageDirectory is less than configured.
/// </exception>
public static string CheckImages(FSNamesystem fsn, int numImageDirs)
{
    NNStorage stg = fsn.GetFSImage().GetStorage();
    // Any failed StorageDirectory is removed from the storageDirs list.
    NUnit.Framework.Assert.AreEqual("Some StorageDirectories failed Upgrade",
        numImageDirs, stg.GetNumStorageDirs(NNStorage.NameNodeDirType.Image));
    NUnit.Framework.Assert.IsTrue(
        "Not enough fsimage copies in MiniDFSCluster to test parallel write",
        numImageDirs > 1);
    // List of "current/" directories from each StorageDirectory.
    IList<FilePath> dirs = FSImageTestUtil.GetCurrentDirs(stg, NNStorage.NameNodeDirType.Image);
    // Across directories, all files with the same name should have identical hashes.
    FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, Sharpen.Collections.EmptySet<string>());
    FSImageTestUtil.AssertSameNewestImage(dirs);
    // Return the hash of the newest image file.
    Storage.StorageDirectory firstSd = stg.DirIterator(NNStorage.NameNodeDirType.Image).Next();
    FilePath latestImage = FSImageTestUtil.FindLatestImageFile(firstSd);
    string md5 = FSImageTestUtil.GetImageFileMD5IgnoringTxId(latestImage);
    System.Console.Error.WriteLine("md5 of " + latestImage + ": " + md5);
    return md5;
}
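// A minimal usage sketch (hypothetical, not part of the original suite):
// CheckImages asserts that every IMAGE-type storage directory holds the same
// newest fsimage and returns its MD5, so the hash can be compared across
// restarts. The assumption here is that a plain restart should leave the
// most recent checkpoint image untouched.
private static void CheckImagesUsageSketch(MiniDFSCluster cluster, int numImageDirs)
{
    string md5Before = CheckImages(cluster.GetNamesystem(), numImageDirs);
    cluster.RestartNameNode();
    // The restart replays edits but should not rewrite the checkpoint image.
    string md5After = CheckImages(cluster.GetNamesystem(), numImageDirs);
    NUnit.Framework.Assert.AreEqual("image changed across restart", md5Before, md5After);
}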
/// <exception cref="System.Exception"/>
private void AssertStorageDirsMatch(NameNode nn, BackupNode backup)
{
    // Check that the stored files in the name dirs are identical.
    IList<FilePath> dirs = Lists.NewArrayList(
        FSImageTestUtil.GetCurrentDirs(nn.GetFSImage().GetStorage(), null));
    Sharpen.Collections.AddAll(dirs,
        FSImageTestUtil.GetCurrentDirs(backup.GetFSImage().GetStorage(), null));
    FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, ImmutableSet.Of("VERSION"));
}
/// <exception cref="System.Exception"/>
public static void AssertNNFilesMatch(MiniDFSCluster cluster)
{
    IList<FilePath> curDirs = Lists.NewArrayList();
    Sharpen.Collections.AddAll(curDirs, FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 0));
    Sharpen.Collections.AddAll(curDirs, FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 1));
    // Ignore the seen_txid file, since the newly bootstrapped standby
    // will have a higher seen_txid than the node it bootstrapped from.
    ICollection<string> ignoredFiles = ImmutableSet.Of("seen_txid");
    FSImageTestUtil.AssertParallelFilesAreIdentical(curDirs, ignoredFiles);
}
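// A hypothetical usage sketch (an assumption about the surrounding test
// context, not code from this suite): after re-bootstrapping the standby
// NameNode from the active one, the two nodes' current/ directories should
// match except for seen_txid, which AssertNNFilesMatch ignores.
private static void AssertNNFilesMatchUsageSketch(MiniDFSCluster cluster)
{
    // Re-run bootstrapStandby against NN 1, forcing it to re-download
    // the image from NN 0.
    int rc = BootstrapStandby.Run(new string[] { "-force" }, cluster.GetConfiguration(1));
    NUnit.Framework.Assert.AreEqual("bootstrapStandby should succeed", 0, rc);
    AssertNNFilesMatch(cluster);
}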
public virtual void TestNameEditsConfigs()
{
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    FilePath newNameDir = new FilePath(base_dir, "name");
    FilePath newEditsDir = new FilePath(base_dir, "edits");
    FilePath nameAndEdits = new FilePath(base_dir, "name_and_edits");
    FilePath checkpointNameDir = new FilePath(base_dir, "secondname");
    FilePath checkpointEditsDir = new FilePath(base_dir, "secondedits");
    FilePath checkpointNameAndEdits = new FilePath(base_dir, "second_name_and_edits");
    ImmutableList<FilePath> allCurrentDirs = ImmutableList.Of(
        new FilePath(nameAndEdits, "current"),
        new FilePath(newNameDir, "current"),
        new FilePath(newEditsDir, "current"),
        new FilePath(checkpointNameAndEdits, "current"),
        new FilePath(checkpointNameDir, "current"),
        new FilePath(checkpointEditsDir, "current"));
    ImmutableList<FilePath> imageCurrentDirs = ImmutableList.Of(
        new FilePath(nameAndEdits, "current"),
        new FilePath(newNameDir, "current"),
        new FilePath(checkpointNameAndEdits, "current"),
        new FilePath(checkpointNameDir, "current"));
    // Start the namenode with the same dfs.namenode.name.dir and dfs.namenode.edits.dir.
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    // Manage our own dfs directories.
    cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(NumDataNodes)
        .ManageNameDfsDirs(false)
        .Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        WriteFile(fileSys, file1, replication);
        CheckFile(fileSys, file1, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // Start the namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir.
    conf = new HdfsConfiguration();
    NUnit.Framework.Assert.IsTrue(newNameDir.Mkdir());
    NUnit.Framework.Assert.IsTrue(newEditsDir.Mkdir());
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
        nameAndEdits.GetPath() + "," + newNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey,
        nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey,
        checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey,
        checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    // Manage our own dfs directories. Do not format.
    cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(NumDataNodes)
        .Format(false)
        .ManageNameDfsDirs(false)
        .Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
        CheckFile(fileSys, file1, replication);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file2, replication);
        CheckFile(fileSys, file2, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    FSImageTestUtil.AssertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.Of("VERSION"));
    FSImageTestUtil.AssertSameNewestImage(imageCurrentDirs);
    // Now drop the directory the name and edits configs had in common, and
    // start the namenode with separate name and edits dirs.
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, newNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(NumDataNodes)
        .Format(false)
        .ManageNameDfsDirs(false)
        .Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
        CheckFile(fileSys, file2, replication);
        CleanupFile(fileSys, file2);
        WriteFile(fileSys, file3, replication);
        CheckFile(fileSys, file3, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // No edit logs in the name dirs, and no images in the edits dirs,
    // as verified below (see the helper sketch after this method).
    CheckImageAndEditsFilesExistence(newNameDir, true, false);
    CheckImageAndEditsFilesExistence(newEditsDir, false, true);
    CheckImageAndEditsFilesExistence(checkpointNameDir, true, false);
    CheckImageAndEditsFilesExistence(checkpointEditsDir, false, true);
    // Add back the old name_and_edits dir. The file system should not read
    // the image or edits from the old dir.
    NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(nameAndEdits, "current")));
    NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(checkpointNameAndEdits, "current")));
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
        nameAndEdits.GetPath() + "," + newNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey,
        nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey,
        checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey,
        checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(NumDataNodes)
        .Format(false)
        .ManageNameDfsDirs(false)
        .Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
        CheckFile(fileSys, file3, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    CheckImageAndEditsFilesExistence(nameAndEdits, true, true);
    CheckImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}
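// A minimal sketch of the CheckImageAndEditsFilesExistence helper used above.
// The real implementation lives elsewhere in this suite; this version is an
// assumption about its contract: scan <dir>/current and assert that fsimage_*
// and edits_* files are present or absent as requested. The fsimage_/edits_
// name prefixes are the standard NNStorage file-name conventions.
private void CheckImageAndEditsFilesExistenceSketch(
    FilePath dir, bool shouldHaveImages, bool shouldHaveEdits)
{
    bool hasImage = false;
    bool hasEdits = false;
    FilePath currentDir = new FilePath(dir, "current");
    FilePath[] files = currentDir.ListFiles();
    NUnit.Framework.Assert.IsNotNull("no current/ dir under " + dir, files);
    foreach (FilePath f in files)
    {
        if (f.GetName().StartsWith("fsimage_"))
        {
            hasImage = true;
        }
        else if (f.GetName().StartsWith("edits_"))
        {
            hasEdits = true;
        }
    }
    NUnit.Framework.Assert.AreEqual("image files in " + dir, shouldHaveImages, hasImage);
    NUnit.Framework.Assert.AreEqual("edits files in " + dir, shouldHaveEdits, hasEdits);
}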
/// <exception cref="System.Exception"/>
internal virtual void TestCheckpoint(HdfsServerConstants.StartupOption op)
{
    Path file1 = new Path("/checkpoint.dat");
    Path file2 = new Path("/checkpoint2.dat");
    Path file3 = new Path("/backup.dat");
    Configuration conf = new HdfsConfiguration();
    HAUtil.SetAllowStandbyReads(conf, true);
    short replication = (short)conf.GetInt("dfs.replication", 3);
    int numDatanodes = Math.Max(3, replication);
    conf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "localhost:0");
    conf.Set(DFSConfigKeys.DfsBlockreportInitialDelayKey, "0");
    // Disable the block scanner.
    conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    BackupNode backup = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
        fileSys = cluster.GetFileSystem();
        //
        // Verify that 'format' really blew away all pre-existing files.
        //
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
        //
        // Create file1.
        //
        NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(file1));
        //
        // Take a checkpoint.
        //
        long txid = cluster.GetNameNodeRpc().GetTransactionID();
        backup = StartBackupNode(conf, op, 1);
        WaitCheckpointDone(cluster, txid);
    }
    catch (IOException e)
    {
        Log.Error("Error in TestBackupNode:", e);
        NUnit.Framework.Assert.Fail(e.GetLocalizedMessage());
    }
    finally
    {
        if (backup != null)
        {
            backup.Stop();
        }
        if (fileSys != null)
        {
            fileSys.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
    FilePath nnCurDir = new FilePath(BaseDir, "name1/current/");
    FilePath bnCurDir = new FilePath(GetBackupNodeDir(op, 1), "/current/");
    FSImageTestUtil.AssertParallelFilesAreIdentical(
        ImmutableList.Of(bnCurDir, nnCurDir), ImmutableSet.Of<string>("VERSION"));
    try
    {
        //
        // Restart the cluster and verify that file1 still exists.
        //
        cluster = new MiniDFSCluster.Builder(conf)
            .NumDataNodes(numDatanodes)
            .Format(false)
            .Build();
        fileSys = cluster.GetFileSystem();
        // Check that file1 still exists.
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
        fileSys.Delete(file1, true);
        // Create the new file file2.
        fileSys.Mkdirs(file2);
        //
        // Take a checkpoint.
        //
        long txid = cluster.GetNameNodeRpc().GetTransactionID();
        backup = StartBackupNode(conf, op, 1);
        WaitCheckpointDone(cluster, txid);
        for (int i = 0; i < 10; i++)
        {
            fileSys.Mkdirs(new Path("file_" + i));
        }
        txid = cluster.GetNameNodeRpc().GetTransactionID();
        backup.DoCheckpoint();
        WaitCheckpointDone(cluster, txid);
        txid = cluster.GetNameNodeRpc().GetTransactionID();
        backup.DoCheckpoint();
        WaitCheckpointDone(cluster, txid);
        // Try BackupNode operations.
        IPEndPoint add = backup.GetNameNodeAddress();
        // Writes to the BackupNode must fail.
        FileSystem bnFS = FileSystem.Get(
            new Path("hdfs://" + NetUtils.GetHostPortString(add)).ToUri(), conf);
        bool canWrite = true;
        try
        {
            Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(bnFS, file3, replication);
        }
        catch (IOException eio)
        {
            Log.Info("Write to " + backup.GetRole() + " failed as expected: ", eio);
            canWrite = false;
        }
        NUnit.Framework.Assert.IsFalse("Write to BackupNode must be prohibited.", canWrite);
        // Reads are allowed on a BackupNode, but not on a CheckpointNode.
        bool canRead = true;
        try
        {
            bnFS.Exists(file2);
        }
        catch (IOException eio)
        {
            Log.Info("Read from " + backup.GetRole() + " failed: ", eio);
            canRead = false;
        }
        NUnit.Framework.Assert.AreEqual(
            "Reads are allowed on a BackupNode, but not on a CheckpointNode.",
            canRead, backup.IsRole(HdfsServerConstants.NamenodeRole.Backup));
        Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(fileSys, file3, replication);
        Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.CheckFile(fileSys, file3, replication);
        // file3 should also appear on the BackupNode right away.
        NUnit.Framework.Assert.IsTrue("file3 does not exist on BackupNode",
            op != HdfsServerConstants.StartupOption.Backup
            || backup.GetNamesystem().GetFileInfo(file3.ToUri().GetPath(), false) != null);
    }
    catch (IOException e)
    {
        Log.Error("Error in TestBackupNode:", e);
        throw new Exception("Error in TestBackupNode", e);
    }
    finally
    {
        if (backup != null)
        {
            backup.Stop();
        }
        if (fileSys != null)
        {
            fileSys.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
    FSImageTestUtil.AssertParallelFilesAreIdentical(
        ImmutableList.Of(bnCurDir, nnCurDir), ImmutableSet.Of<string>("VERSION"));
    try
    {
        //
        // Restart the cluster and verify that file2 exists and file1 does not.
        //
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
        fileSys = cluster.GetFileSystem();
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        // Verify that file2 exists.
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
    }
    catch (IOException e)
    {
        Log.Error("Error in TestBackupNode: ", e);
        NUnit.Framework.Assert.Fail(e.GetLocalizedMessage());
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
    }
}
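// A minimal sketch of the WaitCheckpointDone helper used above. The real
// implementation lives elsewhere in this suite; this version is an assumption
// about its contract: poll the active NameNode's storage until the most
// recent checkpoint reaches the given transaction id.
private void WaitCheckpointDoneSketch(MiniDFSCluster cluster, long txid)
{
    long checkpointTxId;
    do
    {
        try
        {
            // Give the BackupNode time to upload and merge the checkpoint.
            Sharpen.Thread.Sleep(1000);
        }
        catch (Exception)
        {
            // Ignore interrupts while polling.
        }
        checkpointTxId = cluster.GetNameNode().GetFSImage().GetStorage()
            .GetMostRecentCheckpointTxId();
    }
    while (checkpointTxId < txid);
}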