/// <summary>
/// Boots a zero-datanode MiniDFSCluster plus a SecondaryNameNode for this
/// fixture, storing both in the static fields <c>cluster</c> and <c>snn</c>.
/// </summary>
public static void SetUpCluster()
{
    // Checkpoint every 500 transactions; bind the 2NN HTTP server to an
    // ephemeral port so parallel test runs do not collide.
    conf.SetLong(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 500);
    conf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
    cluster.WaitActive();
    snn = new SecondaryNameNode(conf);
}
/// <summary>
/// Decides whether <paramref name="remoteUser"/> may fetch the fsimage from
/// the image servlet. Valid requestors are the configured NameNode and
/// SecondaryNameNode Kerberos principals (plus, under HA, the other
/// NameNode's principal), or any user with administrator access.
/// </summary>
/// <param name="context">servlet context used for the administrator-access check</param>
/// <param name="remoteUser">authenticated principal of the caller; may be null</param>
/// <param name="conf">configuration supplying the expected principals and addresses</param>
/// <returns>true if the caller is allowed access</returns>
internal static bool IsValidRequestor(ServletContext context, string remoteUser, Configuration conf)
{
    if (remoteUser == null)
    {
        // This really shouldn't happen...
        Log.Warn("Received null remoteUser while authorizing access to getImage servlet");
        return false;
    }
    ICollection<string> validRequestors = new HashSet<string>();
    validRequestors.AddItem(SecurityUtil.GetServerPrincipal(conf.Get(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey), NameNode.GetAddress(conf).GetHostName()));
    try
    {
        validRequestors.AddItem(SecurityUtil.GetServerPrincipal(conf.Get(DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey), SecondaryNameNode.GetHttpAddress(conf).GetHostName()));
    }
    catch (Exception e)
    {
        // Don't halt if SecondaryNameNode principal could not be added.
        Log.Debug("SecondaryNameNode principal could not be added", e);
        // BUG FIX: the format string used Java-style "%s" placeholders, which
        // .NET string.Format does not substitute (the message was logged with
        // literal "%s"). Use indexed "{n}" composite-format placeholders.
        string msg = string.Format("SecondaryNameNode principal not considered, {0} = {1}, {2} = {3}",
            DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey,
            conf.Get(DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey),
            DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey,
            conf.GetTrimmed(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, DFSConfigKeys.DfsNamenodeSecondaryHttpAddressDefault));
        Log.Warn(msg);
    }
    if (HAUtil.IsHAEnabled(conf, DFSUtil.GetNamenodeNameServiceId(conf)))
    {
        // Under HA the peer NameNode may also request checkpoint images.
        Configuration otherNnConf = HAUtil.GetConfForOtherNode(conf);
        validRequestors.AddItem(SecurityUtil.GetServerPrincipal(otherNnConf.Get(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey), NameNode.GetAddress(otherNnConf).GetHostName()));
    }
    foreach (string v in validRequestors)
    {
        if (v != null && v.Equals(remoteUser))
        {
            Log.Info("ImageServlet allowing checkpointer: " + remoteUser);
            return true;
        }
    }
    if (HttpServer2.UserHasAdministratorAccess(context, remoteUser))
    {
        Log.Info("ImageServlet allowing administrator: " + remoteUser);
        return true;
    }
    Log.Info("ImageServlet rejecting: " + remoteUser);
    return false;
}
/// <summary>
/// Takes a checkpoint, corrupts the given key/value pairs in every storage
/// directory's VERSION file, then restarts the 2NN and checkpoints again
/// against the corrupted storage.
/// </summary>
/// <param name="paramsToCorrupt">VERSION-file keys mapped to the corrupt values to write</param>
/// <exception cref="System.IO.IOException"/>
private void DoIt(IDictionary<string, string> paramsToCorrupt)
{
    MiniDFSCluster dfsCluster = null;
    FileSystem fileSystem = null;
    SecondaryNameNode secondary = null;
    try
    {
        Configuration configuration = new HdfsConfiguration();
        dfsCluster = new MiniDFSCluster.Builder(configuration).Build();
        dfsCluster.WaitActive();
        // Ephemeral HTTP port for the 2NN.
        configuration.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
        secondary = new SecondaryNameNode(configuration);
        fileSystem = dfsCluster.GetFileSystem();
        fileSystem.Mkdirs(new Path("/test/foo"));
        secondary.DoCheckpoint();
        IList<FilePath> versionFiles = secondary.GetFSImage().GetStorage().GetFiles(null, "VERSION");
        secondary.Shutdown();
        // Corrupt the requested entries in each storage dir's VERSION file.
        foreach (FilePath versionFile in versionFiles)
        {
            foreach (KeyValuePair<string, string> entry in paramsToCorrupt)
            {
                string param = entry.Key;
                string val = entry.Value;
                System.Console.Out.WriteLine("Changing '" + param + "' to '" + val + "' in " + versionFile);
                FSImageTestUtil.CorruptVersionFile(versionFile, param, val);
            }
        }
        // Restart the 2NN over the corrupted storage and checkpoint again.
        secondary = new SecondaryNameNode(configuration);
        fileSystem.Mkdirs(new Path("/test/bar"));
        secondary.DoCheckpoint();
    }
    finally
    {
        if (fileSystem != null)
        {
            fileSystem.Close();
        }
        if (dfsCluster != null)
        {
            dfsCluster.Shutdown();
        }
        if (secondary != null)
        {
            secondary.Shutdown();
        }
    }
}
/// <summary>Create a number of fsimage checkpoints</summary>
/// <param name="count">number of checkpoints to create</param>
/// <exception cref="System.IO.IOException"/>
public virtual void CreateCheckPoint(int count)
{
    Log.Info("--starting mini cluster");
    // manage dirs parameter set to false
    MiniDFSCluster cluster = null;
    SecondaryNameNode sn = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(config).ManageDataDfsDirs(false).ManageNameDfsDirs(false).Build();
        cluster.WaitActive();
        Log.Info("--starting Secondary Node");
        // start secondary node
        sn = new SecondaryNameNode(config);
        NUnit.Framework.Assert.IsNotNull(sn);
        // Create count new files and checkpoints
        for (int i = 0; i < count; i++)
        {
            // create a file
            FileSystem fileSys = cluster.GetFileSystem();
            Path p = new Path("t" + i);
            this.WriteFile(fileSys, p, 1);
            Log.Info("--file " + p.ToString() + " created");
            Log.Info("--doing checkpoint");
            sn.DoCheckpoint();
            // this shouldn't fail
            Log.Info("--done checkpoint");
        }
    }
    catch (IOException e)
    {
        // BUG FIX: Assert.Fail throws immediately, so the stderr diagnostic
        // (and the rethrow) placed after it could never execute. Emit the
        // diagnostic before failing the test.
        System.Console.Error.WriteLine("checkpoint failed");
        NUnit.Framework.Assert.Fail(StringUtils.StringifyException(e));
        throw;
    }
    finally
    {
        if (sn != null)
        {
            sn.Shutdown();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
        Log.Info("--cluster shutdown");
    }
}
/// <summary>
/// Verifies that a storage directory restored mid-checkpoint by one 2NN
/// is handled correctly when a second 2NN performs a full checkpoint, and
/// that the namespace survives a NameNode restart afterwards.
/// </summary>
public virtual void TestMultipleSecondaryCheckpoint()
{
    SecondaryNameNode checkpointer = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).ManageNameDfsDirs(false).Build();
        cluster.WaitActive();
        checkpointer = new SecondaryNameNode(config);
        FSImage image = cluster.GetNameNode().GetFSImage();
        PrintStorages(image);
        FileSystem fileSystem = cluster.GetFileSystem();
        Path dir = new Path("/", "test");
        NUnit.Framework.Assert.IsTrue(fileSystem.Mkdirs(dir));
        PrintStorages(image);
        // Take name1 offline
        InvalidateStorage(image, ImmutableSet.Of(path1));
        // Simulate a 2NN beginning a checkpoint, but not finishing. This will
        // cause name1 to be restored.
        cluster.GetNameNodeRpc().RollEditLog();
        PrintStorages(image);
        // Now another 2NN comes along to do a full checkpoint.
        checkpointer.DoCheckpoint();
        PrintStorages(image);
        // The created file should still exist in the in-memory FS state after
        // the checkpoint.
        NUnit.Framework.Assert.IsTrue("path exists before restart", fileSystem.Exists(dir));
        checkpointer.Shutdown();
        // Restart the NN so it reloads the edits from on-disk.
        cluster.RestartNameNode();
        // The created file should still exist after the restart.
        NUnit.Framework.Assert.IsTrue("path should still exist after restart", fileSystem.Exists(dir));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
        if (checkpointer != null)
        {
            checkpointer.Shutdown();
        }
    }
}
/// <summary>
/// Verifies that whitespace (including newlines) surrounding the configured
/// checkpoint name/edits directories is trimmed before use: after a 2NN
/// checkpoint, the directories must exist at their trimmed paths.
/// </summary>
public virtual void TestCheckPointDirsAreTrimmed()
{
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    FilePath checkpointNameDir1 = new FilePath(base_dir, "chkptName1");
    FilePath checkpointEditsDir1 = new FilePath(base_dir, "chkptEdits1");
    FilePath checkpointNameDir2 = new FilePath(base_dir, "chkptName2");
    FilePath checkpointEditsDir2 = new FilePath(base_dir, "chkptEdits2");
    FilePath nameDir = new FilePath(base_dir, "name1");
    string whiteSpace = " \n \n ";
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetPath());
    // Surround each checkpoint dir path with whitespace; config parsing is
    // expected to trim it.
    conf.SetStrings(DFSConfigKeys.DfsNamenodeCheckpointDirKey, whiteSpace + checkpointNameDir1.GetPath() + whiteSpace, whiteSpace + checkpointNameDir2.GetPath() + whiteSpace);
    conf.SetStrings(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, whiteSpace + checkpointEditsDir1.GetPath() + whiteSpace, whiteSpace + checkpointEditsDir2.GetPath() + whiteSpace);
    cluster = new MiniDFSCluster.Builder(conf).ManageNameDfsDirs(false).NumDataNodes(3).Build();
    try
    {
        cluster.WaitActive();
        secondary = StartSecondaryNameNode(conf);
        secondary.DoCheckpoint();
        NUnit.Framework.Assert.IsTrue(DFSConfigKeys.DfsNamenodeNameDirKey + " must be trimmed ", checkpointNameDir1.Exists());
        NUnit.Framework.Assert.IsTrue(DFSConfigKeys.DfsNamenodeNameDirKey + " must be trimmed ", checkpointNameDir2.Exists());
        NUnit.Framework.Assert.IsTrue(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey + " must be trimmed ", checkpointEditsDir1.Exists());
        NUnit.Framework.Assert.IsTrue(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey + " must be trimmed ", checkpointEditsDir2.Exists());
    }
    finally
    {
        // BUG FIX: 'secondary' is null if StartSecondaryNameNode (or
        // WaitActive) throws; the unguarded Shutdown() call produced a
        // NullReferenceException that masked the original test failure.
        if (secondary != null)
        {
            secondary.Shutdown();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Verifies storage-restore behavior when the underlying directories are
/// unwritable: name2/name3 are dropped after chmod 000, stay dropped while
/// unwritable, and are restored by a checkpoint once permissions return.
/// </summary>
public virtual void TestStorageRestoreFailure()
{
    SecondaryNameNode checkpointNode = null;
    // On windows, revoking write+execute permission on name2 does not
    // prevent us from creating files in name2\current. Hence we revoke
    // permissions on name2\current for the test.
    string dir2 = Shell.Windows ? (new FilePath(path2, "current").GetAbsolutePath()) : path2.ToString();
    string dir3 = Shell.Windows ? (new FilePath(path3, "current").GetAbsolutePath()) : path3.ToString();
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).ManageNameDfsDirs(false).Build();
        cluster.WaitActive();
        checkpointNode = new SecondaryNameNode(config);
        PrintStorages(cluster.GetNameNode().GetFSImage());
        FileSystem fileSystem = cluster.GetFileSystem();
        Path dir = new Path("/", "test");
        NUnit.Framework.Assert.IsTrue(fileSystem.Mkdirs(dir));
        // invalidate storage by removing rwx permission from name2 and name3
        NUnit.Framework.Assert.IsTrue(FileUtil.Chmod(dir2, "000") == 0);
        NUnit.Framework.Assert.IsTrue(FileUtil.Chmod(dir3, "000") == 0);
        // should remove name2 and name3
        checkpointNode.DoCheckpoint();
        PrintStorages(cluster.GetNameNode().GetFSImage());
        dir = new Path("/", "test1");
        NUnit.Framework.Assert.IsTrue(fileSystem.Mkdirs(dir));
        System.Diagnostics.Debug.Assert((cluster.GetNameNode().GetFSImage().GetStorage().GetNumStorageDirs() == 1));
        // shouldn't be able to restore name 2 and 3
        checkpointNode.DoCheckpoint();
        System.Diagnostics.Debug.Assert((cluster.GetNameNode().GetFSImage().GetStorage().GetNumStorageDirs() == 1));
        NUnit.Framework.Assert.IsTrue(FileUtil.Chmod(dir2, "755") == 0);
        NUnit.Framework.Assert.IsTrue(FileUtil.Chmod(dir3, "755") == 0);
        // should restore name 2 and 3
        checkpointNode.DoCheckpoint();
        System.Diagnostics.Debug.Assert((cluster.GetNameNode().GetFSImage().GetStorage().GetNumStorageDirs() == 3));
    }
    finally
    {
        // Re-grant permissions so later tests / cleanup can delete the dirs.
        if (path2.Exists())
        {
            FileUtil.Chmod(dir2, "755");
        }
        if (path3.Exists())
        {
            FileUtil.Chmod(dir3, "755");
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
        if (checkpointNode != null)
        {
            checkpointNode.Shutdown();
        }
    }
}
/// <summary>
/// Verifies that storage directories path2/path3, after being invalidated,
/// are brought back into rotation by a 2NN checkpoint: the restored dirs
/// receive the new log segment, failed dirs keep no finalized logs from the
/// failure window, and a clean shutdown finalizes logs everywhere.
/// </summary>
public virtual void TestStorageRestore()
{
    int numDatanodes = 0;
    cluster = new MiniDFSCluster.Builder(config).NumDataNodes(numDatanodes).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    SecondaryNameNode secondary = new SecondaryNameNode(config);
    System.Console.Out.WriteLine("****testStorageRestore: Cluster and SNN started");
    PrintStorages(cluster.GetNameNode().GetFSImage());
    // One edit while all three storage directories are healthy.
    FileSystem fs = cluster.GetFileSystem();
    Path path = new Path("/", "test");
    NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path));
    System.Console.Out.WriteLine("****testStorageRestore: dir 'test' created, invalidating storage...");
    // Simulate failure of the path2/path3 storage directories.
    InvalidateStorage(cluster.GetNameNode().GetFSImage(), ImmutableSet.Of(path2, path3));
    PrintStorages(cluster.GetNameNode().GetFSImage());
    System.Console.Out.WriteLine("****testStorageRestore: storage invalidated");
    path = new Path("/", "test1");
    NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path));
    System.Console.Out.WriteLine("****testStorageRestore: dir 'test1' created");
    // We did another edit, so the still-active directory at 'path1'
    // should now differ from the others
    FSImageTestUtil.AssertFileContentsDifferent(2, new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName(1)), new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(1)), new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(1)));
    // The two failed directories stopped at the same point, so they match.
    FSImageTestUtil.AssertFileContentsSame(new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(1)), new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(1)));
    System.Console.Out.WriteLine("****testStorageRestore: checkfiles(false) run");
    // should enable storage..
    secondary.DoCheckpoint();
    // We should have a checkpoint through txid 4 in the two image dirs
    // (txid=4 for BEGIN, mkdir, mkdir, END)
    FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetImageFileName(4)), new FilePath(path2, "current/" + NNStorage.GetImageFileName(4)));
    NUnit.Framework.Assert.IsFalse("Should not have any image in an edits-only directory", new FilePath(path3, "current/" + NNStorage.GetImageFileName(4)).Exists());
    // Should have finalized logs in the directory that didn't fail
    NUnit.Framework.Assert.IsTrue("Should have finalized logs in the directory that didn't fail", new FilePath(path1, "current/" + NNStorage.GetFinalizedEditsFileName(1, 4)).Exists());
    // Should not have finalized logs in the failed directories
    NUnit.Framework.Assert.IsFalse("Should not have finalized logs in the failed directories", new FilePath(path2, "current/" + NNStorage.GetFinalizedEditsFileName(1, 4)).Exists());
    NUnit.Framework.Assert.IsFalse("Should not have finalized logs in the failed directories", new FilePath(path3, "current/" + NNStorage.GetFinalizedEditsFileName(1, 4)).Exists());
    // The new log segment should be in all of the directories.
    FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName(5)), new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(5)), new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(5)));
    // Snapshot of the in-progress log; compared after the next edit below.
    string md5BeforeEdit = FSImageTestUtil.GetFileMD5(new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName(5)));
    // The original image should still be the previously failed image
    // directory after it got restored, since it's still useful for
    // a recovery!
    FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetImageFileName(0)), new FilePath(path2, "current/" + NNStorage.GetImageFileName(0)));
    // Do another edit to verify that all the logs are active.
    path = new Path("/", "test2");
    NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path));
    // Logs should be changed by the edit.
    string md5AfterEdit = FSImageTestUtil.GetFileMD5(new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName(5)));
    NUnit.Framework.Assert.IsFalse(md5BeforeEdit.Equals(md5AfterEdit));
    // And all logs should be changed.
    FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName(5)), new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(5)), new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(5)));
    secondary.Shutdown();
    cluster.Shutdown();
    // All logs should be finalized by clean shutdown
    FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetFinalizedEditsFileName(5, 7)), new FilePath(path2, "current/" + NNStorage.GetFinalizedEditsFileName(5, 7)), new FilePath(path3, "current/" + NNStorage.GetFinalizedEditsFileName(5, 7)));
}
/// <summary>
/// Exercises the combinations of dfs.namenode.name.dir / dfs.namenode.edits.dir
/// (and the matching checkpoint dirs) across four cluster lifecycles:
/// a shared name+edits dir, additionally-added separate dirs, separate-only
/// dirs, and finally a wiped shared dir re-added alongside the separate ones.
/// After each lifecycle a 2NN checkpoint is taken and image/edits file
/// placement is asserted.
/// </summary>
public virtual void TestNameEditsConfigs()
{
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    FilePath newNameDir = new FilePath(base_dir, "name");
    FilePath newEditsDir = new FilePath(base_dir, "edits");
    FilePath nameAndEdits = new FilePath(base_dir, "name_and_edits");
    FilePath checkpointNameDir = new FilePath(base_dir, "secondname");
    FilePath checkpointEditsDir = new FilePath(base_dir, "secondedits");
    FilePath checkpointNameAndEdits = new FilePath(base_dir, "second_name_and_edits");
    // "current" subdirectories whose contents are compared between restarts.
    ImmutableList<FilePath> allCurrentDirs = ImmutableList.Of(new FilePath(nameAndEdits, "current"), new FilePath(newNameDir, "current"), new FilePath(newEditsDir, "current"), new FilePath(checkpointNameAndEdits, "current"), new FilePath(checkpointNameDir, "current"), new FilePath(checkpointEditsDir, "current"));
    ImmutableList<FilePath> imageCurrentDirs = ImmutableList.Of(new FilePath(nameAndEdits, "current"), new FilePath(newNameDir, "current"), new FilePath(checkpointNameAndEdits, "current"), new FilePath(checkpointNameDir, "current"));
    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    // Manage our own dfs directories
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        WriteFile(fileSys, file1, replication);
        CheckFile(fileSys, file1, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    NUnit.Framework.Assert.IsTrue(newNameDir.Mkdir());
    NUnit.Framework.Assert.IsTrue(newEditsDir.Mkdir());
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    // Manage our own dfs directories. Do not format.
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        // file1 survived the restart; replace it with file2.
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
        CheckFile(fileSys, file1, replication);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file2, replication);
        CheckFile(fileSys, file2, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    FSImageTestUtil.AssertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.Of("VERSION"));
    FSImageTestUtil.AssertSameNewestImage(imageCurrentDirs);
    // Now remove common directory both have and start namenode with
    // separate name and edits dirs
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, newNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
        CheckFile(fileSys, file2, replication);
        CleanupFile(fileSys, file2);
        WriteFile(fileSys, file3, replication);
        CheckFile(fileSys, file3, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    // No edit logs in new name dir
    CheckImageAndEditsFilesExistence(newNameDir, true, false);
    CheckImageAndEditsFilesExistence(newEditsDir, false, true);
    CheckImageAndEditsFilesExistence(checkpointNameDir, true, false);
    CheckImageAndEditsFilesExistence(checkpointEditsDir, false, true);
    // Add old name_and_edits dir. File system should not read image or edits
    // from old dir
    NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(nameAndEdits, "current")));
    NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(checkpointNameAndEdits, "current")));
    conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir.GetPath());
    // NOTE(review): nameAndEdits is concatenated directly here (implicit
    // ToString) rather than via GetPath() as on every other line — presumably
    // equivalent for FilePath; confirm ToString matches GetPath.
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits + "," + newEditsDir.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
    replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false).ManageNameDfsDirs(false).Build();
    cluster.WaitActive();
    secondary = StartSecondaryNameNode(conf);
    fileSys = cluster.GetFileSystem();
    try
    {
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
        NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
        NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
        CheckFile(fileSys, file3, replication);
        secondary.DoCheckpoint();
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
        secondary.Shutdown();
    }
    CheckImageAndEditsFilesExistence(nameAndEdits, true, true);
    CheckImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}
/// <summary>
/// Starts a NameNode with a shared name/edits dir plus a SecondaryNameNode
/// with distinct checkpoint dirs, performs a checkpoint, and verifies that
/// the 2NN's image and edits land in different directories than the NN's.
/// </summary>
public virtual void TestSNNStartup()
{
    //setUpConfig();
    Log.Info("--starting SecondNN startup test");
    // different name dirs
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(hdfsDir, "name")).ToString());
    config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(hdfsDir, "name")).ToString());
    // same checkpoint dirs
    config.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(hdfsDir, "chkpt_edits")).ToString());
    config.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(hdfsDir, "chkpt")).ToString());
    Log.Info("--starting NN ");
    MiniDFSCluster cluster = null;
    SecondaryNameNode sn = null;
    NameNode nn = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(config).ManageDataDfsDirs(false).ManageNameDfsDirs(false).Build();
        cluster.WaitActive();
        nn = cluster.GetNameNode();
        NUnit.Framework.Assert.IsNotNull(nn);
        // start secondary node
        Log.Info("--starting SecondNN");
        sn = new SecondaryNameNode(config);
        NUnit.Framework.Assert.IsNotNull(sn);
        Log.Info("--doing checkpoint");
        sn.DoCheckpoint();
        // this shouldn't fail
        Log.Info("--done checkpoint");
        // now verify that image and edits are created in the different directories
        FSImage image = nn.GetFSImage();
        Storage.StorageDirectory sd = image.GetStorage().GetStorageDir(0);
        //only one
        NUnit.Framework.Assert.AreEqual(sd.GetStorageDirType(), NNStorage.NameNodeDirType.ImageAndEdits);
        // (removed two no-op 'image.GetStorage();' statements whose results
        // were discarded — leftovers from the Java-to-C# conversion)
        FilePath imf = NNStorage.GetStorageFile(sd, NNStorage.NameNodeFile.Image, 0);
        FilePath edf = NNStorage.GetStorageFile(sd, NNStorage.NameNodeFile.Edits, 0);
        Log.Info("--image file " + imf.GetAbsolutePath() + "; len = " + imf.Length());
        Log.Info("--edits file " + edf.GetAbsolutePath() + "; len = " + edf.Length());
        FSImage chkpImage = sn.GetFSImage();
        VerifyDifferentDirs(chkpImage, imf.Length(), edf.Length());
    }
    catch (IOException e)
    {
        // BUG FIX: Assert.Fail throws immediately, so the stderr diagnostic
        // (and the rethrow) placed after it could never execute. Emit the
        // diagnostic before failing the test.
        System.Console.Error.WriteLine("checkpoint failed");
        NUnit.Framework.Assert.Fail(StringUtils.StringifyException(e));
        throw;
    }
    finally
    {
        if (sn != null)
        {
            sn.Shutdown();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
// Constructor of the compiler-generated PrivilegedAction wrapper (from the
// Java-to-C# conversion of an anonymous inner class); captures the enclosing
// SecondaryNameNode instance for use by the action body.
public _PrivilegedAction_358(SecondaryNameNode _enclosing)
{
    this._enclosing = _enclosing;
}