/// <summary>
/// Set up an HA mini-cluster with the retry cache enabled and the client
/// configured to drop the first two NameNode responses, forcing retries.
/// </summary>
public virtual void Setup()
{
    conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeEnableRetryCacheKey, true);
    // Drop the first two NameNode responses so the client must retry the call.
    conf.SetInt(DFSConfigKeys.DfsClientTestDropNamenodeResponseNumKey, 2);
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(3)
        .Build();
    cluster.WaitActive();
    cluster.TransitionToActive(namenodeId);
    HATestUtil.SetFailoverConfigurations(cluster, conf);
    filesystem = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, conf);
    namesystem = cluster.GetNamesystem(namenodeId);
    metrics = namesystem.GetRetryCache().GetMetricsForTests();
}
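/// <summary>
/// Test that a wrapped legacy failover proxy provider is still recognized as
/// one that addresses the cluster by its logical HA URI.
/// </summary>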
public virtual void TestWrappedFailoverProxyProvider()
{
    // Set up the config with the dummy provider class.
    Configuration config = new HdfsConfiguration(conf);
    string logicalName = HATestUtil.GetLogicalHostname(cluster);
    HATestUtil.SetFailoverConfigurations(cluster, config, logicalName);
    config.Set(DFSConfigKeys.DfsClientFailoverProxyProviderKeyPrefix + "." + logicalName,
        typeof(TestDFSClientFailover.DummyLegacyFailoverProxyProvider).FullName);
    Path p = new Path("hdfs://" + logicalName + "/");
    // Do not use the IP address for the token service.
    SecurityUtil.SetTokenServiceUseIp(false);
    // The logical URI should be used.
    NUnit.Framework.Assert.IsTrue("Legacy proxy providers should use logical URI.",
        HAUtil.UseLogicalUri(config, p.ToUri()));
}
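/// <summary>
/// Test that accessing a logical HA URI with an explicit port fails, since
/// logical URIs do not carry port information.
/// </summary>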
public virtual void TestLogicalUriShouldNotHavePorts()
{
    Configuration config = new HdfsConfiguration(conf);
    string logicalName = HATestUtil.GetLogicalHostname(cluster);
    HATestUtil.SetFailoverConfigurations(cluster, config, logicalName);
    Path p = new Path("hdfs://" + logicalName + ":12345/");
    try
    {
        p.GetFileSystem(config).Exists(p);
        NUnit.Framework.Assert.Fail("Did not fail with fake FS");
    }
    catch (IOException ioe)
    {
        GenericTestUtils.AssertExceptionContains("does not use port information", ioe);
    }
}
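/// <summary>
/// Test that a WebHDFS output stream opened against the active NameNode keeps
/// working after that NameNode is shut down and the standby becomes active,
/// and that the data can be read back after the failover.
/// </summary>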
public virtual void TestFailoverAfterOpen()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    conf.Set(CommonConfigurationKeysPublic.FsDefaultNameKey,
        HdfsConstants.HdfsUriScheme + "://" + LogicalName);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    Path p = new Path("/test");
    byte[] data = Sharpen.Runtime.GetBytesForString("Hello");
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(1).Build();
        HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
        cluster.WaitActive();
        fs = FileSystem.Get(WebhdfsUri, conf);
        // Open the stream against NN1, then fail over to NN0 mid-write.
        cluster.TransitionToActive(1);
        FSDataOutputStream @out = fs.Create(p);
        cluster.ShutdownNameNode(1);
        cluster.TransitionToActive(0);
        @out.Write(data);
        @out.Close();
        // The read after failover should return the data written above.
        FSDataInputStream @in = fs.Open(p);
        byte[] buf = new byte[data.Length];
        IOUtils.ReadFully(@in, buf, 0, buf.Length);
        Assert.AssertArrayEquals(data, buf);
    }
    finally
    {
        IOUtils.Cleanup(null, fs);
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
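/// <summary>
/// Test that NameNode.Format ignores a shared edits directory that is not
/// file-based (here, a dummy journal manager) instead of trying to format it.
/// </summary>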
public virtual void TestFormatShouldBeIgnoredForNonFileBasedDirs()
{
    Configuration conf = new HdfsConfiguration();
    string logicalName = "mycluster";
    // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
    // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
    // considered.
    string localhost = "127.0.0.1";
    IPEndPoint nnAddr1 = new IPEndPoint(localhost, 8020);
    IPEndPoint nnAddr2 = new IPEndPoint(localhost, 9020);
    HATestUtil.SetFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
        new FilePath(DfsBaseDir, "name").GetAbsolutePath());
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeSupportAllowFormatKey, true);
    conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeEditsPluginPrefix, "dummy"),
        typeof(TestGenericJournalConf.DummyJournalManager).FullName);
    conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
        "dummy://" + localhost + ":2181/ledgers");
    conf.Set(DFSConfigKeys.DfsHaNamenodeIdKey, "nn1");
    // An internal assert verifies the behavior under test.
    NameNode.Format(conf);
}
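/// <summary>
/// Test that the Mover CLI resolves the logical HA URI from an HA-enabled
/// configuration and maps the supplied paths to that single nameservice.
/// </summary>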
public virtual void TestMoverCliWithHAConf()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    HATestUtil.SetFailoverConfigurations(cluster, conf, "MyCluster");
    try
    {
        IDictionary<URI, IList<Path>> movePaths = Mover.Cli.GetNameNodePathsToMove(conf,
            "-p", "/foo", "/bar");
        ICollection<URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
        NUnit.Framework.Assert.AreEqual(1, namenodes.Count);
        NUnit.Framework.Assert.AreEqual(1, movePaths.Count);
        URI nn = namenodes.GetEnumerator().Next();
        NUnit.Framework.Assert.AreEqual(new URI("hdfs://MyCluster"), nn);
        NUnit.Framework.Assert.IsTrue(movePaths.Contains(nn));
        CheckMovePaths(movePaths[nn], new Path("/foo"), new Path("/bar"));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Make sure the WebHdfsFileSystem will retry based on RetriableException when
/// rpcServer is null in NamenodeWebHdfsMethods while the NameNode starts up.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRetryWhileNNStartup()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    MiniDFSCluster cluster = null;
    IDictionary<string, bool> resultMap = new Dictionary<string, bool>();
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
        HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        NameNode namenode = cluster.GetNameNode(0);
        NamenodeProtocols rpcServer = namenode.GetRpcServer();
        // Null out the RPC server to simulate a NameNode that is still starting up.
        Whitebox.SetInternalState(namenode, "rpcServer", null);
        new _Thread_212(this, conf, resultMap).Start();
        Sharpen.Thread.Sleep(1000);
        // Restore the RPC server; the pending mkdirs call should now succeed.
        Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
        lock (this)
        {
            while (!resultMap.Contains("mkdirs"))
            {
                Sharpen.Runtime.Wait(this);
            }
            NUnit.Framework.Assert.IsTrue(resultMap["mkdirs"]);
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}