/// <summary>
/// Verifies that a delegation token obtained from the active NN of an HA pair
/// can still be renewed and cancelled through WebHDFS after a failover, and
/// that the renew/cancel calls actually go through the WebHdfsFileSystem.
/// </summary>
public virtual void TestSecureHAToken()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
    MiniDFSCluster miniCluster = null;
    WebHdfsFileSystem spyFs = null;
    try
    {
        // Two-NN topology, no datanodes needed for token operations.
        miniCluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
        HATestUtil.SetFailoverConfigurations(miniCluster, conf, LogicalName);
        miniCluster.WaitActive();
        // Spy on the filesystem so the renew/cancel interactions can be verified.
        spyFs = Org.Mockito.Mockito.Spy((WebHdfsFileSystem)FileSystem.Get(WebhdfsUri, conf));
        FileSystemTestHelper.AddFileSystemForTesting(WebhdfsUri, conf, spyFs);
        miniCluster.TransitionToActive(0);
        Org.Apache.Hadoop.Security.Token.Token<object> haToken =
            ((Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>)spyFs.GetDelegationToken(null));
        // Fail over: kill NN0 and promote NN1, then exercise the token.
        miniCluster.ShutdownNameNode(0);
        miniCluster.TransitionToActive(1);
        haToken.Renew(conf);
        haToken.Cancel(conf);
        // Both operations must have been routed through the WebHDFS client.
        Org.Mockito.Mockito.Verify(spyFs).RenewDelegationToken(haToken);
        Org.Mockito.Mockito.Verify(spyFs).CancelDelegationToken(haToken);
    }
    finally
    {
        IOUtils.Cleanup(null, spyFs);
        if (miniCluster != null)
        {
            miniCluster.Shutdown();
        }
    }
}
/// <summary>
/// Basic HA smoke test: a WebHDFS client created against the logical URI can
/// create a directory before a failover and another one after it.
/// </summary>
public virtual void TestHA()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    MiniDFSCluster miniCluster = null;
    FileSystem client = null;
    try
    {
        miniCluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
        HATestUtil.SetFailoverConfigurations(miniCluster, conf, LogicalName);
        miniCluster.WaitActive();
        client = FileSystem.Get(WebhdfsUri, conf);
        miniCluster.TransitionToActive(0);
        // Write through NN0 while it is active.
        Path beforeFailover = new Path("/test");
        NUnit.Framework.Assert.IsTrue(client.Mkdirs(beforeFailover));
        // Fail over to NN1 and write again through the same client.
        miniCluster.ShutdownNameNode(0);
        miniCluster.TransitionToActive(1);
        Path afterFailover = new Path("/test2");
        NUnit.Framework.Assert.IsTrue(client.Mkdirs(afterFailover));
    }
    finally
    {
        IOUtils.Cleanup(null, client);
        if (miniCluster != null)
        {
            miniCluster.Shutdown();
        }
    }
}
/// <summary>
/// Ensures a WebHdfsFileSystem bound to one logical name resolves only that
/// nameservice's NN addresses (two, for an HA pair) even when a second,
/// unrelated HA nameservice is configured.
/// </summary>
public virtual void TestMultipleNamespacesConfigured()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    MiniDFSCluster miniCluster = null;
    WebHdfsFileSystem webFs = null;
    try
    {
        miniCluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(1).Build();
        HATestUtil.SetFailoverConfigurations(miniCluster, conf, LogicalName);
        miniCluster.WaitActive();
        // Register a second ("remote") HA nameservice with fake HTTP addresses.
        DFSTestUtil.AddHAConfiguration(conf, LogicalName + "remote");
        DFSTestUtil.SetFakeHttpAddresses(conf, LogicalName + "remote");
        webFs = (WebHdfsFileSystem)FileSystem.Get(WebhdfsUri, conf);
        // Only the two NNs of the requested nameservice should be resolved.
        NUnit.Framework.Assert.AreEqual(2, webFs.GetResolvedNNAddr().Length);
    }
    finally
    {
        IOUtils.Cleanup(null, webFs);
        if (miniCluster != null)
        {
            miniCluster.Shutdown();
        }
    }
}
/// <summary>
/// Round-trips an empty delegation token through a WebHDFS URL query string
/// and checks the parsed token is marked as belonging to the HA logical URI.
/// </summary>
public virtual void TestDeserializeHAToken()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> emptyToken =
        new Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>();
    // Build the query string exactly as a WebHDFS client would encode it.
    string query = WebHdfsHandler.WebhdfsPrefix + "/?" + NamenodeAddressParam.Name + "=" + LogicalName
        + "&" + DelegationParam.Name + "=" + emptyToken.EncodeToUrlString();
    QueryStringDecoder qsDecoder = new QueryStringDecoder(query);
    ParameterParser parser = new ParameterParser(qsDecoder, conf);
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> parsed = parser.DelegationToken();
    // The deserialized token must carry the logical (HA) URI marker.
    NUnit.Framework.Assert.IsTrue(HAUtil.IsTokenForLogicalUri(parsed));
}
/// <summary>
/// Verifies that a write started before a failover completes afterwards:
/// the output stream is created while NN1 is active, NN1 is then shut down
/// and NN0 promoted, and the data is written, closed, and read back intact.
/// </summary>
public virtual void TestFailoverAfterOpen()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    conf.Set(CommonConfigurationKeysPublic.FsDefaultNameKey, HdfsConstants.HdfsUriScheme
        + "://" + LogicalName);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    // Hoisted so the finally block can close it; the original leaked this
    // stream (and skipped closing entirely when the assertion threw).
    FSDataInputStream @in = null;
    Path p = new Path("/test");
    byte[] data = Sharpen.Runtime.GetBytesForString("Hello");
    try
    {
        // One datanode is required because actual file data is written.
        cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(1).Build();
        HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
        cluster.WaitActive();
        fs = FileSystem.Get(WebhdfsUri, conf);
        cluster.TransitionToActive(1);
        // Open the output stream against NN1, then fail over to NN0 before writing.
        FSDataOutputStream @out = fs.Create(p);
        cluster.ShutdownNameNode(1);
        cluster.TransitionToActive(0);
        @out.Write(data);
        @out.Close();
        // Read the file back and confirm the bytes survived the failover.
        @in = fs.Open(p);
        byte[] buf = new byte[data.Length];
        IOUtils.ReadFully(@in, buf, 0, buf.Length);
        Assert.AssertArrayEquals(data, buf);
    }
    finally
    {
        // Close the input stream as well as the filesystem (leak fix).
        IOUtils.Cleanup(null, @in, fs);
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Make sure the WebHdfsFileSystem will retry based on RetriableException when
/// rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRetryWhileNNStartup()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    MiniDFSCluster cluster = null;
    // Filled in by the background thread: key "mkdirs" -> whether mkdirs succeeded.
    IDictionary<string, bool> resultMap = new Dictionary<string, bool>();
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
        HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        NameNode namenode = cluster.GetNameNode(0);
        NamenodeProtocols rpcServer = namenode.GetRpcServer();
        // Simulate a NameNode that is still starting up: null out its RPC server
        // via reflection so WebHDFS requests hit the "not yet ready" path.
        Whitebox.SetInternalState(namenode, "rpcServer", null);
        // Background thread (defined elsewhere in this file) issues a WebHDFS
        // mkdirs, records the outcome in resultMap, and notifies on `this`.
        new _Thread_212(this, conf, resultMap).Start();
        // Give the thread time to start retrying before restoring the server.
        Sharpen.Thread.Sleep(1000);
        Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
        // NOTE(review): lock(this)/Wait(this) is normally an anti-pattern, but
        // the partner thread notifies on `this`, so the monitor object must
        // stay as-is to keep the handshake working.
        lock (this)
        {
            // Wait until the background thread has recorded its result.
            while (!resultMap.Contains("mkdirs"))
            {
                Sharpen.Runtime.Wait(this);
            }
            // The retried mkdirs must ultimately have succeeded.
            NUnit.Framework.Assert.IsTrue(resultMap["mkdirs"]);
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}