/// <summary>
/// Tests setting the rpc port to the same as the web port to test that
/// an exception is thrown when trying to re-use the same port.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestThatMatchingRPCandHttpPortsThrowException()
{
    NameNode nameNode = null;
    try
    {
        Configuration conf = new HdfsConfiguration();
        FilePath nameDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "name");
        conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
        // NOTE(review): a random port in [30000, 60000) may already be in use on
        // the host, which would make this test flaky — verify.
        Random rand = new Random();
        int port = 30000 + rand.Next(30000);
        // set both of these to the same port. It should fail.
        FileSystem.SetDefaultUri(conf, "hdfs://localhost:" + port);
        conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:" + port);
        DFSTestUtil.FormatNameNode(conf);
        // Expected to throw because RPC and HTTP share the same port.
        // NOTE(review): nothing in this body fails the test when no exception is
        // thrown; presumably an expected-exception attribute outside this view
        // covers that case — confirm.
        nameNode = new NameNode(conf);
    }
    finally
    {
        if (nameNode != null)
        {
            nameNode.Stop();
        }
    }
}
/// <summary>
/// HDFS-3013: NameNode format command doesn't pick up
/// dfs.namenode.name.dir.NameServiceId configuration.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestGenericKeysForNameNodeFormat()
{
    Configuration config = new HdfsConfiguration();
    // Ephemeral ports so the test cannot collide with other services.
    config.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "127.0.0.1:0");
    config.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:0");
    config.Set(DFSConfigKeys.DfsNameservices, "ns1");
    // Point the ns1-specific name-dir key at a fresh directory.
    FilePath nsNameDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "testGenericKeysForNameNodeFormat");
    if (nsNameDir.Exists())
    {
        FileUtil.FullyDelete(nsNameDir);
    }
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey + ".ns1", nsNameDir.GetAbsolutePath());
    // Formatting must create exactly the nameservice-specific directory.
    DFSTestUtil.FormatNameNode(config);
    GenericTestUtils.AssertExists(nsNameDir);
    // A NameNode started with the same config must pick up the same dir.
    NameNode nn = new NameNode(config);
    nn.Stop();
}
/// <summary>
/// Tests to make sure the returned addresses are correct in case of default
/// configuration with no federation
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestNonFederation()
{
    HdfsConfiguration config = new HdfsConfiguration(false);
    // With only a default FS set, both namenode queries resolve to it.
    config.Set(CommonConfigurationKeysPublic.FsDefaultNameKey, "hdfs://localhost:1000");
    VerifyAddresses(config, TestGetConf.TestType.Namenode, false, "localhost:1000");
    VerifyAddresses(config, TestGetConf.TestType.Nnrpcaddresses, true, "localhost:1000");
    // The backup-node query reflects the configured backup RPC address.
    config.Set(DFSConfigKeys.DfsNamenodeBackupAddressKey, "localhost:1001");
    VerifyAddresses(config, TestGetConf.TestType.Backup, false, "localhost:1001");
    // The secondary query reflects the configured secondary http address.
    config.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "localhost:1002");
    VerifyAddresses(config, TestGetConf.TestType.Secondary, false, "localhost:1002");
    // When a service RPC address is present it wins over the client RPC address.
    config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeServiceRpcAddressKey, "localhost:1000");
    config.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "localhost:1001");
    VerifyAddresses(config, TestGetConf.TestType.Namenode, false, "localhost:1000");
    VerifyAddresses(config, TestGetConf.TestType.Nnrpcaddresses, true, "localhost:1000");
    // With only the client RPC address configured, that address is returned.
    config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "localhost:1001");
    VerifyAddresses(config, TestGetConf.TestType.Namenode, false, "localhost:1001");
    VerifyAddresses(config, TestGetConf.TestType.Nnrpcaddresses, true, "localhost:1001");
}
// Verifies fencer configuration precedence during forced failover:
// a NameNode-specific fencer key and a nameservice-specific fencer key
// each override the default dfs.ha.fence.methods setting.
public virtual void TestFencingConfigPerNameNode()
{
    // Make the mocked standby report that it is ready to become active.
    Org.Mockito.Mockito.DoReturn(StandbyReadyResult).When(mockProtocol).GetServiceStatus();
    string fenceKeyForNs = DFSConfigKeys.DfsHaFenceMethodsKey + "." + Nsid;
    string fenceKeyForNn = fenceKeyForNs + ".nn1";
    HdfsConfiguration haConf = GetHAConf();
    // Default fencer succeeds -> forced failover succeeds.
    haConf.Set(DFSConfigKeys.DfsHaFenceMethodsKey, GetFencerTrueCommand());
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(0, RunTool("-failover", "nn1", "nn2", "--forcefence"));
    // A failing NameNode-specific fencer overrides the default -> failover fails.
    haConf.Set(fenceKeyForNn, GetFencerFalseCommand());
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(-1, RunTool("-failover", "nn1", "nn2", "--forcefence"));
    haConf.Unset(fenceKeyForNn);
    // A failing nameservice-specific fencer likewise overrides the default.
    haConf.Set(fenceKeyForNs, GetFencerFalseCommand());
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(-1, RunTool("-failover", "nn1", "nn2", "--forcefence"));
    // A succeeding nameservice-specific fencer restores success.
    haConf.Set(fenceKeyForNs, GetFencerTrueCommand());
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(0, RunTool("-failover", "nn1", "nn2", "--forcefence"));
}
// Verifies that a Kerberos-secured NameNode enforces permissions: a
// non-superuser principal can create directories under the world-writable
// /tmp but is rejected when writing directly under the root directory.
public virtual void TestSecureNameNode()
{
    MiniDFSCluster cluster = null;
    try
    {
        // The required principals/keytabs are passed in as system properties.
        string nnPrincipal = Runtime.GetProperty("dfs.namenode.kerberos.principal");
        string nnSpnegoPrincipal = Runtime.GetProperty("dfs.namenode.kerberos.internal.spnego.principal");
        string nnKeyTab = Runtime.GetProperty("dfs.namenode.keytab.file");
        NUnit.Framework.Assert.IsNotNull("NameNode principal was not specified", nnPrincipal);
        NUnit.Framework.Assert.IsNotNull("NameNode SPNEGO principal was not specified", nnSpnegoPrincipal);
        NUnit.Framework.Assert.IsNotNull("NameNode keytab was not specified", nnKeyTab);
        Configuration conf = new HdfsConfiguration();
        conf.Set(CommonConfigurationKeys.HadoopSecurityAuthentication, "kerberos");
        conf.Set(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey, nnPrincipal);
        conf.Set(DFSConfigKeys.DfsNamenodeKerberosInternalSpnegoPrincipalKey, nnSpnegoPrincipal);
        conf.Set(DFSConfigKeys.DfsNamenodeKeytabFileKey, nnKeyTab);
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).Build();
        MiniDFSCluster clusterRef = cluster;
        cluster.WaitActive();
        // Make /tmp world-writable (511 decimal == 0777 octal) so the
        // non-superuser below can create directories there.
        FileSystem fsForCurrentUser = cluster.GetFileSystem();
        fsForCurrentUser.Mkdirs(new Path("/tmp"));
        fsForCurrentUser.SetPermission(new Path("/tmp"), new FsPermission((short)511));
        // The user specified should not be a superuser
        string userPrincipal = Runtime.GetProperty("user.principal");
        string userKeyTab = Runtime.GetProperty("user.keytab");
        NUnit.Framework.Assert.IsNotNull("User principal was not specified", userPrincipal);
        NUnit.Framework.Assert.IsNotNull("User keytab was not specified", userKeyTab);
        UserGroupInformation ugi = UserGroupInformation.LoginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
        // Obtain a FileSystem as the non-superuser via the converted
        // anonymous PrivilegedExceptionAction class.
        FileSystem fs = ugi.DoAs(new _PrivilegedExceptionAction_105(clusterRef));
        try
        {
            // Writing directly under / must be rejected for a non-superuser.
            Path p = new Path("/users");
            fs.Mkdirs(p);
            NUnit.Framework.Assert.Fail("User must not be allowed to write in /");
        }
        catch (IOException)
        {
            // Expected: permission denied.
        }
        // Writing under the world-writable /tmp must succeed.
        Path p_1 = new Path("/tmp/alpha");
        fs.Mkdirs(p_1);
        NUnit.Framework.Assert.IsNotNull(fs.ListStatus(p_1));
        NUnit.Framework.Assert.AreEqual(UserGroupInformation.AuthenticationMethod.Kerberos, ugi.GetAuthenticationMethod());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
// Verifies that a file written through the active NameNode can be read back
// identically through a BackupNode, using a dual-NameNode ("bnCluster")
// configuration for the DataNodes.
public virtual void TestCanReadData()
{
    Path file1 = new Path("/fileToRead.dat");
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    BackupNode backup = null;
    try
    {
        // Start NameNode and BackupNode
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
        fileSys = cluster.GetFileSystem();
        long txid = cluster.GetNameNodeRpc().GetTransactionID();
        backup = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
        // Wait until the BackupNode has checkpointed up to the recorded txid.
        WaitCheckpointDone(cluster, txid);
        // Setup dual NameNode configuration for DataNodes
        string rpcAddrKeyPreffix = DFSConfigKeys.DfsNamenodeRpcAddressKey + ".bnCluster";
        string nnAddr = cluster.GetNameNode().GetNameNodeAddressHostPortString();
        // NOTE(review): the result of this Get is discarded; presumably a
        // leftover from conversion — confirm it can be removed.
        conf.Get(DFSConfigKeys.DfsNamenodeRpcAddressKey);
        string bnAddr = backup.GetNameNodeAddressHostPortString();
        conf.Set(DFSConfigKeys.DfsNameservices, "bnCluster");
        conf.Set(DFSConfigKeys.DfsNameserviceId, "bnCluster");
        conf.Set(DFSConfigKeys.DfsHaNamenodesKeyPrefix + ".bnCluster", "nnActive, nnBackup");
        conf.Set(rpcAddrKeyPreffix + ".nnActive", nnAddr);
        conf.Set(rpcAddrKeyPreffix + ".nnBackup", bnAddr);
        cluster.StartDataNodes(conf, 3, true, HdfsServerConstants.StartupOption.Regular, null);
        DFSTestUtil.CreateFile(fileSys, file1, 8192, (short)3, 0);
        // Read the same file from file systems pointing to NN and BN
        FileSystem bnFS = FileSystem.Get(new Path("hdfs://" + bnAddr).ToUri(), conf);
        string nnData = DFSTestUtil.ReadFile(fileSys, file1);
        string bnData = DFSTestUtil.ReadFile(bnFS, file1);
        NUnit.Framework.Assert.AreEqual("Data read from BackupNode and NameNode is not the same.", nnData, bnData);
    }
    catch (IOException e)
    {
        // Any IO failure fails the test via the always-false assertion below.
        Log.Error("Error in TestBackupNode: ", e);
        NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
    }
    finally
    {
        if (fileSys != null)
        {
            fileSys.Close();
        }
        if (backup != null)
        {
            backup.Stop();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
// Builds a single-name-dir test configuration with ephemeral ports and
// permission checking switched off.
private Configuration GetConf()
{
    Configuration config = new HdfsConfiguration();
    FileSystem.SetDefaultUri(config, "hdfs://localhost:0");
    config.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, NameDir);
    config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, NameDir);
    config.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    return config;
}
// Formats a standalone namenode and runs the complete NNThroughputBenchmark
// operation suite ("-op all") against it.
public virtual void TestNNThroughput()
{
    Configuration benchConf = new HdfsConfiguration();
    FilePath nameDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "name");
    benchConf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
    FileSystem.SetDefaultUri(benchConf, "hdfs://localhost:" + 0);
    benchConf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    DFSTestUtil.FormatNameNode(benchConf);
    string[] benchArgs = new string[] { "-op", "all" };
    NNThroughputBenchmark.RunBenchmark(benchConf, Arrays.AsList(benchArgs));
}
// Balancer test configuration that swaps in node-group-aware network
// topology and block placement implementations.
internal static Configuration CreateConf()
{
    Configuration config = new HdfsConfiguration();
    TestBalancer.InitConf(config);
    config.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
    config.Set(CommonConfigurationKeysPublic.NetTopologyImplKey, typeof(NetworkTopologyWithNodeGroup).FullName);
    config.Set(DFSConfigKeys.DfsBlockReplicatorClassnameKey, typeof(BlockPlacementPolicyWithNodeGroup).FullName);
    return config;
}
// Verifies that webhdfs requests are audit-logged with the correct remote
// address: the direct client address normally, and the X-Forwarded-For
// address only once the proxy host has been registered as trusted.
public virtual void TestWebHdfsAuditLogger()
{
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeAuditLoggersKey, typeof(TestAuditLogger.DummyAuditLogger).FullName);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    GetOpParam.OP op = GetOpParam.OP.Getfilestatus;
    try
    {
        cluster.WaitClusterUp();
        NUnit.Framework.Assert.IsTrue(TestAuditLogger.DummyAuditLogger.initialized);
        URI uri = new URI("http", NetUtils.GetHostPortString(cluster.GetNameNode().GetHttpAddress()), "/webhdfs/v1/", op.ToQueryString(), null);
        // non-proxy request: audit logs the direct client address
        HttpURLConnection conn = (HttpURLConnection)uri.ToURL().OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(200, conn.GetResponseCode());
        conn.Disconnect();
        NUnit.Framework.Assert.AreEqual(1, TestAuditLogger.DummyAuditLogger.logCount);
        NUnit.Framework.Assert.AreEqual("127.0.0.1", TestAuditLogger.DummyAuditLogger.remoteAddr);
        // non-trusted proxied request: the forwarded address must be ignored
        conn = (HttpURLConnection)uri.ToURL().OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.SetRequestProperty("X-Forwarded-For", "1.1.1.1");
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(200, conn.GetResponseCode());
        conn.Disconnect();
        NUnit.Framework.Assert.AreEqual(2, TestAuditLogger.DummyAuditLogger.logCount);
        NUnit.Framework.Assert.AreEqual("127.0.0.1", TestAuditLogger.DummyAuditLogger.remoteAddr);
        // trusted proxied request: now the forwarded address must be logged
        conf.Set(ProxyServers.ConfHadoopProxyservers, "127.0.0.1");
        ProxyUsers.RefreshSuperUserGroupsConfiguration(conf);
        conn = (HttpURLConnection)uri.ToURL().OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.SetRequestProperty("X-Forwarded-For", "1.1.1.1");
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(200, conn.GetResponseCode());
        conn.Disconnect();
        NUnit.Framework.Assert.AreEqual(3, TestAuditLogger.DummyAuditLogger.logCount);
        NUnit.Framework.Assert.AreEqual("1.1.1.1", TestAuditLogger.DummyAuditLogger.remoteAddr);
    }
    finally
    {
        cluster.Shutdown();
    }
}
// Exercises lazy delegation-token fetch over swebhdfs (HTTPS): builds an
// SSL-enabled mini cluster, points the swebhdfs URI at its bound HTTPS
// address, then runs the shared ValidateLazyTokenFetch checks.
public virtual void TestLazyTokenFetchForSWebhdfs()
{
    // for any(Token.class)
    MiniDFSCluster cluster = null;
    // NOTE(review): 'fs' is never assigned in this view, so the Cleanup call
    // in the finally block always receives null — confirm this is intentional.
    SWebHdfsFileSystem fs = null;
    try
    {
        Configuration clusterConf = new HdfsConfiguration(conf);
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Simple, clusterConf);
        clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        string Basedir = Runtime.GetProperty("test.build.dir", "target/test-dir") + "/" + typeof(TestWebHdfsTokens).Name;
        string keystoresDir;
        string sslConfDir;
        clusterConf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
        // HTTPS-only policy with ephemeral ports on both NN and DN.
        clusterConf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
        clusterConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, "localhost:0");
        clusterConf.Set(DFSConfigKeys.DfsDatanodeHttpsAddressKey, "localhost:0");
        // Recreate the keystore base directory from scratch.
        FilePath @base = new FilePath(Basedir);
        FileUtil.FullyDelete(@base);
        @base.Mkdirs();
        keystoresDir = new FilePath(Basedir).GetAbsolutePath();
        sslConfDir = KeyStoreTestUtil.GetClasspathDir(typeof(TestWebHdfsTokens));
        KeyStoreTestUtil.SetupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
        // trick the NN into thinking security is enabled w/o it trying
        // to login from a keytab
        UserGroupInformation.SetConfiguration(clusterConf);
        cluster = new MiniDFSCluster.Builder(clusterConf).NumDataNodes(1).Build();
        cluster.WaitActive();
        // Rewrite the https address key with the actual bound port.
        IPEndPoint addr = cluster.GetNameNode().GetHttpsAddress();
        string nnAddr = NetUtils.GetHostPortString(addr);
        clusterConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, nnAddr);
        // Switch the client-side view to Kerberos before validating.
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, clusterConf);
        UserGroupInformation.SetConfiguration(clusterConf);
        uri = DFSUtil.CreateUri("swebhdfs", cluster.GetNameNode().GetHttpsAddress());
        ValidateLazyTokenFetch(clusterConf);
    }
    finally
    {
        IOUtils.Cleanup(null, fs);
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// HDFS-8147: verifies the Mover exits with the IO_EXCEPTION code after
/// exhausting its configured retry attempts when a block move keeps failing
/// (the block file is deleted so every move attempt fails).
/// </summary>
public virtual void TestMoverFailedRetry()
{
    // HDFS-8147
    Configuration conf = new HdfsConfiguration();
    // Allow only two retry attempts so the test fails fast.
    conf.Set(DFSConfigKeys.DfsMoverRetryMaxAttemptsKey, "2");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes(
        new StorageType[][] {
            new StorageType[] { StorageType.Disk, StorageType.Archive },
            new StorageType[] { StorageType.Disk, StorageType.Archive },
            new StorageType[] { StorageType.Disk, StorageType.Archive } }).Build();
    try
    {
        cluster.WaitActive();
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string file = "/testMoverFailedRetry";
        // write to DISK
        FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
        @out.WriteChars("testMoverFailedRetry");
        @out.Close();
        // Delete block file so, block move will fail with FileNotFoundException
        LocatedBlock lb = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
        cluster.CorruptBlockOnDataNodesByDeletingBlockFile(lb.GetBlock());
        // move to ARCHIVE
        dfs.SetStoragePolicy(new Path(file), "COLD");
        // 'file' is already a string; the redundant ToString() call was removed.
        int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file });
        NUnit.Framework.Assert.AreEqual("Movement should fail after some retry",
            ExitStatus.IoException.GetExitCode(), rc);
    }
    finally
    {
        cluster.Shutdown();
    }
}
// A configured audit logger that throws must surface as a RemoteException
// to the client rather than being swallowed by the NameNode.
public virtual void TestBrokenLogger()
{
    Configuration config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeAuditLoggersKey, typeof(TestAuditLogger.BrokenAuditLogger).FullName);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).Build();
    try
    {
        cluster.WaitClusterUp();
        FileSystem fs = cluster.GetFileSystem();
        long now = Runtime.CurrentTimeMillis();
        fs.SetTimes(new Path("/"), now, now);
        NUnit.Framework.Assert.Fail("Expected exception due to broken audit logger.");
    }
    catch (RemoteException)
    {
        // Expected.
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Tests that an edits log created using CreateEditsLog is valid and can be
/// loaded successfully by a namenode.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestCanLoadCreatedEditsLog()
{
    // Format namenode.
    HdfsConfiguration config = new HdfsConfiguration();
    FilePath nameDir = new FilePath(HdfsDir, "name");
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Util.FileAsURI(nameDir).ToString());
    DFSTestUtil.FormatNameNode(config);
    // Generate edits with CreateEditsLog, then move every produced file into
    // the freshly formatted name dir's "current" directory.
    CreateEditsLog.Main(new string[] { "-f", "1000", "0", "1", "-d", TestDir.GetAbsolutePath() });
    Path editsWildcard = new Path(TestDir.GetAbsolutePath(), "*");
    FileContext localFc = FileContext.GetLocalFSFileContext();
    foreach (FileStatus edits in localFc.Util().GlobStatus(editsWildcard))
    {
        Path source = edits.GetPath();
        Path target = new Path(new FilePath(nameDir, "current").GetAbsolutePath(), source.GetName());
        localFc.Rename(source, target);
    }
    // Start a namenode (without re-formatting) to try to load the edits.
    cluster = new MiniDFSCluster.Builder(config).Format(false).ManageNameDfsDirs(false).WaitSafeMode(false).Build();
    cluster.WaitClusterUp();
}
/// <exception cref="System.IO.IOException"/>
private Configuration GetConf()
{
    // Two redundant name/edits directories under the mini-cluster base dir.
    string baseDir = MiniDFSCluster.GetBaseDirectory();
    string nameDirs = Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(baseDir, "name1"))
        + "," + Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(baseDir, "name2"));
    Configuration config = new HdfsConfiguration();
    // Ephemeral ports everywhere; permissions disabled for test convenience.
    FileSystem.SetDefaultUri(config, "hdfs://localhost:0");
    config.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDirs);
    config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameDirs);
    config.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
    config.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    return config;
}
// Server requires SASL data transfer protection but the client configures
// none: replication must fail and the DataNode must log the bad handshake.
public virtual void TestServerSaslNoClientSasl()
{
    HdfsConfiguration serverConf = CreateSecureConfig("authentication,integrity,privacy");
    // Set short retry timeouts so this test runs faster
    serverConf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
    StartCluster(serverConf);
    HdfsConfiguration clientConf = new HdfsConfiguration(serverConf);
    clientConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, string.Empty);
    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.CaptureLogs(LogFactory.GetLog(typeof(DataNode)));
    try
    {
        DoTest(clientConf);
        NUnit.Framework.Assert.Fail("Should fail if SASL data transfer protection is not " + "configured or not supported in client");
    }
    catch (IOException e)
    {
        GenericTestUtils.AssertMatches(e.Message, "could only be replicated to 0 nodes");
    }
    finally
    {
        logs.StopCapturing();
    }
    // The DataNode side must have logged the failed SASL handshake.
    GenericTestUtils.AssertMatches(logs.GetOutput(), "Failed to read expected SASL data transfer protection " + "handshake from client at");
}
// MiniDFSCluster's NameNode binds only 127.0.0.1 by default; setting the
// RPC bind-host key to 0.0.0.0 must make it listen on all interfaces.
public virtual void TestNamenodeRpcBindAny()
{
    Configuration config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeRpcBindHostKey, "0.0.0.0");
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(config).Build();
        cluster.WaitActive();
        NameNodeRpcServer rpcServer = (NameNodeRpcServer)cluster.GetNameNodeRpc();
        NUnit.Framework.Assert.AreEqual("0.0.0.0", rpcServer.GetClientRpcServer().GetListenerAddress().GetHostName());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
        // Reset the config
        config.Unset(DFSConfigKeys.DfsNamenodeRpcBindHostKey);
    }
}
// Both SetTimes and SetPermission must be audited, and the permission
// value passed to SetPermission must reach the audit logger.
public virtual void TestAuditLoggerWithSetPermission()
{
    Configuration config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeAuditLoggersKey, typeof(TestAuditLogger.DummyAuditLogger).FullName);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).Build();
    try
    {
        cluster.WaitClusterUp();
        NUnit.Framework.Assert.IsTrue(TestAuditLogger.DummyAuditLogger.initialized);
        TestAuditLogger.DummyAuditLogger.ResetLogCount();
        FileSystem fs = cluster.GetFileSystem();
        long now = Runtime.CurrentTimeMillis();
        Path root = new Path("/");
        fs.SetTimes(root, now, now);
        fs.SetPermission(root, new FsPermission(TestPermission));
        NUnit.Framework.Assert.AreEqual(TestPermission, TestAuditLogger.DummyAuditLogger.foundPermission);
        NUnit.Framework.Assert.AreEqual(2, TestAuditLogger.DummyAuditLogger.logCount);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>HTTPS test is different since we need to setup SSL configuration.</summary>
/// <remarks>
/// HTTPS test is different since we need to setup SSL configuration.
/// NN also binds the wildcard address for HTTPS port by default so we must
/// pick a different host/port combination.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestHttpsBindHostKey()
{
    Configuration config = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    Log.Info("Testing behavior without " + DfsNamenodeHttpsBindHostKey);
    SetupSsl();
    config.Set(DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
    // Phase 1: without the bind-host key the NN must not bind the wildcard.
    try
    {
        config.Set(DfsNamenodeHttpsAddressKey, LocalhostServerAddress);
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).Build();
        cluster.WaitActive();
        string boundAddress = cluster.GetNameNode().GetHttpsAddress().ToString();
        NUnit.Framework.Assert.IsFalse("HTTP Bind address not expected to be wildcard by default.", boundAddress.StartsWith(WildcardAddress));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
            cluster = null;
        }
    }
    Log.Info("Testing behavior with " + DfsNamenodeHttpsBindHostKey);
    // Phase 2: with the bind-host key set, the NN must bind the wildcard.
    config.Set(DfsNamenodeHttpsBindHostKey, WildcardAddress);
    try
    {
        config.Set(DfsNamenodeHttpsAddressKey, LocalhostServerAddress);
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).Build();
        cluster.WaitActive();
        string boundAddress = cluster.GetNameNode().GetHttpsAddress().ToString();
        NUnit.Framework.Assert.IsTrue("HTTP Bind address " + boundAddress + " is not wildcard.", boundAddress.StartsWith(WildcardAddress));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <exception cref="System.Exception"/>
public virtual void TestExtraArgsThrowsError()
{
    // An unexpected trailing argument must be reported in the tool output.
    HdfsConfiguration config = new HdfsConfiguration();
    config.Set("mykey", "myval");
    string[] toolArgs = new string[] { "-namenodes", "unexpected-arg" };
    string output = RunTool(config, toolArgs, false);
    NUnit.Framework.Assert.IsTrue(output.Contains("Did not expect argument: unexpected-arg"));
}
/// <exception cref="System.IO.IOException"/>
public virtual void TestServiceRpcBindHostKey()
{
    Configuration config = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    Log.Info("Testing without " + DfsNamenodeServiceRpcBindHostKey);
    config.Set(DfsNamenodeServiceRpcAddressKey, LocalhostServerAddress);
    // Phase 1: without the bind-host key the service RPC server must not
    // bind the wildcard address.
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).Build();
        cluster.WaitActive();
        string boundAddress = GetServiceRpcServerAddress(cluster);
        Assert.AssertThat("Bind address not expected to be wildcard by default.", boundAddress, IsNot.Not("/" + WildcardAddress));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
            cluster = null;
        }
    }
    Log.Info("Testing with " + DfsNamenodeServiceRpcBindHostKey);
    // Phase 2: with the bind-host key set to the wildcard, it must bind it.
    config.Set(DfsNamenodeServiceRpcBindHostKey, WildcardAddress);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).Build();
        cluster.WaitActive();
        string boundAddress = GetServiceRpcServerAddress(cluster);
        Assert.AssertThat("Bind address " + boundAddress + " is not wildcard.", boundAddress, IS.Is("/" + WildcardAddress));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Verifies that a Kerberos-secured NameNode plus a secure DataNode (on
/// privileged ports below 1024) can start and that the DataNode registers.
/// </summary>
public virtual void TestSecureNameNode()
{
    MiniDFSCluster cluster = null;
    try
    {
        // Principals and keytabs are supplied via system properties.
        string nnPrincipal = Runtime.GetProperty("dfs.namenode.kerberos.principal");
        string nnSpnegoPrincipal = Runtime.GetProperty("dfs.namenode.kerberos.internal.spnego.principal");
        string nnKeyTab = Runtime.GetProperty("dfs.namenode.keytab.file");
        NUnit.Framework.Assert.IsNotNull("NameNode principal was not specified", nnPrincipal);
        NUnit.Framework.Assert.IsNotNull("NameNode SPNEGO principal was not specified", nnSpnegoPrincipal);
        NUnit.Framework.Assert.IsNotNull("NameNode keytab was not specified", nnKeyTab);
        string dnPrincipal = Runtime.GetProperty("dfs.datanode.kerberos.principal");
        string dnKeyTab = Runtime.GetProperty("dfs.datanode.keytab.file");
        NUnit.Framework.Assert.IsNotNull("DataNode principal was not specified", dnPrincipal);
        NUnit.Framework.Assert.IsNotNull("DataNode keytab was not specified", dnKeyTab);
        Configuration conf = new HdfsConfiguration();
        conf.Set(CommonConfigurationKeys.HadoopSecurityAuthentication, "kerberos");
        conf.Set(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey, nnPrincipal);
        conf.Set(DFSConfigKeys.DfsNamenodeKerberosInternalSpnegoPrincipalKey, nnSpnegoPrincipal);
        conf.Set(DFSConfigKeys.DfsNamenodeKeytabFileKey, nnKeyTab);
        conf.Set(DFSConfigKeys.DfsDatanodeKerberosPrincipalKey, dnPrincipal);
        conf.Set(DFSConfigKeys.DfsDatanodeKeytabFileKey, dnKeyTab);
        // Secure DataNode requires using ports lower than 1024.
        conf.Set(DFSConfigKeys.DfsDatanodeAddressKey, "127.0.0.1:1004");
        conf.Set(DFSConfigKeys.DfsDatanodeHttpAddressKey, "127.0.0.1:1006");
        // FIX: "700" is a permission mask, not a directory path. The original
        // set DfsDatanodeDataDirKey (dfs.datanode.data.dir) to "700"; the
        // upstream test sets the data-dir *permission* key
        // (dfs.datanode.data.dir.perm) instead.
        conf.Set(DFSConfigKeys.DfsDatanodeDataDirPermissionKey, "700");
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).CheckDataNodeAddrConfig(true).Build();
        cluster.WaitActive();
        NUnit.Framework.Assert.IsTrue(cluster.IsDataNodeUp());
    }
    catch (Exception ex)
    {
        // NOTE(review): swallowing the exception means startup errors only
        // print a stack trace instead of failing the test — confirm this
        // leniency is intentional for environments without Kerberos.
        Sharpen.Runtime.PrintStackTrace(ex);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
// Standby reports ready; with a succeeding fencer configured, a plain
// failover (no --forcefence) must succeed.
public virtual void TestFailoverWithFencerConfigured()
{
    Org.Mockito.Mockito.DoReturn(StandbyReadyResult).When(mockProtocol).GetServiceStatus();
    HdfsConfiguration haConf = GetHAConf();
    haConf.Set(DFSConfigKeys.DfsHaFenceMethodsKey, GetFencerTrueCommand());
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(0, RunTool("-failover", "nn1", "nn2"));
}
/// <exception cref="System.Exception"/>
public virtual void TestGetSpecificKey()
{
    HdfsConfiguration conf = new HdfsConfiguration();
    // The value is stored with surrounding spaces; the tool is expected to
    // return it trimmed, followed by a line terminator.
    conf.Set("mykey", " myval ");
    string[] args = new string[] { "-confKey", "mykey" };
    string toolResult = RunTool(conf, args, true);
    // NOTE(review): "%n" is a Java format specifier; .NET string.Format does
    // not expand it — presumably the Sharpen runtime shim handles this.
    // Confirm the expected value really contains a newline at runtime.
    NUnit.Framework.Assert.AreEqual(string.Format("myval%n"), toolResult);
}
// A secure DataNode started with the HTTP_AND_HTTPS policy (rather than
// HTTPS_ONLY) must refuse to start.
public virtual void TestDataNodeAbortsIfNotHttpsOnly()
{
    HdfsConfiguration secureConf = CreateSecureConfig("authentication");
    secureConf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpAndHttps.ToString());
    exception.Expect(typeof(RuntimeException));
    exception.ExpectMessage("Cannot start secure DataNode");
    StartCluster(secureConf);
}
// An unparseable fencer configuration must cause a forced failover to fail.
public virtual void TestFailoverWithFenceAndBadFencer()
{
    Org.Mockito.Mockito.DoReturn(StandbyReadyResult).When(mockProtocol).GetServiceStatus();
    HdfsConfiguration haConf = GetHAConf();
    haConf.Set(DFSConfigKeys.DfsHaFenceMethodsKey, "foobar!");
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(-1, RunTool("-failover", "nn1", "nn2", "--forcefence"));
}
// Starts a cluster with webhdfs enabled, a fully permissive umask, and no
// limit on path component length, then initializes the shared fixtures.
public static void BeforeClassSetup()
{
    Configuration config = new HdfsConfiguration();
    config.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
    config.Set(FsPermission.UmaskLabel, "000");
    config.SetInt(DFSConfigKeys.DfsNamenodeMaxComponentLengthKey, 0);
    cluster = new MiniDFSCluster.Builder(config).Build();
    webhdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(config, WebHdfsFileSystem.Scheme);
    dfs = cluster.GetFileSystem();
}
// The server advertises all three SASL QOPs; a client requesting "privacy"
// must be able to complete the data transfer.
public virtual void TestPrivacy()
{
    HdfsConfiguration serverConf = CreateSecureConfig("authentication,integrity,privacy");
    StartCluster(serverConf);
    HdfsConfiguration clientConf = new HdfsConfiguration(serverConf);
    clientConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, "privacy");
    DoTest(clientConf);
}
// Resets call tracking, then starts a cluster whose NameNode uses the test
// INode attribute provider with ACLs enabled.
public virtual void SetUp()
{
    Called.Clear();
    Configuration config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeInodeAttributesProviderKey, typeof(TestINodeAttributeProvider.MyAuthorizationProvider).FullName);
    config.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
    // Skip fsync on edit-log writes to speed up the test.
    EditLogFileOutputStream.SetShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(config).Build();
}
// Starts a two-datanode cluster with a restrictive umask and creates the
// current user's home directory as the default working directory.
public static void ClusterSetupAtBegining()
{
    Configuration config = new HdfsConfiguration();
    // set permissions very restrictive
    config.Set(CommonConfigurationKeys.FsPermissionsUmaskKey, "077");
    cluster = new MiniDFSCluster.Builder(config).NumDataNodes(2).Build();
    fc = FileContext.GetFileContext(cluster.GetURI(0), config);
    string shortName = UserGroupInformation.GetCurrentUser().GetShortUserName();
    defaultWorkingDirectory = fc.MakeQualified(new Path("/user/" + shortName));
    fc.Mkdir(defaultWorkingDirectory, FileContext.DefaultPerm, true);
}