/// <summary>
/// Lazy delegation-token fetch over the "swebhdfs" (HTTPS) scheme: brings up
/// a MiniDFSCluster under SIMPLE auth with SSL configured, then flips the
/// client-side config to KERBEROS so ValidateLazyTokenFetch can exercise the
/// token-fetch path without a real keytab login.
/// </summary>
public virtual void TestLazyTokenFetchForSWebhdfs()
{
    // for any(Token.class)
    MiniDFSCluster cluster = null;
    // NOTE(review): fs is never assigned in this method, so the Cleanup call
    // in the finally block is effectively a no-op — confirm this is intended.
    SWebHdfsFileSystem fs = null;
    try
    {
        Configuration clusterConf = new HdfsConfiguration(conf);
        // Start with SIMPLE auth so the NN can come up without a keytab.
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Simple, clusterConf);
        clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        string Basedir = Runtime.GetProperty("test.build.dir", "target/test-dir") + "/" + typeof(TestWebHdfsTokens).Name;
        string keystoresDir;
        string sslConfDir;
        clusterConf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
        // HTTPS-only policy with ephemeral (port 0) NN and DN addresses.
        clusterConf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
        clusterConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, "localhost:0");
        clusterConf.Set(DFSConfigKeys.DfsDatanodeHttpsAddressKey, "localhost:0");
        // Recreate a clean directory to hold the generated test keystores.
        FilePath @base = new FilePath(Basedir);
        FileUtil.FullyDelete(@base);
        @base.Mkdirs();
        keystoresDir = new FilePath(Basedir).GetAbsolutePath();
        sslConfDir = KeyStoreTestUtil.GetClasspathDir(typeof(TestWebHdfsTokens));
        KeyStoreTestUtil.SetupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
        // trick the NN into thinking security is enabled w/o it trying
        // to login from a keytab
        UserGroupInformation.SetConfiguration(clusterConf);
        cluster = new MiniDFSCluster.Builder(clusterConf).NumDataNodes(1).Build();
        cluster.WaitActive();
        // Point the config at the HTTPS address the NN actually bound to.
        IPEndPoint addr = cluster.GetNameNode().GetHttpsAddress();
        string nnAddr = NetUtils.GetHostPortString(addr);
        clusterConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, nnAddr);
        // Switch the client view to Kerberos so token fetching is exercised.
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, clusterConf);
        UserGroupInformation.SetConfiguration(clusterConf);
        uri = DFSUtil.CreateUri("swebhdfs", cluster.GetNameNode().GetHttpsAddress());
        ValidateLazyTokenFetch(clusterConf);
    }
    finally
    {
        IOUtils.Cleanup(null, fs);
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Lazy delegation-token fetch over plain "webhdfs" (HTTP): the cluster is
/// started under SIMPLE auth, then the config is flipped to KERBEROS before
/// ValidateLazyTokenFetch runs. Counterpart of the swebhdfs variant, minus SSL.
/// </summary>
public virtual void TestLazyTokenFetchForWebhdfs()
{
    // for any(Token.class)
    MiniDFSCluster cluster = null;
    // NOTE(review): fs is never assigned in this method, so the Cleanup call
    // in the finally block is effectively a no-op — confirm this is intended.
    WebHdfsFileSystem fs = null;
    try
    {
        Configuration clusterConf = new HdfsConfiguration(conf);
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Simple, clusterConf);
        clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        // trick the NN into thinking security is enabled w/o it trying
        // to login from a keytab
        UserGroupInformation.SetConfiguration(clusterConf);
        cluster = new MiniDFSCluster.Builder(clusterConf).NumDataNodes(1).Build();
        cluster.WaitActive();
        // Switch the client view to Kerberos so token fetching is exercised.
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, clusterConf);
        UserGroupInformation.SetConfiguration(clusterConf);
        uri = DFSUtil.CreateUri("webhdfs", cluster.GetNameNode().GetHttpAddress());
        ValidateLazyTokenFetch(clusterConf);
    }
    finally
    {
        IOUtils.Cleanup(null, fs);
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// With block access tokens enabled, the last located block reported for a
/// file that is still being written must carry a token of the expected kind.
/// </summary>
public virtual void TestBlockTokenInLastLocatedBlock()
{
    Configuration config = new HdfsConfiguration();
    config.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
    config.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
    cluster.WaitActive();
    try
    {
        FileSystem dfs = cluster.GetFileSystem();
        string fileName = "/testBlockTokenInLastLocatedBlock";
        FSDataOutputStream stream = dfs.Create(new Path(fileName), (short)1);
        stream.Write(new byte[1000]);
        // ensure that the first block is written out (see FSOutputSummer#flush)
        stream.Flush();
        // Poll until the NN reports a last located block for the open file.
        LocatedBlocks located = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
        while (located.GetLastLocatedBlock() == null)
        {
            Sharpen.Thread.Sleep(100);
            located = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
        }
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken =
            located.GetLastLocatedBlock().GetBlockToken();
        NUnit.Framework.Assert.AreEqual(BlockTokenIdentifier.KindName, blockToken.GetKind());
        stream.Close();
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// A small read (17 bytes) must NOT trigger drop-behind page-cache eviction
/// even though the DataNode is configured to drop cache behind reads/writes.
/// Writes during file creation, by contrast, should have dropped their range.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestFadviseSkippedForSmallReads()
{
    // start a cluster
    Log.Info("testFadviseSkippedForSmallReads");
    tracker.Clear();
    Configuration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindReadsKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindWritesKey, true);
    MiniDFSCluster cluster = null;
    string TestPath = "/test";
    int TestPathLen = MaxTestFileLen;
    FSDataInputStream fis = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        // create new file
        CreateHdfsFile(fs, new Path(TestPath), TestPathLen, null);
        // Since the DataNode was configured with drop-behind, and we didn't
        // specify any policy, we should have done drop-behind.
        ExtendedBlock block = cluster.GetNameNode().GetRpcServer().GetBlockLocations(TestPath, 0, long.MaxValue).Get(0).GetBlock();
        string fadvisedFileName = cluster.GetBlockFile(0, block).GetName();
        TestCachingStrategy.Stats stats = tracker.GetStats(fadvisedFileName);
        stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize);
        stats.Clear();
        stats.AssertNotDroppedInRange(0, TestPathLen);
        // read file
        fis = fs.Open(new Path(TestPath));
        byte[] buf = new byte[17];
        fis.ReadFully(4096, buf, 0, buf.Length);
        // we should not have dropped anything because of the small read.
        stats = tracker.GetStats(fadvisedFileName);
        stats.AssertNotDroppedInRange(0, TestPathLen - WritePacketSize);
    }
    finally
    {
        IOUtils.Cleanup(null, fis);
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// One-time suite setup: starts a two-DataNode mini-cluster and binds the
/// shared FileContext to it.
/// </summary>
public static void ClusterSetupAtBegining()
{
    SupportsBlocks = true;
    // Delegation tokens must be issuable even without Kerberos.
    Conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(Conf).NumDataNodes(2);
    cluster = builder.Build();
    cluster.WaitClusterUp();
    fc = FileContext.GetFileContext(cluster.GetURI(0), Conf);
}
/// <summary>Starts the shared mini-cluster with delegation tokens always usable.</summary>
public static void SetUp()
{
    Configuration config = new HdfsConfiguration();
    config.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
    cluster = new MiniDFSCluster.Builder(config).Build();
    cluster.WaitActive();
}
/// <summary>
/// SASL may be left unconfigured (empty QOP) when the test-only flag to
/// ignore secure ports is set; the cluster must still start and pass DoTest.
/// </summary>
public virtual void TestNoSaslAndSecurePortsIgnored()
{
    HdfsConfiguration config = CreateSecureConfig(string.Empty);
    config.SetBoolean(DFSConfigKeys.IgnoreSecurePortsForTestingKey, true);
    StartCluster(config);
    DoTest(config);
}
/// <summary>
/// Builds the common configuration for zero-copy (mmap short-circuit) read
/// tests. Skips the test when native IO or Unix domain sockets are missing.
/// </summary>
/// <returns>configuration with short-circuit reads and mmap caching enabled</returns>
public static HdfsConfiguration InitZeroCopyTest()
{
    Assume.AssumeTrue(NativeIO.IsAvailable());
    Assume.AssumeTrue(SystemUtils.IsOsUnix);
    HdfsConfiguration config = new HdfsConfiguration();
    // Short-circuit reads with checksums skipped, small mmap cache with a
    // short timeout so eviction is exercised quickly.
    config.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    config.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, true);
    config.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    config.SetInt(DFSConfigKeys.DfsClientMmapCacheSize, 3);
    config.SetLong(DFSConfigKeys.DfsClientMmapCacheTimeoutMs, 100);
    FilePath socket = new FilePath(sockDir.GetDir(), "TestRequestMmapAccess._PORT.sock");
    config.Set(DFSConfigKeys.DfsDomainSocketPathKey, socket.GetAbsolutePath());
    // Fast heartbeat/cache-report intervals keep the test responsive.
    config.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    config.SetLong(DFSConfigKeys.DfsCachereportIntervalMsecKey, 1000);
    config.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 1000);
    return config;
}
/// <summary>
/// Test the scenario where the DataNode defaults to not dropping the cache,
/// but our client defaults are set.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestClientDefaults()
{
    // start a cluster
    Log.Info("testClientDefaults");
    tracker.Clear();
    Configuration conf = new HdfsConfiguration();
    // DataNode-side drop-behind disabled; client-side defaults enabled — the
    // client settings should win and cause drop-behind on both write and read.
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindReadsKey, false);
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeDropCacheBehindWritesKey, false);
    conf.SetBoolean(DFSConfigKeys.DfsClientCacheDropBehindReads, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientCacheDropBehindWrites, true);
    MiniDFSCluster cluster = null;
    string TestPath = "/test";
    int TestPathLen = MaxTestFileLen;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        // create new file
        CreateHdfsFile(fs, new Path(TestPath), TestPathLen, null);
        // verify that we dropped everything from the cache during file creation.
        ExtendedBlock block = cluster.GetNameNode().GetRpcServer().GetBlockLocations(TestPath, 0, long.MaxValue).Get(0).GetBlock();
        string fadvisedFileName = cluster.GetBlockFile(0, block).GetName();
        TestCachingStrategy.Stats stats = tracker.GetStats(fadvisedFileName);
        stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize);
        stats.Clear();
        // read file
        ReadHdfsFile(fs, new Path(TestPath), long.MaxValue, null);
        // verify that we dropped everything from the cache.
        NUnit.Framework.Assert.IsNotNull(stats);
        stats.AssertDroppedInRange(0, TestPathLen - WritePacketSize);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// The NameNode retry cache is created by default and suppressed when
/// DfsNamenodeEnableRetryCacheKey is set to false.
/// </summary>
public virtual void TestRetryCacheConfig()
{
    Configuration config = new HdfsConfiguration();
    // By default retry configuration should be enabled
    NUnit.Framework.Assert.IsNotNull(FSNamesystem.InitRetryCache(config));
    // If retry cache is disabled, it should not be created
    config.SetBoolean(DFSConfigKeys.DfsNamenodeEnableRetryCacheKey, false);
    NUnit.Framework.Assert.IsNull(FSNamesystem.InitRetryCache(config));
}
/// <summary>
/// Per-test setup: installs the test authorization provider, enables ACLs,
/// and starts a fresh mini-cluster.
/// </summary>
public virtual void SetUp()
{
    Called.Clear();
    Configuration config = new HdfsConfiguration();
    config.Set(DFSConfigKeys.DfsNamenodeInodeAttributesProviderKey,
        typeof(TestINodeAttributeProvider.MyAuthorizationProvider).FullName);
    config.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
    // Skip fsync on edit-log writes to speed the test up.
    EditLogFileOutputStream.SetShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(config).Build();
}
/// <summary>
/// Builds a NameNode configuration using ephemeral ports, the shared NameDir
/// for both image and edits, and permissions disabled.
/// </summary>
private Configuration GetConf()
{
    Configuration config = new HdfsConfiguration();
    FileSystem.SetDefaultUri(config, "hdfs://localhost:0");
    config.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, NameDir);
    config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, NameDir);
    config.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    return config;
}
/// <summary>
/// Class-level setup: WebHDFS enabled, umask cleared, and no path-component
/// length limit, then starts the cluster and caches both filesystem handles.
/// </summary>
public static void BeforeClassSetup()
{
    Configuration config = new HdfsConfiguration();
    config.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
    config.Set(FsPermission.UmaskLabel, "000");
    config.SetInt(DFSConfigKeys.DfsNamenodeMaxComponentLengthKey, 0);
    cluster = new MiniDFSCluster.Builder(config).Build();
    webhdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(config, WebHdfsFileSystem.Scheme);
    dfs = cluster.GetFileSystem();
}
/// <summary>
/// One-time suite setup: starts a two-DataNode mini-cluster, binds the shared
/// FileContext, and pre-creates the current user's home directory.
/// </summary>
public static void ClusterSetupAtBegining()
{
    SupportsBlocks = true;
    Conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
    cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(2).Build();
    cluster.WaitClusterUp();
    fc = FileContext.GetFileContext(cluster.GetURI(0), Conf);
    // Pre-create the login user's home directory so relative paths resolve.
    string shortName = UserGroupInformation.GetCurrentUser().GetShortUserName();
    Path home = fc.MakeQualified(new Path("/user/" + shortName));
    fc.Mkdir(home, FileContext.DefaultPerm, true);
}
/// <summary>
/// With auto-HA enabled, a manual "-failover" must be routed through the
/// ZKFC's graceful-failover path rather than the direct HA protocol.
/// </summary>
public virtual void TestFailoverWithAutoHa()
{
    Org.Mockito.Mockito.DoReturn(StandbyReadyResult).When(mockProtocol).GetServiceStatus();
    // Turn on auto-HA in the config
    HdfsConfiguration haConf = GetHAConf();
    haConf.SetBoolean(DFSConfigKeys.DfsHaAutoFailoverEnabledKey, true);
    haConf.Set(DFSConfigKeys.DfsHaFenceMethodsKey, GetFencerTrueCommand());
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(0, RunTool("-failover", "nn1", "nn2"));
    Org.Mockito.Mockito.Verify(mockZkfcProtocol).GracefulFailover();
}
/// <summary>
/// Read-only admin operations (checkHealth, getServiceState) must still work
/// directly against the HA service protocol when auto-HA is enabled.
/// </summary>
public virtual void TestMonitoringOperationsWithAutoHaEnabled()
{
    Org.Mockito.Mockito.DoReturn(StandbyReadyResult).When(mockProtocol).GetServiceStatus();
    // Turn on auto-HA
    HdfsConfiguration haConf = GetHAConf();
    haConf.SetBoolean(DFSConfigKeys.DfsHaAutoFailoverEnabledKey, true);
    tool.SetConf(haConf);
    NUnit.Framework.Assert.AreEqual(0, RunTool("-checkHealth", "nn1"));
    Org.Mockito.Mockito.Verify(mockProtocol).MonitorHealth();
    NUnit.Framework.Assert.AreEqual(0, RunTool("-getServiceState", "nn1"));
    Org.Mockito.Mockito.Verify(mockProtocol).GetServiceStatus();
}
/// <summary>
/// Class-level setup for the secure NN+QJM tests: starts a MiniKdc, creates
/// principals/keytab for the login user, and builds a fully Kerberized,
/// HTTPS-only base configuration (NN, DN, and JournalNode) with SSL keystores.
/// </summary>
public static void Init()
{
    // Fresh scratch directory for the KDC, keytab and keystores.
    baseDir = new FilePath(Runtime.GetProperty("test.build.dir", "target/test-dir"), typeof(TestSecureNNWithQJM).Name);
    FileUtil.FullyDelete(baseDir);
    NUnit.Framework.Assert.IsTrue(baseDir.Mkdirs());
    Properties kdcConf = MiniKdc.CreateConf();
    kdc = new MiniKdc(kdcConf, baseDir);
    kdc.Start();
    baseConf = new HdfsConfiguration();
    SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, baseConf);
    UserGroupInformation.SetConfiguration(baseConf);
    NUnit.Framework.Assert.IsTrue("Expected configuration to enable security",
        UserGroupInformation.IsSecurityEnabled());
    string userName = UserGroupInformation.GetLoginUser().GetShortUserName();
    FilePath keytabFile = new FilePath(baseDir, userName + ".keytab");
    string keytab = keytabFile.GetAbsolutePath();
    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
    string krbInstance = Path.Windows ? "127.0.0.1" : "localhost";
    // One keytab holding both the HDFS service principal and the HTTP
    // (SPNEGO) principal.
    kdc.CreatePrincipal(keytabFile, userName + "/" + krbInstance, "HTTP/" + krbInstance);
    string hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.GetRealm();
    string spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.GetRealm();
    // Same principal/keytab reused for NN, DN and JournalNode roles.
    baseConf.Set(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey, hdfsPrincipal);
    baseConf.Set(DFSConfigKeys.DfsNamenodeKeytabFileKey, keytab);
    baseConf.Set(DFSConfigKeys.DfsDatanodeKerberosPrincipalKey, hdfsPrincipal);
    baseConf.Set(DFSConfigKeys.DfsDatanodeKeytabFileKey, keytab);
    baseConf.Set(DFSConfigKeys.DfsWebAuthenticationKerberosPrincipalKey, spnegoPrincipal);
    baseConf.Set(DFSConfigKeys.DfsJournalnodeKeytabFileKey, keytab);
    baseConf.Set(DFSConfigKeys.DfsJournalnodeKerberosPrincipalKey, hdfsPrincipal);
    baseConf.Set(DFSConfigKeys.DfsJournalnodeKerberosInternalSpnegoPrincipalKey, spnegoPrincipal);
    baseConf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
    baseConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, "authentication");
    // HTTPS-only endpoints, all on ephemeral ports.
    baseConf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
    baseConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, "localhost:0");
    baseConf.Set(DFSConfigKeys.DfsDatanodeHttpsAddressKey, "localhost:0");
    baseConf.Set(DFSConfigKeys.DfsJournalnodeHttpsAddressKey, "localhost:0");
    baseConf.SetInt(CommonConfigurationKeys.IpcClientConnectMaxRetriesOnSaslKey, 10);
    string keystoresDir = baseDir.GetAbsolutePath();
    string sslConfDir = KeyStoreTestUtil.GetClasspathDir(typeof(TestSecureNNWithQJM));
    KeyStoreTestUtil.SetupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
}
/// <summary>
/// Suite setup for replication-policy tests: formats and starts a NameNode
/// with stale-node avoidance and load-aware placement enabled, then registers
/// six simulated DataNodes (three per rack) with identical capacity and
/// zero-load heartbeats.
/// </summary>
public static void SetupCluster()
{
    Configuration conf = new HdfsConfiguration();
    // Two racks, three nodes each — exercises rack-aware placement.
    string[] racks = new string[] { "/rack1", "/rack1", "/rack1", "/rack2", "/rack2", "/rack2" };
    storages = DFSTestUtil.CreateDatanodeStorageInfos(racks);
    dataNodes = DFSTestUtil.ToDatanodeDescriptor(storages);
    FileSystem.SetDefaultUri(conf, "hdfs://localhost:0");
    conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    FilePath baseDir = PathUtils.GetTestDir(typeof(TestReplicationPolicy));
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, new FilePath(baseDir, "name").GetPath());
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForWriteKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeReplicationConsiderloadKey, true);
    DFSTestUtil.FormatNameNode(conf);
    namenode = new NameNode(conf);
    int blockSize = 1024;
    dnrList = new AList<DatanodeRegistration>();
    dnManager = namenode.GetNamesystem().GetBlockManager().GetDatanodeManager();
    // Register DNs
    for (int i = 0; i < 6; i++)
    {
        DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
            new StorageInfo(HdfsServerConstants.NodeType.DataNode),
            new ExportedBlockKeys(), VersionInfo.GetVersion());
        dnrList.AddItem(dnr);
        dnManager.RegisterDatanode(dnr);
        // Give every node the same capacity/remaining so placement decisions
        // are driven by topology and load, not by free space.
        dataNodes[i].GetStorageInfos()[0].SetUtilizationForTesting(
            2 * HdfsConstants.MinBlocksForWrite * blockSize, 0L,
            2 * HdfsConstants.MinBlocksForWrite * blockSize, 0L);
        // Zeroed heartbeat: no xceivers, no cache usage, no failed volumes.
        dataNodes[i].UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[i]), 0L, 0L, 0, 0, null);
    }
}
/// <summary>
/// Builds the common configuration for dataset-cache tests: fast refresh and
/// report intervals, a bounded locked-memory budget, and short-circuit reads
/// over a domain socket.
/// </summary>
private static Configuration GetDefaultConf()
{
    HdfsConfiguration config = new HdfsConfiguration();
    config.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 50);
    config.SetLong(DFSConfigKeys.DfsCachereportIntervalMsecKey, 250);
    config.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    config.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey, TestFsDatasetCache.CacheCapacity);
    config.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    config.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    FilePath socket = new FilePath(sockDir.GetDir(), "sock");
    config.Set(DFSConfigKeys.DfsDomainSocketPathKey, socket.GetAbsolutePath());
    return config;
}
/// <summary>
/// Builds a NameNode configuration with two redundant name/edits directories
/// (name1, name2) under the mini-cluster base dir, ephemeral ports, and
/// permissions disabled.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private Configuration GetConf()
{
    string baseDir = MiniDFSCluster.GetBaseDirectory();
    // Comma-separated URI list: both name dirs receive image and edits.
    string nameDirs = Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(baseDir, "name1"))
        + "," + Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(baseDir, "name2"));
    Configuration conf = new HdfsConfiguration();
    FileSystem.SetDefaultUri(conf, "hdfs://localhost:0");
    conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDirs);
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameDirs);
    conf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    return (conf);
}
/// <summary>
/// A delegation token fetched through a stubbed connection factory must carry
/// the kind ("bar") and service ("foo") injected by the stubbed response,
/// whether obtained via GetDelegationToken or a raw GETDELEGATIONTOKEN op.
/// </summary>
public virtual void TestSetTokenServiceAndKind()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration clusterConf = new HdfsConfiguration(conf);
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Simple, clusterConf);
        clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        // trick the NN into thinking security is enabled w/o it trying
        // to login from a keytab
        UserGroupInformation.SetConfiguration(clusterConf);
        cluster = new MiniDFSCluster.Builder(clusterConf).NumDataNodes(0).Build();
        cluster.WaitActive();
        SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, clusterConf);
        WebHdfsFileSystem fs = WebHdfsTestUtil.GetWebHdfsFileSystem(clusterConf, "webhdfs");
        Whitebox.SetInternalState(fs, "canRefreshDelegationToken", true);
        // _URLConnectionFactory_268 / _ConnectionConfigurator_262 are
        // Sharpen-generated names for anonymous classes that fake the HTTP
        // response carrying the token.
        URLConnectionFactory factory = new _URLConnectionFactory_268(new _ConnectionConfigurator_262());
        Whitebox.SetInternalState(fs, "connectionFactory", factory);
        Org.Apache.Hadoop.Security.Token.Token<object> token1 = fs.GetDelegationToken();
        NUnit.Framework.Assert.AreEqual(new Text("bar"), token1.GetKind());
        HttpOpParam.OP op = GetOpParam.OP.Getdelegationtoken;
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token2 =
            new _FsPathResponseRunner_281(op, null, new RenewerParam(null)).Run();
        NUnit.Framework.Assert.AreEqual(new Text("bar"), token2.GetKind());
        NUnit.Framework.Assert.AreEqual(new Text("foo"), token2.GetService());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// With auto-HA enabled, manual state transitions must be refused unless the
/// operator passes -forcemanual; forced transitions must be tagged with the
/// RequestByUserForced source so the ZKFC knows a human overrode automation.
/// </summary>
public virtual void TestMutativeOperationsWithAutoHaEnabled()
{
    Org.Mockito.Mockito.DoReturn(StandbyReadyResult).When(mockProtocol).GetServiceStatus();
    // Turn on auto-HA in the config
    HdfsConfiguration conf = GetHAConf();
    conf.SetBoolean(DFSConfigKeys.DfsHaAutoFailoverEnabledKey, true);
    conf.Set(DFSConfigKeys.DfsHaFenceMethodsKey, GetFencerTrueCommand());
    tool.SetConf(conf);
    // Should fail without the forcemanual flag
    NUnit.Framework.Assert.AreEqual(-1, RunTool("-transitionToActive", "nn1"));
    NUnit.Framework.Assert.IsTrue(errOutput.Contains("Refusing to manually manage"));
    NUnit.Framework.Assert.AreEqual(-1, RunTool("-transitionToStandby", "nn1"));
    NUnit.Framework.Assert.IsTrue(errOutput.Contains("Refusing to manually manage"));
    Org.Mockito.Mockito.Verify(mockProtocol, Org.Mockito.Mockito.Never()).TransitionToActive(AnyReqInfo());
    Org.Mockito.Mockito.Verify(mockProtocol, Org.Mockito.Mockito.Never()).TransitionToStandby(AnyReqInfo());
    // Force flag should bypass the check and change the request source
    // for the RPC
    SetupConfirmationOnSystemIn();
    NUnit.Framework.Assert.AreEqual(0, RunTool("-transitionToActive", "-forcemanual", "nn1"));
    SetupConfirmationOnSystemIn();
    NUnit.Framework.Assert.AreEqual(0, RunTool("-transitionToStandby", "-forcemanual", "nn1"));
    Org.Mockito.Mockito.Verify(mockProtocol, Org.Mockito.Mockito.Times(1)).TransitionToActive(reqInfoCaptor.Capture());
    Org.Mockito.Mockito.Verify(mockProtocol, Org.Mockito.Mockito.Times(1)).TransitionToStandby(reqInfoCaptor.Capture());
    // All of the RPCs should have had the "force" source
    foreach (HAServiceProtocol.StateChangeRequestInfo ri in reqInfoCaptor.GetAllValues())
    {
        NUnit.Framework.Assert.AreEqual(HAServiceProtocol.RequestSource.RequestByUserForced, ri.GetSource());
    }
}
/// <summary>
/// With client-side remote-symlink resolution disabled, opening a symlink via
/// either FileContext or DistributedFileSystem must fail with an IOException
/// explaining that resolution is disabled.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestSymlinkHdfsDisable()
{
    Configuration conf = new HdfsConfiguration();
    // disable symlink resolution
    conf.SetBoolean(CommonConfigurationKeys.FsClientResolveRemoteSymlinksKey, false);
    // spin up minicluster, get dfs and filecontext
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    try
    {
        DistributedFileSystem dfs = cluster.GetFileSystem();
        FileContext fc = FileContext.GetFileContext(cluster.GetURI(0), conf);
        // Create test files/links
        FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
        Path root = helper.GetTestRootPath(fc);
        Path target = new Path(root, "target");
        Path link = new Path(root, "link");
        DFSTestUtil.CreateFile(dfs, target, 4096, (short)1, unchecked((int)(0xDEADDEAD)));
        fc.CreateSymlink(target, link, false);
        // Try to resolve links with FileSystem and FileContext
        try
        {
            fc.Open(link);
            NUnit.Framework.Assert.Fail("Expected error when attempting to resolve link");
        }
        catch (IOException e)
        {
            GenericTestUtils.AssertExceptionContains("resolution is disabled", e);
        }
        try
        {
            dfs.Open(link);
            NUnit.Framework.Assert.Fail("Expected error when attempting to resolve link");
        }
        catch (IOException e)
        {
            GenericTestUtils.AssertExceptionContains("resolution is disabled", e);
        }
    }
    finally
    {
        // FIX: the original never shut the mini-cluster down, leaking its
        // threads and ports into subsequent tests.
        cluster.Shutdown();
    }
}
/// <summary>
/// When nntop is disabled, no TopAuditLogger may appear among the
/// NameNode's installed audit loggers.
/// </summary>
public virtual void TestDisableTopAuditLogger()
{
    Configuration config = new HdfsConfiguration();
    config.SetBoolean(DFSConfigKeys.NntopEnabledKey, false);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).Build();
    try
    {
        cluster.WaitClusterUp();
        IList<AuditLogger> loggers = cluster.GetNameNode().GetNamesystem().GetAuditLoggers();
        foreach (AuditLogger logger in loggers)
        {
            NUnit.Framework.Assert.IsFalse(
                "top audit logger is still hooked in after it is disabled",
                logger is TopAuditLogger);
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// NameNode.Format must skip non-file-based (plugin) shared-edits URIs in an
/// HA configuration instead of failing on them.
/// </summary>
public virtual void TestFormatShouldBeIgnoredForNonFileBasedDirs()
{
    Configuration config = new HdfsConfiguration();
    string logicalName = "mycluster";
    // DFS_NAMENODE_RPC_ADDRESS_KEY are required to identify the NameNode
    // is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
    // is considered.
    string localhost = "127.0.0.1";
    IPEndPoint firstNn = new IPEndPoint(localhost, 8020);
    IPEndPoint secondNn = new IPEndPoint(localhost, 9020);
    HATestUtil.SetFailoverConfigurations(config, logicalName, firstNn, secondNn);
    config.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
        new FilePath(DfsBaseDir, "name").GetAbsolutePath());
    config.SetBoolean(DFSConfigKeys.DfsNamenodeSupportAllowFormatKey, true);
    // Register a dummy journal plugin for the "dummy" scheme.
    config.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeEditsPluginPrefix, "dummy"),
        typeof(TestGenericJournalConf.DummyJournalManager).FullName);
    config.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
        "dummy://" + localhost + ":2181/ledgers");
    config.Set(DFSConfigKeys.DfsHaNamenodeIdKey, "nn1");
    // An internal assert is added to verify the working of test
    NameNode.Format(config);
}
/// <summary>Creates configuration for starting a secure cluster.</summary>
/// <param name="dataTransferProtection">supported QOPs</param>
/// <returns>configuration for starting a secure cluster</returns>
/// <exception cref="System.Exception">if there is any failure</exception>
protected internal virtual HdfsConfiguration CreateSecureConfig(string dataTransferProtection)
{
    HdfsConfiguration secureConf = new HdfsConfiguration();
    SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, secureConf);
    // Kerberos principals and keytabs for NN, DN and SPNEGO.
    secureConf.Set(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey, hdfsPrincipal);
    secureConf.Set(DFSConfigKeys.DfsNamenodeKeytabFileKey, keytab);
    secureConf.Set(DFSConfigKeys.DfsDatanodeKerberosPrincipalKey, hdfsPrincipal);
    secureConf.Set(DFSConfigKeys.DfsDatanodeKeytabFileKey, keytab);
    secureConf.Set(DFSConfigKeys.DfsWebAuthenticationKerberosPrincipalKey, spnegoPrincipal);
    secureConf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
    secureConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, dataTransferProtection);
    // HTTPS-only endpoints on ephemeral ports.
    secureConf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
    secureConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, "localhost:0");
    secureConf.Set(DFSConfigKeys.DfsDatanodeHttpsAddressKey, "localhost:0");
    secureConf.SetInt(CommonConfigurationKeys.IpcClientConnectMaxRetriesOnSaslKey, 10);
    string keystores = baseDir.GetAbsolutePath();
    string sslDir = KeyStoreTestUtil.GetClasspathDir(this.GetType());
    KeyStoreTestUtil.SetupSSLConfig(keystores, sslDir, secureConf, false);
    return secureConf;
}
/// <summary>
/// Delegation tokens must survive saveNamespace and repeated cluster restarts:
/// tokens issued before a checkpoint or restart can still be renewed (and
/// finally cancelled) afterwards. Also verifies the edit log holds the
/// expected transaction counts before and after the save.
/// </summary>
public virtual void TestSaveNamespace()
{
    DistributedFileSystem fs = null;
    try
    {
        Configuration conf = new HdfsConfiguration();
        conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        FSNamesystem namesystem = cluster.GetNamesystem();
        string renewer = UserGroupInformation.GetLoginUser().GetUserName();
        // Two tokens issued before any checkpoint/restart.
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token1 =
            namesystem.GetDelegationToken(new Text(renewer));
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token2 =
            namesystem.GetDelegationToken(new Text(renewer));
        // Saving image without safe mode should fail
        DFSAdmin admin = new DFSAdmin(conf);
        string[] args = new string[] { "-saveNamespace" };
        // verify that the edits file is NOT empty
        NameNode nn = cluster.GetNameNode();
        foreach (Storage.StorageDirectory sd in nn.GetFSImage().GetStorage().DirIterable(null))
        {
            FileJournalManager.EditLogFile log = FSImageTestUtil.FindLatestEditsLog(sd);
            NUnit.Framework.Assert.IsTrue(log.IsInProgress());
            log.ValidateLog();
            long numTransactions = (log.GetLastTxId() - log.GetFirstTxId()) + 1;
            NUnit.Framework.Assert.AreEqual("In-progress log " + log + " should have 5 transactions",
                5, numTransactions);
        }
        // Saving image in safe mode should succeed
        fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
        try
        {
            admin.Run(args);
        }
        catch (Exception e)
        {
            throw new IOException(e.Message);
        }
        // verify that the edits file is empty except for the START txn
        foreach (Storage.StorageDirectory sd_1 in nn.GetFSImage().GetStorage().DirIterable(null))
        {
            FileJournalManager.EditLogFile log = FSImageTestUtil.FindLatestEditsLog(sd_1);
            NUnit.Framework.Assert.IsTrue(log.IsInProgress());
            log.ValidateLog();
            long numTransactions = (log.GetLastTxId() - log.GetFirstTxId()) + 1;
            NUnit.Framework.Assert.AreEqual("In-progress log " + log + " should only have START txn",
                1, numTransactions);
        }
        // restart cluster
        cluster.Shutdown();
        cluster = null;
        // Format(false) restarts on the saved image rather than reformatting.
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false).Build();
        cluster.WaitActive();
        // Should be able to renew & cancel the delegation token after cluster restart
        try
        {
            RenewToken(token1);
            RenewToken(token2);
        }
        catch (IOException)
        {
            NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
        }
        namesystem = cluster.GetNamesystem();
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token3 =
            namesystem.GetDelegationToken(new Text(renewer));
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token4 =
            namesystem.GetDelegationToken(new Text(renewer));
        // restart cluster again
        cluster.Shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false).Build();
        cluster.WaitActive();
        namesystem = cluster.GetNamesystem();
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token5 =
            namesystem.GetDelegationToken(new Text(renewer));
        try
        {
            RenewToken(token1);
            RenewToken(token2);
            RenewToken(token3);
            RenewToken(token4);
            RenewToken(token5);
        }
        catch (IOException)
        {
            NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
        }
        // restart cluster again
        cluster.Shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false).Build();
        cluster.WaitActive();
        namesystem = cluster.GetNamesystem();
        // After the final restart every token must renew and cancel cleanly.
        try
        {
            RenewToken(token1);
            CancelToken(token1);
            RenewToken(token2);
            CancelToken(token2);
            RenewToken(token3);
            CancelToken(token3);
            RenewToken(token4);
            CancelToken(token4);
            RenewToken(token5);
            CancelToken(token5);
        }
        catch (IOException)
        {
            NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
        }
    }
    finally
    {
        if (fs != null)
        {
            fs.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Explicitly-supplied permissions on mkdir/create are honored for the leaf,
/// while implicitly-created parent directories inherit the leaf permission
/// plus owner rwx (the | 0xc0 below ORs in owner read+write bits). Also
/// checks that a non-zero umask ("022") is applied by the static helpers.
/// </summary>
public virtual void TestCreate()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, true);
    // Clear the umask so requested permissions arrive unmodified.
    conf.Set(FsPermission.UmaskLabel, "000");
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        fs = FileSystem.Get(conf);
        FsPermission rootPerm = CheckPermission(fs, "/", null);
        FsPermission inheritPerm = FsPermission.CreateImmutable((short)(rootPerm.ToShort() | 0xc0));
        // 0x1ff == 0777: full permissions requested for the whole chain.
        FsPermission dirPerm = new FsPermission((short)0x1ff);
        fs.Mkdirs(new Path("/a1/a2/a3"), dirPerm);
        CheckPermission(fs, "/a1", dirPerm);
        CheckPermission(fs, "/a1/a2", dirPerm);
        CheckPermission(fs, "/a1/a2/a3", dirPerm);
        // 0x53 == 0123: restrictive mode; intermediate dirs get it | 0xc0.
        dirPerm = new FsPermission((short)0x53);
        FsPermission permission = FsPermission.CreateImmutable((short)(dirPerm.ToShort() | 0xc0));
        fs.Mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm);
        CheckPermission(fs, "/aa/1", permission);
        CheckPermission(fs, "/aa/1/aa/2", permission);
        CheckPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
        // 0x124 == 0444: read-only file; parents inherit root perm | 0xc0.
        FsPermission filePerm = new FsPermission((short)0x124);
        Path p = new Path("/b1/b2/b3.txt");
        FSDataOutputStream @out = fs.Create(p, filePerm, true,
            conf.GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
            fs.GetDefaultReplication(p), fs.GetDefaultBlockSize(p), null);
        @out.Write(123);
        @out.Close();
        CheckPermission(fs, "/b1", inheritPerm);
        CheckPermission(fs, "/b1/b2", inheritPerm);
        CheckPermission(fs, "/b1/b2/b3.txt", filePerm);
        // Re-enable the default umask and verify the static helpers apply it.
        conf.Set(FsPermission.UmaskLabel, "022");
        // 0x1b6 == 0666
        permission = FsPermission.CreateImmutable((short)0x1b6);
        FileSystem.Mkdirs(fs, new Path("/c1"), new FsPermission(permission));
        FileSystem.Create(fs, new Path("/c1/c2.txt"), new FsPermission(permission));
        CheckPermission(fs, "/c1", permission);
        CheckPermission(fs, "/c1/c2.txt", permission);
    }
    finally
    {
        try
        {
            if (fs != null)
            {
                fs.Close();
            }
        }
        catch (Exception e)
        {
            Log.Error(StringUtils.StringifyException(e));
        }
        try
        {
            if (cluster != null)
            {
                cluster.Shutdown();
            }
        }
        catch (Exception e)
        {
            Log.Error(StringUtils.StringifyException(e));
        }
    }
}
/// <summary>
/// End-to-end permission enforcement checks against a running mini cluster:
/// operations on nonexistent paths throw FileNotFoundException; create applies
/// the default fs umask; SetPermission works, including the execute bit on
/// plain files; and a non-privileged test user is denied mkdir/create/open
/// until permissions are opened up, after which rename succeeds.
/// </summary>
public virtual void TestFilePermision()
{
    Configuration config = new HdfsConfiguration();
    config.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, true);
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(config).NumDataNodes(3).Build();
    dfsCluster.WaitActive();
    try
    {
        FileSystem nameNodeFs = FileSystem.Get(config);
        // SetOwner/SetPermission on a path that does not exist must throw
        // FileNotFoundException; Assert.IsTrue(false) fails if no exception came.
        NUnit.Framework.Assert.IsFalse(nameNodeFs.Exists(ChildFile1));
        try
        {
            nameNodeFs.SetOwner(ChildFile1, "foo", "bar");
            NUnit.Framework.Assert.IsTrue(false);
        }
        catch (FileNotFoundException e)
        {
            Log.Info("GOOD: got " + e);
        }
        try
        {
            nameNodeFs.SetPermission(ChildFile1, new FsPermission((short)0x1ff));
            NUnit.Framework.Assert.IsTrue(false);
        }
        catch (FileNotFoundException e)
        {
            Log.Info("GOOD: got " + e);
        }
        // User-specified permission has the default fs umask applied:
        // FS_PERMISSIONS_UMASK_DEFAULT is 0022, so 0x1ff (0777) becomes rwxr-xr-x.
        FSDataOutputStream outStream = nameNodeFs.Create(
            ChildFile1, new FsPermission((short)0x1ff), true, 1024, (short)1, 1024, null);
        FileStatus fileStatus = nameNodeFs.GetFileStatus(ChildFile1);
        NUnit.Framework.Assert.IsTrue(fileStatus.GetPermission().ToString().Equals("rwxr-xr-x"));
        nameNodeFs.Delete(ChildFile1, false);
        // Plain dir/file creations are legal; default file permission is rw-r--r--.
        nameNodeFs.Mkdirs(ChildDir1);
        outStream = nameNodeFs.Create(ChildFile1);
        fileStatus = nameNodeFs.GetFileStatus(ChildFile1);
        NUnit.Framework.Assert.IsTrue(fileStatus.GetPermission().ToString().Equals("rw-r--r--"));
        byte[] written = new byte[FileLen];
        Ran.NextBytes(written);
        outStream.Write(written);
        outStream.Close();
        nameNodeFs.SetPermission(ChildFile1, new FsPermission("700"));
        fileStatus = nameNodeFs.GetFileStatus(ChildFile1);
        NUnit.Framework.Assert.IsTrue(fileStatus.GetPermission().ToString().Equals("rwx------"));
        // The owner can still read the file back byte-for-byte.
        byte[] readBack = new byte[FileLen];
        FSDataInputStream inStream = nameNodeFs.Open(ChildFile1);
        int numRead = inStream.Read(readBack);
        NUnit.Framework.Assert.IsTrue(numRead == FileLen);
        for (int idx = 0; idx < FileLen; idx++)
        {
            NUnit.Framework.Assert.AreEqual(written[idx], readBack[idx]);
        }
        // Execute-bit support on plain files.
        nameNodeFs.SetPermission(ChildFile1, new FsPermission("755"));
        fileStatus = nameNodeFs.GetFileStatus(ChildFile1);
        NUnit.Framework.Assert.IsTrue(fileStatus.GetPermission().ToString().Equals("rwxr-xr-x"));
        nameNodeFs.SetPermission(ChildFile1, new FsPermission("744"));
        fileStatus = nameNodeFs.GetFileStatus(ChildFile1);
        NUnit.Framework.Assert.IsTrue(fileStatus.GetPermission().ToString().Equals("rwxr--r--"));
        nameNodeFs.SetPermission(ChildFile1, new FsPermission("700"));
        // Illegal file/dir creation as a non-privileged user.
        UserGroupInformation testUgi = UserGroupInformation.CreateUserForTesting(UserName, GroupNames);
        FileSystem userFs = DFSTestUtil.GetFileSystemAs(testUgi, config);
        // Mkdirs of an existing directory not owned by this user must not throw.
        userFs.Mkdirs(ChildDir1);
        // Illegal mkdir / file creation / file open.
        NUnit.Framework.Assert.IsTrue(!CanMkdirs(userFs, ChildDir2));
        NUnit.Framework.Assert.IsTrue(!CanCreate(userFs, ChildFile2));
        NUnit.Framework.Assert.IsTrue(!CanOpen(userFs, ChildFile1));
        // Open up the tree (0x1ed == 0755 on RootPath, 0x1ff == 0777 on "/")
        // so the test user can create a directory and rename it.
        nameNodeFs.SetPermission(RootPath, new FsPermission((short)0x1ed));
        nameNodeFs.SetPermission(ChildDir1, new FsPermission("777"));
        nameNodeFs.SetPermission(new Path("/"), new FsPermission((short)0x1ff));
        Path renameSource = new Path("/foo/bar");
        userFs.Mkdirs(renameSource);
        NUnit.Framework.Assert.IsTrue(CanRename(userFs, renameSource, ChildDir1));
    }
    finally
    {
        dfsCluster.Shutdown();
    }
}
/// <summary>
/// Verifies that reading a block whose on-disk replica file has been
/// truncated to zero length fails with an IOException, while a second,
/// untouched file remains fully readable afterwards.
/// Flow: write two files, record the second file's bytes, locate the first
/// file's block file on disk, shut the cluster down, truncate the block file,
/// restart without reformatting, then assert the corrupt block cannot be read
/// but the intact file still matches its original contents.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestHandleTruncatedBlockFile()
{
    MiniDFSCluster cluster = null;
    HdfsConfiguration conf = new HdfsConfiguration();
    // Short-circuit local reads with checksums left ON so truncation is detectable.
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "/tmp/testHandleTruncatedBlockFile._PORT");
    conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
    Path TestPath = new Path("/a");
    Path TestPath2 = new Path("/b");
    long RandomSeed = 4567L;
    long RandomSeed2 = 4568L;
    FSDataInputStream fsIn = null;
    int TestLength = 3456;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        DFSTestUtil.CreateFile(fs, TestPath, TestLength, (short)1, RandomSeed);
        DFSTestUtil.CreateFile(fs, TestPath2, TestLength, (short)1, RandomSeed2);
        // Remember the second file's contents; it is re-read and compared at the end.
        fsIn = cluster.GetFileSystem().Open(TestPath2);
        byte[] original = new byte[TestLength];
        IOUtils.ReadFully(fsIn, original, 0, TestLength);
        fsIn.Close();
        fsIn = null;
        try
        {
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
        }
        catch (Exception e)
        {
            NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: " + e);
        }
        catch (TimeoutException e)
        {
            NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, TestPath);
        FilePath dataFile = cluster.GetBlockFile(0, block);
        // Shut the cluster down before mutating the block file on disk.
        cluster.Shutdown();
        cluster = null;
        RandomAccessFile raf = null;
        try
        {
            raf = new RandomAccessFile(dataFile, "rw");
            // Corrupt the replica by truncating its data file to zero bytes.
            raf.SetLength(0);
        }
        finally
        {
            if (raf != null)
            {
                raf.Close();
            }
        }
        // Restart on the existing (now corrupted) storage; Format(false) keeps it.
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Format(false).Build();
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        fsIn = fs.Open(TestPath);
        try
        {
            byte[] buf = new byte[100];
            fsIn.Seek(2000);
            fsIn.ReadFully(buf, 0, buf.Length);
            NUnit.Framework.Assert.Fail("shouldn't be able to read from corrupt 0-length " + "block file.");
        }
        catch (IOException e)
        {
            // Expected: the truncated block must surface as a read failure.
            DFSClient.Log.Error("caught exception ", e);
        }
        fsIn.Close();
        fsIn = null;
        // We should still be able to read the other file.
        // This is important because it indicates that we detected that the
        // previous block was corrupt, rather than blaming the problem on
        // communication.
        fsIn = fs.Open(TestPath2);
        byte[] buf_1 = new byte[original.Length];
        fsIn.ReadFully(buf_1, 0, buf_1.Length);
        TestBlockReaderLocal.AssertArrayRegionsEqual(original, 0, buf_1, 0, original.Length);
        fsIn.Close();
        fsIn = null;
    }
    finally
    {
        if (fsIn != null)
        {
            fsIn.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}