/// <summary>
/// Pick the proxy URI base to redirect to: the single configured base when
/// there is only one (external proxy or non-HA RM), otherwise the base
/// belonging to the currently active RM in an HA setup.
/// </summary>
/// <returns>the proxy URI base for redirection; never null.</returns>
/// <exception cref="Javax.Servlet.ServletException">
/// if the redirect target cannot be determined.
/// </exception>
protected internal virtual string FindRedirectUrl()
{
    string addr;
    if (proxyUriBases.Count == 1)
    {
        // external proxy or not RM HA
        addr = proxyUriBases.Values.GetEnumerator().Next();
    }
    else
    {
        // RM HA: resolve the active RM's webapp address and look up its base.
        YarnConfiguration conf = new YarnConfiguration();
        string activeRMId = RMHAUtils.FindActiveRMHAId(conf);
        string addressPropertyPrefix = YarnConfiguration.UseHttps(conf)
            ? YarnConfiguration.RmWebappHttpsAddress
            : YarnConfiguration.RmWebappAddress;
        string host = conf.Get(HAUtil.AddSuffix(addressPropertyPrefix, activeRMId));
        // BUG FIX: the plain indexer throws KeyNotFoundException for an unknown
        // host, so the null check below could never fire. TryGetValue restores
        // the intended "not found -> ServletException" behavior.
        if (!proxyUriBases.TryGetValue(host, out addr))
        {
            addr = null;
        }
    }
    if (addr == null)
    {
        throw new ServletException("Could not determine the proxy server for redirection");
    }
    return addr;
}
/// <summary>
/// Verifies HA startup semantics: both NNs begin in standby with no edit
/// files anywhere, edits appear only once a node becomes active, and a
/// standby restart neither finalizes shared edits nor applies in-progress
/// segments.
/// </summary>
public virtual void TestStartup()
{
    Configuration conf = new Configuration();
    HAUtil.SetAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    try
    {
        // During HA startup, both nodes should be in standby and we
        // shouldn't have any edits files in any edits directory!
        IList<URI> allDirs = Lists.NewArrayList();
        Sharpen.Collections.AddAll(allDirs, cluster.GetNameDirs(0));
        Sharpen.Collections.AddAll(allDirs, cluster.GetNameDirs(1));
        allDirs.AddItem(cluster.GetSharedEditsDir(0, 1));
        AssertNoEditFiles(allDirs);
        // Set the first NN to active, make sure it creates edits in its own
        // dirs and the shared dir. The standby should still have no edits!
        cluster.TransitionToActive(0);
        AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
        AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)),
            NNStorage.GetInProgressEditsFileName(1));
        AssertNoEditFiles(cluster.GetNameDirs(1));
        cluster.GetNameNode(0).GetRpcServer()
            .Mkdirs("/test", FsPermission.CreateImmutable((short)0x1ed), true);
        // Restarting the standby should not finalize any edits files
        // in the shared directory when it starts up!
        cluster.RestartNameNode(1);
        AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
        AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)),
            NNStorage.GetInProgressEditsFileName(1));
        AssertNoEditFiles(cluster.GetNameDirs(1));
        // Additionally it should not have applied any in-progress logs at
        // start-up -- otherwise, it would have read half-way into the current
        // log segment, and on the next roll, it would have to either replay
        // starting in the middle of the segment (not allowed) or double-replay
        // the edits (incorrect).
        NUnit.Framework.Assert.IsNull(
            NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
        cluster.GetNameNode(0).GetRpcServer()
            .Mkdirs("/test2", FsPermission.CreateImmutable((short)0x1ed), true);
        // If we restart NN0, it'll come back as standby, and we can
        // transition NN1 to active and make sure it reads edits correctly
        // at this point.
        cluster.RestartNameNode(0);
        cluster.TransitionToActive(1);
        // NN1 should have both the edits that came before its restart,
        // and the edits that came after its restart.
        NUnit.Framework.Assert.IsNotNull(
            NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
        NUnit.Framework.Assert.IsNotNull(
            NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test2", true));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Resolves the RM webapp address (without scheme) from configuration,
/// honoring the given HTTP policy. With HA enabled, the first configured
/// RM id is used; the redirect mechanism routes to the active RM.
/// </summary>
public static string GetResolvedRemoteRMWebAppURLWithoutScheme(Configuration conf,
    HttpConfig.Policy httpPolicy)
{
    string rmId = null;
    if (HAUtil.IsHAEnabled(conf))
    {
        // If HA enabled, pick one of the RM-IDs and rely on redirect to go to
        // the Active RM
        rmId = (string)Sharpen.Collections.ToArray(HAUtil.GetRMHAIds(conf))[0];
    }
    IPEndPoint address;
    if (httpPolicy == HttpConfig.Policy.HttpsOnly)
    {
        address = conf.GetSocketAddr(
            rmId == null
                ? YarnConfiguration.RmWebappHttpsAddress
                : HAUtil.AddSuffix(YarnConfiguration.RmWebappHttpsAddress, rmId),
            YarnConfiguration.DefaultRmWebappHttpsAddress,
            YarnConfiguration.DefaultRmWebappHttpsPort);
    }
    else
    {
        address = conf.GetSocketAddr(
            rmId == null
                ? YarnConfiguration.RmWebappAddress
                : HAUtil.AddSuffix(YarnConfiguration.RmWebappAddress, rmId),
            YarnConfiguration.DefaultRmWebappAddress,
            YarnConfiguration.DefaultRmWebappPort);
    }
    return GetResolvedAddress(address);
}
/// <summary>
/// Reads this node's nameservice/namenode ids from configuration, validates
/// that HA with shared edits is enabled, and locates the other NameNode of
/// the pair, populating its ids, IPC/HTTP addresses, and the storage and
/// edits directories used for formatting.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void ParseConfAndFindOtherNN()
{
    Configuration conf = GetConf();
    nsId = DFSUtil.GetNamenodeNameServiceId(conf);
    if (!HAUtil.IsHAEnabled(conf, nsId))
    {
        throw new HadoopIllegalArgumentException("HA is not enabled for this namenode.");
    }
    nnId = HAUtil.GetNameNodeId(conf, nsId);
    NameNode.InitializeGenericKeys(conf, nsId, nnId);
    if (!HAUtil.UsesSharedEditsDir(conf))
    {
        throw new HadoopIllegalArgumentException(
            "Shared edits storage is not enabled for this namenode.");
    }
    Configuration otherNode = HAUtil.GetConfForOtherNode(conf);
    otherNNId = HAUtil.GetNameNodeId(otherNode, nsId);
    otherIpcAddr = NameNode.GetServiceAddress(otherNode, true);
    // The other NN must resolve to a concrete, non-wildcard IPC endpoint.
    Preconditions.CheckArgument(
        otherIpcAddr.Port != 0 && !otherIpcAddr.Address.IsAnyLocalAddress(),
        "Could not determine valid IPC address for other NameNode (%s)" + ", got: %s",
        otherNNId, otherIpcAddr);
    string scheme = DFSUtil.GetHttpClientScheme(conf);
    otherHttpAddr = DFSUtil.GetInfoServerWithDefaultHost(
        otherIpcAddr.GetHostName(), otherNode, scheme).ToURL();
    dirsToFormat = FSNamesystem.GetNamespaceDirs(conf);
    editUrisToFormat = FSNamesystem.GetNamespaceEditsDirs(conf, false);
    sharedEditsUris = FSNamesystem.GetSharedEditsDirs(conf);
}
/// <summary>
/// Initializes the embedded leader elector: validates the ZK quorum setting,
/// derives the election znode path from the cluster id, and constructs the
/// ActiveStandbyElector, failing fast if the parent znode holds invalid data.
/// </summary>
/// <exception cref="System.Exception"/>
protected override void ServiceInit(Configuration conf)
{
    conf = conf is YarnConfiguration ? conf : new YarnConfiguration(conf);
    string zkQuorum = conf.Get(YarnConfiguration.RmZkAddress);
    if (zkQuorum == null)
    {
        throw new YarnRuntimeException("Embedded automatic failover " + "is enabled, but "
            + YarnConfiguration.RmZkAddress + " is not set");
    }
    string rmId = HAUtil.GetRMHAId(conf);
    string clusterId = YarnConfiguration.GetClusterId(conf);
    localActiveNodeInfo = CreateActiveNodeInfo(clusterId, rmId);
    string zkBasePath = conf.Get(YarnConfiguration.AutoFailoverZkBasePath,
        YarnConfiguration.DefaultAutoFailoverZkBasePath);
    string electionZNode = zkBasePath + "/" + clusterId;
    long zkSessionTimeout = conf.GetLong(YarnConfiguration.RmZkTimeoutMs,
        YarnConfiguration.DefaultRmZkTimeoutMs);
    IList<ACL> zkAcls = RMZKUtils.GetZKAcls(conf);
    IList<ZKUtil.ZKAuthInfo> zkAuths = RMZKUtils.GetZKAuths(conf);
    int maxRetryNum = conf.GetInt(CommonConfigurationKeys.HaFcElectorZkOpRetriesKey,
        CommonConfigurationKeys.HaFcElectorZkOpRetriesDefault);
    elector = new ActiveStandbyElector(zkQuorum, (int)zkSessionTimeout, electionZNode,
        zkAcls, zkAuths, this, maxRetryNum);
    elector.EnsureParentZNode();
    if (!IsParentZnodeSafe(clusterId))
    {
        NotifyFatalError(electionZNode + " znode has invalid data! "
            + "Might need formatting!");
    }
    base.ServiceInit(conf);
}
/// <summary>
/// Checks the RM delegation token service name: a single service entry
/// without HA, and one entry per RM id when HA is enabled.
/// </summary>
public virtual void TestGetRMDelegationTokenService()
{
    string defaultRMAddress = YarnConfiguration.DefaultRmAddress;
    YarnConfiguration conf = new YarnConfiguration();
    // HA is not enabled
    Text tokenService = ClientRMProxy.GetRMDelegationTokenService(conf);
    string[] services = tokenService.ToString().Split(",");
    NUnit.Framework.Assert.AreEqual(1, services.Length);
    foreach (string service in services)
    {
        NUnit.Framework.Assert.IsTrue("Incorrect token service name",
            service.Contains(defaultRMAddress));
    }
    // HA is enabled
    conf.SetBoolean(YarnConfiguration.RmHaEnabled, true);
    conf.Set(YarnConfiguration.RmHaIds, "rm1,rm2");
    conf.Set(HAUtil.AddSuffix(YarnConfiguration.RmHostname, "rm1"), "0.0.0.0");
    conf.Set(HAUtil.AddSuffix(YarnConfiguration.RmHostname, "rm2"), "0.0.0.0");
    tokenService = ClientRMProxy.GetRMDelegationTokenService(conf);
    services = tokenService.ToString().Split(",");
    NUnit.Framework.Assert.AreEqual(2, services.Length);
    foreach (string haService in services)
    {
        NUnit.Framework.Assert.IsTrue("Incorrect token service name",
            haService.Contains(defaultRMAddress));
    }
}
/// <summary>
/// Constructs a SecondaryNameNode. Refuses to start in an HA cluster, where
/// the Standby NameNode performs checkpointing instead; on any failure the
/// partially initialized instance is shut down before the exception is
/// propagated.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public SecondaryNameNode(Configuration conf,
    SecondaryNameNode.CommandLineOpts commandLineOpts)
{
    try
    {
        string nsId = DFSUtil.GetSecondaryNameServiceId(conf);
        if (HAUtil.IsHAEnabled(conf, nsId))
        {
            throw new IOException("Cannot use SecondaryNameNode in an HA cluster."
                + " The Standby Namenode will perform checkpointing.");
        }
        NameNode.InitializeGenericKeys(conf, nsId, null);
        Initialize(conf, commandLineOpts);
    }
    catch (IOException)
    {
        // Release anything Initialize() may have acquired before rethrowing.
        Shutdown();
        throw;
    }
    catch (HadoopIllegalArgumentException)
    {
        Shutdown();
        throw;
    }
}
/// <summary>
/// This test also serves to test
/// <see cref="Org.Apache.Hadoop.Hdfs.HAUtil.GetProxiesForAllNameNodesInNameservice(Org.Apache.Hadoop.Conf.Configuration, string)
/// "/>
/// and
/// <see cref="Org.Apache.Hadoop.Hdfs.DFSUtil.GetRpcAddressesForNameserviceId(Org.Apache.Hadoop.Conf.Configuration, string, string)
/// "/>
/// by virtue of the fact that it wouldn't work properly if the proxies
/// returned were not for the correct NNs.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestIsAtLeastOneActive()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    try
    {
        Configuration conf = new HdfsConfiguration();
        HATestUtil.SetFailoverConfigurations(cluster, conf);
        IList<ClientProtocol> namenodes = HAUtil.GetProxiesForAllNameNodesInNameservice(
            conf, HATestUtil.GetLogicalHostname(cluster));
        NUnit.Framework.Assert.AreEqual(2, namenodes.Count);
        // Walk each NN through active/standby and confirm the predicate
        // tracks whether any proxy in the list is active.
        NUnit.Framework.Assert.IsFalse(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToActive(0);
        NUnit.Framework.Assert.IsTrue(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToStandby(0);
        NUnit.Framework.Assert.IsFalse(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToStandby(1);
        NUnit.Framework.Assert.IsFalse(HAUtil.IsAtLeastOneActive(namenodes));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Builds the HA mini cluster for this test class -- either a shared-dir HA
/// topology or a QJM-backed one depending on <c>clusterType</c> -- with
/// frequent checkpointing/edit-tailing and standby reads enabled, then
/// transitions NN0 to active and sets up a failover file system.
/// </summary>
public virtual void SetUpCluster()
{
    conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointCheckPeriodKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeNumCheckpointsRetainedKey, 10);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    HAUtil.SetAllowStandbyReads(conf, true);
    if (clusterType == TestFailureToReadEdits.TestType.SharedDirHa)
    {
        MiniDFSNNTopology topology = MiniQJMHACluster.CreateDefaultTopology(10000);
        cluster = new MiniDFSCluster.Builder(conf)
            .NnTopology(topology)
            .NumDataNodes(0)
            .CheckExitOnShutdown(false)
            .Build();
    }
    else
    {
        MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.GetDfsBuilder().NumDataNodes(0).CheckExitOnShutdown(false);
        miniQjmHaCluster = builder.Build();
        cluster = miniQjmHaCluster.GetDfsCluster();
    }
    cluster.WaitActive();
    nn0 = cluster.GetNameNode(0);
    nn1 = cluster.GetNameNode(1);
    cluster.TransitionToActive(0);
    fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
}
/// <summary>
/// Resolves an RM service id to an HA service target, validating the id
/// against the configured set and wrapping connection/configuration problems
/// in <c>YarnRuntimeException</c>.
/// </summary>
protected override HAServiceTarget ResolveTarget(string rmId)
{
    ICollection<string> rmIds = HAUtil.GetRMHAIds(GetConf());
    if (!rmIds.Contains(rmId))
    {
        StringBuilder msg = new StringBuilder();
        msg.Append(rmId + " is not a valid serviceId. It should be one of ");
        foreach (string candidate in rmIds)
        {
            msg.Append(candidate + " ");
        }
        throw new ArgumentException(msg.ToString());
    }
    try
    {
        // Work on a copy so the shared configuration is not mutated.
        YarnConfiguration targetConf = new YarnConfiguration(GetConf());
        targetConf.Set(YarnConfiguration.RmHaId, rmId);
        return new RMHAServiceTarget(targetConf);
    }
    catch (ArgumentException)
    {
        throw new YarnRuntimeException("Could not connect to " + rmId
            + "; the configuration for it might be missing");
    }
    catch (IOException)
    {
        throw new YarnRuntimeException("Could not connect to RM HA Admin for node " + rmId);
    }
}
/// <summary>
/// Regression test for HDFS-2795:
/// - Start an HA cluster with a DN.
/// </summary>
/// <remarks>
/// Regression test for HDFS-2795:
/// - Start an HA cluster with a DN.
/// - Write several blocks to the FS with replication 1.
/// - Shutdown the DN
/// - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
/// - Restart the DN.
/// In the bug, the standby node would only very slowly notice the blocks returning
/// to the cluster.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestDatanodeRestarts()
{
    Configuration conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 1024);
    // We read from the standby to watch block locations
    HAUtil.SetAllowStandbyReads(conf, true);
    conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 0);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(1)
        .Build();
    try
    {
        NameNode nn0 = cluster.GetNameNode(0);
        NameNode nn1 = cluster.GetNameNode(1);
        cluster.TransitionToActive(0);
        // Create 5 blocks.
        DFSTestUtil.CreateFile(cluster.GetFileSystem(0), TestFilePath, 5 * 1024,
            (short)1, 1L);
        HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
        // Stop the DN.
        DataNode dn = cluster.GetDataNodes()[0];
        string dnName = dn.GetDatanodeId().GetXferAddr();
        MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
        // Make sure both NNs register it as dead.
        BlockManagerTestUtil.NoticeDeadDatanode(nn0, dnName);
        BlockManagerTestUtil.NoticeDeadDatanode(nn1, dnName);
        BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
        BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
        NUnit.Framework.Assert.AreEqual(5, nn0.GetNamesystem().GetUnderReplicatedBlocks());
        // The SBN will not have any blocks in its neededReplication queue
        // since the SBN doesn't process replication.
        NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks());
        LocatedBlocks locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
        NUnit.Framework.Assert.AreEqual(
            "Standby should have registered that the block has no replicas",
            0, locs.Get(0).GetLocations().Length);
        cluster.RestartDataNode(dnProps);
        // Wait for both NNs to re-register the DN.
        cluster.WaitActive(0);
        cluster.WaitActive(1);
        BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
        BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
        NUnit.Framework.Assert.AreEqual(0, nn0.GetNamesystem().GetUnderReplicatedBlocks());
        NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks());
        locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
        NUnit.Framework.Assert.AreEqual(
            "Standby should have registered that the block has replicas again",
            1, locs.Get(0).GetLocations().Length);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Test that getContentSummary on Standby should throw standby exception.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestgetContentSummaryOnStandby()
{
    Configuration nn1conf = cluster.GetConfiguration(1);
    // just reset the standby reads to default i.e False on standby.
    HAUtil.SetAllowStandbyReads(nn1conf, false);
    cluster.RestartNameNode(1);
    // Expected to fail with a standby exception (standby reads disabled).
    cluster.GetNameNodeRpc(1).GetContentSummary("/");
}
/// <summary>
/// Points every RM service address key for the RM at <c>index</c> to this
/// host with port 0, letting the OS choose ephemeral ports.
/// </summary>
private void SetHARMConfigurationWithEphemeralPorts(int index, Configuration conf)
{
    string hostname = Org.Apache.Hadoop.Yarn.Server.MiniYARNCluster.GetHostname();
    foreach (string addressKey in YarnConfiguration.GetServiceAddressConfKeys(conf))
    {
        // ":0" requests an ephemeral port from the OS.
        conf.Set(HAUtil.AddSuffix(addressKey, rmIds[index]), hostname + ":0");
    }
}
/// <summary>
/// Verifies that cloning a logical-URI delegation token produces per-NN
/// tokens selectable by each physical IPC address, that switching to
/// host-based token services invalidates the IP-based clones, and that
/// re-cloning restores selectability.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestHAUtilClonesDelegationTokens()
{
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token =
        GetDelegationToken(fs, "JobTracker");
    UserGroupInformation ugi = UserGroupInformation.CreateRemoteUser("test");
    URI haUri = new URI("hdfs://my-ha-uri/");
    token.SetService(HAUtil.BuildTokenServiceForLogicalUri(haUri,
        HdfsConstants.HdfsUriScheme));
    ugi.AddToken(token);
    ICollection<IPEndPoint> nnAddrs = new HashSet<IPEndPoint>();
    nnAddrs.AddItem(new IPEndPoint("localhost", nn0.GetNameNodeAddress().Port));
    nnAddrs.AddItem(new IPEndPoint("localhost", nn1.GetNameNodeAddress().Port));
    HAUtil.CloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
    ICollection<Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier>> tokens =
        ugi.GetTokens();
    // One logical token plus one clone per NN address.
    NUnit.Framework.Assert.AreEqual(3, tokens.Count);
    Log.Info("Tokens:\n" + Joiner.On("\n").Join(tokens));
    DelegationTokenSelector dts = new DelegationTokenSelector();
    // check that the token selected for one of the physical IPC addresses
    // matches the one we received
    foreach (IPEndPoint nnAddr in nnAddrs)
    {
        Text service = SecurityUtil.BuildTokenService(nnAddr);
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> selected =
            dts.SelectToken(service, ugi.GetTokens());
        NUnit.Framework.Assert.IsNotNull(selected);
        Assert.AssertArrayEquals(token.GetIdentifier(), selected.GetIdentifier());
        Assert.AssertArrayEquals(token.GetPassword(), selected.GetPassword());
    }
    // switch to host-based tokens, shouldn't match existing tokens
    SecurityUtilTestHelper.SetTokenServiceUseIp(false);
    foreach (IPEndPoint nnAddr in nnAddrs)
    {
        Text service = SecurityUtil.BuildTokenService(nnAddr);
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> selected =
            dts.SelectToken(service, ugi.GetTokens());
        NUnit.Framework.Assert.IsNull(selected);
    }
    // reclone the tokens, and see if they match now
    HAUtil.CloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
    foreach (IPEndPoint nnAddr in nnAddrs)
    {
        Text service = SecurityUtil.BuildTokenService(nnAddr);
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> selected =
            dts.SelectToken(service, ugi.GetTokens());
        NUnit.Framework.Assert.IsNotNull(selected);
        Assert.AssertArrayEquals(token.GetIdentifier(), selected.GetIdentifier());
        Assert.AssertArrayEquals(token.GetPassword(), selected.GetPassword());
    }
}
/// <summary>
/// Decides whether <c>remoteUser</c> may fetch the fsimage: the user must
/// match one of the known checkpointer principals (this NN's, the
/// SecondaryNameNode's if resolvable, or the other HA NN's) or hold
/// administrator access on the HTTP server.
/// </summary>
/// <returns>true if the user is an allowed checkpointer or an admin.</returns>
internal static bool IsValidRequestor(ServletContext context, string remoteUser,
    Configuration conf)
{
    if (remoteUser == null)
    {
        // This really shouldn't happen...
        Log.Warn("Received null remoteUser while authorizing access to getImage servlet");
        return false;
    }
    ICollection<string> validRequestors = new HashSet<string>();
    validRequestors.AddItem(SecurityUtil.GetServerPrincipal(
        conf.Get(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey),
        NameNode.GetAddress(conf).GetHostName()));
    try
    {
        validRequestors.AddItem(SecurityUtil.GetServerPrincipal(
            conf.Get(DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey),
            SecondaryNameNode.GetHttpAddress(conf).GetHostName()));
    }
    catch (Exception e)
    {
        // Don't halt if SecondaryNameNode principal could not be added.
        Log.Debug("SecondaryNameNode principal could not be added", e);
        // BUG FIX: the format string used Java-style "%s" placeholders, which
        // .NET's string.Format does not substitute; converted to {0}-style
        // composite formatting so the message actually carries the values.
        string msg = string.Format(
            "SecondaryNameNode principal not considered, {0} = {1}, {2} = {3}",
            DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey,
            conf.Get(DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey),
            DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey,
            conf.GetTrimmed(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey,
                DFSConfigKeys.DfsNamenodeSecondaryHttpAddressDefault));
        Log.Warn(msg);
    }
    if (HAUtil.IsHAEnabled(conf, DFSUtil.GetNamenodeNameServiceId(conf)))
    {
        // In HA, the other NN of the pair is also a legitimate checkpointer.
        Configuration otherNnConf = HAUtil.GetConfForOtherNode(conf);
        validRequestors.AddItem(SecurityUtil.GetServerPrincipal(
            otherNnConf.Get(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey),
            NameNode.GetAddress(otherNnConf).GetHostName()));
    }
    foreach (string v in validRequestors)
    {
        if (v != null && v.Equals(remoteUser))
        {
            Log.Info("ImageServlet allowing checkpointer: " + remoteUser);
            return true;
        }
    }
    if (HttpServer2.UserHasAdministratorAccess(context, remoteUser))
    {
        Log.Info("ImageServlet allowing administrator: " + remoteUser);
        return true;
    }
    Log.Info("ImageServlet rejecting: " + remoteUser);
    return false;
}
/// <summary>
/// Initializes the RM at <c>index</c>, assigning its HA id first when HA is
/// enabled, and registers the test's app-attempt event handler on its
/// dispatcher. Synchronized on this instance.
/// </summary>
private void InitResourceManager(int index, Configuration conf)
{
    lock (this)
    {
        if (HAUtil.IsHAEnabled(conf))
        {
            conf.Set(YarnConfiguration.RmHaId, rmIds[index]);
        }
        resourceManagers[index].Init(conf);
        resourceManagers[index].GetRMContext().GetDispatcher().Register(
            typeof(RMAppAttemptEventType), new _EventHandler_296(this));
    }
}
/// <summary>
/// Builds a simple HA mini cluster with fast log rolling/tailing and standby
/// reads enabled, waits for it to come up, then shuts it down and removes the
/// shared edits dir so the test starts from a missing-shared-dir state.
/// </summary>
public virtual void SetupCluster()
{
    conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, 1);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    HAUtil.SetAllowStandbyReads(conf, true);
    MiniDFSNNTopology topology = MiniDFSNNTopology.SimpleHATopology();
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(topology)
        .NumDataNodes(0)
        .Build();
    cluster.WaitActive();
    ShutdownClusterAndRemoveSharedEditsDir();
}
/// <summary>
/// Verifies the standby stays "hot": block locations and replication changes
/// made on the active show up on the standby after each edit log roll.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestStandbyIsHot()
{
    Configuration conf = new Configuration();
    // We read from the standby to watch block locations
    HAUtil.SetAllowStandbyReads(conf, true);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(3)
        .Build();
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        NameNode nn1 = cluster.GetNameNode(0);
        NameNode nn2 = cluster.GetNameNode(1);
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        Sharpen.Thread.Sleep(1000);
        System.Console.Error.WriteLine("==================================");
        DFSTestUtil.WriteFile(fs, TestFilePath, TestFileData);
        // Have to force an edit log roll so that the standby catches up
        nn1.GetRpcServer().RollEditLog();
        System.Console.Error.WriteLine("==================================");
        // Block locations should show up on standby.
        Log.Info("Waiting for block locations to appear on standby node");
        WaitForBlockLocations(cluster, nn2, TestFile, 3);
        // Trigger immediate heartbeats and block reports so
        // that the active "trusts" all of the DNs
        cluster.TriggerHeartbeats();
        cluster.TriggerBlockReports();
        // Change replication
        Log.Info("Changing replication to 1");
        fs.SetReplication(TestFilePath, (short)1);
        BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
        WaitForBlockLocations(cluster, nn1, TestFile, 1);
        nn1.GetRpcServer().RollEditLog();
        Log.Info("Waiting for lowered replication to show up on standby");
        WaitForBlockLocations(cluster, nn2, TestFile, 1);
        // Change back to 3
        Log.Info("Changing replication to 3");
        fs.SetReplication(TestFilePath, (short)3);
        BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
        nn1.GetRpcServer().RollEditLog();
        Log.Info("Waiting for higher replication to show up on standby");
        WaitForBlockLocations(cluster, nn2, TestFile, 3);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Builds a single-DN HA mini cluster with fast edit tailing and standby
/// reads enabled, captures both NameNodes, sets up a failover file system,
/// and makes NN0 active.
/// </summary>
public virtual void SetupCluster()
{
    Configuration conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    HAUtil.SetAllowStandbyReads(conf, true);
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(1)
        .WaitSafeMode(false)
        .Build();
    cluster.WaitActive();
    nn0 = cluster.GetNameNode(0);
    nn1 = cluster.GetNameNode(1);
    fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
    cluster.TransitionToActive(0);
}
/// <summary>
/// Determine the address of the NN we are checkpointing
/// as well as our own HTTP address from the configuration.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void SetNameNodeAddresses(Configuration conf)
{
    // Look up our own address.
    myNNAddress = GetHttpAddress(conf);
    // Look up the active node's address
    Configuration confForActive = HAUtil.GetConfForOtherNode(conf);
    activeNNAddress = GetHttpAddress(confForActive);
    // Sanity-check both endpoints before using them.
    Preconditions.CheckArgument(CheckAddress(activeNNAddress),
        "Bad address for active NN: %s", activeNNAddress);
    Preconditions.CheckArgument(CheckAddress(myNNAddress),
        "Bad address for standby NN: %s", myNNAddress);
}
/// <summary>
/// Verifies that a delegation token passed through a WebHDFS query string in
/// an HA configuration deserializes as a token for the logical URI.
/// </summary>
public virtual void TestDeserializeHAToken()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token =
        new Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>();
    QueryStringDecoder decoder = new QueryStringDecoder(
        WebHdfsHandler.WebhdfsPrefix + "/?"
        + NamenodeAddressParam.Name + "=" + LogicalName + "&"
        + DelegationParam.Name + "=" + token.EncodeToUrlString());
    ParameterParser testParser = new ParameterParser(decoder, conf);
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> tok2 =
        testParser.DelegationToken();
    NUnit.Framework.Assert.IsTrue(HAUtil.IsTokenForLogicalUri(tok2));
}
/// <summary>
/// Derive the namenode http address from the current file system,
/// either default or as set by "-fs" in the generic options.
/// </summary>
/// <returns>Returns http address or null if failure.</returns>
/// <exception cref="System.IO.IOException">if we can't determine the active NN address
/// </exception>
private URI GetCurrentNamenodeAddress(Path target)
{
    Configuration conf = GetConf();
    // Get the filesystem object to verify it is an HDFS system.
    FileSystem fs = target.GetFileSystem(conf);
    if (!(fs is DistributedFileSystem))
    {
        System.Console.Error.WriteLine("FileSystem is " + fs.GetUri());
        return null;
    }
    return DFSUtil.GetInfoServer(HAUtil.GetAddressOfActive(fs), conf,
        DFSUtil.GetHttpClientScheme(conf));
}
/// <summary>
/// Verifies that an AbstractFileSystem created from a logical HA URI reports
/// the logical token service as its canonical service name, issues tokens
/// with that service, and can renew/cancel them.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestHdfsGetCanonicalServiceName()
{
    Configuration conf = dfs.GetConf();
    URI haUri = HATestUtil.GetLogicalUri(cluster);
    AbstractFileSystem afs = AbstractFileSystem.CreateFileSystem(haUri, conf);
    string haService = HAUtil.BuildTokenServiceForLogicalUri(haUri,
        HdfsConstants.HdfsUriScheme).ToString();
    NUnit.Framework.Assert.AreEqual(haService, afs.GetCanonicalServiceName());
    Org.Apache.Hadoop.Security.Token.Token<object> token = afs.GetDelegationTokens(
        UserGroupInformation.GetCurrentUser().GetShortUserName())[0];
    NUnit.Framework.Assert.AreEqual(haService, token.GetService().ToString());
    // make sure the logical uri is handled correctly
    token.Renew(conf);
    token.Cancel(conf);
}
/// <summary>
/// HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
/// exception if the URI is a logical URI.
/// </summary>
/// <remarks>
/// HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
/// exception if the URI is a logical URI. This bug fails the combination of
/// ha + mapred + security.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestDFSGetCanonicalServiceName()
{
    URI hAUri = HATestUtil.GetLogicalUri(cluster);
    string haService = HAUtil.BuildTokenServiceForLogicalUri(hAUri,
        HdfsConstants.HdfsUriScheme).ToString();
    NUnit.Framework.Assert.AreEqual(haService, dfs.GetCanonicalServiceName());
    string renewer = UserGroupInformation.GetCurrentUser().GetShortUserName();
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token =
        GetDelegationToken(dfs, renewer);
    NUnit.Framework.Assert.AreEqual(haService, token.GetService().ToString());
    // make sure the logical uri is handled correctly
    token.Renew(dfs.GetConf());
    token.Cancel(dfs.GetConf());
}
/// <summary>
/// Builds a QJM-backed HA mini cluster with the given security-authorization
/// setting, wires both NN addresses into the HA client configuration,
/// constructs a DFSAdmin against it, and redirects stdout/stderr for output
/// capture.
/// </summary>
/// <param name="security">whether hadoop security authorization is enabled.</param>
/// <exception cref="System.Exception"/>
private void SetUpHaCluster(bool security)
{
    conf = new Configuration();
    conf.SetBoolean(CommonConfigurationKeys.HadoopSecurityAuthorization, security);
    cluster = new MiniQJMHACluster.Builder(conf).Build();
    SetHAConf(conf,
        cluster.GetDfsCluster().GetNameNode(0).GetHostAndPort(),
        cluster.GetDfsCluster().GetNameNode(1).GetHostAndPort());
    // NOTE: removed a stray duplicate GetNameNode(0).GetHostAndPort() call
    // whose result was discarded -- it was dead code with no effect.
    admin = new DFSAdmin();
    admin.SetConf(conf);
    NUnit.Framework.Assert.IsTrue(HAUtil.IsHAEnabled(conf, "ns1"));
    originOut = System.Console.Out;
    originErr = System.Console.Error;
    // Redirect console streams so assertions can inspect tool output.
    Runtime.SetOut(new TextWriter(@out));
    Runtime.SetErr(new TextWriter(err));
}
/// <summary>
/// Builds the FileSystem that manages the given token: for a logical (HA)
/// token the service URI is derived from the token itself, otherwise it is
/// reconstructed from the token's physical service address.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private TokenAspect.TokenManagementDelegator GetInstance<_T0>(
    Org.Apache.Hadoop.Security.Token.Token<_T0> token, Configuration conf)
    where _T0 : TokenIdentifier
{
    string scheme = GetSchemeByKind(token.GetKind());
    URI uri;
    if (HAUtil.IsTokenForLogicalUri(token))
    {
        uri = HAUtil.GetServiceUriFromToken(scheme, token);
    }
    else
    {
        // Physical address: rebuild a scheme://host:port URI from the service.
        IPEndPoint serviceAddr = SecurityUtil.GetTokenServiceAddr(token);
        uri = URI.Create(scheme + "://" + NetUtils.GetHostPortString(serviceAddr));
    }
    return (TokenAspect.TokenManagementDelegator)FileSystem.Get(uri, conf);
}
/// <summary>
/// Initializes the failover proxy provider: records the proxy/protocol pair,
/// copies the configuration, captures the configured RM ids, points the
/// configuration at the current RM id, and applies the client failover retry
/// settings to the IPC layer.
/// </summary>
public virtual void Init(Configuration configuration, RMProxy<T> rmProxy, Type protocol)
{
    this.rmProxy = rmProxy;
    this.protocol = protocol;
    this.rmProxy.CheckAllowedProtocols(this.protocol);
    this.conf = new YarnConfiguration(configuration);
    ICollection<string> rmIds = HAUtil.GetRMHAIds(conf);
    this.rmServiceIds = Sharpen.Collections.ToArray(rmIds, new string[rmIds.Count]);
    conf.Set(YarnConfiguration.RmHaId, rmServiceIds[currentProxyIndex]);
    // Map the YARN client failover retry settings onto the IPC retry keys.
    conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectMaxRetriesKey,
        conf.GetInt(YarnConfiguration.ClientFailoverRetries,
            YarnConfiguration.DefaultClientFailoverRetries));
    conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectMaxRetriesOnSocketTimeoutsKey,
        conf.GetInt(YarnConfiguration.ClientFailoverRetriesOnSocketTimeouts,
            YarnConfiguration.DefaultClientFailoverRetriesOnSocketTimeouts));
}
/// <summary>
/// Verifies edit-log tailing: directories created on the active NN become
/// visible on the standby once it catches up, in two successive batches.
/// </summary>
public virtual void TestTailer()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    HAUtil.SetAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    cluster.WaitActive();
    cluster.TransitionToActive(0);
    NameNode nn1 = cluster.GetNameNode(0);
    NameNode nn2 = cluster.GetNameNode(1);
    try
    {
        // First batch: create the first half of the directories on the active.
        for (int i = 0; i < DirsToMake / 2; i++)
        {
            NameNodeAdapter.Mkdirs(nn1, GetDirPath(i),
                new PermissionStatus("test", "test", new FsPermission((short)0x1ed)), true);
        }
        HATestUtil.WaitForStandbyToCatchUp(nn1, nn2);
        for (int j = 0; j < DirsToMake / 2; j++)
        {
            NUnit.Framework.Assert.IsTrue(
                NameNodeAdapter.GetFileInfo(nn2, GetDirPath(j), false).IsDir());
        }
        // Second batch: create the remaining directories and re-check.
        for (int k = DirsToMake / 2; k < DirsToMake; k++)
        {
            NameNodeAdapter.Mkdirs(nn1, GetDirPath(k),
                new PermissionStatus("test", "test", new FsPermission((short)0x1ed)), true);
        }
        HATestUtil.WaitForStandbyToCatchUp(nn1, nn2);
        for (int m = DirsToMake / 2; m < DirsToMake; m++)
        {
            NUnit.Framework.Assert.IsTrue(
                NameNodeAdapter.GetFileInfo(nn2, GetDirPath(m), false).IsDir());
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Verifies that a BookKeeper journal manager can be initialized as the
/// shared edits dir: after deleting the file-based shared dir the NNs cannot
/// start, and after configuring and initializing BKJM they start in sync.
/// </summary>
public virtual void TestInitializeBKSharedEdits()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        HAUtil.SetAllowStandbyReads(conf, true);
        conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
        MiniDFSNNTopology topology = MiniDFSNNTopology.SimpleHATopology();
        cluster = new MiniDFSCluster.Builder(conf)
            .NnTopology(topology)
            .NumDataNodes(0)
            .Build();
        cluster.WaitActive();
        // Shutdown and clear the current filebased shared dir.
        cluster.ShutdownNameNodes();
        FilePath shareddir = new FilePath(cluster.GetSharedEditsDir(0, 1));
        NUnit.Framework.Assert.IsTrue("Initial Shared edits dir not fully deleted",
            FileUtil.FullyDelete(shareddir));
        // Check namenodes should not start without shared dir.
        AssertCanNotStartNamenode(cluster, 0);
        AssertCanNotStartNamenode(cluster, 1);
        // Configure bkjm as new shared edits dir in both namenodes
        Configuration nn1Conf = cluster.GetConfiguration(0);
        Configuration nn2Conf = cluster.GetConfiguration(1);
        nn1Conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
            BKJMUtil.CreateJournalURI("/initializeSharedEdits").ToString());
        nn2Conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
            BKJMUtil.CreateJournalURI("/initializeSharedEdits").ToString());
        BKJMUtil.AddJournalManagerDefinition(nn1Conf);
        BKJMUtil.AddJournalManagerDefinition(nn2Conf);
        // Initialize the BKJM shared edits.
        NUnit.Framework.Assert.IsFalse(NameNode.InitializeSharedEdits(nn1Conf));
        // NameNode should be able to start and should be in sync with BKJM as
        // shared dir
        AssertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Constructs a failover proxy provider for the given logical URI: validates
/// the protocol interface, copies and tunes the configuration's IPC retry
/// settings, resolves every configured NN RPC address for the nameservice,
/// creates an address/proxy pair per NN, and clones any logical-URI
/// delegation token so the IPC layer can find it per physical address.
/// </summary>
public ConfiguredFailoverProxyProvider(Configuration conf, URI uri, Type xface)
{
    Preconditions.CheckArgument(xface.IsAssignableFrom(typeof(NamenodeProtocols)),
        "Interface class %s is not a valid NameNode protocol!");
    this.xface = xface;
    this.conf = new Configuration(conf);
    int maxRetries = this.conf.GetInt(
        DFSConfigKeys.DfsClientFailoverConnectionRetriesKey,
        DFSConfigKeys.DfsClientFailoverConnectionRetriesDefault);
    this.conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectMaxRetriesKey,
        maxRetries);
    int maxRetriesOnSocketTimeouts = this.conf.GetInt(
        DFSConfigKeys.DfsClientFailoverConnectionRetriesOnSocketTimeoutsKey,
        DFSConfigKeys.DfsClientFailoverConnectionRetriesOnSocketTimeoutsDefault);
    this.conf.SetInt(
        CommonConfigurationKeysPublic.IpcClientConnectMaxRetriesOnSocketTimeoutsKey,
        maxRetriesOnSocketTimeouts);
    try
    {
        ugi = UserGroupInformation.GetCurrentUser();
        IDictionary<string, IDictionary<string, IPEndPoint>> map =
            DFSUtil.GetHaNnRpcAddresses(conf);
        IDictionary<string, IPEndPoint> addressesInNN = map[uri.GetHost()];
        if (addressesInNN == null || addressesInNN.Count == 0)
        {
            throw new RuntimeException("Could not find any configured addresses "
                + "for URI " + uri);
        }
        ICollection<IPEndPoint> addressesOfNns = addressesInNN.Values;
        foreach (IPEndPoint nnAddress in addressesOfNns)
        {
            proxies.AddItem(
                new ConfiguredFailoverProxyProvider.AddressRpcProxyPair<T>(nnAddress));
        }
        // The client may have a delegation token set for the logical
        // URI of the cluster. Clone this token to apply to each of the
        // underlying IPC addresses so that the IPC code can find it.
        HAUtil.CloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
}