/// <summary>
/// End-to-end exercise of RM delegation tokens against ClientRMService:
/// (1) obtain a token as the kerberos-authenticated user "testrenewer" and use
///     it for GetNewApplication calls over RPC;
/// (2) renew the token at ~50% of its initial lifetime (initialInterval = 10s)
///     and verify it remains valid past the original expiry;
/// (3) wait past the renewed expiry (renewInterval = 10s) and verify calls fail
///     with SecretManager.InvalidToken whose message contains "is expired";
/// (4) obtain a fresh token, cancel it, and verify a brand-new connection with
///     the cancelled token is rejected;
/// (5) re-read a token identifier into RMDelegationTokenIdentifierForTest (a
///     newer-version identifier carrying an extra "message" field) and verify
///     the RM still accepts the re-signed token.
/// Timing-sensitive: relies on wall-clock polling loops with 500ms sleeps, so
/// the secret-manager intervals above must stay well apart. Proxies are
/// stopped and the secret-manager threads shut down in the finally block.
/// </summary>
public virtual void TestDelegationToken() { YarnConfiguration conf = new YarnConfiguration(); conf.Set(YarnConfiguration.RmPrincipal, "testuser/[email protected]"); conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthentication, "kerberos"); UserGroupInformation.SetConfiguration(conf); ResourceScheduler scheduler = CreateMockScheduler(conf); long initialInterval = 10000l; long maxLifetime = 20000l; long renewInterval = 10000l; RMDelegationTokenSecretManager rmDtSecretManager = CreateRMDelegationTokenSecretManager (initialInterval, maxLifetime, renewInterval); rmDtSecretManager.StartThreads(); Log.Info("Creating DelegationTokenSecretManager with initialInterval: " + initialInterval + ", maxLifetime: " + maxLifetime + ", renewInterval: " + renewInterval); ClientRMService clientRMService = new TestClientRMTokens.ClientRMServiceForTest(this , conf, scheduler, rmDtSecretManager); clientRMService.Init(conf); clientRMService.Start(); ApplicationClientProtocol clientRMWithDT = null; try { // Create a user for the renewr and fake the authentication-method Userroupdammy := 0; UserGroupInformation loggedInUser = UserGroupInformation.CreateRemoteUser("*****@*****.**" ); NUnit.Framework.Assert.AreEqual("testrenewer", loggedInUser.GetShortUserName()); // Default realm is APACHE.ORG loggedInUser.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos ); Token token = GetDelegationToken(loggedInUser, clientRMService, loggedInUser.GetShortUserName ()); long tokenFetchTime = Runtime.CurrentTimeMillis(); Log.Info("Got delegation token at: " + tokenFetchTime); // Now try talking to RMService using the delegation token clientRMWithDT = GetClientRMProtocolWithDT(token, clientRMService.GetBindAddress( ), "loginuser1", conf); GetNewApplicationRequest request = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord < GetNewApplicationRequest>(); try { clientRMWithDT.GetNewApplication(request); } catch (IOException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } catch 
(YarnException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } // Renew after 50% of token age. while (Runtime.CurrentTimeMillis() < tokenFetchTime + initialInterval / 2) { Sharpen.Thread.Sleep(500l); } long nextExpTime = RenewDelegationToken(loggedInUser, clientRMService, token); long renewalTime = Runtime.CurrentTimeMillis(); Log.Info("Renewed token at: " + renewalTime + ", NextExpiryTime: " + nextExpTime); // Wait for first expiry, but before renewed expiry. while (Runtime.CurrentTimeMillis() > tokenFetchTime + initialInterval && Runtime. CurrentTimeMillis() < nextExpTime) { Sharpen.Thread.Sleep(500l); } Sharpen.Thread.Sleep(50l); // Valid token because of renewal. try { clientRMWithDT.GetNewApplication(request); } catch (IOException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } catch (YarnException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } // Wait for expiry. while (Runtime.CurrentTimeMillis() < renewalTime + renewInterval) { Sharpen.Thread.Sleep(500l); } Sharpen.Thread.Sleep(50l); Log.Info("At time: " + Runtime.CurrentTimeMillis() + ", token should be invalid"); // Token should have expired. try { clientRMWithDT.GetNewApplication(request); NUnit.Framework.Assert.Fail("Should not have succeeded with an expired token"); } catch (Exception e) { NUnit.Framework.Assert.AreEqual(typeof(SecretManager.InvalidToken).FullName, e.GetType ().FullName); NUnit.Framework.Assert.IsTrue(e.Message.Contains("is expired")); } // Test cancellation // Stop the existing proxy, start another. 
if (clientRMWithDT != null) { RPC.StopProxy(clientRMWithDT); clientRMWithDT = null; } token = GetDelegationToken(loggedInUser, clientRMService, loggedInUser.GetShortUserName ()); tokenFetchTime = Runtime.CurrentTimeMillis(); Log.Info("Got delegation token at: " + tokenFetchTime); // Now try talking to RMService using the delegation token clientRMWithDT = GetClientRMProtocolWithDT(token, clientRMService.GetBindAddress( ), "loginuser2", conf); request = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord <GetNewApplicationRequest> (); try { clientRMWithDT.GetNewApplication(request); } catch (IOException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } catch (YarnException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } CancelDelegationToken(loggedInUser, clientRMService, token); if (clientRMWithDT != null) { RPC.StopProxy(clientRMWithDT); clientRMWithDT = null; } // Creating a new connection. clientRMWithDT = GetClientRMProtocolWithDT(token, clientRMService.GetBindAddress( ), "loginuser2", conf); Log.Info("Cancelled delegation token at: " + Runtime.CurrentTimeMillis()); // Verify cancellation worked. try { clientRMWithDT.GetNewApplication(request); NUnit.Framework.Assert.Fail("Should not have succeeded with a cancelled delegation token" ); } catch (IOException) { } catch (YarnException) { } // Test new version token // Stop the existing proxy, start another. 
if (clientRMWithDT != null) { RPC.StopProxy(clientRMWithDT); clientRMWithDT = null; } token = GetDelegationToken(loggedInUser, clientRMService, loggedInUser.GetShortUserName ()); byte[] tokenIdentifierContent = ((byte[])token.GetIdentifier().Array()); RMDelegationTokenIdentifier tokenIdentifier = new RMDelegationTokenIdentifier(); DataInputBuffer dib = new DataInputBuffer(); dib.Reset(tokenIdentifierContent, tokenIdentifierContent.Length); tokenIdentifier.ReadFields(dib); // Construct new version RMDelegationTokenIdentifier with additional field RMDelegationTokenIdentifierForTest newVersionTokenIdentifier = new RMDelegationTokenIdentifierForTest (tokenIdentifier, "message"); Org.Apache.Hadoop.Security.Token.Token <RMDelegationTokenIdentifier> newRMDTtoken = new Org.Apache.Hadoop.Security.Token.Token <RMDelegationTokenIdentifier>(newVersionTokenIdentifier , rmDtSecretManager); Org.Apache.Hadoop.Yarn.Api.Records.Token newToken = BuilderUtils.NewDelegationToken (newRMDTtoken.GetIdentifier(), newRMDTtoken.GetKind().ToString(), newRMDTtoken.GetPassword (), newRMDTtoken.GetService().ToString()); // Now try talking to RMService using the new version delegation token clientRMWithDT = GetClientRMProtocolWithDT(newToken, clientRMService.GetBindAddress (), "loginuser3", conf); request = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord <GetNewApplicationRequest> (); try { clientRMWithDT.GetNewApplication(request); } catch (IOException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } catch (YarnException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } } finally { rmDtSecretManager.StopThreads(); // TODO PRECOMMIT Close proxies. if (clientRMWithDT != null) { RPC.StopProxy(clientRMWithDT); } } }
/// <summary>
/// Creates the fixture with fresh, independent configuration objects so that
/// no state leaks between instances.
/// </summary>
public Yarn()
{
    // Both properties are populated with brand-new configurations; the two
    // constructions are independent of each other.
    CapacitySchedulerConfiguration = new CapacitySchedulerConfiguration();
    Configuration = new YarnConfiguration();
}
/// <summary>
/// Verifies LeafQueue per-user headroom accounting in the CapacityScheduler.
/// Setup: queue A gets a 25% user-limit, the mocked CapacitySchedulerContext
/// reports a cluster of 100 nodes x 16GB, and RM apps are spied so every
/// lookup returns a mock RMApp with a zero-sized AM resource request.
/// Assertions:
///  - one user (user_0, two apps): headroom = 10 * 16GB for both apps;
///  - second user (user_1) submits: headroom halves (10 * 16GB / 2) for all
///    three apps;
///  - cluster shrinks to 90 nodes: headroom follows (9 * 16GB / 2).
/// Each AssignContainers call is only used to force a headroom recomputation.
/// NOTE(review): the local 'rmContext' declared after node_0 shadows the
/// field of the same name that was fed to csContext.GetRMContext() earlier —
/// presumably intentional (the spy wraps a fresh mock for app submission),
/// but worth confirming.
/// </summary>
public virtual void TestHeadroom() { CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(); csConf.SetUserLimit(CapacitySchedulerConfiguration.Root + "." + A, 25); SetupQueueConfiguration(csConf); YarnConfiguration conf = new YarnConfiguration(); CapacitySchedulerContext csContext = Org.Mockito.Mockito.Mock <CapacitySchedulerContext >(); Org.Mockito.Mockito.When(csContext.GetConfiguration()).ThenReturn(csConf); Org.Mockito.Mockito.When(csContext.GetConf()).ThenReturn(conf); Org.Mockito.Mockito.When(csContext.GetMinimumResourceCapability()).ThenReturn(Resources .CreateResource(Gb)); Org.Mockito.Mockito.When(csContext.GetMaximumResourceCapability()).ThenReturn(Resources .CreateResource(16 * Gb)); Org.Mockito.Mockito.When(csContext.GetApplicationComparator()).ThenReturn(CapacityScheduler .applicationComparator); Org.Mockito.Mockito.When(csContext.GetQueueComparator()).ThenReturn(CapacityScheduler .queueComparator); Org.Mockito.Mockito.When(csContext.GetResourceCalculator()).ThenReturn(resourceCalculator ); Org.Mockito.Mockito.When(csContext.GetRMContext()).ThenReturn(rmContext); // Say cluster has 100 nodes of 16G each Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = Resources.CreateResource (100 * 16 * Gb); Org.Mockito.Mockito.When(csContext.GetClusterResource()).ThenReturn(clusterResource ); IDictionary <string, CSQueue> queues = new Dictionary <string, CSQueue>(); CapacityScheduler.ParseQueue(csContext, csConf, null, "root", queues, queues, TestUtils .spyHook); // Manipulate queue 'a' LeafQueue queue = TestLeafQueue.StubLeafQueue((LeafQueue)queues[A]); string host_0 = "host_0"; string rack_0 = "rack_0"; FiCaSchedulerNode node_0 = TestUtils.GetMockNode(host_0, rack_0, 0, 16 * Gb); string user_0 = "user_0"; string user_1 = "user_1"; RecordFactory recordFactory = RecordFactoryProvider.GetRecordFactory(null); RMContext rmContext = TestUtils.GetMockRMContext(); RMContext spyRMContext = Org.Mockito.Mockito.Spy(rmContext); ConcurrentMap 
<ApplicationId, RMApp> spyApps = Org.Mockito.Mockito.Spy(new ConcurrentHashMap <ApplicationId, RMApp>()); RMApp rmApp = Org.Mockito.Mockito.Mock <RMApp>(); ResourceRequest amResourceRequest = Org.Mockito.Mockito.Mock <ResourceRequest>(); Org.Apache.Hadoop.Yarn.Api.Records.Resource amResource = Resources.CreateResource (0, 0); Org.Mockito.Mockito.When(amResourceRequest.GetCapability()).ThenReturn(amResource ); Org.Mockito.Mockito.When(rmApp.GetAMResourceRequest()).ThenReturn(amResourceRequest ); Org.Mockito.Mockito.DoReturn(rmApp).When(spyApps)[(ApplicationId)Matchers.Any()]; Org.Mockito.Mockito.When(spyRMContext.GetRMApps()).ThenReturn(spyApps); Priority priority_1 = TestUtils.CreateMockPriority(1); // Submit first application with some resource-requests from user_0, // and check headroom ApplicationAttemptId appAttemptId_0_0 = TestUtils.GetMockApplicationAttemptId(0, 0); FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.GetActiveUsersManager(), spyRMContext); queue.SubmitApplicationAttempt(app_0_0, user_0); IList <ResourceRequest> app_0_0_requests = new AList <ResourceRequest>(); app_0_0_requests.AddItem(TestUtils.CreateResourceRequest(ResourceRequest.Any, 1 * Gb, 2, true, priority_1, recordFactory)); app_0_0.UpdateResourceRequests(app_0_0_requests); // Schedule to compute queue.AssignContainers(clusterResource, node_0, new ResourceLimits(clusterResource )); Org.Apache.Hadoop.Yarn.Api.Records.Resource expectedHeadroom = Resources.CreateResource (10 * 16 * Gb, 1); NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_0.GetHeadroom()); // Submit second application from user_0, check headroom ApplicationAttemptId appAttemptId_0_1 = TestUtils.GetMockApplicationAttemptId(1, 0); FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.GetActiveUsersManager(), spyRMContext); queue.SubmitApplicationAttempt(app_0_1, user_0); IList <ResourceRequest> app_0_1_requests = new AList <ResourceRequest>(); 
app_0_1_requests.AddItem(TestUtils.CreateResourceRequest(ResourceRequest.Any, 1 * Gb, 2, true, priority_1, recordFactory)); app_0_1.UpdateResourceRequests(app_0_1_requests); // Schedule to compute queue.AssignContainers(clusterResource, node_0, new ResourceLimits(clusterResource )); // Schedule to compute NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_0.GetHeadroom()); NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_1.GetHeadroom()); // no change // Submit first application from user_1, check for new headroom ApplicationAttemptId appAttemptId_1_0 = TestUtils.GetMockApplicationAttemptId(2, 0); FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.GetActiveUsersManager(), spyRMContext); queue.SubmitApplicationAttempt(app_1_0, user_1); IList <ResourceRequest> app_1_0_requests = new AList <ResourceRequest>(); app_1_0_requests.AddItem(TestUtils.CreateResourceRequest(ResourceRequest.Any, 1 * Gb, 2, true, priority_1, recordFactory)); app_1_0.UpdateResourceRequests(app_1_0_requests); // Schedule to compute queue.AssignContainers(clusterResource, node_0, new ResourceLimits(clusterResource )); // Schedule to compute expectedHeadroom = Resources.CreateResource(10 * 16 * Gb / 2, 1); // changes NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_0.GetHeadroom()); NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_1.GetHeadroom()); NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_1_0.GetHeadroom()); // Now reduce cluster size and check for the smaller headroom clusterResource = Resources.CreateResource(90 * 16 * Gb); queue.AssignContainers(clusterResource, node_0, new ResourceLimits(clusterResource )); // Schedule to compute expectedHeadroom = Resources.CreateResource(9 * 16 * Gb / 2, 1); // changes NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_0.GetHeadroom()); NUnit.Framework.Assert.AreEqual(expectedHeadroom, app_0_1.GetHeadroom()); NUnit.Framework.Assert.AreEqual(expectedHeadroom, 
app_1_0.GetHeadroom()); }
/// <summary>
/// Walks a NodeManager through the full master-key life-cycle against a
/// running ResourceManager and asserts exactly when key updates are (and are
/// not) shipped in heartbeat responses, for both the container-token and the
/// NM-token master keys: registration delivers both keys; subsequent
/// heartbeats deliver nothing; a forced roll-over delivers the next key
/// (key-id incremented by exactly one) on the first heartbeat only; key
/// activation delivers nothing at all.
/// </summary>
/// <param name="conf">YARN configuration used to start the RM; the default
/// rolling/activation intervals are assumed large enough that no roll-over
/// happens on its own during the test.</param>
/// <exception cref="System.Exception"/>
private void ValidateRMNMKeyExchange(YarnConfiguration conf)
{
    // Default rolling and activation intervals are large enough, no need to
    // intervene
    DrainDispatcher dispatcher = new DrainDispatcher();
    // _ResourceManager_56 stubs out keytab login ("Do nothing" / "skip").
    ResourceManager rm = new _ResourceManager_56(dispatcher);
    rm.Init(conf);
    rm.Start();
    // Message prefixes distinguish the ContainerToken and NMToken assertions.
    string containerToken = "Container Token : ";
    string nmToken = "NM Token : ";
    MockNM nm = new MockNM("host:1234", 3072, rm.GetResourceTrackerService());
    RegisterNodeManagerResponse registrationResponse = nm.RegisterNode();
    // Registration must hand out both master keys.
    MasterKey containerTokenMasterKey = registrationResponse.GetContainerTokenMasterKey();
    NUnit.Framework.Assert.IsNotNull(containerToken + "Registration should cause a key-update!", containerTokenMasterKey);
    MasterKey nmTokenMasterKey = registrationResponse.GetNMTokenMasterKey();
    NUnit.Framework.Assert.IsNotNull(nmToken + "Registration should cause a key-update!", nmTokenMasterKey);
    dispatcher.Await();
    // While the keys are unchanged, heartbeats must not carry key updates.
    NodeHeartbeatResponse response = nm.NodeHeartbeat(true);
    NUnit.Framework.Assert.IsNull(containerToken + "First heartbeat after registration shouldn't get any key updates!", response.GetContainerTokenMasterKey());
    NUnit.Framework.Assert.IsNull(nmToken + "First heartbeat after registration shouldn't get any key updates!", response.GetNMTokenMasterKey());
    dispatcher.Await();
    response = nm.NodeHeartbeat(true);
    NUnit.Framework.Assert.IsNull(containerToken + "Even second heartbeat after registration shouldn't get any key updates!", response.GetContainerTokenMasterKey());
    // FIX: this assertion previously re-checked GetContainerTokenMasterKey(),
    // leaving the NM-token key unverified on the second heartbeat.
    NUnit.Framework.Assert.IsNull(nmToken + "Even second heartbeat after registration shouldn't get any key updates!", response.GetNMTokenMasterKey());
    dispatcher.Await();
    // Let's force a roll-over
    rm.GetRMContext().GetContainerTokenSecretManager().RollMasterKey();
    rm.GetRMContext().GetNMTokenSecretManager().RollMasterKey();
    // Heartbeats after roll-over and before activation should be fine.
    response = nm.NodeHeartbeat(true);
    NUnit.Framework.Assert.IsNotNull(containerToken + "Heartbeats after roll-over and before activation should not err out.", response.GetContainerTokenMasterKey());
    NUnit.Framework.Assert.IsNotNull(nmToken + "Heartbeats after roll-over and before activation should not err out.", response.GetNMTokenMasterKey());
    // A roll-over bumps each key-id by exactly one.
    NUnit.Framework.Assert.AreEqual(containerToken + "Roll-over should have incremented the key-id only by one!", containerTokenMasterKey.GetKeyId() + 1, response.GetContainerTokenMasterKey().GetKeyId());
    NUnit.Framework.Assert.AreEqual(nmToken + "Roll-over should have incremented the key-id only by one!", nmTokenMasterKey.GetKeyId() + 1, response.GetNMTokenMasterKey().GetKeyId());
    dispatcher.Await();
    // The new key is shipped once; the next heartbeat carries nothing.
    response = nm.NodeHeartbeat(true);
    NUnit.Framework.Assert.IsNull(containerToken + "Second heartbeat after roll-over shouldn't get any key updates!", response.GetContainerTokenMasterKey());
    NUnit.Framework.Assert.IsNull(nmToken + "Second heartbeat after roll-over shouldn't get any key updates!", response.GetNMTokenMasterKey());
    dispatcher.Await();
    // Let's force activation
    rm.GetRMContext().GetContainerTokenSecretManager().ActivateNextMasterKey();
    rm.GetRMContext().GetNMTokenSecretManager().ActivateNextMasterKey();
    // Activation itself must never push key updates to the NM.
    response = nm.NodeHeartbeat(true);
    NUnit.Framework.Assert.IsNull(containerToken + "Activation shouldn't cause any key updates!", response.GetContainerTokenMasterKey());
    NUnit.Framework.Assert.IsNull(nmToken + "Activation shouldn't cause any key updates!", response.GetNMTokenMasterKey());
    dispatcher.Await();
    response = nm.NodeHeartbeat(true);
    NUnit.Framework.Assert.IsNull(containerToken + "Even second heartbeat after activation shouldn't get any key updates!", response.GetContainerTokenMasterKey());
    NUnit.Framework.Assert.IsNull(nmToken + "Even second heartbeat after activation shouldn't get any key updates!", response.GetNMTokenMasterKey());
    dispatcher.Await();
    rm.Stop();
}
/// <summary>
/// Verifies that CgroupsLCEResourcesHandler writes cpu.cfs_period_us /
/// cpu.cfs_quota_us into a mock cgroup hierarchy only when strict resource
/// usage is enabled AND the container requests less than the node's full CPU
/// allocation:
///  - default (non-strict) mode: container dir is created but no limit files;
///  - strict mode, container uses all default vcores: still no limit files;
///  - strict mode, half the default vcores: period = 500 * 1000us and
///    quota = 1000 * 1000us (i.e. 50% of CPU);
///  - strict mode with a 50% physical-CPU cap and half the vcores:
///    period = 1000 * 1000us and quota = 1000 * 1000us.
/// Uses a mocked ResourceCalculatorPlugin reporting 4 processors plus a mock
/// mtab, and deletes the mock cgroup directory tree when done.
/// </summary>
public virtual void TestContainerLimits() { LinuxContainerExecutor mockLCE = new TestCgroupsLCEResourcesHandler.MockLinuxContainerExecutor (); TestCgroupsLCEResourcesHandler.CustomCgroupsLCEResourceHandler handler = new TestCgroupsLCEResourcesHandler.CustomCgroupsLCEResourceHandler (); handler.generateLimitsMode = true; YarnConfiguration conf = new YarnConfiguration(); int numProcessors = 4; ResourceCalculatorPlugin plugin = Org.Mockito.Mockito.Mock <ResourceCalculatorPlugin >(); Org.Mockito.Mockito.DoReturn(numProcessors).When(plugin).GetNumProcessors(); handler.SetConf(conf); handler.InitConfig(); // create mock cgroup FilePath cgroupMountDir = CreateMockCgroupMount(cgroupDir); // create mock mtab FilePath mockMtab = CreateMockMTab(cgroupDir); // setup our handler and call init() handler.SetMtabFile(mockMtab.GetAbsolutePath()); handler.Init(mockLCE, plugin); // check values // default case - files shouldn't exist, strict mode off by default ContainerId id = ContainerId.FromString("container_1_1_1_1"); handler.PreExecute(id, Resource.NewInstance(1024, 1)); FilePath containerDir = new FilePath(cgroupMountDir, id.ToString()); NUnit.Framework.Assert.IsTrue(containerDir.Exists()); NUnit.Framework.Assert.IsTrue(containerDir.IsDirectory()); FilePath periodFile = new FilePath(containerDir, "cpu.cfs_period_us"); FilePath quotaFile = new FilePath(containerDir, "cpu.cfs_quota_us"); NUnit.Framework.Assert.IsFalse(periodFile.Exists()); NUnit.Framework.Assert.IsFalse(quotaFile.Exists()); // no files created because we're using all cpu FileUtils.DeleteQuietly(containerDir); conf.SetBoolean(YarnConfiguration.NmLinuxContainerCgroupsStrictResourceUsage, true ); handler.InitConfig(); handler.PreExecute(id, Resource.NewInstance(1024, YarnConfiguration.DefaultNmVcores )); NUnit.Framework.Assert.IsTrue(containerDir.Exists()); NUnit.Framework.Assert.IsTrue(containerDir.IsDirectory()); periodFile = new FilePath(containerDir, "cpu.cfs_period_us"); quotaFile = new FilePath(containerDir, 
"cpu.cfs_quota_us"); NUnit.Framework.Assert.IsFalse(periodFile.Exists()); NUnit.Framework.Assert.IsFalse(quotaFile.Exists()); // 50% of CPU FileUtils.DeleteQuietly(containerDir); conf.SetBoolean(YarnConfiguration.NmLinuxContainerCgroupsStrictResourceUsage, true ); handler.InitConfig(); handler.PreExecute(id, Resource.NewInstance(1024, YarnConfiguration.DefaultNmVcores / 2)); NUnit.Framework.Assert.IsTrue(containerDir.Exists()); NUnit.Framework.Assert.IsTrue(containerDir.IsDirectory()); periodFile = new FilePath(containerDir, "cpu.cfs_period_us"); quotaFile = new FilePath(containerDir, "cpu.cfs_quota_us"); NUnit.Framework.Assert.IsTrue(periodFile.Exists()); NUnit.Framework.Assert.IsTrue(quotaFile.Exists()); NUnit.Framework.Assert.AreEqual(500 * 1000, ReadIntFromFile(periodFile)); NUnit.Framework.Assert.AreEqual(1000 * 1000, ReadIntFromFile(quotaFile)); // CGroups set to 50% of CPU, container set to 50% of YARN CPU FileUtils.DeleteQuietly(containerDir); conf.SetBoolean(YarnConfiguration.NmLinuxContainerCgroupsStrictResourceUsage, true ); conf.SetInt(YarnConfiguration.NmResourcePercentagePhysicalCpuLimit, 50); handler.InitConfig(); handler.Init(mockLCE, plugin); handler.PreExecute(id, Resource.NewInstance(1024, YarnConfiguration.DefaultNmVcores / 2)); NUnit.Framework.Assert.IsTrue(containerDir.Exists()); NUnit.Framework.Assert.IsTrue(containerDir.IsDirectory()); periodFile = new FilePath(containerDir, "cpu.cfs_period_us"); quotaFile = new FilePath(containerDir, "cpu.cfs_quota_us"); NUnit.Framework.Assert.IsTrue(periodFile.Exists()); NUnit.Framework.Assert.IsTrue(quotaFile.Exists()); NUnit.Framework.Assert.AreEqual(1000 * 1000, ReadIntFromFile(periodFile)); NUnit.Framework.Assert.AreEqual(1000 * 1000, ReadIntFromFile(quotaFile)); FileUtils.DeleteQuietly(cgroupDir); }
/// <summary>
/// Builds an HA service target for the ResourceManager admin service: records
/// whether automatic failover is enabled and resolves the RM admin endpoint
/// from the configuration (falling back to the default address and port).
/// </summary>
/// <param name="conf">YARN configuration to read the HA and admin-address
/// settings from.</param>
/// <exception cref="System.IO.IOException"/>
public RMHAServiceTarget(YarnConfiguration conf)
{
    // Resolve the admin RPC endpoint this target fronts.
    haAdminServiceAddress = conf.GetSocketAddr(
        YarnConfiguration.RmAdminAddress,
        YarnConfiguration.DefaultRmAdminAddress,
        YarnConfiguration.DefaultRmAdminPort);
    // Remember whether the cluster is configured for automatic failover.
    autoFailoverEnabled = HAUtil.IsAutomaticFailoverEnabled(conf);
}
/// <summary>
/// Verifies that starting the RM web app rewrites
/// "hadoop.http.filter.initializers" to force RMAuthenticationFilterInitializer
/// in, regardless of what the user configured:
///  - kerberos mode: every tested combination (AuthenticationFilterInitializer,
///    RMAuthenticationFilterInitializer, both comma-joined, and this test
///    class itself) collapses to RMAuthenticationFilterInitializer — plus this
///    test's own class when it was listed;
///  - simple mode: an empty setting or StaticUserWebFilter is likewise
///    rewritten (StaticUserWebFilter is kept after the RM filter).
/// RuntimeExceptions from the only-partially-configured ResourceManager are
/// expected and swallowed; only the resulting filter configuration is
/// asserted. _ResourceManager_259 skips the kerberos login.
/// </summary>
/// <exception cref="System.Exception"/> public virtual void TestFilterOverrides() { string filterInitializerConfKey = "hadoop.http.filter.initializers"; string[] filterInitializers = new string[] { typeof(AuthenticationFilterInitializer ).FullName, typeof(RMAuthenticationFilterInitializer).FullName, typeof(AuthenticationFilterInitializer ).FullName + "," + typeof(RMAuthenticationFilterInitializer).FullName, typeof(AuthenticationFilterInitializer ).FullName + ", " + typeof(RMAuthenticationFilterInitializer).FullName, typeof(AuthenticationFilterInitializer ).FullName + ", " + this.GetType().FullName }; foreach (string filterInitializer in filterInitializers) { resourceManager = new _ResourceManager_259(); // Skip the login. Configuration conf = new YarnConfiguration(); conf.Set(filterInitializerConfKey, filterInitializer); conf.Set("hadoop.security.authentication", "kerberos"); conf.Set("hadoop.http.authentication.type", "kerberos"); try { try { UserGroupInformation.SetConfiguration(conf); } catch (Exception) { // ignore we just care about getting true for // isSecurityEnabled() Log.Info("Got expected exception"); } resourceManager.Init(conf); resourceManager.StartWepApp(); } catch (RuntimeException) { // Exceptions are expected because we didn't setup everything // just want to test filter settings string tmp = resourceManager.GetConfig().Get(filterInitializerConfKey); if (filterInitializer.Contains(this.GetType().FullName)) { NUnit.Framework.Assert.AreEqual(typeof(RMAuthenticationFilterInitializer).FullName + "," + this.GetType().FullName, tmp); } else { NUnit.Framework.Assert.AreEqual(typeof(RMAuthenticationFilterInitializer).FullName , tmp); } resourceManager.Stop(); } } // simple mode overrides string[] simpleFilterInitializers = new string[] { string.Empty, typeof(StaticUserWebFilter ).FullName }; foreach (string filterInitializer_1 in simpleFilterInitializers) { resourceManager = new ResourceManager(); Configuration conf = new YarnConfiguration(); 
conf.Set(filterInitializerConfKey, filterInitializer_1); try { UserGroupInformation.SetConfiguration(conf); resourceManager.Init(conf); resourceManager.StartWepApp(); } catch (RuntimeException) { // Exceptions are expected because we didn't setup everything // just want to test filter settings string tmp = resourceManager.GetConfig().Get(filterInitializerConfKey); if (filterInitializer_1.Equals(typeof(StaticUserWebFilter).FullName)) { NUnit.Framework.Assert.AreEqual(typeof(RMAuthenticationFilterInitializer).FullName + "," + typeof(StaticUserWebFilter).FullName, tmp); } else { NUnit.Framework.Assert.AreEqual(typeof(RMAuthenticationFilterInitializer).FullName , tmp); } resourceManager.Stop(); } } }
/// <summary>
/// End-to-end MR JobHistoryServer delegation-token life-cycle, with a 10s
/// initial interval, 20s max lifetime and 10s renew interval:
/// (1) fetch a token as kerberos user "testrenewer" and call GetJobReport
///     through it — the job is unknown, so "Unknown job job_123456_0001" is
///     the expected success-path error;
/// (2) renew at ~50% of token age and verify the token stays usable past the
///     original expiry;
/// (3) wait past the renewed expiry and expect the failure's inner message to
///     contain "is expired";
/// (4) fetch a fresh token and cancel it — also exercising cancellation of a
///     token issued to a different renewer ("yarn") — then verify a brand-new
///     connection with the cancelled token fails with IOException.
/// Timing-sensitive: uses wall-clock polling loops with 500ms sleeps.
/// _JobHistoryServer_87 skips keytab-based login. The history server is
/// stopped in the finally block; proxy shutdown is commented out
/// (RPC.stopProxy) — presumably a Sharpen-conversion leftover, worth
/// confirming no proxies leak.
/// </summary>
public virtual void TestDelegationToken() { Logger rootLogger = LogManager.GetRootLogger(); rootLogger.SetLevel(Level.Debug); YarnConfiguration conf = new YarnConfiguration(new JobConf()); // Just a random principle conf.Set(JHAdminConfig.MrHistoryPrincipal, "RandomOrc/[email protected]"); conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthentication, "kerberos"); UserGroupInformation.SetConfiguration(conf); long initialInterval = 10000l; long maxLifetime = 20000l; long renewInterval = 10000l; JobHistoryServer jobHistoryServer = null; MRClientProtocol clientUsingDT = null; long tokenFetchTime; try { jobHistoryServer = new _JobHistoryServer_87(initialInterval, maxLifetime, renewInterval ); // no keytab based login // Don't need it, skip.; // final JobHistoryServer jobHistoryServer = jhServer; jobHistoryServer.Init(conf); jobHistoryServer.Start(); MRClientProtocol hsService = jobHistoryServer.GetClientService().GetClientHandler (); // Fake the authentication-method UserGroupInformation loggedInUser = UserGroupInformation.CreateRemoteUser("*****@*****.**" ); NUnit.Framework.Assert.AreEqual("testrenewer", loggedInUser.GetShortUserName()); // Default realm is APACHE.ORG loggedInUser.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos ); Token token = GetDelegationToken(loggedInUser, hsService, loggedInUser.GetShortUserName ()); tokenFetchTime = Runtime.CurrentTimeMillis(); Log.Info("Got delegation token at: " + tokenFetchTime); // Now try talking to JHS using the delegation token clientUsingDT = GetMRClientProtocol(token, jobHistoryServer.GetClientService().GetBindAddress (), "TheDarkLord", conf); GetJobReportRequest jobReportRequest = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord <GetJobReportRequest>(); jobReportRequest.SetJobId(MRBuilderUtils.NewJobId(123456, 1, 1)); try { clientUsingDT.GetJobReport(jobReportRequest); } catch (IOException e) { NUnit.Framework.Assert.AreEqual("Unknown job job_123456_0001", e.Message); } // Renew after 50% 
of token age. while (Runtime.CurrentTimeMillis() < tokenFetchTime + initialInterval / 2) { Sharpen.Thread.Sleep(500l); } long nextExpTime = RenewDelegationToken(loggedInUser, hsService, token); long renewalTime = Runtime.CurrentTimeMillis(); Log.Info("Renewed token at: " + renewalTime + ", NextExpiryTime: " + nextExpTime); // Wait for first expiry, but before renewed expiry. while (Runtime.CurrentTimeMillis() > tokenFetchTime + initialInterval && Runtime. CurrentTimeMillis() < nextExpTime) { Sharpen.Thread.Sleep(500l); } Sharpen.Thread.Sleep(50l); // Valid token because of renewal. try { clientUsingDT.GetJobReport(jobReportRequest); } catch (IOException e) { NUnit.Framework.Assert.AreEqual("Unknown job job_123456_0001", e.Message); } // Wait for expiry. while (Runtime.CurrentTimeMillis() < renewalTime + renewInterval) { Sharpen.Thread.Sleep(500l); } Sharpen.Thread.Sleep(50l); Log.Info("At time: " + Runtime.CurrentTimeMillis() + ", token should be invalid"); // Token should have expired. try { clientUsingDT.GetJobReport(jobReportRequest); NUnit.Framework.Assert.Fail("Should not have succeeded with an expired token"); } catch (IOException e) { NUnit.Framework.Assert.IsTrue(e.InnerException.Message.Contains("is expired")); } // Test cancellation // Stop the existing proxy, start another. 
if (clientUsingDT != null) { // RPC.stopProxy(clientUsingDT); clientUsingDT = null; } token = GetDelegationToken(loggedInUser, hsService, loggedInUser.GetShortUserName ()); tokenFetchTime = Runtime.CurrentTimeMillis(); Log.Info("Got delegation token at: " + tokenFetchTime); // Now try talking to HSService using the delegation token clientUsingDT = GetMRClientProtocol(token, jobHistoryServer.GetClientService().GetBindAddress (), "loginuser2", conf); try { clientUsingDT.GetJobReport(jobReportRequest); } catch (IOException e) { NUnit.Framework.Assert.Fail("Unexpected exception" + e); } CancelDelegationToken(loggedInUser, hsService, token); // Testing the token with different renewer to cancel the token Token tokenWithDifferentRenewer = GetDelegationToken(loggedInUser, hsService, "yarn" ); CancelDelegationToken(loggedInUser, hsService, tokenWithDifferentRenewer); if (clientUsingDT != null) { // RPC.stopProxy(clientUsingDT); clientUsingDT = null; } // Creating a new connection. clientUsingDT = GetMRClientProtocol(token, jobHistoryServer.GetClientService().GetBindAddress (), "loginuser2", conf); Log.Info("Cancelled delegation token at: " + Runtime.CurrentTimeMillis()); // Verify cancellation worked. try { clientUsingDT.GetJobReport(jobReportRequest); NUnit.Framework.Assert.Fail("Should not have succeeded with a cancelled delegation token" ); } catch (IOException) { } } finally { jobHistoryServer.Stop(); } }
/// <summary>
/// Exercises MR client redirection between the RM, a fake AM and a fake
/// JobHistoryServer:
///  - with the AM up, a Cluster client fetches job counters served by the AM
///    (tracked via the amContact flag);
///  - with the AM stopped and flagged as restarting (amRestarting = true),
///    every job method (kill/fail task, reports, diagnostics, status,
///    tracking URL, counters) must return stub results without throwing —
///    counters come back empty;
///  - after the AM restarts, the same client reconnects to it and real
///    counters are validated again;
///  - once the AM stops for good (not restarting), the client is redirected
///    to the history service (hsContact flag).
/// The two 5-second sleeps let sockets drain between phases, per the inline
/// log messages.
/// </summary>
public virtual void TestRedirect() { Configuration conf = new YarnConfiguration(); conf.Set(MRConfig.FrameworkName, MRConfig.YarnFrameworkName); conf.Set(YarnConfiguration.RmAddress, Rmaddress); conf.Set(JHAdminConfig.MrHistoryAddress, Hshostaddress); // Start the RM. TestClientRedirect.RMService rmService = new TestClientRedirect.RMService(this, "test" ); rmService.Init(conf); rmService.Start(); // Start the AM. TestClientRedirect.AMService amService = new TestClientRedirect.AMService(this); amService.Init(conf); amService.Start(conf); // Start the HS. TestClientRedirect.HistoryService historyService = new TestClientRedirect.HistoryService (this); historyService.Init(conf); historyService.Start(conf); Log.Info("services started"); Cluster cluster = new Cluster(conf); JobID jobID = new JobID("201103121733", 1); Counters counters = cluster.GetJob(jobID).GetCounters(); ValidateCounters(counters); NUnit.Framework.Assert.IsTrue(amContact); Log.Info("Sleeping for 5 seconds before stop for" + " the client socket to not get EOF immediately.." ); Sharpen.Thread.Sleep(5000); //bring down the AM service amService.Stop(); Log.Info("Sleeping for 5 seconds after stop for" + " the server to exit cleanly.." 
); Sharpen.Thread.Sleep(5000); amRestarting = true; // Same client //results are returned from fake (not started job) counters = cluster.GetJob(jobID).GetCounters(); NUnit.Framework.Assert.AreEqual(0, counters.CountCounters()); Job job = cluster.GetJob(jobID); TaskID taskId = new TaskID(jobID, TaskType.Map, 0); TaskAttemptID tId = new TaskAttemptID(taskId, 0); //invoke all methods to check that no exception is thrown job.KillJob(); job.KillTask(tId); job.FailTask(tId); job.GetTaskCompletionEvents(0, 100); job.GetStatus(); job.GetTaskDiagnostics(tId); job.GetTaskReports(TaskType.Map); job.GetTrackingURL(); amRestarting = false; amService = new TestClientRedirect.AMService(this); amService.Init(conf); amService.Start(conf); amContact = false; //reset counters = cluster.GetJob(jobID).GetCounters(); ValidateCounters(counters); NUnit.Framework.Assert.IsTrue(amContact); // Stop the AM. It is not even restarting. So it should be treated as // completed. amService.Stop(); // Same client counters = cluster.GetJob(jobID).GetCounters(); ValidateCounters(counters); NUnit.Framework.Assert.IsTrue(hsContact); rmService.Stop(); historyService.Stop(); }
// Per-test fixture setup: refreshes the 'conf' field through the
// GetConf() hook so each test starts from a clean configuration.
public virtual void Setup() { conf = GetConf(); }
/// <summary>
/// Drives a container through its full happy-path lifecycle on a locally
/// wired NodeManager stack: start a container, wait until it is RUNNING,
/// stop it, and wait until it is COMPLETE.
/// </summary>
public virtual void TestSuccessfulContainerLaunch()
{
    // Start from clean local, log, and remote-log directories.
    FileContext localFS = FileContext.GetLocalFSFileContext();
    localFS.Delete(new Path(localDir.GetAbsolutePath()), true);
    localFS.Delete(new Path(localLogDir.GetAbsolutePath()), true);
    localFS.Delete(new Path(remoteLogDir.GetAbsolutePath()), true);
    localDir.Mkdir();
    localLogDir.Mkdir();
    remoteLogDir.Mkdir();
    YarnConfiguration conf = new YarnConfiguration();
    // Anonymous NM context (see _NMContext_84) with fresh token secret
    // managers and a no-op state store.
    Context context = new _NMContext_84(new NMContainerTokenSecretManager(conf), new NMTokenSecretManagerInNM(), null, null, new NMNullStateStoreService());
    conf.Set(YarnConfiguration.NmLocalDirs, localDir.GetAbsolutePath());
    conf.Set(YarnConfiguration.NmLogDirs, localLogDir.GetAbsolutePath());
    conf.Set(YarnConfiguration.NmRemoteAppLogDir, remoteLogDir.GetAbsolutePath());
    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.SetConf(conf);
    DeletionService del = new DeletionService(exec);
    Dispatcher dispatcher = new AsyncDispatcher();
    NodeHealthCheckerService healthChecker = new NodeHealthCheckerService();
    healthChecker.Init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.GetDiskHandler();
    NodeManagerMetrics metrics = NodeManagerMetrics.Create();
    NodeStatusUpdater nodeStatusUpdater = new _NodeStatusUpdaterImpl_106(context, dispatcher, healthChecker, metrics);
    // Don't start any updating thread.
    DummyContainerManager containerManager = new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics, new ApplicationACLsManager(conf), dirsHandler);
    nodeStatusUpdater.Init(conf);
    ((NodeManager.NMContext)context).SetContainerManager(containerManager);
    nodeStatusUpdater.Start();
    containerManager.Init(conf);
    containerManager.Start();
    // Build a container-start request for app 0 / attempt 0 / container 0
    // with a container token minted against the simulated RM identifier.
    ContainerLaunchContext launchContext = recordFactory.NewRecordInstance<ContainerLaunchContext>();
    ApplicationId applicationId = ApplicationId.NewInstance(0, 0);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.NewInstance(applicationId, 0);
    ContainerId cID = ContainerId.NewContainerId(applicationAttemptId, 0);
    string user = "******";
    StartContainerRequest scRequest = StartContainerRequest.NewInstance(launchContext, TestContainerManager.CreateContainerToken(cID, SimulatedRmIdentifier, context.GetNodeId(), user, context.GetContainerTokenSecretManager()));
    IList<StartContainerRequest> list = new AList<StartContainerRequest>();
    list.AddItem(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.NewInstance(list);
    containerManager.StartContainers(allRequests);
    BaseContainerManagerTest.WaitForContainerState(containerManager, cID, ContainerState.Running);
    // Now stop the same container and wait for completion.
    IList<ContainerId> containerIds = new AList<ContainerId>();
    containerIds.AddItem(cID);
    StopContainersRequest stopRequest = StopContainersRequest.NewInstance(containerIds);
    containerManager.StopContainers(stopRequest);
    BaseContainerManagerTest.WaitForContainerState(containerManager, cID, ContainerState.Complete);
    containerManager.Stop();
}
/// <summary>
/// Verifies that a ResourceManager started with the
/// FileSystemBasedConfigurationProvider reads every configuration file
/// (yarn-site, capacity-scheduler, hadoop-policy, core-site) from the remote
/// file system at init time: exclude hosts, admin ACLs, queue limits, service
/// ACLs, proxy user/host settings, and user-to-groups mappings.
/// </summary>
public virtual void TestRMInitialsWithFileSystemBasedConfigurationProvider()
{
    configuration.Set(YarnConfiguration.RmConfigurationProviderClass, "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
    // upload configurations
    FilePath excludeHostsFile = new FilePath(tmpDir.ToString(), "excludeHosts");
    if (excludeHostsFile.Exists())
    {
        excludeHostsFile.Delete();
    }
    if (!excludeHostsFile.CreateNewFile())
    {
        NUnit.Framework.Assert.Fail("Can not create " + "excludeHosts");
    }
    PrintWriter fileWriter = new PrintWriter(excludeHostsFile);
    fileWriter.Write("0.0.0.0:123");
    fileWriter.Close();
    UploadToRemoteFileSystem(new Path(excludeHostsFile.GetAbsolutePath()));
    // yarn-site.xml: admin ACL + path to the uploaded exclude-hosts file.
    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.Set(YarnConfiguration.YarnAdminAcl, "world:anyone:rwcda");
    yarnConf.Set(YarnConfiguration.RmNodesExcludeFilePath, this.workingPath + "/excludeHosts");
    UploadConfiguration(yarnConf, "yarn-site.xml");
    // capacity-scheduler.xml: non-default max system applications.
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.Set("yarn.scheduler.capacity.maximum-applications", "5000");
    UploadConfiguration(csConf, "capacity-scheduler.xml");
    // hadoop-policy.xml: service-level ACL for the client protocol.
    string aclsString = "alice,bob users,wheel";
    Configuration newConf = new Configuration();
    newConf.Set("security.applicationclient.protocol.acl", aclsString);
    UploadConfiguration(newConf, "hadoop-policy.xml");
    // core-site.xml: authorization on, proxy-user settings, mock group mapper.
    Configuration conf = new Configuration();
    conf.SetBoolean(CommonConfigurationKeysPublic.HadoopSecurityAuthorization, true);
    conf.Set("hadoop.proxyuser.test.groups", "test_groups");
    conf.Set("hadoop.proxyuser.test.hosts", "test_hosts");
    conf.SetClass(CommonConfigurationKeys.HadoopSecurityGroupMapping, typeof(TestRMAdminService.MockUnixGroupsMapping), typeof(GroupMappingServiceProvider));
    UploadConfiguration(conf, "core-site.xml");
    // update the groups
    TestRMAdminService.MockUnixGroupsMapping.UpdateGroups();
    ResourceManager resourceManager = null;
    try
    {
        try
        {
            resourceManager = new ResourceManager();
            resourceManager.Init(configuration);
            resourceManager.Start();
        }
        catch (Exception)
        {
            NUnit.Framework.Assert.Fail("Should not get any exceptions");
        }
        // validate values for excludeHosts
        ICollection<string> excludeHosts = resourceManager.GetRMContext().GetNodesListManager().GetHostsReader().GetExcludedHosts();
        NUnit.Framework.Assert.IsTrue(excludeHosts.Count == 1);
        NUnit.Framework.Assert.IsTrue(excludeHosts.Contains("0.0.0.0:123"));
        // validate values for admin-acls (current user is appended to the ACL)
        string aclStringAfter = resourceManager.adminService.GetAccessControlList().GetAclString().Trim();
        NUnit.Framework.Assert.AreEqual(aclStringAfter, "world:anyone:rwcda," + UserGroupInformation.GetCurrentUser().GetShortUserName());
        // validate values for queue configuration
        CapacityScheduler cs = (CapacityScheduler)resourceManager.GetRMContext().GetScheduler();
        int maxAppsAfter = cs.GetConfiguration().GetMaximumSystemApplications();
        NUnit.Framework.Assert.AreEqual(maxAppsAfter, 5000);
        // verify service Acls for AdminService
        ServiceAuthorizationManager adminServiceServiceManager = resourceManager.adminService.GetServer().GetServiceAuthorizationManager();
        VerifyServiceACLsRefresh(adminServiceServiceManager, typeof(ApplicationClientProtocolPB), aclsString);
        // verify service ACLs for ClientRMService
        ServiceAuthorizationManager clientRMServiceServiceManager = resourceManager.GetRMContext().GetClientRMService().GetServer().GetServiceAuthorizationManager();
        VerifyServiceACLsRefresh(clientRMServiceServiceManager, typeof(ApplicationClientProtocolPB), aclsString);
        // verify service ACLs for ApplicationMasterService
        ServiceAuthorizationManager appMasterService = resourceManager.GetRMContext().GetApplicationMasterService().GetServer().GetServiceAuthorizationManager();
        VerifyServiceACLsRefresh(appMasterService, typeof(ApplicationClientProtocolPB), aclsString);
        // verify service ACLs for ResourceTrackerService
        ServiceAuthorizationManager RTService = resourceManager.GetRMContext().GetResourceTrackerService().GetServer().GetServiceAuthorizationManager();
        VerifyServiceACLsRefresh(RTService, typeof(ApplicationClientProtocolPB), aclsString);
        // verify ProxyUsers and ProxyHosts
        ProxyUsers.RefreshSuperUserGroupsConfiguration(configuration);
        NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyGroups()["hadoop.proxyuser.test.groups"].Count == 1);
        NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyGroups()["hadoop.proxyuser.test.groups"].Contains("test_groups"));
        NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyHosts()["hadoop.proxyuser.test.hosts"].Count == 1);
        NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyHosts()["hadoop.proxyuser.test.hosts"].Contains("test_hosts"));
        // verify UserToGroupsMappings (groups D/E/F come from the updated
        // MockUnixGroupsMapping above)
        IList<string> groupAfter = Groups.GetUserToGroupsMappingService(configuration).GetGroups(UserGroupInformation.GetCurrentUser().GetUserName());
        NUnit.Framework.Assert.IsTrue(groupAfter.Contains("test_group_D") && groupAfter.Contains("test_group_E") && groupAfter.Contains("test_group_F") && groupAfter.Count == 3);
    }
    finally
    {
        if (resourceManager != null)
        {
            resourceManager.Stop();
        }
    }
}
/// <summary>
/// Exercises RM HA together with the FileSystemBasedConfigurationProvider:
/// a capacity-scheduler refresh performed on the active RM (rm1) must not
/// leak to the standby (rm2), and the refreshed value must become visible on
/// rm2 only after failover (when it reloads configuration from the remote FS).
/// </summary>
public virtual void TestRMHAWithFileSystemBasedConfiguration()
{
    HAServiceProtocol.StateChangeRequestInfo requestInfo = new HAServiceProtocol.StateChangeRequestInfo(HAServiceProtocol.RequestSource.RequestByUser);
    configuration.Set(YarnConfiguration.RmConfigurationProviderClass, "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
    configuration.SetBoolean(YarnConfiguration.RmHaEnabled, true);
    // Manual failover only — transitions are driven explicitly below.
    configuration.SetBoolean(YarnConfiguration.AutoFailoverEnabled, false);
    configuration.Set(YarnConfiguration.RmHaIds, "rm1,rm2");
    // Give every RM service address a distinct, non-clashing port per RM id.
    int @base = 100;
    foreach (string confKey in YarnConfiguration.GetServiceAddressConfKeys(configuration))
    {
        configuration.Set(HAUtil.AddSuffix(confKey, "rm1"), "0.0.0.0:" + (@base + 20));
        configuration.Set(HAUtil.AddSuffix(confKey, "rm2"), "0.0.0.0:" + (@base + 40));
        @base = @base * 2;
    }
    Configuration conf1 = new Configuration(configuration);
    conf1.Set(YarnConfiguration.RmHaId, "rm1");
    Configuration conf2 = new Configuration(configuration);
    conf2.Set(YarnConfiguration.RmHaId, "rm2");
    // upload default configurations
    UploadDefaultConfiguration();
    MockRM rm1 = null;
    MockRM rm2 = null;
    try
    {
        rm1 = new MockRM(conf1);
        rm1.Init(conf1);
        rm1.Start();
        // Both RMs start in standby until explicitly transitioned.
        NUnit.Framework.Assert.IsTrue(rm1.GetRMContext().GetHAServiceState() == HAServiceProtocol.HAServiceState.Standby);
        rm2 = new MockRM(conf2);
        // BUG FIX: rm2 was previously initialized with conf1 (which carries
        // RM_HA_ID "rm1"); the second RM must be initialized with its own conf2.
        rm2.Init(conf2);
        rm2.Start();
        NUnit.Framework.Assert.IsTrue(rm2.GetRMContext().GetHAServiceState() == HAServiceProtocol.HAServiceState.Standby);
        rm1.adminService.TransitionToActive(requestInfo);
        NUnit.Framework.Assert.IsTrue(rm1.GetRMContext().GetHAServiceState() == HAServiceProtocol.HAServiceState.Active);
        // Publish an updated capacity-scheduler.xml and refresh queues on the
        // active RM only.
        CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
        csConf.Set("yarn.scheduler.capacity.maximum-applications", "5000");
        UploadConfiguration(csConf, "capacity-scheduler.xml");
        rm1.adminService.RefreshQueues(RefreshQueuesRequest.NewInstance());
        int maxApps = ((CapacityScheduler)rm1.GetRMContext().GetScheduler()).GetConfiguration().GetMaximumSystemApplications();
        NUnit.Framework.Assert.AreEqual(maxApps, 5000);
        // Before failover happens, the maxApps is
        // still the default value on the standby rm : rm2
        int maxAppsBeforeFailOver = ((CapacityScheduler)rm2.GetRMContext().GetScheduler()).GetConfiguration().GetMaximumSystemApplications();
        NUnit.Framework.Assert.AreEqual(maxAppsBeforeFailOver, 10000);
        // Do the failover
        rm1.adminService.TransitionToStandby(requestInfo);
        rm2.adminService.TransitionToActive(requestInfo);
        NUnit.Framework.Assert.IsTrue(rm1.GetRMContext().GetHAServiceState() == HAServiceProtocol.HAServiceState.Standby);
        NUnit.Framework.Assert.IsTrue(rm2.GetRMContext().GetHAServiceState() == HAServiceProtocol.HAServiceState.Active);
        // The newly-active RM must now see the refreshed value.
        int maxAppsAfter = ((CapacityScheduler)rm2.GetRMContext().GetScheduler()).GetConfiguration().GetMaximumSystemApplications();
        NUnit.Framework.Assert.AreEqual(maxAppsAfter, 5000);
    }
    finally
    {
        if (rm1 != null)
        {
            rm1.Stop();
        }
        if (rm2 != null)
        {
            rm2.Stop();
        }
    }
}
static TestRMAdminService() { YarnConfiguration.AddDefaultResource(YarnConfiguration.CsConfigurationFile); }
/// <summary>Put timeline data in a JSON file via command line.</summary>
/// <param name="path">path to the timeline data JSON file</param>
/// <param name="type">the type of the timeline data in the JSON file</param>
private static void PutTimelineDataInJSONFile(string path, string type)
{
    FilePath jsonFile = new FilePath(path);
    if (!jsonFile.Exists())
    {
        Log.Error("File [" + jsonFile.GetAbsolutePath() + "] doesn't exist");
        return;
    }
    // Deserialize the file into either entities or domains depending on type.
    ObjectMapper mapper = new ObjectMapper();
    YarnJacksonJaxbJsonProvider.ConfigObjectMapper(mapper);
    TimelineEntities entities = null;
    TimelineDomains domains = null;
    try
    {
        if (type.Equals(EntityDataType))
        {
            entities = mapper.ReadValue<TimelineEntities>(jsonFile);
        }
        else
        {
            if (type.Equals(DomainDataType))
            {
                domains = mapper.ReadValue<TimelineDomains>(jsonFile);
            }
        }
    }
    catch (Exception e)
    {
        // Parse failure is fatal for this command: log and bail out.
        Log.Error("Error when reading " + e.Message);
        Sharpen.Runtime.PrintStackTrace(e, System.Console.Error);
        return;
    }
    Configuration conf = new YarnConfiguration();
    TimelineClient client = TimelineClient.CreateTimelineClient();
    client.Init(conf);
    client.Start();
    try
    {
        // In a secure cluster, obtain and attach a timeline delegation token
        // before talking to the timeline service.
        if (UserGroupInformation.IsSecurityEnabled() && conf.GetBoolean(YarnConfiguration.TimelineServiceEnabled, false))
        {
            Org.Apache.Hadoop.Security.Token.Token<TimelineDelegationTokenIdentifier> token = client.GetDelegationToken(UserGroupInformation.GetCurrentUser().GetUserName());
            UserGroupInformation.GetCurrentUser().AddToken(token);
        }
        if (type.Equals(EntityDataType))
        {
            // Entities are put in one batch; per-entity failures come back in
            // the response's error list.
            TimelinePutResponse response = client.PutEntities(Sharpen.Collections.ToArray(entities.GetEntities(), new TimelineEntity[entities.GetEntities().Count]));
            if (response.GetErrors().Count == 0)
            {
                Log.Info("Timeline entities are successfully put");
            }
            else
            {
                foreach (TimelinePutResponse.TimelinePutError error in response.GetErrors())
                {
                    Log.Error("TimelineEntity [" + error.GetEntityType() + ":" + error.GetEntityId() + "] is not successfully put. Error code: " + error.GetErrorCode());
                }
            }
        }
        else
        {
            if (type.Equals(DomainDataType))
            {
                // Domains are put one at a time; keep going on failure and
                // report success only if every put succeeded.
                bool hasError = false;
                foreach (TimelineDomain domain in domains.GetDomains())
                {
                    try
                    {
                        client.PutDomain(domain);
                    }
                    catch (Exception e)
                    {
                        Log.Error("Error when putting domain " + domain.GetId(), e);
                        hasError = true;
                    }
                }
                if (!hasError)
                {
                    Log.Info("Timeline domains are successfully put");
                }
            }
        }
    }
    catch (RuntimeException e)
    {
        Log.Error("Error when putting the timeline data", e);
    }
    catch (Exception e)
    {
        Log.Error("Error when putting the timeline data", e);
    }
    finally
    {
        client.Stop();
    }
}
public virtual void Setup() { FileUtil.FullyDelete(TestDir); conf = new YarnConfiguration(); conf.Set(YarnConfiguration.RmLeveldbStorePath, TestDir.ToString()); }
/// <summary>
/// CLI entry point for the RM admin tool: strips the
/// -directlyAccessNodeLabelStore flag, validates the command and its
/// arguments, dispatches to the matching handler, and maps server-side
/// failures to error messages and a non-zero exit code.
/// </summary>
/// <param name="args">raw command-line arguments</param>
/// <returns>0 on success, -1 on usage or execution failure</returns>
/// <exception cref="System.Exception"/>
public override int Run(string[] args)
{
    // -directlyAccessNodeLabelStore is a additional option for node label
    // access, so just search if we have specified this option, and remove it
    IList<string> filteredArgs = new AList<string>();
    for (int idx = 0; idx < args.Length; idx++)
    {
        if (args[idx].Equals("-directlyAccessNodeLabelStore"))
        {
            directlyAccessNodeLabelStore = true;
        }
        else
        {
            filteredArgs.AddItem(args[idx]);
        }
    }
    args = Sharpen.Collections.ToArray(filteredArgs, new string[0]);
    YarnConfiguration yarnConf = GetConf() == null ? new YarnConfiguration() : new YarnConfiguration(GetConf());
    bool isHAEnabled = yarnConf.GetBoolean(YarnConfiguration.RmHaEnabled, YarnConfiguration.DefaultRmHaEnabled);
    if (args.Length < 1)
    {
        PrintUsage(string.Empty, isHAEnabled);
        return -1;
    }
    int exitCode = -1;
    int pos = 0;
    string cmd = args[pos++];
    exitCode = 0;
    if ("-help".Equals(cmd))
    {
        // Help for a specific command when one follows, otherwise general help.
        if (pos < args.Length)
        {
            PrintUsage(args[pos], isHAEnabled);
        }
        else
        {
            PrintHelp(string.Empty, isHAEnabled);
        }
        return exitCode;
    }
    if (Usage.Contains(cmd))
    {
        // HA sub-commands are delegated to the HA-aware base implementation.
        if (isHAEnabled)
        {
            return base.Run(args);
        }
        System.Console.Out.WriteLine("Cannot run " + cmd + " when ResourceManager HA is not enabled");
        return -1;
    }
    //
    // verify that we have enough command line parameters
    //
    if ("-refreshAdminAcls".Equals(cmd) || "-refreshQueues".Equals(cmd) || "-refreshNodes".Equals(cmd) || "-refreshServiceAcl".Equals(cmd) || "-refreshUserToGroupsMappings".Equals(cmd) || "-refreshSuperUserGroupsConfiguration".Equals(cmd))
    {
        if (args.Length != 1)
        {
            PrintUsage(cmd, isHAEnabled);
            return exitCode;
        }
    }
    try
    {
        // Flat dispatch table: one handler per admin command.
        if ("-refreshQueues".Equals(cmd))
        {
            exitCode = RefreshQueues();
        }
        else if ("-refreshNodes".Equals(cmd))
        {
            exitCode = RefreshNodes();
        }
        else if ("-refreshUserToGroupsMappings".Equals(cmd))
        {
            exitCode = RefreshUserToGroupsMappings();
        }
        else if ("-refreshSuperUserGroupsConfiguration".Equals(cmd))
        {
            exitCode = RefreshSuperUserGroupsConfiguration();
        }
        else if ("-refreshAdminAcls".Equals(cmd))
        {
            exitCode = RefreshAdminAcls();
        }
        else if ("-refreshServiceAcl".Equals(cmd))
        {
            exitCode = RefreshServiceAcls();
        }
        else if ("-getGroups".Equals(cmd))
        {
            string[] usernames = Arrays.CopyOfRange(args, pos, args.Length);
            exitCode = GetGroups(usernames);
        }
        else if ("-addToClusterNodeLabels".Equals(cmd))
        {
            if (pos >= args.Length)
            {
                System.Console.Error.WriteLine(NoLabelErrMsg);
                exitCode = -1;
            }
            else
            {
                exitCode = AddToClusterNodeLabels(args[pos]);
            }
        }
        else if ("-removeFromClusterNodeLabels".Equals(cmd))
        {
            if (pos >= args.Length)
            {
                System.Console.Error.WriteLine(NoLabelErrMsg);
                exitCode = -1;
            }
            else
            {
                exitCode = RemoveFromClusterNodeLabels(args[pos]);
            }
        }
        else if ("-replaceLabelsOnNode".Equals(cmd))
        {
            if (pos >= args.Length)
            {
                System.Console.Error.WriteLine(NoMappingErrMsg);
                exitCode = -1;
            }
            else
            {
                exitCode = ReplaceLabelsOnNodes(args[pos]);
            }
        }
        else
        {
            exitCode = -1;
            System.Console.Error.WriteLine(Sharpen.Runtime.Substring(cmd, 1) + ": Unknown command");
            PrintUsage(string.Empty, isHAEnabled);
        }
    }
    catch (ArgumentException arge)
    {
        exitCode = -1;
        System.Console.Error.WriteLine(Sharpen.Runtime.Substring(cmd, 1) + ": " + arge.GetLocalizedMessage());
        PrintUsage(cmd, isHAEnabled);
    }
    catch (RemoteException e)
    {
        //
        // This is a error returned by hadoop server. Print
        // out the first line of the error mesage, ignore the stack trace.
        exitCode = -1;
        try
        {
            string[] content = e.GetLocalizedMessage().Split("\n");
            System.Console.Error.WriteLine(Sharpen.Runtime.Substring(cmd, 1) + ": " + content[0]);
        }
        catch (Exception ex)
        {
            System.Console.Error.WriteLine(Sharpen.Runtime.Substring(cmd, 1) + ": " + ex.GetLocalizedMessage());
        }
    }
    catch (Exception e)
    {
        exitCode = -1;
        System.Console.Error.WriteLine(Sharpen.Runtime.Substring(cmd, 1) + ": " + e.GetLocalizedMessage());
    }
    if (null != localNodeLabelsManager)
    {
        localNodeLabelsManager.Stop();
    }
    return exitCode;
}
/// <exception cref="System.IO.IOException"/> public virtual void TestNodeLabelsDisabled() { DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager(); Configuration conf = new YarnConfiguration(); conf.SetBoolean(YarnConfiguration.NodeLabelsEnabled, false); mgr.Init(conf); mgr.Start(); bool caught = false; // add labels try { mgr.AddToCluserNodeLabels(ImmutableSet.Of("x")); } catch (IOException e) { AssertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught NUnit.Framework.Assert.IsTrue(caught); caught = false; // remove labels try { mgr.RemoveFromClusterNodeLabels(ImmutableSet.Of("x")); } catch (IOException e) { AssertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught NUnit.Framework.Assert.IsTrue(caught); caught = false; // add labels to node try { mgr.AddLabelsToNode(ImmutableMap.Of(NodeId.NewInstance("host", 0), CommonNodeLabelsManager .EmptyStringSet)); } catch (IOException e) { AssertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught NUnit.Framework.Assert.IsTrue(caught); caught = false; // remove labels from node try { mgr.RemoveLabelsFromNode(ImmutableMap.Of(NodeId.NewInstance("host", 0), CommonNodeLabelsManager .EmptyStringSet)); } catch (IOException e) { AssertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught NUnit.Framework.Assert.IsTrue(caught); caught = false; // replace labels on node try { mgr.ReplaceLabelsOnNode(ImmutableMap.Of(NodeId.NewInstance("host", 0), CommonNodeLabelsManager .EmptyStringSet)); } catch (IOException e) { AssertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught NUnit.Framework.Assert.IsTrue(caught); caught = false; mgr.Close(); }
public override void SetConf(Configuration conf) { conf = new YarnConfiguration(conf); base.SetConf(conf); }
protected internal virtual IPEndPoint GetRMAddress(YarnConfiguration conf, Type protocol ) { throw new NotSupportedException("This method should be invoked " + "from an instance of ClientRMProxy or ServerRMProxy" ); }
// To hold list of application for which event was received
/// <summary>
/// Verifies that NODE_USABLE / NODE_UNUSABLE events fired at the
/// NodesListManager are propagated only to still-running applications —
/// finished and killed apps must not receive them (checked via the
/// applist collected by the test dispatcher).
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestNodeUsableEvent()
{
    Logger rootLogger = LogManager.GetRootLogger();
    rootLogger.SetLevel(Level.Debug);
    Dispatcher dispatcher = GetDispatcher();
    YarnConfiguration conf = new YarnConfiguration();
    MockRM rm = new _MockRM_62(dispatcher, conf);
    rm.Start();
    MockNM nm1 = rm.RegisterNode("h1:1234", 28000);
    NodesListManager nodesListManager = rm.GetNodesListManager();
    Resource clusterResource = Resource.NewInstance(28000, 8);
    RMNode rmnode = MockNodes.NewNodeInfo(1, clusterResource);
    // Create killing APP
    RMApp killrmApp = rm.SubmitApp(200);
    rm.KillApp(killrmApp.GetApplicationId());
    rm.WaitForState(killrmApp.GetApplicationId(), RMAppState.Killed);
    // Create finish APP: drive one attempt through register/unregister and a
    // completed-container heartbeat so the attempt reaches FINISHED.
    RMApp finshrmApp = rm.SubmitApp(2000);
    nm1.NodeHeartbeat(true);
    RMAppAttempt attempt = finshrmApp.GetCurrentAppAttempt();
    MockAM am = rm.SendAMLaunched(attempt.GetAppAttemptId());
    am.RegisterAppAttempt();
    am.UnregisterAppAttempt();
    nm1.NodeHeartbeat(attempt.GetAppAttemptId(), 1, ContainerState.Complete);
    am.WaitForState(RMAppAttemptState.Finished);
    // Create submitted App
    RMApp subrmApp = rm.SubmitApp(200);
    // Fire Event for NODE_USABLE
    nodesListManager.Handle(new NodesListManagerEvent(NodesListManagerEventType.NodeUsable, rmnode));
    if (applist.Count > 0)
    {
        NUnit.Framework.Assert.IsTrue("Event based on running app expected " + subrmApp.GetApplicationId(), applist.Contains(subrmApp.GetApplicationId()));
        NUnit.Framework.Assert.IsFalse("Event based on finish app not expected " + finshrmApp.GetApplicationId(), applist.Contains(finshrmApp.GetApplicationId()));
        NUnit.Framework.Assert.IsFalse("Event based on killed app not expected " + killrmApp.GetApplicationId(), applist.Contains(killrmApp.GetApplicationId()));
    }
    else
    {
        NUnit.Framework.Assert.Fail("Events received should have beeen more than 1");
    }
    applist.Clear();
    // Fire Event for NODE_UNUSABLE
    nodesListManager.Handle(new NodesListManagerEvent(NodesListManagerEventType.NodeUnusable, rmnode));
    if (applist.Count > 0)
    {
        NUnit.Framework.Assert.IsTrue("Event based on running app expected " + subrmApp.GetApplicationId(), applist.Contains(subrmApp.GetApplicationId()));
        NUnit.Framework.Assert.IsFalse("Event based on finish app not expected " + finshrmApp.GetApplicationId(), applist.Contains(finshrmApp.GetApplicationId()));
        NUnit.Framework.Assert.IsFalse("Event based on killed app not expected " + killrmApp.GetApplicationId(), applist.Contains(killrmApp.GetApplicationId()));
    }
    else
    {
        NUnit.Framework.Assert.Fail("Events received should have beeen more than 1");
    }
}
/// <summary>
/// State-store conformance test shared by all RMStateStore implementations:
/// stores an app with two attempts, removes another app, then closes and
/// reloads the store to verify apps/attempts (including tokens and master
/// keys) round-trip; finally updates app/attempt state (including entries
/// whose initial state was never saved) and verifies the updates after a
/// second reload.
/// </summary>
/// <exception cref="System.Exception"/>
internal virtual void TestRMAppStateStore(RMStateStoreTestBase.RMStateStoreHelper stateStoreHelper, RMStateStoreTestBase.StoreStateVerifier verifier)
{
    long submitTime = Runtime.CurrentTimeMillis();
    long startTime = Runtime.CurrentTimeMillis() + 1234;
    Configuration conf = new YarnConfiguration();
    RMStateStore store = stateStoreHelper.GetRMStateStore();
    RMStateStoreTestBase.TestDispatcher dispatcher = new RMStateStoreTestBase.TestDispatcher();
    store.SetRMDispatcher(dispatcher);
    RMContext rmContext = Org.Mockito.Mockito.Mock<RMContext>();
    Org.Mockito.Mockito.When(rmContext.GetStateStore()).ThenReturn(store);
    AMRMTokenSecretManager appTokenMgr = Org.Mockito.Mockito.Spy(new AMRMTokenSecretManager(conf, rmContext));
    MasterKeyData masterKeyData = appTokenMgr.CreateNewMasterKey();
    Org.Mockito.Mockito.When(appTokenMgr.GetMasterKey()).ThenReturn(masterKeyData);
    ClientToAMTokenSecretManagerInRM clientToAMTokenMgr = new ClientToAMTokenSecretManagerInRM();
    ApplicationAttemptId attemptId1 = ConverterUtils.ToApplicationAttemptId("appattempt_1352994193343_0001_000001");
    ApplicationId appId1 = attemptId1.GetApplicationId();
    StoreApp(store, appId1, submitTime, startTime);
    verifier.AfterStoreApp(store, appId1);
    // create application token and client token key for attempt1
    Org.Apache.Hadoop.Security.Token.Token<AMRMTokenIdentifier> appAttemptToken1 = GenerateAMRMToken(attemptId1, appTokenMgr);
    SecretKey clientTokenKey1 = clientToAMTokenMgr.CreateMasterKey(attemptId1);
    ContainerId containerId1 = StoreAttempt(store, attemptId1, "container_1352994193343_0001_01_000001", appAttemptToken1, clientTokenKey1, dispatcher);
    string appAttemptIdStr2 = "appattempt_1352994193343_0001_000002";
    ApplicationAttemptId attemptId2 = ConverterUtils.ToApplicationAttemptId(appAttemptIdStr2);
    // create application token and client token key for attempt2
    Org.Apache.Hadoop.Security.Token.Token<AMRMTokenIdentifier> appAttemptToken2 = GenerateAMRMToken(attemptId2, appTokenMgr);
    SecretKey clientTokenKey2 = clientToAMTokenMgr.CreateMasterKey(attemptId2);
    ContainerId containerId2 = StoreAttempt(store, attemptId2, "container_1352994193343_0001_02_000001", appAttemptToken2, clientTokenKey2, dispatcher);
    // Second application (0002) that will be stored and then removed.
    ApplicationAttemptId attemptIdRemoved = ConverterUtils.ToApplicationAttemptId("appattempt_1352994193343_0002_000001");
    ApplicationId appIdRemoved = attemptIdRemoved.GetApplicationId();
    StoreApp(store, appIdRemoved, submitTime, startTime);
    StoreAttempt(store, attemptIdRemoved, "container_1352994193343_0002_01_000001", null, null, dispatcher);
    verifier.AfterStoreAppAttempt(store, attemptIdRemoved);
    // Build a mock RMApp matching the 0002 app so RemoveApplication can be
    // driven without a full app object.
    RMApp mockRemovedApp = Org.Mockito.Mockito.Mock<RMApp>();
    RMAppAttemptMetrics mockRmAppAttemptMetrics = Org.Mockito.Mockito.Mock<RMAppAttemptMetrics>();
    Dictionary<ApplicationAttemptId, RMAppAttempt> attempts = new Dictionary<ApplicationAttemptId, RMAppAttempt>();
    ApplicationSubmissionContext context = new ApplicationSubmissionContextPBImpl();
    context.SetApplicationId(appIdRemoved);
    Org.Mockito.Mockito.When(mockRemovedApp.GetSubmitTime()).ThenReturn(submitTime);
    Org.Mockito.Mockito.When(mockRemovedApp.GetApplicationSubmissionContext()).ThenReturn(context);
    Org.Mockito.Mockito.When(mockRemovedApp.GetAppAttempts()).ThenReturn(attempts);
    Org.Mockito.Mockito.When(mockRemovedApp.GetUser()).ThenReturn("user1");
    RMAppAttempt mockRemovedAttempt = Org.Mockito.Mockito.Mock<RMAppAttempt>();
    Org.Mockito.Mockito.When(mockRemovedAttempt.GetAppAttemptId()).ThenReturn(attemptIdRemoved);
    Org.Mockito.Mockito.When(mockRemovedAttempt.GetRMAppAttemptMetrics()).ThenReturn(mockRmAppAttemptMetrics);
    Org.Mockito.Mockito.When(mockRmAppAttemptMetrics.GetAggregateAppResourceUsage()).ThenReturn(new AggregateAppResourceUsage(0, 0));
    attempts[attemptIdRemoved] = mockRemovedAttempt;
    store.RemoveApplication(mockRemovedApp);
    // remove application directory recursively.
    StoreApp(store, appIdRemoved, submitTime, startTime);
    StoreAttempt(store, attemptIdRemoved, "container_1352994193343_0002_01_000001", null, null, dispatcher);
    store.RemoveApplication(mockRemovedApp);
    // let things settle down
    Sharpen.Thread.Sleep(1000);
    store.Close();
    // give tester a chance to modify app state in the store
    ModifyAppState();
    // load state
    store = stateStoreHelper.GetRMStateStore();
    store.SetRMDispatcher(dispatcher);
    RMStateStore.RMState state = store.LoadState();
    IDictionary<ApplicationId, ApplicationStateData> rmAppState = state.GetApplicationState();
    ApplicationStateData appState = rmAppState[appId1];
    // app is loaded
    NUnit.Framework.Assert.IsNotNull(appState);
    // app is loaded correctly
    NUnit.Framework.Assert.AreEqual(submitTime, appState.GetSubmitTime());
    NUnit.Framework.Assert.AreEqual(startTime, appState.GetStartTime());
    // submission context is loaded correctly
    NUnit.Framework.Assert.AreEqual(appId1, appState.GetApplicationSubmissionContext().GetApplicationId());
    ApplicationAttemptStateData attemptState = appState.GetAttempt(attemptId1);
    // attempt1 is loaded correctly
    NUnit.Framework.Assert.IsNotNull(attemptState);
    NUnit.Framework.Assert.AreEqual(attemptId1, attemptState.GetAttemptId());
    NUnit.Framework.Assert.AreEqual(-1000, attemptState.GetAMContainerExitStatus());
    // attempt1 container is loaded correctly
    NUnit.Framework.Assert.AreEqual(containerId1, attemptState.GetMasterContainer().GetId());
    // attempt1 client token master key is loaded correctly
    Assert.AssertArrayEquals(clientTokenKey1.GetEncoded(), attemptState.GetAppAttemptTokens().GetSecretKey(RMStateStore.AmClientTokenMasterKeyName));
    attemptState = appState.GetAttempt(attemptId2);
    // attempt2 is loaded correctly
    NUnit.Framework.Assert.IsNotNull(attemptState);
    NUnit.Framework.Assert.AreEqual(attemptId2, attemptState.GetAttemptId());
    // attempt2 container is loaded correctly
    NUnit.Framework.Assert.AreEqual(containerId2, attemptState.GetMasterContainer().GetId());
    // attempt2 client token master key is loaded correctly
    Assert.AssertArrayEquals(clientTokenKey2.GetEncoded(), attemptState.GetAppAttemptTokens().GetSecretKey(RMStateStore.AmClientTokenMasterKeyName));
    //******* update application/attempt state *******//
    ApplicationStateData appState2 = ApplicationStateData.NewInstance(appState.GetSubmitTime(), appState.GetStartTime(), appState.GetUser(), appState.GetApplicationSubmissionContext(), RMAppState.Finished, "appDiagnostics", 1234);
    appState2.attempts.PutAll(appState.attempts);
    store.UpdateApplicationState(appState2);
    ApplicationAttemptStateData oldAttemptState = attemptState;
    ApplicationAttemptStateData newAttemptState = ApplicationAttemptStateData.NewInstance(oldAttemptState.GetAttemptId(), oldAttemptState.GetMasterContainer(), oldAttemptState.GetAppAttemptTokens(), oldAttemptState.GetStartTime(), RMAppAttemptState.Finished, "myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.Succeeded, 100, oldAttemptState.GetFinishTime(), 0, 0);
    store.UpdateApplicationAttemptState(newAttemptState);
    // test updating the state of an app/attempt whose initial state was not
    // saved.
    ApplicationId dummyAppId = ApplicationId.NewInstance(1234, 10);
    ApplicationSubmissionContext dummyContext = new ApplicationSubmissionContextPBImpl();
    dummyContext.SetApplicationId(dummyAppId);
    ApplicationStateData dummyApp = ApplicationStateData.NewInstance(appState.GetSubmitTime(), appState.GetStartTime(), appState.GetUser(), dummyContext, RMAppState.Finished, "appDiagnostics", 1234);
    store.UpdateApplicationState(dummyApp);
    ApplicationAttemptId dummyAttemptId = ApplicationAttemptId.NewInstance(dummyAppId, 6);
    ApplicationAttemptStateData dummyAttempt = ApplicationAttemptStateData.NewInstance(dummyAttemptId, oldAttemptState.GetMasterContainer(), oldAttemptState.GetAppAttemptTokens(), oldAttemptState.GetStartTime(), RMAppAttemptState.Finished, "myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.Succeeded, 111, oldAttemptState.GetFinishTime(), 0, 0);
    store.UpdateApplicationAttemptState(dummyAttempt);
    // let things settle down
    Sharpen.Thread.Sleep(1000);
    store.Close();
    // check updated application state.
    store = stateStoreHelper.GetRMStateStore();
    store.SetRMDispatcher(dispatcher);
    RMStateStore.RMState newRMState = store.LoadState();
    IDictionary<ApplicationId, ApplicationStateData> newRMAppState = newRMState.GetApplicationState();
    NUnit.Framework.Assert.IsNotNull(newRMAppState[dummyApp.GetApplicationSubmissionContext().GetApplicationId()]);
    ApplicationStateData updatedAppState = newRMAppState[appId1];
    NUnit.Framework.Assert.AreEqual(appState.GetApplicationSubmissionContext().GetApplicationId(), updatedAppState.GetApplicationSubmissionContext().GetApplicationId());
    NUnit.Framework.Assert.AreEqual(appState.GetSubmitTime(), updatedAppState.GetSubmitTime());
    NUnit.Framework.Assert.AreEqual(appState.GetStartTime(), updatedAppState.GetStartTime());
    NUnit.Framework.Assert.AreEqual(appState.GetUser(), updatedAppState.GetUser());
    // new app state fields
    NUnit.Framework.Assert.AreEqual(RMAppState.Finished, updatedAppState.GetState());
    NUnit.Framework.Assert.AreEqual("appDiagnostics", updatedAppState.GetDiagnostics());
    NUnit.Framework.Assert.AreEqual(1234, updatedAppState.GetFinishTime());
    // check updated attempt state
    NUnit.Framework.Assert.IsNotNull(newRMAppState[dummyApp.GetApplicationSubmissionContext().GetApplicationId()].GetAttempt(dummyAttemptId));
    ApplicationAttemptStateData updatedAttemptState = updatedAppState.GetAttempt(newAttemptState.GetAttemptId());
    NUnit.Framework.Assert.AreEqual(oldAttemptState.GetAttemptId(), updatedAttemptState.GetAttemptId());
    NUnit.Framework.Assert.AreEqual(containerId2, updatedAttemptState.GetMasterContainer().GetId());
    // NOTE(review): this re-checks the pre-update `attemptState`, not
    // `updatedAttemptState` — looks like it was meant to assert the reloaded
    // attempt's tokens; confirm before changing.
    Assert.AssertArrayEquals(clientTokenKey2.GetEncoded(), attemptState.GetAppAttemptTokens().GetSecretKey(RMStateStore.AmClientTokenMasterKeyName));
    // new attempt state fields
    NUnit.Framework.Assert.AreEqual(RMAppAttemptState.Finished, updatedAttemptState.GetState());
    NUnit.Framework.Assert.AreEqual("myTrackingUrl", updatedAttemptState.GetFinalTrackingUrl());
    NUnit.Framework.Assert.AreEqual("attemptDiagnostics", updatedAttemptState.GetDiagnostics());
    NUnit.Framework.Assert.AreEqual(100, updatedAttemptState.GetAMContainerExitStatus());
    NUnit.Framework.Assert.AreEqual(FinalApplicationStatus.Succeeded, updatedAttemptState.GetFinalApplicationStatus());
    // assert store is in expected state after everything is cleaned
    NUnit.Framework.Assert.IsTrue(stateStoreHelper.IsFinalStateValid());
    store.Close();
}
/// <summary>
/// Initializes the driver connection from the ambient YARN environment:
/// loads the default YARN configuration and reads the REEF application id
/// from its well-known environment variable.
/// </summary>
private DefaultYarnClusterHttpDriverConnection()
{
    // Default configuration lookup (no explicit Hadoop configuration directory).
    _yarnConfiguration = YarnConfiguration.GetConfiguration();

    // The launcher exports the application id through this environment variable.
    _applicationId =
        Environment.GetEnvironmentVariable(Constants.ReefYarnApplicationIdEnvironmentVariable);
}
/// <summary>
/// Verifies that pending deletions survive a DeletionService restart: deletion
/// work is scheduled against an NM state store, the service is stopped and a
/// fresh instance is created over the same store, and the recovered service
/// must still delete every scheduled path.
/// </summary>
public virtual void TestRecovery()
{
    Random rand = new Random();
    long seed = rand.NextLong();
    rand.SetSeed(seed);
    // Log the seed so a failing run can be reproduced.
    System.Console.Out.WriteLine("SEED: " + seed);

    // Four base directories, each populated with the same ten entries.
    IList<Path> baseDirs = BuildDirs(rand, @base, 4);
    CreateDirs(new Path("."), baseDirs);
    IList<Path> content = BuildDirs(rand, new Path("."), 10);
    foreach (Path baseDir in baseDirs)
    {
        CreateDirs(baseDir, content);
    }

    Configuration conf = new YarnConfiguration();
    conf.SetBoolean(YarnConfiguration.NmRecoveryEnabled, true);
    conf.SetInt(YarnConfiguration.DebugNmDeleteDelaySec, 1);

    NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.Init(conf);
    stateStore.Start();

    DeletionService del =
        new DeletionService(new TestDeletionService.FakeDefaultContainerExecutor(), stateStore);
    try
    {
        del.Init(conf);
        del.Start();
        foreach (Path entry in content)
        {
            NUnit.Framework.Assert.IsTrue(lfs.Util().Exists(new Path(baseDirs[0], entry)));
            // Alternate between a null user and "dingo" based on the numeric entry name.
            string user = (long.Parse(entry.GetName()) % 2) == 0 ? null : "dingo";
            del.Delete(user, entry, Sharpen.Collections.ToArray(baseDirs, new Path[4]));
        }

        // restart the deletion service; recovery should reload the pending work
        del.Stop();
        del = new DeletionService(
            new TestDeletionService.FakeDefaultContainerExecutor(), stateStore);
        del.Init(conf);
        del.Start();

        // verify paths are still eventually deleted; the 10-second wait budget
        // is shared across all paths, exactly as in the original
        int remainingMs = 10 * 1000;
        foreach (Path baseDir in baseDirs)
        {
            foreach (Path entry in content)
            {
                Path fullPath = new Path(baseDir, entry);
                while (remainingMs > 0 && lfs.Util().Exists(fullPath))
                {
                    Sharpen.Thread.Sleep(100);
                    remainingMs -= 100;
                }
                NUnit.Framework.Assert.IsFalse(lfs.Util().Exists(fullPath));
            }
        }
    }
    finally
    {
        del.Close();
        stateStore.Close();
    }
}
/// <summary>
/// Builds the shared test configuration, forcing the FIFO scheduler as the
/// ResourceScheduler implementation.
/// </summary>
public static void Setup()
{
    YarnConfiguration configuration = new YarnConfiguration();
    configuration.SetClass(
        YarnConfiguration.RmScheduler, typeof(FifoScheduler), typeof(ResourceScheduler));
    conf = configuration;
}
/// <summary>
/// Injectable constructor: resolves the YARN configuration from the given
/// Hadoop configuration directory, optionally communicating over HTTPS.
/// </summary>
/// <param name="hadoopConfigDir">Directory holding the Hadoop configuration files.</param>
/// <param name="useHttps">Whether YARN communication should use HTTPS.</param>
private YarnConfigurationUrlProvider(
    [Parameter(typeof(HadoopConfigurationDirectory))] string hadoopConfigDir,
    [Parameter(typeof(UseHttpsForYarnCommunication))] bool useHttps)
{
    var configuration = YarnConfiguration.GetConfiguration(hadoopConfigDir, useHttps: useHttps);
    _yarnConfiguration = configuration;
}
/// <summary>
/// Verifies that delegation-token get/renew/cancel operations against the
/// timeline service are retried the configured number of times before the
/// client gives up (each branch expects a RuntimeException, which is checked
/// via AssertException).
/// </summary>
public virtual void TestDelegationTokenOperationsRetry()
{
    int newMaxRetries = 5;
    long newIntervalMs = 500;
    YarnConfiguration conf = new YarnConfiguration();
    conf.SetInt(YarnConfiguration.TimelineServiceClientMaxRetries, newMaxRetries);
    conf.SetLong(YarnConfiguration.TimelineServiceClientRetryIntervalMs, newIntervalMs);
    conf.SetBoolean(YarnConfiguration.TimelineServiceEnabled, true);
    // use kerberos to bypass the issue in HADOOP-11215
    conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthentication, "kerberos");
    UserGroupInformation.SetConfiguration(conf);
    TimelineClientImpl client = CreateTimelineClient(conf);
    TestTimelineClient.TestTimlineDelegationTokenSecretManager dtManager =
        new TestTimelineClient.TestTimlineDelegationTokenSecretManager();
    try
    {
        dtManager.StartThreads();
        // Give the secret manager threads a moment to come up.
        Sharpen.Thread.Sleep(3000);
        try
        {
            // try getting a delegation token
            client.GetDelegationToken(UserGroupInformation.GetCurrentUser().GetShortUserName());
            AssertFail();
        }
        catch (RuntimeException ce)
        {
            AssertException(client, ce);
        }
        try
        {
            // try renew a delegation token
            client.RenewDelegationToken(CreateTestTimelineToken(dtManager));
            AssertFail();
        }
        catch (RuntimeException ce)
        {
            AssertException(client, ce);
        }
        try
        {
            // try cancel a delegation token
            client.CancelDelegationToken(CreateTestTimelineToken(dtManager));
            AssertFail();
        }
        catch (RuntimeException ce)
        {
            AssertException(client, ce);
        }
    }
    finally
    {
        client.Stop();
        dtManager.StopThreads();
    }
}

/// <summary>
/// Builds a throwaway timeline delegation token (owner/renewer/realUser all
/// "tester") signed by the given secret manager and addressed to
/// 0.0.0.0:8188. Extracted because the renew and cancel branches previously
/// duplicated this construction verbatim.
/// </summary>
private static Org.Apache.Hadoop.Security.Token.Token<TimelineDelegationTokenIdentifier>
    CreateTestTimelineToken(
        TestTimelineClient.TestTimlineDelegationTokenSecretManager dtManager)
{
    TimelineDelegationTokenIdentifier timelineDT = new TimelineDelegationTokenIdentifier(
        new Text("tester"), new Text("tester"), new Text("tester"));
    return new Org.Apache.Hadoop.Security.Token.Token<TimelineDelegationTokenIdentifier>(
        timelineDT.GetBytes(), dtManager.CreatePassword(timelineDT), timelineDT.GetKind(),
        new Text("0.0.0.0:8188"));
}
/// <summary>
/// Exercises CapacityScheduler AM-resource-limit and max-application
/// computations for leaf queue 'A' against a mocked scheduler context:
/// initial cluster size, cluster growth, per-queue AM-resource-percent
/// override, and per-queue maximum-applications override.
/// </summary>
/// <remarks>
/// NOTE(review): the AreEqual calls pass the computed value first and the
/// literal expectation second, which is reversed relative to NUnit's
/// (expected, actual) convention — failure messages will be swapped.
/// Left as-is to preserve the original code exactly.
/// </remarks>
public virtual void TestLimitsComputation() { CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(); SetupQueueConfiguration(csConf); YarnConfiguration conf = new YarnConfiguration();
// Mock the scheduler context so the queue hierarchy can be parsed without a live RM.
CapacitySchedulerContext csContext = Org.Mockito.Mockito.Mock <CapacitySchedulerContext >(); Org.Mockito.Mockito.When(csContext.GetConfiguration()).ThenReturn(csConf); Org.Mockito.Mockito.When(csContext.GetConf()).ThenReturn(conf); Org.Mockito.Mockito.When(csContext.GetMinimumResourceCapability()).ThenReturn(Resources .CreateResource(Gb, 1)); Org.Mockito.Mockito.When(csContext.GetMaximumResourceCapability()).ThenReturn(Resources .CreateResource(16 * Gb, 16)); Org.Mockito.Mockito.When(csContext.GetApplicationComparator()).ThenReturn(CapacityScheduler .applicationComparator); Org.Mockito.Mockito.When(csContext.GetQueueComparator()).ThenReturn(CapacityScheduler .queueComparator); Org.Mockito.Mockito.When(csContext.GetResourceCalculator()).ThenReturn(resourceCalculator ); Org.Mockito.Mockito.When(csContext.GetRMContext()).ThenReturn(rmContext);
// Say cluster has 100 nodes of 16G each
Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = Resources.CreateResource (100 * 16 * Gb, 100 * 16); Org.Mockito.Mockito.When(csContext.GetClusterResource()).ThenReturn(clusterResource );
// Parse the configured hierarchy and grab leaf queue 'A'.
IDictionary <string, CSQueue> queues = new Dictionary <string, CSQueue>(); CSQueue root = CapacityScheduler.ParseQueue(csContext, csConf, null, "root", queues , queues, TestUtils.spyHook); LeafQueue queue = (LeafQueue)queues[A]; Log.Info("Queue 'A' -" + " AMResourceLimit=" + queue.GetAMResourceLimit() + " UserAMResourceLimit=" + queue.GetUserAMResourceLimit());
// AM limits for the initial 100-node cluster.
NUnit.Framework.Assert.AreEqual(queue.GetAMResourceLimit(), Org.Apache.Hadoop.Yarn.Api.Records.Resource .NewInstance(160 * Gb, 1)); NUnit.Framework.Assert.AreEqual(queue.GetUserAMResourceLimit(), Org.Apache.Hadoop.Yarn.Api.Records.Resource .NewInstance(80 * Gb, 1)); NUnit.Framework.Assert.AreEqual((int)(clusterResource.GetMemory() * queue.GetAbsoluteCapacity ()), queue.GetMetrics().GetAvailableMB());
// Add some nodes to the cluster & test new limits
clusterResource = Resources.CreateResource(120 * 16 * Gb); root.UpdateClusterResource(clusterResource, new ResourceLimits(clusterResource)); NUnit.Framework.Assert.AreEqual(queue.GetAMResourceLimit(), Org.Apache.Hadoop.Yarn.Api.Records.Resource .NewInstance(192 * Gb, 1)); NUnit.Framework.Assert.AreEqual(queue.GetUserAMResourceLimit(), Org.Apache.Hadoop.Yarn.Api.Records.Resource .NewInstance(96 * Gb, 1)); NUnit.Framework.Assert.AreEqual((int)(clusterResource.GetMemory() * queue.GetAbsoluteCapacity ()), queue.GetMetrics().GetAvailableMB());
// should return -1 if per queue setting not set
NUnit.Framework.Assert.AreEqual((int)CapacitySchedulerConfiguration.Undefined, csConf .GetMaximumApplicationsPerQueue(queue.GetQueuePath()));
// Max applications derive from the system-wide default scaled by queue capacity.
int expectedMaxApps = (int)(CapacitySchedulerConfiguration.DefaultMaximumSystemApplicatiions * queue.GetAbsoluteCapacity()); NUnit.Framework.Assert.AreEqual(expectedMaxApps, queue.GetMaxApplications()); int expectedMaxAppsPerUser = (int)(expectedMaxApps * (queue.GetUserLimit() / 100.0f ) * queue.GetUserLimitFactor()); NUnit.Framework.Assert.AreEqual(expectedMaxAppsPerUser, queue.GetMaxApplicationsPerUser ());
// should default to global setting if per queue setting not set
NUnit.Framework.Assert.AreEqual((long)CapacitySchedulerConfiguration.DefaultMaximumApplicationmastersResourcePercent , (long)csConf.GetMaximumApplicationMasterResourcePerQueuePercent(queue.GetQueuePath ()));
// Change the per-queue max AM resources percentage.
csConf.SetFloat("yarn.scheduler.capacity." + queue.GetQueuePath() + ".maximum-am-resource-percent" , 0.5f);
// Re-create queues to get new configs.
queues = new Dictionary <string, CSQueue>(); root = CapacityScheduler.ParseQueue(csContext, csConf, null, "root", queues, queues , TestUtils.spyHook); clusterResource = Resources.CreateResource(100 * 16 * Gb); queue = (LeafQueue)queues[A];
// (long)0.5 truncates to 0, matching the truncated percent read-back — preserved from the original.
NUnit.Framework.Assert.AreEqual((long)0.5, (long)csConf.GetMaximumApplicationMasterResourcePerQueuePercent (queue.GetQueuePath())); NUnit.Framework.Assert.AreEqual(queue.GetAMResourceLimit(), Org.Apache.Hadoop.Yarn.Api.Records.Resource .NewInstance(800 * Gb, 1)); NUnit.Framework.Assert.AreEqual(queue.GetUserAMResourceLimit(), Org.Apache.Hadoop.Yarn.Api.Records.Resource .NewInstance(400 * Gb, 1));
// Change the per-queue max applications.
csConf.SetInt("yarn.scheduler.capacity." + queue.GetQueuePath() + ".maximum-applications" , 9999);
// Re-create queues to get new configs.
queues = new Dictionary <string, CSQueue>(); root = CapacityScheduler.ParseQueue(csContext, csConf, null, "root", queues, queues , TestUtils.spyHook); queue = (LeafQueue)queues[A];
// The explicit per-queue setting now wins over the derived default.
NUnit.Framework.Assert.AreEqual(9999, (int)csConf.GetMaximumApplicationsPerQueue( queue.GetQueuePath())); NUnit.Framework.Assert.AreEqual(9999, queue.GetMaxApplications()); expectedMaxAppsPerUser = (int)(9999 * (queue.GetUserLimit() / 100.0f) * queue.GetUserLimitFactor ()); NUnit.Framework.Assert.AreEqual(expectedMaxAppsPerUser, queue.GetMaxApplicationsPerUser ()); }