public static ClientRMService MockClientRMService(RMContext rmContext)
{
    ClientRMService clientRMService = Org.Mockito.Mockito.Mock<ClientRMService>();
    IList<ApplicationReport> appReports = new AList<ApplicationReport>();
    foreach (RMApp app in rmContext.GetRMApps().Values)
    {
        ApplicationReport appReport = ApplicationReport.NewInstance(
            app.GetApplicationId(), (ApplicationAttemptId)null, app.GetUser(), app.GetQueue(),
            app.GetName(), (string)null, 0, (Token)null, app.CreateApplicationState(),
            app.GetDiagnostics().ToString(), (string)null, app.GetStartTime(), app.GetFinishTime(),
            app.GetFinalApplicationStatus(), (ApplicationResourceUsageReport)null,
            app.GetTrackingUrl(), app.GetProgress(), app.GetApplicationType(), (Token)null);
        appReports.AddItem(appReport);
    }
    GetApplicationsResponse response = Org.Mockito.Mockito.Mock<GetApplicationsResponse>();
    Org.Mockito.Mockito.When(response.GetApplicationList()).ThenReturn(appReports);
    try
    {
        Org.Mockito.Mockito.When(clientRMService.GetApplications(Matchers.Any<GetApplicationsRequest>()))
            .ThenReturn(response);
    }
    catch (YarnException)
    {
        NUnit.Framework.Assert.Fail("Exception is not expected.");
    }
    return clientRMService;
}
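// A minimal usage sketch, not part of the original source: it combines the mock helpers in this
// class to query the stubbed ClientRMService for its canned application reports. The helper name
// and the argument values (3 apps, 2 racks, 2 nodes, 8 GB per node) are illustrative assumptions;
// GetApplicationsRequest.NewInstance() is the standard YARN request factory.
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.YarnException"/>
public static IList<ApplicationReport> ListMockedApplications()
{
    RMContext rmContext = MockRMContext(3, 2, 2, 8 * 1024);
    ClientRMService clientRM = MockClientRMService(rmContext);
    // The stub returns the same canned response for any GetApplicationsRequest.
    GetApplicationsResponse response = clientRM.GetApplications(GetApplicationsRequest.NewInstance());
    return response.GetApplicationList();
}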
/// <exception cref="System.IO.IOException"/>
public static ResourceManager MockRm(int apps, int racks, int nodes, int mbsPerNode)
{
    RMContext rmContext = MockRMContext(apps, racks, nodes, mbsPerNode);
    return MockRm(rmContext);
}
public FifoSchedulerInfo(ResourceManager rm)
{
    // JAXB needs this
    RMContext rmContext = rm.GetRMContext();
    FifoScheduler fs = (FifoScheduler)rm.GetResourceScheduler();
    qName = fs.GetQueueInfo(string.Empty, false, false).GetQueueName();
    QueueInfo qInfo = fs.GetQueueInfo(qName, true, true);
    this.usedCapacity = qInfo.GetCurrentCapacity();
    this.capacity = qInfo.GetCapacity();
    this.minQueueMemoryCapacity = fs.GetMinimumResourceCapability().GetMemory();
    this.maxQueueMemoryCapacity = fs.GetMaximumResourceCapability().GetMemory();
    this.qstate = qInfo.GetQueueState();
    this.numNodes = rmContext.GetRMNodes().Count;
    this.usedNodeCapacity = 0;
    this.availNodeCapacity = 0;
    this.totalNodeCapacity = 0;
    this.numContainers = 0;
    foreach (RMNode ni in rmContext.GetRMNodes().Values)
    {
        SchedulerNodeReport report = fs.GetNodeReport(ni.GetNodeID());
        this.usedNodeCapacity += report.GetUsedResource().GetMemory();
        this.availNodeCapacity += report.GetAvailableResource().GetMemory();
        this.totalNodeCapacity += ni.GetTotalCapability().GetMemory();
        this.numContainers += fs.GetNodeReport(ni.GetNodeID()).GetNumContainers();
    }
}
public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, string user,
    Queue queue, ActiveUsersManager activeUsersManager, RMContext rmContext)
{
    // This pendingRelease is used in work-preserving recovery scenario to keep
    // track of the AM's outstanding release requests. RM on recovery could
    // receive the release request from AM before it receives the container status
    // from NM for recovery. In this case, the to-be-recovered containers reported
    // by NM should not be recovered.
    // Time of the last container scheduled at the current allowed level
    Preconditions.CheckNotNull(rmContext, "RMContext should not be null");
    this.rmContext = rmContext;
    this.appSchedulingInfo = new AppSchedulingInfo(applicationAttemptId, user, queue,
        activeUsersManager, rmContext.GetEpoch());
    this.queue = queue;
    this.pendingRelease = new HashSet<ContainerId>();
    this.attemptId = applicationAttemptId;
    if (rmContext.GetRMApps() != null &&
        rmContext.GetRMApps().Contains(applicationAttemptId.GetApplicationId()))
    {
        ApplicationSubmissionContext appSubmissionContext =
            rmContext.GetRMApps()[applicationAttemptId.GetApplicationId()].GetApplicationSubmissionContext();
        if (appSubmissionContext != null)
        {
            unmanagedAM = appSubmissionContext.GetUnmanagedAM();
            this.logAggregationContext = appSubmissionContext.GetLogAggregationContext();
        }
    }
}
public virtual void Setup()
{
    // setup a context / conf
    csConf = new CapacitySchedulerConfiguration();
    YarnConfiguration conf = new YarnConfiguration();
    csContext = Org.Mockito.Mockito.Mock<CapacitySchedulerContext>();
    Org.Mockito.Mockito.When(csContext.GetConfiguration()).ThenReturn(csConf);
    Org.Mockito.Mockito.When(csContext.GetConf()).ThenReturn(conf);
    Org.Mockito.Mockito.When(csContext.GetMinimumResourceCapability())
        .ThenReturn(Resources.CreateResource(Gb, 1));
    Org.Mockito.Mockito.When(csContext.GetMaximumResourceCapability())
        .ThenReturn(Resources.CreateResource(16 * Gb, 32));
    Org.Mockito.Mockito.When(csContext.GetClusterResource())
        .ThenReturn(Resources.CreateResource(100 * 16 * Gb, 100 * 32));
    Org.Mockito.Mockito.When(csContext.GetResourceCalculator()).ThenReturn(resourceCalculator);
    RMContext mockRMContext = TestUtils.GetMockRMContext();
    Org.Mockito.Mockito.When(csContext.GetRMContext()).ThenReturn(mockRMContext);
    // create a queue
    PlanQueue pq = new PlanQueue(csContext, "root", null, null);
    reservationQueue = new ReservationQueue(csContext, "a", pq);
}
public TestRMAppManager(TestAppManager _enclosing, RMContext context,
    ClientToAMTokenSecretManagerInRM clientToAMSecretManager, YarnScheduler scheduler,
    ApplicationMasterService masterService, ApplicationACLsManager applicationACLsManager,
    Configuration conf)
    : base(context, scheduler, masterService, applicationACLsManager, conf)
{
    this._enclosing = _enclosing;
}
public virtual void TestStateStoreAppLimitLargerThanMemoryAppLimit()
{
    long now = Runtime.CurrentTimeMillis();
    RMContext rmContext = MockRMContext(10, now - 20000);
    Configuration conf = new YarnConfiguration();
    int maxAppsInMemory = 8;
    conf.SetInt(YarnConfiguration.RmMaxCompletedApplications, maxAppsInMemory);
    // larger than maxCompletedAppsInMemory, reset to RM_MAX_COMPLETED_APPLICATIONS.
    conf.SetInt(YarnConfiguration.RmStateStoreMaxCompletedApplications, 1000);
    TestAppManager.TestRMAppManager appMonitor =
        new TestAppManager.TestRMAppManager(this, rmContext, conf);
    AddToCompletedApps(appMonitor, rmContext);
    NUnit.Framework.Assert.AreEqual("Number of completed apps incorrect", 10,
        appMonitor.GetCompletedAppsListSize());
    appMonitor.CheckAppNumCompletedLimit();
    int numRemoveApps = 10 - maxAppsInMemory;
    NUnit.Framework.Assert.AreEqual("Number of apps incorrect after # completed check",
        maxAppsInMemory, rmContext.GetRMApps().Count);
    NUnit.Framework.Assert.AreEqual("Number of completed apps incorrect after check",
        maxAppsInMemory, appMonitor.GetCompletedAppsListSize());
    Org.Mockito.Mockito.Verify(rmContext.GetStateStore(),
        Org.Mockito.Mockito.Times(numRemoveApps)).RemoveApplication(Matchers.IsA<RMApp>());
    NUnit.Framework.Assert.AreEqual(maxAppsInMemory, appMonitor.GetCompletedAppsInStateStore());
}
/// <exception cref="System.Exception"/>
public virtual void TestReconnectedNode()
{
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    conf.SetQueues("default", new string[] { "default" });
    conf.SetCapacity("default", 100);
    FifoScheduler fs = new FifoScheduler();
    fs.Init(conf);
    fs.Start();
    // mock rmContext to avoid NPE.
    RMContext context = Org.Mockito.Mockito.Mock<RMContext>();
    fs.Reinitialize(conf, null);
    fs.SetRMContext(context);
    RMNode n1 = MockNodes.NewNodeInfo(0, MockNodes.NewResource(4 * Gb), 1, "127.0.0.2");
    RMNode n2 = MockNodes.NewNodeInfo(0, MockNodes.NewResource(2 * Gb), 2, "127.0.0.3");
    fs.Handle(new NodeAddedSchedulerEvent(n1));
    fs.Handle(new NodeAddedSchedulerEvent(n2));
    fs.Handle(new NodeUpdateSchedulerEvent(n1));
    NUnit.Framework.Assert.AreEqual(6 * Gb, fs.GetRootQueueMetrics().GetAvailableMB());
    // reconnect n1 with downgraded memory
    n1 = MockNodes.NewNodeInfo(0, MockNodes.NewResource(2 * Gb), 1, "127.0.0.2");
    fs.Handle(new NodeRemovedSchedulerEvent(n1));
    fs.Handle(new NodeAddedSchedulerEvent(n1));
    fs.Handle(new NodeUpdateSchedulerEvent(n1));
    NUnit.Framework.Assert.AreEqual(4 * Gb, fs.GetRootQueueMetrics().GetAvailableMB());
    fs.Stop();
}
public MockAM(RMContext context, ApplicationMasterProtocol amRMProtocol,
    ApplicationAttemptId attemptId)
{
    this.context = context;
    this.amRMProtocol = amRMProtocol;
    this.attemptId = attemptId;
}
public ApplicationMasterLauncher(RMContext context)
    : base(typeof(Org.Apache.Hadoop.Yarn.Server.Resourcemanager.Amlauncher.ApplicationMasterLauncher).FullName)
{
    this.context = context;
    this.launcherHandlingThread = new ApplicationMasterLauncher.LauncherThread(this);
}
public static IList<RMNode> QueryRMNodes(RMContext context, EnumSet<NodeState> acceptedStates)
{
    // nodes contains nodes that are NEW, RUNNING OR UNHEALTHY
    AList<RMNode> results = new AList<RMNode>();
    if (acceptedStates.Contains(NodeState.New) || acceptedStates.Contains(NodeState.Running) ||
        acceptedStates.Contains(NodeState.Unhealthy))
    {
        foreach (RMNode rmNode in context.GetRMNodes().Values)
        {
            if (acceptedStates.Contains(rmNode.GetState()))
            {
                results.AddItem(rmNode);
            }
        }
    }
    // inactiveNodes contains nodes that are DECOMMISSIONED, LOST, OR REBOOTED
    if (acceptedStates.Contains(NodeState.Decommissioned) || acceptedStates.Contains(NodeState.Lost) ||
        acceptedStates.Contains(NodeState.Rebooted))
    {
        foreach (RMNode rmNode in context.GetInactiveRMNodes().Values)
        {
            if (acceptedStates.Contains(rmNode.GetState()))
            {
                results.AddItem(rmNode);
            }
        }
    }
    return results;
}
public override void SetRMContext(RMContext rmContext)
{
    lock (this)
    {
        this.rmContext = rmContext;
    }
}
/// <exception cref="System.IO.IOException"/>
private void InitializeNodeLabels()
{
    RMContext rmContext = distShellTest.yarnCluster.GetResourceManager(0).GetRMContext();
    // Setup node labels
    RMNodeLabelsManager labelsMgr = rmContext.GetNodeLabelManager();
    ICollection<string> labels = new HashSet<string>();
    labels.AddItem("x");
    labelsMgr.AddToCluserNodeLabels(labels);
    // Setup queue access to node labels
    distShellTest.conf.Set("yarn.scheduler.capacity.root.accessible-node-labels", "x");
    distShellTest.conf.Set("yarn.scheduler.capacity.root.accessible-node-labels.x.capacity", "100");
    distShellTest.conf.Set("yarn.scheduler.capacity.root.default.accessible-node-labels", "x");
    distShellTest.conf.Set("yarn.scheduler.capacity.root.default.accessible-node-labels.x.capacity", "100");
    rmContext.GetScheduler().Reinitialize(distShellTest.conf, rmContext);
    // Fetch node-ids from yarn cluster
    NodeId[] nodeIds = new NodeId[NumNms];
    for (int i = 0; i < NumNms; i++)
    {
        NodeManager mgr = distShellTest.yarnCluster.GetNodeManager(i);
        nodeIds[i] = mgr.GetNMContext().GetNodeId();
    }
    // Set label x to NM[1]
    labelsMgr.AddLabelsToNode(ImmutableMap.Of(nodeIds[1], labels));
}
public virtual void TestRMNMInfoMissmatch()
{
    RMContext rmc = Org.Mockito.Mockito.Mock<RMContext>();
    ResourceScheduler rms = Org.Mockito.Mockito.Mock<ResourceScheduler>();
    ConcurrentMap<NodeId, RMNode> map = new ConcurrentHashMap<NodeId, RMNode>();
    RMNode node = MockNodes.NewNodeInfo(1, MockNodes.NewResource(4 * 1024));
    map[node.GetNodeID()] = node;
    Org.Mockito.Mockito.When(rmc.GetRMNodes()).ThenReturn(map);
    RMNMInfo rmInfo = new RMNMInfo(rmc, rms);
    string liveNMs = rmInfo.GetLiveNodeManagers();
    ObjectMapper mapper = new ObjectMapper();
    JsonNode jn = mapper.ReadTree(liveNMs);
    NUnit.Framework.Assert.AreEqual("Unexpected number of live nodes:", 1, jn.Size());
    IEnumerator<JsonNode> it = jn.GetEnumerator();
    while (it.HasNext())
    {
        JsonNode n = it.Next();
        NUnit.Framework.Assert.IsNotNull(n.Get("HostName"));
        NUnit.Framework.Assert.IsNotNull(n.Get("Rack"));
        NUnit.Framework.Assert.IsTrue("Node " + n.Get("NodeId") + " should be RUNNING",
            n.Get("State").AsText().Contains("RUNNING"));
        NUnit.Framework.Assert.IsNotNull(n.Get("NodeHTTPAddress"));
        NUnit.Framework.Assert.IsNotNull(n.Get("LastHealthUpdate"));
        NUnit.Framework.Assert.IsNotNull(n.Get("HealthReport"));
        NUnit.Framework.Assert.IsNotNull(n.Get("NodeManagerVersion"));
        NUnit.Framework.Assert.IsNull(n.Get("NumContainers"));
        NUnit.Framework.Assert.IsNull(n.Get("UsedMemoryMB"));
        NUnit.Framework.Assert.IsNull(n.Get("AvailableMemoryMB"));
    }
}
/// <exception cref="System.IO.IOException"/>
public override void Reinitialize(Configuration conf, RMContext rmContext)
{
    lock (this)
    {
        SetConf(conf);
    }
}
public virtual void TestHeadroom()
{
    FairScheduler mockScheduler = Org.Mockito.Mockito.Mock<FairScheduler>();
    Org.Mockito.Mockito.When(mockScheduler.GetClock()).ThenReturn(scheduler.GetClock());
    FSLeafQueue mockQueue = Org.Mockito.Mockito.Mock<FSLeafQueue>();
    Resource queueMaxResources = Resource.NewInstance(5 * 1024, 3);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueFairShare = Resources.CreateResource(4096, 2);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueUsage =
        Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(2048, 2);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueStarvation =
        Resources.Subtract(queueFairShare, queueUsage);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueMaxResourcesAvailable =
        Resources.Subtract(queueMaxResources, queueUsage);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = Resources.CreateResource(8192, 8);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterUsage = Resources.CreateResource(2048, 2);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterAvailable =
        Resources.Subtract(clusterResource, clusterUsage);
    QueueMetrics fakeRootQueueMetrics = Org.Mockito.Mockito.Mock<QueueMetrics>();
    Org.Mockito.Mockito.When(mockQueue.GetMaxShare()).ThenReturn(queueMaxResources);
    Org.Mockito.Mockito.When(mockQueue.GetFairShare()).ThenReturn(queueFairShare);
    Org.Mockito.Mockito.When(mockQueue.GetResourceUsage()).ThenReturn(queueUsage);
    Org.Mockito.Mockito.When(mockScheduler.GetClusterResource()).ThenReturn(clusterResource);
    Org.Mockito.Mockito.When(fakeRootQueueMetrics.GetAllocatedResources()).ThenReturn(clusterUsage);
    Org.Mockito.Mockito.When(mockScheduler.GetRootQueueMetrics()).ThenReturn(fakeRootQueueMetrics);
    ApplicationAttemptId applicationAttemptId = CreateAppAttemptId(1, 1);
    RMContext rmContext = resourceManager.GetRMContext();
    FSAppAttempt schedulerApp = new FSAppAttempt(mockScheduler, applicationAttemptId,
        "user1", mockQueue, null, rmContext);
    // Min of Memory and CPU across cluster and queue is used in
    // DominantResourceFairnessPolicy
    Org.Mockito.Mockito.When(mockQueue.GetPolicy())
        .ThenReturn(SchedulingPolicy.GetInstance(typeof(DominantResourceFairnessPolicy)));
    VerifyHeadroom(schedulerApp,
        Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(), queueMaxResourcesAvailable.GetMemory()),
        Min(queueStarvation.GetVirtualCores(), clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
    // Fair and Fifo ignore CPU of queue, so use cluster available CPU
    Org.Mockito.Mockito.When(mockQueue.GetPolicy())
        .ThenReturn(SchedulingPolicy.GetInstance(typeof(FairSharePolicy)));
    VerifyHeadroom(schedulerApp,
        Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(), queueMaxResourcesAvailable.GetMemory()),
        Math.Min(clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
    Org.Mockito.Mockito.When(mockQueue.GetPolicy())
        .ThenReturn(SchedulingPolicy.GetInstance(typeof(FifoPolicy)));
    VerifyHeadroom(schedulerApp,
        Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(), queueMaxResourcesAvailable.GetMemory()),
        Math.Min(clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
}
public virtual void SetUp()
{
    RMContext mockRMContext = TestRMWebApp.MockRMContext(3, numberOfRacks, numberOfNodesPerRack,
        8 * TestRMWebApp.GiB);
    injector = WebAppTests.CreateMockInjector<RMContext>(mockRMContext, new _Module_63(mockRMContext));
}
/// <summary>Create a secret manager</summary>
/// <param name="delegationKeyUpdateInterval">
/// the number of seconds for rolling new
/// secret keys.
/// </param>
/// <param name="delegationTokenMaxLifetime">
/// the maximum lifetime of the delegation
/// tokens
/// </param>
/// <param name="delegationTokenRenewInterval">how often the tokens must be renewed</param>
/// <param name="delegationTokenRemoverScanInterval">
/// how often the tokens are scanned
/// for expired tokens
/// </param>
public RMDelegationTokenSecretManager(long delegationKeyUpdateInterval, long delegationTokenMaxLifetime,
    long delegationTokenRenewInterval, long delegationTokenRemoverScanInterval, RMContext rmContext)
    : base(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval,
        delegationTokenRemoverScanInterval)
{
    this.rmContext = rmContext;
}
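// A minimal construction sketch, not part of the original source. The helper name and interval
// values below are illustrative assumptions, expressed in milliseconds to match the test snippet
// that follows (it labels 1000 as one second), even though the summary above says "seconds".
public static RMDelegationTokenSecretManager CreateDefaultSecretManager(RMContext rmContext)
{
    long dayMs = 24 * 60 * 60 * 1000L;
    return new RMDelegationTokenSecretManager(
        dayMs,           // roll a new master key once a day
        7 * dayMs,       // tokens live at most a week
        dayMs,           // tokens must be renewed daily
        60 * 60 * 1000L, // scan for expired tokens hourly
        rmContext);
}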
protected internal override RMDelegationTokenSecretManager CreateRMDelegationTokenSecretManager(
    Configuration conf, RMContext rmContext)
{
    // KeyUpdateInterval -> 1 second
    // TokenMaxLifetime -> 2 seconds
    return new TestRMDelegationTokens.TestRMDelegationTokenSecretManager(this, 1000, 1000, 2000,
        1000, rmContext);
}
public ApplicationMasterService(RMContext rmContext, YarnScheduler scheduler)
    : base(typeof(Org.Apache.Hadoop.Yarn.Server.Resourcemanager.ApplicationMasterService).FullName)
{
    this.amLivelinessMonitor = rmContext.GetAMLivelinessMonitor();
    this.rScheduler = scheduler;
    this.rmContext = rmContext;
}
public MyRMAppManager(RMContext context, YarnScheduler scheduler, ApplicationMasterService masterService,
    ApplicationACLsManager applicationACLsManager, Configuration conf)
    : base(context, scheduler, masterService, applicationACLsManager, conf)
{
    this.conf = conf;
    this.rmContext = context;
}
public AdminService(ResourceManager rm, RMContext rmContext)
    : base(typeof(Org.Apache.Hadoop.Yarn.Server.Resourcemanager.AdminService).FullName)
{
    // Address to use for binding. May be a wildcard address.
    this.rm = rm;
    this.rmContext = rmContext;
}
public MyClientRMService(RMContext rmContext, YarnScheduler scheduler, RMAppManager rmAppManager,
    ApplicationACLsManager applicationACLsManager, QueueACLsManager queueACLsManager,
    RMDelegationTokenSecretManager rmDTSecretManager)
    : base(rmContext, scheduler, rmAppManager, applicationACLsManager, queueACLsManager, rmDTSecretManager)
{
    this.rmContext = rmContext;
}
public static void InitializeRMContext(int numContainers, AbstractYarnScheduler scheduler,
    RMContext mockRMContext)
{
    Org.Mockito.Mockito.When(mockRMContext.GetScheduler()).ThenReturn(scheduler);
    Resource r = CalculateClusterResource(numContainers);
    Org.Mockito.Mockito.DoReturn(r).When(scheduler).GetClusterResource();
}
private static RMContext GetMockRMContext()
{
    RMContext rmContext = Org.Mockito.Mockito.Mock<RMContext>();
    RMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
    nlm.Init(new Configuration(false));
    Org.Mockito.Mockito.When(rmContext.GetNodeLabelManager()).ThenReturn(nlm);
    return rmContext;
}
/// <exception cref="System.IO.IOException"/>
private static FairScheduler MockFairSchedulerWithoutApps(RMContext rmContext)
{
    FairScheduler fs = new _FairScheduler_207();
    FairSchedulerConfiguration conf = new FairSchedulerConfiguration();
    fs.SetRMContext(rmContext);
    fs.Init(conf);
    return fs;
}
public SchedulingMonitor(RMContext rmContext, SchedulingEditPolicy scheduleEditPolicy)
    : base("SchedulingMonitor (" + scheduleEditPolicy.GetPolicyName() + ")")
{
    // thread which runs periodically to see the last time since a heartbeat is
    // received.
    this.scheduleEditPolicy = scheduleEditPolicy;
    this.rmContext = rmContext;
}
public AMLauncher(RMContext rmContext, RMAppAttempt application, AMLauncherEventType eventType,
    Configuration conf)
{
    this.application = application;
    this.conf = conf;
    this.eventType = eventType;
    this.rmContext = rmContext;
    this.handler = rmContext.GetDispatcher().GetEventHandler();
    this.masterContainer = application.GetMasterContainer();
}
/// <exception cref="System.Exception"/>
public virtual void TestAMRMTokenSecretManagerStateStore(
    RMStateStoreTestBase.RMStateStoreHelper stateStoreHelper)
{
    System.Console.Out.WriteLine("Start testing");
    RMStateStore store = stateStoreHelper.GetRMStateStore();
    RMStateStoreTestBase.TestDispatcher dispatcher = new RMStateStoreTestBase.TestDispatcher();
    store.SetRMDispatcher(dispatcher);
    RMContext rmContext = Org.Mockito.Mockito.Mock<RMContext>();
    Org.Mockito.Mockito.When(rmContext.GetStateStore()).ThenReturn(store);
    Configuration conf = new YarnConfiguration();
    AMRMTokenSecretManager appTokenMgr = new AMRMTokenSecretManager(conf, rmContext);
    // create and save the first masterkey
    MasterKeyData firstMasterKeyData = appTokenMgr.CreateNewMasterKey();
    AMRMTokenSecretManagerState state1 =
        AMRMTokenSecretManagerState.NewInstance(firstMasterKeyData.GetMasterKey(), null);
    rmContext.GetStateStore().StoreOrUpdateAMRMTokenSecretManager(state1, false);
    // load state
    store = stateStoreHelper.GetRMStateStore();
    Org.Mockito.Mockito.When(rmContext.GetStateStore()).ThenReturn(store);
    store.SetRMDispatcher(dispatcher);
    RMStateStore.RMState state = store.LoadState();
    NUnit.Framework.Assert.IsNotNull(state.GetAMRMTokenSecretManagerState());
    NUnit.Framework.Assert.AreEqual(firstMasterKeyData.GetMasterKey(),
        state.GetAMRMTokenSecretManagerState().GetCurrentMasterKey());
    NUnit.Framework.Assert.IsNull(state.GetAMRMTokenSecretManagerState().GetNextMasterKey());
    // create and save the second masterkey
    MasterKeyData secondMasterKeyData = appTokenMgr.CreateNewMasterKey();
    AMRMTokenSecretManagerState state2 = AMRMTokenSecretManagerState.NewInstance(
        firstMasterKeyData.GetMasterKey(), secondMasterKeyData.GetMasterKey());
    rmContext.GetStateStore().StoreOrUpdateAMRMTokenSecretManager(state2, true);
    // load state
    store = stateStoreHelper.GetRMStateStore();
    Org.Mockito.Mockito.When(rmContext.GetStateStore()).ThenReturn(store);
    store.SetRMDispatcher(dispatcher);
    RMStateStore.RMState state_2 = store.LoadState();
    NUnit.Framework.Assert.IsNotNull(state_2.GetAMRMTokenSecretManagerState());
    NUnit.Framework.Assert.AreEqual(firstMasterKeyData.GetMasterKey(),
        state_2.GetAMRMTokenSecretManagerState().GetCurrentMasterKey());
    NUnit.Framework.Assert.AreEqual(secondMasterKeyData.GetMasterKey(),
        state_2.GetAMRMTokenSecretManagerState().GetNextMasterKey());
    // re-create the masterKeyData based on the recovered masterkey
    // should have the same secretKey
    appTokenMgr.Recover(state_2);
    NUnit.Framework.Assert.AreEqual(appTokenMgr.GetCurrnetMasterKeyData().GetSecretKey(),
        firstMasterKeyData.GetSecretKey());
    NUnit.Framework.Assert.AreEqual(appTokenMgr.GetNextMasterKeyData().GetSecretKey(),
        secondMasterKeyData.GetSecretKey());
    store.Close();
}
public virtual void TestMove()
{
    string user = "******";
    Queue parentQueue = CreateQueue("parent", null);
    Queue oldQueue = CreateQueue("old", parentQueue);
    Queue newQueue = CreateQueue("new", parentQueue);
    QueueMetrics parentMetrics = parentQueue.GetMetrics();
    QueueMetrics oldMetrics = oldQueue.GetMetrics();
    QueueMetrics newMetrics = newQueue.GetMetrics();
    ApplicationAttemptId appAttId = CreateAppAttemptId(0, 0);
    RMContext rmContext = Org.Mockito.Mockito.Mock<RMContext>();
    Org.Mockito.Mockito.When(rmContext.GetEpoch()).ThenReturn(3L);
    SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId, user,
        oldQueue, oldQueue.GetActiveUsersManager(), rmContext);
    oldMetrics.SubmitApp(user);
    // confirm that containerId is calculated based on epoch.
    NUnit.Framework.Assert.AreEqual(unchecked((long)(0x30000000001L)), app.GetNewContainerId());
    // Resource request
    Resource requestedResource = Resource.NewInstance(1536, 2);
    Priority requestedPriority = Priority.NewInstance(2);
    ResourceRequest request = ResourceRequest.NewInstance(requestedPriority,
        ResourceRequest.Any, requestedResource, 3);
    app.UpdateResourceRequests(Arrays.AsList(request));
    // Allocated container
    RMContainer container1 = CreateRMContainer(appAttId, 1, requestedResource);
    app.liveContainers[container1.GetContainerId()] = container1;
    SchedulerNode node = CreateNode();
    app.appSchedulingInfo.Allocate(NodeType.OffSwitch, node, requestedPriority, request,
        container1.GetContainer());
    // Reserved container
    Priority prio1 = Priority.NewInstance(1);
    Resource reservedResource = Resource.NewInstance(2048, 3);
    RMContainer container2 = CreateReservedRMContainer(appAttId, 1, reservedResource,
        node.GetNodeID(), prio1);
    IDictionary<NodeId, RMContainer> reservations = new Dictionary<NodeId, RMContainer>();
    reservations[node.GetNodeID()] = container2;
    app.reservedContainers[prio1] = reservations;
    oldMetrics.ReserveResource(user, reservedResource);
    CheckQueueMetrics(oldMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
    CheckQueueMetrics(newMetrics, 0, 0, 0, 0, 0, 0, 0, 0);
    CheckQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
    app.Move(newQueue);
    CheckQueueMetrics(oldMetrics, 0, 0, 0, 0, 0, 0, 0, 0);
    CheckQueueMetrics(newMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
    CheckQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
}